From 102b0d2daa97dae68d3eed54d8fe37a9cc38a892 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 28 Apr 2024 11:13:47 +0200
Subject: Adding upstream version 2.8.0+dfsg.

Signed-off-by: Daniel Baumann
---
 plat/ti/k3/board/generic/board.mk                  |   24 +
 plat/ti/k3/board/generic/include/board_def.h       |   43 +
 plat/ti/k3/board/j784s4/board.mk                   |   24 +
 plat/ti/k3/board/j784s4/include/board_def.h        |   43 +
 plat/ti/k3/board/lite/board.mk                     |   24 +
 plat/ti/k3/board/lite/include/board_def.h          |   45 +
 plat/ti/k3/common/drivers/sec_proxy/sec_proxy.c    |  341 ++++
 plat/ti/k3/common/drivers/sec_proxy/sec_proxy.h    |   82 +
 plat/ti/k3/common/drivers/ti_sci/ti_sci.c          | 1739 ++++++++++++++++++++
 plat/ti/k3/common/drivers/ti_sci/ti_sci.h          |  232 +++
 plat/ti/k3/common/drivers/ti_sci/ti_sci_protocol.h |  734 +++++++++
 plat/ti/k3/common/k3_bl31_setup.c                  |  197 +++
 plat/ti/k3/common/k3_console.c                     |   21 +
 plat/ti/k3/common/k3_gicv3.c                       |  113 ++
 plat/ti/k3/common/k3_helpers.S                     |  155 ++
 plat/ti/k3/common/k3_psci.c                        |  295 ++++
 plat/ti/k3/common/k3_topology.c                    |   46 +
 plat/ti/k3/common/plat_common.mk                   |   95 ++
 plat/ti/k3/include/k3_console.h                    |   12 +
 plat/ti/k3/include/k3_gicv3.h                      |   20 +
 plat/ti/k3/include/plat_macros.S                   |   21 +
 plat/ti/k3/include/platform_def.h                  |  191 +++
 plat/ti/k3/platform.mk                             |   14 +
 23 files changed, 4511 insertions(+)
 create mode 100644 plat/ti/k3/board/generic/board.mk
 create mode 100644 plat/ti/k3/board/generic/include/board_def.h
 create mode 100644 plat/ti/k3/board/j784s4/board.mk
 create mode 100644 plat/ti/k3/board/j784s4/include/board_def.h
 create mode 100644 plat/ti/k3/board/lite/board.mk
 create mode 100644 plat/ti/k3/board/lite/include/board_def.h
 create mode 100644 plat/ti/k3/common/drivers/sec_proxy/sec_proxy.c
 create mode 100644 plat/ti/k3/common/drivers/sec_proxy/sec_proxy.h
 create mode 100644 plat/ti/k3/common/drivers/ti_sci/ti_sci.c
 create mode 100644 plat/ti/k3/common/drivers/ti_sci/ti_sci.h
 create mode 100644 plat/ti/k3/common/drivers/ti_sci/ti_sci_protocol.h
 create mode 100644 plat/ti/k3/common/k3_bl31_setup.c
 create mode 100644 plat/ti/k3/common/k3_console.c
 create mode 100644 plat/ti/k3/common/k3_gicv3.c
 create mode 100644 plat/ti/k3/common/k3_helpers.S
 create mode 100644 plat/ti/k3/common/k3_psci.c
 create mode 100644 plat/ti/k3/common/k3_topology.c
 create mode 100644 plat/ti/k3/common/plat_common.mk
 create mode 100644 plat/ti/k3/include/k3_console.h
 create mode 100644 plat/ti/k3/include/k3_gicv3.h
 create mode 100644 plat/ti/k3/include/plat_macros.S
 create mode 100644 plat/ti/k3/include/platform_def.h
 create mode 100644 plat/ti/k3/platform.mk
(limited to 'plat/ti/k3')

diff --git a/plat/ti/k3/board/generic/board.mk b/plat/ti/k3/board/generic/board.mk
new file mode 100644
index 0000000..ef74cd6
--- /dev/null
+++ b/plat/ti/k3/board/generic/board.mk
@@ -0,0 +1,24 @@
+#
+# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +BL32_BASE ?= 0x9e800000 +$(eval $(call add_define,BL32_BASE)) + +PRELOADED_BL33_BASE ?= 0x80080000 +$(eval $(call add_define,PRELOADED_BL33_BASE)) + +K3_HW_CONFIG_BASE ?= 0x82000000 +$(eval $(call add_define,K3_HW_CONFIG_BASE)) + +# Define sec_proxy usage as the full prioritized communication scheme +K3_SEC_PROXY_LITE := 0 +$(eval $(call add_define,K3_SEC_PROXY_LITE)) + +# System coherency is managed in hardware +USE_COHERENT_MEM := 1 + +PLAT_INCLUDES += \ + -Iplat/ti/k3/board/generic/include \ diff --git a/plat/ti/k3/board/generic/include/board_def.h b/plat/ti/k3/board/generic/include/board_def.h new file mode 100644 index 0000000..4ff687c --- /dev/null +++ b/plat/ti/k3/board/generic/include/board_def.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef BOARD_DEF_H +#define BOARD_DEF_H + +#include + +/* The ports must be in order and contiguous */ +#define K3_CLUSTER0_CORE_COUNT U(2) +#define K3_CLUSTER1_CORE_COUNT U(2) +#define K3_CLUSTER2_CORE_COUNT U(2) +#define K3_CLUSTER3_CORE_COUNT U(2) + +/* + * This RAM will be used for the bootloader including code, bss, and stacks. + * It may need to be increased if BL31 grows in size. + * + * The link addresses are determined by SEC_SRAM_BASE + offset. + * When ENABLE_PIE is set, the TF images can be loaded anywhere, so + * SEC_SRAM_BASE is really arbitrary. + * + * When ENABLE_PIE is unset, SEC_SRAM_BASE should be chosen so that + * it matches to the physical address where BL31 is loaded, that is, + * SEC_SRAM_BASE should be the base address of the RAM region. + * + * Lets make things explicit by mapping SRAM_BASE to 0x0 since ENABLE_PIE is + * defined as default for our platform. + */ +#define SEC_SRAM_BASE UL(0x00000000) /* PIE remapped on fly */ +#define SEC_SRAM_SIZE UL(0x00020000) /* 128k */ + +#define PLAT_MAX_OFF_STATE U(2) +#define PLAT_MAX_RET_STATE U(1) + +#define PLAT_PROC_START_ID U(32) +#define PLAT_PROC_DEVICE_START_ID U(202) +#define PLAT_CLUSTER_DEVICE_START_ID U(198) + +#endif /* BOARD_DEF_H */ diff --git a/plat/ti/k3/board/j784s4/board.mk b/plat/ti/k3/board/j784s4/board.mk new file mode 100644 index 0000000..92433ab --- /dev/null +++ b/plat/ti/k3/board/j784s4/board.mk @@ -0,0 +1,24 @@ +# +# Copyright (c) 2022, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +BL32_BASE ?= 0x9e800000 +$(eval $(call add_define,BL32_BASE)) + +PRELOADED_BL33_BASE ?= 0x80080000 +$(eval $(call add_define,PRELOADED_BL33_BASE)) + +K3_HW_CONFIG_BASE ?= 0x82000000 +$(eval $(call add_define,K3_HW_CONFIG_BASE)) + +# Define sec_proxy usage as the full prioritized communication scheme +K3_SEC_PROXY_LITE := 0 +$(eval $(call add_define,K3_SEC_PROXY_LITE)) + +# System coherency is managed in hardware +USE_COHERENT_MEM := 1 + +PLAT_INCLUDES += \ + -Iplat/ti/k3/board/j784s4/include \ diff --git a/plat/ti/k3/board/j784s4/include/board_def.h b/plat/ti/k3/board/j784s4/include/board_def.h new file mode 100644 index 0000000..c2debc7 --- /dev/null +++ b/plat/ti/k3/board/j784s4/include/board_def.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef BOARD_DEF_H +#define BOARD_DEF_H + +#include + +/* The ports must be in order and contiguous */ +#define K3_CLUSTER0_CORE_COUNT U(4) +#define K3_CLUSTER1_CORE_COUNT U(4) +#define K3_CLUSTER2_CORE_COUNT U(0) +#define K3_CLUSTER3_CORE_COUNT U(0) +/* + * This RAM will be used for the bootloader including code, bss, and stacks. + * It may need to be increased if BL31 grows in size. + * + * The link addresses are determined by SEC_SRAM_BASE + offset. + * When ENABLE_PIE is set, the TF images can be loaded anywhere, so + * SEC_SRAM_BASE is really arbitrary. + * + * When ENABLE_PIE is unset, SEC_SRAM_BASE should be chosen so that + * it matches to the physical address where BL31 is loaded, that is, + * SEC_SRAM_BASE should be the base address of the RAM region. + * + * Lets make things explicit by mapping SRAM_BASE to 0x0 since ENABLE_PIE is + * defined as default for our platform. + */ +#define SEC_SRAM_BASE UL(0x00000000) /* PIE remapped on fly */ +#define SEC_SRAM_SIZE UL(0x00020000) /* 128k */ + +#define PLAT_MAX_OFF_STATE U(2) +#define PLAT_MAX_RET_STATE U(1) + +#define PLAT_PROC_START_ID U(32) + +#define PLAT_PROC_DEVICE_START_ID U(202) +#define PLAT_CLUSTER_DEVICE_START_ID U(198) + +#endif /* BOARD_DEF_H */ diff --git a/plat/ti/k3/board/lite/board.mk b/plat/ti/k3/board/lite/board.mk new file mode 100644 index 0000000..76246be --- /dev/null +++ b/plat/ti/k3/board/lite/board.mk @@ -0,0 +1,24 @@ +# +# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +BL32_BASE ?= 0x9e800000 +$(eval $(call add_define,BL32_BASE)) + +PRELOADED_BL33_BASE ?= 0x80080000 +$(eval $(call add_define,PRELOADED_BL33_BASE)) + +K3_HW_CONFIG_BASE ?= 0x82000000 +$(eval $(call add_define,K3_HW_CONFIG_BASE)) + +# Define sec_proxy usage as the lite version +K3_SEC_PROXY_LITE := 1 +$(eval $(call add_define,K3_SEC_PROXY_LITE)) + +# We dont have system level coherency capability +USE_COHERENT_MEM := 0 + +PLAT_INCLUDES += \ + -Iplat/ti/k3/board/lite/include \ diff --git a/plat/ti/k3/board/lite/include/board_def.h b/plat/ti/k3/board/lite/include/board_def.h new file mode 100644 index 0000000..fd4e5b1 --- /dev/null +++ b/plat/ti/k3/board/lite/include/board_def.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef BOARD_DEF_H +#define BOARD_DEF_H + +#include + +/* The ports must be in order and contiguous */ +#define K3_CLUSTER0_CORE_COUNT U(4) +#define K3_CLUSTER1_CORE_COUNT U(0) +#define K3_CLUSTER2_CORE_COUNT U(0) +#define K3_CLUSTER3_CORE_COUNT U(0) + +/* + * This RAM will be used for the bootloader including code, bss, and stacks. + * It may need to be increased if BL31 grows in size. + * Current computation assumes data structures necessary for GIC and ARM for + * a single cluster of 4 processor. + * + * The link addresses are determined by SEC_SRAM_BASE + offset. + * When ENABLE_PIE is set, the TF images can be loaded anywhere, so + * SEC_SRAM_BASE is really arbitrary. + * + * When ENABLE_PIE is unset, SEC_SRAM_BASE should be chosen so that + * it matches to the physical address where BL31 is loaded, that is, + * SEC_SRAM_BASE should be the base address of the RAM region. + * + * Lets make things explicit by mapping SRAM_BASE to 0x0 since ENABLE_PIE is + * defined as default for our platform. 
+ */ +#define SEC_SRAM_BASE UL(0x00000000) /* PIE remapped on fly */ +#define SEC_SRAM_SIZE UL(0x00020000) /* 128k */ + +#define PLAT_MAX_OFF_STATE U(2) +#define PLAT_MAX_RET_STATE U(1) + +#define PLAT_PROC_START_ID U(32) +#define PLAT_PROC_DEVICE_START_ID U(135) +#define PLAT_CLUSTER_DEVICE_START_ID U(134) + +#endif /* BOARD_DEF_H */ diff --git a/plat/ti/k3/common/drivers/sec_proxy/sec_proxy.c b/plat/ti/k3/common/drivers/sec_proxy/sec_proxy.c new file mode 100644 index 0000000..a0bfdee --- /dev/null +++ b/plat/ti/k3/common/drivers/sec_proxy/sec_proxy.c @@ -0,0 +1,341 @@ +/* + * Texas Instruments K3 Secure Proxy Driver + * Based on Linux and U-Boot implementation + * + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include "sec_proxy.h" + +/* SEC PROXY RT THREAD STATUS */ +#define RT_THREAD_STATUS (0x0) +#define RT_THREAD_STATUS_ERROR_SHIFT (31) +#define RT_THREAD_STATUS_ERROR_MASK BIT(31) +#define RT_THREAD_STATUS_CUR_CNT_SHIFT (0) +#define RT_THREAD_STATUS_CUR_CNT_MASK GENMASK(7, 0) + +/* SEC PROXY SCFG THREAD CTRL */ +#define SCFG_THREAD_CTRL (0x1000) +#define SCFG_THREAD_CTRL_DIR_SHIFT (31) +#define SCFG_THREAD_CTRL_DIR_MASK BIT(31) + +#define SEC_PROXY_THREAD(base, x) ((base) + (0x1000 * (x))) +#define THREAD_IS_RX (1) +#define THREAD_IS_TX (0) + +/** + * struct k3_sec_proxy_desc - Description of secure proxy integration + * @timeout_us: Timeout for communication (in Microseconds) + * @max_msg_size: Message size in bytes + * @data_start_offset: Offset of the First data register of the thread + * @data_end_offset: Offset of the Last data register of the thread + */ +struct k3_sec_proxy_desc { + uint32_t timeout_us; + uint16_t max_msg_size; + uint16_t data_start_offset; + uint16_t data_end_offset; +}; + +/** + * struct k3_sec_proxy_thread - Description of a Secure Proxy Thread + * @name: Thread Name + * @data: Thread Data path region for target + * @scfg: Secure Config Region for Thread + * @rt: RealTime Region for Thread + */ +struct k3_sec_proxy_thread { + const char *name; + uintptr_t data; + uintptr_t scfg; + uintptr_t rt; +}; + +/** + * struct k3_sec_proxy_mbox - Description of a Secure Proxy Instance + * @desc: Description of the SoC integration + * @chans: Array for valid thread instances + */ +struct k3_sec_proxy_mbox { + const struct k3_sec_proxy_desc desc; + struct k3_sec_proxy_thread threads[]; +}; + +/* + * Thread ID #0: DMSC notify + * Thread ID #1: DMSC request response + * Thread ID #2: DMSC request high priority + * Thread ID #3: DMSC request low priority + * Thread ID #4: DMSC notify response + */ +#define SP_THREAD(_x) \ + [_x] = { \ + .name = #_x, \ + .data = SEC_PROXY_THREAD(SEC_PROXY_DATA_BASE, _x), \ + .scfg = SEC_PROXY_THREAD(SEC_PROXY_SCFG_BASE, _x), \ + .rt = SEC_PROXY_THREAD(SEC_PROXY_RT_BASE, _x), \ + } + +static struct k3_sec_proxy_mbox spm = { + .desc = { + .timeout_us = SEC_PROXY_TIMEOUT_US, + .max_msg_size = SEC_PROXY_MAX_MESSAGE_SIZE, + .data_start_offset = 0x4, + .data_end_offset = 0x3C, + }, + .threads = { +#if !K3_SEC_PROXY_LITE + SP_THREAD(SP_NOTIFY), + SP_THREAD(SP_RESPONSE), + SP_THREAD(SP_HIGH_PRIORITY), + SP_THREAD(SP_LOW_PRIORITY), + SP_THREAD(SP_NOTIFY_RESP), +#else + SP_THREAD(SP_RESPONSE), + SP_THREAD(SP_HIGH_PRIORITY), +#endif /* K3_SEC_PROXY_LITE */ + }, +}; + +/** + * struct sec_msg_hdr - Message header for secure messages and responses + * @checksum: CRC of message for 
integrity checking + */ +union sec_msg_hdr { + struct { + uint16_t checksum; + uint16_t reserved; + } __packed; + uint32_t data; +}; + +/** + * k3_sec_proxy_verify_thread() - Verify thread status before + * sending/receiving data + * @spt: Pointer to Secure Proxy thread description + * @dir: Direction of the thread + * + * Return: 0 if all goes well, else appropriate error message + */ +static inline int k3_sec_proxy_verify_thread(struct k3_sec_proxy_thread *spt, + uint32_t dir) +{ + /* Check for any errors already available */ + if (mmio_read_32(spt->rt + RT_THREAD_STATUS) & + RT_THREAD_STATUS_ERROR_MASK) { + ERROR("Thread %s is corrupted, cannot send data\n", spt->name); + return -EINVAL; + } + + /* Make sure thread is configured for right direction */ + if ((mmio_read_32(spt->scfg + SCFG_THREAD_CTRL) & SCFG_THREAD_CTRL_DIR_MASK) + != (dir << SCFG_THREAD_CTRL_DIR_SHIFT)) { + if (dir == THREAD_IS_TX) + ERROR("Trying to send data on RX Thread %s\n", + spt->name); + else + ERROR("Trying to receive data on TX Thread %s\n", + spt->name); + return -EINVAL; + } + + /* Check the message queue before sending/receiving data */ + uint32_t tick_start = (uint32_t)read_cntpct_el0(); + uint32_t ticks_per_us = SYS_COUNTER_FREQ_IN_TICKS / 1000000; + while (!(mmio_read_32(spt->rt + RT_THREAD_STATUS) & RT_THREAD_STATUS_CUR_CNT_MASK)) { + VERBOSE("Waiting for thread %s to %s\n", + spt->name, (dir == THREAD_IS_TX) ? "empty" : "fill"); + if (((uint32_t)read_cntpct_el0() - tick_start) > + (spm.desc.timeout_us * ticks_per_us)) { + ERROR("Timeout waiting for thread %s to %s\n", + spt->name, (dir == THREAD_IS_TX) ? "empty" : "fill"); + return -ETIMEDOUT; + } + } + + return 0; +} + +/** + * k3_sec_proxy_clear_rx_thread() - Clear Secure Proxy thread + * + * @id: Channel Identifier + * + * Return: 0 if all goes well, else appropriate error message + */ +int k3_sec_proxy_clear_rx_thread(enum k3_sec_proxy_chan_id id) +{ + struct k3_sec_proxy_thread *spt = &spm.threads[id]; + + /* Check for any errors already available */ + if (mmio_read_32(spt->rt + RT_THREAD_STATUS) & + RT_THREAD_STATUS_ERROR_MASK) { + ERROR("Thread %s is corrupted, cannot send data\n", spt->name); + return -EINVAL; + } + + /* Make sure thread is configured for right direction */ + if (!(mmio_read_32(spt->scfg + SCFG_THREAD_CTRL) & SCFG_THREAD_CTRL_DIR_MASK)) { + ERROR("Cannot clear a transmit thread %s\n", spt->name); + return -EINVAL; + } + + /* Read off messages from thread until empty */ + uint32_t try_count = 10; + while (mmio_read_32(spt->rt + RT_THREAD_STATUS) & RT_THREAD_STATUS_CUR_CNT_MASK) { + if (!(try_count--)) { + ERROR("Could not clear all messages from thread %s\n", spt->name); + return -ETIMEDOUT; + } + WARN("Clearing message from thread %s\n", spt->name); + mmio_read_32(spt->data + spm.desc.data_end_offset); + } + + return 0; +} + +/** + * k3_sec_proxy_send() - Send data over a Secure Proxy thread + * @id: Channel Identifier + * @msg: Pointer to k3_sec_proxy_msg + * + * Return: 0 if all goes well, else appropriate error message + */ +int k3_sec_proxy_send(enum k3_sec_proxy_chan_id id, const struct k3_sec_proxy_msg *msg) +{ + struct k3_sec_proxy_thread *spt = &spm.threads[id]; + union sec_msg_hdr secure_header; + int num_words, trail_bytes, i, ret; + uintptr_t data_reg; + + ret = k3_sec_proxy_verify_thread(spt, THREAD_IS_TX); + if (ret) { + ERROR("Thread %s verification failed (%d)\n", spt->name, ret); + return ret; + } + + /* Check the message size */ + if (msg->len + sizeof(secure_header) > spm.desc.max_msg_size) { + ERROR("Thread 
%s message length %lu > max msg size\n", + spt->name, msg->len); + return -EINVAL; + } + + /* TODO: Calculate checksum */ + secure_header.checksum = 0; + + /* Send the secure header */ + data_reg = spm.desc.data_start_offset; + mmio_write_32(spt->data + data_reg, secure_header.data); + data_reg += sizeof(uint32_t); + + /* Send whole words */ + num_words = msg->len / sizeof(uint32_t); + for (i = 0; i < num_words; i++) { + mmio_write_32(spt->data + data_reg, ((uint32_t *)msg->buf)[i]); + data_reg += sizeof(uint32_t); + } + + /* Send remaining bytes */ + trail_bytes = msg->len % sizeof(uint32_t); + if (trail_bytes) { + uint32_t data_trail = 0; + + i = msg->len - trail_bytes; + while (trail_bytes--) { + data_trail <<= 8; + data_trail |= msg->buf[i++]; + } + + mmio_write_32(spt->data + data_reg, data_trail); + data_reg += sizeof(uint32_t); + } + /* + * 'data_reg' indicates next register to write. If we did not already + * write on tx complete reg(last reg), we must do so for transmit + * In addition, we also need to make sure all intermediate data + * registers(if any required), are reset to 0 for TISCI backward + * compatibility to be maintained. + */ + while (data_reg <= spm.desc.data_end_offset) { + mmio_write_32(spt->data + data_reg, 0); + data_reg += sizeof(uint32_t); + } + + VERBOSE("Message successfully sent on thread %s\n", spt->name); + + return 0; +} + +/** + * k3_sec_proxy_recv() - Receive data from a Secure Proxy thread + * @id: Channel Identifier + * @msg: Pointer to k3_sec_proxy_msg + * + * Return: 0 if all goes well, else appropriate error message + */ +int k3_sec_proxy_recv(enum k3_sec_proxy_chan_id id, struct k3_sec_proxy_msg *msg) +{ + struct k3_sec_proxy_thread *spt = &spm.threads[id]; + union sec_msg_hdr secure_header; + uintptr_t data_reg; + int num_words, trail_bytes, i, ret; + + ret = k3_sec_proxy_verify_thread(spt, THREAD_IS_RX); + if (ret) { + ERROR("Thread %s verification failed (%d)\n", spt->name, ret); + return ret; + } + + /* Read secure header */ + data_reg = spm.desc.data_start_offset; + secure_header.data = mmio_read_32(spt->data + data_reg); + data_reg += sizeof(uint32_t); + + /* Read whole words */ + num_words = msg->len / sizeof(uint32_t); + for (i = 0; i < num_words; i++) { + ((uint32_t *)msg->buf)[i] = mmio_read_32(spt->data + data_reg); + data_reg += sizeof(uint32_t); + } + + /* Read remaining bytes */ + trail_bytes = msg->len % sizeof(uint32_t); + if (trail_bytes) { + uint32_t data_trail = mmio_read_32(spt->data + data_reg); + data_reg += sizeof(uint32_t); + + i = msg->len - trail_bytes; + while (trail_bytes--) { + msg->buf[i] = data_trail & 0xff; + data_trail >>= 8; + } + } + + /* + * 'data_reg' indicates next register to read. 
If we did not already + * read on rx complete reg(last reg), we must do so for receive + */ + if (data_reg <= spm.desc.data_end_offset) + mmio_read_32(spt->data + spm.desc.data_end_offset); + + /* TODO: Verify checksum */ + (void)secure_header.checksum; + + VERBOSE("Message successfully received from thread %s\n", spt->name); + + return 0; +} diff --git a/plat/ti/k3/common/drivers/sec_proxy/sec_proxy.h b/plat/ti/k3/common/drivers/sec_proxy/sec_proxy.h new file mode 100644 index 0000000..f4b0b4b --- /dev/null +++ b/plat/ti/k3/common/drivers/sec_proxy/sec_proxy.h @@ -0,0 +1,82 @@ +/* + * Texas Instruments K3 Secure Proxy Driver + * Based on Linux and U-Boot implementation + * + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SEC_PROXY_H +#define SEC_PROXY_H + +#include + +/** + * enum k3_sec_proxy_chan_id - Secure Proxy thread IDs + * + * These the available IDs used in k3_sec_proxy_{send,recv}() + * There are two schemes we use: + * * if K3_SEC_PROXY_LITE = 1, we just have two threads to talk + * * if K3_SEC_PROXY_LITE = 0, we have the full fledged + * communication scheme available. + */ +enum k3_sec_proxy_chan_id { +#if !K3_SEC_PROXY_LITE + SP_NOTIFY = 0, + SP_RESPONSE, + SP_HIGH_PRIORITY, + SP_LOW_PRIORITY, + SP_NOTIFY_RESP, +#else + SP_RESPONSE = 8, + /* + * Note: TISCI documentation indicates "low priority", but in reality + * with a single thread, there is no low or high priority.. This usage + * is more appropriate for TF-A since we can reduce the churn as a + * result. + */ + SP_HIGH_PRIORITY, +#endif /* K3_SEC_PROXY_LITE */ +}; + +/** + * struct k3_sec_proxy_msg - Secure proxy message structure + * @len: Length of data in the Buffer + * @buf: Buffer pointer + * + * This is the structure for data used in k3_sec_proxy_{send,recv}() + */ +struct k3_sec_proxy_msg { + size_t len; + uint8_t *buf; +}; + +/** + * k3_sec_proxy_send() - Send data over a Secure Proxy thread + * @id: Channel Identifier + * @msg: Pointer to k3_sec_proxy_msg + * + * Return: 0 if all goes well, else appropriate error message + */ +int k3_sec_proxy_clear_rx_thread(enum k3_sec_proxy_chan_id id); + +/** + * k3_sec_proxy_send() - Send data over a Secure Proxy thread + * @id: Channel Identifier + * @msg: Pointer to k3_sec_proxy_msg + * + * Return: 0 if all goes well, else appropriate error message + */ +int k3_sec_proxy_send(enum k3_sec_proxy_chan_id id, const struct k3_sec_proxy_msg *msg); + +/** + * k3_sec_proxy_recv() - Receive data from a Secure Proxy thread + * @id: Channel Identifier + * @msg: Pointer to k3_sec_proxy_msg + * + * Return: 0 if all goes well, else appropriate error message + */ +int k3_sec_proxy_recv(enum k3_sec_proxy_chan_id id, struct k3_sec_proxy_msg *msg); + +#endif /* SEC_PROXY_H */ diff --git a/plat/ti/k3/common/drivers/ti_sci/ti_sci.c b/plat/ti/k3/common/drivers/ti_sci/ti_sci.c new file mode 100644 index 0000000..2cbfa3d --- /dev/null +++ b/plat/ti/k3/common/drivers/ti_sci/ti_sci.c @@ -0,0 +1,1739 @@ +/* + * Texas Instruments System Control Interface Driver + * Based on Linux and U-Boot implementation + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include + +#include + +#include +#include + +#include "ti_sci_protocol.h" +#include "ti_sci.h" + +#if USE_COHERENT_MEM +__section("tzfw_coherent_mem") +#endif +static uint8_t message_sequence; + +/** + * struct ti_sci_xfer - Structure 
representing a message flow + * @tx_message: Transmit message + * @rx_message: Receive message + */ +struct ti_sci_xfer { + struct k3_sec_proxy_msg tx_message; + struct k3_sec_proxy_msg rx_message; +}; + +/** + * ti_sci_setup_one_xfer() - Setup one message type + * + * @msg_type: Message type + * @msg_flags: Flag to set for the message + * @tx_buf: Buffer to be sent to mailbox channel + * @tx_message_size: transmit message size + * @rx_buf: Buffer to be received from mailbox channel + * @rx_message_size: receive message size + * + * Helper function which is used by various command functions that are + * exposed to clients of this driver for allocating a message traffic event. + * + * Return: 0 if all goes well, else appropriate error message + */ +static int ti_sci_setup_one_xfer(uint16_t msg_type, uint32_t msg_flags, + void *tx_buf, + size_t tx_message_size, + void *rx_buf, + size_t rx_message_size, + struct ti_sci_xfer *xfer) +{ + struct ti_sci_msg_hdr *hdr; + + /* Ensure we have sane transfer sizes */ + if (rx_message_size > TI_SCI_MAX_MESSAGE_SIZE || + tx_message_size > TI_SCI_MAX_MESSAGE_SIZE || + rx_message_size < sizeof(*hdr) || + tx_message_size < sizeof(*hdr)) + return -ERANGE; + + hdr = (struct ti_sci_msg_hdr *)tx_buf; + hdr->seq = ++message_sequence; + hdr->type = msg_type; + hdr->host = TI_SCI_HOST_ID; + hdr->flags = msg_flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED; + + xfer->tx_message.buf = tx_buf; + xfer->tx_message.len = tx_message_size; + + xfer->rx_message.buf = rx_buf; + xfer->rx_message.len = rx_message_size; + + return 0; +} + +/** + * ti_sci_get_response() - Receive response from mailbox channel + * + * @xfer: Transfer to initiate and wait for response + * @chan: Channel to receive the response + * + * Return: 0 if all goes well, else appropriate error message + */ +static inline int ti_sci_get_response(struct ti_sci_xfer *xfer, + enum k3_sec_proxy_chan_id chan) +{ + struct k3_sec_proxy_msg *msg = &xfer->rx_message; + struct ti_sci_msg_hdr *hdr; + unsigned int retry = 5; + int ret; + + for (; retry > 0; retry--) { + /* Receive the response */ + ret = k3_sec_proxy_recv(chan, msg); + if (ret) { + ERROR("Message receive failed (%d)\n", ret); + return ret; + } + + /* msg is updated by Secure Proxy driver */ + hdr = (struct ti_sci_msg_hdr *)msg->buf; + + /* Sanity check for message response */ + if (hdr->seq == message_sequence) + break; + else + WARN("Message with sequence ID %u is not expected\n", hdr->seq); + } + if (!retry) { + ERROR("Timed out waiting for message\n"); + return -EINVAL; + } + + if (msg->len > TI_SCI_MAX_MESSAGE_SIZE) { + ERROR("Unable to handle %lu xfer (max %d)\n", + msg->len, TI_SCI_MAX_MESSAGE_SIZE); + return -EINVAL; + } + + if (!(hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK)) + return -ENODEV; + + return 0; +} + +/** + * ti_sci_do_xfer() - Do one transfer + * + * @xfer: Transfer to initiate and wait for response + * + * Return: 0 if all goes well, else appropriate error message + */ +static inline int ti_sci_do_xfer(struct ti_sci_xfer *xfer) +{ + struct k3_sec_proxy_msg *msg = &xfer->tx_message; + int ret; + + /* Clear any spurious messages in receive queue */ + ret = k3_sec_proxy_clear_rx_thread(SP_RESPONSE); + if (ret) { + ERROR("Could not clear response queue (%d)\n", ret); + return ret; + } + + /* Send the message */ + ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, msg); + if (ret) { + ERROR("Message sending failed (%d)\n", ret); + return ret; + } + + /* Get the response */ + ret = ti_sci_get_response(xfer, SP_RESPONSE); + if (ret) { + ERROR("Failed to 
get response (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_get_revision() - Get the revision of the SCI entity + * + * Updates the SCI information in the internal data structure. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_get_revision(struct ti_sci_msg_resp_version *rev_info) +{ + struct ti_sci_msg_hdr hdr; + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_VERSION, 0x0, + &hdr, sizeof(hdr), + rev_info, sizeof(*rev_info), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_device_set_state() - Set device state + * + * @id: Device identifier + * @flags: flags to setup for the device + * @state: State to move the device to + * + * Return: 0 if all goes well, else appropriate error message + */ +static int ti_sci_device_set_state(uint32_t id, uint32_t flags, uint8_t state) +{ + struct ti_sci_msg_req_set_device_state req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, flags, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.id = id; + req.state = state; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_device_get_state() - Get device state + * + * @id: Device Identifier + * @clcnt: Pointer to Context Loss Count + * @resets: pointer to resets + * @p_state: pointer to p_state + * @c_state: pointer to c_state + * + * Return: 0 if all goes well, else appropriate error message + */ +static int ti_sci_device_get_state(uint32_t id, uint32_t *clcnt, + uint32_t *resets, uint8_t *p_state, + uint8_t *c_state) +{ + struct ti_sci_msg_req_get_device_state req; + struct ti_sci_msg_resp_get_device_state resp; + + struct ti_sci_xfer xfer; + int ret; + + if (!clcnt && !resets && !p_state && !c_state) + return -EINVAL; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_DEVICE_STATE, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.id = id; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + if (clcnt) + *clcnt = resp.context_loss_count; + if (resets) + *resets = resp.resets; + if (p_state) + *p_state = resp.programmed_state; + if (c_state) + *c_state = resp.current_state; + + return 0; +} + +/** + * ti_sci_device_get() - Request for device managed by TISCI + * + * @id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_get(uint32_t id) +{ + return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_ON); +} + +/** + * ti_sci_device_get_exclusive() - Exclusive request for device managed by TISCI + * + * @id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. 
+ * + * NOTE: This _exclusive version of the get API is for exclusive access to the + * device. Any other host in the system will fail to get this device after this + * call until exclusive access is released with device_put or a non-exclusive + * set call. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_get_exclusive(uint32_t id) +{ + return ti_sci_device_set_state(id, + MSG_FLAG_DEVICE_EXCLUSIVE, + MSG_DEVICE_SW_STATE_ON); +} + +/** + * ti_sci_device_idle() - Idle a device managed by TISCI + * + * @id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_idle(uint32_t id) +{ + return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_RETENTION); +} + +/** + * ti_sci_device_idle_exclusive() - Exclusive idle a device managed by TISCI + * + * @id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + * + * NOTE: This _exclusive version of the idle API is for exclusive access to + * the device. Any other host in the system will fail to get this device after + * this call until exclusive access is released with device_put or a + * non-exclusive set call. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_idle_exclusive(uint32_t id) +{ + return ti_sci_device_set_state(id, + MSG_FLAG_DEVICE_EXCLUSIVE, + MSG_DEVICE_SW_STATE_RETENTION); +} + +/** + * ti_sci_device_put() - Release a device managed by TISCI + * + * @id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_put(uint32_t id) +{ + return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_AUTO_OFF); +} + +/** + * ti_sci_device_put_no_wait() - Release a device without requesting or waiting + * for a response. + * + * @id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. 
+ * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_put_no_wait(uint32_t id) +{ + struct ti_sci_msg_req_set_device_state req; + struct ti_sci_msg_hdr *hdr; + struct k3_sec_proxy_msg tx_message; + int ret; + + /* Ensure we have sane transfer size */ + if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE) + return -ERANGE; + + hdr = (struct ti_sci_msg_hdr *)&req; + hdr->seq = ++message_sequence; + hdr->type = TI_SCI_MSG_SET_DEVICE_STATE; + hdr->host = TI_SCI_HOST_ID; + /* Setup with NORESPONSE flag to keep response queue clean */ + hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE; + + req.id = id; + req.state = MSG_DEVICE_SW_STATE_AUTO_OFF; + + tx_message.buf = (uint8_t *)&req; + tx_message.len = sizeof(req); + + /* Send message */ + ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message); + if (ret) { + ERROR("Message sending failed (%d)\n", ret); + return ret; + } + + /* Return without waiting for response */ + return 0; +} + +/** + * ti_sci_device_is_valid() - Is the device valid + * + * @id: Device Identifier + * + * Return: 0 if all goes well and the device ID is valid, else return + * appropriate error + */ +int ti_sci_device_is_valid(uint32_t id) +{ + uint8_t unused; + + /* check the device state which will also tell us if the ID is valid */ + return ti_sci_device_get_state(id, NULL, NULL, NULL, &unused); +} + +/** + * ti_sci_device_get_clcnt() - Get context loss counter + * + * @id: Device Identifier + * @count: Pointer to Context Loss counter to populate + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_get_clcnt(uint32_t id, uint32_t *count) +{ + return ti_sci_device_get_state(id, count, NULL, NULL, NULL); +} + +/** + * ti_sci_device_is_idle() - Check if the device is requested to be idle + * + * @id: Device Identifier + * @r_state: true if requested to be idle + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_is_idle(uint32_t id, bool *r_state) +{ + int ret; + uint8_t state; + + if (!r_state) + return -EINVAL; + + ret = ti_sci_device_get_state(id, NULL, NULL, &state, NULL); + if (ret) + return ret; + + *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION); + + return 0; +} + +/** + * ti_sci_device_is_stop() - Check if the device is requested to be stopped + * + * @id: Device Identifier + * @r_state: true if requested to be stopped + * @curr_state: true if currently stopped + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_is_stop(uint32_t id, bool *r_state, bool *curr_state) +{ + int ret; + uint8_t p_state, c_state; + + if (!r_state && !curr_state) + return -EINVAL; + + ret = ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state); + if (ret) + return ret; + + if (r_state) + *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF); + if (curr_state) + *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF); + + return 0; +} + +/** + * ti_sci_device_is_on() - Check if the device is requested to be ON + * + * @id: Device Identifier + * @r_state: true if requested to be ON + * @curr_state: true if currently ON and active + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_is_on(uint32_t id, bool *r_state, bool *curr_state) +{ + int ret; + uint8_t p_state, c_state; + + if (!r_state && !curr_state) + return -EINVAL; + + ret = + ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state); + if (ret) + return ret; + + if (r_state) + *r_state = (p_state == MSG_DEVICE_SW_STATE_ON); + if (curr_state) + 
*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON); + + return 0; +} + +/** + * ti_sci_device_is_trans() - Check if the device is currently transitioning + * + * @id: Device Identifier + * @curr_state: true if currently transitioning + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_is_trans(uint32_t id, bool *curr_state) +{ + int ret; + uint8_t state; + + if (!curr_state) + return -EINVAL; + + ret = ti_sci_device_get_state(id, NULL, NULL, NULL, &state); + if (ret) + return ret; + + *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS); + + return 0; +} + +/** + * ti_sci_device_set_resets() - Set resets for device managed by TISCI + * + * @id: Device Identifier + * @reset_state: Device specific reset bit field + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_set_resets(uint32_t id, uint32_t reset_state) +{ + struct ti_sci_msg_req_set_device_resets req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_RESETS, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.id = id; + req.resets = reset_state; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_device_get_resets() - Get reset state for device managed by TISCI + * + * @id: Device Identifier + * @reset_state: Pointer to reset state to populate + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_device_get_resets(uint32_t id, uint32_t *reset_state) +{ + return ti_sci_device_get_state(id, NULL, reset_state, NULL, NULL); +} + +/** + * ti_sci_clock_set_state() - Set clock state helper + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request, + * Each device has its own set of clock inputs, This indexes + * which clock input to modify + * @flags: Header flags as needed + * @state: State to request for the clock + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_set_state(uint32_t dev_id, uint8_t clk_id, + uint32_t flags, uint8_t state) +{ + struct ti_sci_msg_req_set_clock_state req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_STATE, flags, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.dev_id = dev_id; + req.clk_id = clk_id; + req.request_state = state; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_clock_get_state() - Get clock state helper + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. 
+ * @programmed_state: State requested for clock to move to + * @current_state: State that the clock is currently in + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_get_state(uint32_t dev_id, uint8_t clk_id, + uint8_t *programmed_state, + uint8_t *current_state) +{ + struct ti_sci_msg_req_get_clock_state req; + struct ti_sci_msg_resp_get_clock_state resp; + + struct ti_sci_xfer xfer; + int ret; + + if (!programmed_state && !current_state) + return -EINVAL; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_STATE, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.dev_id = dev_id; + req.clk_id = clk_id; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + if (programmed_state) + *programmed_state = resp.programmed_state; + if (current_state) + *current_state = resp.current_state; + + return 0; +} + +/** + * ti_sci_clock_get() - Get control of a clock from TI SCI + + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * @needs_ssc: 'true' iff Spread Spectrum clock is desired + * @can_change_freq: 'true' iff frequency change is desired + * @enable_input_term: 'true' iff input termination is desired + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_get(uint32_t dev_id, uint8_t clk_id, + bool needs_ssc, bool can_change_freq, + bool enable_input_term) +{ + uint32_t flags = 0; + + flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0; + flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0; + flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0; + + return ti_sci_clock_set_state(dev_id, clk_id, flags, + MSG_CLOCK_SW_STATE_REQ); +} + +/** + * ti_sci_clock_idle() - Idle a clock which is in our control + + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * + * NOTE: This clock must have been requested by get_clock previously. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_idle(uint32_t dev_id, uint8_t clk_id) +{ + return ti_sci_clock_set_state(dev_id, clk_id, 0, + MSG_CLOCK_SW_STATE_UNREQ); +} + +/** + * ti_sci_clock_put() - Release a clock from our control + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * + * NOTE: This clock must have been requested by get_clock previously. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_put(uint32_t dev_id, uint8_t clk_id) +{ + return ti_sci_clock_set_state(dev_id, clk_id, 0, + MSG_CLOCK_SW_STATE_AUTO); +} + +/** + * ti_sci_clock_is_auto() - Is the clock being auto managed + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. 
+ * @req_state: state indicating if the clock is auto managed + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_is_auto(uint32_t dev_id, uint8_t clk_id, bool *req_state) +{ + uint8_t state = 0; + int ret; + + if (!req_state) + return -EINVAL; + + ret = ti_sci_clock_get_state(dev_id, clk_id, &state, NULL); + if (ret) + return ret; + + *req_state = (state == MSG_CLOCK_SW_STATE_AUTO); + + return 0; +} + +/** + * ti_sci_clock_is_on() - Is the clock ON + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * @req_state: state indicating if the clock is managed by us and enabled + * @curr_state: state indicating if the clock is ready for operation + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_is_on(uint32_t dev_id, uint8_t clk_id, + bool *req_state, bool *curr_state) +{ + uint8_t c_state = 0, r_state = 0; + int ret; + + if (!req_state && !curr_state) + return -EINVAL; + + ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state); + if (ret) + return ret; + + if (req_state) + *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ); + if (curr_state) + *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY); + + return 0; +} + +/** + * ti_sci_clock_is_off() - Is the clock OFF + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * @req_state: state indicating if the clock is managed by us and disabled + * @curr_state: state indicating if the clock is NOT ready for operation + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_is_off(uint32_t dev_id, uint8_t clk_id, + bool *req_state, bool *curr_state) +{ + uint8_t c_state = 0, r_state = 0; + int ret; + + if (!req_state && !curr_state) + return -EINVAL; + + ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state); + if (ret) + return ret; + + if (req_state) + *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ); + if (curr_state) + *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY); + + return 0; +} + +/** + * ti_sci_clock_set_parent() - Set the clock source of a specific device clock + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * @parent_id: Parent clock identifier to set + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_set_parent(uint32_t dev_id, uint8_t clk_id, uint8_t parent_id) +{ + struct ti_sci_msg_req_set_clock_parent req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_PARENT, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.dev_id = dev_id; + req.clk_id = clk_id; + req.parent_id = parent_id; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_clock_get_parent() - Get current parent clock source + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. 
+ * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * @parent_id: Current clock parent + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_get_parent(uint32_t dev_id, uint8_t clk_id, uint8_t *parent_id) +{ + struct ti_sci_msg_req_get_clock_parent req; + struct ti_sci_msg_resp_get_clock_parent resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_PARENT, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.dev_id = dev_id; + req.clk_id = clk_id; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + *parent_id = resp.parent_id; + + return 0; +} + +/** + * ti_sci_clock_get_num_parents() - Get num parents of the current clk source + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * @num_parents: Returns he number of parents to the current clock. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_get_num_parents(uint32_t dev_id, uint8_t clk_id, + uint8_t *num_parents) +{ + struct ti_sci_msg_req_get_clock_num_parents req; + struct ti_sci_msg_resp_get_clock_num_parents resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.dev_id = dev_id; + req.clk_id = clk_id; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + *num_parents = resp.num_parents; + + return 0; +} + +/** + * ti_sci_clock_get_match_freq() - Find a good match for frequency + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * @min_freq: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @target_freq: The target clock frequency in Hz. A frequency will be + * processed as close to this target frequency as possible. + * @max_freq: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @match_freq: Frequency match in Hz response. 
+ * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_get_match_freq(uint32_t dev_id, uint8_t clk_id, + uint64_t min_freq, uint64_t target_freq, + uint64_t max_freq, uint64_t *match_freq) +{ + struct ti_sci_msg_req_query_clock_freq req; + struct ti_sci_msg_resp_query_clock_freq resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_CLOCK_FREQ, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.dev_id = dev_id; + req.clk_id = clk_id; + req.min_freq_hz = min_freq; + req.target_freq_hz = target_freq; + req.max_freq_hz = max_freq; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + *match_freq = resp.freq_hz; + + return 0; +} + +/** + * ti_sci_clock_set_freq() - Set a frequency for clock + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * @min_freq: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @target_freq: The target clock frequency in Hz. A frequency will be + * processed as close to this target frequency as possible. + * @max_freq: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_set_freq(uint32_t dev_id, uint8_t clk_id, uint64_t min_freq, + uint64_t target_freq, uint64_t max_freq) +{ + struct ti_sci_msg_req_set_clock_freq req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_FREQ, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + req.dev_id = dev_id; + req.clk_id = clk_id; + req.min_freq_hz = min_freq; + req.target_freq_hz = target_freq; + req.max_freq_hz = max_freq; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_clock_get_freq() - Get current frequency + * + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. 
+ * @freq: Currently frequency in Hz + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_clock_get_freq(uint32_t dev_id, uint8_t clk_id, uint64_t *freq) +{ + struct ti_sci_msg_req_get_clock_freq req; + struct ti_sci_msg_resp_get_clock_freq resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_FREQ, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.dev_id = dev_id; + req.clk_id = clk_id; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + *freq = resp.freq_hz; + + return 0; +} + +/** + * ti_sci_core_reboot() - Command to request system reset + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_core_reboot(void) +{ + struct ti_sci_msg_req_reboot req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SYS_RESET, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + req.domain = TI_SCI_DOMAIN_FULL_SOC_RESET; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_proc_request() - Request a physical processor control + * + * @proc_id: Processor ID this request is for + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_proc_request(uint8_t proc_id) +{ + struct ti_sci_msg_req_proc_request req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_REQUEST, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.processor_id = proc_id; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_proc_release() - Release a physical processor control + * + * @proc_id: Processor ID this request is for + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_proc_release(uint8_t proc_id) +{ + struct ti_sci_msg_req_proc_release req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_RELEASE, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.processor_id = proc_id; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_proc_handover() - Handover a physical processor control to a host in + * the processor's access control list. 
+ * + * @proc_id: Processor ID this request is for + * @host_id: Host ID to get the control of the processor + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_proc_handover(uint8_t proc_id, uint8_t host_id) +{ + struct ti_sci_msg_req_proc_handover req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_HANDOVER, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.processor_id = proc_id; + req.host_id = host_id; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_proc_set_boot_cfg() - Set the processor boot configuration flags + * + * @proc_id: Processor ID this request is for + * @config_flags_set: Configuration flags to be set + * @config_flags_clear: Configuration flags to be cleared + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_proc_set_boot_cfg(uint8_t proc_id, uint64_t bootvector, + uint32_t config_flags_set, + uint32_t config_flags_clear) +{ + struct ti_sci_msg_req_set_proc_boot_config req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CONFIG, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.processor_id = proc_id; + req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK; + req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >> + TISCI_ADDR_HIGH_SHIFT; + req.config_flags_set = config_flags_set; + req.config_flags_clear = config_flags_clear; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_proc_set_boot_ctrl() - Set the processor boot control flags + * + * @proc_id: Processor ID this request is for + * @control_flags_set: Control flags to be set + * @control_flags_clear: Control flags to be cleared + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_proc_set_boot_ctrl(uint8_t proc_id, uint32_t control_flags_set, + uint32_t control_flags_clear) +{ + struct ti_sci_msg_req_set_proc_boot_ctrl req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.processor_id = proc_id; + req.control_flags_set = control_flags_set; + req.control_flags_clear = control_flags_clear; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_proc_set_boot_ctrl_no_wait() - Set the processor boot control flags + * without requesting or waiting for a + * response. 
+ * + * @proc_id: Processor ID this request is for + * @control_flags_set: Control flags to be set + * @control_flags_clear: Control flags to be cleared + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id, + uint32_t control_flags_set, + uint32_t control_flags_clear) +{ + struct ti_sci_msg_req_set_proc_boot_ctrl req; + struct ti_sci_msg_hdr *hdr; + struct k3_sec_proxy_msg tx_message; + int ret; + + /* Ensure we have sane transfer size */ + if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE) + return -ERANGE; + + hdr = (struct ti_sci_msg_hdr *)&req; + hdr->seq = ++message_sequence; + hdr->type = TISCI_MSG_SET_PROC_BOOT_CTRL; + hdr->host = TI_SCI_HOST_ID; + /* Setup with NORESPONSE flag to keep response queue clean */ + hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE; + + req.processor_id = proc_id; + req.control_flags_set = control_flags_set; + req.control_flags_clear = control_flags_clear; + + tx_message.buf = (uint8_t *)&req; + tx_message.len = sizeof(req); + + /* Send message */ + ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message); + if (ret) { + ERROR("Message sending failed (%d)\n", ret); + return ret; + } + + /* Return without waiting for response */ + return 0; +} + +/** + * ti_sci_proc_auth_boot_image() - Authenticate and load image and then set the + * processor configuration flags + * + * @proc_id: Processor ID this request is for + * @cert_addr: Memory address at which payload image certificate is located + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_proc_auth_boot_image(uint8_t proc_id, uint64_t cert_addr) +{ + struct ti_sci_msg_req_proc_auth_boot_image req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_AUTH_BOOT_IMIAGE, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.processor_id = proc_id; + req.cert_addr_low = cert_addr & TISCI_ADDR_LOW_MASK; + req.cert_addr_high = (cert_addr & TISCI_ADDR_HIGH_MASK) >> + TISCI_ADDR_HIGH_SHIFT; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_proc_get_boot_status() - Get the processor boot status + * + * @proc_id: Processor ID this request is for + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_proc_get_boot_status(uint8_t proc_id, uint64_t *bv, + uint32_t *cfg_flags, + uint32_t *ctrl_flags, + uint32_t *sts_flags) +{ + struct ti_sci_msg_req_get_proc_boot_status req; + struct ti_sci_msg_resp_get_proc_boot_status resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TISCI_MSG_GET_PROC_BOOT_STATUS, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.processor_id = proc_id; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + *bv = (resp.bootvector_low & TISCI_ADDR_LOW_MASK) | + (((uint64_t)resp.bootvector_high << TISCI_ADDR_HIGH_SHIFT) & + TISCI_ADDR_HIGH_MASK); + *cfg_flags = resp.config_flags; + *ctrl_flags = resp.control_flags; + *sts_flags = resp.status_flags; + + return 0; +} + +/** + * ti_sci_proc_wait_boot_status() - Wait for a processor boot status + * + * @proc_id: Processor ID this request is for + * @num_wait_iterations Total number of iterations we will check before + * 
we will timeout and give up + * @num_match_iterations How many iterations should we have continued + * status to account for status bits glitching. + * This is to make sure that match occurs for + * consecutive checks. This implies that the + * worst case should consider that the stable + * time should at the worst be num_wait_iterations + * num_match_iterations to prevent timeout. + * @delay_per_iteration_us Specifies how long to wait (in micro seconds) + * between each status checks. This is the minimum + * duration, and overhead of register reads and + * checks are on top of this and can vary based on + * varied conditions. + * @delay_before_iterations_us Specifies how long to wait (in micro seconds) + * before the very first check in the first + * iteration of status check loop. This is the + * minimum duration, and overhead of register + * reads and checks are. + * @status_flags_1_set_all_wait If non-zero, Specifies that all bits of the + * status matching this field requested MUST be 1. + * @status_flags_1_set_any_wait If non-zero, Specifies that at least one of the + * bits matching this field requested MUST be 1. + * @status_flags_1_clr_all_wait If non-zero, Specifies that all bits of the + * status matching this field requested MUST be 0. + * @status_flags_1_clr_any_wait If non-zero, Specifies that at least one of the + * bits matching this field requested MUST be 0. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_proc_wait_boot_status(uint8_t proc_id, uint8_t num_wait_iterations, + uint8_t num_match_iterations, + uint8_t delay_per_iteration_us, + uint8_t delay_before_iterations_us, + uint32_t status_flags_1_set_all_wait, + uint32_t status_flags_1_set_any_wait, + uint32_t status_flags_1_clr_all_wait, + uint32_t status_flags_1_clr_any_wait) +{ + struct ti_sci_msg_req_wait_proc_boot_status req; + struct ti_sci_msg_hdr resp; + + struct ti_sci_xfer xfer; + int ret; + + ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0, + &req, sizeof(req), + &resp, sizeof(resp), + &xfer); + if (ret) { + ERROR("Message alloc failed (%d)\n", ret); + return ret; + } + + req.processor_id = proc_id; + req.num_wait_iterations = num_wait_iterations; + req.num_match_iterations = num_match_iterations; + req.delay_per_iteration_us = delay_per_iteration_us; + req.delay_before_iterations_us = delay_before_iterations_us; + req.status_flags_1_set_all_wait = status_flags_1_set_all_wait; + req.status_flags_1_set_any_wait = status_flags_1_set_any_wait; + req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait; + req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait; + + ret = ti_sci_do_xfer(&xfer); + if (ret) { + ERROR("Transfer send failed (%d)\n", ret); + return ret; + } + + return 0; +} + +/** + * ti_sci_proc_wait_boot_status_no_wait() - Wait for a processor boot status + * without requesting or waiting for + * a response. + * + * @proc_id: Processor ID this request is for + * @num_wait_iterations Total number of iterations we will check before + * we will timeout and give up + * @num_match_iterations How many iterations should we have continued + * status to account for status bits glitching. + * This is to make sure that match occurs for + * consecutive checks. This implies that the + * worst case should consider that the stable + * time should at the worst be num_wait_iterations + * num_match_iterations to prevent timeout. + * @delay_per_iteration_us Specifies how long to wait (in micro seconds) + * between each status checks. 
This is the minimum + * duration, and overhead of register reads and + * checks are on top of this and can vary based on + * varied conditions. + * @delay_before_iterations_us Specifies how long to wait (in micro seconds) + * before the very first check in the first + * iteration of status check loop. This is the + * minimum duration, and overhead of register + * reads and checks are. + * @status_flags_1_set_all_wait If non-zero, Specifies that all bits of the + * status matching this field requested MUST be 1. + * @status_flags_1_set_any_wait If non-zero, Specifies that at least one of the + * bits matching this field requested MUST be 1. + * @status_flags_1_clr_all_wait If non-zero, Specifies that all bits of the + * status matching this field requested MUST be 0. + * @status_flags_1_clr_any_wait If non-zero, Specifies that at least one of the + * bits matching this field requested MUST be 0. + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id, + uint8_t num_wait_iterations, + uint8_t num_match_iterations, + uint8_t delay_per_iteration_us, + uint8_t delay_before_iterations_us, + uint32_t status_flags_1_set_all_wait, + uint32_t status_flags_1_set_any_wait, + uint32_t status_flags_1_clr_all_wait, + uint32_t status_flags_1_clr_any_wait) +{ + struct ti_sci_msg_req_wait_proc_boot_status req; + struct ti_sci_msg_hdr *hdr; + struct k3_sec_proxy_msg tx_message; + int ret; + + /* Ensure we have sane transfer size */ + if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE) + return -ERANGE; + + hdr = (struct ti_sci_msg_hdr *)&req; + hdr->seq = ++message_sequence; + hdr->type = TISCI_MSG_WAIT_PROC_BOOT_STATUS; + hdr->host = TI_SCI_HOST_ID; + /* Setup with NORESPONSE flag to keep response queue clean */ + hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE; + + req.processor_id = proc_id; + req.num_wait_iterations = num_wait_iterations; + req.num_match_iterations = num_match_iterations; + req.delay_per_iteration_us = delay_per_iteration_us; + req.delay_before_iterations_us = delay_before_iterations_us; + req.status_flags_1_set_all_wait = status_flags_1_set_all_wait; + req.status_flags_1_set_any_wait = status_flags_1_set_any_wait; + req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait; + req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait; + + tx_message.buf = (uint8_t *)&req; + tx_message.len = sizeof(req); + + /* Send message */ + ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message); + if (ret) { + ERROR("Message sending failed (%d)\n", ret); + return ret; + } + + /* Return without waiting for response */ + return 0; +} + +/** + * ti_sci_enter_sleep - Command to initiate system transition into suspend. + * + * @proc_id: Processor ID. + * @mode: Low power mode to enter. + * @core_resume_addr: Address that core should be + * resumed from after low power transition. 
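+ *
+ * Illustrative call sketch (the mode value and resume_pc are
+ * placeholders; the mode encoding is agreed with system firmware):
+ *
+ *   ti_sci_enter_sleep(proc_id, 0, (uint64_t)resume_pc);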
+ * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_enter_sleep(uint8_t proc_id, + uint8_t mode, + uint64_t core_resume_addr) +{ + struct ti_sci_msg_req_enter_sleep req; + struct ti_sci_msg_hdr *hdr; + struct k3_sec_proxy_msg tx_message; + int ret; + + /* Ensure we have sane transfer size */ + if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE) { + return -ERANGE; + } + + hdr = (struct ti_sci_msg_hdr *)&req; + hdr->seq = ++message_sequence; + hdr->type = TI_SCI_MSG_ENTER_SLEEP; + hdr->host = TI_SCI_HOST_ID; + /* Setup with NORESPONSE flag to keep response queue clean */ + hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE; + + req.processor_id = proc_id; + req.mode = mode; + req.core_resume_lo = core_resume_addr & TISCI_ADDR_LOW_MASK; + req.core_resume_hi = (core_resume_addr & TISCI_ADDR_HIGH_MASK) >> + TISCI_ADDR_HIGH_SHIFT; + + tx_message.buf = (uint8_t *)&req; + tx_message.len = sizeof(req); + + /* Send message */ + ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message); + if (ret != 0) { + ERROR("Message sending failed (%d)\n", ret); + return ret; + } + + /* Return without waiting for response */ + return 0; +} + +/** + * ti_sci_init() - Basic initialization + * + * Return: 0 if all goes well, else appropriate error message + */ +int ti_sci_init(void) +{ + struct ti_sci_msg_resp_version rev_info; + int ret; + + ret = ti_sci_get_revision(&rev_info); + if (ret) { + ERROR("Unable to communicate with control firmware (%d)\n", ret); + return ret; + } + + INFO("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n", + rev_info.abi_major, rev_info.abi_minor, + rev_info.firmware_revision, + rev_info.firmware_description); + + return 0; +} diff --git a/plat/ti/k3/common/drivers/ti_sci/ti_sci.h b/plat/ti/k3/common/drivers/ti_sci/ti_sci.h new file mode 100644 index 0000000..06944a7 --- /dev/null +++ b/plat/ti/k3/common/drivers/ti_sci/ti_sci.h @@ -0,0 +1,232 @@ +/* + * Texas Instruments System Control Interface API + * Based on Linux and U-Boot implementation + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef TI_SCI_H +#define TI_SCI_H + +#include +#include + +/** + * Device control operations + * + * - ti_sci_device_get - command to request for device managed by TISCI + * - ti_sci_device_get_exclusive - exclusively request a device + * - ti_sci_device_idle - Command to idle a device managed by TISCI + * - ti_sci_device_idle_exclusive - exclusively idle a device + * - ti_sci_device_put - command to release a device managed by TISCI + * - ti_sci_device_put_no_wait - release a device without waiting for response + * - ti_sci_device_is_valid - Is the device valid + * - ti_sci_device_get_clcnt - Get context loss counter + * @count: Pointer to Context Loss counter to populate + * - ti_sci_device_is_idle - Check if the device is requested to be idle + * @r_state: true if requested to be idle + * - ti_sci_device_is_stop - Check if the device is requested to be stopped + * @r_state: true if requested to be stopped + * @curr_state: true if currently stopped. + * - ti_sci_device_is_on - Check if the device is requested to be ON + * @r_state: true if requested to be ON + * @curr_state: true if currently ON and active + * - ti_sci_device_is_trans - Check if the device is currently transitioning + * @curr_state: true if currently transitioning. 
+ * - ti_sci_device_set_resets - Command to set resets for + * device managed by TISCI + * @reset_state: Device specific reset bit field + * - ti_sci_device_get_resets - Get reset state for device managed by TISCI + * @reset_state: Pointer to reset state to populate + * + * NOTE: for all these functions, the following are generic in nature: + * @id: Device Identifier + * Returns 0 for successful request, else returns corresponding error message. + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + */ +int ti_sci_device_get(uint32_t id); +int ti_sci_device_get_exclusive(uint32_t id); +int ti_sci_device_idle(uint32_t id); +int ti_sci_device_idle_exclusive(uint32_t id); +int ti_sci_device_put(uint32_t id); +int ti_sci_device_put_no_wait(uint32_t id); +int ti_sci_device_is_valid(uint32_t id); +int ti_sci_device_get_clcnt(uint32_t id, uint32_t *count); +int ti_sci_device_is_idle(uint32_t id, bool *r_state); +int ti_sci_device_is_stop(uint32_t id, bool *r_state, bool *curr_state); +int ti_sci_device_is_on(uint32_t id, bool *r_state, bool *curr_state); +int ti_sci_device_is_trans(uint32_t id, bool *curr_state); +int ti_sci_device_set_resets(uint32_t id, uint32_t reset_state); +int ti_sci_device_get_resets(uint32_t id, uint32_t *reset_state); + +/** + * Clock control operations + * + * - ti_sci_clock_get - Get control of a clock from TI SCI + * @needs_ssc: 'true' iff Spread Spectrum clock is desired + * @can_change_freq: 'true' iff frequency change is desired + * @enable_input_term: 'true' iff input termination is desired + * - ti_sci_clock_idle - Idle a clock which is in our control + * - ti_sci_clock_put - Release a clock from our control + * - ti_sci_clock_is_auto - Is the clock being auto managed + * @req_state: state indicating if the clock is auto managed + * - ti_sci_clock_is_on - Is the clock ON + * @req_state: state indicating if the clock is managed by us and enabled + * @curr_state: state indicating if the clock is ready for operation + * - ti_sci_clock_is_off - Is the clock OFF + * @req_state: state indicating if the clock is managed by us and disabled + * @curr_state: state indicating if the clock is NOT ready for operation + * - ti_sci_clock_set_parent - Set the clock source of a specific device clock + * @parent_id: Parent clock identifier to set + * - ti_sci_clock_get_parent - Get current parent clock source + * @parent_id: Current clock parent + * - ti_sci_clock_get_num_parents - Get num parents of the current clk source + * @num_parents: Returns he number of parents to the current clock. + * - ti_sci_clock_get_match_freq - Find a good match for frequency + * @match_freq: Frequency match in Hz response. + * - ti_sci_clock_set_freq - Set a frequency for clock + * - ti_sci_clock_get_freq - Get current frequency + * @freq: Currently frequency in Hz + * + * NOTE: for all these functions, the following are generic in nature: + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has its own set of clock inputs. This indexes + * which clock input to modify. + * @min_freq: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @target_freq: The target clock frequency in Hz. A frequency will be + * processed as close to this target frequency as possible. 
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * Returns 0 for successful request, else returns corresponding error message. + * + * Request for the clock - NOTE: the client MUST maintain integrity of + * usage count by balancing get_clock with put_clock. No refcounting is + * managed by driver for that purpose. + */ +int ti_sci_clock_get(uint32_t dev_id, uint8_t clk_id, + bool needs_ssc, bool can_change_freq, + bool enable_input_term); +int ti_sci_clock_idle(uint32_t dev_id, uint8_t clk_id); +int ti_sci_clock_put(uint32_t dev_id, uint8_t clk_id); +int ti_sci_clock_is_auto(uint32_t dev_id, uint8_t clk_id, + bool *req_state); +int ti_sci_clock_is_on(uint32_t dev_id, uint8_t clk_id, + bool *req_state, bool *curr_state); +int ti_sci_clock_is_off(uint32_t dev_id, uint8_t clk_id, + bool *req_state, bool *curr_state); +int ti_sci_clock_set_parent(uint32_t dev_id, uint8_t clk_id, + uint8_t parent_id); +int ti_sci_clock_get_parent(uint32_t dev_id, uint8_t clk_id, + uint8_t *parent_id); +int ti_sci_clock_get_num_parents(uint32_t dev_id, uint8_t clk_id, + uint8_t *num_parents); +int ti_sci_clock_get_match_freq(uint32_t dev_id, uint8_t clk_id, + uint64_t min_freq, uint64_t target_freq, + uint64_t max_freq, uint64_t *match_freq); +int ti_sci_clock_set_freq(uint32_t dev_id, uint8_t clk_id, + uint64_t min_freq, uint64_t target_freq, + uint64_t max_freq); +int ti_sci_clock_get_freq(uint32_t dev_id, uint8_t clk_id, uint64_t *freq); + +/** + * Core control operations + * + * - ti_sci_core_reboot() - Command to request system reset + * + * Return: 0 if all went well, else returns appropriate error value. + */ +int ti_sci_core_reboot(void); + +/** + * Processor control operations + * + * - ti_sci_proc_request - Command to request a physical processor control + * - ti_sci_proc_release - Command to release a physical processor control + * - ti_sci_proc_handover - Command to handover a physical processor control to + * a host in the processor's access control list. + * @host_id: Host ID to get the control of the processor + * - ti_sci_proc_set_boot_cfg - Command to set the processor boot configuration flags + * @config_flags_set: Configuration flags to be set + * @config_flags_clear: Configuration flags to be cleared. + * - ti_sci_proc_set_boot_ctrl - Command to set the processor boot control flags + * @control_flags_set: Control flags to be set + * @control_flags_clear: Control flags to be cleared + * - ti_sci_proc_set_boot_ctrl_no_wait - Same as above without waiting for response + * - ti_sci_proc_auth_boot_image - Command to authenticate and load the image + * and then set the processor configuration flags. + * @cert_addr: Memory address at which payload image certificate is located. + * - ti_sci_proc_get_boot_status - Command to get the processor boot status + * - ti_sci_proc_wait_boot_status - Command to wait for a processor boot status + * - ti_sci_proc_wait_boot_status_no_wait - Same as above without waiting for response + * + * NOTE: for all these functions, the following are generic in nature: + * @proc_id: Processor ID + * Returns 0 for successful request, else returns corresponding error message. 
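+ *
+ * Illustrative bring-up sketch (not a verbatim sequence from this port;
+ * core_proc_id, core_dev_id and sec_entrypoint are placeholders):
+ *
+ *   ti_sci_proc_request(core_proc_id);
+ *   ti_sci_proc_set_boot_cfg(core_proc_id, sec_entrypoint, 0, 0);
+ *   ti_sci_device_get(core_dev_id);
+ *   ti_sci_proc_release(core_proc_id);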
+ */ +int ti_sci_proc_request(uint8_t proc_id); +int ti_sci_proc_release(uint8_t proc_id); +int ti_sci_proc_handover(uint8_t proc_id, uint8_t host_id); +int ti_sci_proc_set_boot_cfg(uint8_t proc_id, uint64_t bootvector, + uint32_t config_flags_set, + uint32_t config_flags_clear); +int ti_sci_proc_set_boot_ctrl(uint8_t proc_id, uint32_t control_flags_set, + uint32_t control_flags_clear); +int ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id, + uint32_t control_flags_set, + uint32_t control_flags_clear); +int ti_sci_proc_auth_boot_image(uint8_t proc_id, uint64_t cert_addr); +int ti_sci_proc_get_boot_status(uint8_t proc_id, uint64_t *bv, + uint32_t *cfg_flags, + uint32_t *ctrl_flags, + uint32_t *sts_flags); +int ti_sci_proc_wait_boot_status(uint8_t proc_id, uint8_t num_wait_iterations, + uint8_t num_match_iterations, + uint8_t delay_per_iteration_us, + uint8_t delay_before_iterations_us, + uint32_t status_flags_1_set_all_wait, + uint32_t status_flags_1_set_any_wait, + uint32_t status_flags_1_clr_all_wait, + uint32_t status_flags_1_clr_any_wait); +int ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id, + uint8_t num_wait_iterations, + uint8_t num_match_iterations, + uint8_t delay_per_iteration_us, + uint8_t delay_before_iterations_us, + uint32_t status_flags_1_set_all_wait, + uint32_t status_flags_1_set_any_wait, + uint32_t status_flags_1_clr_all_wait, + uint32_t status_flags_1_clr_any_wait); + +/** + * System Low Power Operations + * + * - ti_sci_enter_sleep - Command to initiate system transition into suspend. + * @proc_id: Processor ID. + * @mode: Low power mode to enter. + * @core_resume_addr: Address that core should be resumed from + * after low power transition. + * + * NOTE: for all these functions, the following are generic in nature: + * Returns 0 for successful request, else returns corresponding error message. + */ +int ti_sci_enter_sleep(uint8_t proc_id, + uint8_t mode, + uint64_t core_resume_addr); + +/** + * ti_sci_init() - Basic initialization + * + * Return: 0 if all goes good, else appropriate error message. 
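+ *
+ * In this port it is invoked once from bl31_platform_setup(), after the
+ * GIC has been initialized:
+ *
+ *   void bl31_platform_setup(void)
+ *   {
+ *       k3_gic_driver_init(K3_GIC_BASE);
+ *       k3_gic_init();
+ *       ti_sci_init();
+ *   }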
+ */ +int ti_sci_init(void); + +#endif /* TI_SCI_H */ diff --git a/plat/ti/k3/common/drivers/ti_sci/ti_sci_protocol.h b/plat/ti/k3/common/drivers/ti_sci/ti_sci_protocol.h new file mode 100644 index 0000000..d220612 --- /dev/null +++ b/plat/ti/k3/common/drivers/ti_sci/ti_sci_protocol.h @@ -0,0 +1,734 @@ +/* + * Texas Instruments System Control Interface (TISCI) Protocol + * + * Communication protocol with TI SCI hardware + * The system works in a message response protocol + * See: http://processors.wiki.ti.com/index.php/TISCI for details + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef TI_SCI_PROTOCOL_H +#define TI_SCI_PROTOCOL_H + +#include + +/* Generic Messages */ +#define TI_SCI_MSG_ENABLE_WDT 0x0000 +#define TI_SCI_MSG_WAKE_RESET 0x0001 +#define TI_SCI_MSG_VERSION 0x0002 +#define TI_SCI_MSG_WAKE_REASON 0x0003 +#define TI_SCI_MSG_GOODBYE 0x0004 +#define TI_SCI_MSG_SYS_RESET 0x0005 + +/* Device requests */ +#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200 +#define TI_SCI_MSG_GET_DEVICE_STATE 0x0201 +#define TI_SCI_MSG_SET_DEVICE_RESETS 0x0202 + +/* Low Power Mode Requests */ +#define TI_SCI_MSG_ENTER_SLEEP 0x0301 + +/* Clock requests */ +#define TI_SCI_MSG_SET_CLOCK_STATE 0x0100 +#define TI_SCI_MSG_GET_CLOCK_STATE 0x0101 +#define TI_SCI_MSG_SET_CLOCK_PARENT 0x0102 +#define TI_SCI_MSG_GET_CLOCK_PARENT 0x0103 +#define TI_SCI_MSG_GET_NUM_CLOCK_PARENTS 0x0104 +#define TI_SCI_MSG_SET_CLOCK_FREQ 0x010c +#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d +#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e + +/* Processor Control Messages */ +#define TISCI_MSG_PROC_REQUEST 0xc000 +#define TISCI_MSG_PROC_RELEASE 0xc001 +#define TISCI_MSG_PROC_HANDOVER 0xc005 +#define TISCI_MSG_SET_PROC_BOOT_CONFIG 0xc100 +#define TISCI_MSG_SET_PROC_BOOT_CTRL 0xc101 +#define TISCI_MSG_PROC_AUTH_BOOT_IMIAGE 0xc120 +#define TISCI_MSG_GET_PROC_BOOT_STATUS 0xc400 +#define TISCI_MSG_WAIT_PROC_BOOT_STATUS 0xc401 + +/** + * struct ti_sci_msg_hdr - Generic Message Header for All messages and responses + * @type: Type of messages: One of TI_SCI_MSG* values + * @host: Host of the message + * @seq: Message identifier indicating a transfer sequence + * @flags: Flag for the message + */ +struct ti_sci_msg_hdr { + uint16_t type; + uint8_t host; + uint8_t seq; +#define TI_SCI_MSG_FLAG(val) (1 << (val)) +#define TI_SCI_FLAG_REQ_GENERIC_NORESPONSE 0x0 +#define TI_SCI_FLAG_REQ_ACK_ON_RECEIVED TI_SCI_MSG_FLAG(0) +#define TI_SCI_FLAG_REQ_ACK_ON_PROCESSED TI_SCI_MSG_FLAG(1) +#define TI_SCI_FLAG_RESP_GENERIC_NACK 0x0 +#define TI_SCI_FLAG_RESP_GENERIC_ACK TI_SCI_MSG_FLAG(1) + /* Additional Flags */ + uint32_t flags; +} __packed; + +/** + * struct ti_sci_msg_resp_version - Response for a message + * @hdr: Generic header + * @firmware_description: String describing the firmware + * @firmware_revision: Firmware revision + * @abi_major: Major version of the ABI that firmware supports + * @abi_minor: Minor version of the ABI that firmware supports + * + * In general, ABI version changes follow the rule that minor version increments + * are backward compatible. Major revision changes in ABI may not be + * backward compatible. 
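+ *
+ * A caller-side compatibility check might therefore look like the
+ * following sketch (EXPECTED_ABI_MAJOR is a placeholder constant):
+ *
+ *   if (rev_info.abi_major != EXPECTED_ABI_MAJOR)
+ *       return -EINVAL;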
+ * + * Response to a generic message with message type TI_SCI_MSG_VERSION + */ +struct ti_sci_msg_resp_version { + struct ti_sci_msg_hdr hdr; +#define FIRMWARE_DESCRIPTION_LENGTH 32 + char firmware_description[FIRMWARE_DESCRIPTION_LENGTH]; + uint16_t firmware_revision; + uint8_t abi_major; + uint8_t abi_minor; +} __packed; + +/** + * struct ti_sci_msg_req_reboot - Reboot the SoC + * @hdr: Generic Header + * @domain: Domain to be reset, 0 for full SoC reboot + * + * Request type is TI_SCI_MSG_SYS_RESET, responded with a generic + * ACK/NACK message. + */ +struct ti_sci_msg_req_reboot { + struct ti_sci_msg_hdr hdr; +#define TI_SCI_DOMAIN_FULL_SOC_RESET 0x0 + uint8_t domain; +} __packed; + +/** + * struct ti_sci_msg_req_set_device_state - Set the desired state of the device + * @hdr: Generic header + * @id: Indicates which device to modify + * @reserved: Reserved space in message, must be 0 for backward compatibility + * @state: The desired state of the device. + * + * Certain flags can also be set to alter the device state: + * + MSG_FLAG_DEVICE_WAKE_ENABLED - Configure the device to be a wake source. + * The meaning of this flag will vary slightly from device to device and from + * SoC to SoC but it generally allows the device to wake the SoC out of deep + * suspend states. + * + MSG_FLAG_DEVICE_RESET_ISO - Enable reset isolation for this device. + * + MSG_FLAG_DEVICE_EXCLUSIVE - Claim this device exclusively. When passed + * with STATE_RETENTION or STATE_ON, it will claim the device exclusively. + * If another host already has this device set to STATE_RETENTION or STATE_ON, + * the message will fail. Once successful, other hosts attempting to set + * STATE_RETENTION or STATE_ON will fail. + * + * Request type is TI_SCI_MSG_SET_DEVICE_STATE, responded with a generic + * ACK/NACK message. + */ +struct ti_sci_msg_req_set_device_state { + /* Additional hdr->flags options */ +#define MSG_FLAG_DEVICE_WAKE_ENABLED TI_SCI_MSG_FLAG(8) +#define MSG_FLAG_DEVICE_RESET_ISO TI_SCI_MSG_FLAG(9) +#define MSG_FLAG_DEVICE_EXCLUSIVE TI_SCI_MSG_FLAG(10) + struct ti_sci_msg_hdr hdr; + uint32_t id; + uint32_t reserved; + +#define MSG_DEVICE_SW_STATE_AUTO_OFF 0 +#define MSG_DEVICE_SW_STATE_RETENTION 1 +#define MSG_DEVICE_SW_STATE_ON 2 + uint8_t state; +} __packed; + +/** + * struct ti_sci_msg_req_get_device_state - Request to get device. + * @hdr: Generic header + * @id: Device Identifier + * + * Request type is TI_SCI_MSG_GET_DEVICE_STATE, responded device state + * information + */ +struct ti_sci_msg_req_get_device_state { + struct ti_sci_msg_hdr hdr; + uint32_t id; +} __packed; + +/** + * struct ti_sci_msg_resp_get_device_state - Response to get device request. + * @hdr: Generic header + * @context_loss_count: Indicates how many times the device has lost context. A + * driver can use this monotonic counter to determine if the device has + * lost context since the last time this message was exchanged. + * @resets: Programmed state of the reset lines. + * @programmed_state: The state as programmed by set_device. + * - Uses the MSG_DEVICE_SW_* macros + * @current_state: The actual state of the hardware. + * + * Response to request TI_SCI_MSG_GET_DEVICE_STATE. 
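+ *
+ * Helpers such as ti_sci_device_is_on() reduce this response to two
+ * booleans, roughly as in this sketch:
+ *
+ *   *r_state    = (resp.programmed_state == MSG_DEVICE_SW_STATE_ON);
+ *   *curr_state = (resp.current_state == MSG_DEVICE_HW_STATE_ON);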
+ */ +struct ti_sci_msg_resp_get_device_state { + struct ti_sci_msg_hdr hdr; + uint32_t context_loss_count; + uint32_t resets; + uint8_t programmed_state; +#define MSG_DEVICE_HW_STATE_OFF 0 +#define MSG_DEVICE_HW_STATE_ON 1 +#define MSG_DEVICE_HW_STATE_TRANS 2 + uint8_t current_state; +} __packed; + +/** + * struct ti_sci_msg_req_set_device_resets - Set the desired resets + * configuration of the device + * @hdr: Generic header + * @id: Indicates which device to modify + * @resets: A bit field of resets for the device. The meaning, behavior, + * and usage of the reset flags are device specific. 0 for a bit + * indicates releasing the reset represented by that bit while 1 + * indicates keeping it held. + * + * Request type is TI_SCI_MSG_SET_DEVICE_RESETS, responded with a generic + * ACK/NACK message. + */ +struct ti_sci_msg_req_set_device_resets { + struct ti_sci_msg_hdr hdr; + uint32_t id; + uint32_t resets; +} __packed; + +/** + * struct ti_sci_msg_req_set_clock_state - Request to setup a Clock state + * @hdr: Generic Header, Certain flags can be set specific to the clocks: + * MSG_FLAG_CLOCK_ALLOW_SSC: Allow this clock to be modified + * via spread spectrum clocking. + * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE: Allow this clock's + * frequency to be changed while it is running so long as it + * is within the min/max limits. + * MSG_FLAG_CLOCK_INPUT_TERM: Enable input termination, this + * is only applicable to clock inputs on the SoC pseudo-device. + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @request_state: Request the state for the clock to be set to. + * MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock, + * it can be disabled, regardless of the state of the device + * MSG_CLOCK_SW_STATE_AUTO: Allow the System Controller to + * automatically manage the state of this clock. If the device + * is enabled, then the clock is enabled. If the device is set + * to off or retention, then the clock is internally set as not + * being required by the device.(default) + * MSG_CLOCK_SW_STATE_REQ: Configure the clock to be enabled, + * regardless of the state of the device. + * + * Normally, all required clocks are managed by TISCI entity, this is used + * only for specific control *IF* required. Auto managed state is + * MSG_CLOCK_SW_STATE_AUTO, in other states, TISCI entity assume remote + * will explicitly control. + * + * Request type is TI_SCI_MSG_SET_CLOCK_STATE, response is a generic + * ACK or NACK message. + */ +struct ti_sci_msg_req_set_clock_state { + /* Additional hdr->flags options */ +#define MSG_FLAG_CLOCK_ALLOW_SSC TI_SCI_MSG_FLAG(8) +#define MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE TI_SCI_MSG_FLAG(9) +#define MSG_FLAG_CLOCK_INPUT_TERM TI_SCI_MSG_FLAG(10) + struct ti_sci_msg_hdr hdr; + uint32_t dev_id; + uint8_t clk_id; +#define MSG_CLOCK_SW_STATE_UNREQ 0 +#define MSG_CLOCK_SW_STATE_AUTO 1 +#define MSG_CLOCK_SW_STATE_REQ 2 + uint8_t request_state; +} __packed; + +/** + * struct ti_sci_msg_req_get_clock_state - Request for clock state + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to get state of. 
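+ *
+ * Callers normally reach this message through helpers such as
+ * ti_sci_clock_is_on(), for example (sketch):
+ *
+ *   bool requested, ready;
+ *   ti_sci_clock_is_on(dev_id, clk_id, &requested, &ready);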
+ * + * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state + * of the clock + */ +struct ti_sci_msg_req_get_clock_state { + struct ti_sci_msg_hdr hdr; + uint32_t dev_id; + uint8_t clk_id; +} __packed; + +/** + * struct ti_sci_msg_resp_get_clock_state - Response to get clock state + * @hdr: Generic Header + * @programmed_state: Any programmed state of the clock. This is one of + * MSG_CLOCK_SW_STATE* values. + * @current_state: Current state of the clock. This is one of: + * MSG_CLOCK_HW_STATE_NOT_READY: Clock is not ready + * MSG_CLOCK_HW_STATE_READY: Clock is ready + * + * Response to TI_SCI_MSG_GET_CLOCK_STATE. + */ +struct ti_sci_msg_resp_get_clock_state { + struct ti_sci_msg_hdr hdr; + uint8_t programmed_state; +#define MSG_CLOCK_HW_STATE_NOT_READY 0 +#define MSG_CLOCK_HW_STATE_READY 1 + uint8_t current_state; +} __packed; + +/** + * struct ti_sci_msg_req_set_clock_parent - Set the clock parent + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @parent_id: The new clock parent is selectable by an index via this + * parameter. + * + * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic + * ACK / NACK message. + */ +struct ti_sci_msg_req_set_clock_parent { + struct ti_sci_msg_hdr hdr; + uint32_t dev_id; + uint8_t clk_id; + uint8_t parent_id; +} __packed; + +/** + * struct ti_sci_msg_req_get_clock_parent - Get the clock parent + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to get the parent for. + * + * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information + */ +struct ti_sci_msg_req_get_clock_parent { + struct ti_sci_msg_hdr hdr; + uint32_t dev_id; + uint8_t clk_id; +} __packed; + +/** + * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent + * @hdr: Generic Header + * @parent_id: The current clock parent + * + * Response to TI_SCI_MSG_GET_CLOCK_PARENT. + */ +struct ti_sci_msg_resp_get_clock_parent { + struct ti_sci_msg_hdr hdr; + uint8_t parent_id; +} __packed; + +/** + * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents + * @hdr: Generic header + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * + * This request provides information about how many clock parent options + * are available for a given clock to a device. This is typically used + * for input clocks. + * + * Request type is TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, response is appropriate + * message, or NACK in case of inability to satisfy request. + */ +struct ti_sci_msg_req_get_clock_num_parents { + struct ti_sci_msg_hdr hdr; + uint32_t dev_id; + uint8_t clk_id; +} __packed; + +/** + * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents + * @hdr: Generic header + * @num_parents: Number of clock parents + * + * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS + */ +struct ti_sci_msg_resp_get_clock_num_parents { + struct ti_sci_msg_hdr hdr; + uint8_t num_parents; +} __packed; + +/** + * struct ti_sci_msg_req_query_clock_freq - Request to query a frequency + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @min_freq_hz: The minimum allowable frequency in Hz. 
This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @target_freq_hz: The target clock frequency. A frequency will be found + * as close to this target frequency as possible. + * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @clk_id: Clock identifier for the device for this request. + * + * NOTE: Normally clock frequency management is automatically done by TISCI + * entity. In case of specific requests, TISCI evaluates capability to achieve + * requested frequency within provided range and responds with + * result message. + * + * Request type is TI_SCI_MSG_QUERY_CLOCK_FREQ, response is appropriate message, + * or NACK in case of inability to satisfy request. + */ +struct ti_sci_msg_req_query_clock_freq { + struct ti_sci_msg_hdr hdr; + uint32_t dev_id; + uint64_t min_freq_hz; + uint64_t target_freq_hz; + uint64_t max_freq_hz; + uint8_t clk_id; +} __packed; + +/** + * struct ti_sci_msg_resp_query_clock_freq - Response to a clock frequency query + * @hdr: Generic Header + * @freq_hz: Frequency that is the best match in Hz. + * + * Response to request type TI_SCI_MSG_QUERY_CLOCK_FREQ. NOTE: if the request + * cannot be satisfied, the message will be of type NACK. + */ +struct ti_sci_msg_resp_query_clock_freq { + struct ti_sci_msg_hdr hdr; + uint64_t freq_hz; +} __packed; + +/** + * struct ti_sci_msg_req_set_clock_freq - Request to setup a clock frequency + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @target_freq_hz: The target clock frequency. The clock will be programmed + * at a rate as close to this target frequency as possible. + * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @clk_id: Clock identifier for the device for this request. + * + * NOTE: Normally clock frequency management is automatically done by TISCI + * entity. In case of specific requests, TISCI evaluates capability to achieve + * requested range and responds with success/failure message. + * + * This sets the desired frequency for a clock within an allowable + * range. This message will fail on an enabled clock unless + * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE is set for the clock. Additionally, + * if other clocks have their frequency modified due to this message, + * they also must have the MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE or be disabled. + * + * Calling set frequency on a clock input to the SoC pseudo-device will + * inform the PMMC of that clock's frequency. Setting a frequency of + * zero will indicate the clock is disabled. + * + * Calling set frequency on clock outputs from the SoC pseudo-device will + * function similarly to setting the clock frequency on a device. + * + * Request type is TI_SCI_MSG_SET_CLOCK_FREQ, response is a generic ACK/NACK + * message. 
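+ *
+ * Worked example (illustrative): requesting a nominal 100 MHz with a
+ * +/-1 % window through the public wrapper that builds this message:
+ *
+ *   ti_sci_clock_set_freq(dev_id, clk_id,
+ *                         99000000, 100000000, 101000000);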
+ */ +struct ti_sci_msg_req_set_clock_freq { + struct ti_sci_msg_hdr hdr; + uint32_t dev_id; + uint64_t min_freq_hz; + uint64_t target_freq_hz; + uint64_t max_freq_hz; + uint8_t clk_id; +} __packed; + +/** + * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * + * NOTE: Normally clock frequency management is automatically done by TISCI + * entity. In some cases, clock frequencies are configured by host. + * + * Request type is TI_SCI_MSG_GET_CLOCK_FREQ, responded with clock frequency + * that the clock is currently at. + */ +struct ti_sci_msg_req_get_clock_freq { + struct ti_sci_msg_hdr hdr; + uint32_t dev_id; + uint8_t clk_id; +} __packed; + +/** + * struct ti_sci_msg_resp_get_clock_freq - Response of clock frequency request + * @hdr: Generic Header + * @freq_hz: Frequency that the clock is currently on, in Hz. + * + * Response to request type TI_SCI_MSG_GET_CLOCK_FREQ. + */ +struct ti_sci_msg_resp_get_clock_freq { + struct ti_sci_msg_hdr hdr; + uint64_t freq_hz; +} __packed; + +#define TISCI_ADDR_LOW_MASK 0x00000000ffffffff +#define TISCI_ADDR_HIGH_MASK 0xffffffff00000000 +#define TISCI_ADDR_HIGH_SHIFT 32 + +/** + * struct ti_sci_msg_req_proc_request - Request a processor + * + * @hdr: Generic Header + * @processor_id: ID of processor + * + * Request type is TISCI_MSG_PROC_REQUEST, response is a generic ACK/NACK + * message. + */ +struct ti_sci_msg_req_proc_request { + struct ti_sci_msg_hdr hdr; + uint8_t processor_id; +} __packed; + +/** + * struct ti_sci_msg_req_proc_release - Release a processor + * + * @hdr: Generic Header + * @processor_id: ID of processor + * + * Request type is TISCI_MSG_PROC_RELEASE, response is a generic ACK/NACK + * message. + */ +struct ti_sci_msg_req_proc_release { + struct ti_sci_msg_hdr hdr; + uint8_t processor_id; +} __packed; + +/** + * struct ti_sci_msg_req_proc_handover - Handover a processor to a host + * + * @hdr: Generic Header + * @processor_id: ID of processor + * @host_id: New Host we want to give control to + * + * Request type is TISCI_MSG_PROC_HANDOVER, response is a generic ACK/NACK + * message. + */ +struct ti_sci_msg_req_proc_handover { + struct ti_sci_msg_hdr hdr; + uint8_t processor_id; + uint8_t host_id; +} __packed; + +/* A53 Config Flags */ +#define PROC_BOOT_CFG_FLAG_ARMV8_DBG_EN 0x00000001 +#define PROC_BOOT_CFG_FLAG_ARMV8_DBG_NIDEN 0x00000002 +#define PROC_BOOT_CFG_FLAG_ARMV8_DBG_SPIDEN 0x00000004 +#define PROC_BOOT_CFG_FLAG_ARMV8_DBG_SPNIDEN 0x00000008 +#define PROC_BOOT_CFG_FLAG_ARMV8_AARCH32 0x00000100 + +/* R5 Config Flags */ +#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001 +#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002 +#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100 +#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200 +#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400 +#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800 +#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000 +#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000 + +/** + * struct ti_sci_msg_req_set_proc_boot_config - Set Processor boot configuration + * @hdr: Generic Header + * @processor_id: ID of processor + * @bootvector_low: Lower 32bit (Little Endian) of boot vector + * @bootvector_high: Higher 32bit (Little Endian) of boot vector + * @config_flags_set: Optional Processor specific Config Flags to set. + * Setting a bit here implies required bit sets to 1. 
+ * @config_flags_clear: Optional Processor specific Config Flags to clear. + * Setting a bit here implies required bit gets cleared. + * + * Request type is TISCI_MSG_SET_PROC_BOOT_CONFIG, response is a generic + * ACK/NACK message. + */ +struct ti_sci_msg_req_set_proc_boot_config { + struct ti_sci_msg_hdr hdr; + uint8_t processor_id; + uint32_t bootvector_low; + uint32_t bootvector_high; + uint32_t config_flags_set; + uint32_t config_flags_clear; +} __packed; + +/* ARMV8 Control Flags */ +#define PROC_BOOT_CTRL_FLAG_ARMV8_ACINACTM 0x00000001 +#define PROC_BOOT_CTRL_FLAG_ARMV8_AINACTS 0x00000002 +#define PROC_BOOT_CTRL_FLAG_ARMV8_L2FLUSHREQ 0x00000100 + +/* R5 Control Flags */ +#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001 + +/** + * struct ti_sci_msg_req_set_proc_boot_ctrl - Set Processor boot control flags + * @hdr: Generic Header + * @processor_id: ID of processor + * @config_flags_set: Optional Processor specific Config Flags to set. + * Setting a bit here implies required bit sets to 1. + * @config_flags_clear: Optional Processor specific Config Flags to clear. + * Setting a bit here implies required bit gets cleared. + * + * Request type is TISCI_MSG_SET_PROC_BOOT_CTRL, response is a generic ACK/NACK + * message. + */ +struct ti_sci_msg_req_set_proc_boot_ctrl { + struct ti_sci_msg_hdr hdr; + uint8_t processor_id; + uint32_t control_flags_set; + uint32_t control_flags_clear; +} __packed; + +/** + * struct ti_sci_msg_req_proc_auth_start_image - Authenticate and start image + * @hdr: Generic Header + * @processor_id: ID of processor + * @cert_addr_low: Lower 32bit (Little Endian) of certificate + * @cert_addr_high: Higher 32bit (Little Endian) of certificate + * + * Request type is TISCI_MSG_PROC_AUTH_BOOT_IMAGE, response is a generic + * ACK/NACK message. + */ +struct ti_sci_msg_req_proc_auth_boot_image { + struct ti_sci_msg_hdr hdr; + uint8_t processor_id; + uint32_t cert_addr_low; + uint32_t cert_addr_high; +} __packed; + +/** + * struct ti_sci_msg_req_get_proc_boot_status - Get processor boot status + * @hdr: Generic Header + * @processor_id: ID of processor + * + * Request type is TISCI_MSG_GET_PROC_BOOT_STATUS, response is appropriate + * message, or NACK in case of inability to satisfy request. + */ +struct ti_sci_msg_req_get_proc_boot_status { + struct ti_sci_msg_hdr hdr; + uint8_t processor_id; +} __packed; + +/* ARMv8 Status Flags */ +#define PROC_BOOT_STATUS_FLAG_ARMV8_WFE 0x00000001 +#define PROC_BOOT_STATUS_FLAG_ARMV8_WFI 0x00000002 +#define PROC_BOOT_STATUS_FLAG_ARMV8_L2F_DONE 0x00000010 +#define PROC_BOOT_STATUS_FLAG_ARMV8_STANDBYWFIL2 0x00000020 + +/* R5 Status Flags */ +#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001 +#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002 +#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004 +#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100 + +/** + * \brief Processor Status Response + * struct ti_sci_msg_resp_get_proc_boot_status - Processor boot status response + * @hdr: Generic Header + * @processor_id: ID of processor + * @bootvector_low: Lower 32bit (Little Endian) of boot vector + * @bootvector_high: Higher 32bit (Little Endian) of boot vector + * @config_flags: Optional Processor specific Config Flags set. + * @control_flags: Optional Processor specific Control Flags. + * @status_flags: Optional Processor specific Status Flags set. + * + * Response to TISCI_MSG_GET_PROC_BOOT_STATUS. 
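+ *
+ * The 64-bit boot vector is reassembled from the two little-endian
+ * halves exactly as the driver does:
+ *
+ *   bv = (resp.bootvector_low & TISCI_ADDR_LOW_MASK) |
+ *        (((uint64_t)resp.bootvector_high << TISCI_ADDR_HIGH_SHIFT) &
+ *         TISCI_ADDR_HIGH_MASK);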
+ */ +struct ti_sci_msg_resp_get_proc_boot_status { + struct ti_sci_msg_hdr hdr; + uint8_t processor_id; + uint32_t bootvector_low; + uint32_t bootvector_high; + uint32_t config_flags; + uint32_t control_flags; + uint32_t status_flags; +} __packed; + +/** + * struct ti_sci_msg_req_wait_proc_boot_status - Wait for a processor boot status + * @hdr: Generic Header + * @processor_id: ID of processor + * @num_wait_iterations Total number of iterations we will check before + * we will timeout and give up + * @num_match_iterations How many iterations should we have continued + * status to account for status bits glitching. + * This is to make sure that match occurs for + * consecutive checks. This implies that the + * worst case should consider that the stable + * time should at the worst be num_wait_iterations + * num_match_iterations to prevent timeout. + * @delay_per_iteration_us Specifies how long to wait (in micro seconds) + * between each status checks. This is the minimum + * duration, and overhead of register reads and + * checks are on top of this and can vary based on + * varied conditions. + * @delay_before_iterations_us Specifies how long to wait (in micro seconds) + * before the very first check in the first + * iteration of status check loop. This is the + * minimum duration, and overhead of register + * reads and checks are. + * @status_flags_1_set_all_wait If non-zero, Specifies that all bits of the + * status matching this field requested MUST be 1. + * @status_flags_1_set_any_wait If non-zero, Specifies that at least one of the + * bits matching this field requested MUST be 1. + * @status_flags_1_clr_all_wait If non-zero, Specifies that all bits of the + * status matching this field requested MUST be 0. + * @status_flags_1_clr_any_wait If non-zero, Specifies that at least one of the + * bits matching this field requested MUST be 0. + * + * Request type is TISCI_MSG_WAIT_PROC_BOOT_STATUS, response is appropriate + * message, or NACK in case of inability to satisfy request. + */ +struct ti_sci_msg_req_wait_proc_boot_status { + struct ti_sci_msg_hdr hdr; + uint8_t processor_id; + uint8_t num_wait_iterations; + uint8_t num_match_iterations; + uint8_t delay_per_iteration_us; + uint8_t delay_before_iterations_us; + uint32_t status_flags_1_set_all_wait; + uint32_t status_flags_1_set_any_wait; + uint32_t status_flags_1_clr_all_wait; + uint32_t status_flags_1_clr_any_wait; +} __packed; + +/** + * struct ti_sci_msg_req_enter_sleep - Request for TI_SCI_MSG_ENTER_SLEEP. + * + * @hdr Generic Header + * @mode Low power mode to enter. + * @proc_id Processor id to be restored. + * @core_resume_lo Low 32-bits of physical pointer to address for core + * to begin execution upon resume. + * @core_resume_hi High 32-bits of physical pointer to address for core + * to begin execution upon resume. + * + * This message is to be sent after TI_SCI_MSG_PREPARE_SLEEP is sent from OS + * and is what actually triggers entry into the specified low power mode. + */ +struct ti_sci_msg_req_enter_sleep { + struct ti_sci_msg_hdr hdr; + uint8_t mode; + uint8_t processor_id; + uint32_t core_resume_lo; + uint32_t core_resume_hi; +} __packed; + +#endif /* TI_SCI_PROTOCOL_H */ diff --git a/plat/ti/k3/common/k3_bl31_setup.c b/plat/ti/k3/common/k3_bl31_setup.c new file mode 100644 index 0000000..457c95d --- /dev/null +++ b/plat/ti/k3/common/k3_bl31_setup.c @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* Table of regions to map using the MMU */ +const mmap_region_t plat_k3_mmap[] = { + MAP_REGION_FLAT(K3_USART_BASE, K3_USART_SIZE, MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(K3_GIC_BASE, K3_GIC_SIZE, MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(K3_GTC_BASE, K3_GTC_SIZE, MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SEC_PROXY_RT_BASE, SEC_PROXY_RT_SIZE, MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SEC_PROXY_SCFG_BASE, SEC_PROXY_SCFG_SIZE, MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SEC_PROXY_DATA_BASE, SEC_PROXY_DATA_SIZE, MT_DEVICE | MT_RW | MT_SECURE), + { /* sentinel */ } +}; + +/* + * Placeholder variables for maintaining information about the next image(s) + */ +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; + +/******************************************************************************* + * Gets SPSR for BL33 entry + ******************************************************************************/ +static uint32_t k3_get_spsr_for_bl33_entry(void) +{ + unsigned long el_status; + unsigned int mode; + uint32_t spsr; + + /* Figure out what mode we enter the non-secure world in */ + el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; + el_status &= ID_AA64PFR0_ELX_MASK; + + mode = (el_status) ? MODE_EL2 : MODE_EL1; + + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + +/******************************************************************************* + * Perform any BL3-1 early platform setup, such as console init and deciding on + * memory layout. + ******************************************************************************/ +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + /* There are no parameters from BL2 if BL31 is a reset vector */ + assert(arg0 == 0U); + assert(arg1 == 0U); + + bl31_console_setup(); + +#ifdef BL32_BASE + /* Populate entry point information for BL32 */ + SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0); + bl32_image_ep_info.pc = BL32_BASE; + bl32_image_ep_info.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE); +#endif + + /* Populate entry point information for BL33 */ + SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0); + bl33_image_ep_info.pc = PRELOADED_BL33_BASE; + bl33_image_ep_info.spsr = k3_get_spsr_for_bl33_entry(); + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); + +#ifdef K3_HW_CONFIG_BASE + /* + * According to the file ``Documentation/arm64/booting.txt`` of the + * Linux kernel tree, Linux expects the physical address of the device + * tree blob (DTB) in x0, while x1-x3 are reserved for future use and + * must be 0. 
+ */ + bl33_image_ep_info.args.arg0 = (u_register_t)K3_HW_CONFIG_BASE; + bl33_image_ep_info.args.arg1 = 0U; + bl33_image_ep_info.args.arg2 = 0U; + bl33_image_ep_info.args.arg3 = 0U; +#endif +} + +void bl31_plat_arch_setup(void) +{ + const mmap_region_t bl_regions[] = { + MAP_REGION_FLAT(BL31_START, BL31_SIZE, MT_MEMORY | MT_RW | MT_SECURE), + MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, MT_CODE | MT_RO | MT_SECURE), + MAP_REGION_FLAT(BL_RO_DATA_BASE, BL_RO_DATA_END - BL_RO_DATA_BASE, MT_RO_DATA | MT_RO | MT_SECURE), +#if USE_COHERENT_MEM + MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, MT_DEVICE | MT_RW | MT_SECURE), +#endif + { /* sentinel */ } + }; + + setup_page_tables(bl_regions, plat_k3_mmap); + enable_mmu_el3(0); +} + +void bl31_platform_setup(void) +{ + k3_gic_driver_init(K3_GIC_BASE); + k3_gic_init(); + + ti_sci_init(); +} + +void platform_mem_init(void) +{ + /* Do nothing for now... */ +} + +unsigned int plat_get_syscnt_freq2(void) +{ + uint32_t gtc_freq; + uint32_t gtc_ctrl; + + /* Lets try and provide basic diagnostics - cost is low */ + gtc_ctrl = mmio_read_32(K3_GTC_BASE + K3_GTC_CNTCR_OFFSET); + /* Did the bootloader fail to enable timer and OS guys are confused? */ + if ((gtc_ctrl & K3_GTC_CNTCR_EN_MASK) == 0U) { + ERROR("GTC is disabled! Timekeeping broken. Fix Bootloader\n"); + } + /* + * If debug will not pause time, we will have issues like + * drivers timing out while debugging, in cases of OS like Linux, + * RCU stall errors, which can be hard to differentiate vs real issues. + */ + if ((gtc_ctrl & K3_GTC_CNTCR_HDBG_MASK) == 0U) { + WARN("GTC: Debug access doesn't stop time. Fix Bootloader\n"); + } + + gtc_freq = mmio_read_32(K3_GTC_BASE + K3_GTC_CNTFID0_OFFSET); + /* Many older bootloaders may have missed programming FID0 register */ + if (gtc_freq != 0U) { + return gtc_freq; + } + + /* + * We could have just warned about this, but this can have serious + * hard to debug side effects if we are NOT sure what the actual + * frequency is. Lets make sure people don't miss this. + */ + ERROR("GTC_CNTFID0 is 0! Assuming %d Hz. Fix Bootloader\n", + SYS_COUNTER_FREQ_IN_TICKS); + + return SYS_COUNTER_FREQ_IN_TICKS; +} + +/* + * Empty function to prevent the console from being uninitialized after BL33 is + * started and allow us to see messages from BL31. + */ +void bl31_plat_runtime_setup(void) +{ +} + +/******************************************************************************* + * Return a pointer to the 'entry_point_info' structure of the next image + * for the security state specified. BL3-3 corresponds to the non-secure + * image type while BL3-2 corresponds to the secure image type. A NULL + * pointer is returned if the image does not exist. + ******************************************************************************/ +entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type) +{ + entry_point_info_t *next_image_info; + + assert(sec_state_is_valid(type)); + next_image_info = (type == NON_SECURE) ? &bl33_image_ep_info : + &bl32_image_ep_info; + /* + * None of the images on the ARM development platforms can have 0x0 + * as the entrypoint + */ + if (next_image_info->pc) + return next_image_info; + + NOTICE("Requested nonexistent image\n"); + return NULL; +} diff --git a/plat/ti/k3/common/k3_console.c b/plat/ti/k3/common/k3_console.c new file mode 100644 index 0000000..8c44c17 --- /dev/null +++ b/plat/ti/k3/common/k3_console.c @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. 
All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include + +#include + +void bl31_console_setup(void) +{ + static console_t console; + + /* Initialize the console to provide early debug support */ + console_16550_register(K3_USART_BASE, K3_USART_CLK_SPEED, + K3_USART_BAUD, &console); +} diff --git a/plat/ti/k3/common/k3_gicv3.c b/plat/ti/k3/common/k3_gicv3.c new file mode 100644 index 0000000..0199822 --- /dev/null +++ b/plat/ti/k3/common/k3_gicv3.c @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* The GICv3 driver only needs to be initialized in EL3 */ +uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT]; + +#if K3_PM_SYSTEM_SUSPEND +static gicv3_redist_ctx_t rdist_ctx[PLATFORM_CORE_COUNT]; +static gicv3_dist_ctx_t dist_ctx; +#endif + +static const interrupt_prop_t k3_interrupt_props[] = { + PLAT_ARM_G1S_IRQ_PROPS(INTR_GROUP1S), + PLAT_ARM_G0_IRQ_PROPS(INTR_GROUP0) +}; + +static unsigned int k3_mpidr_to_core_pos(unsigned long mpidr) +{ + return (unsigned int)plat_core_pos_by_mpidr(mpidr); +} + +gicv3_driver_data_t k3_gic_data = { + .rdistif_num = PLATFORM_CORE_COUNT, + .rdistif_base_addrs = rdistif_base_addrs, + .interrupt_props = k3_interrupt_props, + .interrupt_props_num = ARRAY_SIZE(k3_interrupt_props), + .mpidr_to_core_pos = k3_mpidr_to_core_pos, +}; + +void k3_gic_driver_init(uintptr_t gic_base) +{ + /* GIC Distributor is always at the base of the IP */ + uintptr_t gicd_base = gic_base; + /* GIC Redistributor base is run-time detected */ + uintptr_t gicr_base = 0; + + for (unsigned int gicr_shift = 18; gicr_shift < 21; gicr_shift++) { + uintptr_t gicr_check = gic_base + BIT(gicr_shift); + uint32_t iidr = mmio_read_32(gicr_check + GICR_IIDR); + if (iidr != 0) { + /* Found the GICR base */ + gicr_base = gicr_check; + break; + } + } + /* Assert if we have not found the GICR base */ + assert(gicr_base != 0); + + /* + * The GICv3 driver is initialized in EL3 and does not need + * to be initialized again in SEL1. This is because the S-EL1 + * can use GIC system registers to manage interrupts and does + * not need GIC interface base addresses to be configured. + */ + k3_gic_data.gicd_base = gicd_base; + k3_gic_data.gicr_base = gicr_base; + gicv3_driver_init(&k3_gic_data); +} + +void k3_gic_init(void) +{ + gicv3_distif_init(); + gicv3_rdistif_init(plat_my_core_pos()); + gicv3_cpuif_enable(plat_my_core_pos()); +} + +void k3_gic_cpuif_enable(void) +{ + gicv3_cpuif_enable(plat_my_core_pos()); +} + +void k3_gic_cpuif_disable(void) +{ + gicv3_cpuif_disable(plat_my_core_pos()); +} + +void k3_gic_pcpu_init(void) +{ + gicv3_rdistif_init(plat_my_core_pos()); +} + +#if K3_PM_SYSTEM_SUSPEND +void k3_gic_save_context(void) +{ + for (unsigned int i = 0U; i < PLATFORM_CORE_COUNT; i++) { + gicv3_rdistif_save(i, &rdist_ctx[i]); + } + gicv3_distif_save(&dist_ctx); +} + +void k3_gic_restore_context(void) +{ + gicv3_distif_init_restore(&dist_ctx); + for (unsigned int i = 0U; i < PLATFORM_CORE_COUNT; i++) { + gicv3_rdistif_init_restore(i, &rdist_ctx[i]); + } +} +#endif diff --git a/plat/ti/k3/common/k3_helpers.S b/plat/ti/k3/common/k3_helpers.S new file mode 100644 index 0000000..f4f7d18 --- /dev/null +++ b/plat/ti/k3/common/k3_helpers.S @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include + +#define K3_BOOT_REASON_COLD_RESET 0x1 + + /* ------------------------------------------------------------------ + * uintptr_t plat_get_my_entrypoint(void) + * ------------------------------------------------------------------ + * + * This function is called with the called with the MMU and caches + * disabled (SCTLR_EL3.M = 0 and SCTLR_EL3.C = 0). The function is + * responsible for distinguishing between a warm and cold reset for the + * current CPU using platform-specific means. If it's a warm reset, + * then it returns the warm reset entrypoint point provided to + * plat_setup_psci_ops() during BL31 initialization. If it's a cold + * reset then this function must return zero. + * + * This function does not follow the Procedure Call Standard used by + * the Application Binary Interface for the ARM 64-bit architecture. + * The caller should not assume that callee saved registers are + * preserved across a call to this function. + */ + .globl plat_get_my_entrypoint +func plat_get_my_entrypoint + ldr x0, k3_boot_reason_data_store + cmp x0, #K3_BOOT_REASON_COLD_RESET + + /* We ONLY support cold boot at this point */ + bne plat_unsupported_boot + mov x0, #0 + ret + + /* + * We self manage our boot reason. + * At load time, we have just a default reason - which is cold reset + */ +k3_boot_reason_data_store: + .word K3_BOOT_REASON_COLD_RESET + +plat_unsupported_boot: + b plat_unsupported_boot + +endfunc plat_get_my_entrypoint + + /* ------------------------------------------------------------------ + * unsigned int plat_my_core_pos(void) + * ------------------------------------------------------------------ + * + * This function returns the index of the calling CPU which is used as a + * CPU-specific linear index into blocks of memory (for example while + * allocating per-CPU stacks). This function will be invoked very early + * in the initialization sequence which mandates that this function + * should be implemented in assembly and should not rely on the + * avalability of a C runtime environment. This function can clobber x0 + * - x8 and must preserve x9 - x29. + * + * This function plays a crucial role in the power domain topology + * framework in PSCI and details of this can be found in Power Domain + * Topology Design. + */ + .globl plat_my_core_pos +func plat_my_core_pos + mrs x0, MPIDR_EL1 + + and x1, x0, #MPIDR_CLUSTER_MASK + lsr x1, x1, #MPIDR_AFF1_SHIFT + and x0, x0, #MPIDR_CPU_MASK + + cmp x1, 0 + b.eq out + add x0, x0, #K3_CLUSTER0_CORE_COUNT + + cmp x1, 1 + b.eq out + add x0, x0, #K3_CLUSTER1_CORE_COUNT + + cmp x1, 2 + b.eq out + add x0, x0, #K3_CLUSTER2_CORE_COUNT + +out: + ret +endfunc plat_my_core_pos + + /* -------------------------------------------------------------------- + * This handler does the following: + * - Set the L2 Data RAM latency to 2 (i.e. 
3 cycles) for Cortex-A72 + * -------------------------------------------------------------------- + */ + .globl plat_reset_handler +func plat_reset_handler + /* Only on Cortex-A72 */ + jump_if_cpu_midr CORTEX_A72_MIDR, a72 + ret + + /* Cortex-A72 specific settings */ +a72: + mrs x0, CORTEX_A72_L2CTLR_EL1 + orr x0, x0, #(CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT) + msr CORTEX_A72_L2CTLR_EL1, x0 + isb + ret +endfunc plat_reset_handler + + /* --------------------------------------------- + * int plat_crash_console_init(void) + * Function to initialize the crash console + * without a C Runtime to print crash report. + * Clobber list : x0 - x4 + * --------------------------------------------- + */ + .globl plat_crash_console_init +func plat_crash_console_init + mov_imm x0, CRASH_CONSOLE_BASE + mov_imm x1, CRASH_CONSOLE_CLK + mov_imm x2, CRASH_CONSOLE_BAUD_RATE + mov w3, #0x0 + b console_16550_core_init +endfunc plat_crash_console_init + + /* --------------------------------------------- + * int plat_crash_console_putc(void) + * Function to print a character on the crash + * console without a C Runtime. + * Clobber list : x1, x2 + * --------------------------------------------- + */ + .globl plat_crash_console_putc +func plat_crash_console_putc + mov_imm x1, CRASH_CONSOLE_BASE + b console_16550_core_putc +endfunc plat_crash_console_putc + + /* --------------------------------------------- + * void plat_crash_console_flush() + * Function to force a write of all buffered + * data that hasn't been output. + * Out : void. + * Clobber list : x0, x1 + * --------------------------------------------- + */ + .globl plat_crash_console_flush +func plat_crash_console_flush + mov_imm x0, CRASH_CONSOLE_BASE + b console_16550_core_flush +endfunc plat_crash_console_flush diff --git a/plat/ti/k3/common/k3_psci.c b/plat/ti/k3/common/k3_psci.c new file mode 100644 index 0000000..6febbc6 --- /dev/null +++ b/plat/ti/k3/common/k3_psci.c @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. 
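+ *
+ * PSCI platform backend for TI K3: CPU standby, core power on/off, the
+ * cluster L2 shutdown sequence and (optional) system suspend, all driven
+ * through the TI-SCI firmware interface.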
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0]) +#define CLUSTER_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL1]) +#define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL]) + +uintptr_t k3_sec_entrypoint; + +static void k3_cpu_standby(plat_local_state_t cpu_state) +{ + u_register_t scr; + + scr = read_scr_el3(); + /* Enable the Non secure interrupt to wake the CPU */ + write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT); + isb(); + /* dsb is good practice before using wfi to enter low power states */ + dsb(); + /* Enter standby state */ + wfi(); + /* Restore SCR */ + write_scr_el3(scr); +} + +static int k3_pwr_domain_on(u_register_t mpidr) +{ + int core, proc_id, device_id, ret; + + core = plat_core_pos_by_mpidr(mpidr); + if (core < 0) { + ERROR("Could not get target core id: %d\n", core); + return PSCI_E_INTERN_FAIL; + } + + proc_id = PLAT_PROC_START_ID + core; + device_id = PLAT_PROC_DEVICE_START_ID + core; + + ret = ti_sci_proc_request(proc_id); + if (ret) { + ERROR("Request for processor failed: %d\n", ret); + return PSCI_E_INTERN_FAIL; + } + + ret = ti_sci_proc_set_boot_cfg(proc_id, k3_sec_entrypoint, 0, 0); + if (ret) { + ERROR("Request to set core boot address failed: %d\n", ret); + return PSCI_E_INTERN_FAIL; + } + + /* sanity check these are off before starting a core */ + ret = ti_sci_proc_set_boot_ctrl(proc_id, + 0, PROC_BOOT_CTRL_FLAG_ARMV8_L2FLUSHREQ | + PROC_BOOT_CTRL_FLAG_ARMV8_AINACTS | + PROC_BOOT_CTRL_FLAG_ARMV8_ACINACTM); + if (ret) { + ERROR("Request to clear boot configuration failed: %d\n", ret); + return PSCI_E_INTERN_FAIL; + } + + ret = ti_sci_device_get(device_id); + if (ret) { + ERROR("Request to start core failed: %d\n", ret); + return PSCI_E_INTERN_FAIL; + } + + return PSCI_E_SUCCESS; +} + +void k3_pwr_domain_off(const psci_power_state_t *target_state) +{ + int core, cluster, proc_id, device_id, cluster_id, ret; + + /* At very least the local core should be powering down */ + assert(CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE); + + /* Prevent interrupts from spuriously waking up this cpu */ + k3_gic_cpuif_disable(); + + core = plat_my_core_pos(); + cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1()); + proc_id = PLAT_PROC_START_ID + core; + device_id = PLAT_PROC_DEVICE_START_ID + core; + cluster_id = PLAT_CLUSTER_DEVICE_START_ID + (cluster * 2); + + /* + * If we are the last core in the cluster then we take a reference to + * the cluster device so that it does not get shutdown before we + * execute the entire cluster L2 cleaning sequence below. 
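+	 *
+	 * The sequence queued up below is: wait for the core to reach WFI,
+	 * queue the core power down, assert AINACTS, request the L2 flush
+	 * (L2FLUSHREQ) and wait for L2F_DONE, deassert L2FLUSHREQ, assert
+	 * ACINACTM, wait for STANDBYWFIL2 and finally queue the cluster
+	 * power down, which drops the reference taken here.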
+ */ + if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) { + ret = ti_sci_device_get(cluster_id); + if (ret) { + ERROR("Request to get cluster failed: %d\n", ret); + return; + } + } + + /* Start by sending wait for WFI command */ + ret = ti_sci_proc_wait_boot_status_no_wait(proc_id, + /* + * Wait maximum time to give us the best chance to get + * to WFI before this command timeouts + */ + UINT8_MAX, 100, UINT8_MAX, UINT8_MAX, + /* Wait for WFI */ + PROC_BOOT_STATUS_FLAG_ARMV8_WFI, 0, 0, 0); + if (ret) { + ERROR("Sending wait for WFI failed (%d)\n", ret); + return; + } + + /* Now queue up the core shutdown request */ + ret = ti_sci_device_put_no_wait(device_id); + if (ret) { + ERROR("Sending core shutdown message failed (%d)\n", ret); + return; + } + + /* If our cluster is not going down we stop here */ + if (CLUSTER_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE) + return; + + /* set AINACTS */ + ret = ti_sci_proc_set_boot_ctrl_no_wait(proc_id, + PROC_BOOT_CTRL_FLAG_ARMV8_AINACTS, 0); + if (ret) { + ERROR("Sending set control message failed (%d)\n", ret); + return; + } + + /* set L2FLUSHREQ */ + ret = ti_sci_proc_set_boot_ctrl_no_wait(proc_id, + PROC_BOOT_CTRL_FLAG_ARMV8_L2FLUSHREQ, 0); + if (ret) { + ERROR("Sending set control message failed (%d)\n", ret); + return; + } + + /* wait for L2FLUSHDONE*/ + ret = ti_sci_proc_wait_boot_status_no_wait(proc_id, + UINT8_MAX, 2, UINT8_MAX, UINT8_MAX, + PROC_BOOT_STATUS_FLAG_ARMV8_L2F_DONE, 0, 0, 0); + if (ret) { + ERROR("Sending wait message failed (%d)\n", ret); + return; + } + + /* clear L2FLUSHREQ */ + ret = ti_sci_proc_set_boot_ctrl_no_wait(proc_id, + 0, PROC_BOOT_CTRL_FLAG_ARMV8_L2FLUSHREQ); + if (ret) { + ERROR("Sending set control message failed (%d)\n", ret); + return; + } + + /* set ACINACTM */ + ret = ti_sci_proc_set_boot_ctrl_no_wait(proc_id, + PROC_BOOT_CTRL_FLAG_ARMV8_ACINACTM, 0); + if (ret) { + ERROR("Sending set control message failed (%d)\n", ret); + return; + } + + /* wait for STANDBYWFIL2 */ + ret = ti_sci_proc_wait_boot_status_no_wait(proc_id, + UINT8_MAX, 2, UINT8_MAX, UINT8_MAX, + PROC_BOOT_STATUS_FLAG_ARMV8_STANDBYWFIL2, 0, 0, 0); + if (ret) { + ERROR("Sending wait message failed (%d)\n", ret); + return; + } + + /* Now queue up the cluster shutdown request */ + ret = ti_sci_device_put_no_wait(cluster_id); + if (ret) { + ERROR("Sending cluster shutdown message failed (%d)\n", ret); + return; + } +} + +void k3_pwr_domain_on_finish(const psci_power_state_t *target_state) +{ + /* TODO: Indicate to System firmware about completion */ + + k3_gic_pcpu_init(); + k3_gic_cpuif_enable(); +} + +static void __dead2 k3_system_off(void) +{ + ERROR("System Off: operation not handled.\n"); + while (true) + wfi(); +} + +static void __dead2 k3_system_reset(void) +{ + /* Send the system reset request to system firmware */ + ti_sci_core_reboot(); + + while (true) + wfi(); +} + +static int k3_validate_power_state(unsigned int power_state, + psci_power_state_t *req_state) +{ + /* TODO: perform the proper validation */ + + return PSCI_E_SUCCESS; +} + +static int k3_validate_ns_entrypoint(uintptr_t entrypoint) +{ + /* TODO: perform the proper validation */ + + return PSCI_E_SUCCESS; +} + +#if K3_PM_SYSTEM_SUSPEND +static void k3_pwr_domain_suspend(const psci_power_state_t *target_state) +{ + unsigned int core, proc_id; + + core = plat_my_core_pos(); + proc_id = PLAT_PROC_START_ID + core; + + /* Prevent interrupts from spuriously waking up this cpu */ + k3_gic_cpuif_disable(); + k3_gic_save_context(); + + k3_pwr_domain_off(target_state); + + 
ti_sci_enter_sleep(proc_id, 0, k3_sec_entrypoint); +} + +static void k3_pwr_domain_suspend_finish(const psci_power_state_t *target_state) +{ + k3_gic_restore_context(); + k3_gic_cpuif_enable(); +} + +static void k3_get_sys_suspend_power_state(psci_power_state_t *req_state) +{ + unsigned int i; + + /* CPU & cluster off, system in retention */ + for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++) { + req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE; + } +} +#endif + +static const plat_psci_ops_t k3_plat_psci_ops = { + .cpu_standby = k3_cpu_standby, + .pwr_domain_on = k3_pwr_domain_on, + .pwr_domain_off = k3_pwr_domain_off, + .pwr_domain_on_finish = k3_pwr_domain_on_finish, +#if K3_PM_SYSTEM_SUSPEND + .pwr_domain_suspend = k3_pwr_domain_suspend, + .pwr_domain_suspend_finish = k3_pwr_domain_suspend_finish, + .get_sys_suspend_power_state = k3_get_sys_suspend_power_state, +#endif + .system_off = k3_system_off, + .system_reset = k3_system_reset, + .validate_power_state = k3_validate_power_state, + .validate_ns_entrypoint = k3_validate_ns_entrypoint +}; + +int plat_setup_psci_ops(uintptr_t sec_entrypoint, + const plat_psci_ops_t **psci_ops) +{ + k3_sec_entrypoint = sec_entrypoint; + + *psci_ops = &k3_plat_psci_ops; + + return 0; +} diff --git a/plat/ti/k3/common/k3_topology.c b/plat/ti/k3/common/k3_topology.c new file mode 100644 index 0000000..139f1fd --- /dev/null +++ b/plat/ti/k3/common/k3_topology.c @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include + +/* The power domain tree descriptor */ +static unsigned char power_domain_tree_desc[] = { + PLATFORM_SYSTEM_COUNT, + PLATFORM_CLUSTER_COUNT, + K3_CLUSTER0_CORE_COUNT, + K3_CLUSTER1_CORE_COUNT, + K3_CLUSTER2_CORE_COUNT, + K3_CLUSTER3_CORE_COUNT, +}; + +const unsigned char *plat_get_power_domain_tree_desc(void) +{ + return power_domain_tree_desc; +} + +int plat_core_pos_by_mpidr(u_register_t mpidr) +{ + unsigned int cluster = MPIDR_AFFLVL1_VAL(mpidr); + unsigned int core = MPIDR_AFFLVL0_VAL(mpidr); + + if (MPIDR_AFFLVL3_VAL(mpidr) > 0 || + MPIDR_AFFLVL2_VAL(mpidr) > 0) { + return -1; + } + + if (cluster > 0) + core += K3_CLUSTER0_CORE_COUNT; + if (cluster > 1) + core += K3_CLUSTER1_CORE_COUNT; + if (cluster > 2) + core += K3_CLUSTER2_CORE_COUNT; + if (cluster > 3) + return -1; + + return core; +} diff --git a/plat/ti/k3/common/plat_common.mk b/plat/ti/k3/common/plat_common.mk new file mode 100644 index 0000000..026d6a3 --- /dev/null +++ b/plat/ti/k3/common/plat_common.mk @@ -0,0 +1,95 @@ +# +# Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# We don't use BL1 or BL2, so BL31 is the first image to execute +RESET_TO_BL31 := 1 +# Only one core starts up at first +COLD_BOOT_SINGLE_CPU := 1 +# We can choose where a core starts executing +PROGRAMMABLE_RESET_ADDRESS:= 1 + +# ARM coherency is managed in hardware +WARMBOOT_ENABLE_DCACHE_EARLY := 1 + +# A53 erratum for SoC. 
(enable them all) +ERRATA_A53_826319 := 1 +ERRATA_A53_835769 := 1 +ERRATA_A53_836870 := 1 +ERRATA_A53_843419 := 1 +ERRATA_A53_855873 := 1 +ERRATA_A53_1530924 := 1 + +# A72 Erratum for SoC +ERRATA_A72_859971 := 1 +ERRATA_A72_1319367 := 1 + +CRASH_REPORTING := 1 +HANDLE_EA_EL3_FIRST_NS := 1 + +# Split out RO data into a non-executable section +SEPARATE_CODE_AND_RODATA := 1 + +# Generate a Position Independent Executable +ENABLE_PIE := 1 + +TI_16550_MDR_QUIRK := 1 +$(eval $(call add_define,TI_16550_MDR_QUIRK)) + +K3_USART := 0 +$(eval $(call add_define,K3_USART)) + +# Allow customizing the UART baud rate +K3_USART_BAUD := 115200 +$(eval $(call add_define,K3_USART_BAUD)) + +# Enable system suspend modes +K3_PM_SYSTEM_SUSPEND := 0 +$(eval $(call add_define,K3_PM_SYSTEM_SUSPEND)) + +# Libraries +include lib/xlat_tables_v2/xlat_tables.mk + +PLAT_INCLUDES += \ + -I${PLAT_PATH}/include \ + -I${PLAT_PATH}/common/drivers/sec_proxy \ + -I${PLAT_PATH}/common/drivers/ti_sci \ + +K3_CONSOLE_SOURCES += \ + drivers/ti/uart/aarch64/16550_console.S \ + ${PLAT_PATH}/common/k3_console.c \ + +# Include GICv3 driver files +include drivers/arm/gic/v3/gicv3.mk + +K3_GIC_SOURCES += \ + ${GICV3_SOURCES} \ + plat/common/plat_gicv3.c \ + ${PLAT_PATH}/common/k3_gicv3.c \ + +K3_PSCI_SOURCES += \ + plat/common/plat_psci_common.c \ + ${PLAT_PATH}/common/k3_psci.c \ + +K3_SEC_PROXY_SOURCES += \ + ${PLAT_PATH}/common/drivers/sec_proxy/sec_proxy.c \ + +K3_TI_SCI_SOURCES += \ + ${PLAT_PATH}/common/drivers/ti_sci/ti_sci.c \ + +PLAT_BL_COMMON_SOURCES += \ + lib/cpus/aarch64/cortex_a53.S \ + lib/cpus/aarch64/cortex_a72.S \ + ${XLAT_TABLES_LIB_SRCS} \ + ${K3_CONSOLE_SOURCES} \ + +BL31_SOURCES += \ + ${PLAT_PATH}/common/k3_bl31_setup.c \ + ${PLAT_PATH}/common/k3_helpers.S \ + ${PLAT_PATH}/common/k3_topology.c \ + ${K3_GIC_SOURCES} \ + ${K3_PSCI_SOURCES} \ + ${K3_SEC_PROXY_SOURCES} \ + ${K3_TI_SCI_SOURCES} \ diff --git a/plat/ti/k3/include/k3_console.h b/plat/ti/k3/include/k3_console.h new file mode 100644 index 0000000..6376ab3 --- /dev/null +++ b/plat/ti/k3/include/k3_console.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef K3_CONSOLE_H +#define K3_CONSOLE_H + +void bl31_console_setup(void); + +#endif /* K3_CONSOLE_H */ diff --git a/plat/ti/k3/include/k3_gicv3.h b/plat/ti/k3/include/k3_gicv3.h new file mode 100644 index 0000000..2c68a75 --- /dev/null +++ b/plat/ti/k3/include/k3_gicv3.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef K3_GICV3_H +#define K3_GICV3_H + +#include + +void k3_gic_driver_init(uintptr_t gic_base); +void k3_gic_init(void); +void k3_gic_cpuif_enable(void); +void k3_gic_cpuif_disable(void); +void k3_gic_pcpu_init(void); +void k3_gic_save_context(void); +void k3_gic_restore_context(void); + +#endif /* K3_GICV3_H */ diff --git a/plat/ti/k3/include/plat_macros.S b/plat/ti/k3/include/plat_macros.S new file mode 100644 index 0000000..38056b5 --- /dev/null +++ b/plat/ti/k3/include/plat_macros.S @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_MACROS_S +#define PLAT_MACROS_S + + /* --------------------------------------------- + * The below required platform porting macro + * prints out relevant platform registers + * whenever an unhandled exception is taken in + * BL31. 
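+	 * On K3 this macro is currently a stub, so no additional
+	 * platform registers are dumped in the crash report.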
+ * --------------------------------------------- + */ + .macro plat_crash_print_regs + /* STUB */ + .endm + +#endif /* PLAT_MACROS_S */ diff --git a/plat/ti/k3/include/platform_def.h b/plat/ti/k3/include/platform_def.h new file mode 100644 index 0000000..81a383a --- /dev/null +++ b/plat/ti/k3/include/platform_def.h @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLATFORM_DEF_H +#define PLATFORM_DEF_H + +#include +#include + +#include + +/******************************************************************************* + * Generic platform constants + ******************************************************************************/ + +/* Size of cacheable stack */ +#if IMAGE_BL31 +#define PLATFORM_STACK_SIZE 0x800 +#else +#define PLATFORM_STACK_SIZE 0x1000 +#endif + +#define PLATFORM_SYSTEM_COUNT 1 +#define PLATFORM_CORE_COUNT (K3_CLUSTER0_CORE_COUNT + \ + K3_CLUSTER1_CORE_COUNT + \ + K3_CLUSTER2_CORE_COUNT + \ + K3_CLUSTER3_CORE_COUNT) + +#define PLATFORM_CLUSTER_COUNT ((K3_CLUSTER0_CORE_COUNT != 0) + \ + (K3_CLUSTER1_CORE_COUNT != 0) + \ + (K3_CLUSTER2_CORE_COUNT != 0) + \ + (K3_CLUSTER3_CORE_COUNT != 0)) + +#define PLAT_NUM_PWR_DOMAINS (PLATFORM_SYSTEM_COUNT + \ + PLATFORM_CLUSTER_COUNT + \ + PLATFORM_CORE_COUNT) +#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2 + +/******************************************************************************* + * Memory layout constants + ******************************************************************************/ + +/* + * ARM-TF lives in SRAM, partition it here + * + * BL3-1 specific defines. + * + * Put BL3-1 at the base of the Trusted SRAM. + */ +#define BL31_BASE SEC_SRAM_BASE +#define BL31_SIZE SEC_SRAM_SIZE +#define BL31_LIMIT (BL31_BASE + BL31_SIZE) + +/* + * Defines the maximum number of translation tables that are allocated by the + * translation table library code. To minimize the amount of runtime memory + * used, choose the smallest value needed to map the required virtual addresses + * for each BL stage. + */ +#if USE_COHERENT_MEM +#define MAX_XLAT_TABLES 10 +#else +#define MAX_XLAT_TABLES 9 +#endif + +/* + * Defines the maximum number of regions that are allocated by the translation + * table library code. A region consists of physical base address, virtual base + * address, size and attributes (Device/Memory, RO/RW, Secure/Non-Secure), as + * defined in the `mmap_region_t` structure. The platform defines the regions + * that should be mapped. Then, the translation table library will create the + * corresponding tables and descriptors at runtime. To minimize the amount of + * runtime memory used, choose the smallest value needed to register the + * required regions for each BL stage. + */ +#define MAX_MMAP_REGIONS 11 + +/* + * Defines the total size of the address space in bytes. For example, for a 32 + * bit address space, this value should be `(1ull << 32)`. + */ +#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 32) +#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 32) + +/* + * Some data must be aligned on the biggest cache line size in the platform. + * This is known only to the platform as it might have a combination of + * integrated and external caches. 
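+ * The Cortex-A53 and Cortex-A72 cores used on K3 both use 64-byte cache
+ * lines, which is what the shift of 6 below encodes.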
+ */ +#define CACHE_WRITEBACK_SHIFT 6 +#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT) + +/* Platform default console definitions */ +#ifndef K3_USART_BASE +#define K3_USART_BASE (0x02800000 + 0x10000 * K3_USART) +#endif + +/* USART has a default size for address space */ +#define K3_USART_SIZE 0x1000 + +#ifndef K3_USART_CLK_SPEED +#define K3_USART_CLK_SPEED 48000000 +#endif + +/* Crash console defaults */ +#define CRASH_CONSOLE_BASE K3_USART_BASE +#define CRASH_CONSOLE_CLK K3_USART_CLK_SPEED +#define CRASH_CONSOLE_BAUD_RATE K3_USART_BAUD + +/* Timer frequency */ +#ifndef SYS_COUNTER_FREQ_IN_TICKS +#define SYS_COUNTER_FREQ_IN_TICKS 200000000 +#endif + +/* Interrupt numbers */ +#define ARM_IRQ_SEC_PHY_TIMER 29 + +#define ARM_IRQ_SEC_SGI_0 8 +#define ARM_IRQ_SEC_SGI_1 9 +#define ARM_IRQ_SEC_SGI_2 10 +#define ARM_IRQ_SEC_SGI_3 11 +#define ARM_IRQ_SEC_SGI_4 12 +#define ARM_IRQ_SEC_SGI_5 13 +#define ARM_IRQ_SEC_SGI_6 14 +#define ARM_IRQ_SEC_SGI_7 15 + +/* + * Define properties of Group 1 Secure and Group 0 interrupts as per GICv3 + * terminology. On a GICv2 system or mode, the lists will be merged and treated + * as Group 0 interrupts. + */ +#define PLAT_ARM_G1S_IRQ_PROPS(grp) \ + INTR_PROP_DESC(ARM_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, grp, \ + GIC_INTR_CFG_LEVEL), \ + INTR_PROP_DESC(ARM_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, grp, \ + GIC_INTR_CFG_EDGE), \ + INTR_PROP_DESC(ARM_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, grp, \ + GIC_INTR_CFG_EDGE), \ + INTR_PROP_DESC(ARM_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, grp, \ + GIC_INTR_CFG_EDGE), \ + INTR_PROP_DESC(ARM_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, grp, \ + GIC_INTR_CFG_EDGE), \ + INTR_PROP_DESC(ARM_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, grp, \ + GIC_INTR_CFG_EDGE), \ + INTR_PROP_DESC(ARM_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \ + GIC_INTR_CFG_EDGE) + +#define PLAT_ARM_G0_IRQ_PROPS(grp) \ + INTR_PROP_DESC(ARM_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \ + GIC_INTR_CFG_EDGE), \ + INTR_PROP_DESC(ARM_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \ + GIC_INTR_CFG_EDGE) + + +#define K3_GTC_BASE 0x00A90000 +/* We just need 20 byte offset, but simpler to just remap the 64K page in */ +#define K3_GTC_SIZE 0x10000 +#define K3_GTC_CNTCR_OFFSET 0x00 +#define K3_GTC_CNTCR_EN_MASK 0x01 +#define K3_GTC_CNTCR_HDBG_MASK 0x02 +#define K3_GTC_CNTFID0_OFFSET 0x20 + +#define K3_GIC_BASE 0x01800000 +#define K3_GIC_SIZE 0x200000 + +#if !K3_SEC_PROXY_LITE +#define SEC_PROXY_DATA_BASE 0x32C00000 +#define SEC_PROXY_DATA_SIZE 0x80000 +#define SEC_PROXY_SCFG_BASE 0x32800000 +#define SEC_PROXY_SCFG_SIZE 0x80000 +#define SEC_PROXY_RT_BASE 0x32400000 +#define SEC_PROXY_RT_SIZE 0x80000 +#else +#define SEC_PROXY_DATA_BASE 0x4D000000 +#define SEC_PROXY_DATA_SIZE 0x80000 +#define SEC_PROXY_SCFG_BASE 0x4A400000 +#define SEC_PROXY_SCFG_SIZE 0x80000 +#define SEC_PROXY_RT_BASE 0x4A600000 +#define SEC_PROXY_RT_SIZE 0x80000 +#endif /* K3_SEC_PROXY_LITE */ + +#define SEC_PROXY_TIMEOUT_US 1000000 +#define SEC_PROXY_MAX_MESSAGE_SIZE 56 + +#define TI_SCI_HOST_ID 10 +#define TI_SCI_MAX_MESSAGE_SIZE 52 + +#endif /* PLATFORM_DEF_H */ diff --git a/plat/ti/k3/platform.mk b/plat/ti/k3/platform.mk new file mode 100644 index 0000000..2de21aa --- /dev/null +++ b/plat/ti/k3/platform.mk @@ -0,0 +1,14 @@ +# +# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +PLAT_PATH := plat/ti/k3 +TARGET_BOARD ?= generic + +include ${PLAT_PATH}/common/plat_common.mk +include ${PLAT_PATH}/board/${TARGET_BOARD}/board.mk + +# modify BUILD_PLAT to point to board specific build directory +BUILD_PLAT := $(abspath ${BUILD_BASE})/${PLAT}/${TARGET_BOARD}/${BUILD_TYPE} -- cgit v1.2.3