From 102b0d2daa97dae68d3eed54d8fe37a9cc38a892 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 28 Apr 2024 11:13:47 +0200 Subject: Adding upstream version 2.8.0+dfsg. Signed-off-by: Daniel Baumann --- plat/nvidia/tegra/soc/t210/drivers/se/se_private.h | 663 ++++++++++++ .../tegra/soc/t210/drivers/se/security_engine.c | 1071 ++++++++++++++++++++ plat/nvidia/tegra/soc/t210/plat_psci_handlers.c | 609 +++++++++++ plat/nvidia/tegra/soc/t210/plat_secondary.c | 41 + plat/nvidia/tegra/soc/t210/plat_setup.c | 318 ++++++ plat/nvidia/tegra/soc/t210/plat_sip_calls.c | 97 ++ plat/nvidia/tegra/soc/t210/platform_t210.mk | 62 ++ 7 files changed, 2861 insertions(+) create mode 100644 plat/nvidia/tegra/soc/t210/drivers/se/se_private.h create mode 100644 plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c create mode 100644 plat/nvidia/tegra/soc/t210/plat_psci_handlers.c create mode 100644 plat/nvidia/tegra/soc/t210/plat_secondary.c create mode 100644 plat/nvidia/tegra/soc/t210/plat_setup.c create mode 100644 plat/nvidia/tegra/soc/t210/plat_sip_calls.c create mode 100644 plat/nvidia/tegra/soc/t210/platform_t210.mk (limited to 'plat/nvidia/tegra/soc/t210') diff --git a/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h b/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h new file mode 100644 index 0000000..c44b0fc --- /dev/null +++ b/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h @@ -0,0 +1,663 @@ +/* + * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SE_PRIVATE_H +#define SE_PRIVATE_H + +#include +#include + +/* + * PMC registers + */ + +/* SC7 context save scratch register for T210 */ +#define PMC_SCRATCH43_REG_OFFSET U(0x22C) + +/* Secure scratch registers */ +#define PMC_SECURE_SCRATCH4_OFFSET 0xC0U +#define PMC_SECURE_SCRATCH5_OFFSET 0xC4U +#define PMC_SECURE_SCRATCH6_OFFSET 0x224U +#define PMC_SECURE_SCRATCH7_OFFSET 0x228U +#define PMC_SECURE_SCRATCH116_OFFSET 0xB28U +#define PMC_SECURE_SCRATCH117_OFFSET 0xB2CU +#define PMC_SECURE_SCRATCH120_OFFSET 0xB38U +#define PMC_SECURE_SCRATCH121_OFFSET 0xB3CU +#define PMC_SECURE_SCRATCH122_OFFSET 0xB40U +#define PMC_SECURE_SCRATCH123_OFFSET 0xB44U + +/* + * AHB arbitration memory write queue + */ +#define ARAHB_MEM_WRQUE_MST_ID_OFFSET 0xFCU +#define ARAHB_MST_ID_SE2_MASK (0x1U << 13) +#define ARAHB_MST_ID_SE_MASK (0x1U << 14) + +/** + * SE registers + */ +#define TEGRA_SE_AES_KEYSLOT_COUNT 16 +#define SE_MAX_LAST_BLOCK_SIZE 0xFFFFF + +/* SE Status register */ +#define SE_STATUS_OFFSET 0x800U +#define SE_STATUS_SHIFT 0 +#define SE_STATUS_IDLE \ + ((0U) << SE_STATUS_SHIFT) +#define SE_STATUS_BUSY \ + ((1U) << SE_STATUS_SHIFT) +#define SE_STATUS(x) \ + ((x) & ((0x3U) << SE_STATUS_SHIFT)) + +#define SE_MEM_INTERFACE_SHIFT 2 +#define SE_MEM_INTERFACE_IDLE 0 +#define SE_MEM_INTERFACE_BUSY 1 +#define SE_MEM_INTERFACE(x) ((x) << SE_STATUS_SHIFT) + +/* SE register definitions */ +#define SE_SECURITY_REG_OFFSET 0x0 +#define SE_SECURITY_TZ_LOCK_SOFT_SHIFT 5 +#define SE_SECURE 0x0 +#define SE_SECURITY_TZ_LOCK_SOFT(x) ((x) << SE_SECURITY_TZ_LOCK_SOFT_SHIFT) + +#define SE_SEC_ENG_DIS_SHIFT 1 +#define SE_DISABLE_FALSE 0 +#define SE_DISABLE_TRUE 1 +#define SE_SEC_ENG_DISABLE(x)((x) << SE_SEC_ENG_DIS_SHIFT) + +/* SE config register */ +#define SE_CONFIG_REG_OFFSET 0x14U +#define SE_CONFIG_ENC_ALG_SHIFT 12 +#define SE_CONFIG_ENC_ALG_AES_ENC \ + ((1U) << SE_CONFIG_ENC_ALG_SHIFT) +#define 
SE_CONFIG_ENC_ALG_RNG \ + ((2U) << SE_CONFIG_ENC_ALG_SHIFT) +#define SE_CONFIG_ENC_ALG_SHA \ + ((3U) << SE_CONFIG_ENC_ALG_SHIFT) +#define SE_CONFIG_ENC_ALG_RSA \ + ((4U) << SE_CONFIG_ENC_ALG_SHIFT) +#define SE_CONFIG_ENC_ALG_NOP \ + ((0U) << SE_CONFIG_ENC_ALG_SHIFT) +#define SE_CONFIG_ENC_ALG(x) \ + ((x) & ((0xFU) << SE_CONFIG_ENC_ALG_SHIFT)) + +#define SE_CONFIG_DEC_ALG_SHIFT 8 +#define SE_CONFIG_DEC_ALG_AES \ + ((1U) << SE_CONFIG_DEC_ALG_SHIFT) +#define SE_CONFIG_DEC_ALG_NOP \ + ((0U) << SE_CONFIG_DEC_ALG_SHIFT) +#define SE_CONFIG_DEC_ALG(x) \ + ((x) & ((0xFU) << SE_CONFIG_DEC_ALG_SHIFT)) + +#define SE_CONFIG_DST_SHIFT 2 +#define SE_CONFIG_DST_MEMORY \ + ((0U) << SE_CONFIG_DST_SHIFT) +#define SE_CONFIG_DST_HASHREG \ + ((1U) << SE_CONFIG_DST_SHIFT) +#define SE_CONFIG_DST_KEYTAB \ + ((2U) << SE_CONFIG_DST_SHIFT) +#define SE_CONFIG_DST_SRK \ + ((3U) << SE_CONFIG_DST_SHIFT) +#define SE_CONFIG_DST_RSAREG \ + ((4U) << SE_CONFIG_DST_SHIFT) +#define SE_CONFIG_DST(x) \ + ((x) & ((0x7U) << SE_CONFIG_DST_SHIFT)) + +#define SE_CONFIG_ENC_MODE_SHIFT 24 +#define SE_CONFIG_ENC_MODE_KEY128 \ + ((0UL) << SE_CONFIG_ENC_MODE_SHIFT) +#define SE_CONFIG_ENC_MODE_KEY192 \ + ((1UL) << SE_CONFIG_ENC_MODE_SHIFT) +#define SE_CONFIG_ENC_MODE_KEY256 \ + ((2UL) << SE_CONFIG_ENC_MODE_SHIFT) +#define SE_CONFIG_ENC_MODE_SHA1 \ + ((0UL) << SE_CONFIG_ENC_MODE_SHIFT) +#define SE_CONFIG_ENC_MODE_SHA224 \ + ((4UL) << SE_CONFIG_ENC_MODE_SHIFT) +#define SE_CONFIG_ENC_MODE_SHA256 \ + ((5UL) << SE_CONFIG_ENC_MODE_SHIFT) +#define SE_CONFIG_ENC_MODE_SHA384 \ + ((6UL) << SE_CONFIG_ENC_MODE_SHIFT) +#define SE_CONFIG_ENC_MODE_SHA512 \ + ((7UL) << SE_CONFIG_ENC_MODE_SHIFT) +#define SE_CONFIG_ENC_MODE(x)\ + ((x) & ((0xFFUL) << SE_CONFIG_ENC_MODE_SHIFT)) + +#define SE_CONFIG_DEC_MODE_SHIFT 16 +#define SE_CONFIG_DEC_MODE_KEY128 \ + ((0UL) << SE_CONFIG_DEC_MODE_SHIFT) +#define SE_CONFIG_DEC_MODE_KEY192 \ + ((1UL) << SE_CONFIG_DEC_MODE_SHIFT) +#define SE_CONFIG_DEC_MODE_KEY256 \ + ((2UL) << SE_CONFIG_DEC_MODE_SHIFT) +#define SE_CONFIG_DEC_MODE_SHA1 \ + ((0UL) << SE_CONFIG_DEC_MODE_SHIFT) +#define SE_CONFIG_DEC_MODE_SHA224 \ + ((4UL) << SE_CONFIG_DEC_MODE_SHIFT) +#define SE_CONFIG_DEC_MODE_SHA256 \ + ((5UL) << SE_CONFIG_DEC_MODE_SHIFT) +#define SE_CONFIG_DEC_MODE_SHA384 \ + ((6UL) << SE_CONFIG_DEC_MODE_SHIFT) +#define SE_CONFIG_DEC_MODE_SHA512 \ + ((7UL) << SE_CONFIG_DEC_MODE_SHIFT) +#define SE_CONFIG_DEC_MODE(x)\ + ((x) & ((0xFFUL) << SE_CONFIG_DEC_MODE_SHIFT)) + + +/* DRBG random number generator config */ +#define SE_RNG_CONFIG_REG_OFFSET 0x340 + +#define DRBG_MODE_SHIFT 0 +#define DRBG_MODE_NORMAL \ + ((0U) << DRBG_MODE_SHIFT) +#define DRBG_MODE_FORCE_INSTANTION \ + ((1U) << DRBG_MODE_SHIFT) +#define DRBG_MODE_FORCE_RESEED \ + ((2U) << DRBG_MODE_SHIFT) +#define SE_RNG_CONFIG_MODE(x) \ + ((x) & ((0x3U) << DRBG_MODE_SHIFT)) + +#define DRBG_SRC_SHIFT 2 +#define DRBG_SRC_NONE \ + ((0U) << DRBG_SRC_SHIFT) +#define DRBG_SRC_ENTROPY \ + ((1U) << DRBG_SRC_SHIFT) +#define DRBG_SRC_LFSR \ + ((2U) << DRBG_SRC_SHIFT) +#define SE_RNG_SRC_CONFIG_MODE(x) \ + ((x) & ((0x3U) << DRBG_SRC_SHIFT)) + +/* DRBG random number generator entropy config */ + +#define SE_RNG_SRC_CONFIG_REG_OFFSET 0x344U + +#define DRBG_RO_ENT_SRC_SHIFT 1 +#define DRBG_RO_ENT_SRC_ENABLE \ + ((1U) << DRBG_RO_ENT_SRC_SHIFT) +#define DRBG_RO_ENT_SRC_DISABLE \ + ((0U) << DRBG_RO_ENT_SRC_SHIFT) +#define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x) \ + ((x) & ((0x1U) << DRBG_RO_ENT_SRC_SHIFT)) + +#define DRBG_RO_ENT_SRC_LOCK_SHIFT 0 +#define DRBG_RO_ENT_SRC_LOCK_ENABLE \ + ((1U) << 
DRBG_RO_ENT_SRC_LOCK_SHIFT) +#define DRBG_RO_ENT_SRC_LOCK_DISABLE \ + ((0U) << DRBG_RO_ENT_SRC_LOCK_SHIFT) +#define SE_RNG_SRC_CONFIG_RO_ENT_SRC_LOCK(x) \ + ((x) & ((0x1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)) + +#define DRBG_RO_ENT_IGNORE_MEM_SHIFT 12 +#define DRBG_RO_ENT_IGNORE_MEM_ENABLE \ + ((1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT) +#define DRBG_RO_ENT_IGNORE_MEM_DISABLE \ + ((0U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT) +#define SE_RNG_SRC_CONFIG_RO_ENT_IGNORE_MEM(x) \ + ((x) & ((0x1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)) + +#define SE_RNG_RESEED_INTERVAL_REG_OFFSET 0x348 + +/* SE CRYPTO */ +#define SE_CRYPTO_REG_OFFSET 0x304 +#define SE_CRYPTO_HASH_SHIFT 0 +#define SE_CRYPTO_HASH_DISABLE \ + ((0U) << SE_CRYPTO_HASH_SHIFT) +#define SE_CRYPTO_HASH_ENABLE \ + ((1U) << SE_CRYPTO_HASH_SHIFT) + +#define SE_CRYPTO_XOR_POS_SHIFT 1 +#define SE_CRYPTO_XOR_BYPASS \ + ((0U) << SE_CRYPTO_XOR_POS_SHIFT) +#define SE_CRYPTO_XOR_TOP \ + ((2U) << SE_CRYPTO_XOR_POS_SHIFT) +#define SE_CRYPTO_XOR_BOTTOM \ + ((3U) << SE_CRYPTO_XOR_POS_SHIFT) + +#define SE_CRYPTO_INPUT_SEL_SHIFT 3 +#define SE_CRYPTO_INPUT_AHB \ + ((0U) << SE_CRYPTO_INPUT_SEL_SHIFT) +#define SE_CRYPTO_INPUT_RANDOM \ + ((1U) << SE_CRYPTO_INPUT_SEL_SHIFT) +#define SE_CRYPTO_INPUT_AESOUT \ + ((2U) << SE_CRYPTO_INPUT_SEL_SHIFT) +#define SE_CRYPTO_INPUT_LNR_CTR \ + ((3U) << SE_CRYPTO_INPUT_SEL_SHIFT) + +#define SE_CRYPTO_VCTRAM_SEL_SHIFT 5 +#define SE_CRYPTO_VCTRAM_AHB \ + ((0U) << SE_CRYPTO_VCTRAM_SEL_SHIFT) +#define SE_CRYPTO_VCTRAM_AESOUT \ + ((2U) << SE_CRYPTO_VCTRAM_SEL_SHIFT) +#define SE_CRYPTO_VCTRAM_PREVAHB \ + ((3U) << SE_CRYPTO_VCTRAM_SEL_SHIFT) + +#define SE_CRYPTO_IV_SEL_SHIFT 7 +#define SE_CRYPTO_IV_ORIGINAL \ + ((0U) << SE_CRYPTO_IV_SEL_SHIFT) +#define SE_CRYPTO_IV_UPDATED \ + ((1U) << SE_CRYPTO_IV_SEL_SHIFT) + +#define SE_CRYPTO_CORE_SEL_SHIFT 8 +#define SE_CRYPTO_CORE_DECRYPT \ + ((0U) << SE_CRYPTO_CORE_SEL_SHIFT) +#define SE_CRYPTO_CORE_ENCRYPT \ + ((1U) << SE_CRYPTO_CORE_SEL_SHIFT) + +#define SE_CRYPTO_KEY_INDEX_SHIFT 24 +#define SE_CRYPTO_KEY_INDEX(x) (x << SE_CRYPTO_KEY_INDEX_SHIFT) + +#define SE_CRYPTO_MEMIF_AHB \ + ((0U) << SE_CRYPTO_MEMIF_SHIFT) +#define SE_CRYPTO_MEMIF_MCCIF \ + ((1U) << SE_CRYPTO_MEMIF_SHIFT) +#define SE_CRYPTO_MEMIF_SHIFT 31 + +/* KEY TABLE */ +#define SE_KEYTABLE_REG_OFFSET 0x31C + +/* KEYIV PKT - key slot */ +#define SE_KEYTABLE_SLOT_SHIFT 4 +#define SE_KEYTABLE_SLOT(x) (x << SE_KEYTABLE_SLOT_SHIFT) + +/* KEYIV PKT - KEYIV select */ +#define SE_KEYIV_PKT_KEYIV_SEL_SHIFT 3 +#define SE_CRYPTO_KEYIV_KEY \ + ((0U) << SE_KEYIV_PKT_KEYIV_SEL_SHIFT) +#define SE_CRYPTO_KEYIV_IVS \ + ((1U) << SE_KEYIV_PKT_KEYIV_SEL_SHIFT) + +/* KEYIV PKT - IV select */ +#define SE_KEYIV_PKT_IV_SEL_SHIFT 2 +#define SE_CRYPTO_KEYIV_IVS_OIV \ + ((0U) << SE_KEYIV_PKT_IV_SEL_SHIFT) +#define SE_CRYPTO_KEYIV_IVS_UIV \ + ((1U) << SE_KEYIV_PKT_IV_SEL_SHIFT) + +/* KEYIV PKT - key word */ +#define SE_KEYIV_PKT_KEY_WORD_SHIFT 0 +#define SE_KEYIV_PKT_KEY_WORD(x) \ + ((x) << SE_KEYIV_PKT_KEY_WORD_SHIFT) + +/* KEYIV PKT - iv word */ +#define SE_KEYIV_PKT_IV_WORD_SHIFT 0 +#define SE_KEYIV_PKT_IV_WORD(x) \ + ((x) << SE_KEYIV_PKT_IV_WORD_SHIFT) + +/* SE OPERATION */ +#define SE_OPERATION_REG_OFFSET 0x8U +#define SE_OPERATION_SHIFT 0 +#define SE_OP_ABORT \ + ((0x0U) << SE_OPERATION_SHIFT) +#define SE_OP_START \ + ((0x1U) << SE_OPERATION_SHIFT) +#define SE_OP_RESTART \ + ((0x2U) << SE_OPERATION_SHIFT) +#define SE_OP_CTX_SAVE \ + ((0x3U) << SE_OPERATION_SHIFT) +#define SE_OP_RESTART_IN \ + ((0x4U) << SE_OPERATION_SHIFT) +#define SE_OPERATION(x) \ + ((x) & ((0x7U) << 
SE_OPERATION_SHIFT)) + +/* SE CONTEXT */ +#define SE_CTX_SAVE_CONFIG_REG_OFFSET 0x70 +#define SE_CTX_SAVE_WORD_QUAD_SHIFT 0 +#define SE_CTX_SAVE_WORD_QUAD(x) \ + (x << SE_CTX_SAVE_WORD_QUAD_SHIFT) +#define SE_CTX_SAVE_WORD_QUAD_KEYS_0_3 \ + ((0U) << SE_CTX_SAVE_WORD_QUAD_SHIFT) +#define SE_CTX_SAVE_WORD_QUAD_KEYS_4_7 \ + ((1U) << SE_CTX_SAVE_WORD_QUAD_SHIFT) +#define SE_CTX_SAVE_WORD_QUAD_ORIG_IV \ + ((2U) << SE_CTX_SAVE_WORD_QUAD_SHIFT) +#define SE_CTX_SAVE_WORD_QUAD_UPD_IV \ + ((3U) << SE_CTX_SAVE_WORD_QUAD_SHIFT) + +#define SE_CTX_SAVE_KEY_INDEX_SHIFT 8 +#define SE_CTX_SAVE_KEY_INDEX(x) (x << SE_CTX_SAVE_KEY_INDEX_SHIFT) + +#define SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT 24 +#define SE_CTX_SAVE_STICKY_WORD_QUAD_STICKY_0_3 \ + ((0U) << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT) +#define SE_CTX_SAVE_STICKY_WORD_QUAD_STICKY_4_7 \ + ((1U) << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT) +#define SE_CTX_SAVE_STICKY_WORD_QUAD(x) \ + (x << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT) + +#define SE_CTX_SAVE_SRC_SHIFT 29 +#define SE_CTX_SAVE_SRC_STICKY_BITS \ + ((0U) << SE_CTX_SAVE_SRC_SHIFT) +#define SE_CTX_SAVE_SRC_RSA_KEYTABLE \ + ((1U) << SE_CTX_SAVE_SRC_SHIFT) +#define SE_CTX_SAVE_SRC_AES_KEYTABLE \ + ((2U) << SE_CTX_SAVE_SRC_SHIFT) +#define SE_CTX_SAVE_SRC_PKA1_STICKY_BITS \ + ((3U) << SE_CTX_SAVE_SRC_SHIFT) +#define SE_CTX_SAVE_SRC_MEM \ + ((4U) << SE_CTX_SAVE_SRC_SHIFT) +#define SE_CTX_SAVE_SRC_SRK \ + ((6U) << SE_CTX_SAVE_SRC_SHIFT) +#define SE_CTX_SAVE_SRC_PKA1_KEYTABLE \ + ((7U) << SE_CTX_SAVE_SRC_SHIFT) + +#define SE_CTX_STICKY_WORD_QUAD_SHIFT 24 +#define SE_CTX_STICKY_WORD_QUAD_WORDS_0_3 \ + ((0U) << SE_CTX_STICKY_WORD_QUAD_SHIFT) +#define SE_CTX_STICKY_WORD_QUAD_WORDS_4_7 \ + ((1U) << SE_CTX_STICKY_WORD_QUAD_SHIFT) +#define SE_CTX_STICKY_WORD_QUAD(x) (x << SE_CTX_STICKY_WORD_QUAD_SHIFT) + +#define SE_CTX_SAVE_RSA_KEY_INDEX_SHIFT 16 +#define SE_CTX_SAVE_RSA_KEY_INDEX(x) \ + (x << SE_CTX_SAVE_RSA_KEY_INDEX_SHIFT) + +#define SE_CTX_RSA_WORD_QUAD_SHIFT 12 +#define SE_CTX_RSA_WORD_QUAD(x) \ + (x << SE_CTX_RSA_WORD_QUAD_SHIFT) + +#define SE_CTX_PKA1_WORD_QUAD_L_SHIFT 0 +#define SE_CTX_PKA1_WORD_QUAD_L_SIZE \ + ((true ? 4:0) - \ + (false ? 
4:0) + 1) +#define SE_CTX_PKA1_WORD_QUAD_L(x)\ + (((x) << SE_CTX_PKA1_WORD_QUAD_L_SHIFT) & 0x1f) + +#define SE_CTX_PKA1_WORD_QUAD_H_SHIFT 12 +#define SE_CTX_PKA1_WORD_QUAD_H(x)\ + ((((x) >> SE_CTX_PKA1_WORD_QUAD_L_SIZE) & 0xf) \ + << SE_CTX_PKA1_WORD_QUAD_H_SHIFT) + +#define SE_RSA_KEY_INDEX_SLOT0_EXP 0 +#define SE_RSA_KEY_INDEX_SLOT0_MOD 1 +#define SE_RSA_KEY_INDEX_SLOT1_EXP 2 +#define SE_RSA_KEY_INDEX_SLOT1_MOD 3 + + +/* SE_CTX_SAVE_AUTO */ +#define SE_CTX_SAVE_AUTO_REG_OFFSET 0x74U + +/* Enable */ +#define SE_CTX_SAVE_AUTO_ENABLE_SHIFT 0 +#define SE_CTX_SAVE_AUTO_DIS \ + ((0U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT) +#define SE_CTX_SAVE_AUTO_EN \ + ((1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT) +#define SE_CTX_SAVE_AUTO_ENABLE(x) \ + ((x) & ((0x1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)) + +/* Lock */ +#define SE_CTX_SAVE_AUTO_LOCK_SHIFT 8 +#define SE_CTX_SAVE_AUTO_LOCK_EN \ + ((1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT) +#define SE_CTX_SAVE_AUTO_LOCK_DIS \ + ((0U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT) +#define SE_CTX_SAVE_AUTO_LOCK(x) \ + ((x) & ((0x1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)) + +/* Current context save number of blocks*/ +#define SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT 16 +#define SE_CTX_SAVE_AUTO_CURR_CNT_MASK 0x3FFU +#define SE_CTX_SAVE_GET_BLK_COUNT(x) \ + (((x) >> SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT) & \ + SE_CTX_SAVE_AUTO_CURR_CNT_MASK) + +#define SE_CTX_SAVE_SIZE_BLOCKS_SE1 133 +#define SE_CTX_SAVE_SIZE_BLOCKS_SE2 646 + +/* SE TZRAM OPERATION - only for SE1 */ +#define SE_TZRAM_OPERATION 0x540U + +#define SE_TZRAM_OP_MODE_SHIFT 1 +#define SE_TZRAM_OP_COMMAND_INIT 1 +#define SE_TZRAM_OP_COMMAND_SHIFT 0 +#define SE_TZRAM_OP_MODE_SAVE \ + ((0U) << SE_TZRAM_OP_MODE_SHIFT) +#define SE_TZRAM_OP_MODE_RESTORE \ + ((1U) << SE_TZRAM_OP_MODE_SHIFT) +#define SE_TZRAM_OP_MODE(x) \ + ((x) & ((0x1U) << SE_TZRAM_OP_MODE_SHIFT)) + +#define SE_TZRAM_OP_BUSY_SHIFT 2 +#define SE_TZRAM_OP_BUSY_OFF \ + ((0U) << SE_TZRAM_OP_BUSY_SHIFT) +#define SE_TZRAM_OP_BUSY_ON \ + ((1U) << SE_TZRAM_OP_BUSY_SHIFT) +#define SE_TZRAM_OP_BUSY(x) \ + ((x) & ((0x1U) << SE_TZRAM_OP_BUSY_SHIFT)) + +#define SE_TZRAM_OP_REQ_SHIFT 0 +#define SE_TZRAM_OP_REQ_IDLE \ + ((0U) << SE_TZRAM_OP_REQ_SHIFT) +#define SE_TZRAM_OP_REQ_INIT \ + ((1U) << SE_TZRAM_OP_REQ_SHIFT) +#define SE_TZRAM_OP_REQ(x) \ + ((x) & ((0x1U) << SE_TZRAM_OP_REQ_SHIFT)) + +/* SE Interrupt */ +#define SE_INT_ENABLE_REG_OFFSET U(0xC) +#define SE_INT_STATUS_REG_OFFSET 0x10U +#define SE_INT_OP_DONE_SHIFT 4 +#define SE_INT_OP_DONE_CLEAR \ + ((0U) << SE_INT_OP_DONE_SHIFT) +#define SE_INT_OP_DONE_ACTIVE \ + ((1U) << SE_INT_OP_DONE_SHIFT) +#define SE_INT_OP_DONE(x) \ + ((x) & ((0x1U) << SE_INT_OP_DONE_SHIFT)) + +/* SE TZRAM SECURITY */ +#define SE_TZRAM_SEC_REG_OFFSET 0x4 + +#define SE_TZRAM_SEC_SETTING_SHIFT 0 +#define SE_TZRAM_SECURE \ + ((0UL) << SE_TZRAM_SEC_SETTING_SHIFT) +#define SE_TZRAM_NONSECURE \ + ((1UL) << SE_TZRAM_SEC_SETTING_SHIFT) +#define SE_TZRAM_SEC_SETTING(x) \ + ((x) & ((0x1UL) << SE_TZRAM_SEC_SETTING_SHIFT)) + +/* PKA1 KEY SLOTS */ +#define TEGRA_SE_PKA1_KEYSLOT_COUNT 4 + + +/* SE error status */ +#define SE_ERR_STATUS_REG_OFFSET 0x804U +#define SE_CRYPTO_KEYTABLE_DST_REG_OFFSET 0x330 +#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT 0 +#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(x) \ + (x << SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT) + +#define SE_KEY_INDEX_SHIFT 8 +#define SE_CRYPTO_KEYTABLE_DST_KEY_INDEX(x) (x << SE_KEY_INDEX_SHIFT) + + +/* SE linked list (LL) register */ +#define SE_IN_LL_ADDR_REG_OFFSET 0x18U +#define SE_OUT_LL_ADDR_REG_OFFSET 0x24U +#define SE_BLOCK_COUNT_REG_OFFSET 
0x318U + +/* AES data sizes */ +#define TEGRA_SE_KEY_256_SIZE 32 +#define TEGRA_SE_KEY_192_SIZE 24 +#define TEGRA_SE_KEY_128_SIZE 16 +#define TEGRA_SE_AES_BLOCK_SIZE 16 +#define TEGRA_SE_AES_MIN_KEY_SIZE 16 +#define TEGRA_SE_AES_MAX_KEY_SIZE 32 +#define TEGRA_SE_AES_IV_SIZE 16 + +#define TEGRA_SE_RNG_IV_SIZE 16 +#define TEGRA_SE_RNG_DT_SIZE 16 +#define TEGRA_SE_RNG_KEY_SIZE 16 +#define TEGRA_SE_RNG_SEED_SIZE (TEGRA_SE_RNG_IV_SIZE + \ + TEGRA_SE_RNG_KEY_SIZE + \ + TEGRA_SE_RNG_DT_SIZE) +#define TEGRA_SE_RSA512_DIGEST_SIZE 64 +#define TEGRA_SE_RSA1024_DIGEST_SIZE 128 +#define TEGRA_SE_RSA1536_DIGEST_SIZE 192 +#define TEGRA_SE_RSA2048_DIGEST_SIZE 256 + +#define SE_KEY_TABLE_ACCESS_REG_OFFSET 0x284 +#define SE_KEY_READ_DISABLE_SHIFT 0 + +#define SE_CTX_BUFER_SIZE 1072 +#define SE_CTX_DRBG_BUFER_SIZE 2112 + +/* SE blobs size in bytes */ +#define SE_CTX_SAVE_RSA_KEY_LENGTH 1024 +#define SE_CTX_SAVE_RANDOM_DATA_SIZE 16 +#define SE_CTX_SAVE_STICKY_BITS_SIZE 16 +#define SE2_CONTEXT_SAVE_PKA1_STICKY_BITS_LENGTH 16 +#define SE2_CONTEXT_SAVE_PKA1_KEYS_LENGTH 8192 +#define SE_CTX_KNOWN_PATTERN_SIZE 16 +#define SE_CTX_KNOWN_PATTERN_SIZE_WORDS (SE_CTX_KNOWN_PATTERN_SIZE/4) + +/* SE RSA */ +#define TEGRA_SE_RSA_KEYSLOT_COUNT 2 +#define SE_RSA_KEY_SIZE_REG_OFFSET 0x404 +#define SE_RSA_EXP_SIZE_REG_OFFSET 0x408 +#define SE_RSA_MAX_EXP_BIT_SIZE 2048 +#define SE_RSA_MAX_EXP_SIZE32 \ + (SE_RSA_MAX_EXP_BIT_SIZE >> 5) +#define SE_RSA_MAX_MOD_BIT_SIZE 2048 +#define SE_RSA_MAX_MOD_SIZE32 \ + (SE_RSA_MAX_MOD_BIT_SIZE >> 5) + +/* SE_RSA_KEYTABLE_ADDR */ +#define SE_RSA_KEYTABLE_ADDR 0x420 +#define RSA_KEY_PKT_WORD_ADDR_SHIFT 0 +#define RSA_KEY_PKT_EXPMOD_SEL_SHIFT \ + ((6U) << RSA_KEY_PKT_WORD_ADDR_SHIFT) +#define RSA_KEY_MOD \ + ((1U) << RSA_KEY_PKT_EXPMOD_SEL_SHIFT) +#define RSA_KEY_EXP \ + ((0U) << RSA_KEY_PKT_EXPMOD_SEL_SHIFT) +#define RSA_KEY_PKT_SLOT_SHIFT 7 +#define RSA_KEY_SLOT_1 \ + ((0U) << RSA_KEY_PKT_SLOT_SHIFT) +#define RSA_KEY_SLOT_2 \ + ((1U) << RSA_KEY_PKT_SLOT_SHIFT) +#define RSA_KEY_PKT_INPUT_MODE_SHIFT 8 +#define RSA_KEY_REG_INPUT \ + ((0U) << RSA_KEY_PKT_INPUT_MODE_SHIFT) +#define RSA_KEY_DMA_INPUT \ + ((1U) << RSA_KEY_PKT_INPUT_MODE_SHIFT) + +/* SE_RSA_KEYTABLE_DATA */ +#define SE_RSA_KEYTABLE_DATA 0x424 + +/* SE_RSA_CONFIG register */ +#define SE_RSA_CONFIG 0x400 +#define RSA_KEY_SLOT_SHIFT 24 +#define RSA_KEY_SLOT(x) \ + ((x) << RSA_KEY_SLOT_SHIFT) + +/******************************************************************************* + * Structure definition + ******************************************************************************/ + +/* SE context blob */ +#pragma pack(push, 1) +typedef struct tegra_aes_key_slot { + /* 0 - 7 AES key */ + uint32_t key[8]; + /* 8 - 11 Original IV */ + uint32_t oiv[4]; + /* 12 - 15 Updated IV */ + uint32_t uiv[4]; +} tegra_se_aes_key_slot_t; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct tegra_se_context { + /* random number */ + unsigned char rand_data[SE_CTX_SAVE_RANDOM_DATA_SIZE]; + /* Sticky bits */ + unsigned char sticky_bits[SE_CTX_SAVE_STICKY_BITS_SIZE * 2]; + /* AES key slots */ + tegra_se_aes_key_slot_t key_slots[TEGRA_SE_AES_KEYSLOT_COUNT]; + /* RSA key slots */ + unsigned char rsa_keys[SE_CTX_SAVE_RSA_KEY_LENGTH]; +} tegra_se_context_t; +#pragma pack(pop) + +/* PKA context blob */ +#pragma pack(push, 1) +typedef struct tegra_pka_context { + unsigned char sticky_bits[SE2_CONTEXT_SAVE_PKA1_STICKY_BITS_LENGTH]; + unsigned char pka_keys[SE2_CONTEXT_SAVE_PKA1_KEYS_LENGTH]; +} tegra_pka_context_t; +#pragma pack(pop) + +/* SE context blob */ 
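+/*
+ * Size cross-check (plain arithmetic over the packed fields, stated here
+ * for reference): tegra_se_context_t occupies 16 (rand_data) +
+ * 32 (sticky_bits) + 16 * 64 (AES key slots: 32-byte key + 16-byte OIV +
+ * 16-byte UIV) + 1024 (rsa_keys) = 2096 bytes, so the blob below with its
+ * 16-byte known pattern is 2112 bytes, i.e. 132 AES blocks, matching
+ * SE_CTX_DRBG_BUFER_SIZE used for the SE1 context buffer.
+ */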
+#pragma pack(push, 1) +typedef struct tegra_se_context_blob { + /* SE context */ + tegra_se_context_t se_ctx; + /* Known Pattern */ + unsigned char known_pattern[SE_CTX_KNOWN_PATTERN_SIZE]; +} tegra_se_context_blob_t; +#pragma pack(pop) + +/* SE2 and PKA1 context blob */ +#pragma pack(push, 1) +typedef struct tegra_se2_context_blob { + /* SE2 context */ + tegra_se_context_t se_ctx; + /* PKA1 context */ + tegra_pka_context_t pka_ctx; + /* Known Pattern */ + unsigned char known_pattern[SE_CTX_KNOWN_PATTERN_SIZE]; +} tegra_se2_context_blob_t; +#pragma pack(pop) + +/* SE AES key type 128bit, 192bit, 256bit */ +typedef enum { + SE_AES_KEY128, + SE_AES_KEY192, + SE_AES_KEY256, +} tegra_se_aes_key_type_t; + +/* SE RSA key slot */ +typedef struct tegra_se_rsa_key_slot { + /* 0 - 63 exponent key */ + uint32_t exponent[SE_RSA_MAX_EXP_SIZE32]; + /* 64 - 127 modulus key */ + uint32_t modulus[SE_RSA_MAX_MOD_SIZE32]; +} tegra_se_rsa_key_slot_t; + + +/******************************************************************************* + * Inline functions definition + ******************************************************************************/ + +static inline uint32_t tegra_se_read_32(const tegra_se_dev_t *dev, uint32_t offset) +{ + return mmio_read_32(dev->se_base + offset); +} + +static inline void tegra_se_write_32(const tegra_se_dev_t *dev, uint32_t offset, uint32_t val) +{ + mmio_write_32(dev->se_base + offset, val); +} + +static inline uint32_t tegra_pka_read_32(tegra_pka_dev_t *dev, uint32_t offset) +{ + return mmio_read_32(dev->pka_base + offset); +} + +static inline void tegra_pka_write_32(tegra_pka_dev_t *dev, uint32_t offset, +uint32_t val) +{ + mmio_write_32(dev->pka_base + offset, val); +} + +/******************************************************************************* + * Prototypes + ******************************************************************************/ +int tegra_se_start_normal_operation(const tegra_se_dev_t *, uint32_t); +int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *, uint32_t); + +#endif /* SE_PRIVATE_H */ diff --git a/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c b/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c new file mode 100644 index 0000000..4860858 --- /dev/null +++ b/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c @@ -0,0 +1,1071 @@ +/* + * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/******************************************************************************* + * Constants and Macros + ******************************************************************************/ + +#define TIMEOUT_100MS 100U /* Timeout in 100ms */ +#define RNG_AES_KEY_INDEX 1 + +/******************************************************************************* + * Data structure and global variables + ******************************************************************************/ + +/* The security engine contexts are formatted as follows: + * + * SE1 CONTEXT: + * #--------------------------------# + * | Random Data 1 Block | + * #--------------------------------# + * | Sticky Bits 2 Blocks | + * #--------------------------------# + * | Key Table 64 Blocks | + * | For each Key (x16): | + * | Key: 2 Blocks | + * | Original-IV: 1 Block | + * | Updated-IV: 1 Block | + * #--------------------------------# + * | RSA Keys 64 Blocks | + * #--------------------------------# + * | Known Pattern 1 Block | + * #--------------------------------# + * + * SE2/PKA1 CONTEXT: + * #--------------------------------# + * | Random Data 1 Block | + * #--------------------------------# + * | Sticky Bits 2 Blocks | + * #--------------------------------# + * | Key Table 64 Blocks | + * | For each Key (x16): | + * | Key: 2 Blocks | + * | Original-IV: 1 Block | + * | Updated-IV: 1 Block | + * #--------------------------------# + * | RSA Keys 64 Blocks | + * #--------------------------------# + * | PKA sticky bits 1 Block | + * #--------------------------------# + * | PKA keys 512 Blocks | + * #--------------------------------# + * | Known Pattern 1 Block | + * #--------------------------------# + */ + +/* Known pattern data for T210 */ +static const uint8_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE] = { + /* 128 bit AES block */ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f + }; + +/* SE input and output linked list buffers */ +static tegra_se_io_lst_t se1_src_ll_buf; +static tegra_se_io_lst_t se1_dst_ll_buf; + +/* SE2 input and output linked list buffers */ +static tegra_se_io_lst_t se2_src_ll_buf; +static tegra_se_io_lst_t se2_dst_ll_buf; + +/* SE1 context buffer, 132 blocks */ +static __aligned(64) uint8_t se1_ctx_buf[SE_CTX_DRBG_BUFER_SIZE]; + +/* SE1 security engine device handle */ +static tegra_se_dev_t se_dev_1 = { + .se_num = 1, + /* Setup base address for se */ + .se_base = TEGRA_SE1_BASE, + /* Setup context size in AES blocks */ + .ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1, + /* Setup SRC buffers for SE operations */ + .src_ll_buf = &se1_src_ll_buf, + /* Setup DST buffers for SE operations */ + .dst_ll_buf = &se1_dst_ll_buf, + /* Setup context save destination */ + .ctx_save_buf = (uint32_t *)&se1_ctx_buf +}; + +/* SE2 security engine device handle (T210B01 only) */ +static tegra_se_dev_t se_dev_2 = { + .se_num = 2, + /* Setup base address for se */ + .se_base = TEGRA_SE2_BASE, + /* Setup context size in AES blocks */ + .ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2, + /* Setup SRC buffers for SE operations */ + .src_ll_buf = &se2_src_ll_buf, + /* Setup DST buffers for SE operations */ + .dst_ll_buf = &se2_dst_ll_buf, + /* Setup context save destination */ + .ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000) +}; + +static bool ecid_valid; + 
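+/*
+ * Note on the linked-list buffers above: each SE transfer in this driver
+ * uses a single-entry list. An illustrative setup (field names as used
+ * throughout this file; 'pa' and 'len' are hypothetical placeholders):
+ *
+ *	se_dev->dst_ll_buf->last_buff_num = 0;
+ *	se_dev->dst_ll_buf->buffer[0].addr = pa;
+ *	se_dev->dst_ll_buf->buffer[0].data_len = len;
+ *
+ * The list itself is flushed to memory (tegra_se_make_data_coherent)
+ * before the engine is started, since the SE fetches it over the AHB
+ * rather than through the CPU caches.
+ */
+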
+/******************************************************************************* + * Functions Definition + ******************************************************************************/ + +static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev) +{ + flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)), + sizeof(tegra_se_io_lst_t)); + flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)), + sizeof(tegra_se_io_lst_t)); +} + +/* + * Check that SE operation has completed after kickoff + * This function is invoked after an SE operation has been started, + * and it checks the following conditions: + * 1. SE_INT_STATUS = SE_OP_DONE + * 2. SE_STATUS = IDLE + * 3. AHB bus data transfer complete. + * 4. SE_ERR_STATUS is clean. + */ +static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev) +{ + uint32_t val = 0; + int32_t ret = 0; + uint32_t timeout; + + /* Poll the SE interrupt register to ensure H/W operation complete */ + val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET); + for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) && + (timeout < TIMEOUT_100MS); timeout++) { + mdelay(1); + val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET); + } + + if (timeout == TIMEOUT_100MS) { + ERROR("%s: ERR: Atomic context save operation timeout!\n", + __func__); + ret = -ETIMEDOUT; + } + + /* Poll the SE status idle to ensure H/W operation complete */ + if (ret == 0) { + val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET); + for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); + timeout++) { + mdelay(1); + val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET); + } + + if (timeout == TIMEOUT_100MS) { + ERROR("%s: ERR: MEM_INTERFACE and SE state " + "idle state timeout.\n", __func__); + ret = -ETIMEDOUT; + } + } + + /* Check AHB bus transfer complete */ + if (ret == 0) { + val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET); + for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) && + (timeout < TIMEOUT_100MS); timeout++) { + mdelay(1); + val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET); + } + + if (timeout == TIMEOUT_100MS) { + ERROR("%s: SE write over AHB timeout.\n", __func__); + ret = -ETIMEDOUT; + } + } + + /* Ensure that no errors are thrown during operation */ + if (ret == 0) { + val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET); + if (val != 0U) { + ERROR("%s: error during SE operation! 0x%x", __func__, val); + ret = -ENOTSUP; + } + } + + return ret; +} + +/* + * Wait for SE engine to be idle and clear pending interrupts before + * starting the next SE operation. + */ +static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev) +{ + int32_t ret = 0; + uint32_t val = 0; + uint32_t timeout; + + /* disable SE interrupt to prevent interrupt issued by SE operation */ + tegra_se_write_32(se_dev, SE_INT_ENABLE_REG_OFFSET, 0U); + + /* Wait for previous operation to finish */ + val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET); + for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) { + mdelay(1); + val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET); + } + + if (timeout == TIMEOUT_100MS) { + ERROR("%s: ERR: SE status is not idle!\n", __func__); + ret = -ETIMEDOUT; + } + + /* Clear any pending interrupts from previous operation */ + val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET); + tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val); + return ret; +} + +/* + * SE atomic context save. 
+ * At SC7 entry, the SE driver triggers the hardware to perform the
+ * context save operation automatically.
+ */
+static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
+{
+	int32_t ret = 0;
+	uint32_t val = 0;
+	uint32_t blk_count_limit = 0;
+	uint32_t block_count;
+
+	/* Check that the previous operation is finalized */
+	ret = tegra_se_operation_prepare(se_dev);
+
+	/* Read the context save progress counter: block_count.
+	 * Ensure no previous context save has been triggered, i.e.
+	 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
+	 */
+	if (ret == 0) {
+		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
+		block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
+		if (block_count != 0U) {
+			ERROR("%s: ctx_save triggered multiple times\n",
+				__func__);
+			ret = -EALREADY;
+		}
+	}
+
+	/* Set the expected destination block count for when the context
+	 * save completes */
+	if (ret == 0) {
+		blk_count_limit = block_count + se_dev->ctx_size_blks;
+	}
+
+	/* Program the SE_CONFIG register for an RNG operation
+	 * SE_CONFIG.ENC_ALG = RNG
+	 * SE_CONFIG.DEC_ALG = NOP
+	 * SE_CONFIG.ENC_MODE is ignored
+	 * SE_CONFIG.DEC_MODE is ignored
+	 * SE_CONFIG.DST = MEMORY
+	 */
+	if (ret == 0) {
+		val = (SE_CONFIG_ENC_ALG_RNG |
+			SE_CONFIG_DEC_ALG_NOP |
+			SE_CONFIG_DST_MEMORY);
+		tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
+
+		tegra_se_make_data_coherent(se_dev);
+
+		/* SE_CTX_SAVE operation */
+		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
+				SE_OP_CTX_SAVE);
+
+		ret = tegra_se_operation_complete(se_dev);
+	}
+
+	/* Check that the context save has written the correct number of blocks */
+	if (ret == 0) {
+		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
+		if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
+			ERROR("%s: expected %d blocks but %d were written\n",
+				__func__, blk_count_limit,
+				SE_CTX_SAVE_GET_BLK_COUNT(val));
+			ret = -ECANCELED;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Security engine primitive operations, including normal operation
+ * and the context save operation.
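+ *
+ * As an illustrative sketch (mirroring the callers later in this file;
+ * 'dst' and 'nbytes' are placeholders), a normal operation programs the
+ * linked lists and then kicks off the engine:
+ *
+ *	se_dev->dst_ll_buf->buffer[0].addr = dst;
+ *	se_dev->dst_ll_buf->buffer[0].data_len = nbytes;
+ *	ret = tegra_se_start_normal_operation(se_dev, nbytes);
+ *
+ * where nbytes is a whole number of 16-byte AES blocks, since the block
+ * count below is programmed as (nbytes / 16) - 1.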
+ */
+static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes,
+					bool context_save)
+{
+	uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
+	int ret = 0;
+
+	assert(se_dev);
+
+	/* Use device buffers for in and out */
+	tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
+	tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));
+
+	/* Check that the previous operation is finalized */
+	ret = tegra_se_operation_prepare(se_dev);
+	if (ret != 0) {
+		goto op_error;
+	}
+
+	/* Program the SE operation size */
+	if (nblocks) {
+		tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1);
+	}
+
+	/* Make SE LL data coherent before the SE operation */
+	tegra_se_make_data_coherent(se_dev);
+
+	/* Start hardware operation */
+	if (context_save)
+		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_CTX_SAVE);
+	else
+		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
+
+	/* Wait for the operation to finish */
+	ret = tegra_se_operation_complete(se_dev);
+
+op_error:
+	return ret;
+}
+
+/*
+ * Normal security engine operations other than the context save
+ */
+int tegra_se_start_normal_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
+{
+	return tegra_se_perform_operation(se_dev, nbytes, false);
+}
+
+/*
+ * Security engine context save operation
+ */
+int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
+{
+	return tegra_se_perform_operation(se_dev, nbytes, true);
+}
+
+/*
+ * Security Engine sequence to generate the SRK.
+ * SE and SE2 generate different SRKs from different
+ * entropy seeds.
+ */
+static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
+{
+	int ret = PSCI_E_INTERN_FAIL;
+	uint32_t val;
+
+	/* Configure the following hardware register settings:
+	 * SE_CONFIG.DEC_ALG = NOP
+	 * SE_CONFIG.ENC_ALG = RNG
+	 * SE_CONFIG.DST = SRK
+	 * SE_OPERATION.OP = START
+	 * SE_CRYPTO_LAST_BLOCK = 0
+	 */
+	se_dev->src_ll_buf->last_buff_num = 0;
+	se_dev->dst_ll_buf->last_buff_num = 0;
+
+	/* Configure the random number generator */
+	if (ecid_valid)
+		val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_ENTROPY);
+	else
+		val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
+	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
+
+	/* Configure output destination = SRK */
+	val = (SE_CONFIG_ENC_ALG_RNG |
+		SE_CONFIG_DEC_ALG_NOP |
+		SE_CONFIG_DST_SRK);
+	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
+
+	/* Perform hardware operation */
+	ret = tegra_se_start_normal_operation(se_dev, 0);
+
+	return ret;
+}
+
+/*
+ * Generate plaintext random data to a memory location using the
+ * SE/SE2 SP800-90 random number generator. The random data size
+ * must be a multiple of the AES block size (16 bytes).
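+ * As used below, a single 16-byte request fills the rand_data field
+ * of the context buffer; a hypothetical 48-byte request through the
+ * same helper would simply span three AES blocks.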
+ */
+static int tegra_se_lp_generate_random_data(tegra_se_dev_t *se_dev)
+{
+	int ret = 0;
+	uint32_t val;
+
+	/* Set some arbitrary memory location to store the random data */
+	se_dev->dst_ll_buf->last_buff_num = 0;
+	if (!se_dev->ctx_save_buf) {
+		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
+		return PSCI_E_NOT_PRESENT;
+	}
+	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
+					se_dev->ctx_save_buf)->rand_data)));
+	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_RANDOM_DATA_SIZE;
+
+	/* Configure the following hardware register settings:
+	 * SE_CONFIG.DEC_ALG = NOP
+	 * SE_CONFIG.ENC_ALG = RNG
+	 * SE_CONFIG.ENC_MODE = KEY192
+	 * SE_CONFIG.DST = MEMORY
+	 */
+	val = (SE_CONFIG_ENC_ALG_RNG |
+		SE_CONFIG_DEC_ALG_NOP |
+		SE_CONFIG_ENC_MODE_KEY192 |
+		SE_CONFIG_DST_MEMORY);
+	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
+
+	/* Program the RNG options in SE_CRYPTO_CONFIG as follows:
+	 * XOR_POS = BYPASS
+	 * INPUT_SEL = RANDOM (Entropy or LFSR)
+	 * HASH_ENB = DISABLE
+	 */
+	val = (SE_CRYPTO_INPUT_RANDOM |
+		SE_CRYPTO_XOR_BYPASS |
+		SE_CRYPTO_CORE_ENCRYPT |
+		SE_CRYPTO_HASH_DISABLE |
+		SE_CRYPTO_KEY_INDEX(RNG_AES_KEY_INDEX) |
+		SE_CRYPTO_IV_ORIGINAL);
+	tegra_se_write_32(se_dev, SE_CRYPTO_REG_OFFSET, val);
+
+	/* Configure the RNG */
+	if (ecid_valid)
+		val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_LFSR);
+	else
+		val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_LFSR);
+	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
+
+	/* SE normal operation */
+	ret = tegra_se_start_normal_operation(se_dev, SE_CTX_SAVE_RANDOM_DATA_SIZE);
+
+	return ret;
+}
+
+/*
+ * Encrypt memory blocks with the SRK as part of the security engine context.
+ * The data blocks include the random data and the known pattern data, where
+ * the random data is the first block and the known pattern is the last block.
+ */
+static int tegra_se_lp_data_context_save(tegra_se_dev_t *se_dev,
+		uint64_t src_addr, uint64_t dst_addr, uint32_t data_size)
+{
+	int ret = 0;
+
+	se_dev->src_ll_buf->last_buff_num = 0;
+	se_dev->dst_ll_buf->last_buff_num = 0;
+	se_dev->src_ll_buf->buffer[0].addr = src_addr;
+	se_dev->src_ll_buf->buffer[0].data_len = data_size;
+	se_dev->dst_ll_buf->buffer[0].addr = dst_addr;
+	se_dev->dst_ll_buf->buffer[0].data_len = data_size;
+
+	/* By setting the context source from memory and calling the context save
+	 * operation, the SE encrypts the memory data with the SRK.
+	 */
+	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, SE_CTX_SAVE_SRC_MEM);
+
+	ret = tegra_se_start_ctx_save_operation(se_dev, data_size);
+
+	return ret;
+}
+
+/*
+ * Context-save the key table access-control sticky bits and the
+ * security status of each key-slot.
+ * The encrypted sticky-bits are 32 bytes (2 AES blocks), formatted as
+ * the following structure:
+ * {	bit in registers			bit in context save
+ *	SECURITY_0[4]				158
+ *	SE_RSA_KEYTABLE_ACCESS_1[2:0]		157:155
+ *	SE_RSA_KEYTABLE_ACCESS_0[2:0]		154:152
+ *	SE_RSA_SECURITY_PERKEY_0[1:0]		151:150
+ *	SE_CRYPTO_KEYTABLE_ACCESS_15[7:0]	149:142
+ *	...,
+ *	SE_CRYPTO_KEYTABLE_ACCESS_0[7:0]	29:22
+ *	SE_CRYPTO_SECURITY_PERKEY_0[15:0]	21:6
+ *	SE_TZRAM_SECURITY_0[1:0]		5:4
+ *	SE_SECURITY_0[16]			3:3
+ *	SE_SECURITY_0[2:0] }			2:0
+ */
+static int tegra_se_lp_sticky_bits_context_save(tegra_se_dev_t *se_dev)
+{
+	int ret = PSCI_E_INTERN_FAIL;
+	uint32_t val = 0;
+
+	se_dev->dst_ll_buf->last_buff_num = 0;
+	if (!se_dev->ctx_save_buf) {
+		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
+		return PSCI_E_NOT_PRESENT;
+	}
+	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
+					se_dev->ctx_save_buf)->sticky_bits)));
+	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_STICKY_BITS_SIZE;
+
+	/*
+	 * The 1st AES block saves the sticky-bits context 1 - 16 bytes (0 - 3 words).
+	 * The 2nd AES block saves the sticky-bits context 17 - 32 bytes (4 - 7 words).
+	 */
+	for (int i = 0; i < 2; i++) {
+		val = SE_CTX_SAVE_SRC_STICKY_BITS |
+			SE_CTX_SAVE_STICKY_WORD_QUAD(i);
+		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
+
+		/* SE context save operation */
+		ret = tegra_se_start_ctx_save_operation(se_dev,
+				SE_CTX_SAVE_STICKY_BITS_SIZE);
+		if (ret)
+			break;
+		se_dev->dst_ll_buf->buffer[0].addr += SE_CTX_SAVE_STICKY_BITS_SIZE;
+	}
+
+	return ret;
+}
+
+static int tegra_se_aeskeytable_context_save(tegra_se_dev_t *se_dev)
+{
+	uint32_t val = 0;
+	int ret = 0;
+
+	se_dev->dst_ll_buf->last_buff_num = 0;
+	if (!se_dev->ctx_save_buf) {
+		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
+		ret = -EINVAL;
+		goto aes_keytable_save_err;
+	}
+
+	/* AES key context save */
+	for (int slot = 0; slot < TEGRA_SE_AES_KEYSLOT_COUNT; slot++) {
+		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
+						((tegra_se_context_t *)se_dev->
+						ctx_save_buf)->key_slots[slot].key)));
+		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
+		for (int i = 0; i < 2; i++) {
+			val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
+				SE_CTX_SAVE_KEY_INDEX(slot) |
+				SE_CTX_SAVE_WORD_QUAD(i);
+			tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
+
+			/* SE context save operation */
+			ret = tegra_se_start_ctx_save_operation(se_dev,
+					TEGRA_SE_KEY_128_SIZE);
+			if (ret) {
+				ERROR("%s: ERR: AES key CTX_SAVE OP failed, "
+						"slot=%d, word_quad=%d.\n",
+						__func__, slot, i);
+				goto aes_keytable_save_err;
+			}
+			se_dev->dst_ll_buf->buffer[0].addr += TEGRA_SE_KEY_128_SIZE;
+		}
+
+		/* OIV context save */
+		se_dev->dst_ll_buf->last_buff_num = 0;
+		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
+						((tegra_se_context_t *)se_dev->
+						ctx_save_buf)->key_slots[slot].oiv)));
+		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;
+
+		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
+			SE_CTX_SAVE_KEY_INDEX(slot) |
+			SE_CTX_SAVE_WORD_QUAD_ORIG_IV;
+		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
+
+		/* SE context save operation */
+		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
+		if (ret) {
+			ERROR("%s: ERR: OIV CTX_SAVE OP failed, slot=%d.\n",
+					__func__, slot);
+			goto aes_keytable_save_err;
+		}
+
+		/* UIV context save */
+		se_dev->dst_ll_buf->last_buff_num = 0;
+		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
+						((tegra_se_context_t *)se_dev->
+						
ctx_save_buf)->key_slots[slot].uiv))); + se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE; + + val = SE_CTX_SAVE_SRC_AES_KEYTABLE | + SE_CTX_SAVE_KEY_INDEX(slot) | + SE_CTX_SAVE_WORD_QUAD_UPD_IV; + tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val); + + /* SE context save operation */ + ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE); + if (ret) { + ERROR("%s: ERR: UIV CTX_SAVE OP failed, slot=%d\n", + __func__, slot); + goto aes_keytable_save_err; + } + } + +aes_keytable_save_err: + return ret; +} + +static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t *se_dev) +{ + uint32_t val = 0; + int ret = 0; + /* For T210, First the modulus and then exponent must be + * encrypted and saved. This is repeated for SLOT 0 + * and SLOT 1. Hence the order: + * SLOT 0 modulus : RSA_KEY_INDEX : 1 + * SLOT 0 exponent : RSA_KEY_INDEX : 0 + * SLOT 1 modulus : RSA_KEY_INDEX : 3 + * SLOT 1 exponent : RSA_KEY_INDEX : 2 + */ + const unsigned int key_index_mod[TEGRA_SE_RSA_KEYSLOT_COUNT][2] = { + /* RSA key slot 0 */ + {SE_RSA_KEY_INDEX_SLOT0_MOD, SE_RSA_KEY_INDEX_SLOT0_EXP}, + /* RSA key slot 1 */ + {SE_RSA_KEY_INDEX_SLOT1_MOD, SE_RSA_KEY_INDEX_SLOT1_EXP}, + }; + + se_dev->dst_ll_buf->last_buff_num = 0; + se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&( + ((tegra_se_context_t *)se_dev-> + ctx_save_buf)->rsa_keys))); + se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE; + + for (int slot = 0; slot < TEGRA_SE_RSA_KEYSLOT_COUNT; slot++) { + /* loop for modulus and exponent */ + for (int index = 0; index < 2; index++) { + for (int word_quad = 0; word_quad < 16; word_quad++) { + val = SE_CTX_SAVE_SRC_RSA_KEYTABLE | + SE_CTX_SAVE_RSA_KEY_INDEX( + key_index_mod[slot][index]) | + SE_CTX_RSA_WORD_QUAD(word_quad); + tegra_se_write_32(se_dev, + SE_CTX_SAVE_CONFIG_REG_OFFSET, val); + + /* SE context save operation */ + ret = tegra_se_start_ctx_save_operation(se_dev, + TEGRA_SE_KEY_128_SIZE); + if (ret) { + ERROR("%s: ERR: slot=%d.\n", + __func__, slot); + goto rsa_keytable_save_err; + } + + /* Update the pointer to the next word quad */ + se_dev->dst_ll_buf->buffer[0].addr += + TEGRA_SE_KEY_128_SIZE; + } + } + } + +rsa_keytable_save_err: + return ret; +} + +static int tegra_se_pkakeytable_sticky_bits_save(tegra_se_dev_t *se_dev) +{ + int ret = 0; + + se_dev->dst_ll_buf->last_buff_num = 0; + se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&( + ((tegra_se2_context_blob_t *)se_dev-> + ctx_save_buf)->pka_ctx.sticky_bits))); + se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE; + + /* PKA1 sticky bits are 1 AES block (16 bytes) */ + tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, + SE_CTX_SAVE_SRC_PKA1_STICKY_BITS | + SE_CTX_STICKY_WORD_QUAD_WORDS_0_3); + + /* SE context save operation */ + ret = tegra_se_start_ctx_save_operation(se_dev, 0); + if (ret) { + ERROR("%s: ERR: PKA1 sticky bits CTX_SAVE OP failed\n", + __func__); + goto pka_sticky_bits_save_err; + } + +pka_sticky_bits_save_err: + return ret; +} + +static int tegra_se_pkakeytable_context_save(tegra_se_dev_t *se_dev) +{ + uint32_t val = 0; + int ret = 0; + + se_dev->dst_ll_buf->last_buff_num = 0; + se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&( + ((tegra_se2_context_blob_t *)se_dev-> + ctx_save_buf)->pka_ctx.pka_keys))); + se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE; + + /* for each slot, save word quad 0-127 */ + for (int slot = 0; slot < TEGRA_SE_PKA1_KEYSLOT_COUNT; slot++) { + for (int word_quad = 0; word_quad < 512/4; word_quad++) { + val = 
SE_CTX_SAVE_SRC_PKA1_KEYTABLE | + SE_CTX_PKA1_WORD_QUAD_L((slot * 128) + + word_quad) | + SE_CTX_PKA1_WORD_QUAD_H((slot * 128) + + word_quad); + tegra_se_write_32(se_dev, + SE_CTX_SAVE_CONFIG_REG_OFFSET, val); + + /* SE context save operation */ + ret = tegra_se_start_ctx_save_operation(se_dev, + TEGRA_SE_KEY_128_SIZE); + if (ret) { + ERROR("%s: ERR: pka1 keytable ctx save error\n", + __func__); + goto pka_keytable_save_err; + } + + /* Update the pointer to the next word quad */ + se_dev->dst_ll_buf->buffer[0].addr += + TEGRA_SE_KEY_128_SIZE; + } + } + +pka_keytable_save_err: + return ret; +} + +static int tegra_se_save_SRK(tegra_se_dev_t *se_dev) +{ + tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, + SE_CTX_SAVE_SRC_SRK); + + /* SE context save operation */ + return tegra_se_start_ctx_save_operation(se_dev, 0); +} + +/* + * Lock both SE from non-TZ clients. + */ +static inline void tegra_se_lock(tegra_se_dev_t *se_dev) +{ + uint32_t val; + + assert(se_dev); + val = tegra_se_read_32(se_dev, SE_SECURITY_REG_OFFSET); + val |= SE_SECURITY_TZ_LOCK_SOFT(SE_SECURE); + tegra_se_write_32(se_dev, SE_SECURITY_REG_OFFSET, val); +} + +/* + * Use SRK to encrypt SE state and save to TZRAM carveout + */ +static int tegra_se_context_save_sw(tegra_se_dev_t *se_dev) +{ + int err = 0; + + assert(se_dev); + + /* Lock entire SE/SE2 as TZ protected */ + tegra_se_lock(se_dev); + + INFO("%s: generate SRK\n", __func__); + /* Generate SRK */ + err = tegra_se_generate_srk(se_dev); + if (err) { + ERROR("%s: ERR: SRK generation failed\n", __func__); + return err; + } + + INFO("%s: generate random data\n", __func__); + /* Generate random data */ + err = tegra_se_lp_generate_random_data(se_dev); + if (err) { + ERROR("%s: ERR: LP random pattern generation failed\n", __func__); + return err; + } + + INFO("%s: encrypt random data\n", __func__); + /* Encrypt the random data block */ + err = tegra_se_lp_data_context_save(se_dev, + ((uint64_t)(&(((tegra_se_context_t *)se_dev-> + ctx_save_buf)->rand_data))), + ((uint64_t)(&(((tegra_se_context_t *)se_dev-> + ctx_save_buf)->rand_data))), + SE_CTX_SAVE_RANDOM_DATA_SIZE); + if (err) { + ERROR("%s: ERR: random pattern encryption failed\n", __func__); + return err; + } + + INFO("%s: save SE sticky bits\n", __func__); + /* Save AES sticky bits context */ + err = tegra_se_lp_sticky_bits_context_save(se_dev); + if (err) { + ERROR("%s: ERR: sticky bits context save failed\n", __func__); + return err; + } + + INFO("%s: save AES keytables\n", __func__); + /* Save AES key table context */ + err = tegra_se_aeskeytable_context_save(se_dev); + if (err) { + ERROR("%s: ERR: LP keytable save failed\n", __func__); + return err; + } + + /* RSA key slot table context save */ + INFO("%s: save RSA keytables\n", __func__); + err = tegra_se_lp_rsakeytable_context_save(se_dev); + if (err) { + ERROR("%s: ERR: rsa key table context save failed\n", __func__); + return err; + } + + /* Only SE2 has an interface with PKA1; thus, PKA1's context is saved + * via SE2. 
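+	 * As a sketch of the overall per-engine sequence implemented in
+	 * this function:
+	 *
+	 *	SE1: sticky bits -> AES keytable -> RSA keytable
+	 *	     -> known pattern
+	 *	SE2: sticky bits -> AES keytable -> RSA keytable
+	 *	     -> PKA1 sticky bits -> PKA1 keytable -> known pattern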
+ */ + if (se_dev->se_num == 2) { + /* Encrypt PKA1 sticky bits on SE2 only */ + INFO("%s: save PKA sticky bits\n", __func__); + err = tegra_se_pkakeytable_sticky_bits_save(se_dev); + if (err) { + ERROR("%s: ERR: PKA sticky bits context save failed\n", __func__); + return err; + } + + /* Encrypt PKA1 keyslots on SE2 only */ + INFO("%s: save PKA keytables\n", __func__); + err = tegra_se_pkakeytable_context_save(se_dev); + if (err) { + ERROR("%s: ERR: PKA key table context save failed\n", __func__); + return err; + } + } + + /* Encrypt known pattern */ + if (se_dev->se_num == 1) { + err = tegra_se_lp_data_context_save(se_dev, + ((uint64_t)(&se_ctx_known_pattern_data)), + ((uint64_t)(&(((tegra_se_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))), + SE_CTX_KNOWN_PATTERN_SIZE); + } else if (se_dev->se_num == 2) { + err = tegra_se_lp_data_context_save(se_dev, + ((uint64_t)(&se_ctx_known_pattern_data)), + ((uint64_t)(&(((tegra_se2_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))), + SE_CTX_KNOWN_PATTERN_SIZE); + } + if (err) { + ERROR("%s: ERR: save LP known pattern failure\n", __func__); + return err; + } + + /* Write lp context buffer address into PMC scratch register */ + if (se_dev->se_num == 1) { + /* SE context address, support T210 only */ + mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SCRATCH43_REG_OFFSET, + ((uint64_t)(se_dev->ctx_save_buf))); + } else if (se_dev->se_num == 2) { + /* SE2 & PKA1 context address */ + mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH116_OFFSET, + ((uint64_t)(se_dev->ctx_save_buf))); + } + + /* Saves SRK to PMC secure scratch registers for BootROM, which + * verifies and restores the security engine context on warm boot. + */ + err = tegra_se_save_SRK(se_dev); + if (err < 0) { + ERROR("%s: ERR: LP SRK save failure\n", __func__); + return err; + } + + INFO("%s: SE context save done \n", __func__); + + return err; +} + +/* + * Initialize the SE engine handle + */ +void tegra_se_init(void) +{ + uint32_t val = 0; + INFO("%s: start SE init\n", __func__); + + /* Generate random SRK to initialize DRBG */ + tegra_se_generate_srk(&se_dev_1); + + if (tegra_chipid_is_t210_b01()) { + tegra_se_generate_srk(&se_dev_2); + } + + /* determine if ECID is valid */ + val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_JTAG_SECUREID_VALID); + ecid_valid = (val == ECID_VALID); + + INFO("%s: SE init done\n", __func__); +} + +static void tegra_se_enable_clocks(void) +{ + uint32_t val = 0; + + /* Enable entropy clock */ + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W); + val |= ENTROPY_CLK_ENB_BIT; + mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val); + + /* De-Assert Entropy Reset */ + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W); + val &= ~ENTROPY_RESET_BIT; + mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W, val); + + /* + * Switch SE clock source to CLK_M, to make sure SE clock + * is on when saving SE context + */ + mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_RST_CTL_CLK_SRC_SE, + SE_CLK_SRC_CLK_M); + + /* Enable SE clock */ + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V); + val |= SE_CLK_ENB_BIT; + mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val); + + /* De-Assert SE Reset */ + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V); + val &= ~SE_RESET_BIT; + mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V, val); +} + +static void tegra_se_disable_clocks(void) +{ + uint32_t val = 0; + + /* Disable entropy clock */ + val = mmio_read_32(TEGRA_CAR_RESET_BASE + 
TEGRA_CLK_OUT_ENB_W); + val &= ~ENTROPY_CLK_ENB_BIT; + mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val); + + /* Disable SE clock */ + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V); + val &= ~SE_CLK_ENB_BIT; + mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val); +} + +/* + * Security engine power suspend entry point. + * This function is invoked from PSCI power domain suspend handler. + */ +int32_t tegra_se_suspend(void) +{ + int32_t ret = 0; + uint32_t val = 0; + + /* SE does not use SMMU in EL3, disable SMMU. + * This will be re-enabled by kernel on resume */ + val = mmio_read_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0); + val &= ~PPCS_SMMU_ENABLE; + mmio_write_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0, val); + + tegra_se_enable_clocks(); + + if (tegra_chipid_is_t210_b01()) { + /* It is T210 B01, Atomic context save se2 and pka1 */ + INFO("%s: SE2/PKA1 atomic context save\n", __func__); + ret = tegra_se_context_save_atomic(&se_dev_2); + if (ret != 0) { + ERROR("%s: SE2 ctx save failed (%d)\n", __func__, ret); + } + + ret = tegra_se_context_save_atomic(&se_dev_1); + if (ret != 0) { + ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret); + } + } else { + /* It is T210, SW context save se */ + INFO("%s: SE1 legacy(SW) context save\n", __func__); + ret = tegra_se_context_save_sw(&se_dev_1); + if (ret != 0) { + ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret); + } + } + + tegra_se_disable_clocks(); + + return ret; +} + +/* + * Save TZRAM to shadow TZRAM in AON + */ +int32_t tegra_se_save_tzram(void) +{ + uint32_t val = 0; + int32_t ret = 0; + uint32_t timeout; + + INFO("%s: SE TZRAM save start\n", __func__); + tegra_se_enable_clocks(); + + val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE); + tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val); + + val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION); + for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) && + (timeout < TIMEOUT_100MS); timeout++) { + mdelay(1); + val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION); + } + + if (timeout == TIMEOUT_100MS) { + ERROR("%s: ERR: TZRAM save timeout!\n", __func__); + ret = -ETIMEDOUT; + } + + if (ret == 0) { + INFO("%s: SE TZRAM save done!\n", __func__); + } + + tegra_se_disable_clocks(); + + return ret; +} + +/* + * The function is invoked by SE resume + */ +static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev) +{ + uint32_t val; + + assert(se_dev); + + /* Lock RNG source to ENTROPY on resume */ + val = DRBG_RO_ENT_IGNORE_MEM_ENABLE | + DRBG_RO_ENT_SRC_LOCK_ENABLE | + DRBG_RO_ENT_SRC_ENABLE; + tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val); + + /* Set a random value to SRK to initialize DRBG */ + tegra_se_generate_srk(se_dev); +} + +/* + * The function is invoked on SC7 resume + */ +void tegra_se_resume(void) +{ + tegra_se_warm_boot_resume(&se_dev_1); + + if (tegra_chipid_is_t210_b01()) { + tegra_se_warm_boot_resume(&se_dev_2); + } +} diff --git a/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c new file mode 100644 index 0000000..7f73ea5 --- /dev/null +++ b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c @@ -0,0 +1,609 @@ +/* + * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2020, NVIDIA Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Register used to clear CPU reset signals. Each CPU has two reset + * signals: CPU reset (3:0) and Core reset (19:16). + */ +#define CPU_CMPLX_RESET_CLR 0x454 +#define CPU_CORE_RESET_MASK 0x10001 + +/* Clock and Reset controller registers for system clock's settings */ +#define SCLK_RATE 0x30 +#define SCLK_BURST_POLICY 0x28 +#define SCLK_BURST_POLICY_DEFAULT 0x10000000 + +static int cpu_powergate_mask[PLATFORM_MAX_CPUS_PER_CLUSTER]; +static bool tegra_bpmp_available = true; + +int32_t tegra_soc_validate_power_state(unsigned int power_state, + psci_power_state_t *req_state) +{ + int state_id = psci_get_pstate_id(power_state); + const plat_params_from_bl2_t *plat_params = bl31_get_plat_params(); + + /* Sanity check the requested state id */ + switch (state_id) { + case PSTATE_ID_CORE_POWERDN: + /* + * Core powerdown request only for afflvl 0 + */ + req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id & 0xff; + + break; + + case PSTATE_ID_CLUSTER_IDLE: + + /* + * Cluster idle request for afflvl 0 + */ + req_state->pwr_domain_state[MPIDR_AFFLVL0] = PSTATE_ID_CORE_POWERDN; + req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id; + break; + + case PSTATE_ID_SOC_POWERDN: + + /* + * sc7entry-fw must be present in the system when the bpmp + * firmware is not present, for a successful System Suspend + * entry. + */ + if (!tegra_bpmp_init() && !plat_params->sc7entry_fw_base) + return PSCI_E_NOT_SUPPORTED; + + /* + * System powerdown request only for afflvl 2 + */ + for (uint32_t i = MPIDR_AFFLVL0; i < PLAT_MAX_PWR_LVL; i++) + req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE; + + req_state->pwr_domain_state[PLAT_MAX_PWR_LVL] = + PLAT_SYS_SUSPEND_STATE_ID; + + break; + + default: + ERROR("%s: unsupported state id (%d)\n", __func__, state_id); + return PSCI_E_INVALID_PARAMS; + } + + return PSCI_E_SUCCESS; +} + +/******************************************************************************* + * Platform handler to calculate the proper target power level at the + * specified affinity level. 
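+ *
+ * Worked example (from the checks below): a PSTATE_ID_CLUSTER_IDLE
+ * request at MPIDR_AFFLVL1 is granted only if the BPMP confirms it or,
+ * when BPMP firmware is absent, if this is the last CPU standing with
+ * the DFLL in closed-loop mode; otherwise the target is demoted to
+ * PSCI_LOCAL_STATE_RUN.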
+ ******************************************************************************/ +plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl, + const plat_local_state_t *states, + unsigned int ncpu) +{ + plat_local_state_t target = PSCI_LOCAL_STATE_RUN; + int cpu = plat_my_core_pos(); + int core_pos = read_mpidr() & MPIDR_CPU_MASK; + uint32_t bpmp_reply, data[3], val; + int ret; + + /* get the power state at this level */ + if (lvl == MPIDR_AFFLVL1) + target = *(states + core_pos); + if (lvl == MPIDR_AFFLVL2) + target = *(states + cpu); + + if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CLUSTER_IDLE)) { + + /* initialize the bpmp interface */ + ret = tegra_bpmp_init(); + if (ret != 0U) { + + /* + * flag to indicate that BPMP firmware is not + * available and the CPU has to handle entry/exit + * for all power states + */ + tegra_bpmp_available = false; + + /* Cluster idle not allowed */ + target = PSCI_LOCAL_STATE_RUN; + + /******************************************* + * BPMP is not present, so handle CC6 entry + * from the CPU + ******************************************/ + + /* check if cluster idle state has been enabled */ + val = mmio_read_32(TEGRA_CL_DVFS_BASE + DVFS_DFLL_CTRL); + if (val == ENABLE_CLOSED_LOOP) { + /* + * Acquire the cluster idle lock to stop + * other CPUs from powering up. + */ + tegra_fc_ccplex_pgexit_lock(); + + /* Cluster idle only from the last standing CPU */ + if (tegra_pmc_is_last_on_cpu() && tegra_fc_is_ccx_allowed()) { + /* Cluster idle allowed */ + target = PSTATE_ID_CLUSTER_IDLE; + } else { + /* release cluster idle lock */ + tegra_fc_ccplex_pgexit_unlock(); + } + } + } else { + + /* Cluster power-down */ + data[0] = (uint32_t)cpu; + data[1] = TEGRA_PM_CC6; + data[2] = TEGRA_PM_SC1; + ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE, + (void *)&data, (int)sizeof(data), + (void *)&bpmp_reply, + (int)sizeof(bpmp_reply)); + + /* check if cluster power down is allowed */ + if ((ret != 0L) || (bpmp_reply != BPMP_CCx_ALLOWED)) { + + /* Cluster power down not allowed */ + target = PSCI_LOCAL_STATE_RUN; + } + } + + } else if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) && + (target == PSTATE_ID_SOC_POWERDN)) { + + /* System Suspend */ + target = PSTATE_ID_SOC_POWERDN; + + } else { + ; /* do nothing */ + } + + return target; +} + +int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state) +{ + (void)cpu_state; + return PSCI_E_SUCCESS; +} + +int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) +{ + u_register_t mpidr = read_mpidr(); + const plat_local_state_t *pwr_domain_state = + target_state->pwr_domain_state; + unsigned int stateid_afflvl2 = pwr_domain_state[MPIDR_AFFLVL2]; + unsigned int stateid_afflvl1 = pwr_domain_state[MPIDR_AFFLVL1]; + unsigned int stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0]; + uint32_t cfg; + int ret = PSCI_E_SUCCESS; + uint32_t val; + + if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { + + assert((stateid_afflvl0 == PLAT_MAX_OFF_STATE) || + (stateid_afflvl0 == PSTATE_ID_SOC_POWERDN)); + assert((stateid_afflvl1 == PLAT_MAX_OFF_STATE) || + (stateid_afflvl1 == PSTATE_ID_SOC_POWERDN)); + + /* Suspend se/se2 and pka1 for T210 B01 and se for T210 */ + if (tegra_se_suspend() != 0) { + ret = PSCI_E_INTERN_FAIL; + } + + } else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_IDLE) { + + assert(stateid_afflvl0 == PSTATE_ID_CORE_POWERDN); + + if (!tegra_bpmp_available) { + + /* + * When disabled, DFLL loses its state. 
Enable
+			 * open loop state for the DFLL as we don't want
+			 * garbage values being written to the PMIC
+			 * when we enter cluster idle state.
+			 */
+			mmio_write_32(TEGRA_CL_DVFS_BASE + DVFS_DFLL_CTRL,
+				      ENABLE_OPEN_LOOP);
+
+			/* Find if the platform uses OVR2/MAX77621 PMIC */
+			cfg = mmio_read_32(TEGRA_CL_DVFS_BASE + DVFS_DFLL_OUTPUT_CFG);
+			if (cfg & DFLL_OUTPUT_CFG_CLK_EN_BIT) {
+				/* OVR2 */
+
+				/* PWM tristate */
+				val = mmio_read_32(TEGRA_MISC_BASE + PINMUX_AUX_DVFS_PWM);
+				val |= PINMUX_PWM_TRISTATE;
+				mmio_write_32(TEGRA_MISC_BASE + PINMUX_AUX_DVFS_PWM, val);
+
+				/*
+				 * SCRATCH201[1] is being used to identify CPU
+				 * PMIC in warmboot code.
+				 * 0 : OVR2
+				 * 1 : MAX77621
+				 */
+				tegra_pmc_write_32(PMC_SCRATCH201, 0x0);
+			} else {
+				/* MAX77621 */
+				tegra_pmc_write_32(PMC_SCRATCH201, 0x2);
+			}
+		}
+
+		/* Prepare for cluster idle */
+		tegra_fc_cluster_idle(mpidr);
+
+	} else if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {
+
+		/* Prepare for CPU powerdown */
+		tegra_fc_cpu_powerdn(mpidr);
+
+	} else {
+		ERROR("%s: Unknown state id (%d, %d, %d)\n", __func__,
+			stateid_afflvl2, stateid_afflvl1, stateid_afflvl0);
+		ret = PSCI_E_NOT_SUPPORTED;
+	}
+
+	return ret;
+}
+
+static void tegra_reset_all_dma_masters(void)
+{
+	uint32_t val, mask;
+
+	/*
+	 * Reset all possible DMA masters in the system.
+	 */
+	val = GPU_RESET_BIT;
+	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_REG_OFFSET, val);
+
+	val = NVENC_RESET_BIT | TSECB_RESET_BIT | APE_RESET_BIT |
+	      NVJPG_RESET_BIT | NVDEC_RESET_BIT;
+	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_Y, val);
+
+	val = HOST1X_RESET_BIT | ISP_RESET_BIT | USBD_RESET_BIT |
+	      VI_RESET_BIT | SDMMC4_RESET_BIT | SDMMC1_RESET_BIT |
+	      SDMMC2_RESET_BIT;
+	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_L, val);
+
+	val = USB2_RESET_BIT | APBDMA_RESET_BIT | AHBDMA_RESET_BIT;
+	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_H, val);
+
+	val = XUSB_DEV_RESET_BIT | XUSB_HOST_RESET_BIT | TSEC_RESET_BIT |
+	      PCIE_RESET_BIT | SDMMC3_RESET_BIT;
+	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_U, val);
+
+	val = SE_RESET_BIT | HDA_RESET_BIT | SATA_RESET_BIT;
+	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_V, val);
+
+	/*
+	 * If any of the DMA masters are still alive, assume
+	 * that the system has been compromised and reboot.
+ */ + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_REG_OFFSET); + mask = GPU_RESET_BIT; + if ((val & mask) != mask) + tegra_pmc_system_reset(); + + mask = NVENC_RESET_BIT | TSECB_RESET_BIT | APE_RESET_BIT | + NVJPG_RESET_BIT | NVDEC_RESET_BIT; + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_Y); + if ((val & mask) != mask) + tegra_pmc_system_reset(); + + mask = HOST1X_RESET_BIT | ISP_RESET_BIT | USBD_RESET_BIT | + VI_RESET_BIT | SDMMC4_RESET_BIT | SDMMC1_RESET_BIT | + SDMMC2_RESET_BIT; + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_L); + if ((val & mask) != mask) + tegra_pmc_system_reset(); + + mask = USB2_RESET_BIT | APBDMA_RESET_BIT | AHBDMA_RESET_BIT; + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_H); + if ((val & mask) != mask) + tegra_pmc_system_reset(); + + mask = XUSB_DEV_RESET_BIT | XUSB_HOST_RESET_BIT | TSEC_RESET_BIT | + PCIE_RESET_BIT | SDMMC3_RESET_BIT; + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_U); + if ((val & mask) != mask) + tegra_pmc_system_reset(); + + val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_V); + mask = SE_RESET_BIT | HDA_RESET_BIT | SATA_RESET_BIT; + if ((val & mask) != mask) + tegra_pmc_system_reset(); +} + +int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) +{ + u_register_t mpidr = read_mpidr(); + const plat_local_state_t *pwr_domain_state = + target_state->pwr_domain_state; + unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL]; + const plat_params_from_bl2_t *plat_params = bl31_get_plat_params(); + uint32_t val; + + if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { + + if (tegra_chipid_is_t210_b01()) { + /* Save tzram contents */ + tegra_se_save_tzram(); + } + + /* de-init the interface */ + tegra_bpmp_suspend(); + + /* + * The CPU needs to load the System suspend entry firmware + * if nothing is running on the BPMP. + */ + if (!tegra_bpmp_available) { + + /* + * BPMP firmware is not running on the co-processor, so + * we need to explicitly load the firmware to enable + * entry/exit to/from System Suspend and set the BPMP + * on its way. + */ + + /* Power off BPMP before we proceed */ + tegra_fc_bpmp_off(); + + /* bond out IRAM banks B, C and D */ + mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_BOND_OUT_U, + IRAM_B_LOCK_BIT | IRAM_C_LOCK_BIT | + IRAM_D_LOCK_BIT); + + /* bond out APB/AHB DMAs */ + mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_BOND_OUT_H, + APB_DMA_LOCK_BIT | AHB_DMA_LOCK_BIT); + + /* Power off BPMP before we proceed */ + tegra_fc_bpmp_off(); + + /* + * Reset all the hardware blocks that can act as DMA + * masters on the bus. 
+ */ + tegra_reset_all_dma_masters(); + + /* + * Mark PMC as accessible to the non-secure world + * to allow the COP to execute System Suspend + * sequence + */ + val = mmio_read_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE); + val &= ~PMC_SECURITY_EN_BIT; + mmio_write_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE, val); + + /* clean up IRAM of any cruft */ + zeromem((void *)(uintptr_t)TEGRA_IRAM_BASE, + TEGRA_IRAM_A_SIZE); + + /* Copy the firmware to BPMP's internal RAM */ + (void)memcpy((void *)(uintptr_t)TEGRA_IRAM_BASE, + (const void *)(plat_params->sc7entry_fw_base + SC7ENTRY_FW_HEADER_SIZE_BYTES), + plat_params->sc7entry_fw_size - SC7ENTRY_FW_HEADER_SIZE_BYTES); + + /* Power on the BPMP and execute from IRAM base */ + tegra_fc_bpmp_on(TEGRA_IRAM_BASE); + + /* Wait until BPMP powers up */ + do { + val = mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET); + } while (val != SIGN_OF_LIFE); + } + + /* enter system suspend */ + tegra_fc_soc_powerdn(mpidr); + } + + return PSCI_E_SUCCESS; +} + +int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) +{ + const plat_params_from_bl2_t *plat_params = bl31_get_plat_params(); + uint32_t cfg; + uint32_t val, entrypoint = 0; + uint64_t offset; + + /* platform parameter passed by the previous bootloader */ + if (plat_params->l2_ecc_parity_prot_dis != 1) { + /* Enable ECC Parity Protection for Cortex-A57 CPUs */ + val = read_l2ctlr_el1(); + val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT; + write_l2ctlr_el1(val); + } + + /* + * Check if we are exiting from SOC_POWERDN. + */ + if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] == + PLAT_SYS_SUSPEND_STATE_ID) { + + /* + * Security engine resume + */ + if (tegra_chipid_is_t210_b01()) { + tegra_se_resume(); + } + + /* + * Lock scratch registers which hold the CPU vectors + */ + tegra_pmc_lock_cpu_vectors(); + + /* + * Enable WRAP to INCR burst type conversions for + * incoming requests on the AXI slave ports. + */ + val = mmio_read_32(TEGRA_MSELECT_BASE + MSELECT_CONFIG); + val &= ~ENABLE_UNSUP_TX_ERRORS; + val |= ENABLE_WRAP_TO_INCR_BURSTS; + mmio_write_32(TEGRA_MSELECT_BASE + MSELECT_CONFIG, val); + + /* + * Restore Boot and Power Management Processor (BPMP) reset + * address and reset it, if it is supported by the platform. 
+ */ + if (!tegra_bpmp_available) { + tegra_fc_bpmp_off(); + } else { + entrypoint = tegra_pmc_read_32(PMC_SCRATCH39); + tegra_fc_bpmp_on(entrypoint); + + /* initialise the interface */ + tegra_bpmp_resume(); + } + + if (plat_params->sc7entry_fw_base != 0U) { + /* sc7entry-fw is part of TZDRAM area */ + offset = plat_params->tzdram_base - plat_params->sc7entry_fw_base; + tegra_memctrl_tzdram_setup(plat_params->sc7entry_fw_base, + plat_params->tzdram_size + offset); + } + + if (!tegra_chipid_is_t210_b01()) { + /* restrict PMC access to secure world */ + val = mmio_read_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE); + val |= PMC_SECURITY_EN_BIT; + mmio_write_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE, val); + } + } + + /* + * Check if we are exiting cluster idle state + */ + if (target_state->pwr_domain_state[MPIDR_AFFLVL1] == + PSTATE_ID_CLUSTER_IDLE) { + + if (!tegra_bpmp_available) { + + /* PWM un-tristate */ + cfg = mmio_read_32(TEGRA_CL_DVFS_BASE + DVFS_DFLL_OUTPUT_CFG); + if (cfg & DFLL_OUTPUT_CFG_CLK_EN_BIT) { + val = mmio_read_32(TEGRA_MISC_BASE + PINMUX_AUX_DVFS_PWM); + val &= ~PINMUX_PWM_TRISTATE; + mmio_write_32(TEGRA_MISC_BASE + PINMUX_AUX_DVFS_PWM, val); + + /* make sure the setting took effect */ + val = mmio_read_32(TEGRA_MISC_BASE + PINMUX_AUX_DVFS_PWM); + assert((val & PINMUX_PWM_TRISTATE) == 0U); + } + + /* + * Restore operation mode for the DFLL ring + * oscillator + */ + mmio_write_32(TEGRA_CL_DVFS_BASE + DVFS_DFLL_CTRL, + ENABLE_CLOSED_LOOP); + + /* release cluster idle lock */ + tegra_fc_ccplex_pgexit_unlock(); + } + } + + /* + * Mark this CPU as ON in the cpu_powergate_mask[], + * so that we use Flow Controller for all subsequent + * power ups. + */ + cpu_powergate_mask[plat_my_core_pos()] = 1; + + /* + * T210 has a dedicated ARMv7 boot and power mgmt processor, BPMP. It's + * used for power management and boot purposes. Inform the BPMP that + * we have completed the cluster power up. + */ + tegra_fc_lock_active_cluster(); + + /* + * Resume PMC hardware block for Tegra210 platforms + */ + if (!tegra_chipid_is_t210_b01()) { + tegra_pmc_resume(); + } + + return PSCI_E_SUCCESS; +} + +int tegra_soc_pwr_domain_on(u_register_t mpidr) +{ + int cpu = mpidr & MPIDR_CPU_MASK; + uint32_t mask = CPU_CORE_RESET_MASK << cpu; + + /* Deassert CPU reset signals */ + mmio_write_32(TEGRA_CAR_RESET_BASE + CPU_CMPLX_RESET_CLR, mask); + + /* Turn on CPU using flow controller or PMC */ + if (cpu_powergate_mask[cpu] == 0) { + tegra_pmc_cpu_on(cpu); + } else { + tegra_fc_cpu_on(cpu); + } + + return PSCI_E_SUCCESS; +} + +int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state) +{ + tegra_fc_cpu_off(read_mpidr() & MPIDR_CPU_MASK); + return PSCI_E_SUCCESS; +} + +int tegra_soc_prepare_system_reset(void) +{ + /* + * Set System Clock (SCLK) to POR default so that the clock source + * for the PMC APB clock would not be changed due to system reset. + */ + mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_BURST_POLICY, + SCLK_BURST_POLICY_DEFAULT); + mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_RATE, 0); + + /* Wait 1 ms to make sure clock source/device logic is stabilized. */ + mdelay(1); + + /* + * Program the PMC in order to restart the system. 
+ */
+	tegra_pmc_system_reset();
+
+	return PSCI_E_SUCCESS;
+}
+
+__dead2 void tegra_soc_prepare_system_off(void)
+{
+	ERROR("Tegra System Off: operation not handled.\n");
+	panic();
+}
diff --git a/plat/nvidia/tegra/soc/t210/plat_secondary.c b/plat/nvidia/tegra/soc/t210/plat_secondary.c
new file mode 100644
index 0000000..e0242cf
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_secondary.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include 
+#include 
+
+#include 
+#include 
+
+#define SB_CSR				0x0
+#define SB_CSR_NS_RST_VEC_WR_DIS	(1 << 1)
+
+/* CPU reset vector */
+#define SB_AA64_RESET_LOW		0x30	/* width = 31:0 */
+#define SB_AA64_RESET_HI		0x34	/* width = 11:0 */
+
+extern void tegra_secure_entrypoint(void);
+
+/*******************************************************************************
+ * Setup secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+	uint32_t val;
+	uint64_t reset_addr = (uint64_t)tegra_secure_entrypoint;
+
+	INFO("Setting up secondary CPU boot\n");
+
+	/* setup secondary CPU vector */
+	mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_LOW,
+			(reset_addr & 0xFFFFFFFF) | 1);
+	val = reset_addr >> 32;
+	mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_HI, val & 0x7FF);
+
+	/* configure PMC */
+	tegra_pmc_cpu_setup(reset_addr);
+	tegra_pmc_lock_cpu_vectors();
+}
diff --git a/plat/nvidia/tegra/soc/t210/plat_setup.c b/plat/nvidia/tegra/soc/t210/plat_setup.c
new file mode 100644
index 0000000..68cd38e
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_setup.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+/* sets of MMIO ranges setup */
+#define MMIO_RANGE_0_ADDR	0x50000000
+#define MMIO_RANGE_1_ADDR	0x60000000
+#define MMIO_RANGE_2_ADDR	0x70000000
+#define MMIO_RANGE_SIZE		0x200000
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+	MAP_REGION_FLAT(TEGRA_IRAM_BASE, 0x40000, /* 256KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MMIO_RANGE_2_ADDR, MMIO_RANGE_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	{0}
+};
+
+/*******************************************************************************
+ * Set up the pagetables as per the platform memory map & initialize the MMU
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+	/* Add the map region for security engine SE2 */
+	if (tegra_chipid_is_t210_b01()) {
+		mmap_add_region((uint64_t)TEGRA_SE2_BASE,
+				(uint64_t)TEGRA_SE2_BASE,
+				(uint64_t)TEGRA_SE2_RANGE_SIZE,
+				MT_DEVICE | MT_RW | MT_SECURE);
+	}
+
+	/* MMIO space */
+	return tegra_mmap;
+}
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ *******************************************************************************
+ */
+const unsigned char tegra_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	1,
+	/* No of clusters */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of CPU cores - cluster0 */
+	PLATFORM_MAX_CPUS_PER_CLUSTER,
+	/* No of CPU cores - cluster1 */
+	PLATFORM_MAX_CPUS_PER_CLUSTER
+};
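
The descriptor above is walked level by level by the generic PSCI topology
code: each level lists the child count of every node in the level before it.
A standalone sketch of that walk, assuming PLATFORM_CLUSTER_COUNT = 2 and
PLATFORM_MAX_CPUS_PER_CLUSTER = 4 as set in platform_t210.mk further down:

#include <stdio.h>

int main(void)
{
	/* mirrors tegra_power_domain_tree_desc: { 1, 2, 4, 4 } */
	const unsigned char desc[] = { 1, 2, 4, 4 };
	unsigned int nodes = desc[0];		/* one system-level root node */
	unsigned int idx = 1, total = nodes;

	while (idx < sizeof(desc)) {
		unsigned int next = 0;

		/* sum the child counts of all nodes in the previous level */
		for (unsigned int i = 0; i < nodes && idx < sizeof(desc); i++)
			next += desc[idx++];

		total += next;
		nodes = next;
	}

	/* prints 11: one system node, two clusters, eight CPUs */
	printf("total power domain nodes: %u\n", total);
	return 0;
}
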
+
+/*******************************************************************************
+ * This function returns the Tegra default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return tegra_power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * Handler to get the System Counter Frequency
+ ******************************************************************************/
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return 19200000;
+}
+
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA210_MAX_UART_PORTS		5
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra210_uart_addresses[TEGRA210_MAX_UART_PORTS + 1] = {
+	0,	/* undefined - treated as an error case */
+	TEGRA_UARTA_BASE,
+	TEGRA_UARTB_BASE,
+	TEGRA_UARTC_BASE,
+	TEGRA_UARTD_BASE,
+	TEGRA_UARTE_BASE,
+};
+
+/*******************************************************************************
+ * Enable console corresponding to the console ID
+ ******************************************************************************/
+void plat_enable_console(int32_t id)
+{
+	static console_t uart_console;
+	uint32_t console_clock;
+
+	if ((id > 0) && (id < TEGRA210_MAX_UART_PORTS)) {
+		/*
+		 * Reference clock used by the FPGAs is a lot slower.
+		 */
+		if (tegra_platform_is_fpga()) {
+			console_clock = TEGRA_BOOT_UART_CLK_13_MHZ;
+		} else {
+			console_clock = TEGRA_BOOT_UART_CLK_408_MHZ;
+		}
+
+		(void)console_16550_register(tegra210_uart_addresses[id],
+					     console_clock,
+					     TEGRA_CONSOLE_BAUDRATE,
+					     &uart_console);
+		console_set_scope(&uart_console, CONSOLE_FLAG_BOOT |
+			CONSOLE_FLAG_RUNTIME | CONSOLE_FLAG_CRASH);
+	}
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 params from previous bootloader
+ ******************************************************************************/
+struct tegra_bl31_params *plat_get_bl31_params(void)
+{
+	return NULL;
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 platform params from previous bootloader
+ ******************************************************************************/
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
+{
+	return NULL;
+}
+
+/*******************************************************************************
+ * Handler for early platform setup
+ ******************************************************************************/
+void plat_early_platform_setup(void)
+{
+	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+	uint64_t val;
+
+	/* Verify chip id is t210 */
+	assert(tegra_chipid_is_t210());
+
+	/*
+	 * Do initial security configuration to allow DRAM/device access.
+	 */
+	tegra_memctrl_tzdram_setup(plat_params->tzdram_base,
+			(uint32_t)plat_params->tzdram_size);
+
+	/* platform parameter passed by the previous bootloader */
+	if (plat_params->l2_ecc_parity_prot_dis != 1) {
+		/* Enable ECC Parity Protection for Cortex-A57 CPUs */
+		val = read_l2ctlr_el1();
+		val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+		write_l2ctlr_el1(val);
+	}
+
+	/* Initialize security engine driver */
+	tegra_se_init();
+}
+
+/* Secure IRQs for Tegra210 */
+static const interrupt_prop_t tegra210_interrupt_props[] = {
+	INTR_PROP_DESC(TEGRA_SDEI_SGI_PRIVATE, PLAT_SDEI_CRITICAL_PRI,
+			GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(TEGRA210_TIMER1_IRQ, PLAT_TEGRA_WDT_PRIO,
+			GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE),
+	INTR_PROP_DESC(TEGRA210_WDT_CPU_LEGACY_FIQ, PLAT_TEGRA_WDT_PRIO,
+			GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE),
+};
+
+/*******************************************************************************
+ * Handler for late platform setup
+ ******************************************************************************/
+void plat_late_platform_setup(void)
+{
+	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+	uint64_t sc7entry_end, offset;
+	int ret;
+	uint32_t val;
+
+	/* memmap TZDRAM area containing the SC7 Entry Firmware */
+	if (plat_params->sc7entry_fw_base && plat_params->sc7entry_fw_size) {
+
+		assert(plat_params->sc7entry_fw_size <= TEGRA_IRAM_A_SIZE);
+
+		/*
+		 * Verify that the SC7 entry firmware resides inside the TZDRAM
+		 * aperture, _before_ the BL31 code, and that its start address
+		 * is exactly 1MB below the BL31 base.
+		 */
+
+		/* sc7entry-fw must be _before_ BL31 base */
+		assert(plat_params->tzdram_base > plat_params->sc7entry_fw_base);
+
+		sc7entry_end = plat_params->sc7entry_fw_base +
+			       plat_params->sc7entry_fw_size;
+		assert(sc7entry_end < plat_params->tzdram_base);
+
+		/* sc7entry-fw start must be exactly 1MB below BL31 base */
+		offset = plat_params->tzdram_base - plat_params->sc7entry_fw_base;
+		assert(offset == 0x100000);
+
+		/* secure TZDRAM area */
+		tegra_memctrl_tzdram_setup(plat_params->sc7entry_fw_base,
+				plat_params->tzdram_size + offset);
+
+		/* power off BPMP processor until SC7 entry */
+		tegra_fc_bpmp_off();
+
+		/* memmap SC7 entry firmware code */
+		ret = mmap_add_dynamic_region(plat_params->sc7entry_fw_base,
+				plat_params->sc7entry_fw_base,
+				plat_params->sc7entry_fw_size,
+				MT_SECURE | MT_RO_DATA);
+		assert(ret == 0);
+
+		/* restrict PMC access to secure world */
+		val = mmio_read_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE);
+		val |= PMC_SECURITY_EN_BIT;
+		mmio_write_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE, val);
+	}
+
+	if (!tegra_chipid_is_t210_b01()) {
+		/* restrict PMC access to secure world */
+		val = mmio_read_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE);
+		val |= PMC_SECURITY_EN_BIT;
+		mmio_write_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE, val);
+	}
+}
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+	tegra_gic_setup(tegra210_interrupt_props, ARRAY_SIZE(tegra210_interrupt_props));
+	tegra_gic_init();
+
+	/* Enable handling for FIQs */
+	tegra_fiq_handler_setup();
+
+	/*
+	 * Enable routing watchdog FIQs from the flow controller to
+	 * the GICD.
+	 */
+	tegra_fc_enable_fiq_to_ccplex_routing();
+}
+
+/*******************************************************************************
+ * Handler to indicate support for System Suspend
+ ******************************************************************************/
+bool plat_supports_system_suspend(void)
+{
+	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+
+	/*
+	 * T210 B01 SoCs support System Suspend natively; T210 SoCs
+	 * support it only when the sc7entry-fw is present.
+	 */
+	if (!tegra_chipid_is_t210_b01() && (plat_params->sc7entry_fw_base != 0U)) {
+		return true;
+	} else if (tegra_chipid_is_t210_b01()) {
+		return true;
+	} else {
+		return false;
+	}
+}
+
+/*******************************************************************************
+ * Platform specific runtime setup.
+ ******************************************************************************/
+void plat_runtime_setup(void)
+{
+	/*
+	 * During cold boot, it is observed that the arbitration
+	 * bit is set in the Memory controller leading to false
+	 * error interrupts in the non-secure world. To avoid
+	 * this, clean the interrupt status register before
+	 * booting into the non-secure world.
+	 */
+	tegra_memctrl_clear_pending_interrupts();
+
+	/*
+	 * During boot, USB3 and flash media (SDMMC/SATA) devices need
+	 * access to IRAM. Because these clients connect to the MC and
+	 * do not have a direct path to the IRAM, the MC implements AHB
+	 * redirection during boot to allow path to IRAM. In this mode
+	 * accesses to a programmed memory address aperture are directed
+	 * to the AHB bus, allowing access to the IRAM. This mode must be
+	 * disabled before we jump to the non-secure world.
+	 */
+	tegra_memctrl_disable_ahb_redirection();
+}
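
The asserts in plat_late_platform_setup() above pin the TZDRAM layout down:
the sc7entry firmware occupies the 1MB immediately below the BL31 image, and
the memory controller aperture is widened to cover both regions. A minimal
standalone sketch of that arithmetic; the size value is made up for the
example, and only the 1MB relationship is actually mandated by the code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t tzdram_base = 0xFF800000;	/* TZDRAM_BASE from the makefile */
	uint64_t tzdram_size = 0x400000;	/* hypothetical 4MB BL31 carveout */
	uint64_t sc7entry_fw_base = tzdram_base - 0x100000;

	uint64_t offset = tzdram_base - sc7entry_fw_base;
	assert(offset == 0x100000);		/* exactly 1MB below BL31 */

	/* secured window: starts at the firmware, spans firmware plus BL31 */
	uint64_t aperture_size = tzdram_size + offset;
	(void)aperture_size;
	return 0;
}
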
diff --git a/plat/nvidia/tegra/soc/t210/plat_sip_calls.c b/plat/nvidia/tegra/soc/t210/plat_sip_calls.c
new file mode 100644
index 0000000..e3484be
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_sip_calls.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+/*******************************************************************************
+ * PMC parameters
+ ******************************************************************************/
+#define PMC_READ	U(0xaa)
+#define PMC_WRITE	U(0xbb)
+
+/*******************************************************************************
+ * Tegra210 SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_PMC_COMMANDS	U(0xC2FFFE00)
+
+/*******************************************************************************
+ * This function is responsible for handling all T210 SiP calls
+ ******************************************************************************/
+int plat_sip_handler(uint32_t smc_fid,
+		     uint64_t x1,
+		     uint64_t x2,
+		     uint64_t x3,
+		     uint64_t x4,
+		     const void *cookie,
+		     void *handle,
+		     uint64_t flags)
+{
+	uint32_t val, ns;
+
+	/* Determine which security state this SMC originated from */
+	ns = is_caller_non_secure(flags);
+	if (!ns)
+		SMC_RET1(handle, SMC_UNK);
+
+	if (smc_fid == TEGRA_SIP_PMC_COMMANDS) {
+		/* check the address is within PMC range and is 4-byte aligned */
+		if ((x2 >= TEGRA_PMC_SIZE) || (x2 & 0x3))
+			return -EINVAL;
+
+		switch (x2) {
+		/* Blacklisted PMC registers */
+		case PMC_SCRATCH1:
+		case PMC_SCRATCH31 ... PMC_SCRATCH33:
+		case PMC_SCRATCH40:
+		case PMC_SCRATCH42:
+		case PMC_SCRATCH43 ... PMC_SCRATCH48:
+		case PMC_SCRATCH50 ... PMC_SCRATCH51:
+		case PMC_SCRATCH56 ... PMC_SCRATCH57:
+		/* PMC secure-only registers are not accessible */
+		case PMC_DPD_ENABLE_0:
+		case PMC_FUSE_CONTROL_0:
+		case PMC_CRYPTO_OP_0:
+		case PMC_TSC_MULT_0:
+		case PMC_STICKY_BIT:
+			ERROR("%s: error offset=0x%" PRIx64 "\n", __func__, x2);
+			return -EFAULT;
+		default:
+			/* Valid register */
+			break;
+		}
+
+		/* Perform PMC read/write */
+		if (x1 == PMC_READ) {
+			val = mmio_read_32((uint32_t)(TEGRA_PMC_BASE + x2));
+			write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1, val);
+		} else if (x1 == PMC_WRITE) {
+			mmio_write_32((uint32_t)(TEGRA_PMC_BASE + x2), (uint32_t)x3);
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		return -ENOTSUP;
+	}
+	return 0;
+}
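
The handler above defines a simple register-level contract for callers:
X0 = TEGRA_SIP_PMC_COMMANDS, X1 = PMC_READ or PMC_WRITE, X2 = PMC register
offset, X3 = value to write, and a read comes back in X1. A sketch of a
non-secure client, assuming the Linux kernel's SMCCC helper as the calling
environment; nothing below is provided by this patch:

#include <linux/arm-smccc.h>

#define TEGRA_SIP_PMC_COMMANDS	0xC2FFFE00UL
#define PMC_READ		0xaaUL
#define PMC_WRITE		0xbbUL

static u32 t210_pmc_read(u32 offset)
{
	struct arm_smccc_res res;

	/* X1 = sub-command, X2 = 4-byte aligned PMC register offset */
	arm_smccc_smc(TEGRA_SIP_PMC_COMMANDS, PMC_READ, offset, 0,
		      0, 0, 0, 0, &res);
	return (u32)res.a1;	/* the handler returns the value in X1 */
}

static void t210_pmc_write(u32 offset, u32 value)
{
	struct arm_smccc_res res;

	arm_smccc_smc(TEGRA_SIP_PMC_COMMANDS, PMC_WRITE, offset, value,
		      0, 0, 0, 0, &res);
}

Offsets that hit the blacklisted scratch ranges or the secure-only registers
fail with an error instead of being forwarded to the PMC.
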
diff --git a/plat/nvidia/tegra/soc/t210/platform_t210.mk b/plat/nvidia/tegra/soc/t210/platform_t210.mk
new file mode 100644
index 0000000..724cfc3
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/platform_t210.mk
@@ -0,0 +1,62 @@
+#
+# Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TZDRAM_BASE				:= 0xFF800000
+$(eval $(call add_define,TZDRAM_BASE))
+
+ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT	:= 1
+$(eval $(call add_define,ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT))
+
+PLATFORM_CLUSTER_COUNT			:= 2
+$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
+
+PLATFORM_MAX_CPUS_PER_CLUSTER		:= 4
+$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
+
+MAX_XLAT_TABLES				:= 10
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS			:= 16
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+ENABLE_TEGRA_WDT_LEGACY_FIQ_HANDLING	:= 1
+
+PLAT_INCLUDES		+=	-Iplat/nvidia/tegra/include/t210 \
+				-I${SOC_DIR}/drivers/se
+
+BL31_SOURCES		+=	${TEGRA_GICv2_SOURCES} \
+				drivers/ti/uart/aarch64/16550_console.S \
+				lib/cpus/aarch64/cortex_a53.S \
+				lib/cpus/aarch64/cortex_a57.S \
+				${TEGRA_DRIVERS}/bpmp/bpmp.c \
+				${TEGRA_DRIVERS}/flowctrl/flowctrl.c \
+				${TEGRA_DRIVERS}/memctrl/memctrl_v1.c \
+				${TEGRA_DRIVERS}/pmc/pmc.c \
+				${SOC_DIR}/plat_psci_handlers.c \
+				${SOC_DIR}/plat_setup.c \
+				${SOC_DIR}/drivers/se/security_engine.c \
+				${SOC_DIR}/plat_secondary.c \
+				${SOC_DIR}/plat_sip_calls.c
+
+# Enable workarounds for selected Cortex-A57 errata.
+A57_DISABLE_NON_TEMPORAL_HINT	:= 1
+ERRATA_A57_826974		:= 1
+ERRATA_A57_826977		:= 1
+ERRATA_A57_828024		:= 1
+ERRATA_A57_833471		:= 1
+
+# Enable workarounds for selected Cortex-A53 errata.
+A53_DISABLE_NON_TEMPORAL_HINT	:= 1
+ERRATA_A53_826319		:= 1
+ERRATA_A53_836870		:= 1
+ERRATA_A53_855873		:= 1
+
+# Skip L1 $ flush when powering down Cortex-A57 CPUs
+SKIP_A57_L1_FLUSH_PWR_DWN	:= 1
+
+# Enable higher performance Non-cacheable load forwarding
+A57_ENABLE_NONCACHEABLE_LOAD_FWD	:= 1
--
cgit v1.2.3