path: root/plat/nvidia/tegra
Diffstat (limited to 'plat/nvidia/tegra')
-rw-r--r-- plat/nvidia/tegra/common/aarch64/tegra_helpers.S | 446
-rw-r--r-- plat/nvidia/tegra/common/tegra_bl31_setup.c | 361
-rw-r--r-- plat/nvidia/tegra/common/tegra_common.mk | 51
-rw-r--r-- plat/nvidia/tegra/common/tegra_delay_timer.c | 57
-rw-r--r-- plat/nvidia/tegra/common/tegra_fiq_glue.c | 149
-rw-r--r-- plat/nvidia/tegra/common/tegra_gicv2.c | 72
-rw-r--r-- plat/nvidia/tegra/common/tegra_gicv3.c | 79
-rw-r--r-- plat/nvidia/tegra/common/tegra_io_storage.c | 20
-rw-r--r-- plat/nvidia/tegra/common/tegra_platform.c | 311
-rw-r--r-- plat/nvidia/tegra/common/tegra_pm.c | 340
-rw-r--r-- plat/nvidia/tegra/common/tegra_sdei.c | 56
-rw-r--r-- plat/nvidia/tegra/common/tegra_sip_calls.c | 158
-rw-r--r-- plat/nvidia/tegra/common/tegra_stack_protector.c | 28
-rw-r--r-- plat/nvidia/tegra/drivers/bpmp/bpmp.c | 231
-rw-r--r-- plat/nvidia/tegra/drivers/bpmp_ipc/intf.c | 345
-rw-r--r-- plat/nvidia/tegra/drivers/bpmp_ipc/intf.h | 127
-rw-r--r-- plat/nvidia/tegra/drivers/bpmp_ipc/ivc.c | 654
-rw-r--r-- plat/nvidia/tegra/drivers/bpmp_ipc/ivc.h | 50
-rw-r--r-- plat/nvidia/tegra/drivers/flowctrl/flowctrl.c | 322
-rw-r--r-- plat/nvidia/tegra/drivers/gpcdma/gpcdma.c | 188
-rw-r--r-- plat/nvidia/tegra/drivers/memctrl/memctrl_v1.c | 212
-rw-r--r-- plat/nvidia/tegra/drivers/memctrl/memctrl_v2.c | 354
-rw-r--r-- plat/nvidia/tegra/drivers/pmc/pmc.c | 153
-rw-r--r-- plat/nvidia/tegra/drivers/smmu/smmu.c | 121
-rw-r--r-- plat/nvidia/tegra/drivers/spe/shared_console.S | 187
-rw-r--r-- plat/nvidia/tegra/include/drivers/bpmp.h | 127
-rw-r--r-- plat/nvidia/tegra/include/drivers/bpmp_ipc.h | 43
-rw-r--r-- plat/nvidia/tegra/include/drivers/flowctrl.h | 98
-rw-r--r-- plat/nvidia/tegra/include/drivers/gpcdma.h | 17
-rw-r--r-- plat/nvidia/tegra/include/drivers/mce.h | 76
-rw-r--r-- plat/nvidia/tegra/include/drivers/memctrl.h | 18
-rw-r--r-- plat/nvidia/tegra/include/drivers/memctrl_v1.h | 57
-rw-r--r-- plat/nvidia/tegra/include/drivers/memctrl_v2.h | 107
-rw-r--r-- plat/nvidia/tegra/include/drivers/pmc.h | 72
-rw-r--r-- plat/nvidia/tegra/include/drivers/security_engine.h | 60
-rw-r--r-- plat/nvidia/tegra/include/drivers/smmu.h | 92
-rw-r--r-- plat/nvidia/tegra/include/drivers/spe.h | 23
-rw-r--r-- plat/nvidia/tegra/include/drivers/tegra_gic.h | 30
-rw-r--r-- plat/nvidia/tegra/include/lib/profiler.h | 20
-rw-r--r-- plat/nvidia/tegra/include/plat_macros.S | 63
-rw-r--r-- plat/nvidia/tegra/include/platform_def.h | 130
-rw-r--r-- plat/nvidia/tegra/include/t186/tegra186_private.h | 12
-rw-r--r-- plat/nvidia/tegra/include/t186/tegra_def.h | 327
-rw-r--r-- plat/nvidia/tegra/include/t186/tegra_mc_def.h | 398
-rw-r--r-- plat/nvidia/tegra/include/t194/tegra194_private.h | 16
-rw-r--r-- plat/nvidia/tegra/include/t194/tegra194_ras_private.h | 263
-rw-r--r-- plat/nvidia/tegra/include/t194/tegra_def.h | 326
-rw-r--r-- plat/nvidia/tegra/include/t210/tegra_def.h | 293
-rw-r--r-- plat/nvidia/tegra/include/tegra_platform.h | 70
-rw-r--r-- plat/nvidia/tegra/include/tegra_private.h | 162
-rw-r--r-- plat/nvidia/tegra/lib/debug/profiler.c | 144
-rw-r--r-- plat/nvidia/tegra/platform.mk | 97
-rw-r--r-- plat/nvidia/tegra/scat/bl31.scat | 284
-rw-r--r-- plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h | 260
-rw-r--r-- plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h | 437
-rw-r--r-- plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S | 31
-rw-r--r-- plat/nvidia/tegra/soc/t186/drivers/mce/ari.c | 564
-rw-r--r-- plat/nvidia/tegra/soc/t186/drivers/mce/mce.c | 476
-rw-r--r-- plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c | 256
-rw-r--r-- plat/nvidia/tegra/soc/t186/drivers/se/se.c | 277
-rw-r--r-- plat/nvidia/tegra/soc/t186/drivers/se/se_private.h | 100
-rw-r--r-- plat/nvidia/tegra/soc/t186/plat_memctrl.c | 700
-rw-r--r-- plat/nvidia/tegra/soc/t186/plat_psci_handlers.c | 482
-rw-r--r-- plat/nvidia/tegra/soc/t186/plat_secondary.c | 41
-rw-r--r-- plat/nvidia/tegra/soc/t186/plat_setup.c | 397
-rw-r--r-- plat/nvidia/tegra/soc/t186/plat_sip_calls.c | 159
-rw-r--r-- plat/nvidia/tegra/soc/t186/plat_smmu.c | 22
-rw-r--r-- plat/nvidia/tegra/soc/t186/plat_trampoline.S | 41
-rw-r--r-- plat/nvidia/tegra/soc/t186/platform_t186.mk | 77
-rw-r--r-- plat/nvidia/tegra/soc/t194/drivers/include/mce_private.h | 79
-rw-r--r-- plat/nvidia/tegra/soc/t194/drivers/include/se.h | 15
-rw-r--r-- plat/nvidia/tegra/soc/t194/drivers/include/t194_nvg.h | 429
-rw-r--r-- plat/nvidia/tegra/soc/t194/drivers/mce/aarch64/nvg_helpers.S | 52
-rw-r--r-- plat/nvidia/tegra/soc/t194/drivers/mce/mce.c | 255
-rw-r--r-- plat/nvidia/tegra/soc/t194/drivers/mce/nvg.c | 262
-rw-r--r-- plat/nvidia/tegra/soc/t194/drivers/se/se.c | 511
-rw-r--r-- plat/nvidia/tegra/soc/t194/drivers/se/se_private.h | 165
-rw-r--r-- plat/nvidia/tegra/soc/t194/plat_memctrl.c | 83
-rw-r--r-- plat/nvidia/tegra/soc/t194/plat_psci_handlers.c | 515
-rw-r--r-- plat/nvidia/tegra/soc/t194/plat_ras.c | 492
-rw-r--r-- plat/nvidia/tegra/soc/t194/plat_secondary.c | 75
-rw-r--r-- plat/nvidia/tegra/soc/t194/plat_setup.c | 449
-rw-r--r-- plat/nvidia/tegra/soc/t194/plat_sip_calls.c | 103
-rw-r--r-- plat/nvidia/tegra/soc/t194/plat_smmu.c | 35
-rw-r--r-- plat/nvidia/tegra/soc/t194/plat_trampoline.S | 150
-rw-r--r-- plat/nvidia/tegra/soc/t194/platform_t194.mk | 88
-rw-r--r-- plat/nvidia/tegra/soc/t210/drivers/se/se_private.h | 663
-rw-r--r-- plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c | 1071
-rw-r--r-- plat/nvidia/tegra/soc/t210/plat_psci_handlers.c | 619
-rw-r--r-- plat/nvidia/tegra/soc/t210/plat_secondary.c | 41
-rw-r--r-- plat/nvidia/tegra/soc/t210/plat_setup.c | 318
-rw-r--r-- plat/nvidia/tegra/soc/t210/plat_sip_calls.c | 99
-rw-r--r-- plat/nvidia/tegra/soc/t210/platform_t210.mk | 62
93 files changed, 19448 insertions, 0 deletions
diff --git a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
new file mode 100644
index 0000000..72ecd54
--- /dev/null
+++ b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a57.h>
+#include <cpu_macros.S>
+
+#include <platform_def.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+
+#define MIDR_PN_CORTEX_A57 0xD07
+
+/*******************************************************************************
+ * Implementation defined ACTLR_EL3 bit definitions
+ ******************************************************************************/
+#define ACTLR_ELx_L2ACTLR_BIT (U(1) << 6)
+#define ACTLR_ELx_L2ECTLR_BIT (U(1) << 5)
+#define ACTLR_ELx_L2CTLR_BIT (U(1) << 4)
+#define ACTLR_ELx_CPUECTLR_BIT (U(1) << 1)
+#define ACTLR_ELx_CPUACTLR_BIT (U(1) << 0)
+#define ACTLR_ELx_ENABLE_ALL_ACCESS (ACTLR_ELx_L2ACTLR_BIT | \
+ ACTLR_ELx_L2ECTLR_BIT | \
+ ACTLR_ELx_L2CTLR_BIT | \
+ ACTLR_ELx_CPUECTLR_BIT | \
+ ACTLR_ELx_CPUACTLR_BIT)
+
+ /* Global functions */
+ .globl plat_is_my_cpu_primary
+ .globl plat_my_core_pos
+ .globl plat_get_my_entrypoint
+ .globl plat_secondary_cold_boot_setup
+ .globl platform_mem_init
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+ .weak plat_core_pos_by_mpidr
+ .globl tegra_secure_entrypoint
+ .globl plat_reset_handler
+
+ /* Global variables */
+ .globl tegra_sec_entry_point
+ .globl ns_image_entrypoint
+ .globl tegra_bl31_phys_base
+ .globl tegra_console_base
+
+ /* ---------------------
+ * Common CPU init code
+ * ---------------------
+ */
+.macro cpu_init_common
+
+ /* ------------------------------------------------
+ * We enable processor retention, L2/CPUECTLR NS
+ * access and ECC/Parity protection for A57 CPUs
+ * ------------------------------------------------
+ */
+ mrs x0, midr_el1
+ mov x1, #(MIDR_PN_MASK << MIDR_PN_SHIFT)
+ and x0, x0, x1
+ lsr x0, x0, #MIDR_PN_SHIFT
+ cmp x0, #MIDR_PN_CORTEX_A57
+ b.ne 1f
+
+ /* ---------------------------
+ * Enable processor retention
+ * ---------------------------
+ */
+ mrs x0, CORTEX_A57_L2ECTLR_EL1
+ mov x1, #RETENTION_ENTRY_TICKS_512
+ bic x0, x0, #CORTEX_A57_L2ECTLR_RET_CTRL_MASK
+ orr x0, x0, x1
+ msr CORTEX_A57_L2ECTLR_EL1, x0
+ isb
+
+ mrs x0, CORTEX_A57_ECTLR_EL1
+ mov x1, #RETENTION_ENTRY_TICKS_512
+ bic x0, x0, #CORTEX_A57_ECTLR_CPU_RET_CTRL_MASK
+ orr x0, x0, x1
+ msr CORTEX_A57_ECTLR_EL1, x0
+ isb
+
+ /* -------------------------------------------------------
+ * Enable L2 and CPU ECTLR RW access from non-secure world
+ * -------------------------------------------------------
+ */
+ mrs x0, actlr_el3
+ mov x1, #ACTLR_ELx_ENABLE_ALL_ACCESS
+ orr x0, x0, x1
+ msr actlr_el3, x0
+ mrs x0, actlr_el2
+ mov x1, #ACTLR_ELx_ENABLE_ALL_ACCESS
+ orr x0, x0, x1
+ msr actlr_el2, x0
+ isb
+
+ /* --------------------------------
+ * Enable the cycle count register
+ * --------------------------------
+ */
+1: mrs x0, pmcr_el0
+ ubfx x0, x0, #11, #5 // read PMCR.N field
+ mov x1, #1
+ lsl x0, x1, x0
+ sub x0, x0, #1 // mask of event counters
+ orr x0, x0, #0x80000000 // disable overflow intrs
+ msr pmintenclr_el1, x0
+ msr pmuserenr_el0, x1 // enable user mode access
+
+ /* ----------------------------------------------------------------
+ * Allow non-privileged access to CNTVCT: set CNTKCTL_EL1 (Counter-timer
+ * Kernel Control register) bit 1 (EL0VCTEN) to enable access to the
+ * CNTVCT/CNTFRQ registers from EL0.
+ * ----------------------------------------------------------------
+ */
+ mrs x0, cntkctl_el1
+ orr x0, x0, #EL0VCTEN_BIT
+ msr cntkctl_el1, x0
+.endm
+
+ /* -----------------------------------------------------
+ * bool plat_is_my_cpu_primary(void);
+ *
+ * This function checks if this is the Primary CPU
+ *
+ * Registers clobbered: x0, x1
+ * -----------------------------------------------------
+ */
+func plat_is_my_cpu_primary
+ mrs x0, mpidr_el1
+ adr x1, tegra_primary_cpu_mpid
+ ldr x1, [x1]
+ cmp x0, x1
+ cset x0, eq
+ ret
+endfunc plat_is_my_cpu_primary
+
+ /* ----------------------------------------------------------
+ * unsigned int plat_my_core_pos(void);
+ *
+ * result: CorePos = CoreId + (ClusterId * cpus per cluster)
+ * Registers clobbered: x0, x8
+ * ----------------------------------------------------------
+ */
+func plat_my_core_pos
+ mov x8, x30
+ mrs x0, mpidr_el1
+ bl plat_core_pos_by_mpidr
+ ret x8
+endfunc plat_my_core_pos
+
+ /* -----------------------------------------------------
+ * unsigned long plat_get_my_entrypoint (void);
+ *
+ * The main job of this routine is to distinguish a cold boot
+ * from a warm boot: a non-zero tegra_sec_entry_point value
+ * indicates a warm boot.
+ *
+ * -----------------------------------------------------
+ */
+func plat_get_my_entrypoint
+ adr x1, tegra_sec_entry_point
+ ldr x0, [x1]
+ ret
+endfunc plat_get_my_entrypoint
+
+ /* -----------------------------------------------------
+ * void plat_secondary_cold_boot_setup (void);
+ *
+ * This function performs any platform specific actions
+ * needed for a secondary cpu after a cold reset. Right
+ * now this is a stub function.
+ * -----------------------------------------------------
+ */
+func plat_secondary_cold_boot_setup
+ mov x0, #0
+ ret
+endfunc plat_secondary_cold_boot_setup
+
+ /* --------------------------------------------------------
+ * void platform_mem_init (void);
+ *
+ * Any memory init, relocation to be done before the
+ * platform boots. Called very early in the boot process.
+ * --------------------------------------------------------
+ */
+func platform_mem_init
+ mov x0, #0
+ ret
+endfunc platform_mem_init
+
+ /* ---------------------------------------------------
+ * Function to handle a platform reset and store
+ * input parameters passed by BL2.
+ * ---------------------------------------------------
+ */
+func plat_reset_handler
+
+ /* ----------------------------------------------------
+ * Verify if we are running from BL31_BASE address
+ * ----------------------------------------------------
+ */
+ adr x18, bl31_entrypoint
+ mov x17, #BL31_BASE
+ cmp x18, x17
+ b.eq 1f
+
+ /* ----------------------------------------------------
+ * Copy the entire BL31 code to BL31_BASE if we are not
+ * running from it already
+ * ----------------------------------------------------
+ */
+ mov x0, x17
+ mov x1, x18
+ adr x2, __RELA_END__
+ sub x2, x2, x18
+_loop16:
+ cmp x2, #16
+ b.lo _loop1
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ sub x2, x2, #16
+ b _loop16
+ /* copy byte per byte */
+_loop1:
+ cbz x2, _end
+ ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne _loop1
+
+ /* ----------------------------------------------------
+ * Jump to BL31_BASE and start execution again
+ * ----------------------------------------------------
+ */
+_end: mov x0, x20
+ mov x1, x21
+ br x17
+1:
+
+ /* -----------------------------------
+ * derive and save the phys_base addr
+ * -----------------------------------
+ */
+ adr x17, tegra_bl31_phys_base
+ ldr x18, [x17]
+ cbnz x18, 1f
+ adr x18, bl31_entrypoint
+ str x18, [x17]
+
+ /* -----------------------------------
+ * save the boot CPU MPID value
+ * -----------------------------------
+ */
+ mrs x0, mpidr_el1
+ adr x1, tegra_primary_cpu_mpid
+ str x0, [x1]
+
+1: cpu_init_common
+
+ ret
+endfunc plat_reset_handler
+
+ /* ------------------------------------------------------
+ * int32_t plat_core_pos_by_mpidr(u_register_t mpidr)
+ *
+ * This function implements a part of the critical
+ * interface between the psci generic layer and the
+ * platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error
+ * code (-1) is returned in case the MPIDR is invalid.
+ *
+ * Clobbers: x0-x3
+ * ------------------------------------------------------
+ */
+func plat_core_pos_by_mpidr
+ lsr x1, x0, #MPIDR_AFF0_SHIFT
+ and x1, x1, #MPIDR_AFFLVL_MASK /* core id */
+ lsr x2, x0, #MPIDR_AFF1_SHIFT
+ and x2, x2, #MPIDR_AFFLVL_MASK /* cluster id */
+
+ /* core_id >= PLATFORM_MAX_CPUS_PER_CLUSTER */
+ mov x0, #-1
+ cmp x1, #(PLATFORM_MAX_CPUS_PER_CLUSTER - 1)
+ b.gt 1f
+
+ /* cluster_id >= PLATFORM_CLUSTER_COUNT */
+ cmp x2, #(PLATFORM_CLUSTER_COUNT - 1)
+ b.gt 1f
+
+ /* CorePos = CoreId + (ClusterId * cpus per cluster) */
+ mov x3, #PLATFORM_MAX_CPUS_PER_CLUSTER
+ mul x3, x3, x2
+ add x0, x1, x3
+
+1:
+ ret
+endfunc plat_core_pos_by_mpidr
+
+ /* ----------------------------------------
+ * Secure entrypoint function for CPU boot
+ * ----------------------------------------
+ */
+func tegra_secure_entrypoint _align=6
+
+#if ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT
+
+ /* --------------------------------------------------------
+ * Skip the invalidate BTB workaround for Tegra210B01 SKUs.
+ * --------------------------------------------------------
+ */
+ mov x0, #TEGRA_MISC_BASE
+ add x0, x0, #HARDWARE_REVISION_OFFSET
+ ldr w1, [x0]
+ lsr w1, w1, #CHIP_ID_SHIFT
+ and w1, w1, #CHIP_ID_MASK
+ cmp w1, #TEGRA_CHIPID_TEGRA21 /* T210? */
+ b.ne 2f
+ ldr w1, [x0]
+ lsr w1, w1, #MAJOR_VERSION_SHIFT
+ and w1, w1, #MAJOR_VERSION_MASK
+ cmp w1, #0x02 /* T210 B01? */
+ b.eq 2f
+
+ /* -------------------------------------------------------
+ * Invalidate BTB along with I$ to remove any stale
+ * entries from the branch predictor array.
+ * -------------------------------------------------------
+ */
+ mrs x0, CORTEX_A57_CPUACTLR_EL1
+ orr x0, x0, #1
+ msr CORTEX_A57_CPUACTLR_EL1, x0 /* invalidate BTB and I$ together */
+ dsb sy
+ isb
+ ic iallu /* actual invalidate */
+ dsb sy
+ isb
+
+ mrs x0, CORTEX_A57_CPUACTLR_EL1
+ bic x0, x0, #1
+ msr CORTEX_A57_CPUACTLR_EL1, x0 /* restore original CPUACTLR_EL1 */
+ dsb sy
+ isb
+
+ .rept 7
+ nop /* wait */
+ .endr
+
+ /* -----------------------------------------------
+ * Extract OSLK bit and check if it is '1'. This
+ * bit remains '0' for A53 on warm-resets. If '1',
+ * turn off regional clock gating and request warm
+ * reset.
+ * -----------------------------------------------
+ */
+ mrs x0, oslsr_el1
+ and x0, x0, #2
+ mrs x1, mpidr_el1
+ bics xzr, x0, x1, lsr #7 /* 0 = slow cluster or warm reset */
+ b.eq restore_oslock
+ mov x0, xzr
+ msr oslar_el1, x0 /* os lock stays 0 across warm reset */
+ mov x3, #3
+ movz x4, #0x8000, lsl #48
+ msr CORTEX_A57_CPUACTLR_EL1, x4 /* turn off RCG */
+ isb
+ msr rmr_el3, x3 /* request warm reset */
+ isb
+ dsb sy
+1: wfi
+ b 1b
+
+ /* --------------------------------------------------
+ * These nops are here so that speculative execution
+ * won't harm us before we are done with warm reset.
+ * --------------------------------------------------
+ */
+ .rept 65
+ nop
+ .endr
+2:
+ /* --------------------------------------------------
+ * Do not insert instructions here
+ * --------------------------------------------------
+ */
+#endif
+
+ /* --------------------------------------------------
+ * Restore OS Lock bit
+ * --------------------------------------------------
+ */
+restore_oslock:
+ mov x0, #1
+ msr oslar_el1, x0
+
+ /* --------------------------------------------------
+ * Get secure world's entry point and jump to it
+ * --------------------------------------------------
+ */
+ bl plat_get_my_entrypoint
+ br x0
+endfunc tegra_secure_entrypoint
+
+ .data
+ .align 3
+
+ /* --------------------------------------------------
+ * CPU Secure entry point - resume from suspend
+ * --------------------------------------------------
+ */
+tegra_sec_entry_point:
+ .quad 0
+
+ /* --------------------------------------------------
+ * NS world's cold boot entry point
+ * --------------------------------------------------
+ */
+ns_image_entrypoint:
+ .quad 0
+
+ /* --------------------------------------------------
+ * BL31's physical base address
+ * --------------------------------------------------
+ */
+tegra_bl31_phys_base:
+ .quad 0
+
+ /* --------------------------------------------------
+ * UART controller base for console init
+ * --------------------------------------------------
+ */
+tegra_console_base:
+ .quad 0
+
+ /* --------------------------------------------------
+ * MPID value for the boot CPU
+ * --------------------------------------------------
+ */
+tegra_primary_cpu_mpid:
+ .quad 0
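
A note on the mapping implemented by plat_core_pos_by_mpidr above: the linear index is CoreId + (ClusterId * PLATFORM_MAX_CPUS_PER_CLUSTER), with -1 returned for affinity values outside the platform topology. A minimal C sketch of the same computation, assuming illustrative PLATFORM_* values (the real ones come from each SoC's platform_def.h):

    #include <stdint.h>

    /* Illustrative topology -- real values come from platform_def.h */
    #define PLATFORM_MAX_CPUS_PER_CLUSTER  4U
    #define PLATFORM_CLUSTER_COUNT         2U

    #define MPIDR_AFFLVL_MASK  0xffULL
    #define MPIDR_AFF0_SHIFT   0U
    #define MPIDR_AFF1_SHIFT   8U

    static int32_t core_pos_by_mpidr(uint64_t mpidr)
    {
        uint64_t core_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
        uint64_t cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;

        /* reject out-of-range affinity values, as the assembly does */
        if ((core_id >= PLATFORM_MAX_CPUS_PER_CLUSTER) ||
            (cluster_id >= PLATFORM_CLUSTER_COUNT)) {
            return -1;
        }

        /* CorePos = CoreId + (ClusterId * cpus per cluster) */
        return (int32_t)(core_id +
                         (cluster_id * PLATFORM_MAX_CPUS_PER_CLUSTER));
    }

plat_my_core_pos() is then simply this mapping applied to the executing core's own mpidr_el1 value.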
diff --git a/plat/nvidia/tegra/common/tegra_bl31_setup.c b/plat/nvidia/tegra/common/tegra_bl31_setup.c
new file mode 100644
index 0000000..e3068b6
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_bl31_setup.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stddef.h>
+#include <string.h>
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a57.h>
+#include <denver.h>
+#include <drivers/console.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include <lib/utils_def.h>
+#include <plat/common/platform.h>
+
+#include <memctrl.h>
+#include <profiler.h>
+#include <smmu.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/* length of Trusty's input parameters (in bytes) */
+#define TRUSTY_PARAMS_LEN_BYTES (4096*2)
+
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+IMPORT_SYM(uint64_t, __RW_START__, BL31_RW_START);
+
+extern uint64_t tegra_bl31_phys_base;
+
+static entry_point_info_t bl33_image_ep_info, bl32_image_ep_info;
+static plat_params_from_bl2_t plat_bl31_params_from_bl2 = {
+ .tzdram_size = TZDRAM_SIZE
+};
+#ifdef SPD_trusty
+static aapcs64_params_t bl32_args;
+#endif
+
+/*******************************************************************************
+ * This variable holds the non-secure image entry address
+ ******************************************************************************/
+extern uint64_t ns_image_entrypoint;
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type.
+ ******************************************************************************/
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+ entry_point_info_t *ep = NULL;
+
+ /* return BL32 entry point info if it is valid */
+ if (type == NON_SECURE) {
+ ep = &bl33_image_ep_info;
+ } else if ((type == SECURE) && (bl32_image_ep_info.pc != 0U)) {
+ ep = &bl32_image_ep_info;
+ }
+
+ return ep;
+}
+
+/*******************************************************************************
+ * Return a pointer to the 'plat_params_from_bl2_t' structure. The BL2 image
+ * passes this platform specific information.
+ ******************************************************************************/
+plat_params_from_bl2_t *bl31_get_plat_params(void)
+{
+ return &plat_bl31_params_from_bl2;
+}
+
+/*******************************************************************************
+ * Perform any BL31 specific platform actions. Populate the BL33 and BL32 image
+ * info.
+ ******************************************************************************/
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+ u_register_t arg2, u_register_t arg3)
+{
+ struct tegra_bl31_params *arg_from_bl2 = plat_get_bl31_params();
+ plat_params_from_bl2_t *plat_params = plat_get_bl31_plat_params();
+ int32_t ret;
+
+ /*
+ * Tegra platforms will receive boot parameters through custom
+ * mechanisms. So, we ignore the input parameters.
+ */
+ (void)arg0;
+ (void)arg1;
+
+ /*
+ * Copy BL3-3, BL3-2 entry point information.
+ * They are stored in Secure RAM, in BL2's address space.
+ */
+ assert(arg_from_bl2 != NULL);
+ assert(arg_from_bl2->bl33_ep_info != NULL);
+ bl33_image_ep_info = *arg_from_bl2->bl33_ep_info;
+
+ if (arg_from_bl2->bl32_ep_info != NULL) {
+ bl32_image_ep_info = *arg_from_bl2->bl32_ep_info;
+#ifdef SPD_trusty
+ /* save BL32 boot parameters */
+ memcpy(&bl32_args, &arg_from_bl2->bl32_ep_info->args, sizeof(bl32_args));
+#endif
+ }
+
+ /*
+ * Parse platform specific parameters
+ */
+ assert(plat_params != NULL);
+ plat_bl31_params_from_bl2.tzdram_base = plat_params->tzdram_base;
+ plat_bl31_params_from_bl2.tzdram_size = plat_params->tzdram_size;
+ plat_bl31_params_from_bl2.uart_id = plat_params->uart_id;
+ plat_bl31_params_from_bl2.l2_ecc_parity_prot_dis = plat_params->l2_ecc_parity_prot_dis;
+ plat_bl31_params_from_bl2.sc7entry_fw_size = plat_params->sc7entry_fw_size;
+ plat_bl31_params_from_bl2.sc7entry_fw_base = plat_params->sc7entry_fw_base;
+
+ /*
+ * It is very important that we run either from TZDRAM or TZSRAM base.
+ * Add an explicit check here.
+ */
+ if ((plat_bl31_params_from_bl2.tzdram_base != (uint64_t)BL31_BASE) &&
+ (TEGRA_TZRAM_BASE != BL31_BASE)) {
+ panic();
+ }
+
+ /*
+ * Enable console for the platform
+ */
+ plat_enable_console(plat_params->uart_id);
+
+ /*
+ * The previous bootloader passes the base address of the shared memory
+ * location to store the boot profiler logs. Sanity check the
+ * address and initialise the profiler library, if it looks ok.
+ */
+ ret = bl31_check_ns_address(plat_params->boot_profiler_shmem_base,
+ PROFILER_SIZE_BYTES);
+ if (ret == (int32_t)0) {
+
+ /* store the membase for the profiler lib */
+ plat_bl31_params_from_bl2.boot_profiler_shmem_base =
+ plat_params->boot_profiler_shmem_base;
+
+ /* initialise the profiler library */
+ boot_profiler_init(plat_params->boot_profiler_shmem_base,
+ TEGRA_TMRUS_BASE);
+ }
+
+ /*
+ * Add timestamp for platform early setup entry.
+ */
+ boot_profiler_add_record("[TF] early setup entry");
+
+ /*
+ * Initialize delay timer
+ */
+ tegra_delay_timer_init();
+
+ /* Early platform setup for Tegra SoCs */
+ plat_early_platform_setup();
+
+ /*
+ * Add timestamp for platform early setup exit.
+ */
+ boot_profiler_add_record("[TF] early setup exit");
+
+ INFO("BL3-1: Boot CPU: %s Processor [%lx]\n",
+ (((read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK)
+ == DENVER_IMPL) ? "Denver" : "ARM", read_mpidr());
+}
+
+#ifdef SPD_trusty
+void plat_trusty_set_boot_args(aapcs64_params_t *args)
+{
+ /*
+ * arg0 = TZDRAM aperture available for BL32
+ * arg1 = BL32 boot params
+ * arg2 = EKS Blob Length
+ * arg3 = Boot Profiler Carveout Base
+ */
+ args->arg0 = bl32_args.arg0;
+ args->arg1 = bl32_args.arg2;
+
+ /* update EKS size */
+ args->arg2 = bl32_args.arg4;
+
+ /* Profiler Carveout Base */
+ args->arg3 = bl32_args.arg5;
+}
+#endif
+
+/*******************************************************************************
+ * Initialize the gic, configure the SCR.
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+ /*
+ * Add timestamp for platform setup entry.
+ */
+ boot_profiler_add_record("[TF] plat setup entry");
+
+ /* Initialize the gic cpu and distributor interfaces */
+ plat_gic_setup();
+
+ /*
+ * Setup secondary CPU POR infrastructure.
+ */
+ plat_secondary_setup();
+
+ /*
+ * Initial Memory Controller configuration.
+ */
+ tegra_memctrl_setup();
+
+ /*
+ * Late setup handler to allow platforms to perform additional
+ * functionality.
+ * This handler gets called with MMU enabled.
+ */
+ plat_late_platform_setup();
+
+ /*
+ * Add timestamp for platform setup exit.
+ */
+ boot_profiler_add_record("[TF] plat setup exit");
+
+ INFO("BL3-1: Tegra platform setup complete\n");
+}
+
+/*******************************************************************************
+ * Perform any BL3-1 platform runtime setup prior to BL3-1 cold boot exit
+ ******************************************************************************/
+void bl31_plat_runtime_setup(void)
+{
+ /*
+ * Platform specific runtime setup
+ */
+ plat_runtime_setup();
+
+ /*
+ * Add final timestamp before exiting BL31.
+ */
+ boot_profiler_add_record("[TF] bl31 exit");
+ boot_profiler_deinit();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the mmu in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+ uint64_t rw_start = BL31_RW_START;
+ uint64_t rw_size = BL_END - BL31_RW_START;
+ uint64_t rodata_start = BL_RO_DATA_BASE;
+ uint64_t rodata_size = BL_RO_DATA_END - BL_RO_DATA_BASE;
+ uint64_t code_base = BL_CODE_BASE;
+ uint64_t code_size = BL_CODE_END - BL_CODE_BASE;
+ const mmap_region_t *plat_mmio_map = NULL;
+ const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+
+ /*
+ * Add timestamp for arch setup entry.
+ */
+ boot_profiler_add_record("[TF] arch setup entry");
+
+ /* add MMIO space */
+ plat_mmio_map = plat_get_mmio_map();
+ if (plat_mmio_map != NULL) {
+ mmap_add(plat_mmio_map);
+ } else {
+ WARN("MMIO map not available\n");
+ }
+
+ /* add memory regions */
+ mmap_add_region(rw_start, rw_start,
+ rw_size,
+ MT_MEMORY | MT_RW | MT_SECURE);
+ mmap_add_region(rodata_start, rodata_start,
+ rodata_size,
+ MT_RO_DATA | MT_SECURE);
+ mmap_add_region(code_base, code_base,
+ code_size,
+ MT_CODE | MT_SECURE);
+
+ /* map TZDRAM used by BL31 as coherent memory */
+ if (TEGRA_TZRAM_BASE == tegra_bl31_phys_base) {
+ mmap_add_region(params_from_bl2->tzdram_base,
+ params_from_bl2->tzdram_base,
+ BL31_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE);
+ }
+
+ /* set up translation tables */
+ init_xlat_tables();
+
+ /* enable the MMU */
+ enable_mmu_el3(0);
+
+ /*
+ * Add timestamp for arch setup exit.
+ */
+ boot_profiler_add_record("[TF] arch setup exit");
+
+ INFO("BL3-1: Tegra: MMU enabled\n");
+}
+
+/*******************************************************************************
+ * Check if the given NS DRAM range is valid
+ ******************************************************************************/
+int32_t bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes)
+{
+ uint64_t end = base + size_in_bytes - U(1);
+
+ /*
+ * Sanity check the input values
+ */
+ if ((base == 0U) || (size_in_bytes == 0U)) {
+ ERROR("NS address 0x%" PRIx64 " (%" PRId64 " bytes) is invalid\n",
+ base, size_in_bytes);
+ return -EINVAL;
+ }
+
+ /*
+ * Check if the NS DRAM address is valid
+ */
+ if ((base < TEGRA_DRAM_BASE) || (base >= TEGRA_DRAM_END) ||
+ (end > TEGRA_DRAM_END)) {
+
+ ERROR("NS address 0x%" PRIx64 " is out-of-bounds!\n", base);
+ return -EFAULT;
+ }
+
+ /*
+ * TZDRAM aperture contains the BL31 and BL32 images, so we need
+ * to check if the NS DRAM range overlaps the TZDRAM aperture.
+ */
+ if ((base < (uint64_t)TZDRAM_END) && (end > tegra_bl31_phys_base)) {
+ ERROR("NS address 0x%" PRIx64 " overlaps TZDRAM!\n", base);
+ return -ENOTSUP;
+ }
+
+ /* valid NS address */
+ return 0;
+}
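
The validation in bl31_check_ns_address() above reduces to interval arithmetic: the buffer [base, base + size - 1] must be non-empty, lie entirely inside DRAM, and must not intersect the TZDRAM aperture that holds BL31/BL32. A self-contained sketch under assumed bounds (the DRAM_*/TZ_* values below are placeholders; the real ones come from tegra_def.h and the BL2 boot parameters):

    #include <stdbool.h>
    #include <stdint.h>

    /* Placeholder apertures -- illustrative only */
    #define DRAM_BASE  0x80000000ULL
    #define DRAM_END   0x100000000ULL
    #define TZ_BASE    0xF0000000ULL
    #define TZ_END     0xF0400000ULL

    static bool ns_range_is_valid(uint64_t base, uint64_t size)
    {
        uint64_t end;

        /* non-zero base and size */
        if ((base == 0ULL) || (size == 0ULL)) {
            return false;
        }
        end = base + size - 1ULL;

        /* entirely within NS-visible DRAM */
        if ((base < DRAM_BASE) || (end > DRAM_END)) {
            return false;
        }

        /* no intersection with the secure TZDRAM aperture */
        return !((base < TZ_END) && (end >= TZ_BASE));
    }

This is the same check bl31_early_platform_setup2() applies to the boot-profiler shared-memory base before calling boot_profiler_init().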
diff --git a/plat/nvidia/tegra/common/tegra_common.mk b/plat/nvidia/tegra/common/tegra_common.mk
new file mode 100644
index 0000000..3791018
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_common.mk
@@ -0,0 +1,51 @@
+#
+# Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES := -Iplat/nvidia/tegra/include/drivers \
+ -Iplat/nvidia/tegra/include/lib \
+ -Iplat/nvidia/tegra/include
+
+include lib/xlat_tables_v2/xlat_tables.mk
+PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS}
+
+TEGRA_COMMON := plat/nvidia/tegra/common
+TEGRA_DRIVERS := plat/nvidia/tegra/drivers
+TEGRA_LIBS := plat/nvidia/tegra/lib
+
+# Include GICv3 driver files
+include drivers/arm/gic/v3/gicv3.mk
+TEGRA_GICv3_SOURCES := $(GICV3_SOURCES) \
+ plat/common/plat_gicv3.c \
+ ${TEGRA_COMMON}/tegra_gicv3.c
+
+# Include GICv2 driver files
+include drivers/arm/gic/v2/gicv2.mk
+
+TEGRA_GICv2_SOURCES := ${GICV2_SOURCES} \
+ plat/common/plat_gicv2.c \
+ ${TEGRA_COMMON}/tegra_gicv2.c
+
+BL31_SOURCES += drivers/delay_timer/delay_timer.c \
+ drivers/io/io_storage.c \
+ plat/common/aarch64/crash_console_helpers.S \
+ ${TEGRA_LIBS}/debug/profiler.c \
+ ${TEGRA_COMMON}/aarch64/tegra_helpers.S \
+ ${TEGRA_COMMON}/tegra_bl31_setup.c \
+ ${TEGRA_COMMON}/tegra_delay_timer.c \
+ ${TEGRA_COMMON}/tegra_fiq_glue.c \
+ ${TEGRA_COMMON}/tegra_io_storage.c \
+ ${TEGRA_COMMON}/tegra_platform.c \
+ ${TEGRA_COMMON}/tegra_pm.c \
+ ${TEGRA_COMMON}/tegra_sip_calls.c \
+ ${TEGRA_COMMON}/tegra_sdei.c
+
+ifneq ($(ENABLE_STACK_PROTECTOR), 0)
+BL31_SOURCES += ${TEGRA_COMMON}/tegra_stack_protector.c
+endif
+ifeq (${EL3_EXCEPTION_HANDLING},1)
+BL31_SOURCES += plat/common/aarch64/plat_ehf.c
+endif
diff --git a/plat/nvidia/tegra/common/tegra_delay_timer.c b/plat/nvidia/tegra/common/tegra_delay_timer.c
new file mode 100644
index 0000000..d9547c4
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_delay_timer.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+#include <plat/common/platform.h>
+
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+static uint32_t tegra_timer_get_value(void)
+{
+ /* enable cntps_tval_el1 timer, mask interrupt */
+ write_cntps_ctl_el1(CNTP_CTL_IMASK_BIT | CNTP_CTL_ENABLE_BIT);
+
+ /*
+ * Generic delay timer implementation expects the timer to be a down
+ * counter. The value is clipped from 64 to 32 bits.
+ */
+ return (uint32_t)(read_cntps_tval_el1());
+}
+
+/*
+ * Initialise the architecture provided counter as the delay timer.
+ */
+void tegra_delay_timer_init(void)
+{
+ static timer_ops_t tegra_timer_ops;
+
+ /* 1 MHz reference timebase, in ticks per second */
+ uint32_t multiplier = MHZ_TICKS_PER_SEC;
+
+ /* System counter frequency, in ticks per second (Hz) */
+ uint32_t divider = plat_get_syscnt_freq2();
+
+ /* Reduce multiplier and divider by dividing them repeatedly by 10 */
+ while (((multiplier % 10U) == 0U) && ((divider % 10U) == 0U)) {
+ multiplier /= 10U;
+ divider /= 10U;
+ }
+
+ /* enable cntps_tval_el1 timer, mask interrupt */
+ write_cntps_ctl_el1(CNTP_CTL_IMASK_BIT | CNTP_CTL_ENABLE_BIT);
+
+ /* register the timer */
+ tegra_timer_ops.get_timer_value = tegra_timer_get_value;
+ tegra_timer_ops.clk_mult = multiplier;
+ tegra_timer_ops.clk_div = divider;
+ timer_init(&tegra_timer_ops);
+}
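
The registered clk_mult/clk_div pair is consumed by the generic delay timer driver, which converts a tick delta to microseconds as us = ticks * clk_mult / clk_div; the reduction loop simply strips common factors of 10 so the intermediate products stay small. A worked example, assuming a 19.2 MHz system counter (an assumption for illustration; the real rate comes from plat_get_syscnt_freq2()):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mult = 1000000U;    /* MHZ_TICKS_PER_SEC */
        uint32_t div = 19200000U;    /* assumed 19.2 MHz counter */

        /* same reduction loop as tegra_delay_timer_init() */
        while (((mult % 10U) == 0U) && ((div % 10U) == 0U)) {
            mult /= 10U;
            div /= 10U;
        }

        /* 960 ticks at 19.2 MHz is a 50 us interval */
        uint64_t ticks = 960U;
        printf("mult=%u div=%u -> %llu us\n", mult, div,
               (unsigned long long)((ticks * mult) / div));
        return 0;
    }

For this frequency the pair reduces to mult = 10, div = 192, and the program prints "mult=10 div=192 -> 50 us".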
diff --git a/plat/nvidia/tegra/common/tegra_fiq_glue.c b/plat/nvidia/tegra/common/tegra_fiq_glue.c
new file mode 100644
index 0000000..5309d98
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_fiq_glue.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <bl31/interrupt_mgmt.h>
+#include <bl31/ehf.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <context.h>
+#include <denver.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <plat/common/platform.h>
+
+#if ENABLE_WDT_LEGACY_FIQ_HANDLING
+#include <flowctrl.h>
+#endif
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+/* Legacy FIQ used by earlier Tegra platforms */
+#define LEGACY_FIQ_PPI_WDT 28U
+
+/*******************************************************************************
+ * Static variables
+ ******************************************************************************/
+static uint64_t ns_fiq_handler_addr;
+static uint32_t fiq_handler_active;
+static pcpu_fiq_state_t fiq_state[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Handler for FIQ interrupts
+ ******************************************************************************/
+static int tegra_fiq_interrupt_handler(unsigned int id, unsigned int flags,
+ void *handle, void *cookie)
+{
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ el3_state_t *el3state_ctx = get_el3state_ctx(ctx);
+ uint32_t cpu = plat_my_core_pos();
+
+ (void)flags;
+ (void)handle;
+ (void)cookie;
+
+ /*
+ * Jump to NS world only if the NS world's FIQ handler has
+ * been registered
+ */
+ if (ns_fiq_handler_addr != 0U) {
+
+ /*
+ * The FIQ was generated when the execution was in the non-secure
+ * world. Save the context registers to start with.
+ */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /*
+ * Save elr_el3 and spsr_el3 from the saved context, and overwrite
+ * the context with the NS fiq_handler_addr and SPSR value.
+ */
+ fiq_state[cpu].elr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3));
+ fiq_state[cpu].spsr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_SPSR_EL3));
+
+ /*
+ * Set the new ELR to continue execution in the NS world using the
+ * FIQ handler registered earlier.
+ */
+ cm_set_elr_el3(NON_SECURE, ns_fiq_handler_addr);
+ }
+
+#if ENABLE_WDT_LEGACY_FIQ_HANDLING
+ /*
+ * Tegra platforms that use LEGACY_FIQ as the watchdog timer FIQ
+ * need to issue an IPI to other CPUs, to allow them to handle
+ * the "system hung" scenario. This interrupt is passed to the GICD
+ * via the Flow Controller. So, once we receive this interrupt,
+ * disable the routing so that we can mark it as "complete" in the
+ * GIC later.
+ */
+ if (id == LEGACY_FIQ_PPI_WDT) {
+ tegra_fc_disable_fiq_to_ccplex_routing();
+ }
+#endif
+
+ /*
+ * Mark this interrupt as complete to avoid a FIQ storm.
+ */
+ plat_ic_end_of_interrupt(id);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Setup handler for FIQ interrupts
+ ******************************************************************************/
+void tegra_fiq_handler_setup(void)
+{
+ /* return if already registered */
+ if (fiq_handler_active == 0U) {
+ /*
+ * Register an interrupt handler for FIQ interrupts generated for
+ * NS interrupt sources
+ */
+ ehf_register_priority_handler(PLAT_TEGRA_WDT_PRIO, tegra_fiq_interrupt_handler);
+
+ /* handler is now active */
+ fiq_handler_active = 1;
+ }
+}
+
+/*******************************************************************************
+ * Validate and store NS world's entrypoint for FIQ interrupts
+ ******************************************************************************/
+void tegra_fiq_set_ns_entrypoint(uint64_t entrypoint)
+{
+ ns_fiq_handler_addr = entrypoint;
+}
+
+/*******************************************************************************
+ * Handler to return the NS EL1/EL0 CPU context
+ ******************************************************************************/
+int32_t tegra_fiq_get_intr_context(void)
+{
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ gp_regs_t *gpregs_ctx = get_gpregs_ctx(ctx);
+ const el1_sysregs_t *el1state_ctx = get_el1_sysregs_ctx(ctx);
+ uint32_t cpu = plat_my_core_pos();
+ uint64_t val;
+
+ /*
+ * We store the ELR_EL3, SPSR_EL3, SP_EL0 and SP_EL1 registers so
+ * that el3_exit() sends these values back to the NS world.
+ */
+ write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X0), (fiq_state[cpu].elr_el3));
+ write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X1), (fiq_state[cpu].spsr_el3));
+
+ val = read_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_SP_EL0));
+ write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X2), (val));
+
+ val = read_ctx_reg((el1state_ctx), (uint32_t)(CTX_SP_EL1));
+ write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X3), (val));
+
+ return 0;
+}
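
For reference, tegra_fiq_get_intr_context() above hands the preempted NS state back through the GP register context, so after el3_exit() the NS FIQ handler receives the four values in x0-x3. A hypothetical NS-side view of that layout (the struct is illustrative; the SiP SMC that requests these values is handled in tegra_sip_calls.c):

    #include <stdint.h>

    /* Hypothetical NS-side view of the values returned in x0-x3 */
    typedef struct fiq_intr_context {
        uint64_t elr_el3;   /* x0: PC at the point of preemption */
        uint64_t spsr_el3;  /* x1: PSTATE at the point of preemption */
        uint64_t sp_el0;    /* x2: preempted SP_EL0 */
        uint64_t sp_el1;    /* x3: preempted SP_EL1 */
    } fiq_intr_context_t;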
diff --git a/plat/nvidia/tegra/common/tegra_gicv2.c b/plat/nvidia/tegra/common/tegra_gicv2.c
new file mode 100644
index 0000000..012107e
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_gicv2.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <platform_def.h>
+
+#include <common/bl_common.h>
+#include <drivers/arm/gicv2.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+
+#include <tegra_private.h>
+#include <tegra_def.h>
+
+static unsigned int tegra_target_masks[PLATFORM_CORE_COUNT];
+
+/******************************************************************************
+ * Tegra common helper to setup the GICv2 driver data.
+ *****************************************************************************/
+void tegra_gic_setup(const interrupt_prop_t *interrupt_props,
+ unsigned int interrupt_props_num)
+{
+ /*
+ * Tegra GIC configuration settings
+ */
+ static gicv2_driver_data_t tegra_gic_data;
+
+ /*
+ * Register Tegra GICv2 driver
+ */
+ tegra_gic_data.gicd_base = TEGRA_GICD_BASE;
+ tegra_gic_data.gicc_base = TEGRA_GICC_BASE;
+ tegra_gic_data.interrupt_props = interrupt_props;
+ tegra_gic_data.interrupt_props_num = interrupt_props_num;
+ tegra_gic_data.target_masks = tegra_target_masks;
+ tegra_gic_data.target_masks_num = ARRAY_SIZE(tegra_target_masks);
+ gicv2_driver_init(&tegra_gic_data);
+}
+
+/******************************************************************************
+ * Tegra common helper to initialize the GICv2 only driver.
+ *****************************************************************************/
+void tegra_gic_init(void)
+{
+ gicv2_distif_init();
+ gicv2_pcpu_distif_init();
+ gicv2_set_pe_target_mask(plat_my_core_pos());
+ gicv2_cpuif_enable();
+}
+
+/******************************************************************************
+ * Tegra common helper to disable the GICv2 CPU interface
+ *****************************************************************************/
+void tegra_gic_cpuif_deactivate(void)
+{
+ gicv2_cpuif_disable();
+}
+
+/******************************************************************************
+ * Tegra common helper to initialize the per cpu distributor interface
+ * in GICv2
+ *****************************************************************************/
+void tegra_gic_pcpu_init(void)
+{
+ gicv2_pcpu_distif_init();
+ gicv2_set_pe_target_mask(plat_my_core_pos());
+ gicv2_cpuif_enable();
+}
diff --git a/plat/nvidia/tegra/common/tegra_gicv3.c b/plat/nvidia/tegra/common/tegra_gicv3.c
new file mode 100644
index 0000000..cba2f9b
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_gicv3.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <common/bl_common.h>
+#include <drivers/arm/gicv3.h>
+#include <lib/utils.h>
+
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <tegra_private.h>
+#include <tegra_def.h>
+
+/* The GICv3 driver only needs to be initialized in EL3 */
+static uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+static unsigned int plat_tegra_mpidr_to_core_pos(unsigned long mpidr)
+{
+ return (unsigned int)plat_core_pos_by_mpidr(mpidr);
+}
+
+/******************************************************************************
+ * Tegra common helper to setup the GICv3 driver data.
+ *****************************************************************************/
+void tegra_gic_setup(const interrupt_prop_t *interrupt_props,
+ unsigned int interrupt_props_num)
+{
+ /*
+ * Tegra GIC configuration settings
+ */
+ static gicv3_driver_data_t tegra_gic_data;
+
+ /*
+ * Register Tegra GICv3 driver
+ */
+ tegra_gic_data.gicd_base = TEGRA_GICD_BASE;
+ tegra_gic_data.gicr_base = TEGRA_GICR_BASE;
+ tegra_gic_data.rdistif_num = PLATFORM_CORE_COUNT;
+ tegra_gic_data.rdistif_base_addrs = rdistif_base_addrs;
+ tegra_gic_data.mpidr_to_core_pos = plat_tegra_mpidr_to_core_pos;
+ tegra_gic_data.interrupt_props = interrupt_props;
+ tegra_gic_data.interrupt_props_num = interrupt_props_num;
+ gicv3_driver_init(&tegra_gic_data);
+
+ /* initialize the GICD and GICR */
+ tegra_gic_init();
+}
+
+/******************************************************************************
+ * Tegra common helper to initialize the GICv3 only driver.
+ *****************************************************************************/
+void tegra_gic_init(void)
+{
+ gicv3_distif_init();
+ gicv3_rdistif_init(plat_my_core_pos());
+ gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * Tegra common helper to disable the GICv3 CPU interface
+ *****************************************************************************/
+void tegra_gic_cpuif_deactivate(void)
+{
+ gicv3_cpuif_disable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * Tegra common helper to initialize the per cpu distributor interface
+ * in GICv3
+ *****************************************************************************/
+void tegra_gic_pcpu_init(void)
+{
+ gicv3_rdistif_init(plat_my_core_pos());
+ gicv3_cpuif_enable(plat_my_core_pos());
+}
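
Both tegra_gicv2.c and tegra_gicv3.c export the same four entry points (tegra_gic_setup, tegra_gic_init, tegra_gic_cpuif_deactivate, tegra_gic_pcpu_init), keeping the per-SoC code GIC-version agnostic. A hedged sketch of how a GICv2-based SoC port might drive them from its plat_gic_setup() hook; the interrupt list here is hypothetical:

    #include <common/interrupt_props.h>
    #include <drivers/arm/gic_common.h>
    #include <drivers/arm/gicv2.h>
    #include <lib/utils_def.h>

    #include <tegra_gic.h>
    #include <tegra_private.h>

    /* Hypothetical example: route the legacy watchdog FIQ (PPI 28) as a
     * level-triggered Group 0 secure interrupt. */
    static const interrupt_prop_t tegra_interrupt_props[] = {
        INTR_PROP_DESC(28U, GIC_HIGHEST_SEC_PRIORITY,
                       GICV2_INTR_GROUP0, GIC_INTR_CFG_LEVEL),
    };

    /* Per-SoC hook invoked from bl31_platform_setup() */
    void plat_gic_setup(void)
    {
        tegra_gic_setup(tegra_interrupt_props,
                        ARRAY_SIZE(tegra_interrupt_props));
        tegra_gic_init();
    }

On the GICv3 build, tegra_gic_setup() already performs the initial tegra_gic_init() internally, so only the driver-data setup call is needed there.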
diff --git a/plat/nvidia/tegra/common/tegra_io_storage.c b/plat/nvidia/tegra/common/tegra_io_storage.c
new file mode 100644
index 0000000..21641aa
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_io_storage.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <plat/common/platform.h>
+
+/*
+ * Return an IO device handle and specification which can be used to access
+ * an image. Use this to enforce platform load policy.
+ *
+ * This function is not supported at this time
+ */
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+ uintptr_t *image_spec)
+{
+ return -ENOTSUP;
+}
diff --git a/plat/nvidia/tegra/common/tegra_platform.c b/plat/nvidia/tegra/common/tegra_platform.c
new file mode 100644
index 0000000..6d736b5
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_platform.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <lib/mmio.h>
+#include <lib/smccc.h>
+#include <services/arm_arch_svc.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/*******************************************************************************
+ * Tegra platforms
+ ******************************************************************************/
+typedef enum tegra_platform {
+ TEGRA_PLATFORM_SILICON = 0U,
+ TEGRA_PLATFORM_QT,
+ TEGRA_PLATFORM_FPGA,
+ TEGRA_PLATFORM_EMULATION,
+ TEGRA_PLATFORM_LINSIM,
+ TEGRA_PLATFORM_UNIT_FPGA,
+ TEGRA_PLATFORM_VIRT_DEV_KIT,
+ TEGRA_PLATFORM_MAX,
+} tegra_platform_t;
+
+/*******************************************************************************
+ * Tegra macros defining all the SoC minor versions
+ ******************************************************************************/
+#define TEGRA_MINOR_QT U(0)
+#define TEGRA_MINOR_FPGA U(1)
+#define TEGRA_MINOR_ASIM_QT U(2)
+#define TEGRA_MINOR_ASIM_LINSIM U(3)
+#define TEGRA_MINOR_DSIM_ASIM_LINSIM U(4)
+#define TEGRA_MINOR_UNIT_FPGA U(5)
+#define TEGRA_MINOR_VIRT_DEV_KIT U(6)
+
+/*******************************************************************************
+ * Tegra macros defining all the SoC pre_si_platform
+ ******************************************************************************/
+#define TEGRA_PRE_SI_QT U(1)
+#define TEGRA_PRE_SI_FPGA U(2)
+#define TEGRA_PRE_SI_UNIT_FPGA U(3)
+#define TEGRA_PRE_SI_ASIM_QT U(4)
+#define TEGRA_PRE_SI_ASIM_LINSIM U(5)
+#define TEGRA_PRE_SI_DSIM_ASIM_LINSIM U(6)
+#define TEGRA_PRE_SI_VDK U(8)
+
+/*
+ * Read the chip ID value
+ */
+static uint32_t tegra_get_chipid(void)
+{
+ return mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET);
+}
+
+/*
+ * Read the chip's major version from chip ID value
+ */
+uint32_t tegra_get_chipid_major(void)
+{
+ return (tegra_get_chipid() >> MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK;
+}
+
+/*
+ * Read the chip's minor version from the chip ID value
+ */
+uint32_t tegra_get_chipid_minor(void)
+{
+ return (tegra_get_chipid() >> MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK;
+}
+
+/*
+ * Read the chip's pre_si_platform value from the chip ID value
+ */
+static uint32_t tegra_get_chipid_pre_si_platform(void)
+{
+ return (tegra_get_chipid() >> PRE_SI_PLATFORM_SHIFT) & PRE_SI_PLATFORM_MASK;
+}
+
+bool tegra_chipid_is_t186(void)
+{
+ uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+ return (chip_id == TEGRA_CHIPID_TEGRA18);
+}
+
+bool tegra_chipid_is_t210(void)
+{
+ uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+ return (chip_id == TEGRA_CHIPID_TEGRA21);
+}
+
+bool tegra_chipid_is_t210_b01(void)
+{
+ return (tegra_chipid_is_t210() && (tegra_get_chipid_major() == 0x2U));
+}
+
+bool tegra_chipid_is_t194(void)
+{
+ uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+ return (chip_id == TEGRA_CHIPID_TEGRA19);
+}
+
+/*
+ * Read the chip ID value and derive the platform
+ */
+static tegra_platform_t tegra_get_platform(void)
+{
+ uint32_t major, minor, pre_si_platform;
+ tegra_platform_t ret;
+
+ /* get the major/minor chip ID values */
+ major = tegra_get_chipid_major();
+ minor = tegra_get_chipid_minor();
+ pre_si_platform = tegra_get_chipid_pre_si_platform();
+
+ if (major == 0U) {
+ /*
+ * The minor version number is used by simulation platforms
+ */
+ switch (minor) {
+ /*
+ * Cadence's QuickTurn emulation system is a Solaris-based
+ * chip emulation system
+ */
+ case TEGRA_MINOR_QT:
+ case TEGRA_MINOR_ASIM_QT:
+ ret = TEGRA_PLATFORM_QT;
+ break;
+
+ /*
+ * FPGAs are used during early software/hardware development
+ */
+ case TEGRA_MINOR_FPGA:
+ ret = TEGRA_PLATFORM_FPGA;
+ break;
+ /*
+ * Linsim is a reconfigurable, clock-driven, mixed RTL/cmodel
+ * simulation framework.
+ */
+ case TEGRA_MINOR_ASIM_LINSIM:
+ case TEGRA_MINOR_DSIM_ASIM_LINSIM:
+ ret = TEGRA_PLATFORM_LINSIM;
+ break;
+
+ /*
+ * Unit FPGAs run the actual hardware block IP on the FPGA with
+ * the other parts of the system using Linsim.
+ */
+ case TEGRA_MINOR_UNIT_FPGA:
+ ret = TEGRA_PLATFORM_UNIT_FPGA;
+ break;
+ /*
+ * The Virtualizer Development Kit (VDK) is the standard chip
+ * development kit from Synopsys.
+ */
+ case TEGRA_MINOR_VIRT_DEV_KIT:
+ ret = TEGRA_PLATFORM_VIRT_DEV_KIT;
+ break;
+
+ default:
+ ret = TEGRA_PLATFORM_MAX;
+ break;
+ }
+
+ } else if (pre_si_platform > 0U) {
+
+ switch (pre_si_platform) {
+ /*
+ * Cadence's QuickTurn emulation system is a Solaris-based
+ * chip emulation system
+ */
+ case TEGRA_PRE_SI_QT:
+ case TEGRA_PRE_SI_ASIM_QT:
+ ret = TEGRA_PLATFORM_QT;
+ break;
+
+ /*
+ * FPGAs are used during early software/hardware development
+ */
+ case TEGRA_PRE_SI_FPGA:
+ ret = TEGRA_PLATFORM_FPGA;
+ break;
+ /*
+ * Linsim is a reconfigurable, clock-driven, mixed RTL/cmodel
+ * simulation framework.
+ */
+ case TEGRA_PRE_SI_ASIM_LINSIM:
+ case TEGRA_PRE_SI_DSIM_ASIM_LINSIM:
+ ret = TEGRA_PLATFORM_LINSIM;
+ break;
+
+ /*
+ * Unit FPGAs run the actual hardware block IP on the FPGA with
+ * the other parts of the system using Linsim.
+ */
+ case TEGRA_PRE_SI_UNIT_FPGA:
+ ret = TEGRA_PLATFORM_UNIT_FPGA;
+ break;
+ /*
+ * The Virtualizer Development Kit (VDK) is the standard chip
+ * development kit from Synopsys.
+ */
+ case TEGRA_PRE_SI_VDK:
+ ret = TEGRA_PLATFORM_VIRT_DEV_KIT;
+ break;
+
+ default:
+ ret = TEGRA_PLATFORM_MAX;
+ break;
+ }
+
+ } else {
+ /* Actual silicon platforms have a non-zero major version */
+ ret = TEGRA_PLATFORM_SILICON;
+ }
+
+ return ret;
+}
+
+bool tegra_platform_is_silicon(void)
+{
+ return ((tegra_get_platform() == TEGRA_PLATFORM_SILICON) ? true : false);
+}
+
+bool tegra_platform_is_qt(void)
+{
+ return ((tegra_get_platform() == TEGRA_PLATFORM_QT) ? true : false);
+}
+
+bool tegra_platform_is_linsim(void)
+{
+ tegra_platform_t plat = tegra_get_platform();
+
+ return (((plat == TEGRA_PLATFORM_LINSIM) ||
+ (plat == TEGRA_PLATFORM_UNIT_FPGA)) ? true : false);
+}
+
+bool tegra_platform_is_fpga(void)
+{
+ return ((tegra_get_platform() == TEGRA_PLATFORM_FPGA) ? true : false);
+}
+
+bool tegra_platform_is_emulation(void)
+{
+ return (tegra_get_platform() == TEGRA_PLATFORM_EMULATION);
+}
+
+bool tegra_platform_is_unit_fpga(void)
+{
+ return ((tegra_get_platform() == TEGRA_PLATFORM_UNIT_FPGA) ? true : false);
+}
+
+bool tegra_platform_is_virt_dev_kit(void)
+{
+ return ((tegra_get_platform() == TEGRA_PLATFORM_VIRT_DEV_KIT) ? true : false);
+}
+
+/*
+ * This function returns the SoC version, which consists of the fields below
+ *
+ * soc_version[30:24] = JEP-106 continuation code for the SiP
+ * soc_version[23:16] = JEP-106 identification code with parity bit for the SiP
+ * soc_version[15:0] = chip identification
+ */
+int32_t plat_get_soc_version(void)
+{
+ uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+ uint32_t major_rev = tegra_get_chipid_major();
+ uint32_t manfid = SOC_ID_SET_JEP_106(JEDEC_NVIDIA_BKID, JEDEC_NVIDIA_MFID);
+
+ return (int32_t)(manfid | (((chip_id << MAJOR_VERSION_SHIFT) | major_rev) &
+ SOC_ID_IMPL_DEF_MASK));
+}
+
+/*
+ * This function returns the SoC revision in the format below
+ *
+ * soc_revision[15:8] = major version number
+ * soc_revision[7:0] = minor version number
+ */
+int32_t plat_get_soc_revision(void)
+{
+ return (int32_t)(((tegra_get_chipid_major() << 8) | tegra_get_chipid_minor()) &
+ SOC_ID_REV_MASK);
+}
+
+/*****************************************************************************
+ * plat_is_smccc_feature_available() - This function checks whether an SMCCC
+ * feature is available for the platform.
+ * @fid: SMCCC function id
+ *
+ * Return SMC_ARCH_CALL_SUCCESS if SMCCC feature is available and
+ * SMC_ARCH_CALL_NOT_SUPPORTED otherwise.
+ *****************************************************************************/
+int32_t plat_is_smccc_feature_available(u_register_t fid)
+{
+ switch (fid) {
+ case SMCCC_ARCH_SOC_ID:
+ return SMC_ARCH_CALL_SUCCESS;
+ default:
+ return SMC_ARCH_CALL_NOT_SUPPORTED;
+ }
+}
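
The chip-identification helpers above all slice fields from the same hardware revision register. A worked decode under an assumed field layout (the shift/mask values mirror the tegra_def.h convention but are illustrative here):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: bits [7:4] major, [15:8] chip ID, [19:16] minor */
    #define MAJOR_SHIFT   4U
    #define MAJOR_MASK    0xFU
    #define CHIPID_SHIFT  8U
    #define CHIPID_MASK   0xFFU
    #define MINOR_SHIFT   16U
    #define MINOR_MASK    0xFU

    int main(void)
    {
        uint32_t hw_rev = 0x00012120U;  /* hypothetical register value */

        uint32_t major = (hw_rev >> MAJOR_SHIFT) & MAJOR_MASK;
        uint32_t chip = (hw_rev >> CHIPID_SHIFT) & CHIPID_MASK;
        uint32_t minor = (hw_rev >> MINOR_SHIFT) & MINOR_MASK;

        /* chip 0x21 is Tegra210; major 0x2 marks the B01 revision,
         * matching tegra_chipid_is_t210_b01() above */
        printf("chip=0x%x major=%u minor=%u\n", chip, major, minor);
        return 0;
    }

plat_get_soc_version() then packs (chip_id << MAJOR_VERSION_SHIFT) | major_rev into the implementation-defined bits alongside the JEP-106 manufacturer fields.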
diff --git a/plat/nvidia/tegra/common/tegra_pm.c b/plat/nvidia/tegra/common/tegra_pm.c
new file mode 100644
index 0000000..8edb024
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_pm.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <context.h>
+#include <drivers/console.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <plat/common/platform.h>
+
+#include <memctrl.h>
+#include <pmc.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+extern uint64_t tegra_bl31_phys_base;
+extern uint64_t tegra_sec_entry_point;
+
+/*******************************************************************************
+ * This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call to get the `power_state` parameter. This allows the platform to encode
+ * the appropriate State-ID field within the `power_state` parameter which can
+ * be utilized in `pwr_domain_suspend()` to suspend to system affinity level.
+ ******************************************************************************/
+static void tegra_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+ /* all affinities use system suspend state id */
+ for (uint32_t i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++) {
+ req_state->pwr_domain_state[i] = PSTATE_ID_SOC_POWERDN;
+ }
+}
+
+/*******************************************************************************
+ * Handler called when an affinity instance is about to enter standby.
+ ******************************************************************************/
+static void tegra_cpu_standby(plat_local_state_t cpu_state)
+{
+ u_register_t saved_scr_el3;
+
+ (void)cpu_state;
+
+ /* Tegra SoC specific handler */
+ if (tegra_soc_cpu_standby(cpu_state) != PSCI_E_SUCCESS)
+ ERROR("%s failed\n", __func__);
+
+ saved_scr_el3 = read_scr_el3();
+
+ /*
+ * As per ARM ARM D1.17.2, any physical IRQ interrupt received by the
+ * PE will be treated as a wake-up event, if SCR_EL3.IRQ is set to '1',
+ * irrespective of the value of the PSTATE.I bit value.
+ */
+ write_scr_el3(saved_scr_el3 | SCR_IRQ_BIT);
+
+ /*
+ * Enter standby state
+ *
+ * dsb & isb is good practice before using wfi to enter low power states
+ */
+ dsb();
+ isb();
+ wfi();
+
+ /*
+ * Restore saved scr_el3 that has IRQ bit cleared as we don't want EL3
+ * handling any further interrupts
+ */
+ write_scr_el3(saved_scr_el3);
+}
+
+/*******************************************************************************
+ * Handler called when an affinity instance is about to be turned on. The
+ * level and mpidr determine the affinity instance.
+ ******************************************************************************/
+static int32_t tegra_pwr_domain_on(u_register_t mpidr)
+{
+ return tegra_soc_pwr_domain_on(mpidr);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ * Return error if CPU off sequence is not allowed for the current core.
+ ******************************************************************************/
+static int tegra_pwr_domain_off_early(const psci_power_state_t *target_state)
+{
+ return tegra_soc_pwr_domain_off_early(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+static void tegra_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ (void)tegra_soc_pwr_domain_off(target_state);
+
+ /* disable GICC */
+ tegra_gic_cpuif_deactivate();
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ * This handler is called with SMP and data cache enabled, when
+ * HW_ASSISTED_COHERENCY = 0
+ ******************************************************************************/
+void tegra_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
+{
+ tegra_soc_pwr_domain_suspend_pwrdown_early(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+static void tegra_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ (void)tegra_soc_pwr_domain_suspend(target_state);
+
+ /* disable GICC */
+ tegra_gic_cpuif_deactivate();
+}
+
+/*******************************************************************************
+ * Handler called at the end of the power domain suspend sequence. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+static __dead2 void tegra_pwr_domain_power_down_wfi(const psci_power_state_t
+ *target_state)
+{
+ /* call the chip's power down handler */
+ (void)tegra_soc_pwr_domain_power_down_wfi(target_state);
+
+ /* Disable console if we are entering deep sleep. */
+ if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
+ PSTATE_ID_SOC_POWERDN) {
+ INFO("%s: complete. Entering System Suspend...\n", __func__);
+ console_flush();
+ console_switch_state(0);
+ }
+
+ wfi();
+ panic();
+}
+
+/*******************************************************************************
+ * Handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+static void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ const plat_params_from_bl2_t *plat_params;
+
+ /*
+ * Check if we are exiting from deep sleep.
+ */
+ if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
+ PSTATE_ID_SOC_POWERDN) {
+
+ /*
+ * On entering System Suspend state, the GIC loses power
+ * completely. Initialize the GIC global distributor and
+ * GIC cpu interfaces.
+ */
+ tegra_gic_init();
+
+ /* Restart console output. */
+ console_switch_state(CONSOLE_FLAG_RUNTIME);
+
+ /*
+ * Restore Memory Controller settings as it loses state
+ * during system suspend.
+ */
+ tegra_memctrl_restore_settings();
+
+ /*
+ * Security configuration to allow DRAM/device access.
+ */
+ plat_params = bl31_get_plat_params();
+ tegra_memctrl_tzdram_setup(plat_params->tzdram_base,
+ (uint32_t)plat_params->tzdram_size);
+
+ } else {
+ /*
+ * Initialize the GIC cpu and distributor interfaces
+ */
+ tegra_gic_pcpu_init();
+ }
+
+ /*
+ * Reset hardware settings.
+ */
+ (void)tegra_soc_pwr_domain_on_finish(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ ******************************************************************************/
+static void tegra_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+ tegra_pwr_domain_on_finish(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when the system wants to be powered off
+ ******************************************************************************/
+static __dead2 void tegra_system_off(void)
+{
+ INFO("Powering down system...\n");
+
+ tegra_soc_prepare_system_off();
+}
+
+/*******************************************************************************
+ * Handler called when the system wants to be restarted.
+ ******************************************************************************/
+static __dead2 void tegra_system_reset(void)
+{
+ INFO("Restarting system...\n");
+
+ /* per-SoC system reset handler */
+ (void)tegra_soc_prepare_system_reset();
+
+ /* wait for the system to reset */
+ for (;;) {
+ ;
+ }
+}
+
+/*******************************************************************************
+ * Handler called to check the validity of the power state parameter.
+ ******************************************************************************/
+static int32_t tegra_validate_power_state(uint32_t power_state,
+ psci_power_state_t *req_state)
+{
+ assert(req_state != NULL);
+
+ return tegra_soc_validate_power_state(power_state, req_state);
+}
+
+/*******************************************************************************
+ * Platform handler called to check the validity of the non secure entrypoint.
+ ******************************************************************************/
+static int32_t tegra_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+ int32_t ret = PSCI_E_INVALID_ADDRESS;
+
+ /*
+ * Check if the non secure entrypoint lies within the non
+ * secure DRAM.
+ */
+ if ((entrypoint >= TEGRA_DRAM_BASE) && (entrypoint <= TEGRA_DRAM_END)) {
+ ret = PSCI_E_SUCCESS;
+ }
+
+ return ret;
+}
+
+/*******************************************************************************
+ * Export the platform handlers to enable psci to invoke them
+ ******************************************************************************/
+static plat_psci_ops_t tegra_plat_psci_ops = {
+ .cpu_standby = tegra_cpu_standby,
+ .pwr_domain_on = tegra_pwr_domain_on,
+ .pwr_domain_off_early = tegra_pwr_domain_off_early,
+ .pwr_domain_off = tegra_pwr_domain_off,
+ .pwr_domain_suspend_pwrdown_early = tegra_pwr_domain_suspend_pwrdown_early,
+ .pwr_domain_suspend = tegra_pwr_domain_suspend,
+ .pwr_domain_on_finish = tegra_pwr_domain_on_finish,
+ .pwr_domain_suspend_finish = tegra_pwr_domain_suspend_finish,
+ .pwr_domain_pwr_down_wfi = tegra_pwr_domain_power_down_wfi,
+ .system_off = tegra_system_off,
+ .system_reset = tegra_system_reset,
+ .validate_power_state = tegra_validate_power_state,
+ .validate_ns_entrypoint = tegra_validate_ns_entrypoint,
+ .get_sys_suspend_power_state = tegra_get_sys_suspend_power_state,
+};
+
+/*******************************************************************************
+ * Export the platform specific power ops and initialize Power Controller
+ ******************************************************************************/
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const plat_psci_ops_t **psci_ops)
+{
+ psci_power_state_t target_state = { { PSCI_LOCAL_STATE_RUN } };
+
+ /*
+ * Flush entrypoint variable to PoC since it will be
+ * accessed after a reset with the caches turned off.
+ */
+ tegra_sec_entry_point = sec_entrypoint;
+ flush_dcache_range((uint64_t)&tegra_sec_entry_point, sizeof(uint64_t));
+
+ /*
+ * Reset hardware settings.
+ */
+ (void)tegra_soc_pwr_domain_on_finish(&target_state);
+
+ /*
+ * Disable System Suspend if the platform does not
+ * support it
+ */
+ if (!plat_supports_system_suspend()) {
+ tegra_plat_psci_ops.get_sys_suspend_power_state = NULL;
+ }
+
+ /*
+ * Initialize PSCI ops struct
+ */
+ *psci_ops = &tegra_plat_psci_ops;
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+ const plat_local_state_t *states,
+ unsigned int ncpu)
+{
+ return tegra_soc_get_target_pwr_state(lvl, states, ncpu);
+}
diff --git a/plat/nvidia/tegra/common/tegra_sdei.c b/plat/nvidia/tegra/common/tegra_sdei.c
new file mode 100644
index 0000000..9241b81
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_sdei.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* SDEI configuration for Tegra platforms */
+
+#include <platform_def.h>
+
+#include <bl31/ehf.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/utils_def.h>
+#include <services/sdei.h>
+
+/* Private event mappings */
+static sdei_ev_map_t tegra_sdei_private[] = {
+ /* Event 0 definition */
+ SDEI_DEFINE_EVENT_0(TEGRA_SDEI_SGI_PRIVATE),
+
+ /* Dynamic private events */
+ SDEI_PRIVATE_EVENT(TEGRA_SDEI_DP_EVENT_0, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
+ SDEI_PRIVATE_EVENT(TEGRA_SDEI_DP_EVENT_1, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
+ SDEI_PRIVATE_EVENT(TEGRA_SDEI_DP_EVENT_2, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
+
+ /* General purpose explicit events */
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_0, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_1, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_2, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_3, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_4, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_5, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_6, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_7, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_8, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_9, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_10, SDEI_MAPF_CRITICAL),
+ SDEI_EXPLICIT_EVENT(TEGRA_SDEI_EP_EVENT_11, SDEI_MAPF_CRITICAL)
+};
+
+/* Shared event mappings */
+static sdei_ev_map_t tegra_sdei_shared[] = {
+ /* Dynamic shared events */
+ SDEI_SHARED_EVENT(TEGRA_SDEI_DS_EVENT_0, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
+ SDEI_SHARED_EVENT(TEGRA_SDEI_DS_EVENT_1, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
+ SDEI_SHARED_EVENT(TEGRA_SDEI_DS_EVENT_2, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC)
+};
+
+void plat_sdei_setup(void)
+{
+ INFO("SDEI platform setup\n");
+}
+
+/* Export Tegra SDEI events */
+REGISTER_SDEI_MAP(tegra_sdei_private, tegra_sdei_shared);
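+
+/*
+ * Illustrative only: a non-secure client would typically consume one of the
+ * dynamic events above using the standard SDEI call sequence, e.g.
+ *
+ *   SDEI_INTERRUPT_BIND(irq)            -> yields a dynamic event number
+ *   SDEI_EVENT_REGISTER(ev, handler, arg, flags, affinity)
+ *   SDEI_EVENT_ENABLE(ev)
+ *   SDEI_PE_UNMASK()
+ *
+ * The explicit TEGRA_SDEI_EP_* events are instead dispatched from EL3
+ * firmware, e.g. via sdei_dispatch_event().
+ */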
diff --git a/plat/nvidia/tegra/common/tegra_sip_calls.c b/plat/nvidia/tegra/common/tegra_sip_calls.c
new file mode 100644
index 0000000..80a2c4d
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_sip_calls.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/mmio.h>
+
+#include <memctrl.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/*******************************************************************************
+ * Common Tegra SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_NEW_VIDEOMEM_REGION 0x82000003
+#define TEGRA_SIP_FIQ_NS_ENTRYPOINT 0x82000005
+#define TEGRA_SIP_FIQ_NS_GET_CONTEXT 0x82000006
+
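+/*
+ * Calling convention for TEGRA_SIP_NEW_VIDEOMEM_REGION, as enforced by the
+ * handler below (a sketch derived from the checks, not an ABI document):
+ *
+ *   x0 = TEGRA_SIP_NEW_VIDEOMEM_REGION
+ *   x1 = 1MB-aligned physical base of the new video memory carveout
+ *   x2 = 1MB-aligned size of the carveout, in bytes
+ *
+ * The request is rejected unless video memory resize is enabled in the
+ * memory controller and the GPU is held in reset.
+ */
+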
+/*******************************************************************************
+ * This function is responsible for handling all SiP calls
+ ******************************************************************************/
+uintptr_t tegra_sip_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ uint32_t regval, local_x2_32 = (uint32_t)x2;
+ int32_t err;
+
+ /* Check if this is a SoC specific SiP */
+ err = plat_sip_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
+ if (err == 0) {
+
+ SMC_RET1(handle, (uint64_t)err);
+
+ } else {
+
+ switch (smc_fid) {
+
+ case TEGRA_SIP_NEW_VIDEOMEM_REGION:
+ /* Check whether Video memory resize is enabled */
+ if (mmio_read_32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_REG_CTRL)
+ != MC_VIDEO_PROTECT_WRITE_ACCESS_ENABLED) {
+				ERROR("Video Memory Resize isn't enabled!\n");
+ SMC_RET1(handle, (uint64_t)-ENOTSUP);
+ }
+
+ /*
+ * Check if Video Memory overlaps TZDRAM (contains bl31/bl32)
+ * or falls outside of the valid DRAM range
+ */
+ err = bl31_check_ns_address(x1, local_x2_32);
+ if (err != 0) {
+ SMC_RET1(handle, (uint64_t)err);
+ }
+
+ /*
+ * Check if Video Memory is aligned to 1MB.
+ */
+ if (((x1 & 0xFFFFFU) != 0U) || ((local_x2_32 & 0xFFFFFU) != 0U)) {
+ ERROR("Unaligned Video Memory base address!\n");
+ SMC_RET1(handle, (uint64_t)-ENOTSUP);
+ }
+
+ /*
+ * The GPU is the user of the Video Memory region. In order to
+ * transition to the new memory region smoothly, we program the
+ * new base/size ONLY if the GPU is in reset mode.
+ */
+ regval = mmio_read_32(TEGRA_CAR_RESET_BASE +
+ TEGRA_GPU_RESET_REG_OFFSET);
+ if ((regval & GPU_RESET_BIT) == 0U) {
+ ERROR("GPU not in reset! Video Memory setup failed\n");
+ SMC_RET1(handle, (uint64_t)-ENOTSUP);
+ }
+
+ /* new video memory carveout settings */
+ tegra_memctrl_videomem_setup(x1, local_x2_32);
+
+			/*
+			 * Ensure that the GPU is still in reset after the VPR
+			 * resize; if it slipped out of reset, assert the
+			 * reset again.
+			 */
+ regval = mmio_read_32(TEGRA_CAR_RESET_BASE +
+ TEGRA_GPU_RESET_REG_OFFSET);
+ if ((regval & GPU_RESET_BIT) == 0U) {
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_GPU_SET_OFFSET,
+ GPU_SET_BIT);
+ }
+
+ SMC_RET1(handle, 0);
+
+ /*
+ * The NS world registers the address of its handler to be
+ * used for processing the FIQ. This is normally used by the
+ * NS FIQ debugger driver to detect system hangs by programming
+ * a watchdog timer to fire a FIQ interrupt.
+ */
+ case TEGRA_SIP_FIQ_NS_ENTRYPOINT:
+
+ if (x1 == 0U) {
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ /*
+ * TODO: Check if x1 contains a valid DRAM address
+ */
+
+ /* store the NS world's entrypoint */
+ tegra_fiq_set_ns_entrypoint(x1);
+
+ SMC_RET1(handle, 0);
+
+ /*
+ * The NS world's FIQ handler issues this SMC to get the NS EL1/EL0
+ * CPU context when the FIQ interrupt was triggered. This allows the
+ * NS world to understand the CPU state when the watchdog interrupt
+ * triggered.
+ */
+ case TEGRA_SIP_FIQ_NS_GET_CONTEXT:
+
+ /* retrieve context registers when FIQ triggered */
+ (void)tegra_fiq_get_intr_context();
+
+ SMC_RET0(handle);
+
+ default:
+ ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+ break;
+ }
+ }
+
+ SMC_RET1(handle, SMC_UNK);
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ tegra_sip_fast,
+
+ (OEN_SIP_START),
+ (OEN_SIP_END),
+ (SMC_TYPE_FAST),
+ (NULL),
+ (tegra_sip_handler)
+);
diff --git a/plat/nvidia/tegra/common/tegra_stack_protector.c b/plat/nvidia/tegra/common/tegra_stack_protector.c
new file mode 100644
index 0000000..f6c459a
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_stack_protector.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <lib/mmio.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+u_register_t plat_get_stack_protector_canary(void)
+{
+ u_register_t seed;
+
+ /*
+ * Ideally, a random number should be returned instead. As the
+ * platform does not have any random number generator, this is
+ * better than nothing, but not really secure.
+ */
+ seed = mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET);
+ seed <<= 32;
+ seed |= mmio_read_32(TEGRA_TMRUS_BASE);
+
+ return seed ^ read_cntpct_el0();
+}
diff --git a/plat/nvidia/tegra/drivers/bpmp/bpmp.c b/plat/nvidia/tegra/drivers/bpmp/bpmp.c
new file mode 100644
index 0000000..d7db604
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/bpmp/bpmp.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bpmp.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <errno.h>
+#include <lib/mmio.h>
+#include <plat/common/platform.h>
+#include <stdbool.h>
+#include <string.h>
+#include <tegra_def.h>
+
+#define BPMP_TIMEOUT 500 /* 500ms */
+
+static uint32_t channel_base[NR_CHANNELS];
+static uint32_t bpmp_init_state = BPMP_INIT_PENDING;
+
+static uint32_t channel_field(unsigned int ch)
+{
+ return mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET) & CH_MASK(ch);
+}
+
+static bool master_free(unsigned int ch)
+{
+ return channel_field(ch) == MA_FREE(ch);
+}
+
+static bool master_acked(unsigned int ch)
+{
+ return channel_field(ch) == MA_ACKD(ch);
+}
+
+static void signal_slave(unsigned int ch)
+{
+ mmio_write_32(TEGRA_RES_SEMA_BASE + CLR_OFFSET, CH_MASK(ch));
+}
+
+static void free_master(unsigned int ch)
+{
+	/* clear the bits that distinguish MA_ACKD from MA_FREE */
+	mmio_write_32(TEGRA_RES_SEMA_BASE + CLR_OFFSET,
+			MA_ACKD(ch) ^ MA_FREE(ch));
+}
+
+/* should be called with local irqs disabled */
+int32_t tegra_bpmp_send_receive_atomic(int mrq, const void *ob_data, int ob_sz,
+ void *ib_data, int ib_sz)
+{
+ unsigned int ch = (unsigned int)plat_my_core_pos();
+ mb_data_t *p = (mb_data_t *)(uintptr_t)channel_base[ch];
+ int32_t ret = -ETIMEDOUT, timeout = 0;
+
+ if (bpmp_init_state == BPMP_INIT_COMPLETE) {
+
+ /* loop until BPMP is free */
+ for (timeout = 0; timeout < BPMP_TIMEOUT; timeout++) {
+ if (master_free(ch) == true) {
+ break;
+ }
+
+ mdelay(1);
+ }
+
+ if (timeout != BPMP_TIMEOUT) {
+
+ /* generate the command struct */
+ p->code = mrq;
+ p->flags = DO_ACK;
+ (void)memcpy((void *)p->data, ob_data, (size_t)ob_sz);
+
+ /* signal command ready to the BPMP */
+ signal_slave(ch);
+ mmio_write_32(TEGRA_PRI_ICTLR_BASE + CPU_IEP_FIR_SET,
+ (1U << INT_SHR_SEM_OUTBOX_FULL));
+
+ /* loop until the command is executed */
+ for (timeout = 0; timeout < BPMP_TIMEOUT; timeout++) {
+ if (master_acked(ch) == true) {
+ break;
+ }
+
+ mdelay(1);
+ }
+
+ if (timeout != BPMP_TIMEOUT) {
+
+ /* get the command response */
+ (void)memcpy(ib_data, (const void *)p->data,
+ (size_t)ib_sz);
+
+ /* return error code */
+ ret = p->code;
+
+ /* free this channel */
+ free_master(ch);
+ }
+ }
+
+ } else {
+ /* return error code */
+ ret = -EINVAL;
+ }
+
+ if (timeout == BPMP_TIMEOUT) {
+ ERROR("Timed out waiting for bpmp's response\n");
+ }
+
+ return ret;
+}
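+
+/*
+ * Hypothetical caller sketch; the MRQ code and payload below are examples
+ * only and depend on the BPMP firmware in use:
+ *
+ *	uint32_t req = 0U, resp = 0U;
+ *	int32_t err = tegra_bpmp_send_receive_atomic(mrq_code, &req,
+ *				sizeof(req), &resp, sizeof(resp));
+ *	if (err < 0)
+ *		handle -EINVAL (no BPMP) or -ETIMEDOUT;
+ */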
+
+int tegra_bpmp_init(void)
+{
+ uint32_t val, base, timeout = BPMP_TIMEOUT;
+ unsigned int ch;
+ int ret = 0;
+
+ if (bpmp_init_state == BPMP_INIT_PENDING) {
+
+ /* check if the bpmp processor is alive. */
+ do {
+ val = mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET);
+ if (val != SIGN_OF_LIFE) {
+ mdelay(1);
+ timeout--;
+ }
+
+ } while ((val != SIGN_OF_LIFE) && (timeout > 0U));
+
+ if (val == SIGN_OF_LIFE) {
+
+ /* check if clock for the atomics block is enabled */
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_ENB_V);
+ if ((val & CAR_ENABLE_ATOMICS) == 0) {
+ ERROR("Clock to the atomics block is disabled\n");
+ }
+
+ /* check if the atomics block is out of reset */
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_CLR_V);
+ if ((val & CAR_ENABLE_ATOMICS) == CAR_ENABLE_ATOMICS) {
+ ERROR("Reset to the atomics block is asserted\n");
+ }
+
+ /* base address to get the result from Atomics */
+ base = TEGRA_ATOMICS_BASE + RESULT0_REG_OFFSET;
+
+ /* channel area is setup by BPMP before signaling handshake */
+ for (ch = 0; ch < NR_CHANNELS; ch++) {
+
+ /* issue command to get the channel base address */
+ mmio_write_32(base, (ch << TRIGGER_ID_SHIFT) |
+ ATOMIC_CMD_GET);
+
+ /* get the base address for the channel */
+ channel_base[ch] = mmio_read_32(base);
+
+ /* increment result register offset */
+ base += 4U;
+ }
+
+ /* mark state as "initialized" */
+ bpmp_init_state = BPMP_INIT_COMPLETE;
+
+ /* the channel values have to be visible across all cpus */
+ flush_dcache_range((uint64_t)channel_base,
+ sizeof(channel_base));
+ flush_dcache_range((uint64_t)&bpmp_init_state,
+ sizeof(bpmp_init_state));
+
+ INFO("%s: done\n", __func__);
+
+ } else {
+ ERROR("BPMP not powered on\n");
+
+ /* bpmp is not present in the system */
+ bpmp_init_state = BPMP_NOT_PRESENT;
+
+ /* communication timed out */
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ return ret;
+}
+
+void tegra_bpmp_suspend(void)
+{
+ /* freeze the interface */
+ if (bpmp_init_state == BPMP_INIT_COMPLETE) {
+ bpmp_init_state = BPMP_SUSPEND_ENTRY;
+ flush_dcache_range((uint64_t)&bpmp_init_state,
+ sizeof(bpmp_init_state));
+ }
+}
+
+void tegra_bpmp_resume(void)
+{
+ uint32_t val, timeout = 0;
+
+ if (bpmp_init_state == BPMP_SUSPEND_ENTRY) {
+
+ /* check if the bpmp processor is alive. */
+ do {
+
+ val = mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET);
+ if (val != SIGN_OF_LIFE) {
+ mdelay(1);
+ timeout++;
+ }
+
+ } while ((val != SIGN_OF_LIFE) && (timeout < BPMP_TIMEOUT));
+
+ if (val == SIGN_OF_LIFE) {
+
+			INFO("%s: BPMP took %u ms to resume\n", __func__, timeout);
+
+ /* mark state as "initialized" */
+ bpmp_init_state = BPMP_INIT_COMPLETE;
+
+ /* state has to be visible across all cpus */
+ flush_dcache_range((uint64_t)&bpmp_init_state,
+ sizeof(bpmp_init_state));
+ } else {
+ ERROR("BPMP not powered on\n");
+ }
+ }
+}
diff --git a/plat/nvidia/tegra/drivers/bpmp_ipc/intf.c b/plat/nvidia/tegra/drivers/bpmp_ipc/intf.c
new file mode 100644
index 0000000..2e90d25
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/bpmp_ipc/intf.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <bpmp_ipc.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <errno.h>
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+#include <stdbool.h>
+#include <string.h>
+#include <tegra_def.h>
+
+#include "intf.h"
+#include "ivc.h"
+
+/**
+ * Holds IVC channel data
+ */
+struct ccplex_bpmp_channel_data {
+ /* Buffer for incoming data */
+ struct frame_data *ib;
+
+ /* Buffer for outgoing data */
+ struct frame_data *ob;
+};
+
+static struct ccplex_bpmp_channel_data s_channel;
+static struct ivc ivc_ccplex_bpmp_channel;
+
+/*
+ * Helper functions to access the HSP doorbell registers
+ */
+static inline uint32_t hsp_db_read(uint32_t reg)
+{
+ return mmio_read_32((uint32_t)(TEGRA_HSP_DBELL_BASE + reg));
+}
+
+static inline void hsp_db_write(uint32_t reg, uint32_t val)
+{
+ mmio_write_32((uint32_t)(TEGRA_HSP_DBELL_BASE + reg), val);
+}
+
+/*******************************************************************************
+ * IVC wrappers for CCPLEX <-> BPMP communication.
+ ******************************************************************************/
+
+static void tegra_bpmp_ring_bpmp_doorbell(void);
+
+/*
+ * Get the next frame where data can be written.
+ */
+static struct frame_data *tegra_bpmp_get_next_out_frame(void)
+{
+ struct frame_data *frame;
+ const struct ivc *ch = &ivc_ccplex_bpmp_channel;
+
+ frame = (struct frame_data *)tegra_ivc_write_get_next_frame(ch);
+ if (frame == NULL) {
+ ERROR("%s: Error in getting next frame, exiting\n", __func__);
+ } else {
+ s_channel.ob = frame;
+ }
+
+ return frame;
+}
+
+static void tegra_bpmp_signal_slave(void)
+{
+ (void)tegra_ivc_write_advance(&ivc_ccplex_bpmp_channel);
+ tegra_bpmp_ring_bpmp_doorbell();
+}
+
+static int32_t tegra_bpmp_free_master(void)
+{
+ return tegra_ivc_read_advance(&ivc_ccplex_bpmp_channel);
+}
+
+static bool tegra_bpmp_slave_acked(void)
+{
+ struct frame_data *frame;
+ bool ret = true;
+
+ frame = (struct frame_data *)tegra_ivc_read_get_next_frame(&ivc_ccplex_bpmp_channel);
+ if (frame == NULL) {
+ ret = false;
+ } else {
+ s_channel.ib = frame;
+ }
+
+ return ret;
+}
+
+static struct frame_data *tegra_bpmp_get_cur_in_frame(void)
+{
+ return s_channel.ib;
+}
+
+/*
+ * Enables BPMP to ring CCPlex doorbell
+ */
+static void tegra_bpmp_enable_ccplex_doorbell(void)
+{
+ uint32_t reg;
+
+ reg = hsp_db_read(HSP_DBELL_1_ENABLE);
+ reg |= HSP_MASTER_BPMP_BIT;
+ hsp_db_write(HSP_DBELL_1_ENABLE, reg);
+}
+
+/*
+ * CCPlex rings the BPMP doorbell
+ */
+static void tegra_bpmp_ring_bpmp_doorbell(void)
+{
+	/*
+	 * Any write to this register has the same effect: the hardware
+	 * uses the master ID of the write transaction and sets the
+	 * corresponding flag.
+	 */
+ hsp_db_write(HSP_DBELL_3_TRIGGER, HSP_MASTER_CCPLEX_BIT);
+}
+
+/*
+ * Returns true if the CCPLEX can ring the BPMP doorbell, otherwise false.
+ * This also signals that the BPMP is up and ready.
+ */
+static bool tegra_bpmp_can_ccplex_ring_doorbell(void)
+{
+ uint32_t reg;
+
+ /* check if ccplex can communicate with bpmp */
+ reg = hsp_db_read(HSP_DBELL_3_ENABLE);
+
+ return ((reg & HSP_MASTER_CCPLEX_BIT) != 0U);
+}
+
+static int32_t tegra_bpmp_wait_for_slave_ack(void)
+{
+ uint32_t timeout = TIMEOUT_RESPONSE_FROM_BPMP_US;
+
+ while (!tegra_bpmp_slave_acked() && (timeout != 0U)) {
+ udelay(1);
+ timeout--;
+	}
+
+ return ((timeout == 0U) ? -ETIMEDOUT : 0);
+}
+
+/*
+ * Notification from the ivc layer
+ */
+static void tegra_bpmp_ivc_notify(const struct ivc *ivc)
+{
+ (void)(ivc);
+
+ tegra_bpmp_ring_bpmp_doorbell();
+}
+
+/*
+ * Atomic send/receive API, which means it waits until slave acks
+ */
+static int32_t tegra_bpmp_ipc_send_req_atomic(uint32_t mrq, void *p_out,
+ uint32_t size_out, void *p_in, uint32_t size_in)
+{
+ struct frame_data *frame = tegra_bpmp_get_next_out_frame();
+ const struct frame_data *f_in = NULL;
+ int32_t ret = 0;
+ void *p_fdata;
+
+ if ((p_out == NULL) || (size_out > IVC_DATA_SZ_BYTES) ||
+ (frame == NULL)) {
+ ERROR("%s: invalid parameters, exiting\n", __func__);
+ return -EINVAL;
+ }
+
+ /* prepare the command frame */
+ frame->mrq = mrq;
+ frame->flags = FLAG_DO_ACK;
+ p_fdata = frame->data;
+ (void)memcpy(p_fdata, p_out, (size_t)size_out);
+
+ /* signal the slave */
+ tegra_bpmp_signal_slave();
+
+ /* wait for slave to ack */
+ ret = tegra_bpmp_wait_for_slave_ack();
+ if (ret < 0) {
+ ERROR("%s: wait for slave failed (%d)\n", __func__, ret);
+ return ret;
+ }
+
+ /* retrieve the response frame */
+ if ((size_in <= IVC_DATA_SZ_BYTES) && (p_in != NULL)) {
+
+		f_in = tegra_bpmp_get_cur_in_frame();
+		if (f_in == NULL) {
+			ERROR("Failed to get current input frame!\n");
+		} else {
+			(void)memcpy(p_in, (const void *)f_in->data,
+					(size_t)size_in);
+		}
+ }
+
+ ret = tegra_bpmp_free_master();
+ if (ret < 0) {
+ ERROR("%s: free master failed (%d)\n", __func__, ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Initializes the BPMP<--->CCPlex communication path.
+ */
+int32_t tegra_bpmp_ipc_init(void)
+{
+ size_t msg_size;
+ uint32_t frame_size, timeout;
+ int32_t error = 0;
+
+ /* allow bpmp to ring CCPLEX's doorbell */
+ tegra_bpmp_enable_ccplex_doorbell();
+
+ /* wait for BPMP to actually ring the doorbell */
+ timeout = TIMEOUT_RESPONSE_FROM_BPMP_US;
+ while ((timeout != 0U) && !tegra_bpmp_can_ccplex_ring_doorbell()) {
+ udelay(1); /* bpmp turn-around time */
+ timeout--;
+ }
+
+ if (timeout == 0U) {
+ ERROR("%s: BPMP firmware is not ready\n", __func__);
+ return -ENOTSUP;
+ }
+
+ INFO("%s: BPMP handshake completed\n", __func__);
+
+ msg_size = tegra_ivc_align(IVC_CMD_SZ_BYTES);
+ frame_size = (uint32_t)tegra_ivc_total_queue_size(msg_size);
+ if (frame_size > TEGRA_BPMP_IPC_CH_MAP_SIZE) {
+ ERROR("%s: carveout size is not sufficient\n", __func__);
+ return -EINVAL;
+ }
+
+ error = tegra_ivc_init(&ivc_ccplex_bpmp_channel,
+ (uint32_t)TEGRA_BPMP_IPC_RX_PHYS_BASE,
+ (uint32_t)TEGRA_BPMP_IPC_TX_PHYS_BASE,
+ 1U, frame_size, tegra_bpmp_ivc_notify);
+ if (error != 0) {
+
+ ERROR("%s: IVC init failed (%d)\n", __func__, error);
+
+ } else {
+
+ /* reset channel */
+ tegra_ivc_channel_reset(&ivc_ccplex_bpmp_channel);
+
+ /* wait for notification from BPMP */
+ while (tegra_ivc_channel_notified(&ivc_ccplex_bpmp_channel) != 0) {
+ /*
+ * Interrupt BPMP with doorbell each time after
+ * tegra_ivc_channel_notified() returns non zero
+ * value.
+ */
+ tegra_bpmp_ring_bpmp_doorbell();
+ }
+
+ INFO("%s: All communication channels initialized\n", __func__);
+ }
+
+ return error;
+}
+
+/* Handler to reset a hardware module */
+int32_t tegra_bpmp_ipc_reset_module(uint32_t rst_id)
+{
+ int32_t ret;
+ struct mrq_reset_request req = {
+ .cmd = (uint32_t)CMD_RESET_MODULE,
+ .reset_id = rst_id
+ };
+
+ /* only GPCDMA/XUSB_PADCTL resets are supported */
+ assert((rst_id == TEGRA_RESET_ID_XUSB_PADCTL) ||
+ (rst_id == TEGRA_RESET_ID_GPCDMA));
+
+ ret = tegra_bpmp_ipc_send_req_atomic(MRQ_RESET, &req,
+ (uint32_t)sizeof(req), NULL, 0);
+ if (ret != 0) {
+ ERROR("%s: failed for module %d with error %d\n", __func__,
+ rst_id, ret);
+ }
+
+ return ret;
+}
+
+int tegra_bpmp_ipc_enable_clock(uint32_t clk_id)
+{
+ int ret;
+ struct mrq_clk_request req;
+
+ /* only SE clocks are supported */
+ if (clk_id != TEGRA_CLK_SE) {
+ return -ENOTSUP;
+ }
+
+ /* prepare the MRQ_CLK command */
+ req.cmd_and_id = make_mrq_clk_cmd(CMD_CLK_ENABLE, clk_id);
+
+ ret = tegra_bpmp_ipc_send_req_atomic(MRQ_CLK, &req, (uint32_t)sizeof(req),
+ NULL, 0);
+ if (ret != 0) {
+ ERROR("%s: failed for module %d with error %d\n", __func__,
+ clk_id, ret);
+ }
+
+ return ret;
+}
+
+int tegra_bpmp_ipc_disable_clock(uint32_t clk_id)
+{
+ int ret;
+ struct mrq_clk_request req;
+
+ /* only SE clocks are supported */
+ if (clk_id != TEGRA_CLK_SE) {
+ return -ENOTSUP;
+ }
+
+ /* prepare the MRQ_CLK command */
+ req.cmd_and_id = make_mrq_clk_cmd(CMD_CLK_DISABLE, clk_id);
+
+ ret = tegra_bpmp_ipc_send_req_atomic(MRQ_CLK, &req, (uint32_t)sizeof(req),
+ NULL, 0);
+ if (ret != 0) {
+ ERROR("%s: failed for module %d with error %d\n", __func__,
+ clk_id, ret);
+ }
+
+ return ret;
+}
diff --git a/plat/nvidia/tegra/drivers/bpmp_ipc/intf.h b/plat/nvidia/tegra/drivers/bpmp_ipc/intf.h
new file mode 100644
index 0000000..d85b906
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/bpmp_ipc/intf.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BPMP_INTF_H
+#define BPMP_INTF_H
+
+/**
+ * Flags used in an IPC request
+ */
+#define FLAG_DO_ACK (U(1) << 0)
+#define FLAG_RING_DOORBELL (U(1) << 1)
+
+/* Bit 1 is designated for CCPlex in secure world */
+#define HSP_MASTER_CCPLEX_BIT (U(1) << 1)
+/* Bit 19 is designated for BPMP in non-secure world */
+#define HSP_MASTER_BPMP_BIT (U(1) << 19)
+/* Timeout to receive response from BPMP is 1 sec */
+#define TIMEOUT_RESPONSE_FROM_BPMP_US U(1000000) /* in microseconds */
+
+/**
+ * IVC protocol defines and command/response frame
+ */
+
+/**
+ * IVC specific defines
+ */
+#define IVC_CMD_SZ_BYTES U(128)
+#define IVC_DATA_SZ_BYTES U(120)
+
+/**
+ * Holds frame data for an IPC request
+ */
+struct frame_data {
+ /* Identification as to what kind of data is being transmitted */
+ uint32_t mrq;
+
+ /* Flags for slave as to how to respond back */
+ uint32_t flags;
+
+ /* Actual data being sent */
+ uint8_t data[IVC_DATA_SZ_BYTES];
+};
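+
+/*
+ * Consistency note: the frame layout above occupies 4 (mrq) + 4 (flags) +
+ * 120 (data) = 128 bytes, matching IVC_CMD_SZ_BYTES, the per-frame command
+ * size used when the channel is initialized.
+ */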
+
+/**
+ * Commands send to the BPMP firmware
+ */
+
+/**
+ * MRQ command codes
+ */
+#define MRQ_RESET U(20)
+#define MRQ_CLK U(22)
+
+/**
+ * Reset sub-commands
+ */
+#define CMD_RESET_ASSERT U(1)
+#define CMD_RESET_DEASSERT U(2)
+#define CMD_RESET_MODULE U(3)
+
+/**
+ * Used by the sender of an #MRQ_RESET message to request BPMP to
+ * assert or deassert a given reset line.
+ */
+struct __attribute__((packed)) mrq_reset_request {
+ /* reset action to perform (mrq_reset_commands) */
+ uint32_t cmd;
+	/* id of the reset to be affected */
+ uint32_t reset_id;
+};
+
+/**
+ * MRQ_CLK sub-commands
+ */
+enum {
+ CMD_CLK_GET_RATE = U(1),
+ CMD_CLK_SET_RATE = U(2),
+ CMD_CLK_ROUND_RATE = U(3),
+ CMD_CLK_GET_PARENT = U(4),
+ CMD_CLK_SET_PARENT = U(5),
+ CMD_CLK_IS_ENABLED = U(6),
+ CMD_CLK_ENABLE = U(7),
+ CMD_CLK_DISABLE = U(8),
+ CMD_CLK_GET_ALL_INFO = U(14),
+ CMD_CLK_GET_MAX_CLK_ID = U(15),
+ CMD_CLK_MAX,
+};
+
+/**
+ * Used by the sender of an #MRQ_CLK message to control clocks. The
+ * clk_request is split into several sub-commands. Some sub-commands
+ * require no additional data. Others have a sub-command specific
+ * payload
+ *
+ * |sub-command |payload |
+ * |----------------------------|-----------------------|
+ * |CMD_CLK_GET_RATE |- |
+ * |CMD_CLK_SET_RATE |clk_set_rate |
+ * |CMD_CLK_ROUND_RATE |clk_round_rate |
+ * |CMD_CLK_GET_PARENT |- |
+ * |CMD_CLK_SET_PARENT |clk_set_parent |
+ * |CMD_CLK_IS_ENABLED |- |
+ * |CMD_CLK_ENABLE |- |
+ * |CMD_CLK_DISABLE |- |
+ * |CMD_CLK_GET_ALL_INFO |- |
+ * |CMD_CLK_GET_MAX_CLK_ID |- |
+ *
+ */
+struct mrq_clk_request {
+ /**
+ * sub-command and clock id concatenated to 32-bit word.
+ * - bits[31..24] is the sub-cmd.
+ * - bits[23..0] is the clock id
+ */
+ uint32_t cmd_and_id;
+};
+
+/**
+ * Macro to prepare the MRQ_CLK sub-command
+ */
+#define make_mrq_clk_cmd(cmd, id)	(((cmd) << 24) | ((id) & 0xFFFFFF))
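+
+/*
+ * Example: make_mrq_clk_cmd(CMD_CLK_ENABLE, TEGRA_CLK_SE) packs sub-command
+ * 7 into bits[31..24] and the SE clock id into bits[23..0] of cmd_and_id.
+ */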
+
+#endif /* BPMP_INTF_H */
diff --git a/plat/nvidia/tegra/drivers/bpmp_ipc/ivc.c b/plat/nvidia/tegra/drivers/bpmp_ipc/ivc.c
new file mode 100644
index 0000000..d964fc0
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/bpmp_ipc/ivc.c
@@ -0,0 +1,654 @@
+/*
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/debug.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+
+#include "ivc.h"
+
+/*
+ * IVC channel reset protocol.
+ *
+ * Each end uses its tx_channel.state to indicate its synchronization state.
+ */
+enum {
+ /*
+ * This value is zero for backwards compatibility with services that
+ * assume channels to be initially zeroed. Such channels are in an
+ * initially valid state, but cannot be asynchronously reset, and must
+ * maintain a valid state at all times.
+ *
+ * The transmitting end can enter the established state from the sync or
+ * ack state when it observes the receiving endpoint in the ack or
+	 * established state, indicating that it has cleared the counters in our
+ * rx_channel.
+ */
+ ivc_state_established = U(0),
+
+ /*
+ * If an endpoint is observed in the sync state, the remote endpoint is
+ * allowed to clear the counters it owns asynchronously with respect to
+ * the current endpoint. Therefore, the current endpoint is no longer
+ * allowed to communicate.
+ */
+ ivc_state_sync = U(1),
+
+ /*
+ * When the transmitting end observes the receiving end in the sync
+ * state, it can clear the w_count and r_count and transition to the ack
+ * state. If the remote endpoint observes us in the ack state, it can
+ * return to the established state once it has cleared its counters.
+ */
+ ivc_state_ack = U(2)
+};
+
+/*
+ * This structure is divided into two cache-aligned parts, the first is only
+ * written through the tx_channel pointer, while the second is only written
+ * through the rx_channel pointer. This delineates ownership of the cache lines,
+ * which is critical to performance and necessary in non-cache coherent
+ * implementations.
+ */
+struct ivc_channel_header {
+ struct {
+ /* fields owned by the transmitting end */
+ uint32_t w_count;
+ uint32_t state;
+ uint32_t w_rsvd[IVC_CHHDR_TX_FIELDS - 2];
+ };
+ struct {
+ /* fields owned by the receiving end */
+ uint32_t r_count;
+ uint32_t r_rsvd[IVC_CHHDR_RX_FIELDS - 1];
+ };
+};
+
+static inline bool ivc_channel_empty(const struct ivc *ivc,
+ volatile const struct ivc_channel_header *ch)
+{
+ /*
+ * This function performs multiple checks on the same values with
+ * security implications, so sample the counters' current values in
+ * shared memory to ensure that these checks use the same values.
+ */
+ uint32_t wr_count = ch->w_count;
+ uint32_t rd_count = ch->r_count;
+ bool ret = false;
+
+ /*
+ * Perform an over-full check to prevent denial of service attacks where
+ * a server could be easily fooled into believing that there's an
+ * extremely large number of frames ready, since receivers are not
+ * expected to check for full or over-full conditions.
+ *
+ * Although the channel isn't empty, this is an invalid case caused by
+ * a potentially malicious peer, so returning empty is safer, because it
+ * gives the impression that the channel has gone silent.
+ */
+ if (((wr_count - rd_count) > ivc->nframes) || (wr_count == rd_count)) {
+ ret = true;
+ }
+
+ return ret;
+}
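+
+/*
+ * Worked example with nframes == 4: (w_count 6, r_count 2) is a full but
+ * valid channel; (w_count 2, r_count 2) is empty; a corrupted
+ * (w_count 100, r_count 2) is "over-full" and deliberately reported empty.
+ */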
+
+static inline bool ivc_channel_full(const struct ivc *ivc,
+ volatile const struct ivc_channel_header *ch)
+{
+ uint32_t wr_count = ch->w_count;
+ uint32_t rd_count = ch->r_count;
+
+ /*
+ * Invalid cases where the counters indicate that the queue is over
+ * capacity also appear full.
+ */
+ return ((wr_count - rd_count) >= ivc->nframes);
+}
+
+static inline uint32_t ivc_channel_avail_count(const struct ivc *ivc,
+ volatile const struct ivc_channel_header *ch)
+{
+ uint32_t wr_count = ch->w_count;
+ uint32_t rd_count = ch->r_count;
+
+ (void)ivc;
+
+ /*
+ * This function isn't expected to be used in scenarios where an
+ * over-full situation can lead to denial of service attacks. See the
+ * comment in ivc_channel_empty() for an explanation about special
+ * over-full considerations.
+ */
+ return (wr_count - rd_count);
+}
+
+static inline void ivc_advance_tx(struct ivc *ivc)
+{
+ ivc->tx_channel->w_count++;
+
+ if (ivc->w_pos == (ivc->nframes - (uint32_t)1U)) {
+ ivc->w_pos = 0U;
+ } else {
+ ivc->w_pos++;
+ }
+}
+
+static inline void ivc_advance_rx(struct ivc *ivc)
+{
+ ivc->rx_channel->r_count++;
+
+ if (ivc->r_pos == (ivc->nframes - (uint32_t)1U)) {
+ ivc->r_pos = 0U;
+ } else {
+ ivc->r_pos++;
+ }
+}
+
+static inline int32_t ivc_check_read(const struct ivc *ivc)
+{
+ /*
+ * tx_channel->state is set locally, so it is not synchronized with
+ * state from the remote peer. The remote peer cannot reset its
+ * transmit counters until we've acknowledged its synchronization
+ * request, so no additional synchronization is required because an
+ * asynchronous transition of rx_channel->state to ivc_state_ack is not
+ * allowed.
+ */
+ if (ivc->tx_channel->state != ivc_state_established) {
+ return -ECONNRESET;
+ }
+
+ /*
+ * Avoid unnecessary invalidations when performing repeated accesses to
+ * an IVC channel by checking the old queue pointers first.
+ * Synchronization is only necessary when these pointers indicate empty
+ * or full.
+ */
+ if (!ivc_channel_empty(ivc, ivc->rx_channel)) {
+ return 0;
+ }
+
+ return ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
+}
+
+static inline int32_t ivc_check_write(const struct ivc *ivc)
+{
+ if (ivc->tx_channel->state != ivc_state_established) {
+ return -ECONNRESET;
+ }
+
+ if (!ivc_channel_full(ivc, ivc->tx_channel)) {
+ return 0;
+ }
+
+ return ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
+}
+
+bool tegra_ivc_can_read(const struct ivc *ivc)
+{
+ return ivc_check_read(ivc) == 0;
+}
+
+bool tegra_ivc_can_write(const struct ivc *ivc)
+{
+ return ivc_check_write(ivc) == 0;
+}
+
+bool tegra_ivc_tx_empty(const struct ivc *ivc)
+{
+ return ivc_channel_empty(ivc, ivc->tx_channel);
+}
+
+static inline uintptr_t calc_frame_offset(uint32_t frame_index,
+ uint32_t frame_size, uint32_t frame_offset)
+{
+ return ((uintptr_t)frame_index * (uintptr_t)frame_size) +
+ (uintptr_t)frame_offset;
+}
+
+static void *ivc_frame_pointer(const struct ivc *ivc,
+ volatile const struct ivc_channel_header *ch,
+ uint32_t frame)
+{
+ assert(frame < ivc->nframes);
+ return (void *)((uintptr_t)(&ch[1]) +
+ calc_frame_offset(frame, ivc->frame_size, 0));
+}
+
+int32_t tegra_ivc_read(struct ivc *ivc, void *buf, size_t max_read)
+{
+ const void *src;
+ int32_t result;
+
+ if (buf == NULL) {
+ return -EINVAL;
+ }
+
+ if (max_read > ivc->frame_size) {
+ return -E2BIG;
+ }
+
+ result = ivc_check_read(ivc);
+ if (result != 0) {
+ return result;
+ }
+
+ /*
+ * Order observation of w_pos potentially indicating new data before
+ * data read.
+ */
+ dmbish();
+
+ src = ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
+
+ (void)memcpy(buf, src, max_read);
+
+ ivc_advance_rx(ivc);
+
+ /*
+ * Ensure our write to r_pos occurs before our read from w_pos.
+ */
+ dmbish();
+
+ /*
+ * Notify only upon transition from full to non-full.
+ * The available count can only asynchronously increase, so the
+ * worst possible side-effect will be a spurious notification.
+ */
+ if (ivc_channel_avail_count(ivc, ivc->rx_channel) == (ivc->nframes - (uint32_t)1U)) {
+ ivc->notify(ivc);
+ }
+
+ return (int32_t)max_read;
+}
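+
+/*
+ * Example of the notify condition above: with nframes == 4, draining a full
+ * channel drops the available count from 4 to 3 (nframes - 1) on the first
+ * read, which is exactly the full -> non-full transition, so the peer is
+ * notified once.
+ */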
+
+/* directly peek at the next frame rx'ed */
+void *tegra_ivc_read_get_next_frame(const struct ivc *ivc)
+{
+ if (ivc_check_read(ivc) != 0) {
+ return NULL;
+ }
+
+ /*
+ * Order observation of w_pos potentially indicating new data before
+ * data read.
+ */
+ dmbld();
+
+ return ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
+}
+
+int32_t tegra_ivc_read_advance(struct ivc *ivc)
+{
+ /*
+ * No read barriers or synchronization here: the caller is expected to
+ * have already observed the channel non-empty. This check is just to
+ * catch programming errors.
+ */
+ int32_t result = ivc_check_read(ivc);
+ if (result != 0) {
+ return result;
+ }
+
+ ivc_advance_rx(ivc);
+
+ /*
+ * Ensure our write to r_pos occurs before our read from w_pos.
+ */
+ dmbish();
+
+ /*
+ * Notify only upon transition from full to non-full.
+ * The available count can only asynchronously increase, so the
+ * worst possible side-effect will be a spurious notification.
+ */
+ if (ivc_channel_avail_count(ivc, ivc->rx_channel) == (ivc->nframes - (uint32_t)1U)) {
+ ivc->notify(ivc);
+ }
+
+ return 0;
+}
+
+int32_t tegra_ivc_write(struct ivc *ivc, const void *buf, size_t size)
+{
+ void *p;
+ int32_t result;
+
+ if ((buf == NULL) || (ivc == NULL)) {
+ return -EINVAL;
+ }
+
+ if (size > ivc->frame_size) {
+ return -E2BIG;
+ }
+
+ result = ivc_check_write(ivc);
+ if (result != 0) {
+ return result;
+ }
+
+ p = ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);
+
+ (void)memset(p, 0, ivc->frame_size);
+ (void)memcpy(p, buf, size);
+
+ /*
+ * Ensure that updated data is visible before the w_pos counter
+ * indicates that it is ready.
+ */
+ dmbst();
+
+ ivc_advance_tx(ivc);
+
+ /*
+ * Ensure our write to w_pos occurs before our read from r_pos.
+ */
+ dmbish();
+
+ /*
+ * Notify only upon transition from empty to non-empty.
+ * The available count can only asynchronously decrease, so the
+ * worst possible side-effect will be a spurious notification.
+ */
+ if (ivc_channel_avail_count(ivc, ivc->tx_channel) == 1U) {
+ ivc->notify(ivc);
+ }
+
+ return (int32_t)size;
+}
+
+/* directly poke at the next frame to be tx'ed */
+void *tegra_ivc_write_get_next_frame(const struct ivc *ivc)
+{
+ if (ivc_check_write(ivc) != 0) {
+ return NULL;
+ }
+
+ return ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);
+}
+
+/* advance the tx buffer */
+int32_t tegra_ivc_write_advance(struct ivc *ivc)
+{
+ int32_t result = ivc_check_write(ivc);
+
+ if (result != 0) {
+ return result;
+ }
+
+ /*
+ * Order any possible stores to the frame before update of w_pos.
+ */
+ dmbst();
+
+ ivc_advance_tx(ivc);
+
+ /*
+ * Ensure our write to w_pos occurs before our read from r_pos.
+ */
+ dmbish();
+
+ /*
+ * Notify only upon transition from empty to non-empty.
+ * The available count can only asynchronously decrease, so the
+ * worst possible side-effect will be a spurious notification.
+ */
+ if (ivc_channel_avail_count(ivc, ivc->tx_channel) == (uint32_t)1U) {
+ ivc->notify(ivc);
+ }
+
+ return 0;
+}
+
+void tegra_ivc_channel_reset(const struct ivc *ivc)
+{
+ ivc->tx_channel->state = ivc_state_sync;
+ ivc->notify(ivc);
+}
+
+/*
+ * ===============================================================
+ * IVC State Transition Table - see tegra_ivc_channel_notified()
+ * ===============================================================
+ *
+ * local remote action
+ * ----- ------ -----------------------------------
+ * SYNC EST <none>
+ * SYNC ACK reset counters; move to EST; notify
+ * SYNC SYNC reset counters; move to ACK; notify
+ * ACK EST move to EST; notify
+ * ACK ACK move to EST; notify
+ * ACK SYNC reset counters; move to ACK; notify
+ * EST EST <none>
+ * EST ACK <none>
+ * EST SYNC reset counters; move to ACK; notify
+ *
+ * ===============================================================
+ */
+int32_t tegra_ivc_channel_notified(struct ivc *ivc)
+{
+ uint32_t peer_state;
+
+ /* Copy the receiver's state out of shared memory. */
+ peer_state = ivc->rx_channel->state;
+
+ if (peer_state == (uint32_t)ivc_state_sync) {
+ /*
+ * Order observation of ivc_state_sync before stores clearing
+ * tx_channel.
+ */
+ dmbld();
+
+ /*
+ * Reset tx_channel counters. The remote end is in the SYNC
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx_channel->w_count = 0U;
+ ivc->rx_channel->r_count = 0U;
+
+ ivc->w_pos = 0U;
+ ivc->r_pos = 0U;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ dmbst();
+
+ /*
+ * Move to ACK state. We have just cleared our counters, so it
+ * is now safe for the remote end to start using these values.
+ */
+ ivc->tx_channel->state = ivc_state_ack;
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc);
+
+ } else if ((ivc->tx_channel->state == (uint32_t)ivc_state_sync) &&
+ (peer_state == (uint32_t)ivc_state_ack)) {
+ /*
+ * Order observation of ivc_state_sync before stores clearing
+ * tx_channel.
+ */
+ dmbld();
+
+ /*
+ * Reset tx_channel counters. The remote end is in the ACK
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx_channel->w_count = 0U;
+ ivc->rx_channel->r_count = 0U;
+
+ ivc->w_pos = 0U;
+ ivc->r_pos = 0U;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ dmbst();
+
+ /*
+ * Move to ESTABLISHED state. We know that the remote end has
+ * already cleared its counters, so it is safe to start
+ * writing/reading on this channel.
+ */
+ ivc->tx_channel->state = ivc_state_established;
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc);
+
+ } else if (ivc->tx_channel->state == (uint32_t)ivc_state_ack) {
+ /*
+ * At this point, we have observed the peer to be in either
+ * the ACK or ESTABLISHED state. Next, order observation of
+ * peer state before storing to tx_channel.
+ */
+ dmbld();
+
+ /*
+ * Move to ESTABLISHED state. We know that we have previously
+ * cleared our counters, and we know that the remote end has
+ * cleared its counters, so it is safe to start writing/reading
+ * on this channel.
+ */
+ ivc->tx_channel->state = ivc_state_established;
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc);
+
+ } else {
+ /*
+ * There is no need to handle any further action. Either the
+ * channel is already fully established, or we are waiting for
+ * the remote end to catch up with our current state. Refer
+ * to the diagram in "IVC State Transition Table" above.
+ */
+ }
+
+ return ((ivc->tx_channel->state == (uint32_t)ivc_state_established) ? 0 : -EAGAIN);
+}
+
+size_t tegra_ivc_align(size_t size)
+{
+ return (size + (IVC_ALIGN - 1U)) & ~(IVC_ALIGN - 1U);
+}
+
+size_t tegra_ivc_total_queue_size(size_t queue_size)
+{
+ if ((queue_size & (IVC_ALIGN - 1U)) != 0U) {
+		ERROR("queue_size (%u) must be %u-byte aligned\n",
+			(uint32_t)queue_size, IVC_ALIGN);
+ return 0;
+ }
+ return queue_size + sizeof(struct ivc_channel_header);
+}
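+
+/*
+ * Sizing example, matching the BPMP IPC driver's usage: tegra_ivc_align(128)
+ * returns 128 (already a multiple of IVC_ALIGN), and the channel header adds
+ * 32 * sizeof(uint32_t) = 128 bytes, so tegra_ivc_total_queue_size() yields
+ * 256 bytes for a single-frame queue.
+ */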
+
+static int32_t check_ivc_params(uintptr_t queue_base1, uintptr_t queue_base2,
+ uint32_t nframes, uint32_t frame_size)
+{
+ assert((offsetof(struct ivc_channel_header, w_count)
+ & (IVC_ALIGN - 1U)) == 0U);
+ assert((offsetof(struct ivc_channel_header, r_count)
+ & (IVC_ALIGN - 1U)) == 0U);
+ assert((sizeof(struct ivc_channel_header) & (IVC_ALIGN - 1U)) == 0U);
+
+ if (((uint64_t)nframes * (uint64_t)frame_size) >= 0x100000000ULL) {
+ ERROR("nframes * frame_size overflows\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The headers must at least be aligned enough for counters
+ * to be accessed atomically.
+ */
+ if ((queue_base1 & (IVC_ALIGN - 1U)) != 0U) {
+ ERROR("ivc channel start not aligned: %lx\n", queue_base1);
+ return -EINVAL;
+ }
+ if ((queue_base2 & (IVC_ALIGN - 1U)) != 0U) {
+ ERROR("ivc channel start not aligned: %lx\n", queue_base2);
+ return -EINVAL;
+ }
+
+ if ((frame_size & (IVC_ALIGN - 1U)) != 0U) {
+ ERROR("frame size not adequately aligned: %u\n",
+ frame_size);
+ return -EINVAL;
+ }
+
+ if (queue_base1 < queue_base2) {
+ if ((queue_base1 + ((uint64_t)frame_size * nframes)) > queue_base2) {
+ ERROR("queue regions overlap: %lx + %x, %x\n",
+ queue_base1, frame_size,
+ frame_size * nframes);
+ return -EINVAL;
+ }
+ } else {
+ if ((queue_base2 + ((uint64_t)frame_size * nframes)) > queue_base1) {
+ ERROR("queue regions overlap: %lx + %x, %x\n",
+ queue_base2, frame_size,
+ frame_size * nframes);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
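+
+/*
+ * Example: with nframes == 1 and frame_size == 128, RX/TX bases less than
+ * 128 bytes apart are rejected, since the frame areas would overlap. Note
+ * that the comparison above covers the frame area only.
+ */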
+
+int32_t tegra_ivc_init(struct ivc *ivc, uintptr_t rx_base, uintptr_t tx_base,
+ uint32_t nframes, uint32_t frame_size,
+ ivc_notify_function notify)
+{
+ int32_t result;
+
+ /* sanity check input params */
+ if ((ivc == NULL) || (notify == NULL)) {
+ return -EINVAL;
+ }
+
+ result = check_ivc_params(rx_base, tx_base, nframes, frame_size);
+ if (result != 0) {
+ return result;
+ }
+
+ /*
+ * All sizes that can be returned by communication functions should
+ * fit in a 32-bit integer.
+ */
+ if (frame_size > (1u << 31)) {
+ return -E2BIG;
+ }
+
+ ivc->rx_channel = (struct ivc_channel_header *)rx_base;
+ ivc->tx_channel = (struct ivc_channel_header *)tx_base;
+ ivc->notify = notify;
+ ivc->frame_size = frame_size;
+ ivc->nframes = nframes;
+ ivc->w_pos = 0U;
+ ivc->r_pos = 0U;
+
+ INFO("%s: done\n", __func__);
+
+ return 0;
+}
diff --git a/plat/nvidia/tegra/drivers/bpmp_ipc/ivc.h b/plat/nvidia/tegra/drivers/bpmp_ipc/ivc.h
new file mode 100644
index 0000000..1b31821
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/bpmp_ipc/ivc.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017-2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BPMP_IVC_H
+#define BPMP_IVC_H
+
+#include <lib/utils_def.h>
+#include <stdint.h>
+#include <stddef.h>
+
+#define IVC_ALIGN U(64)
+#define IVC_CHHDR_TX_FIELDS U(16)
+#define IVC_CHHDR_RX_FIELDS U(16)
+
+struct ivc_channel_header;
+
+struct ivc {
+ struct ivc_channel_header *rx_channel;
+ struct ivc_channel_header *tx_channel;
+ uint32_t w_pos;
+ uint32_t r_pos;
+ void (*notify)(const struct ivc *);
+ uint32_t nframes;
+ uint32_t frame_size;
+};
+
+/* callback handler for notify on receiving a response */
+typedef void (*ivc_notify_function)(const struct ivc *);
+
+int32_t tegra_ivc_init(struct ivc *ivc, uintptr_t rx_base, uintptr_t tx_base,
+ uint32_t nframes, uint32_t frame_size,
+ ivc_notify_function notify);
+size_t tegra_ivc_total_queue_size(size_t queue_size);
+size_t tegra_ivc_align(size_t size);
+int32_t tegra_ivc_channel_notified(struct ivc *ivc);
+void tegra_ivc_channel_reset(const struct ivc *ivc);
+int32_t tegra_ivc_write_advance(struct ivc *ivc);
+void *tegra_ivc_write_get_next_frame(const struct ivc *ivc);
+int32_t tegra_ivc_write(struct ivc *ivc, const void *buf, size_t size);
+int32_t tegra_ivc_read_advance(struct ivc *ivc);
+void *tegra_ivc_read_get_next_frame(const struct ivc *ivc);
+int32_t tegra_ivc_read(struct ivc *ivc, void *buf, size_t max_read);
+bool tegra_ivc_tx_empty(const struct ivc *ivc);
+bool tegra_ivc_can_write(const struct ivc *ivc);
+bool tegra_ivc_can_read(const struct ivc *ivc);
+
+#endif /* BPMP_IVC_H */
diff --git a/plat/nvidia/tegra/drivers/flowctrl/flowctrl.c b/plat/nvidia/tegra/drivers/flowctrl/flowctrl.c
new file mode 100644
index 0000000..4c9f4af
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/flowctrl/flowctrl.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <cortex_a53.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <flowctrl.h>
+#include <lib/utils_def.h>
+#include <pmc.h>
+#include <tegra_def.h>
+
+#define CLK_RST_DEV_L_SET 0x300
+#define CLK_RST_DEV_L_CLR 0x304
+#define CLK_BPMP_RST (1 << 1)
+
+#define EVP_BPMP_RESET_VECTOR 0x200
+
+static const uint64_t flowctrl_offset_cpu_csr[4] = {
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU0_CSR),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU1_CSR),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU1_CSR + 8),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU1_CSR + 16)
+};
+
+static const uint64_t flowctrl_offset_halt_cpu[4] = {
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU0_EVENTS),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU1_EVENTS),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU1_EVENTS + 8),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU1_EVENTS + 16)
+};
+
+static const uint64_t flowctrl_offset_cc4_ctrl[4] = {
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL + 4),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL + 8),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL + 12)
+};
+
+/*
+ * Each helper below reads the register back after writing it, a common
+ * idiom to ensure the write has reached the flow controller before the
+ * caller proceeds.
+ */
+static inline void tegra_fc_cc4_ctrl(int cpu_id, uint32_t val)
+{
+	mmio_write_32(flowctrl_offset_cc4_ctrl[cpu_id], val);
+	val = mmio_read_32(flowctrl_offset_cc4_ctrl[cpu_id]);
+}
+
+static inline void tegra_fc_cpu_csr(int cpu_id, uint32_t val)
+{
+	mmio_write_32(flowctrl_offset_cpu_csr[cpu_id], val);
+	val = mmio_read_32(flowctrl_offset_cpu_csr[cpu_id]);
+}
+
+static inline void tegra_fc_halt_cpu(int cpu_id, uint32_t val)
+{
+	mmio_write_32(flowctrl_offset_halt_cpu[cpu_id], val);
+	val = mmio_read_32(flowctrl_offset_halt_cpu[cpu_id]);
+}
+
+static void tegra_fc_prepare_suspend(int cpu_id, uint32_t csr)
+{
+ uint32_t val;
+
+ val = FLOWCTRL_HALT_GIC_IRQ | FLOWCTRL_HALT_GIC_FIQ |
+ FLOWCTRL_HALT_LIC_IRQ | FLOWCTRL_HALT_LIC_FIQ |
+ FLOWCTRL_WAITEVENT;
+ tegra_fc_halt_cpu(cpu_id, val);
+
+ val = FLOWCTRL_CSR_INTR_FLAG | FLOWCTRL_CSR_EVENT_FLAG |
+ FLOWCTRL_CSR_ENABLE | (FLOWCTRL_WAIT_WFI_BITMAP << cpu_id);
+ tegra_fc_cpu_csr(cpu_id, val | csr);
+}
+
+/*******************************************************************************
+ * After this, no core can wake from C7 until the action is reverted.
+ * If a wake up event is asserted, the FC state machine will stall until
+ * the action is reverted.
+ ******************************************************************************/
+void tegra_fc_ccplex_pgexit_lock(void)
+{
+ unsigned int i, cpu = read_mpidr() & MPIDR_CPU_MASK;
+ uint32_t flags = tegra_fc_read_32(FLOWCTRL_FC_SEQ_INTERCEPT) & ~INTERCEPT_IRQ_PENDING;
+ uint32_t icept_cpu_flags[] = {
+ INTERCEPT_EXIT_PG_CORE0,
+ INTERCEPT_EXIT_PG_CORE1,
+ INTERCEPT_EXIT_PG_CORE2,
+ INTERCEPT_EXIT_PG_CORE3
+ };
+
+ /* set the intercept flags */
+ for (i = 0; i < ARRAY_SIZE(icept_cpu_flags); i++) {
+
+		/* skip current CPU */
+		if (i == cpu) {
+			continue;
+		}
+
+ /* enable power gate exit intercept locks */
+ flags |= icept_cpu_flags[i];
+ }
+
+ tegra_fc_write_32(FLOWCTRL_FC_SEQ_INTERCEPT, flags);
+ (void)tegra_fc_read_32(FLOWCTRL_FC_SEQ_INTERCEPT);
+}
+
+/*******************************************************************************
+ * Revert the ccplex powergate exit locks
+ ******************************************************************************/
+void tegra_fc_ccplex_pgexit_unlock(void)
+{
+ /* clear lock bits, clear pending interrupts */
+ tegra_fc_write_32(FLOWCTRL_FC_SEQ_INTERCEPT, INTERCEPT_IRQ_PENDING);
+ (void)tegra_fc_read_32(FLOWCTRL_FC_SEQ_INTERCEPT);
+}
+
+/*******************************************************************************
+ * Power down the current CPU
+ ******************************************************************************/
+void tegra_fc_cpu_powerdn(uint32_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+
+ VERBOSE("CPU%d powering down...\n", cpu);
+ tegra_fc_prepare_suspend(cpu, 0);
+}
+
+/*******************************************************************************
+ * Suspend the current CPU cluster
+ ******************************************************************************/
+void tegra_fc_cluster_idle(uint32_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+ uint32_t val;
+
+ VERBOSE("Entering cluster idle state...\n");
+
+ tegra_fc_cc4_ctrl(cpu, 0);
+
+ /* hardware L2 flush is faster for A53 only */
+ tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL,
+ !!MPIDR_AFFLVL1_VAL(mpidr));
+
+ /* suspend the CPU cluster */
+ val = FLOWCTRL_PG_CPU_NONCPU << FLOWCTRL_ENABLE_EXT;
+ tegra_fc_prepare_suspend(cpu, val);
+}
+
+/*******************************************************************************
+ * Power down the current CPU cluster
+ ******************************************************************************/
+void tegra_fc_cluster_powerdn(uint32_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+ uint32_t val;
+
+ VERBOSE("Entering cluster powerdn state...\n");
+
+ tegra_fc_cc4_ctrl(cpu, 0);
+
+ /* hardware L2 flush is faster for A53 only */
+ tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL,
+ read_midr() == CORTEX_A53_MIDR);
+
+ /* power down the CPU cluster */
+ val = FLOWCTRL_TURNOFF_CPURAIL << FLOWCTRL_ENABLE_EXT;
+ tegra_fc_prepare_suspend(cpu, val);
+}
+
+/*******************************************************************************
+ * Check if cluster idle or power down state is allowed from this CPU
+ ******************************************************************************/
+bool tegra_fc_is_ccx_allowed(void)
+{
+ unsigned int i, cpu = read_mpidr() & MPIDR_CPU_MASK;
+ uint32_t val;
+ bool ccx_allowed = true;
+
+ for (i = 0; i < ARRAY_SIZE(flowctrl_offset_cpu_csr); i++) {
+
+ /* skip current CPU */
+ if (i == cpu)
+ continue;
+
+ /* check if all other CPUs are already halted */
+ val = mmio_read_32(flowctrl_offset_cpu_csr[i]);
+ if ((val & FLOWCTRL_CSR_HALT_MASK) == 0U) {
+ ccx_allowed = false;
+ }
+ }
+
+ return ccx_allowed;
+}
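+
+/*
+ * Caller sketch (hypothetical, for illustration only): a platform would
+ * typically gate a cluster-wide state request on this check so that it is
+ * only issued from the last active CPU:
+ *
+ *	if (tegra_fc_is_ccx_allowed()) {
+ *		tegra_fc_cluster_idle((uint32_t)read_mpidr());
+ *	}
+ */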
+
+/*******************************************************************************
+ * Suspend the entire SoC
+ ******************************************************************************/
+void tegra_fc_soc_powerdn(uint32_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+ uint32_t val;
+
+ VERBOSE("Entering SoC powerdn state...\n");
+
+ tegra_fc_cc4_ctrl(cpu, 0);
+
+ tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL, 1);
+
+ val = FLOWCTRL_TURNOFF_CPURAIL << FLOWCTRL_ENABLE_EXT;
+ tegra_fc_prepare_suspend(cpu, val);
+
+ /* overwrite HALT register */
+ tegra_fc_halt_cpu(cpu, FLOWCTRL_WAITEVENT);
+}
+
+/*******************************************************************************
+ * Power up the CPU
+ ******************************************************************************/
+void tegra_fc_cpu_on(int cpu)
+{
+ tegra_fc_cpu_csr(cpu, FLOWCTRL_CSR_ENABLE);
+ tegra_fc_halt_cpu(cpu, FLOWCTRL_WAITEVENT | FLOWCTRL_HALT_SCLK);
+}
+
+/*******************************************************************************
+ * Power down the CPU
+ ******************************************************************************/
+void tegra_fc_cpu_off(int cpu)
+{
+ uint32_t val;
+
+ /*
+ * Flow controller powers down the CPU during wfi. The CPU would be
+ * powered on when it receives any interrupt.
+ */
+ val = FLOWCTRL_CSR_INTR_FLAG | FLOWCTRL_CSR_EVENT_FLAG |
+ FLOWCTRL_CSR_ENABLE | (FLOWCTRL_WAIT_WFI_BITMAP << cpu);
+ tegra_fc_cpu_csr(cpu, val);
+ tegra_fc_halt_cpu(cpu, FLOWCTRL_WAITEVENT);
+ tegra_fc_cc4_ctrl(cpu, 0);
+}
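+
+/*
+ * Caller sketch (hypothetical): a platform PSCI implementation would
+ * typically drive these helpers from its power-domain hooks, e.g. calling
+ * tegra_fc_cpu_on(cpu) from its pwr_domain_on() handler and
+ * tegra_fc_cpu_off(cpu) from pwr_domain_off().
+ */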
+
+/*******************************************************************************
+ * Inform the BPMP that we have completed the cluster power up
+ ******************************************************************************/
+void tegra_fc_lock_active_cluster(void)
+{
+ uint32_t val;
+
+ val = tegra_fc_read_32(FLOWCTRL_BPMP_CLUSTER_CONTROL);
+ val |= FLOWCTRL_BPMP_CLUSTER_PWRON_LOCK;
+ tegra_fc_write_32(FLOWCTRL_BPMP_CLUSTER_CONTROL, val);
+ val = tegra_fc_read_32(FLOWCTRL_BPMP_CLUSTER_CONTROL);
+}
+
+/*******************************************************************************
+ * Power ON BPMP processor
+ ******************************************************************************/
+void tegra_fc_bpmp_on(uint32_t entrypoint)
+{
+ /* halt BPMP */
+ tegra_fc_write_32(FLOWCTRL_HALT_BPMP_EVENTS, FLOWCTRL_WAITEVENT);
+
+ /* Assert BPMP reset */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + CLK_RST_DEV_L_SET, CLK_BPMP_RST);
+
+ /* Set reset address (stored in PMC_SCRATCH39) */
+ mmio_write_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR, entrypoint);
+ while (entrypoint != mmio_read_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR))
+		; /* wait until the write to EVP_BPMP_RESET_VECTOR takes effect */
+
+ /* Wait for 2us before de-asserting the reset signal. */
+ udelay(2);
+
+ /* De-assert BPMP reset */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + CLK_RST_DEV_L_CLR, CLK_BPMP_RST);
+
+ /* Un-halt BPMP */
+ tegra_fc_write_32(FLOWCTRL_HALT_BPMP_EVENTS, 0);
+}
+
+/*******************************************************************************
+ * Power OFF BPMP processor
+ ******************************************************************************/
+void tegra_fc_bpmp_off(void)
+{
+ /* halt BPMP */
+ tegra_fc_write_32(FLOWCTRL_HALT_BPMP_EVENTS, FLOWCTRL_WAITEVENT);
+
+ /* Assert BPMP reset */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + CLK_RST_DEV_L_SET, CLK_BPMP_RST);
+
+ /* Clear reset address */
+ mmio_write_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR, 0);
+ while (0 != mmio_read_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR))
+		; /* wait until EVP_BPMP_RESET_VECTOR reads back as zero */
+}
+
+/*******************************************************************************
+ * Route legacy FIQ to the GICD
+ ******************************************************************************/
+void tegra_fc_enable_fiq_to_ccplex_routing(void)
+{
+ uint32_t val = tegra_fc_read_32(FLOW_CTLR_FLOW_DBG_QUAL);
+
+ /* set the bit to pass FIQs to the GICD */
+ tegra_fc_write_32(FLOW_CTLR_FLOW_DBG_QUAL, val | FLOWCTRL_FIQ2CCPLEX_ENABLE);
+}
+
+/*******************************************************************************
+ * Disable routing legacy FIQ to the GICD
+ ******************************************************************************/
+void tegra_fc_disable_fiq_to_ccplex_routing(void)
+{
+ uint32_t val = tegra_fc_read_32(FLOW_CTLR_FLOW_DBG_QUAL);
+
+ /* clear the bit to pass FIQs to the GICD */
+ tegra_fc_write_32(FLOW_CTLR_FLOW_DBG_QUAL, val & ~FLOWCTRL_FIQ2CCPLEX_ENABLE);
+}
diff --git a/plat/nvidia/tegra/drivers/gpcdma/gpcdma.c b/plat/nvidia/tegra/drivers/gpcdma/gpcdma.c
new file mode 100644
index 0000000..d68cdfd
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/gpcdma/gpcdma.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <errno.h>
+#include <gpcdma.h>
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <tegra_def.h>
+
+/* DMA channel registers */
+#define DMA_CH_CSR U(0x0)
+#define DMA_CH_CSR_WEIGHT_SHIFT U(10)
+#define DMA_CH_CSR_XFER_MODE_SHIFT U(21)
+#define DMA_CH_CSR_DMA_MODE_MEM2MEM U(4)
+#define DMA_CH_CSR_DMA_MODE_FIXEDPATTERN U(6)
+#define DMA_CH_CSR_IRQ_MASK_ENABLE (U(1) << 15)
+#define DMA_CH_CSR_RUN_ONCE (U(1) << 27)
+#define DMA_CH_CSR_ENABLE (U(1) << 31)
+
+#define DMA_CH_STAT U(0x4)
+#define DMA_CH_STAT_BUSY (U(1) << 31)
+
+#define DMA_CH_SRC_PTR U(0xC)
+
+#define DMA_CH_DST_PTR U(0x10)
+
+#define DMA_CH_HI_ADR_PTR U(0x14)
+#define DMA_CH_HI_ADR_PTR_SRC_MASK U(0xFF)
+#define DMA_CH_HI_ADR_PTR_DST_SHIFT U(16)
+#define DMA_CH_HI_ADR_PTR_DST_MASK U(0xFF)
+
+#define DMA_CH_MC_SEQ U(0x18)
+#define DMA_CH_MC_SEQ_REQ_CNT_SHIFT U(25)
+#define DMA_CH_MC_SEQ_REQ_CNT_VAL U(0x10)
+#define DMA_CH_MC_SEQ_BURST_SHIFT U(23)
+#define DMA_CH_MC_SEQ_BURST_16_WORDS U(0x3)
+
+#define DMA_CH_WORD_COUNT U(0x20)
+#define DMA_CH_FIXED_PATTERN U(0x34)
+#define DMA_CH_TZ U(0x38)
+#define DMA_CH_TZ_ACCESS_ENABLE U(0)
+#define DMA_CH_TZ_ACCESS_DISABLE U(3)
+
+#define MAX_TRANSFER_SIZE (1U*1024U*1024U*1024U) /* 1GB */
+#define GPCDMA_TIMEOUT_MS U(100)
+#define GPCDMA_RESET_BIT (U(1) << 1)
+
+static bool init_done;
+
+static void tegra_gpcdma_write32(uint32_t offset, uint32_t val)
+{
+ mmio_write_32(TEGRA_GPCDMA_BASE + offset, val);
+}
+
+static uint32_t tegra_gpcdma_read32(uint32_t offset)
+{
+ return mmio_read_32(TEGRA_GPCDMA_BASE + offset);
+}
+
+static void tegra_gpcdma_init(void)
+{
+ /* assert reset for DMA engine */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_GPCDMA_RST_SET_REG_OFFSET,
+ GPCDMA_RESET_BIT);
+
+ udelay(2);
+
+ /* de-assert reset for DMA engine */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_GPCDMA_RST_CLR_REG_OFFSET,
+ GPCDMA_RESET_BIT);
+}
+
+static void tegra_gpcdma_memcpy_priv(uint64_t dst_addr, uint64_t src_addr,
+ uint32_t num_bytes, uint32_t mode)
+{
+ uint32_t val, timeout = 0;
+ int32_t ret = 0;
+
+ /* sanity check byte count */
+ if ((num_bytes > MAX_TRANSFER_SIZE) || ((num_bytes & 0x3U) != U(0))) {
+ ret = -EINVAL;
+ }
+
+ /* initialise GPCDMA block */
+ if (!init_done) {
+ tegra_gpcdma_init();
+ init_done = true;
+ }
+
+ /* make sure channel isn't busy */
+ val = tegra_gpcdma_read32(DMA_CH_STAT);
+ if ((val & DMA_CH_STAT_BUSY) == DMA_CH_STAT_BUSY) {
+ ERROR("DMA channel is busy\n");
+ ret = -EBUSY;
+ }
+
+ if (ret == 0) {
+
+ /* disable any DMA transfers */
+ tegra_gpcdma_write32(DMA_CH_CSR, 0);
+
+ /* enable DMA access to TZDRAM */
+ tegra_gpcdma_write32(DMA_CH_TZ, DMA_CH_TZ_ACCESS_ENABLE);
+
+ /* configure MC sequencer */
+ val = (DMA_CH_MC_SEQ_REQ_CNT_VAL << DMA_CH_MC_SEQ_REQ_CNT_SHIFT) |
+ (DMA_CH_MC_SEQ_BURST_16_WORDS << DMA_CH_MC_SEQ_BURST_SHIFT);
+ tegra_gpcdma_write32(DMA_CH_MC_SEQ, val);
+
+ /* reset fixed pattern */
+ tegra_gpcdma_write32(DMA_CH_FIXED_PATTERN, 0);
+
+ /* populate src and dst address registers */
+ tegra_gpcdma_write32(DMA_CH_SRC_PTR, (uint32_t)src_addr);
+ tegra_gpcdma_write32(DMA_CH_DST_PTR, (uint32_t)dst_addr);
+
+ val = (uint32_t)((src_addr >> 32) & DMA_CH_HI_ADR_PTR_SRC_MASK);
+ val |= (uint32_t)(((dst_addr >> 32) & DMA_CH_HI_ADR_PTR_DST_MASK) <<
+ DMA_CH_HI_ADR_PTR_DST_SHIFT);
+ tegra_gpcdma_write32(DMA_CH_HI_ADR_PTR, val);
+
+ /* transfer size (in words) */
+ tegra_gpcdma_write32(DMA_CH_WORD_COUNT, ((num_bytes >> 2) - 1U));
+
+ /* populate value for CSR */
+ val = (mode << DMA_CH_CSR_XFER_MODE_SHIFT) |
+ DMA_CH_CSR_RUN_ONCE | (U(1) << DMA_CH_CSR_WEIGHT_SHIFT) |
+ DMA_CH_CSR_IRQ_MASK_ENABLE;
+ tegra_gpcdma_write32(DMA_CH_CSR, val);
+
+ /* enable transfer */
+ val = tegra_gpcdma_read32(DMA_CH_CSR);
+ val |= DMA_CH_CSR_ENABLE;
+ tegra_gpcdma_write32(DMA_CH_CSR, val);
+
+ /* wait till transfer completes */
+ do {
+
+ /* read the status */
+ val = tegra_gpcdma_read32(DMA_CH_STAT);
+ if ((val & DMA_CH_STAT_BUSY) != DMA_CH_STAT_BUSY) {
+ break;
+ }
+
+ mdelay(1);
+ timeout++;
+
+ } while (timeout < GPCDMA_TIMEOUT_MS);
+
+ /* flag timeout error */
+ if (timeout == GPCDMA_TIMEOUT_MS) {
+ ERROR("DMA transfer timed out\n");
+ }
+
+ dsbsy();
+
+ /* disable DMA access to TZDRAM */
+ tegra_gpcdma_write32(DMA_CH_TZ, DMA_CH_TZ_ACCESS_DISABLE);
+ isb();
+ }
+}
+
+/*******************************************************************************
+ * Memcpy using GPCDMA block (Mem2Mem copy)
+ ******************************************************************************/
+void tegra_gpcdma_memcpy(uint64_t dst_addr, uint64_t src_addr,
+ uint32_t num_bytes)
+{
+ tegra_gpcdma_memcpy_priv(dst_addr, src_addr, num_bytes,
+ DMA_CH_CSR_DMA_MODE_MEM2MEM);
+}
+
+/*******************************************************************************
+ * Memset using GPCDMA block (Fixed pattern write)
+ ******************************************************************************/
+void tegra_gpcdma_zeromem(uint64_t dst_addr, uint32_t num_bytes)
+{
+ tegra_gpcdma_memcpy_priv(dst_addr, 0, num_bytes,
+ DMA_CH_CSR_DMA_MODE_FIXEDPATTERN);
+}
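+
+/*
+ * Usage sketch (illustrative; the addresses are made up): both entry
+ * points expect num_bytes to be a multiple of 4 bytes and at most 1GB.
+ *
+ *	tegra_gpcdma_memcpy(0x80000000ULL, 0x90000000ULL, 4096U);
+ *	tegra_gpcdma_zeromem(0x80000000ULL, 4096U);
+ */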
diff --git a/plat/nvidia/tegra/drivers/memctrl/memctrl_v1.c b/plat/nvidia/tegra/drivers/memctrl/memctrl_v1.c
new file mode 100644
index 0000000..b3dcd2a
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/memctrl/memctrl_v1.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <memctrl.h>
+#include <memctrl_v1.h>
+#include <tegra_def.h>
+
+/* Video Memory base and size (live values) */
+static uint64_t video_mem_base;
+static uint64_t video_mem_size;
+
+/*
+ * Init Memory controller and SMMU during boot.
+ */
+void tegra_memctrl_setup(void)
+{
+	/*
+	 * Setup the Memory controller to allow SMMU translations for
+	 * all clients and restore the Video Memory carveout
+	 */
+ INFO("Tegra Memory Controller (v1)\n");
+
+ /* allow translations for all MC engines */
+ tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_0_0,
+ (unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+ tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_1_0,
+ (unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+ tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_2_0,
+ (unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+ tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_3_0,
+ (unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+ tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_4_0,
+ (unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+
+ tegra_mc_write_32(MC_SMMU_ASID_SECURITY_0, MC_SMMU_ASID_SECURITY);
+
+ tegra_mc_write_32(MC_SMMU_TLB_CONFIG_0, MC_SMMU_TLB_CONFIG_0_RESET_VAL);
+ tegra_mc_write_32(MC_SMMU_PTC_CONFIG_0, MC_SMMU_PTC_CONFIG_0_RESET_VAL);
+
+ /* flush PTC and TLB */
+ tegra_mc_write_32(MC_SMMU_PTC_FLUSH_0, MC_SMMU_PTC_FLUSH_ALL);
+ (void)tegra_mc_read_32(MC_SMMU_CONFIG_0); /* read to flush writes */
+ tegra_mc_write_32(MC_SMMU_TLB_FLUSH_0, MC_SMMU_TLB_FLUSH_ALL);
+
+ /* enable SMMU */
+ tegra_mc_write_32(MC_SMMU_CONFIG_0,
+ MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE);
+ (void)tegra_mc_read_32(MC_SMMU_CONFIG_0); /* read to flush writes */
+
+ /* video memory carveout */
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+ (uint32_t)(video_mem_base >> 32));
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)video_mem_base);
+	tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, (uint32_t)video_mem_size);
+}
+
+/*
+ * Restore Memory Controller settings after "System Suspend"
+ */
+void tegra_memctrl_restore_settings(void)
+{
+ tegra_memctrl_setup();
+}
+
+/*
+ * Secure the BL31 DRAM aperture.
+ *
+ * phys_base = physical base of TZDRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ /*
+ * Setup the Memory controller to allow only secure accesses to
+ * the TZDRAM carveout
+ */
+ INFO("Configuring TrustZone DRAM Memory Carveout\n");
+
+	tegra_mc_write_32(MC_SECURITY_CFG0_0, (uint32_t)phys_base);
+ tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);
+}
+
+static void tegra_clear_videomem(uintptr_t non_overlap_area_start,
+ unsigned long long non_overlap_area_size)
+{
+ int ret;
+
+ /*
+ * Map the NS memory first, clean it and then unmap it.
+ */
+ ret = mmap_add_dynamic_region(non_overlap_area_start, /* PA */
+ non_overlap_area_start, /* VA */
+ non_overlap_area_size, /* size */
+ MT_NS | MT_RW | MT_EXECUTE_NEVER |
+ MT_NON_CACHEABLE); /* attrs */
+ assert(ret == 0);
+
+ zeromem((void *)non_overlap_area_start, non_overlap_area_size);
+ flush_dcache_range(non_overlap_area_start, non_overlap_area_size);
+
+ mmap_remove_dynamic_region(non_overlap_area_start,
+ non_overlap_area_size);
+}
+
+/*
+ * Program the Video Memory carveout region
+ *
+ * phys_base = physical base of aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ uintptr_t vmem_end_old = video_mem_base + (video_mem_size << 20);
+ uintptr_t vmem_end_new = phys_base + size_in_bytes;
+ unsigned long long non_overlap_area_size;
+
+ /*
+ * Setup the Memory controller to restrict CPU accesses to the Video
+ * Memory region
+ */
+ INFO("Configuring Video Memory Carveout\n");
+
+ /*
+ * Configure Memory Controller directly for the first time.
+ */
+ if (video_mem_base == 0)
+ goto done;
+
+ /*
+ * Clear the old regions now being exposed. The following cases
+ * can occur -
+ *
+ * 1. clear whole old region (no overlap with new region)
+ * 2. clear old sub-region below new base
+ * 3. clear old sub-region above new end
+ */
+ INFO("Cleaning previous Video Memory Carveout\n");
+
+ if (phys_base > vmem_end_old || video_mem_base > vmem_end_new) {
+ tegra_clear_videomem(video_mem_base, video_mem_size << 20);
+ } else {
+ if (video_mem_base < phys_base) {
+ non_overlap_area_size = phys_base - video_mem_base;
+ tegra_clear_videomem(video_mem_base, non_overlap_area_size);
+ }
+ if (vmem_end_old > vmem_end_new) {
+ non_overlap_area_size = vmem_end_old - vmem_end_new;
+ tegra_clear_videomem(vmem_end_new, non_overlap_area_size);
+ }
+ }
+
+done:
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI, (uint32_t)(phys_base >> 32));
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)phys_base);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes >> 20);
+
+ /* store new values */
+ video_mem_base = phys_base;
+ video_mem_size = size_in_bytes >> 20;
+}
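+
+/*
+ * Worked example (addresses are made up): with an old carveout at
+ * 0x80000000..0x90000000 and a new one at 0x88000000..0x98000000,
+ * neither disjoint-region check fires, so only the old sub-region
+ * 0x80000000..0x88000000 below the new base is cleared (case 2 above).
+ */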
+
+/*
+ * During boot, USB3 and flash media (SDMMC/SATA) devices need access to
+ * IRAM. Because these clients connect to the MC and do not have a direct
+ * path to the IRAM, the MC implements AHB redirection during boot to
+ * provide one. In this mode, accesses to a programmed memory address aperture
+ * are directed to the AHB bus, allowing access to the IRAM. The AHB aperture
+ * is defined by the IRAM_BASE_LO and IRAM_BASE_HI registers, which are
+ * initialized to disable this aperture.
+ *
+ * Once bootup is complete, we must program IRAM base to 0xffffffff and
+ * IRAM top to 0x00000000, thus disabling access to IRAM. DRAM is then
+ * potentially accessible in this address range. These aperture registers
+ * also have an access_control/lock bit. After disabling the aperture, the
+ * access_control register should be programmed to lock the registers.
+ */
+void tegra_memctrl_disable_ahb_redirection(void)
+{
+ /* program the aperture registers */
+ tegra_mc_write_32(MC_IRAM_BASE_LO, 0xFFFFFFFF);
+ tegra_mc_write_32(MC_IRAM_TOP_LO, 0);
+ tegra_mc_write_32(MC_IRAM_BASE_TOP_HI, 0);
+
+ /* lock the aperture registers */
+ tegra_mc_write_32(MC_IRAM_REG_CTRL, MC_DISABLE_IRAM_CFG_WRITES);
+}
+
+void tegra_memctrl_clear_pending_interrupts(void)
+{
+ uint32_t mcerr;
+
+ /* check if there are any pending interrupts */
+ mcerr = mmio_read_32(TEGRA_MC_BASE + MC_INTSTATUS);
+
+ if (mcerr != (uint32_t)0U) { /* should not see error here */
+ WARN("MC_INTSTATUS = 0x%x (should be zero)\n", mcerr);
+ mmio_write_32((TEGRA_MC_BASE + MC_INTSTATUS), mcerr);
+ }
+}
diff --git a/plat/nvidia/tegra/drivers/memctrl/memctrl_v2.c b/plat/nvidia/tegra/drivers/memctrl/memctrl_v2.c
new file mode 100644
index 0000000..0644fd2
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/memctrl/memctrl_v2.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <mce.h>
+#include <memctrl.h>
+#include <memctrl_v2.h>
+#include <smmu.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/* Video Memory base and size (live values) */
+static uint64_t video_mem_base;
+static uint64_t video_mem_size_mb;
+
+/*
+ * Init Memory controller during boot.
+ */
+void tegra_memctrl_setup(void)
+{
+ INFO("Tegra Memory Controller (v2)\n");
+
+ /* Initialize the System memory management unit */
+ tegra_smmu_init();
+
+ /* allow platforms to program custom memory controller settings */
+ plat_memctrl_setup();
+
+ /*
+ * All requests at boot time, and certain requests during
+ * normal run time, are physically addressed and must bypass
+ * the SMMU. The client hub logic implements a hardware bypass
+ * path around the Translation Buffer Units (TBU). During
+ * boot-time, the SMMU_BYPASS_CTRL register (which defaults to
+ * TBU_BYPASS mode) will be used to steer all requests around
+ * the uninitialized TBUs. During normal operation, this register
+ * is locked into TBU_BYPASS_SID config, which routes requests
+ * with special StreamID 0x7f on the bypass path and all others
+ * through the selected TBU. This is done to disable SMMU Bypass
+ * mode, as it could be used to circumvent SMMU security checks.
+ */
+ tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG,
+ MC_SMMU_BYPASS_CONFIG_SETTINGS);
+}
+
+/*
+ * Restore Memory Controller settings after "System Suspend"
+ */
+void tegra_memctrl_restore_settings(void)
+{
+ /* restore platform's memory controller settings */
+ plat_memctrl_restore();
+
+ /* video memory carveout region */
+ if (video_mem_base != 0ULL) {
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO,
+ (uint32_t)video_mem_base);
+ assert(tegra_mc_read_32(MC_VIDEO_PROTECT_BASE_LO)
+ == (uint32_t)video_mem_base);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+ (uint32_t)(video_mem_base >> 32));
+ assert(tegra_mc_read_32(MC_VIDEO_PROTECT_BASE_HI)
+ == (uint32_t)(video_mem_base >> 32));
+ tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB,
+ (uint32_t)video_mem_size_mb);
+ assert(tegra_mc_read_32(MC_VIDEO_PROTECT_SIZE_MB)
+ == (uint32_t)video_mem_size_mb);
+
+ /*
+ * MCE propagates the VideoMem configuration values across the
+ * CCPLEX.
+ */
+ mce_update_gsc_videomem();
+ }
+}
+
+/*
+ * Secure the BL31 DRAM aperture.
+ *
+ * phys_base = physical base of TZDRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ /*
+ * Perform platform specific steps.
+ */
+ plat_memctrl_tzdram_setup(phys_base, size_in_bytes);
+}
+
+/*
+ * Secure the BL31 TZRAM aperture.
+ *
+ * phys_base = physical base of TZRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ ; /* do nothing */
+}
+
+/*
+ * Save MC settings before "System Suspend" to TZDRAM
+ */
+void tegra_mc_save_context(uint64_t mc_ctx_addr)
+{
+ uint32_t i, num_entries = 0;
+ mc_regs_t *mc_ctx_regs;
+ const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ uint64_t tzdram_base = params_from_bl2->tzdram_base;
+ uint64_t tzdram_end = tzdram_base + params_from_bl2->tzdram_size;
+
+ assert((mc_ctx_addr >= tzdram_base) && (mc_ctx_addr <= tzdram_end));
+
+ /* get MC context table */
+ mc_ctx_regs = plat_memctrl_get_sys_suspend_ctx();
+ assert(mc_ctx_regs != NULL);
+
+	/*
+	 * mc_ctx_regs[0].val holds the number of entries in the context
+	 * table, excluding the terminating entry. Count the entries and
+	 * sanity check the table size before starting the context save.
+	 */
+ while (mc_ctx_regs[num_entries].reg != 0xFFFFFFFFU) {
+ num_entries++;
+ }
+
+ /* panic if the sizes do not match */
+ if (num_entries != mc_ctx_regs[0].val) {
+ ERROR("MC context size mismatch!");
+ panic();
+ }
+
+ /* save MC register values */
+ for (i = 1U; i < num_entries; i++) {
+ mc_ctx_regs[i].val = mmio_read_32(mc_ctx_regs[i].reg);
+ }
+
+ /* increment by 1 to take care of the last entry */
+ num_entries++;
+
+ /* Save MC config settings */
+ (void)memcpy((void *)mc_ctx_addr, mc_ctx_regs,
+ sizeof(mc_regs_t) * num_entries);
+
+ /* save the MC table address */
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_MC_TABLE_ADDR_LO,
+ (uint32_t)mc_ctx_addr);
+ assert(mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_MC_TABLE_ADDR_LO)
+ == (uint32_t)mc_ctx_addr);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_MC_TABLE_ADDR_HI,
+ (uint32_t)(mc_ctx_addr >> 32));
+ assert(mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_MC_TABLE_ADDR_HI)
+ == (uint32_t)(mc_ctx_addr >> 32));
+}
+
+static void tegra_lock_videomem_nonoverlap(uint64_t phys_base,
+ uint64_t size_in_bytes)
+{
+ uint32_t index;
+ uint64_t total_128kb_blocks = size_in_bytes >> 17;
+ uint64_t residual_4kb_blocks = (size_in_bytes & (uint32_t)0x1FFFF) >> 12;
+ uint64_t val;
+
+ /*
+ * Reset the access configuration registers to restrict access to
+ * old Videomem aperture
+ */
+ for (index = MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0;
+ index < ((uint32_t)MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0 + (uint32_t)MC_GSC_CONFIG_REGS_SIZE);
+ index += 4U) {
+ tegra_mc_write_32(index, 0);
+ }
+
+ /*
+ * Set the base. It must be 4k aligned, at least.
+ */
+ assert((phys_base & (uint64_t)0xFFF) == 0U);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_LO, (uint32_t)phys_base);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_HI,
+ (uint32_t)(phys_base >> 32) & (uint32_t)MC_GSC_BASE_HI_MASK);
+
+ /*
+ * Set the aperture size
+ *
+ * total size = (number of 128KB blocks) + (number of remaining 4KB
+ * blocks)
+ *
+ */
+ val = (uint32_t)((residual_4kb_blocks << MC_GSC_SIZE_RANGE_4KB_SHIFT) |
+ total_128kb_blocks);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_SIZE, (uint32_t)val);
+
+ /*
+ * Lock the configuration settings by enabling TZ-only lock and
+ * locking the configuration against any future changes from NS
+ * world.
+ */
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_CFG,
+ (uint32_t)MC_GSC_ENABLE_TZ_LOCK_BIT);
+
+	/*
+	 * The MCE propagates the GSC configuration values across the
+	 * CCPLEX once the caller updates the VideoMem settings.
+	 */
+}
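+
+/*
+ * Worked example (illustrative): for size_in_bytes = 0x23000 (140KB),
+ * total_128kb_blocks = 0x23000 >> 17 = 1 and residual_4kb_blocks =
+ * (0x23000 & 0x1FFFF) >> 12 = 3, i.e. 1 x 128KB + 3 x 4KB = 140KB.
+ */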
+
+static void tegra_unlock_videomem_nonoverlap(void)
+{
+ /* Clear the base */
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_LO, 0);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_HI, 0);
+
+ /* Clear the size */
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_SIZE, 0);
+}
+
+static void tegra_clear_videomem(uintptr_t non_overlap_area_start,
+ unsigned long long non_overlap_area_size)
+{
+ int ret;
+
+ INFO("Cleaning previous Video Memory Carveout\n");
+
+ /*
+ * Map the NS memory first, clean it and then unmap it.
+ */
+ ret = mmap_add_dynamic_region(non_overlap_area_start, /* PA */
+ non_overlap_area_start, /* VA */
+ non_overlap_area_size, /* size */
+ MT_DEVICE | MT_RW | MT_NS); /* attrs */
+ assert(ret == 0);
+
+ zeromem((void *)non_overlap_area_start, non_overlap_area_size);
+ flush_dcache_range(non_overlap_area_start, non_overlap_area_size);
+
+ ret = mmap_remove_dynamic_region(non_overlap_area_start,
+ non_overlap_area_size);
+ assert(ret == 0);
+}
+
+static void tegra_clear_videomem_nonoverlap(uintptr_t phys_base,
+ unsigned long size_in_bytes)
+{
+ uintptr_t vmem_end_old = video_mem_base + (video_mem_size_mb << 20);
+ uintptr_t vmem_end_new = phys_base + size_in_bytes;
+ unsigned long long non_overlap_area_size;
+
+ /*
+ * Clear the old regions now being exposed. The following cases
+ * can occur -
+ *
+ * 1. clear whole old region (no overlap with new region)
+ * 2. clear old sub-region below new base
+ * 3. clear old sub-region above new end
+ */
+ if ((phys_base > vmem_end_old) || (video_mem_base > vmem_end_new)) {
+ tegra_clear_videomem(video_mem_base,
+ video_mem_size_mb << 20U);
+ } else {
+ if (video_mem_base < phys_base) {
+ non_overlap_area_size = phys_base - video_mem_base;
+ tegra_clear_videomem(video_mem_base, non_overlap_area_size);
+ }
+ if (vmem_end_old > vmem_end_new) {
+ non_overlap_area_size = vmem_end_old - vmem_end_new;
+ tegra_clear_videomem(vmem_end_new, non_overlap_area_size);
+ }
+ }
+}
+
+/*
+ * Program the Video Memory carveout region
+ *
+ * phys_base = physical base of aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ /*
+ * Setup the Memory controller to restrict CPU accesses to the Video
+ * Memory region
+ */
+
+ INFO("Configuring Video Memory Carveout\n");
+
+ if (video_mem_base != 0U) {
+ /*
+ * Lock the non overlapping memory being cleared so that
+ * other masters do not accidentally write to it. The memory
+ * would be unlocked once the non overlapping region is
+ * cleared and the new memory settings take effect.
+ */
+ tegra_lock_videomem_nonoverlap(video_mem_base,
+ video_mem_size_mb << 20);
+ }
+
+ /* program the Videomem aperture */
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)phys_base);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+ (uint32_t)(phys_base >> 32));
+ tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes >> 20);
+
+ /* Redundancy check for Video Protect setting */
+ assert(tegra_mc_read_32(MC_VIDEO_PROTECT_BASE_LO)
+ == (uint32_t)phys_base);
+ assert(tegra_mc_read_32(MC_VIDEO_PROTECT_BASE_HI)
+ == (uint32_t)(phys_base >> 32));
+ assert(tegra_mc_read_32(MC_VIDEO_PROTECT_SIZE_MB)
+ == (size_in_bytes >> 20));
+
+ /*
+ * MCE propagates the VideoMem configuration values across the
+ * CCPLEX.
+ */
+ (void)mce_update_gsc_videomem();
+
+ /* Clear the non-overlapping memory */
+ if (video_mem_base != 0U) {
+ tegra_clear_videomem_nonoverlap(phys_base, size_in_bytes);
+ tegra_unlock_videomem_nonoverlap();
+ }
+
+ /* store new values */
+ video_mem_base = phys_base;
+ video_mem_size_mb = (uint64_t)size_in_bytes >> 20;
+}
+
+/*
+ * This feature exists only for v1 of the Tegra Memory Controller.
+ */
+void tegra_memctrl_disable_ahb_redirection(void)
+{
+ ; /* do nothing */
+}
+
+void tegra_memctrl_clear_pending_interrupts(void)
+{
+ ; /* do nothing */
+}
diff --git a/plat/nvidia/tegra/drivers/pmc/pmc.c b/plat/nvidia/tegra/drivers/pmc/pmc.c
new file mode 100644
index 0000000..e70e7a6
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/pmc/pmc.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include <pmc.h>
+#include <tegra_def.h>
+
+#define RESET_ENABLE 0x10U
+
+/* Module IDs used during power ungate procedure */
+static const uint32_t pmc_cpu_powergate_id[4] = {
+ 14, /* CPU 0 */
+ 9, /* CPU 1 */
+ 10, /* CPU 2 */
+ 11 /* CPU 3 */
+};
+
+/*******************************************************************************
+ * Power ungate CPU to start the boot process. CPU reset vectors must be
+ * populated before calling this function.
+ ******************************************************************************/
+void tegra_pmc_cpu_on(int32_t cpu)
+{
+ uint32_t val;
+
+ /*
+ * Check if CPU is already power ungated
+ */
+ val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
+ if ((val & (1U << pmc_cpu_powergate_id[cpu])) == 0U) {
+ /*
+ * The PMC deasserts the START bit when it starts the power
+ * ungate process. Loop till no power toggle is in progress.
+ */
+ do {
+ val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
+ } while ((val & PMC_TOGGLE_START) != 0U);
+
+ /*
+ * Start the power ungate procedure
+ */
+ val = pmc_cpu_powergate_id[cpu] | PMC_TOGGLE_START;
+ tegra_pmc_write_32(PMC_PWRGATE_TOGGLE, val);
+
+		/*
+		 * The PMC deasserts the START bit once it begins the power
+		 * ungate process. Loop till the START bit is de-asserted.
+		 */
+		do {
+			val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
+		} while ((val & PMC_TOGGLE_START) != 0U);
+
+ /* loop till the CPU is power ungated */
+ do {
+ val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
+ } while ((val & (1U << pmc_cpu_powergate_id[cpu])) == 0U);
+ }
+}
+
+/*******************************************************************************
+ * Setup CPU vectors for resume from deep sleep
+ ******************************************************************************/
+void tegra_pmc_cpu_setup(uint64_t reset_addr)
+{
+ uint32_t val;
+
+ tegra_pmc_write_32(PMC_SECURE_SCRATCH34,
+ ((uint32_t)reset_addr & 0xFFFFFFFFU) | 1U);
+ val = (uint32_t)(reset_addr >> 32U);
+ tegra_pmc_write_32(PMC_SECURE_SCRATCH35, val & 0x7FFU);
+}
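+
+/*
+ * Boot-flow sketch (illustrative; sec_entrypoint is a hypothetical
+ * value): a platform CPU_ON path programs the reset vector before
+ * ungating power to the core.
+ *
+ *	tegra_pmc_cpu_setup(sec_entrypoint);
+ *	tegra_pmc_cpu_on(cpu);
+ */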
+
+/*******************************************************************************
+ * Lock CPU vectors to restrict further writes
+ ******************************************************************************/
+void tegra_pmc_lock_cpu_vectors(void)
+{
+ uint32_t val;
+
+ /* lock PMC_SECURE_SCRATCH22 */
+ val = tegra_pmc_read_32(PMC_SECURE_DISABLE2);
+ val |= PMC_SECURE_DISABLE2_WRITE22_ON;
+ tegra_pmc_write_32(PMC_SECURE_DISABLE2, val);
+
+ /* lock PMC_SECURE_SCRATCH34/35 */
+ val = tegra_pmc_read_32(PMC_SECURE_DISABLE3);
+ val |= (PMC_SECURE_DISABLE3_WRITE34_ON |
+ PMC_SECURE_DISABLE3_WRITE35_ON);
+ tegra_pmc_write_32(PMC_SECURE_DISABLE3, val);
+}
+
+/*******************************************************************************
+ * Find out if this is the last standing CPU
+ ******************************************************************************/
+bool tegra_pmc_is_last_on_cpu(void)
+{
+ int i, cpu = read_mpidr() & MPIDR_CPU_MASK;
+ uint32_t val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
+ bool status = true;
+
+ /* check if this is the last standing CPU */
+ for (i = 0; i < PLATFORM_MAX_CPUS_PER_CLUSTER; i++) {
+
+ /* skip the current CPU */
+ if (i == cpu)
+ continue;
+
+		/* if any other CPU is still powered on, we are not the last one */
+ if ((val & ((uint32_t)1 << pmc_cpu_powergate_id[i])) != 0U) {
+ status = false;
+ }
+ }
+
+ return status;
+}
+
+/*******************************************************************************
+ * Handler to be called on exiting System suspend. Right now only DPD registers
+ * are cleared.
+ ******************************************************************************/
+void tegra_pmc_resume(void)
+{
+ /* Clear DPD sample */
+ mmio_write_32((TEGRA_PMC_BASE + PMC_IO_DPD_SAMPLE), 0x0);
+
+ /* Clear DPD Enable */
+ mmio_write_32((TEGRA_PMC_BASE + PMC_DPD_ENABLE_0), 0x0);
+}
+
+/*******************************************************************************
+ * Restart the system
+ ******************************************************************************/
+__dead2 void tegra_pmc_system_reset(void)
+{
+ uint32_t reg;
+
+ reg = tegra_pmc_read_32(PMC_CONFIG);
+ reg |= RESET_ENABLE; /* restart */
+ tegra_pmc_write_32(PMC_CONFIG, reg);
+ wfi();
+
+ ERROR("Tegra System Reset: operation not handled.\n");
+ panic();
+}
diff --git a/plat/nvidia/tegra/drivers/smmu/smmu.c b/plat/nvidia/tegra/drivers/smmu/smmu.c
new file mode 100644
index 0000000..4189b00
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/smmu/smmu.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <platform_def.h>
+
+#include <common/bl_common.h>
+#include <common/debug.h>
+
+#include <smmu.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+extern void memcpy16(void *dest, const void *src, unsigned int length);
+
+#define SMMU_NUM_CONTEXTS 64U
+#define SMMU_CONTEXT_BANK_MAX_IDX 64U
+
+#define MISMATCH_DETECTED 0x55AA55AAU
+
+/*
+ * Init SMMU during boot or "System Suspend" exit
+ */
+void tegra_smmu_init(void)
+{
+ uint32_t val, cb_idx, smmu_id, ctx_base;
+ uint32_t num_smmu_devices = plat_get_num_smmu_devices();
+
+ for (smmu_id = 0U; smmu_id < num_smmu_devices; smmu_id++) {
+ /* Program the SMMU pagesize and reset CACHE_LOCK bit */
+ val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
+ val |= SMMU_GSR0_PGSIZE_64K;
+ val &= (uint32_t)~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+ tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val);
+
+ /* reset CACHE LOCK bit for NS Aux. Config. Register */
+ val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
+ val &= (uint32_t)~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+ tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val);
+
+ /* disable TCU prefetch for all contexts */
+ ctx_base = (SMMU_GSR0_PGSIZE_64K * SMMU_NUM_CONTEXTS)
+ + SMMU_CBn_ACTLR;
+ for (cb_idx = 0U; cb_idx < SMMU_CONTEXT_BANK_MAX_IDX; cb_idx++) {
+ val = tegra_smmu_read_32(smmu_id,
+ ctx_base + (SMMU_GSR0_PGSIZE_64K * cb_idx));
+ val &= (uint32_t)~SMMU_CBn_ACTLR_CPRE_BIT;
+ tegra_smmu_write_32(smmu_id, ctx_base +
+ (SMMU_GSR0_PGSIZE_64K * cb_idx), val);
+ }
+
+ /* set CACHE LOCK bit for NS Aux. Config. Register */
+ val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
+ val |= (uint32_t)SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+ tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val);
+
+ /* set CACHE LOCK bit for S Aux. Config. Register */
+ val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
+ val |= (uint32_t)SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+ tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val);
+ }
+}
+
+/*
+ * Verify SMMU settings have not been altered during boot
+ */
+void tegra_smmu_verify(void)
+{
+ uint32_t cb_idx, ctx_base, smmu_id, val;
+ uint32_t num_smmu_devices = plat_get_num_smmu_devices();
+ uint32_t mismatch = 0U;
+
+ for (smmu_id = 0U; smmu_id < num_smmu_devices; smmu_id++) {
+		/* check PGSIZE_64K bit in S Aux. Config. Register */
+ val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
+ if (0U == (val & SMMU_GSR0_PGSIZE_64K)) {
+ ERROR("%s: PGSIZE_64K Mismatch - smmu_id=%d, GSR0_SECURE_ACR=%x\n",
+ __func__, smmu_id, val);
+ mismatch = MISMATCH_DETECTED;
+ }
+
+ /* check CACHE LOCK bit in S Aux. Config. Register */
+ if (0U == (val & SMMU_ACR_CACHE_LOCK_ENABLE_BIT)) {
+ ERROR("%s: CACHE_LOCK Mismatch - smmu_id=%d, GSR0_SECURE_ACR=%x\n",
+ __func__, smmu_id, val);
+ mismatch = MISMATCH_DETECTED;
+ }
+
+ /* check CACHE LOCK bit in NS Aux. Config. Register */
+ val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
+ if (0U == (val & SMMU_ACR_CACHE_LOCK_ENABLE_BIT)) {
+ ERROR("%s: Mismatch - smmu_id=%d, GNSR_ACR=%x\n",
+ __func__, smmu_id, val);
+ mismatch = MISMATCH_DETECTED;
+ }
+
+ /* verify TCU prefetch for all contexts is disabled */
+ ctx_base = (SMMU_GSR0_PGSIZE_64K * SMMU_NUM_CONTEXTS) +
+ SMMU_CBn_ACTLR;
+ for (cb_idx = 0U; cb_idx < SMMU_CONTEXT_BANK_MAX_IDX; cb_idx++) {
+ val = tegra_smmu_read_32(smmu_id,
+ ctx_base + (SMMU_GSR0_PGSIZE_64K * cb_idx));
+ if (0U != (val & SMMU_CBn_ACTLR_CPRE_BIT)) {
+ ERROR("%s: Mismatch - smmu_id=%d, cb_idx=%d, GSR0_PGSIZE_64K=%x\n",
+ __func__, smmu_id, cb_idx, val);
+ mismatch = MISMATCH_DETECTED;
+ }
+ }
+ }
+
+ /* Treat configuration mismatch as fatal */
+ if ((mismatch == MISMATCH_DETECTED) && tegra_platform_is_silicon()) {
+ panic();
+ }
+}
diff --git a/plat/nvidia/tegra/drivers/spe/shared_console.S b/plat/nvidia/tegra/drivers/spe/shared_console.S
new file mode 100644
index 0000000..5ad4eb8
--- /dev/null
+++ b/plat/nvidia/tegra/drivers/spe/shared_console.S
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <console_macros.S>
+
+#define CONSOLE_NUM_BYTES_SHIFT 24
+#define CONSOLE_FLUSH_DATA_TO_PORT (1 << 26)
+#define CONSOLE_RING_DOORBELL (1 << 31)
+#define CONSOLE_IS_BUSY (1 << 31)
+#define CONSOLE_TIMEOUT 0xC000 /* 50 ms */
+
+ /*
+ * This file contains a driver implementation to make use of the
+ * real console implementation provided by the SPE firmware running
+	 * on SoCs after Tegra186.
+ *
+ * This console is shared by multiple components and the SPE firmware
+ * finally displays everything on the UART port.
+ */
+
+ .globl console_spe_core_init
+ .globl console_spe_core_putc
+ .globl console_spe_core_getc
+ .globl console_spe_core_flush
+ .globl console_spe_putc
+ .globl console_spe_getc
+ .globl console_spe_flush
+ .globl console_spe_register
+
+.macro check_if_console_is_ready base, tmp1, tmp2, label
+ /* wait until spe is ready or timeout expires */
+ mrs \tmp2, cntps_tval_el1
+1: ldr \tmp1, [\base]
+ and \tmp1, \tmp1, #CONSOLE_IS_BUSY
+ cbz \tmp1, 2f
+ mrs \tmp1, cntps_tval_el1
+ sub \tmp1, \tmp2, \tmp1
+ cmp \tmp1, #CONSOLE_TIMEOUT
+ b.lt 1b
+ b \label
+2:
+.endm
+
+ /* -------------------------------------------------
+ * int console_spe_register(uintptr_t baseaddr,
+ * uint32_t clock, uint32_t baud,
+ * console_t *console);
+ * Function to initialize and register a new spe
+ * console. Storage passed in for the console struct
+ * *must* be persistent (i.e. not from the stack).
+ * In: x0 - UART register base address
+ * w1 - UART clock in Hz
+ * w2 - Baud rate
+ * x3 - pointer to empty console_t struct
+ * Out: return 1 on success, 0 on error
+ * Clobber list : x0, x1, x2, x6, x7, x14
+ * -------------------------------------------------
+ */
+func console_spe_register
+ /* Check the input base address */
+ cbz x0, register_fail
+
+	/* We don't use the clock or baud rate, so it is OK to overwrite them */
+ check_if_console_is_ready x0, x1, x2, register_fail
+
+ cbz x3, register_fail
+ str x0, [x3, #CONSOLE_T_BASE]
+ mov x0, x3
+ finish_console_register spe putc=1, getc=ENABLE_CONSOLE_GETC, flush=1
+
+register_fail:
+ mov w0, wzr
+ ret
+endfunc console_spe_register
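+
+	/* -------------------------------------------------
+	 * Illustrative C-side registration (a sketch; the
+	 * base address macro is a placeholder). The console
+	 * storage must be static, not on the stack:
+	 *
+	 *	static console_t spe_console;
+	 *	(void)console_spe_register(TEGRA_CONSOLE_BASE,
+	 *			0U, 0U, &spe_console);
+	 * -------------------------------------------------
+	 */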
+
+ /* --------------------------------------------------------
+ * int console_spe_core_putc(int c, uintptr_t base_addr)
+ * Function to output a character over the console. It
+ * returns the character printed on success or -1 on error.
+ * In : w0 - character to be printed
+ * x1 - console base address
+ * Out : return -1 on error else return character.
+ * Clobber list : x2, x3
+ * --------------------------------------------------------
+ */
+func console_spe_core_putc
+ /* Check the input parameter */
+ cbz x1, putc_error
+
+ /* Prepend '\r' to '\n' */
+ cmp w0, #0xA
+ b.ne not_eol
+
+ check_if_console_is_ready x1, x2, x3, putc_error
+
+ /* spe is ready */
+ mov w2, #0xD /* '\r' */
+ and w2, w2, #0xFF
+ mov w3, #(CONSOLE_RING_DOORBELL | (1 << CONSOLE_NUM_BYTES_SHIFT))
+ orr w2, w2, w3
+ str w2, [x1]
+
+not_eol:
+ check_if_console_is_ready x1, x2, x3, putc_error
+
+ /* spe is ready */
+ mov w2, w0
+ and w2, w2, #0xFF
+ mov w3, #(CONSOLE_RING_DOORBELL | (1 << CONSOLE_NUM_BYTES_SHIFT))
+ orr w2, w2, w3
+ str w2, [x1]
+
+ ret
+putc_error:
+ mov w0, #-1
+ ret
+endfunc console_spe_core_putc
+
+ /* --------------------------------------------------------
+ * int console_spe_putc(int c, console_t *console)
+ * Function to output a character over the console. It
+ * returns the character printed on success or -1 on error.
+ * In : w0 - character to be printed
+ * x1 - pointer to console_t structure
+ * Out : return -1 on error else return character.
+ * Clobber list : x2
+ * --------------------------------------------------------
+ */
+func console_spe_putc
+ ldr x1, [x1, #CONSOLE_T_BASE]
+ b console_spe_core_putc
+endfunc console_spe_putc
+
+ /* ---------------------------------------------
+ * int console_spe_getc(console_t *console)
+ * Function to get a character from the console.
+ * It returns the character grabbed on success
+ * or -1 if no character is available.
+ * In : x0 - pointer to console_t structure
+ * Out: w0 - character if available, else -1
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_spe_getc
+ mov w0, #-1
+ ret
+endfunc console_spe_getc
+
+ /* -------------------------------------------------
+ * void console_spe_core_flush(uintptr_t base_addr)
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * In : x0 - console base address
+ * Out : void.
+ * Clobber list : x0, x1
+ * -------------------------------------------------
+ */
+func console_spe_core_flush
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+
+ /* flush console */
+ mov w1, #(CONSOLE_RING_DOORBELL | CONSOLE_FLUSH_DATA_TO_PORT)
+ str w1, [x0]
+ ret
+endfunc console_spe_core_flush
+
+ /* ---------------------------------------------
+ * void console_spe_flush(console_t *console)
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * In : x0 - pointer to console_t structure
+ * Out : void.
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_spe_flush
+ ldr x0, [x0, #CONSOLE_T_BASE]
+ b console_spe_core_flush
+endfunc console_spe_flush
diff --git a/plat/nvidia/tegra/include/drivers/bpmp.h b/plat/nvidia/tegra/include/drivers/bpmp.h
new file mode 100644
index 0000000..dc3fb6b
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/bpmp.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BPMP_H
+#define BPMP_H
+
+#include <stdint.h>
+
+/* macro to enable clock to the Atomics block */
+#define CAR_ENABLE_ATOMICS (1U << 16)
+
+/* command to get the channel base addresses from bpmp */
+#define ATOMIC_CMD_GET 4U
+
+/* Hardware IRQ # used to signal bpmp of an incoming command */
+#define INT_SHR_SEM_OUTBOX_FULL 6U
+
+/* macros to decode the bpmp's state */
+#define CH_MASK(ch) ((uint32_t)0x3 << ((ch) * 2U))
+#define MA_FREE(ch) ((uint32_t)0x2 << ((ch) * 2U))
+#define MA_ACKD(ch) ((uint32_t)0x3 << ((ch) * 2U))
+
+/* response from bpmp to indicate it has powered up */
+#define SIGN_OF_LIFE 0xAAAAAAAAU
+
+/* flags to indicate bpmp driver's state */
+#define BPMP_NOT_PRESENT 0xF00DBEEFU
+#define BPMP_INIT_COMPLETE 0xBEEFF00DU
+#define BPMP_INIT_PENDING 0xDEADBEEFU
+#define BPMP_SUSPEND_ENTRY 0xF00DCAFEU
+
+/* requests serviced by the bpmp */
+#define MRQ_PING 0
+#define MRQ_QUERY_TAG 1
+#define MRQ_DO_IDLE 2
+#define MRQ_TOLERATE_IDLE 3
+#define MRQ_MODULE_LOAD 4
+#define MRQ_MODULE_UNLOAD 5
+#define MRQ_SWITCH_CLUSTER 6
+#define MRQ_TRACE_MODIFY 7
+#define MRQ_WRITE_TRACE 8
+#define MRQ_THREADED_PING 9
+#define MRQ_CPUIDLE_USAGE 10
+#define MRQ_MODULE_MAIL 11
+#define MRQ_SCX_ENABLE 12
+#define MRQ_BPMPIDLE_USAGE 14
+#define MRQ_HEAP_USAGE 15
+#define MRQ_SCLK_SKIP_SET_RATE 16
+#define MRQ_ENABLE_SUSPEND 17
+#define MRQ_PASR_MASK 18
+#define MRQ_DEBUGFS 19
+#define MRQ_THERMAL 27
+
+/* Tegra PM states as known to BPMP */
+#define TEGRA_PM_CC1 9
+#define TEGRA_PM_CC4 12
+#define TEGRA_PM_CC6 14
+#define TEGRA_PM_CC7 15
+#define TEGRA_PM_SC1 17
+#define TEGRA_PM_SC2 18
+#define TEGRA_PM_SC3 19
+#define TEGRA_PM_SC4 20
+#define TEGRA_PM_SC7 23
+
+/* flag to indicate if entry into a CCx power state is allowed */
+#define BPMP_CCx_ALLOWED 0U
+
+/* number of communication channels to interact with the bpmp */
+#define NR_CHANNELS 4U
+
+/* flag to ask bpmp to acknowledge command packet */
+#define NO_ACK (0U << 0U)
+#define DO_ACK (1U << 0U)
+
+/* size of the command/response data */
+#define MSG_DATA_MAX_SZ 120U
+
+/**
+ * command/response packet to/from the bpmp
+ *
+ * command
+ * -------
+ * code: MRQ_* command
+ * flags: DO_ACK or NO_ACK
+ * data:
+ * [0] = cpu #
+ * [1] = cluster power state (TEGRA_PM_CCx)
+ * [2] = system power state (TEGRA_PM_SCx)
+ *
+ * response
+ * ---------
+ * code: error code
+ * flags: not used
+ * data:
+ * [0-3] = response value
+ */
+typedef struct mb_data {
+ int32_t code;
+ uint32_t flags;
+ uint8_t data[MSG_DATA_MAX_SZ];
+} mb_data_t;
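+
+/**
+ * Example request (an illustrative sketch; the payload shape for MRQ_PING
+ * is an assumption, not mandated by this header):
+ *
+ *	uint32_t challenge = 0xAAAAU;
+ *	uint32_t reply = 0U;
+ *	int32_t ret = tegra_bpmp_send_receive_atomic(MRQ_PING,
+ *			&challenge, sizeof(challenge),
+ *			&reply, sizeof(reply));
+ */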
+
+/**
+ * Function to initialise the interface with the bpmp
+ */
+int tegra_bpmp_init(void);
+
+/**
+ * Function to suspend the interface with the bpmp
+ */
+void tegra_bpmp_suspend(void);
+
+/**
+ * Function to resume the interface with the bpmp
+ */
+void tegra_bpmp_resume(void);
+
+/**
+ * Handler to send a MRQ_* command to the bpmp
+ */
+int32_t tegra_bpmp_send_receive_atomic(int mrq, const void *ob_data, int ob_sz,
+ void *ib_data, int ib_sz);
+
+#endif /* BPMP_H */
diff --git a/plat/nvidia/tegra/include/drivers/bpmp_ipc.h b/plat/nvidia/tegra/include/drivers/bpmp_ipc.h
new file mode 100644
index 0000000..401a07a
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/bpmp_ipc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BPMP_IPC_H
+#define BPMP_IPC_H
+
+#include <lib/utils_def.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/**
+ * Currently supported reset identifiers
+ */
+#define TEGRA_RESET_ID_XUSB_PADCTL U(114)
+#define TEGRA_RESET_ID_GPCDMA U(70)
+
+/**
+ * Function to initialise the IPC with the bpmp
+ */
+int32_t tegra_bpmp_ipc_init(void);
+
+/**
+ * Handler to reset a module
+ */
+int32_t tegra_bpmp_ipc_reset_module(uint32_t rst_id);
+
+/**
+ * Handler to enable clock to a module. Only SE device is
+ * supported for now.
+ */
+int tegra_bpmp_ipc_enable_clock(uint32_t clk_id);
+
+/**
+ * Handler to disable clock to a module. Only SE device is
+ * supported for now.
+ */
+int tegra_bpmp_ipc_disable_clock(uint32_t clk_id);
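+
+/**
+ * Example (an illustrative sketch; error handling elided): resetting the
+ * GPCDMA block over BPMP IPC using the identifiers above.
+ *
+ *	if (tegra_bpmp_ipc_init() == 0) {
+ *		(void)tegra_bpmp_ipc_reset_module(TEGRA_RESET_ID_GPCDMA);
+ *	}
+ */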
+
+#endif /* BPMP_IPC_H */
diff --git a/plat/nvidia/tegra/include/drivers/flowctrl.h b/plat/nvidia/tegra/include/drivers/flowctrl.h
new file mode 100644
index 0000000..e5ab600
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/flowctrl.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FLOWCTRL_H
+#define FLOWCTRL_H
+
+#include <lib/mmio.h>
+
+#include <stdbool.h>
+
+#include <tegra_def.h>
+
+#define FLOWCTRL_HALT_CPU0_EVENTS (0x0U)
+#define FLOWCTRL_WAITEVENT (2U << 29)
+#define FLOWCTRL_WAIT_FOR_INTERRUPT (4U << 29)
+#define FLOWCTRL_JTAG_RESUME (1U << 28)
+#define FLOWCTRL_HALT_SCLK (1U << 27)
+#define FLOWCTRL_HALT_LIC_IRQ (1U << 11)
+#define FLOWCTRL_HALT_LIC_FIQ (1U << 10)
+#define FLOWCTRL_HALT_GIC_IRQ (1U << 9)
+#define FLOWCTRL_HALT_GIC_FIQ (1U << 8)
+#define FLOWCTRL_HALT_BPMP_EVENTS (0x4U)
+#define FLOWCTRL_CPU0_CSR (0x8U)
+#define FLOWCTRL_CSR_HALT_MASK (1U << 22)
+#define FLOWCTRL_CSR_PWR_OFF_STS (1U << 16)
+#define FLOWCTRL_CSR_INTR_FLAG (1U << 15)
+#define FLOWCTRL_CSR_EVENT_FLAG (1U << 14)
+#define FLOWCTRL_CSR_IMMEDIATE_WAKE (1U << 3)
+#define FLOWCTRL_CSR_ENABLE (1U << 0)
+#define FLOWCTRL_HALT_CPU1_EVENTS (0x14U)
+#define FLOWCTRL_CPU1_CSR (0x18U)
+#define FLOW_CTLR_FLOW_DBG_QUAL (0x50U)
+#define FLOWCTRL_FIQ2CCPLEX_ENABLE (1U << 28)
+#define FLOWCTRL_FC_SEQ_INTERCEPT (0x5cU)
+#define INTERCEPT_IRQ_PENDING (0xffU)
+#define INTERCEPT_HVC (U(1) << 21)
+#define INTERCEPT_ENTRY_CC4 (U(1) << 20)
+#define INTERCEPT_ENTRY_PG_NONCPU (U(1) << 19)
+#define INTERCEPT_EXIT_PG_NONCPU (U(1) << 18)
+#define INTERCEPT_ENTRY_RG_CPU (U(1) << 17)
+#define INTERCEPT_EXIT_RG_CPU (U(1) << 16)
+#define INTERCEPT_ENTRY_PG_CORE0 (U(1) << 15)
+#define INTERCEPT_EXIT_PG_CORE0 (U(1) << 14)
+#define INTERCEPT_ENTRY_PG_CORE1 (U(1) << 13)
+#define INTERCEPT_EXIT_PG_CORE1 (U(1) << 12)
+#define INTERCEPT_ENTRY_PG_CORE2 (U(1) << 11)
+#define INTERCEPT_EXIT_PG_CORE2 (U(1) << 10)
+#define INTERCEPT_ENTRY_PG_CORE3 (U(1) << 9)
+#define INTERCEPT_EXIT_PG_CORE3 (U(1) << 8)
+#define INTERRUPT_PENDING_NONCPU (U(1) << 7)
+#define INTERRUPT_PENDING_CRAIL (U(1) << 6)
+#define INTERRUPT_PENDING_CORE0 (U(1) << 5)
+#define INTERRUPT_PENDING_CORE1 (U(1) << 4)
+#define INTERRUPT_PENDING_CORE2 (U(1) << 3)
+#define INTERRUPT_PENDING_CORE3 (U(1) << 2)
+#define CC4_INTERRUPT_PENDING (U(1) << 1)
+#define HVC_INTERRUPT_PENDING (U(1) << 0)
+#define FLOWCTRL_CC4_CORE0_CTRL (0x6cU)
+#define FLOWCTRL_WAIT_WFI_BITMAP (0x100U)
+#define FLOWCTRL_L2_FLUSH_CONTROL (0x94U)
+#define FLOWCTRL_BPMP_CLUSTER_CONTROL (0x98U)
+#define FLOWCTRL_BPMP_CLUSTER_PWRON_LOCK (1U << 2)
+
+#define FLOWCTRL_ENABLE_EXT 12U
+#define FLOWCTRL_ENABLE_EXT_MASK 3U
+#define FLOWCTRL_PG_CPU_NONCPU 0x1U
+#define FLOWCTRL_TURNOFF_CPURAIL 0x2U
+
+static inline uint32_t tegra_fc_read_32(uint32_t off)
+{
+ return mmio_read_32(TEGRA_FLOWCTRL_BASE + off);
+}
+
+static inline void tegra_fc_write_32(uint32_t off, uint32_t val)
+{
+ mmio_write_32(TEGRA_FLOWCTRL_BASE + off, val);
+}
+
+void tegra_fc_bpmp_on(uint32_t entrypoint);
+void tegra_fc_bpmp_off(void);
+void tegra_fc_ccplex_pgexit_lock(void);
+void tegra_fc_ccplex_pgexit_unlock(void);
+void tegra_fc_cluster_idle(uint32_t mpidr);
+void tegra_fc_cpu_powerdn(uint32_t mpidr);
+void tegra_fc_cluster_powerdn(uint32_t mpidr);
+void tegra_fc_cpu_on(int cpu);
+void tegra_fc_cpu_off(int cpu);
+void tegra_fc_disable_fiq_to_ccplex_routing(void);
+void tegra_fc_enable_fiq_to_ccplex_routing(void);
+bool tegra_fc_is_ccx_allowed(void);
+void tegra_fc_lock_active_cluster(void);
+void tegra_fc_soc_powerdn(uint32_t mpidr);
+
+#endif /* FLOWCTRL_H */
diff --git a/plat/nvidia/tegra/include/drivers/gpcdma.h b/plat/nvidia/tegra/include/drivers/gpcdma.h
new file mode 100644
index 0000000..a59df37
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/gpcdma.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef GPCDMA_H
+#define GPCDMA_H
+
+#include <stdint.h>
+
+void tegra_gpcdma_memcpy(uint64_t dst_addr, uint64_t src_addr,
+ uint32_t num_bytes);
+void tegra_gpcdma_zeromem(uint64_t dst_addr, uint32_t num_bytes);
+
+#endif /* GPCDMA_H */
diff --git a/plat/nvidia/tegra/include/drivers/mce.h b/plat/nvidia/tegra/include/drivers/mce.h
new file mode 100644
index 0000000..5f1bb4f
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/mce.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MCE_H
+#define MCE_H
+
+#include <lib/mmio.h>
+
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * MCE commands
+ ******************************************************************************/
+typedef enum mce_cmd {
+ MCE_CMD_ENTER_CSTATE = 0U,
+ MCE_CMD_UPDATE_CSTATE_INFO = 1U,
+ MCE_CMD_UPDATE_CROSSOVER_TIME = 2U,
+ MCE_CMD_READ_CSTATE_STATS = 3U,
+ MCE_CMD_WRITE_CSTATE_STATS = 4U,
+ MCE_CMD_IS_SC7_ALLOWED = 5U,
+ MCE_CMD_ONLINE_CORE = 6U,
+ MCE_CMD_CC3_CTRL = 7U,
+ MCE_CMD_ECHO_DATA = 8U,
+ MCE_CMD_READ_VERSIONS = 9U,
+ MCE_CMD_ENUM_FEATURES = 10U,
+ MCE_CMD_ROC_FLUSH_CACHE_TRBITS = 11U,
+ MCE_CMD_ENUM_READ_MCA = 12U,
+ MCE_CMD_ENUM_WRITE_MCA = 13U,
+ MCE_CMD_ROC_FLUSH_CACHE = 14U,
+ MCE_CMD_ROC_CLEAN_CACHE = 15U,
+ MCE_CMD_ENABLE_LATIC = 16U,
+ MCE_CMD_UNCORE_PERFMON_REQ = 17U,
+ MCE_CMD_MISC_CCPLEX = 18U,
+ MCE_CMD_IS_CCX_ALLOWED = 0xFEU,
+ MCE_CMD_MAX = 0xFFU,
+} mce_cmd_t;
+
+#define MCE_CMD_MASK 0xFFU
+
+/*******************************************************************************
+ * Timeout value used to powerdown a core
+ ******************************************************************************/
+#define MCE_CORE_SLEEP_TIME_INFINITE 0xFFFFFFFFU
+
+/*******************************************************************************
+ * Struct to prepare UPDATE_CSTATE_INFO request
+ ******************************************************************************/
+typedef struct mce_cstate_info {
+ /* cluster cstate value */
+ uint32_t cluster;
+ /* ccplex cstate value */
+ uint32_t ccplex;
+ /* system cstate value */
+ uint32_t system;
+ /* force system state? */
+ uint8_t system_state_force;
+ /* wake mask value */
+ uint32_t wake_mask;
+ /* update the wake mask? */
+ uint8_t update_wake_mask;
+} mce_cstate_info_t;
+
+/* public interfaces */
+int mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2);
+int mce_update_reset_vector(void);
+int mce_update_gsc_videomem(void);
+int mce_update_gsc_tzdram(void);
+__dead2 void mce_enter_ccplex_state(uint32_t state_idx);
+void mce_update_cstate_info(const mce_cstate_info_t *cstate);
+void mce_verify_firmware_version(void);
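+
+/*
+ * Example (a sketch; using TEGRA_PM_CC6 from bpmp.h as the cluster
+ * encoding is an assumption, not prescribed by this header):
+ *
+ *	mce_cstate_info_t info = { 0 };
+ *
+ *	info.cluster = TEGRA_PM_CC6;
+ *	info.wake_mask = 0xFFFFFFFFU;
+ *	info.update_wake_mask = 1U;
+ *	mce_update_cstate_info(&info);
+ */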
+
+#endif /* MCE_H */
diff --git a/plat/nvidia/tegra/include/drivers/memctrl.h b/plat/nvidia/tegra/include/drivers/memctrl.h
new file mode 100644
index 0000000..cc85095
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/memctrl.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MEMCTRL_H
+#define MEMCTRL_H
+
+void tegra_memctrl_setup(void);
+void tegra_memctrl_restore_settings(void);
+void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes);
+void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes);
+void tegra_memctrl_disable_ahb_redirection(void);
+void tegra_memctrl_clear_pending_interrupts(void);
+
+#endif /* MEMCTRL_H */
diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v1.h b/plat/nvidia/tegra/include/drivers/memctrl_v1.h
new file mode 100644
index 0000000..8e9f198
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/memctrl_v1.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MEMCTRL_V1_H
+#define MEMCTRL_V1_H
+
+#include <lib/mmio.h>
+
+#include <tegra_def.h>
+
+/* SMMU registers */
+#define MC_SMMU_CONFIG_0 0x10U
+#define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE 0U
+#define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE 1U
+#define MC_SMMU_TLB_CONFIG_0 0x14U
+#define MC_SMMU_TLB_CONFIG_0_RESET_VAL 0x20000010U
+#define MC_SMMU_PTC_CONFIG_0 0x18U
+#define MC_SMMU_PTC_CONFIG_0_RESET_VAL 0x2000003fU
+#define MC_SMMU_TLB_FLUSH_0 0x30U
+#define TLB_FLUSH_VA_MATCH_ALL 0U
+#define TLB_FLUSH_ASID_MATCH_DISABLE 0U
+#define TLB_FLUSH_ASID_MATCH_SHIFT 31U
+#define MC_SMMU_TLB_FLUSH_ALL \
+ (TLB_FLUSH_VA_MATCH_ALL | \
+ (TLB_FLUSH_ASID_MATCH_DISABLE << TLB_FLUSH_ASID_MATCH_SHIFT))
+#define MC_SMMU_PTC_FLUSH_0 0x34U
+#define MC_SMMU_PTC_FLUSH_ALL 0U
+#define MC_SMMU_ASID_SECURITY_0 0x38U
+#define MC_SMMU_ASID_SECURITY 0U
+#define MC_SMMU_TRANSLATION_ENABLE_0_0 0x228U
+#define MC_SMMU_TRANSLATION_ENABLE_1_0 0x22cU
+#define MC_SMMU_TRANSLATION_ENABLE_2_0 0x230U
+#define MC_SMMU_TRANSLATION_ENABLE_3_0 0x234U
+#define MC_SMMU_TRANSLATION_ENABLE_4_0 0xb98U
+#define MC_SMMU_TRANSLATION_ENABLE (~0)
+
+/* MC IRAM aperture registers */
+#define MC_IRAM_BASE_LO 0x65CU
+#define MC_IRAM_TOP_LO 0x660U
+#define MC_IRAM_BASE_TOP_HI 0x980U
+#define MC_IRAM_REG_CTRL 0x964U
+#define MC_DISABLE_IRAM_CFG_WRITES 1U
+
+static inline uint32_t tegra_mc_read_32(uint32_t off)
+{
+ return mmio_read_32(TEGRA_MC_BASE + off);
+}
+
+static inline void tegra_mc_write_32(uint32_t off, uint32_t val)
+{
+ mmio_write_32(TEGRA_MC_BASE + off, val);
+}
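+
+/*
+ * Illustrative sketch only: a full SMMU TLB and PTC flush using the register
+ * definitions and accessors above.
+ *
+ *	tegra_mc_write_32(MC_SMMU_TLB_FLUSH_0, MC_SMMU_TLB_FLUSH_ALL);
+ *	tegra_mc_write_32(MC_SMMU_PTC_FLUSH_0, MC_SMMU_PTC_FLUSH_ALL);
+ */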
+
+#endif /* MEMCTRL_V1_H */
diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v2.h b/plat/nvidia/tegra/include/drivers/memctrl_v2.h
new file mode 100644
index 0000000..9af3027
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/memctrl_v2.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MEMCTRL_V2_H
+#define MEMCTRL_V2_H
+
+#include <arch.h>
+
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * Memory Controller SMMU Bypass config register
+ ******************************************************************************/
+#define MC_SMMU_BYPASS_CONFIG 0x1820U
+#define MC_SMMU_BYPASS_CTRL_MASK 0x3U
+#define MC_SMMU_BYPASS_CTRL_SHIFT 0U
+#define MC_SMMU_CTRL_TBU_BYPASS_ALL (0U << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_RSVD (1U << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID (2U << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_BYPASS_NONE (3U << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT (1U << 31)
+#define MC_SMMU_BYPASS_CONFIG_SETTINGS (MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT | \
+ MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID)
+
+#ifndef __ASSEMBLER__
+
+#include <assert.h>
+
+typedef struct mc_regs {
+ uint32_t reg;
+ uint32_t val;
+} mc_regs_t;
+
+#define mc_smmu_bypass_cfg \
+ { \
+ .reg = TEGRA_MC_BASE + MC_SMMU_BYPASS_CONFIG, \
+ .val = 0x00000000U, \
+ }
+
+#define _START_OF_TABLE_ \
+ { \
+ .reg = 0xCAFE05C7U, \
+ .val = 0x00000000U, \
+ }
+
+#define _END_OF_TABLE_ \
+ { \
+ .reg = 0xFFFFFFFFU, \
+ .val = 0xFFFFFFFFU, \
+ }
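+
+/*
+ * Illustrative sketch: a SoC port would describe its save/restore context
+ * as an mc_regs_t array bracketed by the marker entries above and return it
+ * from plat_memctrl_get_sys_suspend_ctx(), e.g.
+ *
+ *	static mc_regs_t tegra_mc_context[] = {
+ *		_START_OF_TABLE_,
+ *		mc_smmu_bypass_cfg,
+ *		_END_OF_TABLE_,
+ *	};
+ */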
+
+#include <lib/mmio.h>
+
+static inline uint32_t tegra_mc_read_32(uint32_t off)
+{
+ return mmio_read_32(TEGRA_MC_BASE + off);
+}
+
+static inline void tegra_mc_write_32(uint32_t off, uint32_t val)
+{
+ mmio_write_32(TEGRA_MC_BASE + off, val);
+}
+
+#if defined(TEGRA_MC_STREAMID_BASE)
+static inline uint32_t tegra_mc_streamid_read_32(uint32_t off)
+{
+ return mmio_read_32(TEGRA_MC_STREAMID_BASE + off);
+}
+
+static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val)
+{
+ mmio_write_32(TEGRA_MC_STREAMID_BASE + off, val);
+ assert(mmio_read_32(TEGRA_MC_STREAMID_BASE + off) == val);
+}
+#endif
+
+void plat_memctrl_setup(void);
+
+void plat_memctrl_restore(void);
+mc_regs_t *plat_memctrl_get_sys_suspend_ctx(void);
+
+/*******************************************************************************
+ * Handler to save the MC settings to TZDRAM before entering "System Suspend"
+ *
+ * Implemented by the common Tegra memctrl_v2 driver under tegra/drivers/memctrl
+ ******************************************************************************/
+void tegra_mc_save_context(uint64_t mc_ctx_addr);
+
+/*******************************************************************************
+ * Handler to program the scratch registers with TZDRAM settings for the
+ * resume firmware.
+ *
+ * Implemented by SoCs under tegra/soc/txxx
+ ******************************************************************************/
+void plat_memctrl_tzdram_setup(uint64_t phys_base, uint64_t size_in_bytes);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* MEMCTRL_V2_H */
diff --git a/plat/nvidia/tegra/include/drivers/pmc.h b/plat/nvidia/tegra/include/drivers/pmc.h
new file mode 100644
index 0000000..8752b84
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/pmc.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PMC_H
+#define PMC_H
+
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+#include <stdbool.h>
+
+#include <tegra_def.h>
+
+#define PMC_CONFIG U(0x0)
+#define PMC_IO_DPD_SAMPLE U(0x20)
+#define PMC_DPD_ENABLE_0 U(0x24)
+#define PMC_PWRGATE_STATUS U(0x38)
+#define PMC_PWRGATE_TOGGLE U(0x30)
+#define PMC_SCRATCH1 U(0x54)
+#define PMC_CRYPTO_OP_0 U(0xf4)
+#define PMC_TOGGLE_START U(0x100)
+#define PMC_SCRATCH31 U(0x118)
+#define PMC_SCRATCH32 U(0x11C)
+#define PMC_SCRATCH33 U(0x120)
+#define PMC_SCRATCH39 U(0x138)
+#define PMC_SCRATCH40 U(0x13C)
+#define PMC_SCRATCH41 U(0x140)
+#define PMC_SCRATCH42 U(0x144)
+#define PMC_SCRATCH43 U(0x22C)
+#define PMC_SCRATCH44 U(0x230)
+#define PMC_SCRATCH45 U(0x234)
+#define PMC_SCRATCH46 U(0x238)
+#define PMC_SCRATCH47 U(0x23C)
+#define PMC_SCRATCH48 U(0x240)
+#define PMC_SCRATCH50 U(0x248)
+#define PMC_SCRATCH51 U(0x24C)
+#define PMC_TSC_MULT_0 U(0x2B4)
+#define PMC_STICKY_BIT U(0x2C0)
+#define PMC_SECURE_DISABLE2 U(0x2C4)
+#define PMC_SECURE_DISABLE2_WRITE22_ON (U(1) << 28)
+#define PMC_FUSE_CONTROL_0 U(0x450)
+#define PMC_SECURE_DISABLE3 U(0x2D8)
+#define PMC_SECURE_DISABLE3_WRITE34_ON (U(1) << 20)
+#define PMC_SECURE_DISABLE3_WRITE35_ON (U(1) << 22)
+#define PMC_SECURE_SCRATCH22 U(0x338)
+#define PMC_SECURE_SCRATCH34 U(0x368)
+#define PMC_SECURE_SCRATCH35 U(0x36c)
+#define PMC_SCRATCH56 U(0x600)
+#define PMC_SCRATCH57 U(0x604)
+#define PMC_SCRATCH201 U(0x844)
+
+static inline uint32_t tegra_pmc_read_32(uint32_t off)
+{
+ return mmio_read_32(TEGRA_PMC_BASE + off);
+}
+
+static inline void tegra_pmc_write_32(uint32_t off, uint32_t val)
+{
+ mmio_write_32(TEGRA_PMC_BASE + off, val);
+}
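+
+/*
+ * Illustrative sketch only: a read-modify-write through the accessors above,
+ * e.g. starting a power-ungate request (the partition id is a placeholder).
+ *
+ *	uint32_t val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
+ *	tegra_pmc_write_32(PMC_PWRGATE_TOGGLE, val | PMC_TOGGLE_START);
+ */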
+
+void tegra_pmc_cpu_on(int32_t cpu);
+void tegra_pmc_cpu_setup(uint64_t reset_addr);
+bool tegra_pmc_is_last_on_cpu(void);
+void tegra_pmc_lock_cpu_vectors(void);
+void tegra_pmc_resume(void);
+__dead2 void tegra_pmc_system_reset(void);
+
+#endif /* PMC_H */
diff --git a/plat/nvidia/tegra/include/drivers/security_engine.h b/plat/nvidia/tegra/include/drivers/security_engine.h
new file mode 100644
index 0000000..5ae6257
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/security_engine.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SECURITY_ENGINE_H
+#define SECURITY_ENGINE_H
+
+/*******************************************************************************
+ * Structure definition
+ ******************************************************************************/
+
+/* Security Engine Linked List */
+struct tegra_se_ll {
+ /* DMA buffer address */
+ uint32_t addr;
+ /* Data length in DMA buffer */
+ uint32_t data_len;
+};
+
+#define SE_LL_MAX_BUFFER_NUM 4
+typedef struct tegra_se_io_lst {
+ volatile uint32_t last_buff_num;
+ volatile struct tegra_se_ll buffer[SE_LL_MAX_BUFFER_NUM];
+} tegra_se_io_lst_t __attribute__((aligned(4)));
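+
+/*
+ * Illustrative sketch only: describing one DMA buffer to the SE through the
+ * linked-list structures above. Address and length are placeholder values,
+ * and the exact semantics of last_buff_num are assumed, not specified here.
+ *
+ *	static tegra_se_io_lst_t se_src_ll;
+ *
+ *	se_src_ll.last_buff_num = 0U;            /- first/only buffer in use
+ *	se_src_ll.buffer[0].addr = 0x40000000U;  /- example DMA address
+ *	se_src_ll.buffer[0].data_len = 4096U;
+ */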
+
+/* SE device structure */
+typedef struct tegra_se_dev {
+ /* Security Engine ID */
+ const int se_num;
+ /* SE base address */
+ const uint64_t se_base;
+ /* SE context size in AES blocks */
+ const uint32_t ctx_size_blks;
+ /* pointer to source linked list buffer */
+ tegra_se_io_lst_t *src_ll_buf;
+ /* pointer to destination linked list buffer */
+ tegra_se_io_lst_t *dst_ll_buf;
+ /* LP context buffer pointer */
+ uint32_t *ctx_save_buf;
+} tegra_se_dev_t;
+
+/* PKA1 device structure */
+typedef struct tegra_pka_dev {
+ /* PKA1 base address */
+ uint64_t pka_base;
+} tegra_pka_dev_t;
+
+/*******************************************************************************
+ * Public interface
+ ******************************************************************************/
+void tegra_se_init(void);
+int tegra_se_suspend(void);
+void tegra_se_resume(void);
+int tegra_se_save_tzram(void);
+int32_t tegra_se_save_sha256_hash(uint64_t bl31_base, uint32_t src_len_inbyte);
+
+#endif /* SECURITY_ENGINE_H */
diff --git a/plat/nvidia/tegra/include/drivers/smmu.h b/plat/nvidia/tegra/include/drivers/smmu.h
new file mode 100644
index 0000000..1de9af6
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/smmu.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SMMU_H
+#define SMMU_H
+
+#include <lib/mmio.h>
+
+#include <memctrl_v2.h>
+#include <tegra_def.h>
+
+#define SMMU_CBn_ACTLR (0x4U)
+
+/*******************************************************************************
+ * SMMU Global Secure Aux. Configuration Register
+ ******************************************************************************/
+#define SMMU_GSR0_SECURE_ACR 0x10U
+#define SMMU_GNSR_ACR (SMMU_GSR0_SECURE_ACR + 0x400U)
+#define SMMU_GSR0_PGSIZE_SHIFT 16U
+#define SMMU_GSR0_PGSIZE_4K (0U << SMMU_GSR0_PGSIZE_SHIFT)
+#define SMMU_GSR0_PGSIZE_64K (1U << SMMU_GSR0_PGSIZE_SHIFT)
+#define SMMU_ACR_CACHE_LOCK_ENABLE_BIT (1ULL << 26U)
+#define SMMU_GSR0_PER (0x20200U)
+
+/*******************************************************************************
+ * SMMU Global Aux. Control Register
+ ******************************************************************************/
+#define SMMU_CBn_ACTLR_CPRE_BIT (1ULL << 1U)
+
+/* SMMU IDs currently supported by the driver */
+enum {
+ TEGRA_SMMU0 = 0U,
+ TEGRA_SMMU1 = 1U,
+ TEGRA_SMMU2 = 2U
+};
+
+static inline uint32_t tegra_smmu_read_32(uint32_t smmu_id, uint32_t off)
+{
+ uint32_t ret = 0U;
+
+#if defined(TEGRA_SMMU0_BASE)
+ if (smmu_id == TEGRA_SMMU0) {
+ ret = mmio_read_32(TEGRA_SMMU0_BASE + (uint64_t)off);
+ }
+#endif
+
+#if defined(TEGRA_SMMU1_BASE)
+ if (smmu_id == TEGRA_SMMU1) {
+ ret = mmio_read_32(TEGRA_SMMU1_BASE + (uint64_t)off);
+ }
+#endif
+
+#if defined(TEGRA_SMMU2_BASE)
+ if (smmu_id == TEGRA_SMMU2) {
+ ret = mmio_read_32(TEGRA_SMMU2_BASE + (uint64_t)off);
+ }
+#endif
+
+ return ret;
+}
+
+static inline void tegra_smmu_write_32(uint32_t smmu_id,
+ uint32_t off, uint32_t val)
+{
+#if defined(TEGRA_SMMU0_BASE)
+ if (smmu_id == TEGRA_SMMU0) {
+ mmio_write_32(TEGRA_SMMU0_BASE + (uint64_t)off, val);
+ }
+#endif
+
+#if defined(TEGRA_SMMU1_BASE)
+ if (smmu_id == TEGRA_SMMU1) {
+ mmio_write_32(TEGRA_SMMU1_BASE + (uint64_t)off, val);
+ }
+#endif
+
+#if defined(TEGRA_SMMU2_BASE)
+ if (smmu_id == TEGRA_SMMU2) {
+ mmio_write_32(TEGRA_SMMU2_BASE + (uint64_t)off, val);
+ }
+#endif
+}
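+
+/*
+ * Illustrative sketch only: setting the cache lock enable bit in the secure
+ * ACR of SMMU instance 0 using the helpers above.
+ *
+ *	uint32_t acr = tegra_smmu_read_32(TEGRA_SMMU0, SMMU_GSR0_SECURE_ACR);
+ *
+ *	acr |= (uint32_t)SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+ *	tegra_smmu_write_32(TEGRA_SMMU0, SMMU_GSR0_SECURE_ACR, acr);
+ */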
+
+void tegra_smmu_init(void);
+void tegra_smmu_verify(void);
+uint32_t plat_get_num_smmu_devices(void);
+
+#endif /* SMMU_H */
diff --git a/plat/nvidia/tegra/include/drivers/spe.h b/plat/nvidia/tegra/include/drivers/spe.h
new file mode 100644
index 0000000..e0f8714
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/spe.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPE_H
+#define SPE_H
+
+#include <stdint.h>
+
+#include <drivers/console.h>
+
+/*
+ * Initialize a new spe console instance and register it with the console
+ * framework. The |console| pointer must point to storage that will be valid
+ * for the lifetime of the console, such as a global or static local variable.
+ * Its contents will be reinitialized from scratch.
+ */
+int console_spe_register(uintptr_t baseaddr, uint32_t clock, uint32_t baud,
+ console_t *console);
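+
+/*
+ * Illustrative sketch only; the base address, clock rate and baud rate below
+ * are placeholders:
+ *
+ *	static console_t spe_console;
+ *
+ *	(void)console_spe_register(spe_base, clk_rate_hz, baud_rate,
+ *				   &spe_console);
+ */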
+
+#endif /* SPE_H */
diff --git a/plat/nvidia/tegra/include/drivers/tegra_gic.h b/plat/nvidia/tegra/include/drivers/tegra_gic.h
new file mode 100644
index 0000000..9f9477c
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/tegra_gic.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA_GIC_H
+#define TEGRA_GIC_H
+
+#include <common/interrupt_props.h>
+
+/*******************************************************************************
+ * Per-CPU struct describing FIQ state to be stored
+ ******************************************************************************/
+typedef struct pcpu_fiq_state {
+ uint64_t elr_el3;
+ uint64_t spsr_el3;
+} pcpu_fiq_state_t;
+
+/*******************************************************************************
+ * Function declarations
+ ******************************************************************************/
+void tegra_gic_cpuif_deactivate(void);
+void tegra_gic_init(void);
+void tegra_gic_pcpu_init(void);
+void tegra_gic_setup(const interrupt_prop_t *interrupt_props,
+ unsigned int interrupt_props_num);
+
+#endif /* TEGRA_GIC_H */
diff --git a/plat/nvidia/tegra/include/lib/profiler.h b/plat/nvidia/tegra/include/lib/profiler.h
new file mode 100644
index 0000000..a7d5e7b
--- /dev/null
+++ b/plat/nvidia/tegra/include/lib/profiler.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2017, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PROFILER_H
+#define PROFILER_H
+
+/*******************************************************************************
+ * Number of bytes of memory used by the profiler on Tegra
+ ******************************************************************************/
+#define PROFILER_SIZE_BYTES U(0x1000)
+
+void boot_profiler_init(uint64_t shmem_base, uint32_t tmr_base);
+void boot_profiler_add_record(const char *str);
+void boot_profiler_deinit(void);
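+
+/*
+ * Illustrative sketch only: shmem_base must point to a carveout of at least
+ * PROFILER_SIZE_BYTES and tmr_base at the SoC microseconds timer; both are
+ * placeholders here.
+ *
+ *	boot_profiler_init(shmem_base, tmr_base);
+ *	boot_profiler_add_record("BL31: setup done");
+ *	boot_profiler_deinit();
+ */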
+
+#endif /* PROFILER_H */
diff --git a/plat/nvidia/tegra/include/plat_macros.S b/plat/nvidia/tegra/include/plat_macros.S
new file mode 100644
index 0000000..2dc3b41
--- /dev/null
+++ b/plat/nvidia/tegra/include/plat_macros.S
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_MACROS_S
+#define PLAT_MACROS_S
+
+#include <drivers/arm/gicv2.h>
+#include <tegra_def.h>
+
+.section .rodata.gic_reg_name, "aS"
+gicc_regs:
+ .asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+gicd_pend_reg:
+ .asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n Offset:\t\t\tvalue\n"
+newline:
+ .asciz "\n"
+spacer:
+ .asciz ":\t\t0x"
+
+/* ---------------------------------------------
+ * The below macro prints out relevant GIC
+ * registers whenever an unhandled exception is
+ * taken in BL31.
+ * ---------------------------------------------
+ */
+.macro plat_crash_print_regs
+#ifdef TEGRA_GICC_BASE
+ mov_imm x16, TEGRA_GICC_BASE
+
+ /* gicc base address is now in x16 */
+ adr x6, gicc_regs /* Load the gicc reg list to x6 */
+ /* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+ ldr w8, [x16, #GICC_HPPIR]
+ ldr w9, [x16, #GICC_AHPPIR]
+ ldr w10, [x16, #GICC_CTLR]
+ /* Store to the crash buf and print to console */
+ bl str_in_crash_buf_print
+#endif
+ /* Print the GICD_ISPENDR regs */
+ mov_imm x16, TEGRA_GICD_BASE
+ add x7, x16, #GICD_ISPENDR
+ adr x4, gicd_pend_reg
+ bl asm_print_str
+2:
+ sub x4, x7, x16
+ cmp x4, #0x280
+ b.eq 1f
+ bl asm_print_hex
+ adr x4, spacer
+ bl asm_print_str
+ ldr w4, [x7], #4
+ bl asm_print_hex
+ adr x4, newline
+ bl asm_print_str
+ b 2b
+1:
+.endm
+
+#endif /* PLAT_MACROS_S */
diff --git a/plat/nvidia/tegra/include/platform_def.h b/plat/nvidia/tegra/include/platform_def.h
new file mode 100644
index 0000000..958a3f9
--- /dev/null
+++ b/plat/nvidia/tegra/include/platform_def.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <arch.h>
+#include <lib/utils_def.h>
+
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * Fail the build if SEPARATE_CODE_AND_RODATA is not set to 1
+ ******************************************************************************/
+#if !SEPARATE_CODE_AND_RODATA
+#error "SEPARATE_CODE_AND_RODATA should be set to 1"
+#endif
+
+/*
+ * Platform binary types for linking
+ */
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#ifdef IMAGE_BL31
+#define PLATFORM_STACK_SIZE U(0x400)
+#endif
+
+#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER_COUNT * \
+ PLATFORM_MAX_CPUS_PER_CLUSTER)
+#define PLAT_NUM_PWR_DOMAINS (PLATFORM_CORE_COUNT + \
+ PLATFORM_CLUSTER_COUNT + U(1))
+
+/*******************************************************************************
+ * Platform console related constants
+ ******************************************************************************/
+#define TEGRA_CONSOLE_BAUDRATE U(115200)
+#define TEGRA_BOOT_UART_CLK_13_MHZ U(13000000)
+#define TEGRA_BOOT_UART_CLK_408_MHZ U(408000000)
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/* Size of trusted dram */
+#define TZDRAM_SIZE U(0x00400000)
+#define TZDRAM_END (TZDRAM_BASE + TZDRAM_SIZE)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+#define BL31_BASE TZDRAM_BASE
+#define BL31_LIMIT (TZDRAM_BASE + BL31_SIZE - 1)
+#define BL32_BASE (TZDRAM_BASE + BL31_SIZE)
+#define BL32_LIMIT TZDRAM_END
+
+/*******************************************************************************
+ * Some data must be aligned on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (0x40) /* (U(1) << CACHE_WRITEBACK_SHIFT) */
+
+/*******************************************************************************
+ * Dummy macros to compile io_storage support
+ ******************************************************************************/
+#define MAX_IO_DEVICES U(0)
+#define MAX_IO_HANDLES U(0)
+
+/*******************************************************************************
+ * Platform macros to support SDEI
+ ******************************************************************************/
+#define TEGRA_SDEI_SGI_PRIVATE U(8)
+
+/*******************************************************************************
+ * Platform macros to support exception handling framework
+ ******************************************************************************/
+#define PLAT_PRI_BITS U(3)
+#define PLAT_RAS_PRI U(0x10)
+#define PLAT_SDEI_CRITICAL_PRI U(0x20)
+#define PLAT_SDEI_NORMAL_PRI U(0x30)
+#define PLAT_TEGRA_WDT_PRIO U(0x40)
+
+#define PLAT_EHF_DESC EHF_PRI_DESC(PLAT_PRI_BITS,\
+ PLAT_TEGRA_WDT_PRIO)
+
+/*******************************************************************************
+ * SDEI events
+ ******************************************************************************/
+/* SDEI dynamic private event numbers */
+#define TEGRA_SDEI_DP_EVENT_0 U(100)
+#define TEGRA_SDEI_DP_EVENT_1 U(101)
+#define TEGRA_SDEI_DP_EVENT_2 U(102)
+
+/* SDEI dynamic shared event numbers */
+#define TEGRA_SDEI_DS_EVENT_0 U(200)
+#define TEGRA_SDEI_DS_EVENT_1 U(201)
+#define TEGRA_SDEI_DS_EVENT_2 U(202)
+
+/* SDEI explicit events */
+#define TEGRA_SDEI_EP_EVENT_0 U(300)
+#define TEGRA_SDEI_EP_EVENT_1 U(301)
+#define TEGRA_SDEI_EP_EVENT_2 U(302)
+#define TEGRA_SDEI_EP_EVENT_3 U(303)
+#define TEGRA_SDEI_EP_EVENT_4 U(304)
+#define TEGRA_SDEI_EP_EVENT_5 U(305)
+#define TEGRA_SDEI_EP_EVENT_6 U(306)
+#define TEGRA_SDEI_EP_EVENT_7 U(307)
+#define TEGRA_SDEI_EP_EVENT_8 U(308)
+#define TEGRA_SDEI_EP_EVENT_9 U(309)
+#define TEGRA_SDEI_EP_EVENT_10 U(310)
+#define TEGRA_SDEI_EP_EVENT_11 U(311)
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/nvidia/tegra/include/t186/tegra186_private.h b/plat/nvidia/tegra/include/t186/tegra186_private.h
new file mode 100644
index 0000000..4514e14
--- /dev/null
+++ b/plat/nvidia/tegra/include/t186/tegra186_private.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA186_PRIVATE_H
+#define TEGRA186_PRIVATE_H
+
+uint64_t tegra186_get_mc_ctx_size(void);
+
+#endif /* TEGRA186_PRIVATE_H */
diff --git a/plat/nvidia/tegra/include/t186/tegra_def.h b/plat/nvidia/tegra/include/t186/tegra_def.h
new file mode 100644
index 0000000..cf8778b
--- /dev/null
+++ b/plat/nvidia/tegra/include/t186/tegra_def.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA_DEF_H
+#define TEGRA_DEF_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Platform BL31 specific defines.
+ ******************************************************************************/
+#define BL31_SIZE U(0x40000)
+
+/*******************************************************************************
+ * MCE apertures used by the ARI interface
+ *
+ * Aperture 0 - CPU0 (ARM Cortex-A57)
+ * Aperture 1 - CPU1 (ARM Cortex-A57)
+ * Aperture 2 - CPU2 (ARM Cortex-A57)
+ * Aperture 3 - CPU3 (Denver15)
+ * Aperture 4 - CPU4 (Denver15)
+ * Aperture 5 - CPU5 (Denver15)
+ ******************************************************************************/
+#define MCE_ARI_APERTURE_0_OFFSET U(0x0)
+#define MCE_ARI_APERTURE_1_OFFSET U(0x10000)
+#define MCE_ARI_APERTURE_2_OFFSET U(0x20000)
+#define MCE_ARI_APERTURE_3_OFFSET U(0x30000)
+#define MCE_ARI_APERTURE_4_OFFSET U(0x40000)
+#define MCE_ARI_APERTURE_5_OFFSET U(0x50000)
+#define MCE_ARI_APERTURE_OFFSET_MAX MCE_ARI_APERTURE_5_OFFSET
+
+/* number of apertures */
+#define MCE_ARI_APERTURES_MAX U(6)
+
+/* each ARI aperture is 64KB */
+#define MCE_ARI_APERTURE_SIZE U(0x10000)
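+
+/*
+ * Since every aperture is a fixed 64KB window, the aperture offset for a
+ * given core can be derived arithmetically (illustrative sketch only):
+ *
+ *	uint64_t ari_offset = (uint64_t)core_id * MCE_ARI_APERTURE_SIZE;
+ */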
+
+/*******************************************************************************
+ * CPU core id macros for the MCE_ONLINE_CORE ARI
+ ******************************************************************************/
+#define MCE_CORE_ID_MAX U(8)
+#define MCE_CORE_ID_MASK U(0x7)
+
+/*******************************************************************************
+ * These values are used by the PSCI implementation during the `CPU_SUSPEND`
+ * and `SYSTEM_SUSPEND` calls as the `state-id` field in the 'power state'
+ * parameter.
+ ******************************************************************************/
+#define PSTATE_ID_CORE_IDLE U(6)
+#define PSTATE_ID_CORE_POWERDN U(7)
+#define PSTATE_ID_SOC_POWERDN U(2)
+
+/*******************************************************************************
+ * Platform power states (used by PSCI framework)
+ *
+ * - PLAT_MAX_RET_STATE should be less than lowest PSTATE_ID
+ * - PLAT_MAX_OFF_STATE should be greater than the highest PSTATE_ID
+ ******************************************************************************/
+#define PLAT_MAX_RET_STATE U(1)
+#define PLAT_MAX_OFF_STATE U(8)
+
+/*******************************************************************************
+ * Chip specific page table and MMU setup constants
+ ******************************************************************************/
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 35)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 35)
+
+/*******************************************************************************
+ * Secure IRQ definitions
+ ******************************************************************************/
+#define TEGRA186_TOP_WDT_IRQ U(49)
+#define TEGRA186_AON_WDT_IRQ U(50)
+
+#define TEGRA186_SEC_IRQ_TARGET_MASK U(0xF3) /* 4 A57 - 2 Denver */
+
+/*******************************************************************************
+ * Clock identifier for the SE device
+ ******************************************************************************/
+#define TEGRA186_CLK_SE U(103)
+#define TEGRA_CLK_SE TEGRA186_CLK_SE
+
+/*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE U(0x00100000)
+#define HARDWARE_REVISION_OFFSET U(0x4)
+
+#define MISCREG_PFCFG U(0x200C)
+
+/*******************************************************************************
+ * Tegra TSA Controller constants
+ ******************************************************************************/
+#define TEGRA_TSA_BASE U(0x02400000)
+
+/*******************************************************************************
+ * TSA configuration registers
+ ******************************************************************************/
+#define TSA_CONFIG_STATIC0_CSW_SESWR U(0x4010)
+#define TSA_CONFIG_STATIC0_CSW_SESWR_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_ETRW U(0x4038)
+#define TSA_CONFIG_STATIC0_CSW_ETRW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_SDMMCWAB U(0x5010)
+#define TSA_CONFIG_STATIC0_CSW_SDMMCWAB_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_AXISW U(0x7008)
+#define TSA_CONFIG_STATIC0_CSW_AXISW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_HDAW U(0xA008)
+#define TSA_CONFIG_STATIC0_CSW_HDAW_RESET U(0x100)
+#define TSA_CONFIG_STATIC0_CSW_AONDMAW U(0xB018)
+#define TSA_CONFIG_STATIC0_CSW_AONDMAW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_SCEDMAW U(0xD018)
+#define TSA_CONFIG_STATIC0_CSW_SCEDMAW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_BPMPDMAW U(0xD028)
+#define TSA_CONFIG_STATIC0_CSW_BPMPDMAW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_APEDMAW U(0x12018)
+#define TSA_CONFIG_STATIC0_CSW_APEDMAW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_UFSHCW U(0x13008)
+#define TSA_CONFIG_STATIC0_CSW_UFSHCW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_AFIW U(0x13018)
+#define TSA_CONFIG_STATIC0_CSW_AFIW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_SATAW U(0x13028)
+#define TSA_CONFIG_STATIC0_CSW_SATAW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_EQOSW U(0x13038)
+#define TSA_CONFIG_STATIC0_CSW_EQOSW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_XUSB_DEVW U(0x15008)
+#define TSA_CONFIG_STATIC0_CSW_XUSB_DEVW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW U(0x15018)
+#define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW_RESET U(0x1100)
+
+#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK (ULL(0x3) << 11)
+#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU (ULL(0) << 11)
+
+/*******************************************************************************
+ * Tegra General Purpose Centralised DMA constants
+ ******************************************************************************/
+#define TEGRA_GPCDMA_BASE ULL(0x2610000)
+
+/*******************************************************************************
+ * Tegra Memory Controller constants
+ ******************************************************************************/
+#define TEGRA_MC_STREAMID_BASE U(0x02C00000)
+#define TEGRA_MC_BASE U(0x02C10000)
+
+/* General Security Carveout register macros */
+#define MC_GSC_CONFIG_REGS_SIZE U(0x40)
+#define MC_GSC_LOCK_CFG_SETTINGS_BIT (U(1) << 1)
+#define MC_GSC_ENABLE_TZ_LOCK_BIT (ULL(1) << 0)
+#define MC_GSC_SIZE_RANGE_4KB_SHIFT U(27)
+#define MC_GSC_BASE_LO_SHIFT U(12)
+#define MC_GSC_BASE_LO_MASK U(0xFFFFF)
+#define MC_GSC_BASE_HI_SHIFT U(0)
+#define MC_GSC_BASE_HI_MASK U(3)
+#define MC_GSC_ENABLE_CPU_SECURE_BIT (U(1) << 31)
+
+/* TZDRAM carveout configuration registers */
+#define MC_SECURITY_CFG0_0 U(0x70)
+#define MC_SECURITY_CFG1_0 U(0x74)
+#define MC_SECURITY_CFG3_0 U(0x9BC)
+
+#define MC_SECURITY_BOM_MASK (U(0xFFF) << 20)
+#define MC_SECURITY_SIZE_MB_MASK (U(0x1FFF) << 0)
+#define MC_SECURITY_BOM_HI_MASK (U(0x3) << 0)
+
+/* Video Memory carveout configuration registers */
+#define MC_VIDEO_PROTECT_BASE_HI U(0x978)
+#define MC_VIDEO_PROTECT_BASE_LO U(0x648)
+#define MC_VIDEO_PROTECT_SIZE_MB U(0x64C)
+#define MC_VIDEO_PROTECT_REG_CTRL U(0x650)
+#define MC_VIDEO_PROTECT_WRITE_ACCESS_ENABLED U(3)
+
+/*
+ * Carveout (MC_SECURITY_CARVEOUT24) registers used to clear the
+ * non-overlapping Video memory region
+ */
+#define MC_VIDEO_PROTECT_CLEAR_CFG U(0x25A0)
+#define MC_VIDEO_PROTECT_CLEAR_BASE_LO U(0x25A4)
+#define MC_VIDEO_PROTECT_CLEAR_BASE_HI U(0x25A8)
+#define MC_VIDEO_PROTECT_CLEAR_SIZE U(0x25AC)
+#define MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0 U(0x25B0)
+
+/* TZRAM carveout (MC_SECURITY_CARVEOUT11) configuration registers */
+#define MC_TZRAM_CARVEOUT_CFG U(0x2190)
+#define MC_TZRAM_BASE_LO U(0x2194)
+#define MC_TZRAM_BASE_HI U(0x2198)
+#define MC_TZRAM_SIZE U(0x219C)
+#define MC_TZRAM_CLIENT_ACCESS0_CFG0 U(0x21A0)
+#define MC_TZRAM_CLIENT_ACCESS1_CFG0 U(0x21A4)
+#define TZRAM_ALLOW_MPCORER (U(1) << 7)
+#define TZRAM_ALLOW_MPCOREW (U(1) << 25)
+
+/*******************************************************************************
+ * Tegra UART Controller constants
+ ******************************************************************************/
+#define TEGRA_UARTA_BASE U(0x03100000)
+#define TEGRA_UARTB_BASE U(0x03110000)
+#define TEGRA_UARTC_BASE U(0x0C280000)
+#define TEGRA_UARTD_BASE U(0x03130000)
+#define TEGRA_UARTE_BASE U(0x03140000)
+#define TEGRA_UARTF_BASE U(0x03150000)
+#define TEGRA_UARTG_BASE U(0x0C290000)
+
+/*******************************************************************************
+ * Tegra Fuse Controller related constants
+ ******************************************************************************/
+#define TEGRA_FUSE_BASE U(0x03820000)
+#define OPT_SUBREVISION U(0x248)
+#define SUBREVISION_MASK U(0xFF)
+
+/*******************************************************************************
+ * GICv2 & interrupt handling related constants
+ ******************************************************************************/
+#define TEGRA_GICD_BASE U(0x03881000)
+#define TEGRA_GICC_BASE U(0x03882000)
+
+/*******************************************************************************
+ * Security Engine related constants
+ ******************************************************************************/
+#define TEGRA_SE0_BASE U(0x03AC0000)
+#define SE_MUTEX_WATCHDOG_NS_LIMIT U(0x6C)
+#define TEGRA_PKA1_BASE U(0x03AD0000)
+#define PKA_MUTEX_WATCHDOG_NS_LIMIT U(0x8144)
+#define TEGRA_RNG1_BASE U(0x03AE0000)
+#define RNG_MUTEX_WATCHDOG_NS_LIMIT U(0xFE0)
+
+/*******************************************************************************
+ * Tegra HSP doorbell #0 constants
+ ******************************************************************************/
+#define TEGRA_HSP_DBELL_BASE U(0x03C90000)
+#define HSP_DBELL_1_ENABLE U(0x104)
+#define HSP_DBELL_3_TRIGGER U(0x300)
+#define HSP_DBELL_3_ENABLE U(0x304)
+
+/*******************************************************************************
+ * Tegra Clock and Reset Controller constants
+ ******************************************************************************/
+#define TEGRA_CAR_RESET_BASE U(0x05000000)
+#define TEGRA_GPU_RESET_REG_OFFSET U(0x30)
+#define TEGRA_GPU_RESET_GPU_SET_OFFSET U(0x34)
+#define GPU_RESET_BIT (U(1) << 0)
+#define GPU_SET_BIT (U(1) << 0)
+#define TEGRA_GPCDMA_RST_SET_REG_OFFSET U(0x6A0004)
+#define TEGRA_GPCDMA_RST_CLR_REG_OFFSET U(0x6A0008)
+
+/*******************************************************************************
+ * Tegra micro-seconds timer constants
+ ******************************************************************************/
+#define TEGRA_TMRUS_BASE U(0x0C2E0000)
+#define TEGRA_TMRUS_SIZE U(0x1000)
+
+/*******************************************************************************
+ * Tegra Power Mgmt Controller constants
+ ******************************************************************************/
+#define TEGRA_PMC_BASE U(0x0C360000)
+
+/*******************************************************************************
+ * Tegra scratch registers constants
+ ******************************************************************************/
+#define TEGRA_SCRATCH_BASE U(0x0C390000)
+#define SECURE_SCRATCH_RSV0_HI U(0x654)
+#define SECURE_SCRATCH_RSV1_LO U(0x658)
+#define SECURE_SCRATCH_RSV1_HI U(0x65C)
+#define SECURE_SCRATCH_RSV6 U(0x680)
+#define SECURE_SCRATCH_RSV11_LO U(0x6A8)
+#define SECURE_SCRATCH_RSV11_HI U(0x6AC)
+#define SECURE_SCRATCH_RSV53_LO U(0x7F8)
+#define SECURE_SCRATCH_RSV53_HI U(0x7FC)
+#define SECURE_SCRATCH_RSV55_LO U(0x808)
+#define SECURE_SCRATCH_RSV55_HI U(0x80C)
+#define SECURE_SCRATCH_RSV63_LO U(0x848)
+#define SECURE_SCRATCH_RSV63_HI U(0x84C)
+#define SECURE_SCRATCH_RSV64_LO U(0x850)
+#define SECURE_SCRATCH_RSV64_HI U(0x854)
+#define SECURE_SCRATCH_RSV65_LO U(0x858)
+#define SECURE_SCRATCH_RSV65_HI U(0x85c)
+#define SECURE_SCRATCH_RSV66_LO U(0x860)
+#define SECURE_SCRATCH_RSV66_HI U(0x864)
+#define SECURE_SCRATCH_RSV68_LO U(0x870)
+
+#define SCRATCH_RESET_VECTOR_LO SECURE_SCRATCH_RSV1_LO
+#define SCRATCH_RESET_VECTOR_HI SECURE_SCRATCH_RSV1_HI
+#define SCRATCH_SECURE_BOOTP_FCFG SECURE_SCRATCH_RSV6
+#define SCRATCH_MC_TABLE_ADDR_LO SECURE_SCRATCH_RSV11_LO
+#define SCRATCH_MC_TABLE_ADDR_HI SECURE_SCRATCH_RSV11_HI
+#define SCRATCH_BL31_PARAMS_ADDR SECURE_SCRATCH_RSV53_LO
+#define SCRATCH_BL31_PLAT_PARAMS_ADDR SECURE_SCRATCH_RSV53_HI
+#define SCRATCH_TZDRAM_ADDR_LO SECURE_SCRATCH_RSV55_LO
+#define SCRATCH_TZDRAM_ADDR_HI SECURE_SCRATCH_RSV55_HI
+
+/*******************************************************************************
+ * Tegra Memory Mapped Control Register Access constants
+ ******************************************************************************/
+#define TEGRA_MMCRAB_BASE U(0x0E000000)
+
+/*******************************************************************************
+ * Tegra Memory Mapped Activity Monitor Register Access constants
+ ******************************************************************************/
+#define TEGRA_ARM_ACTMON_CTR_BASE U(0x0E060000)
+#define TEGRA_DENVER_ACTMON_CTR_BASE U(0x0E070000)
+
+/*******************************************************************************
+ * Tegra SMMU Controller constants
+ ******************************************************************************/
+#define TEGRA_SMMU0_BASE U(0x12000000)
+
+/*******************************************************************************
+ * Tegra TZRAM constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_BASE U(0x30000000)
+#define TEGRA_TZRAM_SIZE U(0x40000)
+
+/*******************************************************************************
+ * Tegra CCPLEX-BPMP IPC constants
+ ******************************************************************************/
+#define TEGRA_BPMP_IPC_TX_PHYS_BASE U(0x3004C000)
+#define TEGRA_BPMP_IPC_RX_PHYS_BASE U(0x3004D000)
+#define TEGRA_BPMP_IPC_CH_MAP_SIZE U(0x1000) /* 4KB */
+
+/*******************************************************************************
+ * Tegra DRAM memory base address
+ ******************************************************************************/
+#define TEGRA_DRAM_BASE ULL(0x80000000)
+#define TEGRA_DRAM_END ULL(0x27FFFFFFF)
+
+#endif /* TEGRA_DEF_H */
diff --git a/plat/nvidia/tegra/include/t186/tegra_mc_def.h b/plat/nvidia/tegra/include/t186/tegra_mc_def.h
new file mode 100644
index 0000000..fa44772
--- /dev/null
+++ b/plat/nvidia/tegra/include/t186/tegra_mc_def.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA_MC_DEF_H
+#define TEGRA_MC_DEF_H
+
+/*******************************************************************************
+ * Memory Controller's PCFIFO client configuration registers
+ ******************************************************************************/
+#define MC_PCFIFO_CLIENT_CONFIG0 0xdd0U
+
+#define MC_PCFIFO_CLIENT_CONFIG1 0xdd4U
+#define MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL 0x20000U
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_UNORDERED (0U << 17)
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_MASK (1U << 17)
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_UNORDERED (0U << 21)
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_MASK (1U << 21)
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_UNORDERED (0U << 29)
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_MASK (1U << 29)
+
+#define MC_PCFIFO_CLIENT_CONFIG2 0xdd8U
+#define MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL 0x20000U
+#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_UNORDERED (0U << 11)
+#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_MASK (1U << 11)
+#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_UNORDERED (0U << 13)
+#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_MASK (1U << 13)
+
+#define MC_PCFIFO_CLIENT_CONFIG3 0xddcU
+#define MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL 0U
+#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_UNORDERED (0U << 7)
+#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_MASK (1U << 7)
+
+#define MC_PCFIFO_CLIENT_CONFIG4 0xde0U
+#define MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL 0U
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_UNORDERED (0U << 1)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_MASK (1U << 1)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_UNORDERED (0U << 5)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_MASK (1U << 5)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_UNORDERED (0U << 13)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_MASK (1U << 13)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_UNORDERED (0U << 15)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_ORDERED (1U << 15)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_MASK (1U << 15)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_UNORDERED (0U << 17)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_MASK (1U << 17)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_UNORDERED (0U << 22)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_MASK (1U << 22)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_UNORDERED (0U << 26)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_MASK (1U << 26)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_UNORDERED (0U << 30)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_MASK (1U << 30)
+
+#define MC_PCFIFO_CLIENT_CONFIG5 0xbf4U
+#define MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL 0U
+#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_UNORDERED (0U << 0)
+#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_MASK (1U << 0)
+
+/*******************************************************************************
+ * Stream ID Override Config registers
+ ******************************************************************************/
+#define MC_STREAMID_OVERRIDE_CFG_PTCR 0x000U
+#define MC_STREAMID_OVERRIDE_CFG_AFIR 0x070U
+#define MC_STREAMID_OVERRIDE_CFG_HDAR 0x0A8U
+#define MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR 0x0B0U
+#define MC_STREAMID_OVERRIDE_CFG_NVENCSRD 0x0E0U
+#define MC_STREAMID_OVERRIDE_CFG_SATAR 0x0F8U
+#define MC_STREAMID_OVERRIDE_CFG_MPCORER 0x138U
+#define MC_STREAMID_OVERRIDE_CFG_NVENCSWR 0x158U
+#define MC_STREAMID_OVERRIDE_CFG_AFIW 0x188U
+#define MC_STREAMID_OVERRIDE_CFG_HDAW 0x1A8U
+#define MC_STREAMID_OVERRIDE_CFG_MPCOREW 0x1C8U
+#define MC_STREAMID_OVERRIDE_CFG_SATAW 0x1E8U
+#define MC_STREAMID_OVERRIDE_CFG_ISPRA 0x220U
+#define MC_STREAMID_OVERRIDE_CFG_ISPWA 0x230U
+#define MC_STREAMID_OVERRIDE_CFG_ISPWB 0x238U
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR 0x250U
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW 0x258U
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR 0x260U
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW 0x268U
+#define MC_STREAMID_OVERRIDE_CFG_TSECSRD 0x2A0U
+#define MC_STREAMID_OVERRIDE_CFG_TSECSWR 0x2A8U
+#define MC_STREAMID_OVERRIDE_CFG_GPUSRD 0x2C0U
+#define MC_STREAMID_OVERRIDE_CFG_GPUSWR 0x2C8U
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRA 0x300U
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAA 0x308U
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCR 0x310U
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAB 0x318U
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWA 0x320U
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAA 0x328U
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCW 0x330U
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAB 0x338U
+#define MC_STREAMID_OVERRIDE_CFG_VICSRD 0x360U
+#define MC_STREAMID_OVERRIDE_CFG_VICSWR 0x368U
+#define MC_STREAMID_OVERRIDE_CFG_VIW 0x390U
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD 0x3C0U
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSWR 0x3C8U
+#define MC_STREAMID_OVERRIDE_CFG_APER 0x3D0U
+#define MC_STREAMID_OVERRIDE_CFG_APEW 0x3D8U
+#define MC_STREAMID_OVERRIDE_CFG_NVJPGSRD 0x3F0U
+#define MC_STREAMID_OVERRIDE_CFG_NVJPGSWR 0x3F8U
+#define MC_STREAMID_OVERRIDE_CFG_SESRD 0x400U
+#define MC_STREAMID_OVERRIDE_CFG_SESWR 0x408U
+#define MC_STREAMID_OVERRIDE_CFG_ETRR 0x420U
+#define MC_STREAMID_OVERRIDE_CFG_ETRW 0x428U
+#define MC_STREAMID_OVERRIDE_CFG_TSECSRDB 0x430U
+#define MC_STREAMID_OVERRIDE_CFG_TSECSWRB 0x438U
+#define MC_STREAMID_OVERRIDE_CFG_GPUSRD2 0x440U
+#define MC_STREAMID_OVERRIDE_CFG_GPUSWR2 0x448U
+#define MC_STREAMID_OVERRIDE_CFG_AXISR 0x460U
+#define MC_STREAMID_OVERRIDE_CFG_AXISW 0x468U
+#define MC_STREAMID_OVERRIDE_CFG_EQOSR 0x470U
+#define MC_STREAMID_OVERRIDE_CFG_EQOSW 0x478U
+#define MC_STREAMID_OVERRIDE_CFG_UFSHCR 0x480U
+#define MC_STREAMID_OVERRIDE_CFG_UFSHCW 0x488U
+#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR 0x490U
+#define MC_STREAMID_OVERRIDE_CFG_BPMPR 0x498U
+#define MC_STREAMID_OVERRIDE_CFG_BPMPW 0x4A0U
+#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAR 0x4A8U
+#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAW 0x4B0U
+#define MC_STREAMID_OVERRIDE_CFG_AONR 0x4B8U
+#define MC_STREAMID_OVERRIDE_CFG_AONW 0x4C0U
+#define MC_STREAMID_OVERRIDE_CFG_AONDMAR 0x4C8U
+#define MC_STREAMID_OVERRIDE_CFG_AONDMAW 0x4D0U
+#define MC_STREAMID_OVERRIDE_CFG_SCER 0x4D8U
+#define MC_STREAMID_OVERRIDE_CFG_SCEW 0x4E0U
+#define MC_STREAMID_OVERRIDE_CFG_SCEDMAR 0x4E8U
+#define MC_STREAMID_OVERRIDE_CFG_SCEDMAW 0x4F0U
+#define MC_STREAMID_OVERRIDE_CFG_APEDMAR 0x4F8U
+#define MC_STREAMID_OVERRIDE_CFG_APEDMAW 0x500U
+#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1 0x508U
+#define MC_STREAMID_OVERRIDE_CFG_VICSRD1 0x510U
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD1 0x518U
+
+/*******************************************************************************
+ * Macro to calculate Security cfg register addr from StreamID Override register
+ ******************************************************************************/
+#define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr) ((addr) + (uint32_t)sizeof(uint32_t))
+
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_SO_DEV (0U << 4)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_SO_DEV (1U << 4)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SO_DEV (2U << 4)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_SO_DEV (3U << 4)
+
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_NORMAL (0U << 8)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_NORMAL (1U << 8)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_NORMAL (2U << 8)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_NORMAL (3U << 8)
+
+#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_ZERO (0U << 12)
+#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_CLIENT_AXI_ID (1U << 12)
+
+/*******************************************************************************
+ * Memory Controller transaction override config registers
+ ******************************************************************************/
+#define MC_TXN_OVERRIDE_CONFIG_HDAR 0x10a8U
+#define MC_TXN_OVERRIDE_CONFIG_BPMPW 0x14a0U
+#define MC_TXN_OVERRIDE_CONFIG_PTCR 0x1000U
+#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR 0x1490U
+#define MC_TXN_OVERRIDE_CONFIG_EQOSW 0x1478U
+#define MC_TXN_OVERRIDE_CONFIG_NVJPGSWR 0x13f8U
+#define MC_TXN_OVERRIDE_CONFIG_ISPRA 0x1220U
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAA 0x1328U
+#define MC_TXN_OVERRIDE_CONFIG_VICSRD 0x1360U
+#define MC_TXN_OVERRIDE_CONFIG_MPCOREW 0x11c8U
+#define MC_TXN_OVERRIDE_CONFIG_GPUSRD 0x12c0U
+#define MC_TXN_OVERRIDE_CONFIG_AXISR 0x1460U
+#define MC_TXN_OVERRIDE_CONFIG_SCEDMAW 0x14f0U
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCW 0x1330U
+#define MC_TXN_OVERRIDE_CONFIG_EQOSR 0x1470U
+#define MC_TXN_OVERRIDE_CONFIG_APEDMAR 0x14f8U
+#define MC_TXN_OVERRIDE_CONFIG_NVENCSRD 0x10e0U
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAB 0x1318U
+#define MC_TXN_OVERRIDE_CONFIG_VICSRD1 0x1510U
+#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAR 0x14a8U
+#define MC_TXN_OVERRIDE_CONFIG_VIW 0x1390U
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAA 0x1308U
+#define MC_TXN_OVERRIDE_CONFIG_AXISW 0x1468U
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVR 0x1260U
+#define MC_TXN_OVERRIDE_CONFIG_UFSHCR 0x1480U
+#define MC_TXN_OVERRIDE_CONFIG_TSECSWR 0x12a8U
+#define MC_TXN_OVERRIDE_CONFIG_GPUSWR 0x12c8U
+#define MC_TXN_OVERRIDE_CONFIG_SATAR 0x10f8U
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTW 0x1258U
+#define MC_TXN_OVERRIDE_CONFIG_TSECSWRB 0x1438U
+#define MC_TXN_OVERRIDE_CONFIG_GPUSRD2 0x1440U
+#define MC_TXN_OVERRIDE_CONFIG_SCEDMAR 0x14e8U
+#define MC_TXN_OVERRIDE_CONFIG_GPUSWR2 0x1448U
+#define MC_TXN_OVERRIDE_CONFIG_AONDMAW 0x14d0U
+#define MC_TXN_OVERRIDE_CONFIG_APEDMAW 0x1500U
+#define MC_TXN_OVERRIDE_CONFIG_AONW 0x14c0U
+#define MC_TXN_OVERRIDE_CONFIG_HOST1XDMAR 0x10b0U
+#define MC_TXN_OVERRIDE_CONFIG_ETRR 0x1420U
+#define MC_TXN_OVERRIDE_CONFIG_SESWR 0x1408U
+#define MC_TXN_OVERRIDE_CONFIG_NVJPGSRD 0x13f0U
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD 0x13c0U
+#define MC_TXN_OVERRIDE_CONFIG_TSECSRDB 0x1430U
+#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAW 0x14b0U
+#define MC_TXN_OVERRIDE_CONFIG_APER 0x13d0U
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD1 0x1518U
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTR 0x1250U
+#define MC_TXN_OVERRIDE_CONFIG_ISPWA 0x1230U
+#define MC_TXN_OVERRIDE_CONFIG_SESRD 0x1400U
+#define MC_TXN_OVERRIDE_CONFIG_SCER 0x14d8U
+#define MC_TXN_OVERRIDE_CONFIG_AONR 0x14b8U
+#define MC_TXN_OVERRIDE_CONFIG_MPCORER 0x1138U
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWA 0x1320U
+#define MC_TXN_OVERRIDE_CONFIG_HDAW 0x11a8U
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSWR 0x13c8U
+#define MC_TXN_OVERRIDE_CONFIG_UFSHCW 0x1488U
+#define MC_TXN_OVERRIDE_CONFIG_AONDMAR 0x14c8U
+#define MC_TXN_OVERRIDE_CONFIG_SATAW 0x11e8U
+#define MC_TXN_OVERRIDE_CONFIG_ETRW 0x1428U
+#define MC_TXN_OVERRIDE_CONFIG_VICSWR 0x1368U
+#define MC_TXN_OVERRIDE_CONFIG_NVENCSWR 0x1158U
+#define MC_TXN_OVERRIDE_CONFIG_AFIR 0x1070U
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAB 0x1338U
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRA 0x1300U
+#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR1 0x1508U
+#define MC_TXN_OVERRIDE_CONFIG_ISPWB 0x1238U
+#define MC_TXN_OVERRIDE_CONFIG_BPMPR 0x1498U
+#define MC_TXN_OVERRIDE_CONFIG_APEW 0x13d8U
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCR 0x1310U
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVW 0x1268U
+#define MC_TXN_OVERRIDE_CONFIG_TSECSRD 0x12a0U
+#define MC_TXN_OVERRIDE_CONFIG_AFIW 0x1188U
+#define MC_TXN_OVERRIDE_CONFIG_SCEW 0x14e0U
+
+#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID (1U << 0)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV (2U << 4)
+#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT (1U << 12)
+
+/*******************************************************************************
+ * Override values for the CGID_TAG bitfield in the
+ * MC_TXN_OVERRIDE_CONFIG_{module} registers, for non-SO_DEV transactions
+ ******************************************************************************/
+#define MC_TXN_OVERRIDE_CGID_TAG_DEFAULT 0U
+#define MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID 1U
+#define MC_TXN_OVERRIDE_CGID_TAG_ZERO 2U
+#define MC_TXN_OVERRIDE_CGID_TAG_ADR 3U
+#define MC_TXN_OVERRIDE_CGID_TAG_MASK 3ULL
+
+/*******************************************************************************
+ * Memory Controller Reset Control registers
+ ******************************************************************************/
+#define MC_CLIENT_HOTRESET_CTRL0 0x200U
+#define MC_CLIENT_HOTRESET_CTRL0_RESET_VAL 0U
+#define MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB (1U << 0)
+#define MC_CLIENT_HOTRESET_CTRL0_HC_FLUSH_ENB (1U << 6)
+#define MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB (1U << 7)
+#define MC_CLIENT_HOTRESET_CTRL0_ISP2_FLUSH_ENB (1U << 8)
+#define MC_CLIENT_HOTRESET_CTRL0_MPCORE_FLUSH_ENB (1U << 9)
+#define MC_CLIENT_HOTRESET_CTRL0_NVENC_FLUSH_ENB (1U << 11)
+#define MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB (1U << 15)
+#define MC_CLIENT_HOTRESET_CTRL0_VI_FLUSH_ENB (1U << 17)
+#define MC_CLIENT_HOTRESET_CTRL0_VIC_FLUSH_ENB (1U << 18)
+#define MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB (1U << 19)
+#define MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB (1U << 20)
+#define MC_CLIENT_HOTRESET_CTRL0_TSEC_FLUSH_ENB (1U << 22)
+#define MC_CLIENT_HOTRESET_CTRL0_SDMMC1A_FLUSH_ENB (1U << 29)
+#define MC_CLIENT_HOTRESET_CTRL0_SDMMC2A_FLUSH_ENB (1U << 30)
+#define MC_CLIENT_HOTRESET_CTRL0_SDMMC3A_FLUSH_ENB (1U << 31)
+#define MC_CLIENT_HOTRESET_STATUS0 0x204U
+#define MC_CLIENT_HOTRESET_CTRL1 0x970U
+#define MC_CLIENT_HOTRESET_CTRL1_RESET_VAL 0U
+#define MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB (1U << 0)
+#define MC_CLIENT_HOTRESET_CTRL1_GPU_FLUSH_ENB (1U << 2)
+#define MC_CLIENT_HOTRESET_CTRL1_NVDEC_FLUSH_ENB (1U << 5)
+#define MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB (1U << 6)
+#define MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB (1U << 7)
+#define MC_CLIENT_HOTRESET_CTRL1_NVJPG_FLUSH_ENB (1U << 8)
+#define MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB (1U << 12)
+#define MC_CLIENT_HOTRESET_CTRL1_TSECB_FLUSH_ENB (1U << 13)
+#define MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB (1U << 18)
+#define MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB (1U << 19)
+#define MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB (1U << 20)
+#define MC_CLIENT_HOTRESET_CTRL1_NVDISPLAY_FLUSH_ENB (1U << 21)
+#define MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB (1U << 22)
+#define MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB (1U << 23)
+#define MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB (1U << 24)
+#define MC_CLIENT_HOTRESET_STATUS1 0x974U
+
+#ifndef __ASSEMBLER__
+
+/*******************************************************************************
+ * Structure to hold the transaction override settings used to override
+ * client inputs
+ ******************************************************************************/
+typedef struct mc_txn_override_cfg {
+ uint32_t offset;
+ uint8_t cgid_tag;
+} mc_txn_override_cfg_t;
+
+#define mc_make_txn_override_cfg(off, val) \
+ { \
+ .offset = MC_TXN_OVERRIDE_CONFIG_ ## off, \
+ .cgid_tag = MC_TXN_OVERRIDE_ ## val \
+ }
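+
+/*
+ * Illustrative sketch: an entry built with the helper above, pairing the
+ * HDAR override register with the "client AXI ID" CGID tag. The argument
+ * tokens are pasted onto the MC_TXN_OVERRIDE_CONFIG_/MC_TXN_OVERRIDE_
+ * prefixes.
+ *
+ *	static const mc_txn_override_cfg_t hdar_cfg =
+ *		mc_make_txn_override_cfg(HDAR, CGID_TAG_CLIENT_AXI_ID);
+ */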
+
+/*******************************************************************************
+ * Structure to hold the Stream ID used to override client inputs
+ ******************************************************************************/
+typedef struct mc_streamid_override_cfg {
+ uint32_t offset;
+ uint8_t stream_id;
+} mc_streamid_override_cfg_t;
+
+/*******************************************************************************
+ * Structure to hold the Stream ID Security Configuration settings
+ ******************************************************************************/
+typedef struct mc_streamid_security_cfg {
+ char *name;
+ uint32_t offset;
+ uint32_t override_enable;
+ uint32_t override_client_inputs;
+ uint32_t override_client_ns_flag;
+} mc_streamid_security_cfg_t;
+
+#define OVERRIDE_DISABLE 1U
+#define OVERRIDE_ENABLE 0U
+#define CLIENT_FLAG_SECURE 0U
+#define CLIENT_FLAG_NON_SECURE 1U
+#define CLIENT_INPUTS_OVERRIDE 1U
+#define CLIENT_INPUTS_NO_OVERRIDE 0U
+
+/*******************************************************************************
+ * StreamID to indicate no SMMU translations (requests to be steered on the
+ * SMMU bypass path)
+ ******************************************************************************/
+#define MC_STREAM_ID_MAX 0x7FU
+
+#define mc_make_sec_cfg(off, ns, ovrrd, access) \
+ { \
+ .name = # off, \
+ .offset = MC_STREAMID_OVERRIDE_TO_SECURITY_CFG( \
+ MC_STREAMID_OVERRIDE_CFG_ ## off), \
+ .override_client_ns_flag = CLIENT_FLAG_ ## ns, \
+ .override_client_inputs = CLIENT_INPUTS_ ## ovrrd, \
+ .override_enable = OVERRIDE_ ## access \
+ }
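+
+/*
+ * Illustrative sketch: a security configuration entry marking the SCEW
+ * client as non-secure, with client inputs not overridden. The ns/ovrrd/
+ * access tokens are pasted onto the CLIENT_FLAG_/CLIENT_INPUTS_/OVERRIDE_
+ * prefixes above.
+ *
+ *	mc_make_sec_cfg(SCEW, NON_SECURE, NO_OVERRIDE, ENABLE)
+ */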
+
+#define mc_make_sid_override_cfg(name) \
+ { \
+ .reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_OVERRIDE_CFG_ ## name, \
+ .val = 0x00000000U, \
+ }
+
+#define mc_make_sid_security_cfg(name) \
+ { \
+ .reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(MC_STREAMID_OVERRIDE_CFG_ ## name), \
+ .val = 0x00000000U, \
+ }
+
+#define mc_set_pcfifo_unordered_boot_so_mss(id, client) \
+ ((uint32_t)~MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_MASK | \
+ MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_UNORDERED)
+
+#define mc_set_pcfifo_ordered_boot_so_mss(id, client) \
+ MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_ORDERED
+
+#define mc_set_tsa_passthrough(client) \
+ do { \
+ mmio_write_32(TEGRA_TSA_BASE + TSA_CONFIG_STATIC0_CSW_##client, \
+ (TSA_CONFIG_STATIC0_CSW_##client##_RESET & \
+ (uint32_t)~TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK) | \
+ (uint32_t)TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
+ } while (0)
+
+#define mc_set_tsa_w_passthrough(client) \
+ do { \
+ mmio_write_32(TEGRA_TSA_BASE + TSA_CONFIG_STATIC0_CSW_##client, \
+ (TSA_CONFIG_STATIC0_CSW_RESET_W & \
+ (uint32_t)~TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK) | \
+ (uint32_t)TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
+ } while (0)
+
+#define mc_set_tsa_r_passthrough(client) \
+ do { \
+ mmio_write_32(TEGRA_TSA_BASE + TSA_CONFIG_STATIC0_CSR_##client, \
+ (TSA_CONFIG_STATIC0_CSR_RESET_R & \
+ (uint32_t)~TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK) | \
+ (uint32_t)TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
+ } while (0)
+
+#define mc_set_txn_override(client, normal_axi_id, so_dev_axi_id, normal_override, so_dev_override) \
+ do { \
+ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
+ MC_TXN_OVERRIDE_##normal_axi_id | \
+ MC_TXN_OVERRIDE_CONFIG_COH_PATH_##so_dev_override##_SO_DEV | \
+ MC_TXN_OVERRIDE_CONFIG_COH_PATH_##normal_override##_NORMAL | \
+ MC_TXN_OVERRIDE_CONFIG_CGID_##so_dev_axi_id); \
+ } while (0)
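+
+/*
+ * Illustrative sketch: forcing non-coherent paths for the HDAR client, with
+ * the address used as CGID tag for normal requests and zero for SO_DEV
+ * requests.
+ *
+ *	mc_set_txn_override(HDAR, CGID_TAG_ADR, SO_DEV_ZERO,
+ *			    FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ */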
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* TEGRA_MC_DEF_H */
diff --git a/plat/nvidia/tegra/include/t194/tegra194_private.h b/plat/nvidia/tegra/include/t194/tegra194_private.h
new file mode 100644
index 0000000..c5a51e9
--- /dev/null
+++ b/plat/nvidia/tegra/include/t194/tegra194_private.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA194_PRIVATE_H
+#define TEGRA194_PRIVATE_H
+
+void tegra194_cpu_reset_handler(void);
+uint64_t tegra194_get_cpu_reset_handler_base(void);
+uint64_t tegra194_get_cpu_reset_handler_size(void);
+uint64_t tegra194_get_mc_ctx_offset(void);
+void tegra194_set_system_suspend_entry(void);
+
+#endif /* TEGRA194_PRIVATE_H */
diff --git a/plat/nvidia/tegra/include/t194/tegra194_ras_private.h b/plat/nvidia/tegra/include/t194/tegra194_ras_private.h
new file mode 100644
index 0000000..336461a
--- /dev/null
+++ b/plat/nvidia/tegra/include/t194/tegra194_ras_private.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA194_RAS_PRIVATE
+#define TEGRA194_RAS_PRIVATE
+
+#include <stdint.h>
+
+/* Implementation defined RAS error and corresponding error message */
+struct ras_error {
+ const char *error_msg;
+ /* IERR(bits[15:8]) from ERR<n>STATUS */
+ uint8_t error_code;
+};
+
+/* RAS error node-specific auxiliary data */
+struct ras_aux_data {
+ /* name of the current RAS node */
+ const char *name;
+ /* null-terminated ras_error array used to map error codes to messages */
+ const struct ras_error *error_records;
+ /*
+ * function returning the value that must be programmed into ERXCTLR_EL1
+ * to enable all specified RAS errors for the current node.
+ */
+ uint64_t (*err_ctrl)(void);
+};
+
+/* IFU Uncorrectable RAS ERROR */
+#define IFU_UNCORR_RAS_ERROR_LIST(X)
+
+/* JSR_RET Uncorrectable RAS ERROR */
+#define JSR_RET_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(JSR_RET, 35, 0x13, "Floating Point Register File Parity Error") \
+ X(JSR_RET, 34, 0x12, "Integer Register File Parity Error") \
+ X(JSR_RET, 33, 0x11, "Garbage Bundle") \
+ X(JSR_RET, 32, 0x10, "Bundle Completion Timeout")
+
+/* JSR_MTS Uncorrectable RAS ERROR */
+#define JSR_MTS_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(JSR_MTS, 40, 0x28, "CoreSight Access Error") \
+ X(JSR_MTS, 39, 0x27, "Dual Execution Uncorrectable Error") \
+ X(JSR_MTS, 37, 0x25, "CTU MMIO Region") \
+ X(JSR_MTS, 36, 0x24, "MTS MMCRAB Region Access") \
+ X(JSR_MTS, 35, 0x23, "MTS_CARVEOUT Access from ARM SW") \
+ X(JSR_MTS, 34, 0x22, "NAFLL PLL Failure to Lock") \
+ X(JSR_MTS, 32, 0x20, "Internal Uncorrectable MTS Error")
+
+/* LSD_STQ Uncorrectable RAS ERROR */
+#define LSD_STQ_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(LSD_STQ, 41, 0x39, "Coherent Cache Data Store Multi-Line ECC Error") \
+ X(LSD_STQ, 40, 0x38, "Coherent Cache Data Store Uncorrectable ECC Error") \
+ X(LSD_STQ, 38, 0x36, "Coherent Cache Data Load Uncorrectable ECC Error") \
+ X(LSD_STQ, 33, 0x31, "Coherent Cache Tag Store Parity Error") \
+ X(LSD_STQ, 32, 0x30, "Coherent Cache Tag Load Parity Error")
+
+/* LSD_DCC Uncorrectable RAS ERROR */
+#define LSD_DCC_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(LSD_DCC, 41, 0x49, "BTU Copy Mini-Cache PPN Multi-Hit Error") \
+ X(LSD_DCC, 39, 0x47, "Coherent Cache Data Uncorrectable ECC Error") \
+ X(LSD_DCC, 37, 0x45, "Version Cache Byte-Enable Parity Error") \
+ X(LSD_DCC, 36, 0x44, "Version Cache Data Uncorrectable ECC Error") \
+ X(LSD_DCC, 33, 0x41, "BTU Copy Coherent Cache PPN Parity Error") \
+ X(LSD_DCC, 32, 0x40, "BTU Copy Coherent Cache VPN Parity Error")
+
+/* LSD_L1HPF Uncorrectable RAS ERROR */
+#define LSD_L1HPF_UNCORR_RAS_ERROR_LIST(X)
+
+/* L2 Uncorrectable RAS ERROR */
+#define L2_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(L2, 56, 0x68, "URT Timeout") \
+ X(L2, 55, 0x67, "L2 Protocol Violation") \
+ X(L2, 54, 0x66, "SCF to L2 Slave Error Read") \
+ X(L2, 53, 0x65, "SCF to L2 Slave Error Write") \
+ X(L2, 52, 0x64, "SCF to L2 Decode Error Read") \
+ X(L2, 51, 0x63, "SCF to L2 Decode Error Write") \
+ X(L2, 50, 0x62, "SCF to L2 Request Response Interface Parity Errors") \
+ X(L2, 49, 0x61, "SCF to L2 Advance notice interface parity errors") \
+ X(L2, 48, 0x60, "SCF to L2 Filldata Parity Errors") \
+ X(L2, 47, 0x5F, "SCF to L2 UnCorrectable ECC Data Error on interface") \
+ X(L2, 45, 0x5D, "Core 1 to L2 Parity Error") \
+ X(L2, 44, 0x5C, "Core 0 to L2 Parity Error") \
+ X(L2, 43, 0x5B, "L2 Multi-Hit") \
+ X(L2, 42, 0x5A, "L2 URT Tag Parity Error") \
+ X(L2, 41, 0x59, "L2 NTT Tag Parity Error") \
+ X(L2, 40, 0x58, "L2 MLT Tag Parity Error") \
+ X(L2, 39, 0x57, "L2 URD Data") \
+ X(L2, 38, 0x56, "L2 NTP Data") \
+ X(L2, 36, 0x54, "L2 MLC Uncorrectable Clean") \
+ X(L2, 35, 0x53, "L2 URD Uncorrectable Dirty") \
+ X(L2, 34, 0x52, "L2 MLC Uncorrectable Dirty")
+
+/* CLUSTER_CLOCKS Uncorrectable RAS ERROR */
+#define CLUSTER_CLOCKS_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(CLUSTER_CLOCKS, 32, 0xE4, "Frequency Monitor Error")
+
+/* MMU Uncorrectable RAS ERROR */
+#define MMU_UNCORR_RAS_ERROR_LIST(X)
+
+/* L3 Uncorrectable RAS ERROR */
+#define L3_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(L3, 43, 0x7B, "SNOC Interface Parity Error") \
+ X(L3, 42, 0x7A, "MCF Interface Parity Error") \
+ X(L3, 41, 0x79, "L3 Tag Parity Error") \
+ X(L3, 40, 0x78, "L3 Dir Parity Error") \
+ X(L3, 39, 0x77, "L3 Uncorrectable ECC Error") \
+ X(L3, 37, 0x75, "Multi-Hit CAM Error") \
+ X(L3, 36, 0x74, "Multi-Hit Tag Error") \
+ X(L3, 35, 0x73, "Unrecognized Command Error") \
+ X(L3, 34, 0x72, "L3 Protocol Error")
+
+/* CCPMU Uncorrectable RAS ERROR */
+#define CCPMU_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(CCPMU, 40, 0x87, "CoreSight Access Error") \
+ X(CCPMU, 36, 0x84, "MCE Ucode Error") \
+ X(CCPMU, 35, 0x83, "MCE IL1 Parity Error") \
+ X(CCPMU, 34, 0x82, "MCE Timeout Error") \
+ X(CCPMU, 33, 0x81, "CRAB Access Error") \
+ X(CCPMU, 32, 0x80, "MCE Memory Access Error")
+
+/* SCF_IOB Uncorrectable RAS ERROR */
+#define SCF_IOB_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(SCF_IOB, 41, 0x99, "Request parity error") \
+ X(SCF_IOB, 40, 0x98, "Putdata parity error") \
+ X(SCF_IOB, 39, 0x97, "Uncorrectable ECC on Putdata") \
+ X(SCF_IOB, 38, 0x96, "CBB Interface Error") \
+ X(SCF_IOB, 37, 0x95, "MMCRAB Error") \
+ X(SCF_IOB, 36, 0x94, "IHI Interface Error") \
+ X(SCF_IOB, 35, 0x93, "CRI Error") \
+ X(SCF_IOB, 34, 0x92, "TBX Interface Error") \
+ X(SCF_IOB, 33, 0x91, "EVP Interface Error")
+
+/* SCF_SNOC Uncorrectable RAS ERROR */
+#define SCF_SNOC_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(SCF_SNOC, 42, 0xAA, "Misc Client Parity Error") \
+ X(SCF_SNOC, 41, 0xA9, "Misc Filldata Parity Error") \
+ X(SCF_SNOC, 40, 0xA8, "Uncorrectable ECC Misc Client") \
+ X(SCF_SNOC, 39, 0xA7, "DVMU Interface Parity Error") \
+ X(SCF_SNOC, 38, 0xA6, "DVMU Interface Timeout Error") \
+ X(SCF_SNOC, 37, 0xA5, "CPE Request Error") \
+ X(SCF_SNOC, 36, 0xA4, "CPE Response Error") \
+ X(SCF_SNOC, 35, 0xA3, "CPE Timeout Error") \
+ X(SCF_SNOC, 34, 0xA2, "Uncorrectable Carveout Error")
+
+/* SCF_CTU Uncorrectable RAS ERROR */
+#define SCF_CTU_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(SCF_CTU, 39, 0xB7, "Timeout error for TRC_DMA request") \
+ X(SCF_CTU, 38, 0xB6, "Timeout error for CTU Snp") \
+ X(SCF_CTU, 37, 0xB5, "Parity error in CTU TAG RAM") \
+ X(SCF_CTU, 36, 0xB3, "Parity error in CTU DATA RAM") \
+ X(SCF_CTU, 35, 0xB4, "Parity error for Cluster Rsp") \
+ X(SCF_CTU, 34, 0xB2, "Parity error for TRL requests from 9 agents") \
+ X(SCF_CTU, 33, 0xB1, "Parity error for MCF request") \
+ X(SCF_CTU, 32, 0xB0, "TRC DMA fillsnoop parity error")
+
+/* CMU_CLOCKS Uncorrectable RAS ERROR */
+#define CMU_CLOCKS_UNCORR_RAS_ERROR_LIST(X) \
+ /* Name, ERR_CTRL, IERR, ISA Desc */ \
+ X(CMU_CLOCKS, 39, 0xC7, "Cluster 3 frequency monitor error") \
+ X(CMU_CLOCKS, 38, 0xC6, "Cluster 2 frequency monitor error") \
+ X(CMU_CLOCKS, 37, 0xC5, "Cluster 1 frequency monitor error") \
+ X(CMU_CLOCKS, 36, 0xC3, "Cluster 0 frequency monitor error") \
+ X(CMU_CLOCKS, 35, 0xC4, "Voltage error on ADC1 Monitored Logic") \
+ X(CMU_CLOCKS, 34, 0xC2, "Voltage error on ADC0 Monitored Logic") \
+ X(CMU_CLOCKS, 33, 0xC1, "Lookup Table 1 Parity Error") \
+ X(CMU_CLOCKS, 32, 0xC0, "Lookup Table 0 Parity Error")
+
+/*
+ * Define one ras_error entry.
+ *
+ * This macro will be used to generate ras_error records for each node
+ * defined by <NODE_NAME>_UNCORR_RAS_ERROR_LIST macro.
+ */
+#define DEFINE_ONE_RAS_ERROR_MSG(unit, ras_bit, ierr, msg) \
+ { \
+ .error_msg = (msg), \
+ .error_code = (ierr) \
+ },
+
+/*
+ * Set one implementation defined bit in ERR<n>CTLR
+ *
+ * This macro will be used to collect all defined ERR_CTRL bits for each node
+ * defined by <NODE_NAME>_UNCORR_RAS_ERROR_LIST macro.
+ */
+#define DEFINE_ENABLE_RAS_BIT(unit, ras_bit, ierr, msg) \
+ do { \
+ val |= (1ULL << ras_bit##U); \
+ } while (0);
+
+/* Represent one RAS node with 0 or more error bits (ERR_CTLR) enabled */
+#define DEFINE_ONE_RAS_NODE(node) \
+static const struct ras_error node##_uncorr_ras_errors[] = { \
+ node##_UNCORR_RAS_ERROR_LIST(DEFINE_ONE_RAS_ERROR_MSG) \
+ { \
+ NULL, \
+ 0U \
+ }, \
+}; \
+static inline uint64_t node##_err_ctrl(void) \
+{ \
+ uint64_t val = 0ULL; \
+ node##_UNCORR_RAS_ERROR_LIST(DEFINE_ENABLE_RAS_BIT) \
+ return val; \
+}
+
+#define DEFINE_ONE_RAS_AUX_DATA(node) \
+ { \
+ .name = #node, \
+ .error_records = node##_uncorr_ras_errors, \
+ .err_ctrl = &node##_err_ctrl \
+ },
+
+#define PER_CORE_RAS_NODE_LIST(X) \
+ X(IFU) \
+ X(JSR_RET) \
+ X(JSR_MTS) \
+ X(LSD_STQ) \
+ X(LSD_DCC) \
+ X(LSD_L1HPF)
+
+#define PER_CORE_RAS_GROUP_NODES PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA)
+
+#define PER_CLUSTER_RAS_NODE_LIST(X) \
+ X(L2) \
+ X(CLUSTER_CLOCKS) \
+ X(MMU)
+
+#define PER_CLUSTER_RAS_GROUP_NODES PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA)
+
+#define SCF_L3_BANK_RAS_NODE_LIST(X) X(L3)
+
+/* There are 4 SCF_L3 bank nodes; the error record index is 3*256 + L3_Bank_ID (0-3). */
+#define SCF_L3_BANK_RAS_GROUP_NODES \
+ SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) \
+ SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) \
+ SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA) \
+ SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA)
+
+#define CCPLEX_RAS_NODE_LIST(X) \
+ X(CCPMU) \
+ X(SCF_IOB) \
+ X(SCF_SNOC) \
+ X(SCF_CTU) \
+ X(CMU_CLOCKS)
+
+#define CCPLEX_RAS_GROUP_NODES CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_AUX_DATA)
+
+#endif /* TEGRA194_RAS_PRIVATE */
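To make the X-macro plumbing concrete, DEFINE_ONE_RAS_NODE(JSR_RET) expands to roughly the following (a sketch of the generated code, reformatted for readability):

    static const struct ras_error JSR_RET_uncorr_ras_errors[] = {
        { .error_msg = "Floating Point Register File Parity Error", .error_code = 0x13 },
        { .error_msg = "Integer Register File Parity Error", .error_code = 0x12 },
        { .error_msg = "Garbage Bundle", .error_code = 0x11 },
        { .error_msg = "Bundle Completion Timeout", .error_code = 0x10 },
        { NULL, 0U },           /* sentinel terminating the array */
    };

    static inline uint64_t JSR_RET_err_ctrl(void)
    {
        uint64_t val = 0ULL;

        /* one DEFINE_ENABLE_RAS_BIT expansion per X() entry */
        val |= (1ULL << 35U);
        val |= (1ULL << 34U);
        val |= (1ULL << 33U);
        val |= (1ULL << 32U);

        return val;
    }

The err_ctrl value is the per-node ERR<n>CTLR enable mask, and the sentinel entry lets callers walk error_records without a separate length field.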
diff --git a/plat/nvidia/tegra/include/t194/tegra_def.h b/plat/nvidia/tegra/include/t194/tegra_def.h
new file mode 100644
index 0000000..2158913
--- /dev/null
+++ b/plat/nvidia/tegra/include/t194/tegra_def.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA_DEF_H
+#define TEGRA_DEF_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Platform BL31 specific defines.
+ ******************************************************************************/
+#define BL31_SIZE U(0x40000)
+
+/*******************************************************************************
+ * Chip specific cluster and cpu numbers
+ ******************************************************************************/
+#define PLATFORM_CLUSTER_COUNT U(4)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER U(2)
+
+/*******************************************************************************
+ * Chip specific page table and MMU setup constants
+ ******************************************************************************/
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 40)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 40)
+
+/*******************************************************************************
+ * These values are used by the PSCI implementation during the `CPU_SUSPEND`
+ * and `SYSTEM_SUSPEND` calls as the `state-id` field in the 'power state'
+ * parameter.
+ ******************************************************************************/
+#define PSTATE_ID_CORE_IDLE U(6)
+#define PSTATE_ID_CORE_POWERDN U(7)
+#define PSTATE_ID_SOC_POWERDN U(2)
+
+/*******************************************************************************
+ * Platform power states (used by PSCI framework)
+ *
+ * - PLAT_MAX_RET_STATE should be less than the lowest PSTATE_ID
+ * - PLAT_MAX_OFF_STATE should be greater than the highest PSTATE_ID
+ ******************************************************************************/
+#define PLAT_MAX_RET_STATE U(1)
+#define PLAT_MAX_OFF_STATE U(8)
+
+/*******************************************************************************
+ * Secure IRQ definitions
+ ******************************************************************************/
+#define TEGRA194_MAX_SEC_IRQS U(2)
+#define TEGRA194_TOP_WDT_IRQ U(49)
+#define TEGRA194_AON_WDT_IRQ U(50)
+
+#define TEGRA194_SEC_IRQ_TARGET_MASK U(0xFF) /* 8 Carmel */
+
+/*******************************************************************************
+ * Clock identifier for the SE device
+ ******************************************************************************/
+#define TEGRA194_CLK_SE U(124)
+#define TEGRA_CLK_SE TEGRA194_CLK_SE
+
+/*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE U(0x00100000)
+
+#define HARDWARE_REVISION_OFFSET U(0x4)
+#define MISCREG_EMU_REVID U(0x3160)
+#define BOARD_MASK_BITS U(0xFF)
+#define BOARD_SHIFT_BITS U(24)
+#define MISCREG_PFCFG U(0x200C)
+
+/*******************************************************************************
+ * Tegra General Purpose Centralised DMA constants
+ ******************************************************************************/
+#define TEGRA_GPCDMA_BASE U(0x02610000)
+
+/*******************************************************************************
+ * Tegra Memory Controller constants
+ ******************************************************************************/
+#define TEGRA_MC_STREAMID_BASE U(0x02C00000)
+#define TEGRA_MC_BASE U(0x02C10000)
+
+/* General Security Carveout register macros */
+#define MC_GSC_CONFIG_REGS_SIZE U(0x40)
+#define MC_GSC_LOCK_CFG_SETTINGS_BIT (U(1) << 1)
+#define MC_GSC_ENABLE_TZ_LOCK_BIT (U(1) << 0)
+#define MC_GSC_SIZE_RANGE_4KB_SHIFT U(27)
+#define MC_GSC_BASE_LO_SHIFT U(12)
+#define MC_GSC_BASE_LO_MASK U(0xFFFFF)
+#define MC_GSC_BASE_HI_SHIFT U(0)
+#define MC_GSC_BASE_HI_MASK U(3)
+#define MC_GSC_ENABLE_CPU_SECURE_BIT (U(1) << 31)
+
+/* TZDRAM carveout configuration registers */
+#define MC_SECURITY_CFG0_0 U(0x70)
+#define MC_SECURITY_CFG1_0 U(0x74)
+#define MC_SECURITY_CFG3_0 U(0x9BC)
+
+#define MC_SECURITY_BOM_MASK (U(0xFFF) << 20)
+#define MC_SECURITY_SIZE_MB_MASK (U(0x1FFF) << 0)
+#define MC_SECURITY_BOM_HI_MASK (U(0x3) << 0)
+
+#define MC_SECURITY_CFG_REG_CTRL_0 U(0x154)
+#define SECURITY_CFG_WRITE_ACCESS_BIT (U(0x1) << 0)
+#define SECURITY_CFG_WRITE_ACCESS_ENABLE U(0x0)
+#define SECURITY_CFG_WRITE_ACCESS_DISABLE U(0x1)
+
+/* Video Memory carveout configuration registers */
+#define MC_VIDEO_PROTECT_BASE_HI U(0x978)
+#define MC_VIDEO_PROTECT_BASE_LO U(0x648)
+#define MC_VIDEO_PROTECT_SIZE_MB U(0x64c)
+#define MC_VIDEO_PROTECT_REG_CTRL U(0x650)
+#define MC_VIDEO_PROTECT_WRITE_ACCESS_ENABLED U(3)
+
+/*
+ * Carveout (MC_SECURITY_CARVEOUT24) registers used to clear the
+ * non-overlapping Video memory region
+ */
+#define MC_VIDEO_PROTECT_CLEAR_CFG U(0x25A0)
+#define MC_VIDEO_PROTECT_CLEAR_BASE_LO U(0x25A4)
+#define MC_VIDEO_PROTECT_CLEAR_BASE_HI U(0x25A8)
+#define MC_VIDEO_PROTECT_CLEAR_SIZE U(0x25AC)
+#define MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0 U(0x25B0)
+
+/* TZRAM carveout (MC_SECURITY_CARVEOUT11) configuration registers */
+#define MC_TZRAM_CARVEOUT_CFG U(0x2190)
+#define MC_TZRAM_BASE_LO U(0x2194)
+#define MC_TZRAM_BASE_HI U(0x2198)
+#define MC_TZRAM_SIZE U(0x219C)
+#define MC_TZRAM_CLIENT_ACCESS0_CFG0 U(0x21A0)
+#define MC_TZRAM_CLIENT_ACCESS1_CFG0 U(0x21A4)
+#define TZRAM_ALLOW_MPCORER (U(1) << 7)
+#define TZRAM_ALLOW_MPCOREW (U(1) << 25)
+
+/* Memory Controller Reset Control registers */
+#define MC_CLIENT_HOTRESET_CTRL1_DLAA_FLUSH_ENB (U(1) << 28)
+#define MC_CLIENT_HOTRESET_CTRL1_DLA1A_FLUSH_ENB (U(1) << 29)
+#define MC_CLIENT_HOTRESET_CTRL1_PVA0A_FLUSH_ENB (U(1) << 30)
+#define MC_CLIENT_HOTRESET_CTRL1_PVA1A_FLUSH_ENB (U(1) << 31)
+
+/*******************************************************************************
+ * Tegra UART Controller constants
+ ******************************************************************************/
+#define TEGRA_UARTA_BASE U(0x03100000)
+#define TEGRA_UARTB_BASE U(0x03110000)
+#define TEGRA_UARTC_BASE U(0x0C280000)
+#define TEGRA_UARTD_BASE U(0x03130000)
+#define TEGRA_UARTE_BASE U(0x03140000)
+#define TEGRA_UARTF_BASE U(0x03150000)
+#define TEGRA_UARTG_BASE U(0x0C290000)
+
+/*******************************************************************************
+ * XUSB PADCTL
+ ******************************************************************************/
+#define TEGRA_XUSB_PADCTL_BASE U(0x03520000)
+#define TEGRA_XUSB_PADCTL_SIZE U(0x10000)
+#define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 U(0x136c)
+#define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 U(0x1370)
+#define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 U(0x1374)
+#define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 U(0x1378)
+#define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 U(0x137c)
+#define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0 U(0x139c)
+
+/*******************************************************************************
+ * Tegra Fuse Controller related constants
+ ******************************************************************************/
+#define TEGRA_FUSE_BASE U(0x03820000)
+#define OPT_SUBREVISION U(0x248)
+#define SUBREVISION_MASK U(0xF)
+
+/*******************************************************************************
+ * GICv2 & interrupt handling related constants
+ ******************************************************************************/
+#define TEGRA_GICD_BASE U(0x03881000)
+#define TEGRA_GICC_BASE U(0x03882000)
+
+/*******************************************************************************
+ * Security Engine related constants
+ ******************************************************************************/
+#define TEGRA_SE0_BASE U(0x03AC0000)
+#define SE0_MUTEX_WATCHDOG_NS_LIMIT U(0x6C)
+#define SE0_AES0_ENTROPY_SRC_AGE_CTRL U(0x2FC)
+#define TEGRA_PKA1_BASE U(0x03AD0000)
+#define SE_PKA1_CTRL_SE_MUTEX_TMOUT_DFTVAL U(0x144)
+#define PKA1_MUTEX_WATCHDOG_NS_LIMIT SE_PKA1_CTRL_SE_MUTEX_TMOUT_DFTVAL
+#define TEGRA_RNG1_BASE U(0x03AE0000)
+#define RNG1_MUTEX_WATCHDOG_NS_LIMIT U(0xFE0)
+
+/*******************************************************************************
+ * Tegra HSP doorbell #0 constants
+ ******************************************************************************/
+#define TEGRA_HSP_DBELL_BASE U(0x03C90000)
+#define HSP_DBELL_1_ENABLE U(0x104)
+#define HSP_DBELL_3_TRIGGER U(0x300)
+#define HSP_DBELL_3_ENABLE U(0x304)
+
+/*******************************************************************************
+ * Tegra hardware synchronization primitives for the SPE engine
+ ******************************************************************************/
+#define TEGRA_AON_HSP_SM_6_7_BASE U(0x0c190000)
+#define TEGRA_CONSOLE_SPE_BASE (TEGRA_AON_HSP_SM_6_7_BASE + U(0x8000))
+
+/*******************************************************************************
+ * Tegra micro-seconds timer constants
+ ******************************************************************************/
+#define TEGRA_TMRUS_BASE U(0x0C2E0000)
+#define TEGRA_TMRUS_SIZE U(0x10000)
+
+/*******************************************************************************
+ * Tegra Power Mgmt Controller constants
+ ******************************************************************************/
+#define TEGRA_PMC_BASE U(0x0C360000)
+
+/*******************************************************************************
+ * Tegra scratch registers constants
+ ******************************************************************************/
+#define TEGRA_SCRATCH_BASE U(0x0C390000)
+#define SECURE_SCRATCH_RSV68_LO U(0x284)
+#define SECURE_SCRATCH_RSV68_HI U(0x288)
+#define SECURE_SCRATCH_RSV69_LO U(0x28C)
+#define SECURE_SCRATCH_RSV69_HI U(0x290)
+#define SECURE_SCRATCH_RSV70_LO U(0x294)
+#define SECURE_SCRATCH_RSV70_HI U(0x298)
+#define SECURE_SCRATCH_RSV71_LO U(0x29C)
+#define SECURE_SCRATCH_RSV71_HI U(0x2A0)
+#define SECURE_SCRATCH_RSV72_LO U(0x2A4)
+#define SECURE_SCRATCH_RSV72_HI U(0x2A8)
+#define SECURE_SCRATCH_RSV75 U(0x2BC)
+#define SECURE_SCRATCH_RSV81_LO U(0x2EC)
+#define SECURE_SCRATCH_RSV81_HI U(0x2F0)
+#define SECURE_SCRATCH_RSV97 U(0x36C)
+#define SECURE_SCRATCH_RSV99_LO U(0x37C)
+#define SECURE_SCRATCH_RSV99_HI U(0x380)
+#define SECURE_SCRATCH_RSV109_LO U(0x3CC)
+#define SECURE_SCRATCH_RSV109_HI U(0x3D0)
+
+#define SCRATCH_BL31_PARAMS_HI_ADDR SECURE_SCRATCH_RSV75
+#define SCRATCH_BL31_PARAMS_HI_ADDR_MASK U(0xFFFF)
+#define SCRATCH_BL31_PARAMS_HI_ADDR_SHIFT U(0)
+#define SCRATCH_BL31_PARAMS_LO_ADDR SECURE_SCRATCH_RSV81_LO
+#define SCRATCH_BL31_PLAT_PARAMS_HI_ADDR SECURE_SCRATCH_RSV75
+#define SCRATCH_BL31_PLAT_PARAMS_HI_ADDR_MASK U(0xFFFF0000)
+#define SCRATCH_BL31_PLAT_PARAMS_HI_ADDR_SHIFT U(16)
+#define SCRATCH_BL31_PLAT_PARAMS_LO_ADDR SECURE_SCRATCH_RSV81_HI
+#define SCRATCH_SECURE_BOOTP_FCFG SECURE_SCRATCH_RSV97
+#define SCRATCH_MC_TABLE_ADDR_LO SECURE_SCRATCH_RSV99_LO
+#define SCRATCH_MC_TABLE_ADDR_HI SECURE_SCRATCH_RSV99_HI
+#define SCRATCH_RESET_VECTOR_LO SECURE_SCRATCH_RSV109_LO
+#define SCRATCH_RESET_VECTOR_HI SECURE_SCRATCH_RSV109_HI
+
+/*******************************************************************************
+ * Tegra Memory Mapped Control Register Access Bus constants
+ ******************************************************************************/
+#define TEGRA_MMCRAB_BASE U(0x0E000000)
+
+/*******************************************************************************
+ * Tegra SMMU Controller constants
+ ******************************************************************************/
+#define TEGRA_SMMU0_BASE U(0x12000000)
+#define TEGRA_SMMU1_BASE U(0x11000000)
+#define TEGRA_SMMU2_BASE U(0x10000000)
+
+/*******************************************************************************
+ * Tegra TZRAM constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_BASE U(0x40000000)
+#define TEGRA_TZRAM_SIZE U(0x40000)
+
+/*******************************************************************************
+ * Tegra CCPLEX-BPMP IPC constants
+ ******************************************************************************/
+#define TEGRA_BPMP_IPC_TX_PHYS_BASE U(0x4004C000)
+#define TEGRA_BPMP_IPC_RX_PHYS_BASE U(0x4004D000)
+#define TEGRA_BPMP_IPC_CH_MAP_SIZE U(0x1000) /* 4KB */
+
+/*******************************************************************************
+ * Tegra Clock and Reset Controller constants
+ ******************************************************************************/
+#define TEGRA_CAR_RESET_BASE U(0x20000000)
+#define TEGRA_GPU_RESET_REG_OFFSET U(0x18)
+#define TEGRA_GPU_RESET_GPU_SET_OFFSET U(0x1C)
+#define GPU_RESET_BIT (U(1) << 0)
+#define GPU_SET_BIT (U(1) << 0)
+#define TEGRA_GPCDMA_RST_SET_REG_OFFSET U(0x6A0004)
+#define TEGRA_GPCDMA_RST_CLR_REG_OFFSET U(0x6A0008)
+
+/*******************************************************************************
+ * Tegra DRAM memory base address
+ ******************************************************************************/
+#define TEGRA_DRAM_BASE ULL(0x80000000)
+#define TEGRA_DRAM_END ULL(0xFFFFFFFFF)
+
+/*******************************************************************************
+ * XUSB STREAMIDs
+ ******************************************************************************/
+#define TEGRA_SID_XUSB_HOST U(0x1b)
+#define TEGRA_SID_XUSB_DEV U(0x1c)
+#define TEGRA_SID_XUSB_VF0 U(0x5d)
+#define TEGRA_SID_XUSB_VF1 U(0x5e)
+#define TEGRA_SID_XUSB_VF2 U(0x5f)
+#define TEGRA_SID_XUSB_VF3 U(0x60)
+
+/*******************************************************************************
+ * SCR addresses and expected settings
+ ******************************************************************************/
+#define SCRATCH_RSV68_SCR U(0x0C398110)
+#define SCRATCH_RSV68_SCR_VAL U(0x38000101)
+#define SCRATCH_RSV71_SCR U(0x0C39811C)
+#define SCRATCH_RSV71_SCR_VAL U(0x38000101)
+#define SCRATCH_RSV72_SCR U(0x0C398120)
+#define SCRATCH_RSV72_SCR_VAL U(0x38000101)
+#define SCRATCH_RSV75_SCR U(0x0C39812C)
+#define SCRATCH_RSV75_SCR_VAL U(0x3A000005)
+#define SCRATCH_RSV81_SCR U(0x0C398144)
+#define SCRATCH_RSV81_SCR_VAL U(0x3A000105)
+#define SCRATCH_RSV97_SCR U(0x0C398184)
+#define SCRATCH_RSV97_SCR_VAL U(0x38000101)
+#define SCRATCH_RSV99_SCR U(0x0C39818C)
+#define SCRATCH_RSV99_SCR_VAL U(0x38000101)
+#define SCRATCH_RSV109_SCR U(0x0C3981B4)
+#define SCRATCH_RSV109_SCR_VAL U(0x38000101)
+#define MISCREG_SCR_SCRTZWELCK U(0x00109000)
+#define MISCREG_SCR_SCRTZWELCK_VAL U(0x30000100)
+
+#endif /* TEGRA_DEF_H */
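The SCRATCH_BL31_PARAMS_* macros pack a 48-bit address as a 32-bit low word plus a 16-bit high field; the BL31 params and plat-params high fields share SECURE_SCRATCH_RSV75. A sketch of how the address could be reassembled (illustrative only; the helper name is hypothetical and the actual boot-path code may differ):

    #include <stdint.h>
    #include <lib/mmio.h>

    static uint64_t get_bl31_params_addr(void)
    {
        uint64_t lo, hi;

        lo = mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_BL31_PARAMS_LO_ADDR);
        hi = (mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_BL31_PARAMS_HI_ADDR) &
              SCRATCH_BL31_PARAMS_HI_ADDR_MASK) >> SCRATCH_BL31_PARAMS_HI_ADDR_SHIFT;

        return (hi << 32) | lo;
    }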
diff --git a/plat/nvidia/tegra/include/t210/tegra_def.h b/plat/nvidia/tegra/include/t210/tegra_def.h
new file mode 100644
index 0000000..81b25e0
--- /dev/null
+++ b/plat/nvidia/tegra/include/t210/tegra_def.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA_DEF_H
+#define TEGRA_DEF_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Platform BL31 specific defines.
+ ******************************************************************************/
+#define BL31_SIZE U(0x40000)
+
+/*******************************************************************************
+ * Power down state IDs
+ ******************************************************************************/
+#define PSTATE_ID_CORE_POWERDN U(7)
+#define PSTATE_ID_CLUSTER_IDLE U(16)
+#define PSTATE_ID_SOC_POWERDN U(27)
+
+/*******************************************************************************
+ * This value is used by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call as the `state-id` field in the 'power state' parameter.
+ ******************************************************************************/
+#define PLAT_SYS_SUSPEND_STATE_ID PSTATE_ID_SOC_POWERDN
+
+/*******************************************************************************
+ * Platform power states (used by PSCI framework)
+ *
+ * - PLAT_MAX_RET_STATE should be less than the lowest PSTATE_ID
+ * - PLAT_MAX_OFF_STATE should be greater than the highest PSTATE_ID
+ ******************************************************************************/
+#define PLAT_MAX_RET_STATE U(1)
+#define PLAT_MAX_OFF_STATE (PSTATE_ID_SOC_POWERDN + U(1))
+
+/*******************************************************************************
+ * Chip specific page table and MMU setup constants
+ ******************************************************************************/
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 35)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 35)
+
+/*******************************************************************************
+ * SC7 entry firmware's header size
+ ******************************************************************************/
+#define SC7ENTRY_FW_HEADER_SIZE_BYTES U(0x400)
+
+/*******************************************************************************
+ * Counter-timer physical secure timer PPI
+ ******************************************************************************/
+#define TEGRA210_TIMER1_IRQ 32
+
+/*******************************************************************************
+ * iRAM memory constants
+ ******************************************************************************/
+#define TEGRA_IRAM_BASE U(0x40000000)
+#define TEGRA_IRAM_A_SIZE U(0x10000) /* 64KB */
+#define TEGRA_IRAM_SIZE			U(0x40000) /* 256KB */
+
+/*******************************************************************************
+ * GIC memory map
+ ******************************************************************************/
+#define TEGRA_GICD_BASE U(0x50041000)
+#define TEGRA_GICC_BASE U(0x50042000)
+
+/*******************************************************************************
+ * Secure IRQ definitions
+ ******************************************************************************/
+#define TEGRA210_WDT_CPU_LEGACY_FIQ U(28)
+
+/*******************************************************************************
+ * Tegra Memory Select Switch Controller constants
+ ******************************************************************************/
+#define TEGRA_MSELECT_BASE U(0x50060000)
+
+#define MSELECT_CONFIG U(0x0)
+#define ENABLE_WRAP_INCR_MASTER2_BIT (U(1) << U(29))
+#define ENABLE_WRAP_INCR_MASTER1_BIT (U(1) << U(28))
+#define ENABLE_WRAP_INCR_MASTER0_BIT (U(1) << U(27))
+#define UNSUPPORTED_TX_ERR_MASTER2_BIT (U(1) << U(25))
+#define UNSUPPORTED_TX_ERR_MASTER1_BIT (U(1) << U(24))
+#define ENABLE_UNSUP_TX_ERRORS (UNSUPPORTED_TX_ERR_MASTER2_BIT | \
+ UNSUPPORTED_TX_ERR_MASTER1_BIT)
+#define ENABLE_WRAP_TO_INCR_BURSTS (ENABLE_WRAP_INCR_MASTER2_BIT | \
+ ENABLE_WRAP_INCR_MASTER1_BIT | \
+ ENABLE_WRAP_INCR_MASTER0_BIT)
+
+/*******************************************************************************
+ * Tegra Resource Semaphore constants
+ ******************************************************************************/
+#define TEGRA_RES_SEMA_BASE 0x60001000UL
+#define STA_OFFSET 0UL
+#define SET_OFFSET 4UL
+#define CLR_OFFSET 8UL
+
+/*******************************************************************************
+ * Tegra Primary Interrupt Controller constants
+ ******************************************************************************/
+#define TEGRA_PRI_ICTLR_BASE 0x60004000UL
+#define CPU_IEP_FIR_SET 0x18UL
+
+/*******************************************************************************
+ * Tegra micro-seconds timer constants
+ ******************************************************************************/
+#define TEGRA_TMRUS_BASE U(0x60005010)
+#define TEGRA_TMRUS_SIZE U(0x1000)
+
+/*******************************************************************************
+ * Tegra Clock and Reset Controller constants
+ ******************************************************************************/
+#define TEGRA_CAR_RESET_BASE U(0x60006000)
+#define TEGRA_BOND_OUT_H U(0x74)
+#define APB_DMA_LOCK_BIT (U(1) << 2)
+#define AHB_DMA_LOCK_BIT (U(1) << 1)
+#define TEGRA_BOND_OUT_U U(0x78)
+#define IRAM_D_LOCK_BIT (U(1) << 23)
+#define IRAM_C_LOCK_BIT (U(1) << 22)
+#define IRAM_B_LOCK_BIT (U(1) << 21)
+#define TEGRA_GPU_RESET_REG_OFFSET U(0x28C)
+#define TEGRA_GPU_RESET_GPU_SET_OFFSET U(0x290)
+#define GPU_RESET_BIT (U(1) << 24)
+#define GPU_SET_BIT (U(1) << 24)
+#define TEGRA_RST_DEV_SET_Y U(0x2a8)
+#define NVENC_RESET_BIT (U(1) << 27)
+#define TSECB_RESET_BIT (U(1) << 14)
+#define APE_RESET_BIT (U(1) << 6)
+#define NVJPG_RESET_BIT (U(1) << 3)
+#define NVDEC_RESET_BIT (U(1) << 2)
+#define TEGRA_RST_DEV_SET_L U(0x300)
+#define HOST1X_RESET_BIT (U(1) << 28)
+#define ISP_RESET_BIT (U(1) << 23)
+#define USBD_RESET_BIT (U(1) << 22)
+#define VI_RESET_BIT (U(1) << 20)
+#define SDMMC4_RESET_BIT (U(1) << 15)
+#define SDMMC1_RESET_BIT (U(1) << 14)
+#define SDMMC2_RESET_BIT (U(1) << 9)
+#define TEGRA_RST_DEV_SET_H U(0x308)
+#define USB2_RESET_BIT (U(1) << 26)
+#define APBDMA_RESET_BIT (U(1) << 2)
+#define AHBDMA_RESET_BIT (U(1) << 1)
+#define TEGRA_RST_DEV_SET_U U(0x310)
+#define XUSB_DEV_RESET_BIT (U(1) << 31)
+#define XUSB_HOST_RESET_BIT (U(1) << 25)
+#define TSEC_RESET_BIT (U(1) << 19)
+#define PCIE_RESET_BIT (U(1) << 6)
+#define SDMMC3_RESET_BIT (U(1) << 5)
+#define TEGRA_RST_DEVICES_V U(0x358)
+#define TEGRA_RST_DEVICES_W U(0x35C)
+#define ENTROPY_CLK_ENB_BIT (U(1) << 21)
+#define TEGRA_CLK_OUT_ENB_V U(0x360)
+#define SE_CLK_ENB_BIT (U(1) << 31)
+#define TEGRA_CLK_OUT_ENB_W U(0x364)
+#define ENTROPY_RESET_BIT (U(1) << 21)
+#define TEGRA_CLK_RST_CTL_CLK_SRC_SE U(0x42C)
+#define SE_CLK_SRC_MASK (U(7) << 29)
+#define SE_CLK_SRC_CLK_M (U(6) << 29)
+#define TEGRA_RST_DEV_SET_V U(0x430)
+#define SE_RESET_BIT (U(1) << 31)
+#define HDA_RESET_BIT (U(1) << 29)
+#define SATA_RESET_BIT (U(1) << 28)
+#define TEGRA_RST_DEV_CLR_V U(0x434)
+#define TEGRA_CLK_ENB_V U(0x440)
+
+/*******************************************************************************
+ * Tegra Flow Controller constants
+ ******************************************************************************/
+#define TEGRA_FLOWCTRL_BASE U(0x60007000)
+
+/*******************************************************************************
+ * Tegra AHB arbitration controller
+ ******************************************************************************/
+#define TEGRA_AHB_ARB_BASE 0x6000C000UL
+
+/*******************************************************************************
+ * Tegra Secure Boot Controller constants
+ ******************************************************************************/
+#define TEGRA_SB_BASE U(0x6000C200)
+
+/*******************************************************************************
+ * Tegra Exception Vectors constants
+ ******************************************************************************/
+#define TEGRA_EVP_BASE U(0x6000F000)
+
+/*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE U(0x70000000)
+#define HARDWARE_REVISION_OFFSET U(0x804)
+#define APB_SLAVE_SECURITY_ENABLE U(0xC00)
+#define PMC_SECURITY_EN_BIT (U(1) << 13)
+#define PINMUX_AUX_DVFS_PWM U(0x3184)
+#define PINMUX_PWM_TRISTATE (U(1) << 4)
+
+/*******************************************************************************
+ * Tegra UART controller base addresses
+ ******************************************************************************/
+#define TEGRA_UARTA_BASE U(0x70006000)
+#define TEGRA_UARTB_BASE U(0x70006040)
+#define TEGRA_UARTC_BASE U(0x70006200)
+#define TEGRA_UARTD_BASE U(0x70006300)
+#define TEGRA_UARTE_BASE U(0x70006400)
+
+/*******************************************************************************
+ * Tegra Fuse Controller related constants
+ ******************************************************************************/
+#define TEGRA_FUSE_BASE 0x7000F800UL
+#define FUSE_BOOT_SECURITY_INFO 0x268UL
+#define FUSE_ATOMIC_SAVE_CARVEOUT_EN (0x1U << 7)
+#define FUSE_JTAG_SECUREID_VALID (0x104UL)
+#define ECID_VALID (0x1UL)
+
+/*******************************************************************************
+ * Tegra Power Mgmt Controller constants
+ ******************************************************************************/
+#define TEGRA_PMC_BASE U(0x7000E400)
+#define TEGRA_PMC_SIZE			U(0xC00) /* 3KB */
+
+/*******************************************************************************
+ * Tegra Atomics constants
+ ******************************************************************************/
+#define TEGRA_ATOMICS_BASE 0x70016000UL
+#define TRIGGER0_REG_OFFSET 0UL
+#define TRIGGER_WIDTH_SHIFT 4UL
+#define TRIGGER_ID_SHIFT 16UL
+#define RESULT0_REG_OFFSET 0xC00UL
+
+/*******************************************************************************
+ * Tegra Memory Controller constants
+ ******************************************************************************/
+#define TEGRA_MC_BASE U(0x70019000)
+
+/* Memory Controller Interrupt Status */
+#define MC_INTSTATUS 0x00U
+
+/* TZDRAM carveout configuration registers */
+#define MC_SECURITY_CFG0_0 U(0x70)
+#define MC_SECURITY_CFG1_0 U(0x74)
+#define MC_SECURITY_CFG3_0 U(0x9BC)
+
+/* Video Memory carveout configuration registers */
+#define MC_VIDEO_PROTECT_BASE_HI U(0x978)
+#define MC_VIDEO_PROTECT_BASE_LO U(0x648)
+#define MC_VIDEO_PROTECT_SIZE_MB U(0x64c)
+#define MC_VIDEO_PROTECT_REG_CTRL U(0x650)
+#define MC_VIDEO_PROTECT_WRITE_ACCESS_ENABLED U(3)
+
+/* SMMU configuration registers */
+#define MC_SMMU_PPCS_ASID_0 0x270U
+#define PPCS_SMMU_ENABLE (0x1U << 31)
+
+/*******************************************************************************
+ * Tegra CLDVFS constants
+ ******************************************************************************/
+#define TEGRA_CL_DVFS_BASE U(0x70110000)
+#define DVFS_DFLL_CTRL U(0x00)
+#define ENABLE_OPEN_LOOP U(1)
+#define ENABLE_CLOSED_LOOP U(2)
+#define DVFS_DFLL_OUTPUT_CFG U(0x20)
+#define DFLL_OUTPUT_CFG_I2C_EN_BIT (U(1) << 30)
+#define DFLL_OUTPUT_CFG_CLK_EN_BIT (U(1) << 6)
+
+/*******************************************************************************
+ * Tegra SE constants
+ ******************************************************************************/
+#define TEGRA_SE1_BASE U(0x70012000)
+#define TEGRA_SE2_BASE U(0x70412000)
+#define TEGRA_PKA1_BASE U(0x70420000)
+#define TEGRA_SE2_RANGE_SIZE U(0x2000)
+#define SE_TZRAM_SECURITY U(0x4)
+
+/*******************************************************************************
+ * Tegra TZRAM constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_BASE U(0x7C010000)
+#define TEGRA_TZRAM_SIZE U(0x10000)
+
+/*******************************************************************************
+ * Tegra TZRAM carveout constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_CARVEOUT_BASE U(0x7C04C000)
+#define TEGRA_TZRAM_CARVEOUT_SIZE U(0x4000)
+
+/*******************************************************************************
+ * Tegra DRAM memory base address
+ ******************************************************************************/
+#define TEGRA_DRAM_BASE ULL(0x80000000)
+#define TEGRA_DRAM_END ULL(0x27FFFFFFF)
+
+#endif /* TEGRA_DEF_H */
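The CAR set registers follow the usual Tegra write-1-to-set convention: writing a 1 to a set register asserts the corresponding reset line. A sketch, assuming the GPU is to be held in reset (the function name is illustrative):

    #include <lib/mmio.h>

    static void gpu_reset_assert(void)
    {
        /* write-1-to-set: asserts the GPU reset line (bit 24 on T210) */
        mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_GPU_SET_OFFSET,
                      GPU_SET_BIT);
    }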
diff --git a/plat/nvidia/tegra/include/tegra_platform.h b/plat/nvidia/tegra/include/tegra_platform.h
new file mode 100644
index 0000000..ab51dfe
--- /dev/null
+++ b/plat/nvidia/tegra/include/tegra_platform.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2021, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA_PLATFORM_H
+#define TEGRA_PLATFORM_H
+
+#include <cdefs.h>
+#include <lib/utils_def.h>
+#include <stdbool.h>
+
+/*******************************************************************************
+ * Tegra major, minor version helper macros
+ ******************************************************************************/
+#define MAJOR_VERSION_SHIFT U(0x4)
+#define MAJOR_VERSION_MASK U(0xF)
+#define MINOR_VERSION_SHIFT U(0x10)
+#define MINOR_VERSION_MASK U(0xF)
+#define CHIP_ID_SHIFT U(8)
+#define CHIP_ID_MASK U(0xFF)
+#define PRE_SI_PLATFORM_SHIFT U(0x14)
+#define PRE_SI_PLATFORM_MASK U(0xF)
+
+/*******************************************************************************
+ * Tegra chip ID values
+ ******************************************************************************/
+#define TEGRA_CHIPID_TEGRA13 U(0x13)
+#define TEGRA_CHIPID_TEGRA21 U(0x21)
+#define TEGRA_CHIPID_TEGRA18 U(0x18)
+#define TEGRA_CHIPID_TEGRA19 U(0x19)
+
+/*******************************************************************************
+ * JEDEC Standard Manufacturer's Identification Code and Bank ID
+ ******************************************************************************/
+#define JEDEC_NVIDIA_MFID U(0x6B)
+#define JEDEC_NVIDIA_BKID U(3)
+
+#ifndef __ASSEMBLER__
+
+/*
+ * Tegra chip ID major/minor identifiers
+ */
+uint32_t tegra_get_chipid_major(void);
+uint32_t tegra_get_chipid_minor(void);
+
+/*
+ * Tegra chip ID identifiers
+ */
+bool tegra_chipid_is_t186(void);
+bool tegra_chipid_is_t210(void);
+bool tegra_chipid_is_t210_b01(void);
+bool tegra_chipid_is_t194(void);
+
+/*
+ * Tegra platform identifiers
+ */
+bool tegra_platform_is_silicon(void);
+bool tegra_platform_is_qt(void);
+bool tegra_platform_is_emulation(void);
+bool tegra_platform_is_linsim(void);
+bool tegra_platform_is_fpga(void);
+bool tegra_platform_is_unit_fpga(void);
+bool tegra_platform_is_virt_dev_kit(void);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* TEGRA_PLATFORM_H */
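The shift/mask pairs above decode fields of the chip-identification register. A minimal sketch, assuming hidrev holds the raw register value (the accessor names are illustrative; the exported tegra_get_chipid_* helpers perform the equivalent lookup on live hardware):

    #include <stdint.h>

    static inline uint32_t chip_id(uint32_t hidrev)
    {
        return (hidrev >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
    }

    static inline uint32_t chip_major(uint32_t hidrev)
    {
        return (hidrev >> MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK;
    }

    static inline uint32_t chip_minor(uint32_t hidrev)
    {
        return (hidrev >> MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK;
    }

    /* e.g. chip_id(hidrev) == TEGRA_CHIPID_TEGRA19 identifies a Tegra194 part */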
diff --git a/plat/nvidia/tegra/include/tegra_private.h b/plat/nvidia/tegra/include/tegra_private.h
new file mode 100644
index 0000000..ad80596
--- /dev/null
+++ b/plat/nvidia/tegra/include/tegra_private.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEGRA_PRIVATE_H
+#define TEGRA_PRIVATE_H
+
+#include <platform_def.h>
+#include <stdbool.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/psci/psci.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <tegra_gic.h>
+
+/*******************************************************************************
+ * Implementation defined ACTLR_EL1 bit definitions
+ ******************************************************************************/
+#define ACTLR_EL1_PMSTATE_MASK (ULL(0xF) << 0)
+
+/*******************************************************************************
+ * Implementation defined ACTLR_EL2 bit definitions
+ ******************************************************************************/
+#define ACTLR_EL2_PMSTATE_MASK (ULL(0xF) << 0)
+
+/*******************************************************************************
+ * Struct for parameters received from BL2
+ ******************************************************************************/
+typedef struct plat_params_from_bl2 {
+ /* TZ memory size */
+ uint64_t tzdram_size;
+ /* TZ memory base */
+ uint64_t tzdram_base;
+ /* UART port ID */
+ int32_t uart_id;
+ /* L2 ECC parity protection disable flag */
+ int32_t l2_ecc_parity_prot_dis;
+ /* SHMEM base address for storing the boot logs */
+ uint64_t boot_profiler_shmem_base;
+ /* System Suspend Entry Firmware size */
+ uint64_t sc7entry_fw_size;
+ /* System Suspend Entry Firmware base address */
+ uint64_t sc7entry_fw_base;
+ /* Enable dual execution */
+ uint8_t enable_ccplex_lock_step;
+} plat_params_from_bl2_t;
+
+/*******************************************************************************
+ * Helper function to access l2ctlr_el1 register on Cortex-A57 CPUs
+ ******************************************************************************/
+DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, CORTEX_A57_L2CTLR_EL1)
+
+/*******************************************************************************
+ * Struct describing parameters passed to bl31
+ ******************************************************************************/
+struct tegra_bl31_params {
+ param_header_t h;
+ image_info_t *bl31_image_info;
+ entry_point_info_t *bl32_ep_info;
+ image_info_t *bl32_image_info;
+ entry_point_info_t *bl33_ep_info;
+ image_info_t *bl33_image_info;
+};
+
+/*******************************************************************************
+ * To suppress Coverity MISRA C-2012 Rule 2.2 violations
+ ******************************************************************************/
+#define UNUSED_FUNC_NOP() asm("nop")
+
+/* Declarations for plat_psci_handlers.c */
+int32_t tegra_soc_validate_power_state(uint32_t power_state,
+ psci_power_state_t *req_state);
+
+/* Declarations for plat_setup.c */
+const mmap_region_t *plat_get_mmio_map(void);
+void plat_enable_console(int32_t id);
+void plat_gic_setup(void);
+struct tegra_bl31_params *plat_get_bl31_params(void);
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void);
+void plat_early_platform_setup(void);
+void plat_late_platform_setup(void);
+void plat_relocate_bl32_image(const image_info_t *bl32_img_info);
+bool plat_supports_system_suspend(void);
+void plat_runtime_setup(void);
+
+/* Declarations for plat_secondary.c */
+void plat_secondary_setup(void);
+int32_t plat_lock_cpu_vectors(void);
+
+/* Declarations for tegra_fiq_glue.c */
+void tegra_fiq_handler_setup(void);
+int32_t tegra_fiq_get_intr_context(void);
+void tegra_fiq_set_ns_entrypoint(uint64_t entrypoint);
+
+/* Declarations for tegra_helpers.S */
+bool plat_is_my_cpu_primary(void);
+
+/* Declarations for tegra_security.c */
+void tegra_security_setup(void);
+void tegra_security_setup_videomem(uintptr_t base, uint64_t size);
+
+/* Declarations for tegra_pm.c */
+void tegra_pm_system_suspend_entry(void);
+void tegra_pm_system_suspend_exit(void);
+int32_t tegra_system_suspended(void);
+int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state);
+int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state);
+int32_t tegra_soc_pwr_domain_on(u_register_t mpidr);
+int32_t tegra_soc_pwr_domain_off_early(const psci_power_state_t *target_state);
+int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state);
+int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state);
+int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state);
+int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state);
+int32_t tegra_soc_prepare_system_reset(void);
+__dead2 void tegra_soc_prepare_system_off(void);
+plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
+ const plat_local_state_t *states,
+ uint32_t ncpu);
+
+/* Declarations for tegraXXX_pm.c */
+int tegra_prepare_cpu_suspend(unsigned int id, unsigned int afflvl);
+int tegra_prepare_cpu_on_finish(unsigned long mpidr);
+
+/* Declarations for tegra_bl31_setup.c */
+plat_params_from_bl2_t *bl31_get_plat_params(void);
+int32_t bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes);
+
+/* Declarations for tegra_delay_timer.c */
+void tegra_delay_timer_init(void);
+
+void tegra_secure_entrypoint(void);
+
+/* Declarations for tegra_sip_calls.c */
+uintptr_t tegra_sip_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags);
+int plat_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ const void *cookie,
+ void *handle,
+ uint64_t flags);
+
+#if ENABLE_FEAT_RAS && FFH_SUPPORT
+void tegra194_ras_enable(void);
+void tegra194_ras_corrected_err_clear(uint64_t *cookie);
+#endif
+
+#endif /* TEGRA_PRIVATE_H */
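On the DEFINE_RENAME_SYSREG_RW_FUNCS line above: the generic TF-A macro generates read_l2ctlr_el1() and write_l2ctlr_el1() accessors for the implementation-defined Cortex-A57 register. A usage sketch:

    /* read-modify-write of the A57 L2 control register */
    uint64_t l2ctlr = read_l2ctlr_el1();
    /* ... adjust implementation-defined fields as required ... */
    write_l2ctlr_el1(l2ctlr);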
diff --git a/plat/nvidia/tegra/lib/debug/profiler.c b/plat/nvidia/tegra/lib/debug/profiler.c
new file mode 100644
index 0000000..b5baf42
--- /dev/null
+++ b/plat/nvidia/tegra/lib/debug/profiler.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2017, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*******************************************************************************
+ * The profiler stores the timestamps captured during cold boot to the shared
+ * memory for the non-secure world. The non-secure world driver parses the
+ * shared memory block and writes the contents to a file on the device, which
+ * can later be extracted for analysis.
+ *
+ * Profiler memory map
+ *
+ * TOP --------------------------- ---
+ * Trusted OS timestamps 3KB
+ * --------------------------- ---
+ * Trusted Firmware timestamps 1KB
+ * BASE --------------------------- ---
+ *
+ ******************************************************************************/
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <profiler.h>
+#include <stdbool.h>
+#include <string.h>
+
+static uint64_t shmem_base_addr;
+
+#define MAX_PROFILER_RECORDS U(16)
+#define TAG_LEN_BYTES U(56)
+
+/*******************************************************************************
+ * Profiler entry format
+ ******************************************************************************/
+typedef struct {
+ /* text explaining the timestamp location in code */
+ uint8_t tag[TAG_LEN_BYTES];
+ /* timestamp value */
+ uint64_t timestamp;
+} profiler_rec_t;
+
+static profiler_rec_t *head, *cur, *tail;
+static uint32_t tmr;
+static bool is_shmem_buf_mapped;
+
+/*******************************************************************************
+ * Initialise the profiling library
+ ******************************************************************************/
+void boot_profiler_init(uint64_t shmem_base, uint32_t tmr_base)
+{
+ uint64_t shmem_end_base;
+
+ assert(shmem_base != ULL(0));
+ assert(tmr_base != U(0));
+
+ /* store the buffer address */
+ shmem_base_addr = shmem_base;
+
+ /* calculate the base address of the last record */
+ shmem_end_base = shmem_base + (sizeof(profiler_rec_t) *
+ (MAX_PROFILER_RECORDS - U(1)));
+
+ /* calculate the head, tail and cur values */
+ head = (profiler_rec_t *)shmem_base;
+ tail = (profiler_rec_t *)shmem_end_base;
+ cur = head;
+
+ /* timer used to get the current timestamp */
+ tmr = tmr_base;
+}
+
+/*******************************************************************************
+ * Add tag and timestamp to profiler
+ ******************************************************************************/
+void boot_profiler_add_record(const char *str)
+{
+ unsigned int len;
+
+ /* calculate the length of the tag */
+ if (((unsigned int)strlen(str) + U(1)) > TAG_LEN_BYTES) {
+ len = TAG_LEN_BYTES;
+ } else {
+ len = (unsigned int)strlen(str) + U(1);
+ }
+
+ if (head != NULL) {
+
+ /*
+		 * The profiler may run before or after the MMU is
+		 * enabled. If the MMU is on, dynamically map the
+		 * shmem buffer before accessing it.
+ */
+ if ((!is_shmem_buf_mapped) &&
+ ((read_sctlr_el3() & SCTLR_M_BIT) != U(0))) {
+
+ (void)mmap_add_dynamic_region(shmem_base_addr,
+ shmem_base_addr,
+ PROFILER_SIZE_BYTES,
+ (MT_NS | MT_RW | MT_EXECUTE_NEVER));
+
+ is_shmem_buf_mapped = true;
+ }
+
+ /* write the tag and timestamp to buffer */
+ (void)snprintf((char *)cur->tag, len, "%s", str);
+ cur->timestamp = mmio_read_32(tmr);
+
+ /* start from head if we reached the end */
+ if (cur == tail) {
+ cur = head;
+ } else {
+ cur++;
+ }
+ }
+}
+
+/*******************************************************************************
+ * Deinitialize the profiler
+ ******************************************************************************/
+void boot_profiler_deinit(void)
+{
+ if (shmem_base_addr != ULL(0)) {
+
+ /* clean up resources */
+ cur = NULL;
+ head = NULL;
+ tail = NULL;
+
+ /* flush the shmem for it to be visible to the NS world */
+ flush_dcache_range(shmem_base_addr, PROFILER_SIZE_BYTES);
+
+ /* unmap the shmem buffer */
+ if (is_shmem_buf_mapped) {
+ (void)mmap_remove_dynamic_region(shmem_base_addr,
+ PROFILER_SIZE_BYTES);
+ }
+ }
+}
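Typical cold-boot usage of this API, with the shmem base taken from the boot_profiler_shmem_base BL2 parameter and the TMRUS block as the timestamp source (a sketch; the tag strings are illustrative):

    /* params points to the plat_params_from_bl2 structure received from BL2 */
    boot_profiler_init(params->boot_profiler_shmem_base, TEGRA_TMRUS_BASE);

    boot_profiler_add_record("bl31: early platform setup done");
    boot_profiler_add_record("bl31: runtime setup done");

    /* flush and unmap the buffer before handing off to the NS world */
    boot_profiler_deinit();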
diff --git a/plat/nvidia/tegra/platform.mk b/plat/nvidia/tegra/platform.mk
new file mode 100644
index 0000000..2365564
--- /dev/null
+++ b/plat/nvidia/tegra/platform.mk
@@ -0,0 +1,97 @@
+#
+# Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+SOC_DIR := plat/nvidia/tegra/soc/${TARGET_SOC}
+
+# dump the state on crash console
+CRASH_REPORTING := 1
+$(eval $(call add_define,CRASH_REPORTING))
+
+# enable assert() for release/debug builds
+ENABLE_ASSERTIONS := 1
+PLAT_LOG_LEVEL_ASSERT := 50
+$(eval $(call add_define,PLAT_LOG_LEVEL_ASSERT))
+
+# enable dynamic memory mapping
+PLAT_XLAT_TABLES_DYNAMIC := 1
+$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))
+
+# Enable exception handling at EL3
+EL3_EXCEPTION_HANDLING := 1
+GICV2_G0_FOR_EL3 := 1
+
+# Enable PSCI v1.0 extended state ID format
+PSCI_EXTENDED_STATE_ID := 1
+
+# code and read-only data should be put on separate memory pages
+SEPARATE_CODE_AND_RODATA := 1
+
+# do not use coherent memory
+USE_COHERENT_MEM := 0
+
+# enable D-cache early during CPU warmboot
+WARMBOOT_ENABLE_DCACHE_EARLY := 1
+
+# remove the standard libc
+OVERRIDE_LIBC := 1
+
+# Flag to enable WDT FIQ interrupt handling for Tegra SoCs
+# prior to Tegra186
+ENABLE_TEGRA_WDT_LEGACY_FIQ_HANDLING ?= 0
+
+# Flag to allow relocation of BL32 image to TZDRAM during boot
+RELOCATE_BL32_IMAGE ?= 0
+
+# Enable stack protection
+ENABLE_STACK_PROTECTOR := strong
+
+# Enable SDEI
+SDEI_SUPPORT := 1
+
+# modify BUILD_PLAT to point to SoC specific build directory
+BUILD_PLAT := ${BUILD_BASE}/${PLAT}/${TARGET_SOC}/${BUILD_TYPE}
+
+include plat/nvidia/tegra/common/tegra_common.mk
+include ${SOC_DIR}/platform_${TARGET_SOC}.mk
+
+$(eval $(call add_define,ENABLE_TEGRA_WDT_LEGACY_FIQ_HANDLING))
+$(eval $(call add_define,RELOCATE_BL32_IMAGE))
+
+# platform cflags (disable stdlib)
+TF_CFLAGS += -nostdlib
+
+# override with necessary libc files for the Tegra platform
+override LIBC_SRCS := $(addprefix lib/libc/, \
+ aarch64/setjmp.S \
+ assert.c \
+ memchr.c \
+ memcmp.c \
+ memcpy.c \
+ memmove.c \
+ memset.c \
+ printf.c \
+ putchar.c \
+ strrchr.c \
+ strlen.c \
+ snprintf.c)
+
+INCLUDES += -Iinclude/lib/libc \
+		-Iinclude/lib/libc/$(ARCH)
+
+ifneq ($(findstring armlink,$(notdir $(LD))),)
+# o suppress warnings for section mismatches, undefined symbols
+# o use only those libraries that are specified in the input file
+# list to resolve references
+# o create a static callgraph of functions
+# o resolve undefined symbols to el3_panic
+# o include only required sections
+TF_LDFLAGS += --diag_suppress=L6314,L6332 --no_scanlib --callgraph
+TF_LDFLAGS += --keep="*(.__pubsub*)" --keep="*(.rt_svc_descs*)" --keep="*(.cpu_ops)"
+ifeq (${ENABLE_PMF},1)
+TF_LDFLAGS += --keep="*(.pmf_svc_descs*)"
+endif
+endif
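With ENABLE_STACK_PROTECTOR enabled, the generic build expects the platform to provide the canary hook. A minimal sketch of that hook (illustrative only; the Tegra port ships its own implementation, and a real one should derive the value from a hardware entropy source):

    #include <arch_helpers.h>
    #include <plat/common/platform.h>

    u_register_t plat_get_stack_protector_canary(void)
    {
        /* mix a fixed seed with the generic counter; for illustration only */
        return (u_register_t)0x2a4cUL ^ read_cntpct_el0();
    }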
diff --git a/plat/nvidia/tegra/scat/bl31.scat b/plat/nvidia/tegra/scat/bl31.scat
new file mode 100644
index 0000000..fdd6e33
--- /dev/null
+++ b/plat/nvidia/tegra/scat/bl31.scat
@@ -0,0 +1,284 @@
+#! armclang -E -x c
+
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#define PAGE_SIZE (1024 * 4)
+
+LR_START BL31_BASE
+{
+ __BL31_START__ +0 FIXED EMPTY 0
+ {
+ /* placeholder */
+ }
+
+ /* BL31_BASE address must be aligned on a page boundary. */
+ ScatterAssert((ImageBase(__BL31_START__) AND 0xFFF) == 0)
+}
+
+LR_TEXT BL31_BASE
+{
+ __TEXT__ +0 FIXED
+ {
+ *(:gdef:bl31_entrypoint, +FIRST)
+ *(.text*)
+ *(.vectors)
+ .ANY1(+RO-CODE)
+ }
+
+ __TEXT_EPILOGUE__ AlignExpr(+0, PAGE_SIZE) FIXED EMPTY 0
+ {
+ /* section delimiter */
+ }
+}
+
+LR_RO_DATA +0
+{
+ __RODATA__ AlignExpr(ImageLimit(LR_TEXT), 0) FIXED
+ {
+ *(.rodata*)
+ .ANY2(+RO-DATA)
+ }
+
+ /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+ __RT_SVC_DESCS__ AlignExpr(ImageLimit(__RODATA__), 8) FIXED
+ {
+ *(.rt_svc_descs)
+ }
+
+#if ENABLE_PMF
+ /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+ __PMF_SVC_DESCS__ AlignExpr(ImageLimit(__RT_SVC_DESCS__), 8) FIXED
+ {
+ *(.pmf_svc_descs)
+ }
+#endif /* ENABLE_PMF */
+
+ /*
+ * Ensure 8-byte alignment for cpu_ops so that its fields are also
+ * aligned.
+ */
+ __CPU_OPS__ AlignExpr(+0, 8) FIXED
+ {
+ *(.cpu_ops)
+ }
+
+ /*
+ * Keep the .got section in the RO section as it is patched
+ * prior to enabling the MMU and having the .got in RO is better for
+ * security. GOT is a table of addresses so ensure 8-byte alignment.
+ */
+ __GOT__ AlignExpr(ImageLimit(__CPU_OPS__), 8) FIXED
+ {
+ *(.got)
+ }
+
+ /* Place pubsub sections for events */
+ __PUBSUB_EVENTS__ AlignExpr(+0, 8) EMPTY 0
+ {
+ /* placeholder */
+ }
+
+#include <lib/el3_runtime/pubsub_events.h>
+
+ __RODATA_EPILOGUE__ AlignExpr(+0, PAGE_SIZE) FIXED EMPTY 0
+ {
+ /* section delimiter */
+ }
+}
+
+ /* cpu_ops must always be defined */
+ ScatterAssert(ImageLength(__CPU_OPS__) > 0)
+
+#if SPM_MM
+LR_SPM +0
+{
+ /*
+	 * Exception vectors of the SPM shim layer. They must be aligned on a 2K
+	 * boundary, but they are placed in a separate page so that individual
+	 * permissions can be applied to them; the actual alignment needed is 4K.
+ *
+ * There's no need to include this into the RO section of BL31 because it
+ * doesn't need to be accessed by BL31.
+ */
+ __SPM_SHIM_EXCEPTIONS__ AlignExpr(ImageLimit(LR_RO_DATA), PAGE_SIZE) FIXED
+ {
+ *(.spm_shim_exceptions)
+ }
+
+ __SPM_SHIM_EXCEPTIONS_EPILOGUE__ AlignExpr(ImageLimit(__SPM_SHIM_EXCEPTIONS__), PAGE_SIZE) FIXED
+ {
+ /* placeholder */
+ }
+}
+#endif
+
+LR_RW_DATA +0
+{
+ __DATA__ AlignExpr(+0, 16) FIXED
+ {
+ *(.data*)
+ *(.constdata)
+ *(locale$$data)
+ }
+}
+
+LR_RELA +0
+{
+ /*
+ * .rela.dyn needs to come after .data for the read-elf utility to parse
+ * this section correctly. Ensure 8-byte alignment so that the fields of
+ * RELA data structure are aligned.
+ */
+ __RELA__ AlignExpr(ImageLimit(LR_RW_DATA), 8) FIXED
+ {
+ *(.rela.dyn)
+ }
+}
+
+#ifdef BL31_PROGBITS_LIMIT
+ /* BL31 progbits has exceeded its limit. */
+ ScatterAssert(ImageLimit(LR_RELA) <= BL31_PROGBITS_LIMIT)
+#endif
+
+LR_STACKS +0
+{
+ __STACKS__ AlignExpr(+0, 64) FIXED
+ {
+ *(.tzfw_normal_stacks)
+ }
+}
+
+#define __BAKERY_LOCK_SIZE__ (ImageLimit(__BAKERY_LOCKS_EPILOGUE__) - \
+ ImageBase(__BAKERY_LOCKS__))
+#define BAKERY_LOCK_SIZE (__BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1))
+#define __PMF_TIMESTAMP_SIZE__ (ImageLimit(__PMF_TIMESTAMP__) - \
+ ImageBase(__PMF_TIMESTAMP__))
+#define PER_CPU_TIMESTAMP_SIZE (__PMF_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1))
+
+LR_BSS +0
+{
+ __BSS__ AlignExpr(ImageLimit(LR_STACKS), 256) FIXED
+ {
+ *(.bss*)
+ *(COMDAT)
+ }
+
+#if !USE_COHERENT_MEM
+ /*
+ * Bakery locks are stored in normal .bss memory
+ *
+ * Each lock's data is spread across multiple cache lines, one per CPU,
+ * but multiple locks can share the same cache line.
+ * The compiler will allocate enough memory for one CPU's bakery locks,
+ * the remaining cache lines are allocated by the linker script
+ */
+ __BAKERY_LOCKS__ AlignExpr(ImageLimit(__BSS__), CACHE_WRITEBACK_GRANULE) FIXED
+ {
+ *(.bakery_lock)
+ }
+
+ __BAKERY_LOCKS_EPILOGUE__ AlignExpr(ImageLimit(__BAKERY_LOCKS__), CACHE_WRITEBACK_GRANULE) FIXED EMPTY 0
+ {
+ /* section delimiter */
+ }
+
+ __PER_CPU_BAKERY_LOCKS__ ImageLimit(__BAKERY_LOCKS_EPILOGUE__) FIXED FILL 0 BAKERY_LOCK_SIZE
+ {
+ /* padded memory section to store per cpu bakery locks */
+ }
+
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+ /* PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements */
+	ScatterAssert(__BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE)
+#endif
+#endif
+
+#if ENABLE_PMF
+ /*
+ * Time-stamps are stored in normal .bss memory.
+ *
+ * The compiler allocates enough memory for one CPU's time-stamps;
+ * the remaining memory for the other CPUs is reserved by this
+ * scatter file.
+ */
+ __PMF_TIMESTAMP__ AlignExpr(+0, CACHE_WRITEBACK_GRANULE) FIXED EMPTY CACHE_WRITEBACK_GRANULE
+ {
+ /* store timestamps in this carved out memory */
+ }
+
+ __PMF_TIMESTAMP_EPILOGUE__ AlignExpr(ImageLimit(__PMF_TIMESTAMP__), CACHE_WRITEBACK_GRANULE) FIXED EMPTY 0
+ {
+ /*
+ * placeholder to make __PMF_TIMESTAMP__ end on a
+ * CACHE_WRITEBACK_GRANULE boundary
+ */
+ }
+
+ __PER_CPU_TIMESTAMPS__ +0 FIXED FILL 0 PER_CPU_TIMESTAMP_SIZE
+ {
+ /* padded memory section to store per cpu timestamps */
+ }
+#endif /* ENABLE_PMF */
+}
+
+LR_XLAT_TABLE +0
+{
+ .xlat_table +0 FIXED
+ {
+ *(.xlat_table)
+ }
+}
+
+#if USE_COHERENT_MEM
+LR_COHERENT_RAM +0
+{
+ /*
+ * The base address of the coherent memory section must be page-aligned (4K)
+ * to guarantee that the coherent data are stored on their own pages and
+ * are not mixed with normal data. This is required to set up the correct
+ * memory attributes for the coherent data page tables.
+ */
+ __COHERENT_RAM__ AlignExpr(+0, PAGE_SIZE) FIXED
+ {
+ /*
+ * Bakery locks are stored in coherent memory
+ *
+ * Each lock's data is contiguous and fully allocated by the compiler
+ */
+ *(.bakery_lock)
+ *(.tzfw_coherent_mem)
+ }
+
+ __COHERENT_RAM_EPILOGUE_UNALIGNED__ +0 FIXED EMPTY 0
+ {
+ /* section delimiter */
+ }
+
+ /*
+ * Memory page(s) mapped to this section will be marked
+ * as device memory. No other unexpected data must creep in.
+ * Ensure the rest of the current memory page is unused.
+ */
+ __COHERENT_RAM_EPILOGUE__ AlignExpr(ImageLimit(__COHERENT_RAM_EPILOGUE_UNALIGNED__), PAGE_SIZE) FIXED EMPTY 0
+ {
+ /* section delimiter */
+ }
+}
+#endif
+
+LR_END +0
+{
+ __BL31_END__ +0 FIXED EMPTY 0
+ {
+ /* placeholder */
+ }
+
+ /* BL31 image has exceeded its limit. */
+ ScatterAssert(ImageLimit(__BL31_END__) <= BL31_LIMIT)
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h b/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h
new file mode 100644
index 0000000..203f61a
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MCE_PRIVATE_H
+#define MCE_PRIVATE_H
+
+#include <lib/mmio.h>
+
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * Macros to prepare CSTATE info request
+ ******************************************************************************/
+/* Description of the parameters for UPDATE_CSTATE_INFO request */
+#define CLUSTER_CSTATE_MASK ULL(0x7)
+#define CLUSTER_CSTATE_SHIFT U(0)
+#define CLUSTER_CSTATE_UPDATE_BIT (ULL(1) << 7)
+#define CCPLEX_CSTATE_MASK ULL(0x3)
+#define CCPLEX_CSTATE_SHIFT ULL(8)
+#define CCPLEX_CSTATE_UPDATE_BIT (ULL(1) << 15)
+#define SYSTEM_CSTATE_MASK ULL(0xF)
+#define SYSTEM_CSTATE_SHIFT ULL(16)
+#define SYSTEM_CSTATE_FORCE_UPDATE_SHIFT ULL(22)
+#define SYSTEM_CSTATE_FORCE_UPDATE_BIT (ULL(1) << 22)
+#define SYSTEM_CSTATE_UPDATE_BIT (ULL(1) << 23)
+#define CSTATE_WAKE_MASK_UPDATE_BIT (ULL(1) << 31)
+#define CSTATE_WAKE_MASK_SHIFT ULL(32)
+#define CSTATE_WAKE_MASK_CLEAR U(0xFFFFFFFF)
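+
+/*
+ * Illustrative sketch (not a new interface): ari_update_cstate_info()
+ * assembles these fields into a single 64-bit request payload; a cluster
+ * state update, for example, contributes
+ *
+ *     (cluster & CLUSTER_CSTATE_MASK) | CLUSTER_CSTATE_UPDATE_BIT
+ *
+ * while the wake mask travels in the upper request data word.
+ */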
+
+/*******************************************************************************
+ * Auto-CC3 control macros
+ ******************************************************************************/
+#define MCE_AUTO_CC3_FREQ_MASK U(0x1FF)
+#define MCE_AUTO_CC3_FREQ_SHIFT U(0)
+#define MCE_AUTO_CC3_VTG_MASK U(0x7F)
+#define MCE_AUTO_CC3_VTG_SHIFT U(16)
+#define MCE_AUTO_CC3_ENABLE_BIT (U(1) << 31)
+
+/*******************************************************************************
+ * Macros for the 'IS_SC7_ALLOWED' command
+ ******************************************************************************/
+#define MCE_SC7_ALLOWED_MASK U(0x7)
+#define MCE_SC7_WAKE_TIME_SHIFT U(32)
+
+/*******************************************************************************
+ * Macros for 'read/write cstate stats' commands
+ ******************************************************************************/
+#define MCE_CSTATE_STATS_TYPE_SHIFT ULL(32)
+#define MCE_CSTATE_WRITE_DATA_LO_MASK U(0xF)
+
+/*******************************************************************************
+ * Macros for 'update crossover threshold' command
+ ******************************************************************************/
+#define MCE_CROSSOVER_THRESHOLD_TIME_SHIFT U(32)
+
+/*******************************************************************************
+ * MCA argument macros
+ ******************************************************************************/
+#define MCA_ARG_ERROR_MASK U(0xFF)
+#define MCA_ARG_FINISH_SHIFT U(24)
+#define MCA_ARG_FINISH_MASK U(0xFF)
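+
+/*
+ * ari_read_write_mca() decodes the ARI response with these fields: the
+ * error code sits in the low response word and the finish flag in bits
+ * [31:24] of the high response word.
+ */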
+
+/*******************************************************************************
+ * Uncore PERFMON ARI macros
+ ******************************************************************************/
+#define UNCORE_PERFMON_CMD_READ U(0)
+#define UNCORE_PERFMON_CMD_WRITE U(1)
+
+#define UNCORE_PERFMON_CMD_MASK U(0xFF)
+#define UNCORE_PERFMON_UNIT_GRP_MASK U(0xF)
+#define UNCORE_PERFMON_SELECTOR_MASK U(0xF)
+#define UNCORE_PERFMON_REG_MASK U(0xFF)
+#define UNCORE_PERFMON_CTR_MASK U(0xFF)
+#define UNCORE_PERFMON_RESP_STATUS_MASK U(0xFF)
+
+/*******************************************************************************
+ * Structure populated by arch specific code to export routines which perform
+ * common low level MCE functions
+ ******************************************************************************/
+typedef struct arch_mce_ops {
+ /*
+ * This ARI request sets up the MCE to start execution on assertion
+ * of STANDBYWFI, updates the core power state and expected wake time,
+ * and then determines the proper power state to enter.
+ */
+ int32_t (*enter_cstate)(uint32_t ari_base, uint32_t state,
+ uint32_t wake_time);
+ /*
+ * This ARI request allows updating of the CLUSTER_CSTATE,
+ * CCPLEX_CSTATE, and SYSTEM_CSTATE register values.
+ */
+ int32_t (*update_cstate_info)(uint32_t ari_base,
+ uint32_t cluster,
+ uint32_t ccplex,
+ uint32_t system,
+ uint8_t sys_state_force,
+ uint32_t wake_mask,
+ uint8_t update_wake_mask);
+ /*
+ * This ARI request allows updating of power state crossover
+ * threshold times. An index value specifies which crossover
+ * state is being updated.
+ */
+ int32_t (*update_crossover_time)(uint32_t ari_base,
+ uint32_t type,
+ uint32_t time);
+ /*
+ * This ARI request allows read access to statistical information
+ * related to power states.
+ */
+ uint64_t (*read_cstate_stats)(uint32_t ari_base,
+ uint32_t state);
+ /*
+ * This ARI request allows write access to statistical information
+ * related to power states.
+ */
+ int32_t (*write_cstate_stats)(uint32_t ari_base,
+ uint32_t state,
+ uint32_t stats);
+ /*
+ * This ARI request allows the CPU to understand the features
+ * supported by the MCE firmware.
+ */
+ uint64_t (*call_enum_misc)(uint32_t ari_base, uint32_t cmd,
+ uint32_t data);
+ /*
+ * This ARI request allows querying the CCPLEX to determine if
+ * the CCx state is allowed given a target core C-state and wake
+ * time. If the CCx state is allowed, the response indicates CCx
+ * must be entered. If the CCx state is not allowed, the response
+ * indicates CC6/CC7 can't be entered
+ */
+ int32_t (*is_ccx_allowed)(uint32_t ari_base, uint32_t state,
+ uint32_t wake_time);
+ /*
+ * This ARI request allows querying the CCPLEX to determine if
+ * the SC7 state is allowed given a target core C-state and wake
+ * time. If the SC7 state is allowed, all cores but the associated
+ * core are offlined (WAKE_EVENTS are set to 0) and the response
+ * indicates SC7 must be entered. If the SC7 state is not allowed,
+ * the response indicates SC7 can't be entered
+ */
+ int32_t (*is_sc7_allowed)(uint32_t ari_base, uint32_t state,
+ uint32_t wake_time);
+ /*
+ * This ARI request allows a core to bring another offlined core
+ * back online to the C0 state. Note that a core is offlined by
+ * entering a C-state where the WAKE_MASK is all 0.
+ */
+ int32_t (*online_core)(uint32_t ari_base, uint32_t cpuid);
+ /*
+ * This ARI request allows the CPU to enable/disable Auto-CC3 idle
+ * state.
+ */
+ int32_t (*cc3_ctrl)(uint32_t ari_base,
+ uint32_t freq,
+ uint32_t volt,
+ uint8_t enable);
+ /*
+ * This ARI request allows updating the reset vector register for
+ * D15 and A57 CPUs.
+ */
+ int32_t (*update_reset_vector)(uint32_t ari_base);
+ /*
+ * This ARI request instructs the ROC to flush A57 data caches in
+ * order to maintain coherency with the Denver cluster.
+ */
+ int32_t (*roc_flush_cache)(uint32_t ari_base);
+ /*
+ * This ARI request instructs the ROC to flush A57 data caches along
+ * with the caches covering ARM code in order to maintain coherency
+ * with the Denver cluster.
+ */
+ int32_t (*roc_flush_cache_trbits)(uint32_t ari_base);
+ /*
+ * This ARI request instructs the ROC to clean A57 data caches along
+ * with the caches covering ARM code in order to maintain coherency
+ * with the Denver cluster.
+ */
+ int32_t (*roc_clean_cache)(uint32_t ari_base);
+ /*
+ * This ARI request reads/writes the Machine Check Arch. (MCA)
+ * registers.
+ */
+ uint64_t (*read_write_mca)(uint32_t ari_base,
+ uint64_t cmd,
+ uint64_t *data);
+ /*
+ * Some MC GSC (General Security Carveout) register values are
+ * expected to be changed by TrustZone secure ARM code after boot.
+ * Since there is no hardware mechanism for the CCPLEX to know
+ * that an MC GSC register has changed to allow it to update its
+ * own internal GSC register, there needs to be a mechanism that
+ * can be used by ARM code to cause the CCPLEX to update its GSC
+ * register value. This ARI request allows updating the GSC register
+ * value for a certain carveout in the CCPLEX.
+ */
+ int32_t (*update_ccplex_gsc)(uint32_t ari_base, uint32_t gsc_idx);
+ /*
+ * This ARI request instructs the CCPLEX to either shutdown or
+ * reset the entire system
+ */
+ void (*enter_ccplex_state)(uint32_t ari_base, uint32_t state_idx);
+ /*
+ * This ARI request reads/writes data from/to Uncore PERFMON
+ * registers
+ */
+ int32_t (*read_write_uncore_perfmon)(uint32_t ari_base,
+ uint64_t req, uint64_t *data);
+ /*
+ * This ARI request implements the ARI_MISC_CCPLEX commands, which
+ * can be used to enable/disable coresight clock gating.
+ */
+ void (*misc_ccplex)(uint32_t ari_base, uint32_t index,
+ uint32_t value);
+} arch_mce_ops_t;
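+
+/*
+ * mce.c binds these hooks per CPU: the A57 cores use the ARI handlers
+ * throughout, while the Denver cores use the NVG handlers for the
+ * cstate related requests (see the ari_mce_ops and nvg_mce_ops tables).
+ */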
+
+/* declarations for ARI/NVG handler functions */
+int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+ uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+ uint8_t update_wake_mask);
+int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
+uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state);
+int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats);
+uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data);
+int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t ari_online_core(uint32_t ari_base, uint32_t core);
+int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
+int32_t ari_reset_vector_update(uint32_t ari_base);
+int32_t ari_roc_flush_cache_trbits(uint32_t ari_base);
+int32_t ari_roc_flush_cache(uint32_t ari_base);
+int32_t ari_roc_clean_cache(uint32_t ari_base);
+uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data);
+int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx);
+void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx);
+int32_t ari_read_write_uncore_perfmon(uint32_t ari_base,
+ uint64_t req, uint64_t *data);
+void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value);
+
+int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+ uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+ uint8_t update_wake_mask);
+int32_t nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
+uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state);
+int32_t nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats);
+int32_t nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t nvg_online_core(uint32_t ari_base, uint32_t core);
+int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
+
+extern void nvg_set_request_data(uint64_t req, uint64_t data);
+extern void nvg_set_request(uint64_t req);
+extern uint64_t nvg_get_result(void);
+#endif /* MCE_PRIVATE_H */
diff --git a/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h b/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h
new file mode 100644
index 0000000..45302da
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef T18X_ARI_H
+#define T18X_ARI_H
+
+/*
+ * ----------------------------------------------------------------------------
+ * t18x_ari.h
+ *
+ * Global ARI definitions.
+ * ----------------------------------------------------------------------------
+ */
+
+enum {
+ TEGRA_ARI_VERSION_MAJOR = 3U,
+ TEGRA_ARI_VERSION_MINOR = 1U,
+};
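+
+/*
+ * The version is reported by the TEGRA_ARI_MISC_VERSION request, packed
+ * as minor(63:32) | major(31:0); see the MCE_CMD_READ_VERSIONS handler
+ * in mce.c.
+ */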
+
+typedef enum {
+ /* indexes below get the core lock */
+ TEGRA_ARI_MISC = 0U,
+ /* index 1 is deprecated */
+ /* index 2 is deprecated */
+ /* index 3 is deprecated */
+ TEGRA_ARI_ONLINE_CORE = 4U,
+
+ /* indexes below need cluster lock */
+ TEGRA_ARI_MISC_CLUSTER = 41U,
+ TEGRA_ARI_IS_CCX_ALLOWED = 42U,
+ TEGRA_ARI_CC3_CTRL = 43U,
+
+ /* indexes below need ccplex lock */
+ TEGRA_ARI_ENTER_CSTATE = 80U,
+ TEGRA_ARI_UPDATE_CSTATE_INFO = 81U,
+ TEGRA_ARI_IS_SC7_ALLOWED = 82U,
+ /* index 83 is deprecated */
+ TEGRA_ARI_PERFMON = 84U,
+ TEGRA_ARI_UPDATE_CCPLEX_GSC = 85U,
+ /* index 86 is deprecated */
+ /* index 87 is deprecated */
+ TEGRA_ARI_ROC_FLUSH_CACHE_ONLY = 88U,
+ TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS = 89U,
+ TEGRA_ARI_MISC_CCPLEX = 90U,
+ TEGRA_ARI_MCA = 91U,
+ TEGRA_ARI_UPDATE_CROSSOVER = 92U,
+ TEGRA_ARI_CSTATE_STATS = 93U,
+ TEGRA_ARI_WRITE_CSTATE_STATS = 94U,
+ TEGRA_ARI_COPY_MISCREG_AA64_RST = 95U,
+ TEGRA_ARI_ROC_CLEAN_CACHE_ONLY = 96U,
+} tegra_ari_req_id_t;
+
+typedef enum {
+ TEGRA_ARI_MISC_ECHO = 0U,
+ TEGRA_ARI_MISC_VERSION = 1U,
+ TEGRA_ARI_MISC_FEATURE_LEAF_0 = 2U,
+} tegra_ari_misc_index_t;
+
+typedef enum {
+ TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF = 0U,
+ TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT = 1U,
+ TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL = 2U,
+ TEGRA_ARI_MISC_CCPLEX_EDBGREQ = 3U,
+} tegra_ari_misc_ccplex_index_t;
+
+typedef enum {
+ TEGRA_ARI_CORE_C0 = 0U,
+ TEGRA_ARI_CORE_C1 = 1U,
+ TEGRA_ARI_CORE_C6 = 6U,
+ TEGRA_ARI_CORE_C7 = 7U,
+ TEGRA_ARI_CORE_WARMRSTREQ = 8U,
+} tegra_ari_core_sleep_state_t;
+
+typedef enum {
+ TEGRA_ARI_CLUSTER_CC0 = 0U,
+ TEGRA_ARI_CLUSTER_CC1 = 1U,
+ TEGRA_ARI_CLUSTER_CC6 = 6U,
+ TEGRA_ARI_CLUSTER_CC7 = 7U,
+} tegra_ari_cluster_sleep_state_t;
+
+typedef enum {
+ TEGRA_ARI_CCPLEX_CCP0 = 0U,
+ TEGRA_ARI_CCPLEX_CCP1 = 1U,
+ TEGRA_ARI_CCPLEX_CCP3 = 3U, /* obsoleted */
+} tegra_ari_ccplex_sleep_state_t;
+
+typedef enum {
+ TEGRA_ARI_SYSTEM_SC0 = 0U,
+ TEGRA_ARI_SYSTEM_SC1 = 1U, /* obsoleted */
+ TEGRA_ARI_SYSTEM_SC2 = 2U, /* obsoleted */
+ TEGRA_ARI_SYSTEM_SC3 = 3U, /* obsoleted */
+ TEGRA_ARI_SYSTEM_SC4 = 4U, /* obsoleted */
+ TEGRA_ARI_SYSTEM_SC7 = 7U,
+ TEGRA_ARI_SYSTEM_SC8 = 8U,
+} tegra_ari_system_sleep_state_t;
+
+typedef enum {
+ TEGRA_ARI_CROSSOVER_C1_C6 = 0U,
+ TEGRA_ARI_CROSSOVER_CC1_CC6 = 1U,
+ TEGRA_ARI_CROSSOVER_CC1_CC7 = 2U,
+ TEGRA_ARI_CROSSOVER_CCP1_CCP3 = 3U, /* obsoleted */
+ TEGRA_ARI_CROSSOVER_CCP3_SC2 = 4U, /* obsoleted */
+ TEGRA_ARI_CROSSOVER_CCP3_SC3 = 5U, /* obsoleted */
+ TEGRA_ARI_CROSSOVER_CCP3_SC4 = 6U, /* obsoleted */
+ TEGRA_ARI_CROSSOVER_CCP3_SC7 = 7U, /* obsoleted */
+ TEGRA_ARI_CROSSOVER_SC0_SC7 = 7U,
+ TEGRA_ARI_CROSSOVER_CCP3_SC1 = 8U, /* obsoleted */
+} tegra_ari_crossover_index_t;
+
+typedef enum {
+ TEGRA_ARI_CSTATE_STATS_CLEAR = 0U,
+ TEGRA_ARI_CSTATE_STATS_SC7_ENTRIES = 1U,
+ TEGRA_ARI_CSTATE_STATS_SC4_ENTRIES, /* obsoleted */
+ TEGRA_ARI_CSTATE_STATS_SC3_ENTRIES, /* obsoleted */
+ TEGRA_ARI_CSTATE_STATS_SC2_ENTRIES, /* obsoleted */
+ TEGRA_ARI_CSTATE_STATS_CCP3_ENTRIES, /* obsoleted */
+ TEGRA_ARI_CSTATE_STATS_A57_CC6_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_A57_CC7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_D15_CC6_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_D15_CC7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_D15_0_C6_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_D15_1_C6_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_D15_0_C7_ENTRIES = 14U,
+ TEGRA_ARI_CSTATE_STATS_D15_1_C7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_A57_0_C7_ENTRIES = 18U,
+ TEGRA_ARI_CSTATE_STATS_A57_1_C7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_A57_2_C7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_A57_3_C7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 26U,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3,
+} tegra_ari_cstate_stats_index_t;
+
+typedef enum {
+ TEGRA_ARI_GSC_ALL = 0U,
+ TEGRA_ARI_GSC_BPMP = 6U,
+ TEGRA_ARI_GSC_APE = 7U,
+ TEGRA_ARI_GSC_SPE = 8U,
+ TEGRA_ARI_GSC_SCE = 9U,
+ TEGRA_ARI_GSC_APR = 10U,
+ TEGRA_ARI_GSC_TZRAM = 11U,
+ TEGRA_ARI_GSC_SE = 12U,
+ TEGRA_ARI_GSC_BPMP_TO_SPE = 16U,
+ TEGRA_ARI_GSC_SPE_TO_BPMP = 17U,
+ TEGRA_ARI_GSC_CPU_TZ_TO_BPMP = 18U,
+ TEGRA_ARI_GSC_BPMP_TO_CPU_TZ = 19U,
+ TEGRA_ARI_GSC_CPU_NS_TO_BPMP = 20U,
+ TEGRA_ARI_GSC_BPMP_TO_CPU_NS = 21U,
+ TEGRA_ARI_GSC_IPC_SE_SPE_SCE_BPMP = 22U,
+ TEGRA_ARI_GSC_SC7_RESUME_FW = 23U,
+ TEGRA_ARI_GSC_TZ_DRAM_IDX = 34U,
+ TEGRA_ARI_GSC_VPR_IDX = 35U,
+} tegra_ari_gsc_index_t;
+
+/* This macro will produce enums for __name##_LSB and __name##_MSB */
+#define TEGRA_ARI_ENUM_MASK_LSB_MSB(__name, __lsb, __msb) __name##_LSB = __lsb, __name##_MSB = __msb
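+/*
+ * For example (illustrative, FOO is a placeholder name),
+ * TEGRA_ARI_ENUM_MASK_LSB_MSB(FOO, 8U, 9U) expands to
+ * "FOO_LSB = 8U, FOO_MSB = 9U"; a field mask can then be derived as
+ * (((1ULL << (FOO_MSB - FOO_LSB + 1U)) - 1U) << FOO_LSB).
+ */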
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE, 0U, 2U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE_PRESENT, 7U, 7U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE, 8U, 9U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE_PRESENT, 15U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE, 16U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__IGNORE_CROSSOVERS, 22U, 22U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE_PRESENT, 23U, 23U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__WAKE_MASK_PRESENT, 31U, 31U),
+} tegra_ari_update_cstate_info_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL__EN, 0U, 0U),
+} tegra_ari_misc_ccplex_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_FREQ, 0U, 8U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_VOLT, 16U, 23U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__ENABLE, 31U, 31U),
+} tegra_ari_cc3_ctrl_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_MCA_NOP = 0U,
+ TEGRA_ARI_MCA_READ_SERR = 1U,
+ TEGRA_ARI_MCA_WRITE_SERR = 2U,
+ TEGRA_ARI_MCA_CLEAR_SERR = 4U,
+ TEGRA_ARI_MCA_REPORT_SERR = 5U,
+ TEGRA_ARI_MCA_READ_INTSTS = 6U,
+ TEGRA_ARI_MCA_WRITE_INTSTS = 7U,
+ TEGRA_ARI_MCA_READ_PREBOOT_SERR = 8U,
+} tegra_ari_mca_commands_t;
+
+typedef enum {
+ TEGRA_ARI_MCA_RD_WR_DPMU = 0U,
+ TEGRA_ARI_MCA_RD_WR_IOB = 1U,
+ TEGRA_ARI_MCA_RD_WR_MCB = 2U,
+ TEGRA_ARI_MCA_RD_WR_CCE = 3U,
+ TEGRA_ARI_MCA_RD_WR_CQX = 4U,
+ TEGRA_ARI_MCA_RD_WR_CTU = 5U,
+ TEGRA_ARI_MCA_RD_WR_JSR_MTS = 7U,
+ TEGRA_ARI_MCA_RD_BANK_INFO = 0x0fU,
+ TEGRA_ARI_MCA_RD_BANK_TEMPLATE = 0x10U,
+ TEGRA_ARI_MCA_RD_WR_SECURE_ACCESS_REGISTER = 0x11U,
+ TEGRA_ARI_MCA_RD_WR_GLOBAL_CONFIG_REGISTER = 0x12U,
+} tegra_ari_mca_rd_wr_indexes_t;
+
+typedef enum {
+ TEGRA_ARI_MCA_RD_WR_ASERRX_CTRL = 0U,
+ TEGRA_ARI_MCA_RD_WR_ASERRX_STATUS = 1U,
+ TEGRA_ARI_MCA_RD_WR_ASERRX_ADDR = 2U,
+ TEGRA_ARI_MCA_RD_WR_ASERRX_MISC1 = 3U,
+ TEGRA_ARI_MCA_RD_WR_ASERRX_MISC2 = 4U,
+} tegra_ari_mca_read_asserx_subindexes_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_SETTING_ENABLES_NS_PERMITTED, 0U, 0U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_READING_STATUS_NS_PERMITTED, 1U, 1U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_PENDING_MCA_ERRORS_NS_PERMITTED, 2U, 2U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_CLEARING_MCA_INTERRUPTS_NS_PERMITTED, 3U, 3U),
+} tegra_ari_mca_secure_register_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM_ERR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_CRAB_ERR, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_RD_WR_N, 18U, 18U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UCODE_ERR, 19U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM, 20U, 23U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_ADDR, 0U, 41U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_UCODE_ERRCD, 42U, 52U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_PWM_ERR, 0U, 0U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_CRAB_ERR, 1U, 1U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_UCODE_ERR, 3U, 3U),
+} tegra_ari_mca_aserr0_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MSI_ERR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_IHI_ERR, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CRI_ERR, 18U, 18U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MMCRAB_ERR, 19U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CSI_ERR, 20U, 20U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RD_WR_N, 21U, 21U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_REQ_ERRT, 22U, 23U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RESP_ERRT, 24U, 25U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AXI_ID, 0U, 7U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_ID, 8U, 27U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CID, 28U, 31U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CMD, 32U, 35U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MSI_ERR, 0U, 0U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_IHI_ERR, 1U, 1U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CRI_ERR, 2U, 2U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MMCRAB_ERR, 3U, 3U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CSI_ERR, 4U, 4U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_MISC_ADDR, 0U, 41U),
+} tegra_ari_mca_aserr1_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MC_ERR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SYSRAM_ERR, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_CLIENT_ID, 18U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ID, 0U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_CMD, 18U, 21U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ADDR, 22U, 53U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_CTRL_EN_MC_ERR, 0U, 0U),
+} tegra_ari_mca_aserr2_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_TO_ERR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_STAT_ERR, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_DST_ERR, 18U, 18U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UNC_ERR, 19U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MH_ERR, 20U, 20U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PERR, 21U, 21U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PSN_ERR, 22U, 22U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_CMD, 0U, 5U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_ADDR, 6U, 47U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TO, 0U, 0U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_DIV4, 1U, 1U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TLIMIT, 2U, 11U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_PSN_ERR_CORR_MSK, 12U, 25U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_MORE_INFO, 0U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TO_INFO, 18U, 43U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_SRC, 44U, 45U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TID, 46U, 52U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_TO_ERR, 0U, 0U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_STAT_ERR, 1U, 1U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_DST_ERR, 2U, 2U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_UNC_ERR, 3U, 3U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_MH_ERR, 4U, 4U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PERR, 5U, 5U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PSN_ERR, 6U, 19U),
+} tegra_ari_mca_aserr3_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SRC_ERR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_DST_ERR, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_REQ_ERR, 18U, 18U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_RSP_ERR, 19U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_CTRL_EN_CPE_ERR, 0U, 0U),
+} tegra_ari_mca_aserr4_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_CTUPAR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MULTI, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_SRC, 0U, 7U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ID, 8U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_DATA, 16U, 26U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_CMD, 32U, 35U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ADDR, 36U, 45U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_CTRL_EN_CTUPAR, 0U, 0U),
+} tegra_ari_mca_aserr5_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_ADDR_TBD_INFO, 0U, 63U),
+} tegra_ari_mca_serr1_bitmasks_t;
+
+#undef TEGRA_ARI_ENUM_MASK_LSB_MSB
+
+typedef enum {
+ TEGRA_NVG_CHANNEL_PMIC = 0U,
+ TEGRA_NVG_CHANNEL_POWER_PERF = 1U,
+ TEGRA_NVG_CHANNEL_POWER_MODES = 2U,
+ TEGRA_NVG_CHANNEL_WAKE_TIME = 3U,
+ TEGRA_NVG_CHANNEL_CSTATE_INFO = 4U,
+ TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 = 5U,
+ TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC6 = 6U,
+ TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC7 = 7U,
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP1_CCP3 = 8U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC2 = 9U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC3 = 10U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC4 = 11U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC7 = 12U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CROSSOVER_SC0_SC7 = 12U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR = 13U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_SC7_ENTRIES = 14U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_SC4_ENTRIES = 15U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_SC3_ENTRIES = 16U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_SC2_ENTRIES = 17U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_CCP3_ENTRIES = 18U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC6_ENTRIES = 19U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC7_ENTRIES = 20U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC6_ENTRIES = 21U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC7_ENTRIES = 22U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C6_ENTRIES = 23U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C6_ENTRIES = 24U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C6_ENTRIES = 25U, /* Reserved (for Denver15 core 2) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C6_ENTRIES = 26U, /* Reserved (for Denver15 core 3) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C7_ENTRIES = 27U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C7_ENTRIES = 28U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C7_ENTRIES = 29U, /* Reserved (for Denver15 core 2) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C7_ENTRIES = 30U, /* Reserved (for Denver15 core 3) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_0_C7_ENTRIES = 31U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_1_C7_ENTRIES = 32U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_2_C7_ENTRIES = 33U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_3_C7_ENTRIES = 34U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0 = 35U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1 = 36U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_2 = 37U, /* Reserved (for Denver15 core 2) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_3 = 38U, /* Reserved (for Denver15 core 3) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 39U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1 = 40U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2 = 41U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3 = 42U,
+ TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED = 43U,
+ TEGRA_NVG_CHANNEL_ONLINE_CORE = 44U,
+ TEGRA_NVG_CHANNEL_CC3_CTRL = 45U,
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC1 = 46U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_LAST_INDEX,
+} tegra_nvg_channel_id_t;
+
+#endif /* T18X_ARI_H */
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S b/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S
new file mode 100644
index 0000000..e3591ce
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl nvg_set_request_data
+ .globl nvg_set_request
+ .globl nvg_get_result
+
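+/*
+ * The NVG interface is exposed through implementation defined system
+ * registers: s3_0_c15_c1_2 carries the request index, while
+ * s3_0_c15_c1_3 carries the request data on write and the result on
+ * read, as the accessors below show.
+ */
+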
+/* void nvg_set_request_data(uint64_t req, uint64_t data) */
+func nvg_set_request_data
+ msr s3_0_c15_c1_2, x0
+ msr s3_0_c15_c1_3, x1
+ ret
+endfunc nvg_set_request_data
+
+/* void nvg_set_request(uint64_t req) */
+func nvg_set_request
+ msr s3_0_c15_c1_2, x0
+ ret
+endfunc nvg_set_request
+
+/* uint64_t nvg_get_result(void) */
+func nvg_get_result
+ mrs x0, s3_0_c15_c1_3
+ ret
+endfunc nvg_get_result
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
new file mode 100644
index 0000000..6414e07
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <denver.h>
+#include <lib/mmio.h>
+#include <plat/common/platform.h>
+
+#include <mce_private.h>
+#include <t18x_ari.h>
+
+/*******************************************************************************
+ * Register offsets for ARI request/results
+ ******************************************************************************/
+#define ARI_REQUEST 0x0U
+#define ARI_REQUEST_EVENT_MASK 0x4U
+#define ARI_STATUS 0x8U
+#define ARI_REQUEST_DATA_LO 0xCU
+#define ARI_REQUEST_DATA_HI 0x10U
+#define ARI_RESPONSE_DATA_LO 0x14U
+#define ARI_RESPONSE_DATA_HI 0x18U
+
+/* Status values for the current request */
+#define ARI_REQ_PENDING 1U
+#define ARI_REQ_ONGOING 3U
+#define ARI_REQUEST_VALID_BIT (1U << 8)
+#define ARI_EVT_MASK_STANDBYWFI_BIT (1U << 7)
+
+/* default timeout (us) to wait for ARI completion */
+#define ARI_MAX_RETRY_COUNT U(2000000)
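+
+/*
+ * Request flow, as implemented by ari_request_wait() below: program the
+ * data and event mask registers, write the request index with
+ * ARI_REQUEST_VALID_BIT set, then poll ARI_STATUS until the pending and
+ * ongoing bits clear. Polling is skipped for event-triggered requests
+ * and for shutdown/reboot requests, which do not complete normally.
+ */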
+
+/*******************************************************************************
+ * ARI helper functions
+ ******************************************************************************/
+static inline uint32_t ari_read_32(uint32_t ari_base, uint32_t reg)
+{
+ return mmio_read_32((uint64_t)ari_base + (uint64_t)reg);
+}
+
+static inline void ari_write_32(uint32_t ari_base, uint32_t val, uint32_t reg)
+{
+ mmio_write_32((uint64_t)ari_base + (uint64_t)reg, val);
+}
+
+static inline uint32_t ari_get_request_low(uint32_t ari_base)
+{
+ return ari_read_32(ari_base, ARI_REQUEST_DATA_LO);
+}
+
+static inline uint32_t ari_get_request_high(uint32_t ari_base)
+{
+ return ari_read_32(ari_base, ARI_REQUEST_DATA_HI);
+}
+
+static inline uint32_t ari_get_response_low(uint32_t ari_base)
+{
+ return ari_read_32(ari_base, ARI_RESPONSE_DATA_LO);
+}
+
+static inline uint32_t ari_get_response_high(uint32_t ari_base)
+{
+ return ari_read_32(ari_base, ARI_RESPONSE_DATA_HI);
+}
+
+static inline void ari_clobber_response(uint32_t ari_base)
+{
+ ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_LO);
+ ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_HI);
+}
+
+static int32_t ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t req,
+ uint32_t lo, uint32_t hi)
+{
+ uint32_t retries = (uint32_t)ARI_MAX_RETRY_COUNT;
+ uint32_t status;
+ int32_t ret = 0;
+
+ /* program the request, event_mask, hi and lo registers */
+ ari_write_32(ari_base, lo, ARI_REQUEST_DATA_LO);
+ ari_write_32(ari_base, hi, ARI_REQUEST_DATA_HI);
+ ari_write_32(ari_base, evt_mask, ARI_REQUEST_EVENT_MASK);
+ ari_write_32(ari_base, req | ARI_REQUEST_VALID_BIT, ARI_REQUEST);
+
+ /*
+ * For commands that have an event trigger, we should bypass
+ * ARI_STATUS polling, since MCE is waiting for SW to trigger
+ * the event.
+ */
+ if (evt_mask != 0U) {
+ ret = 0;
+ } else {
+ /* For shutdown/reboot commands, we don't have to check for timeouts */
+ if ((req == TEGRA_ARI_MISC_CCPLEX) &&
+ ((lo == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) ||
+ (lo == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT))) {
+ ret = 0;
+ } else {
+ /*
+ * Wait for the command response for not more than the timeout
+ */
+ while (retries != 0U) {
+
+ /* read the command status */
+ status = ari_read_32(ari_base, ARI_STATUS);
+ if ((status & (ARI_REQ_ONGOING | ARI_REQ_PENDING)) == 0U) {
+ break;
+ }
+
+ /* delay 1 us */
+ udelay(1);
+
+ /* decrement the retry count */
+ retries--;
+ }
+
+ /* assert if the command timed out */
+ if (retries == 0U) {
+ ERROR("ARI request timed out: req %d on CPU %d\n",
+ req, plat_my_core_pos());
+ assert(retries != 0U);
+ }
+ }
+ }
+
+ return ret;
+}
+
+int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ int32_t ret = 0;
+
+ /* check for allowed power state */
+ if ((state != TEGRA_ARI_CORE_C0) &&
+ (state != TEGRA_ARI_CORE_C1) &&
+ (state != TEGRA_ARI_CORE_C6) &&
+ (state != TEGRA_ARI_CORE_C7)) {
+ ERROR("%s: unknown cstate (%d)\n", __func__, state);
+ ret = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* Enter the cstate, to be woken up after wake_time (TSC ticks) */
+ ret = ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
+ (uint32_t)TEGRA_ARI_ENTER_CSTATE, state, wake_time);
+ }
+
+ return ret;
+}
+
+int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+ uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+ uint8_t update_wake_mask)
+{
+ uint64_t val = 0U;
+
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* update CLUSTER_CSTATE? */
+ if (cluster != 0U) {
+ val |= (cluster & CLUSTER_CSTATE_MASK) |
+ CLUSTER_CSTATE_UPDATE_BIT;
+ }
+
+ /* update CCPLEX_CSTATE? */
+ if (ccplex != 0U) {
+ val |= ((ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
+ CCPLEX_CSTATE_UPDATE_BIT;
+ }
+
+ /* update SYSTEM_CSTATE? */
+ if (system != 0U) {
+ val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+ (((uint64_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+ SYSTEM_CSTATE_UPDATE_BIT);
+ }
+
+ /* update wake mask value? */
+ if (update_wake_mask != 0U) {
+ val |= CSTATE_WAKE_MASK_UPDATE_BIT;
+ }
+
+ /* set the updated cstate info */
+ return ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_UPDATE_CSTATE_INFO,
+ (uint32_t)val, wake_mask);
+}
+
+int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+{
+ int32_t ret = 0;
+
+ /* sanity check crossover type */
+ if ((type == TEGRA_ARI_CROSSOVER_C1_C6) ||
+ (type > TEGRA_ARI_CROSSOVER_CCP3_SC1)) {
+ ret = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* update crossover threshold time */
+ ret = ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_UPDATE_CROSSOVER, type, time);
+ }
+
+ return ret;
+}
+
+uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state)
+{
+ int32_t ret;
+ uint64_t result;
+
+ /* sanity check cstate ID */
+ if (state == 0U) {
+ result = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ ret = ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_CSTATE_STATS, state, 0U);
+ if (ret != 0) {
+ result = EINVAL;
+ } else {
+ result = (uint64_t)ari_get_response_low(ari_base);
+ }
+ }
+ return result;
+}
+
+int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* write the cstate stats */
+ return ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_WRITE_CSTATE_STATS,
+ state, stats);
+}
+
+uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
+{
+ uint64_t resp;
+ int32_t ret;
+ uint32_t local_data = data;
+
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* ARI_REQUEST_DATA_HI is reserved for commands other than 'ECHO' */
+ if (cmd != TEGRA_ARI_MISC_ECHO) {
+ local_data = 0U;
+ }
+
+ ret = ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_MISC, cmd, local_data);
+ if (ret != 0) {
+ resp = (uint64_t)ret;
+ } else {
+ /* get the command response */
+ resp = ari_get_response_low(ari_base);
+ resp |= ((uint64_t)ari_get_response_high(ari_base) << 32);
+ }
+
+ return resp;
+}
+
+int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ int32_t ret;
+ uint32_t result;
+
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ ret = ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_IS_CCX_ALLOWED,
+ state & 0x7U, wake_time);
+ if (ret != 0) {
+ ERROR("%s: failed (%d)\n", __func__, ret);
+ result = 0U;
+ } else {
+ result = ari_get_response_low(ari_base) & 0x1U;
+ }
+
+ /* 1 = CCx allowed, 0 = CCx not allowed */
+ return (int32_t)result;
+}
+
+int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ int32_t ret, result;
+
+ /* check for allowed power state */
+ if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
+ (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
+ ERROR("%s: unknown cstate (%d)\n", __func__, state);
+ result = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ ret = ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_IS_SC7_ALLOWED, state, wake_time);
+ if (ret != 0) {
+ ERROR("%s: failed (%d)\n", __func__, ret);
+ result = 0;
+ } else {
+ /* 1 = SC7 allowed, 0 = SC7 not allowed */
+ result = (ari_get_response_low(ari_base) != 0U) ? 1 : 0;
+ }
+ }
+
+ return result;
+}
+
+int32_t ari_online_core(uint32_t ari_base, uint32_t core)
+{
+ uint64_t cpu = read_mpidr() & (MPIDR_CPU_MASK);
+ uint64_t cluster = (read_mpidr() & (MPIDR_CLUSTER_MASK)) >>
+ (MPIDR_AFFINITY_BITS);
+ uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+ int32_t ret;
+
+ /* construct the current CPU number (4 CPUs per cluster) */
+ cpu |= (cluster << 2);
+
+ /* sanity check target core id */
+ if ((core >= MCE_CORE_ID_MAX) || (cpu == (uint64_t)core)) {
+ ERROR("%s: unsupported core id (%d)\n", __func__, core);
+ ret = EINVAL;
+ } else {
+ /*
+ * The Denver cluster has 2 CPUs only - 0, 1.
+ */
+ if ((impl == DENVER_IMPL) && ((core == 2U) || (core == 3U))) {
+ ERROR("%s: unknown core id (%d)\n", __func__, core);
+ ret = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+ ret = ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_ONLINE_CORE, core, 0U);
+ }
+ }
+
+ return ret;
+}
+
+int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+{
+ uint32_t val;
+
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /*
+ * If the enable bit is cleared, Auto-CC3 will be disabled by setting
+ * the SW visible voltage/frequency request registers for all
+ * non-floorswept cores valid independent of StandbyWFI and disabling
+ * the IDLE voltage/frequency request register. If set, Auto-CC3
+ * will be enabled by setting the ARM SW visible voltage/frequency
+ * request registers for all non-floorswept cores to be enabled by
+ * StandbyWFI or the equivalent signal, and always keeping the IDLE
+ * voltage/frequency request register enabled.
+ */
+ val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |
+ ((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |
+ ((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
+
+ return ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_CC3_CTRL, val, 0U);
+}
+
+int32_t ari_reset_vector_update(uint32_t ari_base)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /*
+ * Need to program the CPU reset vector one time during cold boot
+ * and SC7 exit
+ */
+ (void)ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_COPY_MISCREG_AA64_RST, 0U, 0U);
+
+ return 0;
+}
+
+int32_t ari_roc_flush_cache_trbits(uint32_t ari_base)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ return ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS, 0U, 0U);
+}
+
+int32_t ari_roc_flush_cache(uint32_t ari_base)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ return ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_ROC_FLUSH_CACHE_ONLY, 0U, 0U);
+}
+
+int32_t ari_roc_clean_cache(uint32_t ari_base)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ return ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_ROC_CLEAN_CACHE_ONLY, 0U, 0U);
+}
+
+uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data)
+{
+ uint64_t mca_arg_data, result = 0;
+ uint32_t resp_lo, resp_hi;
+ uint32_t mca_arg_err, mca_arg_finish;
+ int32_t ret;
+
+ /* Set data (write) */
+ mca_arg_data = (data != NULL) ? *data : 0ULL;
+
+ /* Set command (the MCA command word goes via the response data registers) */
+ ari_write_32(ari_base, (uint32_t)cmd, ARI_RESPONSE_DATA_LO);
+ ari_write_32(ari_base, (uint32_t)(cmd >> 32U), ARI_RESPONSE_DATA_HI);
+
+ ret = ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_MCA,
+ (uint32_t)mca_arg_data,
+ (uint32_t)(mca_arg_data >> 32U));
+ if (ret == 0) {
+ resp_lo = ari_get_response_low(ari_base);
+ resp_hi = ari_get_response_high(ari_base);
+
+ mca_arg_err = resp_lo & MCA_ARG_ERROR_MASK;
+ mca_arg_finish = (resp_hi >> MCA_ARG_FINISH_SHIFT) &
+ MCA_ARG_FINISH_MASK;
+
+ if (mca_arg_finish == 0U) {
+ result = (uint64_t)mca_arg_err;
+ } else {
+ if (data != NULL) {
+ resp_lo = ari_get_request_low(ari_base);
+ resp_hi = ari_get_request_high(ari_base);
+ *data = ((uint64_t)resp_hi << 32U) |
+ (uint64_t)resp_lo;
+ }
+ }
+ }
+
+ return result;
+}
+
+int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
+{
+ int32_t ret = 0;
+ /* sanity check GSC ID */
+ if (gsc_idx > TEGRA_ARI_GSC_VPR_IDX) {
+ ret = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /*
+ * The MCE code will read the GSC carveout value, corresponding to
+ * the ID, from the MC registers and update the internal GSC registers
+ * of the CCPLEX.
+ */
+ (void)ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0U);
+ }
+
+ return ret;
+}
+
+void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /*
+ * The MCE will shutdown or restart the entire system
+ */
+ (void)ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_MISC_CCPLEX, state_idx, 0U);
+}
+
+int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
+ uint64_t *data)
+{
+ int32_t ret, result;
+ uint32_t val, req_status;
+ uint8_t req_cmd;
+
+ req_cmd = (uint8_t)(req & UNCORE_PERFMON_CMD_MASK);
+
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* sanity check input parameters */
+ if ((req_cmd == UNCORE_PERFMON_CMD_READ) && (data == NULL)) {
+ ERROR("invalid parameters\n");
+ result = EINVAL;
+ } else {
+ /*
+ * For "write" commands get the value that has to be written
+ * to the uncore perfmon registers
+ */
+ val = (req_cmd == UNCORE_PERFMON_CMD_WRITE) ?
+ (uint32_t)*data : 0U;
+
+ ret = ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_PERFMON, val, (uint32_t)req);
+ if (ret != 0) {
+ result = ret;
+ } else {
+ /* read the command status value */
+ req_status = ari_get_response_high(ari_base) &
+ UNCORE_PERFMON_RESP_STATUS_MASK;
+
+ /*
+ * For "read" commands get the data from the uncore
+ * perfmon registers
+ */
+ req_status &= UNCORE_PERFMON_RESP_STATUS_MASK;
+ if ((req_status == 0U) && (req_cmd == UNCORE_PERFMON_CMD_READ)) {
+ *data = ari_get_response_low(ari_base);
+ }
+ result = (int32_t)req_status;
+ }
+ }
+
+ return result;
+}
+
+void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value)
+{
+ /*
+ * This invokes the ARI_MISC_CCPLEX commands. This can be
+ * used to enable/disable coresight clock gating.
+ */
+
+ if ((index > TEGRA_ARI_MISC_CCPLEX_EDBGREQ) ||
+ ((index == TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL) &&
+ (value > 1U))) {
+ ERROR("%s: invalid parameters \n", __func__);
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+ (void)ari_request_wait(ari_base, 0U,
+ (uint32_t)TEGRA_ARI_MISC_CCPLEX, index, value);
+ }
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
new file mode 100644
index 0000000..aebaceb
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <context.h>
+#include <denver.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/mmio.h>
+
+#include <mce.h>
+#include <mce_private.h>
+#include <t18x_ari.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+
+/* NVG functions handlers */
+static arch_mce_ops_t nvg_mce_ops = {
+ .enter_cstate = nvg_enter_cstate,
+ .update_cstate_info = nvg_update_cstate_info,
+ .update_crossover_time = nvg_update_crossover_time,
+ .read_cstate_stats = nvg_read_cstate_stats,
+ .write_cstate_stats = nvg_write_cstate_stats,
+ .call_enum_misc = ari_enumeration_misc,
+ .is_ccx_allowed = nvg_is_ccx_allowed,
+ .is_sc7_allowed = nvg_is_sc7_allowed,
+ .online_core = nvg_online_core,
+ .cc3_ctrl = nvg_cc3_ctrl,
+ .update_reset_vector = ari_reset_vector_update,
+ .roc_flush_cache = ari_roc_flush_cache,
+ .roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
+ .roc_clean_cache = ari_roc_clean_cache,
+ .read_write_mca = ari_read_write_mca,
+ .update_ccplex_gsc = ari_update_ccplex_gsc,
+ .enter_ccplex_state = ari_enter_ccplex_state,
+ .read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
+ .misc_ccplex = ari_misc_ccplex
+};
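+
+/*
+ * Note that even in the NVG ops table, the enumeration, reset vector,
+ * cache maintenance, MCA, GSC, CCPLEX state, uncore perfmon and misc
+ * requests are routed through the ARI handlers; only the cstate, core
+ * online and CC3 requests use the NVG register interface.
+ */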
+
+/* ARI functions handlers */
+static arch_mce_ops_t ari_mce_ops = {
+ .enter_cstate = ari_enter_cstate,
+ .update_cstate_info = ari_update_cstate_info,
+ .update_crossover_time = ari_update_crossover_time,
+ .read_cstate_stats = ari_read_cstate_stats,
+ .write_cstate_stats = ari_write_cstate_stats,
+ .call_enum_misc = ari_enumeration_misc,
+ .is_ccx_allowed = ari_is_ccx_allowed,
+ .is_sc7_allowed = ari_is_sc7_allowed,
+ .online_core = ari_online_core,
+ .cc3_ctrl = ari_cc3_ctrl,
+ .update_reset_vector = ari_reset_vector_update,
+ .roc_flush_cache = ari_roc_flush_cache,
+ .roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
+ .roc_clean_cache = ari_roc_clean_cache,
+ .read_write_mca = ari_read_write_mca,
+ .update_ccplex_gsc = ari_update_ccplex_gsc,
+ .enter_ccplex_state = ari_enter_ccplex_state,
+ .read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
+ .misc_ccplex = ari_misc_ccplex
+};
+
+typedef struct {
+ uint32_t ari_base;
+ arch_mce_ops_t *ops;
+} mce_config_t;
+
+/* Table to hold the per-CPU ARI base address and function handlers */
+static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
+ {
+ /* A57 Core 0 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
+ .ops = &ari_mce_ops,
+ },
+ {
+ /* A57 Core 1 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
+ .ops = &ari_mce_ops,
+ },
+ {
+ /* A57 Core 2 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
+ .ops = &ari_mce_ops,
+ },
+ {
+ /* A57 Core 3 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
+ .ops = &ari_mce_ops,
+ },
+ {
+ /* D15 Core 0 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
+ .ops = &nvg_mce_ops,
+ },
+ {
+ /* D15 Core 1 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
+ .ops = &nvg_mce_ops,
+ }
+};
+
+static uint32_t mce_get_curr_cpu_ari_base(void)
+{
+ uint64_t mpidr = read_mpidr();
+ uint64_t cpuid = mpidr & MPIDR_CPU_MASK;
+ uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+ /*
+ * T186 has 2 CPU clusters, one with Denver CPUs and the other with
+ * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
+ * numbers start from 0. In order to get the proper arch_mce_ops_t
+ * struct, we have to convert the Denver CPU ids to the corresponding
+ * indices in the mce_cfg_table array.
+ */
+ if (impl == DENVER_IMPL) {
+ cpuid |= 0x4U;
+ }
+
+ return mce_cfg_table[cpuid].ari_base;
+}
+
+static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
+{
+ uint64_t mpidr = read_mpidr();
+ uint64_t cpuid = mpidr & MPIDR_CPU_MASK;
+ uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) &
+ MIDR_IMPL_MASK;
+
+ /*
+ * T186 has 2 CPU clusters, one with Denver CPUs and the other with
+ * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
+ * numbers start from 0. In order to get the proper arch_mce_ops_t
+ * struct, we have to convert the Denver CPU ids to the corresponding
+ * indices in the mce_cfg_table array.
+ */
+ if (impl == DENVER_IMPL) {
+ cpuid |= 0x4U;
+ }
+
+ return mce_cfg_table[cpuid].ops;
+}
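+
+/*
+ * Resulting mapping (for illustration): A57 cores 0-3 use mce_cfg_table
+ * entries 0-3 directly, while Denver cores 0 and 1 map to entries 4 and
+ * 5 after the 0x4 adjustment above.
+ */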
+
+/*******************************************************************************
+ * Common handler for all MCE commands
+ ******************************************************************************/
+int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2)
+{
+ const arch_mce_ops_t *ops;
+ gp_regs_t *gp_regs = get_gpregs_ctx(cm_get_context(NON_SECURE));
+ uint32_t cpu_ari_base;
+ uint64_t ret64 = 0, arg3, arg4, arg5;
+ int32_t ret = 0;
+
+ assert(gp_regs != NULL);
+
+ /* get a pointer to the CPU's arch_mce_ops_t struct */
+ ops = mce_get_curr_cpu_ops();
+
+ /* get the CPU's ARI base address */
+ cpu_ari_base = mce_get_curr_cpu_ari_base();
+
+ switch (cmd) {
+ case (uint64_t)MCE_CMD_ENTER_CSTATE:
+ ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
+
+ break;
+
+ case (uint64_t)MCE_CMD_UPDATE_CSTATE_INFO:
+ /*
+ * get the parameters required for the update cstate info
+ * command
+ */
+ arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4);
+ arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5);
+ arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6);
+
+ ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
+ (uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
+ (uint32_t)arg4, (uint8_t)arg5);
+
+ write_ctx_reg(gp_regs, CTX_GPREG_X4, (0ULL));
+ write_ctx_reg(gp_regs, CTX_GPREG_X5, (0ULL));
+ write_ctx_reg(gp_regs, CTX_GPREG_X6, (0ULL));
+
+ break;
+
+ case (uint64_t)MCE_CMD_UPDATE_CROSSOVER_TIME:
+ ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
+
+ break;
+
+ case (uint64_t)MCE_CMD_READ_CSTATE_STATS:
+ ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
+
+ /* update context to return cstate stats value */
+ write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
+ write_ctx_reg(gp_regs, CTX_GPREG_X2, (ret64));
+
+ break;
+
+ case (uint64_t)MCE_CMD_WRITE_CSTATE_STATS:
+ ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
+
+ break;
+
+ case (uint64_t)MCE_CMD_IS_CCX_ALLOWED:
+ ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
+
+ /* update context to return CCx status value */
+ write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint64_t)(ret));
+
+ break;
+
+ case (uint64_t)MCE_CMD_IS_SC7_ALLOWED:
+ ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
+
+ /* update context to return SC7 status value */
+ write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint64_t)(ret));
+ write_ctx_reg(gp_regs, CTX_GPREG_X3, (uint64_t)(ret));
+
+ break;
+
+ case (uint64_t)MCE_CMD_ONLINE_CORE:
+ ret = ops->online_core(cpu_ari_base, arg0);
+
+ break;
+
+ case (uint64_t)MCE_CMD_CC3_CTRL:
+ ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
+
+ break;
+
+ case (uint64_t)MCE_CMD_ECHO_DATA:
+ ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
+ arg0);
+
+		/* update context to return if the echoed data matched the source */
+ write_ctx_reg(gp_regs, CTX_GPREG_X1, ((ret64 == arg0) ?
+ 1ULL : 0ULL));
+ write_ctx_reg(gp_regs, CTX_GPREG_X2, ((ret64 == arg0) ?
+ 1ULL : 0ULL));
+
+ break;
+
+ case (uint64_t)MCE_CMD_READ_VERSIONS:
+ ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
+ arg0);
+
+ /*
+ * version = minor(63:32) | major(31:0). Update context
+ * to return major and minor version number.
+ */
+ write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
+ write_ctx_reg(gp_regs, CTX_GPREG_X2, (ret64 >> 32ULL));
+
+ break;
+
+ case (uint64_t)MCE_CMD_ENUM_FEATURES:
+ ret64 = ops->call_enum_misc(cpu_ari_base,
+ TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
+
+ /* update context to return features value */
+ write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
+
+ break;
+
+ case (uint64_t)MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
+ ret = ops->roc_flush_cache_trbits(cpu_ari_base);
+
+ break;
+
+ case (uint64_t)MCE_CMD_ROC_FLUSH_CACHE:
+ ret = ops->roc_flush_cache(cpu_ari_base);
+
+ break;
+
+ case (uint64_t)MCE_CMD_ROC_CLEAN_CACHE:
+ ret = ops->roc_clean_cache(cpu_ari_base);
+
+ break;
+
+ case (uint64_t)MCE_CMD_ENUM_READ_MCA:
+ ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
+
+ /* update context to return MCA data/error */
+ write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
+ write_ctx_reg(gp_regs, CTX_GPREG_X2, (arg1));
+ write_ctx_reg(gp_regs, CTX_GPREG_X3, (ret64));
+
+ break;
+
+ case (uint64_t)MCE_CMD_ENUM_WRITE_MCA:
+ ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
+
+ /* update context to return MCA error */
+ write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
+ write_ctx_reg(gp_regs, CTX_GPREG_X3, (ret64));
+
+ break;
+
+#if ENABLE_CHIP_VERIFICATION_HARNESS
+ case (uint64_t)MCE_CMD_ENABLE_LATIC:
+		/*
+		 * This call is not for production use. The constant value,
+		 * 0xFFFF0000, is specific to enabling LATIC on
+		 * pre-production parts for the chip verification harness.
+		 *
+		 * Enabling LATIC allows S/W to read the MINI ISMs in the
+		 * CCPLEX. The ISMs are used for various measurements relevant
+		 * to particular locations in the Silicon. They are small
+		 * counters which can be polled to determine how fast a
+		 * particular location in the Silicon is.
+		 */
+ ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(),
+ 0xFFFF0000);
+
+ break;
+#endif
+
+ case (uint64_t)MCE_CMD_UNCORE_PERFMON_REQ:
+ ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1);
+
+ /* update context to return data */
+ write_ctx_reg(gp_regs, CTX_GPREG_X1, (arg1));
+ break;
+
+ case (uint64_t)MCE_CMD_MISC_CCPLEX:
+ ops->misc_ccplex(cpu_ari_base, arg0, arg1);
+
+ break;
+
+ default:
+ ERROR("unknown MCE command (%" PRIu64 ")\n", cmd);
+ ret = EINVAL;
+ break;
+ }
+
+ return ret;
+}
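+
+/*
+ * Note on the register convention (inferred from the context accesses
+ * above; the actual SMC dispatch lives in the common SiP handler): arg0,
+ * arg1 and arg2 arrive in X1-X3, and commands that need more parameters,
+ * such as MCE_CMD_UPDATE_CSTATE_INFO, fetch arg3-arg5 from the saved
+ * non-secure context registers X4-X6 and scrub them after use.
+ */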
+
+/*******************************************************************************
+ * Handler to update the reset vector for CPUs
+ ******************************************************************************/
+int32_t mce_update_reset_vector(void)
+{
+ const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+ ops->update_reset_vector(mce_get_curr_cpu_ari_base());
+
+ return 0;
+}
+
+static int32_t mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
+{
+ const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+ ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for Video Memory Carveout region
+ ******************************************************************************/
+int32_t mce_update_gsc_videomem(void)
+{
+ return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for TZDRAM aperture
+ ******************************************************************************/
+int32_t mce_update_gsc_tzdram(void)
+{
+ return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
+}
+
+/*******************************************************************************
+ * Handler to shutdown/reset the entire system
+ ******************************************************************************/
+__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
+{
+ const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+ /* sanity check state value */
+ if ((state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) &&
+ (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)) {
+ panic();
+ }
+
+ ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);
+
+ /* wait till the CCPLEX powers down */
+	for (;;) {
+		;
+	}
+}
+
+/*******************************************************************************
+ * Handler to issue the UPDATE_CSTATE_INFO request
+ ******************************************************************************/
+void mce_update_cstate_info(const mce_cstate_info_t *cstate)
+{
+ const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+ /* issue the UPDATE_CSTATE_INFO request */
+ ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster,
+ cstate->ccplex, cstate->system, cstate->system_state_force,
+ cstate->wake_mask, cstate->update_wake_mask);
+}
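+
+/*
+ * Minimal usage sketch for the handler above (illustrative only; the
+ * cstate value TEGRA_ARI_CLUSTER_CC6 is assumed from t18x_ari.h, and the
+ * fields come from mce_cstate_info_t as used above):
+ *
+ *	mce_cstate_info_t cstate_info;
+ *
+ *	(void)memset(&cstate_info, 0, sizeof(cstate_info));
+ *	cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC6;
+ *	cstate_info.update_wake_mask = 1U;
+ *	mce_update_cstate_info(&cstate_info);
+ */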
+
+/*******************************************************************************
+ * Handler to read the MCE firmware version and check if it is compatible
+ * with the interface header that BL3-1 was compiled against
+ ******************************************************************************/
+void mce_verify_firmware_version(void)
+{
+ const arch_mce_ops_t *ops;
+ uint32_t cpu_ari_base;
+ uint64_t version;
+ uint32_t major, minor;
+
+	/*
+	 * MCE firmware is not supported on emulation platforms.
+	 */
+ if (tegra_platform_is_emulation()) {
+
+ INFO("MCE firmware is not supported\n");
+
+ } else {
+ /* get a pointer to the CPU's arch_mce_ops_t struct */
+ ops = mce_get_curr_cpu_ops();
+
+ /* get the CPU's ARI base address */
+ cpu_ari_base = mce_get_curr_cpu_ari_base();
+
+ /*
+ * Read the MCE firmware version and extract the major and minor
+ * version fields
+ */
+ version = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION, 0);
+ major = (uint32_t)version;
+ minor = (uint32_t)(version >> 32);
+
+		INFO("MCE Version - HW=%u:%u, SW=%u:%u\n", major, minor,
+			TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);
+
+ /*
+ * Verify that the MCE firmware version and the interface header
+ * match
+ */
+ if (major != TEGRA_ARI_VERSION_MAJOR) {
+ ERROR("ARI major version mismatch\n");
+ panic();
+ }
+
+ if (minor < TEGRA_ARI_VERSION_MINOR) {
+ ERROR("ARI minor version mismatch\n");
+ panic();
+ }
+ }
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
new file mode 100644
index 0000000..1a48563
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <denver.h>
+#include <lib/mmio.h>
+
+#include <mce_private.h>
+#include <t18x_ari.h>
+#include <tegra_private.h>
+
+int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ int32_t ret = 0;
+ uint64_t val = 0ULL;
+
+ (void)ari_base;
+
+ /* check for allowed power state */
+ if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
+ (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
+ ERROR("%s: unknown cstate (%d)\n", __func__, state);
+ ret = EINVAL;
+ } else {
+ /* time (TSC ticks) until the core is expected to get a wake event */
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
+
+ /* set the core cstate */
+ val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;
+ write_actlr_el1(val | (uint64_t)state);
+ }
+
+ return ret;
+}
+
+/*
+ * This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
+ * SYSTEM_CSTATE values.
+ */
+int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+ uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+ uint8_t update_wake_mask)
+{
+ uint64_t val = 0ULL;
+
+ (void)ari_base;
+
+ /* update CLUSTER_CSTATE? */
+ if (cluster != 0U) {
+ val |= ((uint64_t)cluster & CLUSTER_CSTATE_MASK) |
+ CLUSTER_CSTATE_UPDATE_BIT;
+ }
+
+ /* update CCPLEX_CSTATE? */
+ if (ccplex != 0U) {
+ val |= (((uint64_t)ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
+ CCPLEX_CSTATE_UPDATE_BIT;
+ }
+
+ /* update SYSTEM_CSTATE? */
+ if (system != 0U) {
+ val |= (((uint64_t)system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+ (((uint64_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+ SYSTEM_CSTATE_UPDATE_BIT);
+ }
+
+ /* update wake mask value? */
+ if (update_wake_mask != 0U) {
+ val |= CSTATE_WAKE_MASK_UPDATE_BIT;
+ }
+
+ /* set the wake mask */
+ val &= CSTATE_WAKE_MASK_CLEAR;
+ val |= ((uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT);
+
+ /* set the updated cstate info */
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
+
+ return 0;
+}
+
+int32_t nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+{
+ int32_t ret = 0;
+
+ (void)ari_base;
+
+ /* sanity check crossover type */
+ if (type > TEGRA_ARI_CROSSOVER_CCP3_SC1) {
+ ret = EINVAL;
+ } else {
+		/*
+		 * The crossover threshold limit types run from
+		 * TEGRA_ARI_CROSSOVER_C1_C6 to TEGRA_ARI_CROSSOVER_CCP3_SC1.
+		 * The command index for updating a threshold can be generated
+		 * by adding the type to the TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6
+		 * command index.
+		 */
+ nvg_set_request_data((TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 +
+ (uint64_t)type), (uint64_t)time);
+ }
+
+ return ret;
+}
+
+uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state)
+{
+ uint64_t ret;
+
+ (void)ari_base;
+
+ /* sanity check state */
+ if (state == 0U) {
+ ret = EINVAL;
+ } else {
+		/*
+		 * The cstate stats types run from SC7_ENTRIES to
+		 * LAST_CSTATE_ENTRY_A57_3. The command index for reading a
+		 * stats counter can be generated by adding the type to the
+		 * TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR command index.
+		 */
+ nvg_set_request((TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR +
+ (uint64_t)state));
+ ret = nvg_get_result();
+ }
+
+ return ret;
+}
+
+int32_t nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+{
+ uint64_t val;
+
+ (void)ari_base;
+
+	/*
+	 * The only difference between a CSTATE_STATS_WRITE and a
+	 * CSTATE_STATS_READ is the usage of bits 63:32 in the request:
+	 * they are set to '0' for a read, while a write carries the
+	 * actual stats value to be written.
+	 */
+ val = ((uint64_t)stats << MCE_CSTATE_STATS_TYPE_SHIFT) | state;
+
+	/*
+	 * The cstate stats types run from SC7_ENTRIES to
+	 * LAST_CSTATE_ENTRY_A57_3. The command index for writing a
+	 * stats counter can be generated by adding the type to the
+	 * TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR command index.
+	 */
+ nvg_set_request_data((TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR +
+ (uint64_t)state), val);
+
+ return 0;
+}
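+
+/*
+ * Worked example for the request layout above (values illustrative,
+ * assuming MCE_CSTATE_STATS_TYPE_SHIFT == 32 per the 63:32 split): stats
+ * = 0x10 and state = 3 produce val = 0x0000001000000003, issued on
+ * channel (TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + 3).
+ */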
+
+int32_t nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ (void)ari_base;
+ (void)state;
+ (void)wake_time;
+
+ /* This does not apply to the Denver cluster */
+ return 0;
+}
+
+int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ uint64_t val;
+ int32_t ret;
+
+ (void)ari_base;
+
+ /* check for allowed power state */
+ if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
+ (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
+ ERROR("%s: unknown cstate (%d)\n", __func__, state);
+ ret = EINVAL;
+ } else {
+ /*
+ * Request format -
+ * 63:32 = wake time
+ * 31:0 = C-state for this core
+ */
+ val = ((uint64_t)wake_time << MCE_SC7_WAKE_TIME_SHIFT) |
+ ((uint64_t)state & MCE_SC7_ALLOWED_MASK);
+
+ /* issue command to check if SC7 is allowed */
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
+
+ /* 1 = SC7 allowed, 0 = SC7 not allowed */
+ ret = (nvg_get_result() != 0ULL) ? 1 : 0;
+ }
+
+ return ret;
+}
+
+int32_t nvg_online_core(uint32_t ari_base, uint32_t core)
+{
+ uint64_t cpu = read_mpidr() & MPIDR_CPU_MASK;
+ uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+ int32_t ret = 0;
+
+ (void)ari_base;
+
+	/* sanity check core id */
+ if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
+ ERROR("%s: unsupported core id (%d)\n", __func__, core);
+ ret = EINVAL;
+ } else {
+ /*
+ * The Denver cluster has 2 CPUs only - 0, 1.
+ */
+ if ((impl == DENVER_IMPL) && ((core == 2U) || (core == 3U))) {
+ ERROR("%s: unknown core id (%d)\n", __func__, core);
+ ret = EINVAL;
+ } else {
+ /* get a core online */
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_ONLINE_CORE,
+ ((uint64_t)core & MCE_CORE_ID_MASK));
+ }
+ }
+
+ return ret;
+}
+
+int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+{
+ uint32_t val;
+
+ (void)ari_base;
+
+ /*
+ * If the enable bit is cleared, Auto-CC3 will be disabled by setting
+ * the SW visible voltage/frequency request registers for all non
+ * floorswept cores valid independent of StandbyWFI and disabling
+ * the IDLE voltage/frequency request register. If set, Auto-CC3
+ * will be enabled by setting the ARM SW visible voltage/frequency
+ * request registers for all non floorswept cores to be enabled by
+ * StandbyWFI or the equivalent signal, and always keeping the IDLE
+ * voltage/frequency request register enabled.
+ */
+ val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |
+ ((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |
+ ((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
+
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_CC3_CTRL, (uint64_t)val);
+
+ return 0;
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/se/se.c b/plat/nvidia/tegra/soc/t186/drivers/se/se.c
new file mode 100644
index 0000000..25f8cd0
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/se/se.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <drivers/delay_timer.h>
+#include <errno.h>
+#include <string.h>
+
+#include <bpmp_ipc.h>
+#include <pmc.h>
+#include <security_engine.h>
+#include <tegra_private.h>
+
+#include "se_private.h"
+
+/*******************************************************************************
+ * Constants and Macros
+ ******************************************************************************/
+#define SE0_MAX_BUSY_TIMEOUT_MS U(100) /* 100ms */
+#define BYTES_IN_WORD U(4)
+#define SHA256_MAX_HASH_RESULT U(7)
+#define SHA256_DST_SIZE U(32)
+#define SHA_FIRST_OP U(1)
+#define MAX_SHA_ENGINE_CHUNK_SIZE U(0xFFFFFF)
+#define SHA256_MSG_LENGTH_ONETIME U(0xffff)
+
+/*
+ * Check that the SE operation has completed after kickoff.
+ * This function is invoked after an SE operation has been started,
+ * and it checks the following conditions:
+ * 1. SE0_INT_STATUS = SE0_OP_DONE
+ * 2. SE0_STATUS = IDLE
+ * 3. SE0_ERR_STATUS is clear.
+ */
+static int32_t tegra_se_operation_complete(void)
+{
+ uint32_t val = 0U;
+
+ /* Read SE0 interrupt register to ensure H/W operation complete */
+ val = tegra_se_read_32(SE0_INT_STATUS_REG_OFFSET);
+ if (SE0_INT_OP_DONE(val) == SE0_INT_OP_DONE_CLEAR) {
+		ERROR("%s: SE operation timed out, OP_DONE not set! val = 0x%x\n",
+			__func__, val);
+ return -ETIMEDOUT;
+ }
+
+ /* Read SE0 status idle to ensure H/W operation complete */
+ val = tegra_se_read_32(SE0_SHA_STATUS_0);
+ if (val != SE0_SHA_STATUS_IDLE) {
+ ERROR("%s: Idle state timeout! val = 0x%x\n", __func__,
+ val);
+ return -ETIMEDOUT;
+ }
+
+ /* Ensure that no errors are thrown during operation */
+ val = tegra_se_read_32(SE0_ERR_STATUS_REG_OFFSET);
+ if (val != SE0_ERR_STATUS_CLEAR) {
+		ERROR("%s: Error during SE operation! val = 0x%x\n",
+ __func__, val);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
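+
+/*
+ * A bounded polling variant of the completion check (a sketch only, not
+ * the flow this driver uses; it reuses SE0_MAX_BUSY_TIMEOUT_MS as the
+ * millisecond budget and assumes mdelay() from delay_timer.h):
+ *
+ *	uint32_t ms;
+ *
+ *	for (ms = 0U; ms < SE0_MAX_BUSY_TIMEOUT_MS; ms++) {
+ *		val = tegra_se_read_32(SE0_INT_STATUS_REG_OFFSET);
+ *		if (SE0_INT_OP_DONE(val) != SE0_INT_OP_DONE_CLEAR) {
+ *			break;
+ *		}
+ *		mdelay(1U);
+ *	}
+ */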
+
+/*
+ * Security engine primitive normal operations
+ */
+static int32_t tegra_se_start_normal_operation(uint64_t src_addr,
+ uint32_t nbytes, uint32_t last_buf, uint32_t src_len_inbytes)
+{
+ int32_t ret = 0;
+ uint32_t val = 0U;
+ uint32_t src_in_lo;
+ uint32_t src_in_msb;
+ uint32_t src_in_hi;
+
+	if ((src_addr == 0UL) || (nbytes == 0U)) {
+		return -EINVAL;
+	}
+
+ src_in_lo = (uint32_t)src_addr;
+ src_in_msb = ((uint32_t)(src_addr >> 32U) & 0xffU);
+ src_in_hi = ((src_in_msb << SE0_IN_HI_ADDR_HI_0_MSB_SHIFT) |
+ (nbytes & 0xffffffU));
+
+	/* set SRC_IN_ADDR_LO and SRC_IN_ADDR_HI */
+ tegra_se_write_32(SE0_IN_ADDR, src_in_lo);
+ tegra_se_write_32(SE0_IN_HI_ADDR_HI, src_in_hi);
+
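+	/* clear stale SE0 interrupt status before kickoff */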
+ val = tegra_se_read_32(SE0_INT_STATUS_REG_OFFSET);
+ if (val > 0U) {
+		tegra_se_write_32(SE0_INT_STATUS_REG_OFFSET, 0U);
+ }
+
+ /* Enable SHA interrupt for SE0 Operation */
+ tegra_se_write_32(SE0_SHA_INT_ENABLE, 0x1aU);
+
+ /* flush to DRAM for SE to use the updated contents */
+ flush_dcache_range(src_addr, src_len_inbytes);
+
+ /* Start SHA256 operation */
+ if (last_buf == 1U) {
+ tegra_se_write_32(SE0_OPERATION_REG_OFFSET, SE0_OP_START |
+ SE0_UNIT_OPERATION_PKT_LASTBUF_FIELD);
+ } else {
+ tegra_se_write_32(SE0_OPERATION_REG_OFFSET, SE0_OP_START);
+ }
+
+ /* Wait for SE-operation to finish */
+ udelay(SE0_MAX_BUSY_TIMEOUT_MS * 100U);
+
+ /* Check SE0 operation status */
+ ret = tegra_se_operation_complete();
+ if (ret != 0) {
+		ERROR("SE operation completion check failed! 0x%x\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int32_t tegra_se_calculate_sha256_hash(uint64_t src_addr,
+ uint32_t src_len_inbyte)
+{
+ uint32_t val, last_buf, i;
+ int32_t ret = 0;
+ uint32_t operations;
+ uint64_t src_len_inbits;
+ uint32_t len_bits_msb;
+ uint32_t len_bits_lsb;
+ uint32_t number_of_operations, max_bytes, bytes_left, remaining_bytes;
+
+ if (src_len_inbyte > MAX_SHA_ENGINE_CHUNK_SIZE) {
+ ERROR("SHA input chunk size too big: 0x%x\n", src_len_inbyte);
+ return -EINVAL;
+ }
+
+ if (src_addr == 0UL) {
+ return -EINVAL;
+ }
+
+ /* number of bytes per operation */
+ max_bytes = SHA256_HASH_SIZE_BYTES * SHA256_MSG_LENGTH_ONETIME;
+
+ src_len_inbits = src_len_inbyte * 8U;
+ len_bits_msb = (uint32_t)(src_len_inbits >> 32U);
+	len_bits_lsb = (uint32_t)(src_len_inbits & 0xFFFFFFFFU);
+
+ /* program SE0_CONFIG for SHA256 operation */
+ val = SE0_CONFIG_ENC_ALG_SHA | SE0_CONFIG_ENC_MODE_SHA256 |
+ SE0_CONFIG_DEC_ALG_NOP | SE0_CONFIG_DST_HASHREG;
+ tegra_se_write_32(SE0_SHA_CONFIG, val);
+
+ /* set SE0_SHA_MSG_LENGTH registers */
+ tegra_se_write_32(SE0_SHA_MSG_LENGTH_0, len_bits_lsb);
+ tegra_se_write_32(SE0_SHA_MSG_LEFT_0, len_bits_lsb);
+ tegra_se_write_32(SE0_SHA_MSG_LENGTH_1, len_bits_msb);
+
+ /* zero out unused SE0_SHA_MSG_LENGTH and SE0_SHA_MSG_LEFT */
+ tegra_se_write_32(SE0_SHA_MSG_LENGTH_2, 0U);
+ tegra_se_write_32(SE0_SHA_MSG_LENGTH_3, 0U);
+ tegra_se_write_32(SE0_SHA_MSG_LEFT_1, 0U);
+ tegra_se_write_32(SE0_SHA_MSG_LEFT_2, 0U);
+ tegra_se_write_32(SE0_SHA_MSG_LEFT_3, 0U);
+
+ number_of_operations = src_len_inbyte / max_bytes;
+ remaining_bytes = src_len_inbyte % max_bytes;
+ if (remaining_bytes > 0U) {
+ number_of_operations += 1U;
+ }
+
+	/*
+	 * 1. operation == 1: program the SE0_SHA_TASK register to initiate
+	 *    SHA256 hash generation by writing SE0_SHA_CONFIG_HW_INIT_HASH
+	 *    (1) to SE0_SHA_TASK and start a SHA256-normal operation.
+	 * 2. 1 < operation < number_of_operations: program SE0_SHA_TASK to
+	 *    SE0_SHA_CONFIG_HW_INIT_HASH_DISABLE (0) to load the
+	 *    intermediate SHA256 digest result from the HASH_RESULT
+	 *    registers, continue SHA256 generation and start a
+	 *    SHA256-normal operation.
+	 * 3. operation == number_of_operations: continue with step 2 and
+	 *    set max_bytes to bytes_left to process the final hash-result
+	 *    generation and start a SHA256-normal operation.
+	 */
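+	/*
+	 * Worked example: max_bytes is 256 * 0xffff = 16776960 bytes, so an
+	 * input of 0xFFFFFF (16777215) bytes splits into two operations:
+	 * the first hashes 16776960 bytes, the second hashes the remaining
+	 * 255 bytes with LASTBUF set.
+	 */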
+ bytes_left = src_len_inbyte;
+ for (operations = 1U; operations <= number_of_operations;
+ operations++) {
+ if (operations == SHA_FIRST_OP) {
+ val = SE0_SHA_CONFIG_HW_INIT_HASH;
+ } else {
+ /* Load intermediate SHA digest result to
+ * SHA:HASH_RESULT(0..7) to continue the SHA
+ * calculation and tell the SHA engine to use it.
+ */
+ for (i = 0U; (i / BYTES_IN_WORD) <=
+ SHA256_MAX_HASH_RESULT; i += BYTES_IN_WORD) {
+ val = tegra_se_read_32(SE0_SHA_HASH_RESULT_0 +
+ i);
+ tegra_se_write_32(SE0_SHA_HASH_RESULT_0 + i,
+ val);
+ }
+ val = SE0_SHA_CONFIG_HW_INIT_HASH_DISABLE;
+ if (len_bits_lsb <= (max_bytes * 8U)) {
+ len_bits_lsb = (remaining_bytes * 8U);
+ } else {
+ len_bits_lsb -= (max_bytes * 8U);
+ }
+ tegra_se_write_32(SE0_SHA_MSG_LEFT_0, len_bits_lsb);
+ }
+ tegra_se_write_32(SE0_SHA_TASK_CONFIG, val);
+
+ max_bytes = (SHA256_HASH_SIZE_BYTES *
+ SHA256_MSG_LENGTH_ONETIME);
+ if (bytes_left < max_bytes) {
+ max_bytes = bytes_left;
+ last_buf = 1U;
+ } else {
+ bytes_left = bytes_left - max_bytes;
+ last_buf = 0U;
+ }
+ /* start operation */
+ ret = tegra_se_start_normal_operation(src_addr, max_bytes,
+ last_buf, src_len_inbyte);
+ if (ret != 0) {
+			ERROR("Error during SE operation! 0x%x\n", ret);
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Handler to generate a SHA256 hash of the given memory range and save
+ * the hash to the PMC secure scratch registers.
+ */
+int32_t tegra_se_save_sha256_hash(uint64_t bl31_base, uint32_t src_len_inbyte)
+{
+ int32_t ret = 0;
+ uint32_t val = 0U, hash_offset = 0U, scratch_offset = 0U, security;
+
+	/*
+	 * Set SE_SOFT_SETTINGS=SE_SECURE to prevent the NS world from
+	 * changing SE registers.
+	 */
+ security = tegra_se_read_32(SE0_SECURITY);
+ tegra_se_write_32(SE0_SECURITY, security | SE0_SECURITY_SE_SOFT_SETTING);
+
+ ret = tegra_se_calculate_sha256_hash(bl31_base, src_len_inbyte);
+	if (ret != 0) {
+ ERROR("%s: SHA256 generation failed\n", __func__);
+ return ret;
+ }
+
+	/*
+	 * Restore SE_SECURITY to its previous value.
+	 */
+ tegra_se_write_32(SE0_SECURITY, security);
+
+ /* read SHA256_HASH_RESULT and save to PMC Scratch registers */
+ scratch_offset = SECURE_SCRATCH_TZDRAM_SHA256_HASH_START;
+	while (scratch_offset <= SECURE_SCRATCH_TZDRAM_SHA256_HASH_END) {
+ val = tegra_se_read_32(SE0_SHA_HASH_RESULT_0 + hash_offset);
+ mmio_write_32(TEGRA_SCRATCH_BASE + scratch_offset, val);
+
+ hash_offset += BYTES_IN_WORD;
+ scratch_offset += BYTES_IN_WORD;
+ }
+
+ return ret;
+}
+
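+/*
+ * Illustrative call site (an assumption; the actual invocation lives in
+ * the platform's system-suspend path): hash the BL31 image in TZDRAM so
+ * the SC7 resume firmware can verify it later. 'bl31_image_size' is a
+ * placeholder for however the platform computes the image length.
+ *
+ *	extern uint64_t tegra_bl31_phys_base;
+ *
+ *	if (tegra_se_save_sha256_hash(tegra_bl31_phys_base,
+ *			(uint32_t)bl31_image_size) != 0) {
+ *		ERROR("BL31 hash save failed\n");
+ *	}
+ */
+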
diff --git a/plat/nvidia/tegra/soc/t186/drivers/se/se_private.h b/plat/nvidia/tegra/soc/t186/drivers/se/se_private.h
new file mode 100644
index 0000000..7aa0dd6
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/se/se_private.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SE_PRIVATE_H
+#define SE_PRIVATE_H
+
+#include <lib/utils_def.h>
+
+/* SE0 security register */
+#define SE0_SECURITY U(0x18)
+#define SE0_SECURITY_SE_SOFT_SETTING (((uint32_t)1) << 16U)
+
+/* SE0 config register */
+#define SE0_SHA_CONFIG U(0x104)
+#define SE0_SHA_TASK_CONFIG U(0x108)
+#define SE0_SHA_CONFIG_HW_INIT_HASH ((1U) << 0U)
+#define SE0_SHA_CONFIG_HW_INIT_HASH_DISABLE U(0)
+
+#define SE0_CONFIG_ENC_ALG_SHIFT U(12)
+#define SE0_CONFIG_ENC_ALG_SHA \
+ (((uint32_t)3) << SE0_CONFIG_ENC_ALG_SHIFT)
+#define SE0_CONFIG_DEC_ALG_SHIFT U(8)
+#define SE0_CONFIG_DEC_ALG_NOP \
+ (((uint32_t)0) << SE0_CONFIG_DEC_ALG_SHIFT)
+#define SE0_CONFIG_DST_SHIFT U(2)
+#define SE0_CONFIG_DST_HASHREG \
+ (((uint32_t)1) << SE0_CONFIG_DST_SHIFT)
+#define SHA256_HASH_SIZE_BYTES U(256)
+
+#define SE0_CONFIG_ENC_MODE_SHIFT U(24)
+#define SE0_CONFIG_ENC_MODE_SHA256 \
+ (((uint32_t)5) << SE0_CONFIG_ENC_MODE_SHIFT)
+
+/* SHA input message length */
+#define SE0_SHA_MSG_LENGTH_0 U(0x11c)
+#define SE0_SHA_MSG_LENGTH_1 U(0x120)
+#define SE0_SHA_MSG_LENGTH_2 U(0x124)
+#define SE0_SHA_MSG_LENGTH_3 U(0x128)
+
+/* SHA input message left */
+#define SE0_SHA_MSG_LEFT_0 U(0x12c)
+#define SE0_SHA_MSG_LEFT_1 U(0x130)
+#define SE0_SHA_MSG_LEFT_2 U(0x134)
+#define SE0_SHA_MSG_LEFT_3 U(0x138)
+
+/* SE Hash Result */
+#define SE0_SHA_HASH_RESULT_0 U(0x13c)
+
+/* SE OPERATION */
+#define SE0_OPERATION_REG_OFFSET U(0x17c)
+#define SE0_UNIT_OPERATION_PKT_LASTBUF_SHIFT U(16)
+#define SE0_UNIT_OPERATION_PKT_LASTBUF_FIELD \
+ (((uint32_t)0x1) << SE0_UNIT_OPERATION_PKT_LASTBUF_SHIFT)
+#define SE0_OPERATION_SHIFT U(0)
+#define SE0_OP_START \
+ (((uint32_t)0x1) << SE0_OPERATION_SHIFT)
+
+/* SE Interrupt */
+#define SE0_SHA_INT_ENABLE U(0x180)
+
+#define SE0_INT_STATUS_REG_OFFSET U(0x184)
+#define SE0_INT_OP_DONE_SHIFT U(4)
+#define SE0_INT_OP_DONE_CLEAR \
+ (((uint32_t)0) << SE0_INT_OP_DONE_SHIFT)
+#define SE0_INT_OP_DONE(x) \
+ ((x) & (((uint32_t)0x1) << SE0_INT_OP_DONE_SHIFT))
+
+/* SE SHA status */
+#define SE0_SHA_STATUS_0 U(0x188)
+#define SE0_SHA_STATUS_IDLE U(0)
+
+/* SE error status */
+#define SE0_ERR_STATUS_REG_OFFSET		U(0x18c)
+#define SE0_ERR_STATUS_CLEAR			U(0)
+
+/* SE input address */
+#define SE0_IN_ADDR				U(0x10c)
+#define SE0_IN_HI_ADDR_HI			U(0x110)
+#define SE0_IN_HI_ADDR_HI_0_MSB_SHIFT		U(24)
+
+/* PMC secure scratch registers to save the SHA256 hash */
+#define SECURE_SCRATCH_TZDRAM_SHA256_HASH_START SECURE_SCRATCH_RSV63_LO
+#define SECURE_SCRATCH_TZDRAM_SHA256_HASH_END SECURE_SCRATCH_RSV66_HI
+
+/*******************************************************************************
+ * Inline functions definition
+ ******************************************************************************/
+
+static inline uint32_t tegra_se_read_32(uint32_t offset)
+{
+ return mmio_read_32((uint32_t)(TEGRA_SE0_BASE + offset));
+}
+
+static inline void tegra_se_write_32(uint32_t offset, uint32_t val)
+{
+ mmio_write_32(((uint32_t)(TEGRA_SE0_BASE + offset)), val);
+}
+
+#endif /* SE_PRIVATE_H */
diff --git a/plat/nvidia/tegra/soc/t186/plat_memctrl.c b/plat/nvidia/tegra/soc/t186/plat_memctrl.c
new file mode 100644
index 0000000..2533013
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_memctrl.c
@@ -0,0 +1,700 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/bl_common.h>
+
+#include <mce.h>
+#include <memctrl_v2.h>
+#include <tegra186_private.h>
+#include <tegra_mc_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+extern uint64_t tegra_bl31_phys_base;
+
+/*******************************************************************************
+ * Array to hold stream_id override config register offsets
+ ******************************************************************************/
+static const uint32_t tegra186_streamid_override_regs[] = {
+ MC_STREAMID_OVERRIDE_CFG_SDMMCRA,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCRAA,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCR,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCRAB,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCWA,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCWAA,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCW,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCWAB,
+};
+
+/*******************************************************************************
+ * Array to hold the security configs for stream IDs
+ ******************************************************************************/
+static const mc_streamid_security_cfg_t tegra186_streamid_sec_cfgs[] = {
+ mc_make_sec_cfg(SCEW, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(AFIR, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(AFIW, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(NVDISPLAYR1, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(XUSB_DEVR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(VICSRD1, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(NVENCSWR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(TSECSRDB, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(AXISW, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SDMMCWAB, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(AONDMAW, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(GPUSWR2, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SATAW, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(UFSHCW, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SDMMCR, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SCEDMAW, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(UFSHCR, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SDMMCWAA, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SESWR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(MPCORER, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(PTCR, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(BPMPW, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(ETRW, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(GPUSRD, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(VICSWR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SCEDMAR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(HDAW, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(ISPWA, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(EQOSW, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(XUSB_HOSTW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(TSECSWR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SDMMCRAA, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(VIW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(AXISR, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SDMMCW, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(BPMPDMAW, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(ISPRA, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVDECSWR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(XUSB_DEVW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVDECSRD, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(MPCOREW, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(NVDISPLAYR, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(BPMPDMAR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(NVJPGSWR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(NVDECSRD1, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(TSECSRD, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(NVJPGSRD, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SDMMCWA, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SCER, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(XUSB_HOSTR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(VICSRD, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(AONDMAR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(AONW, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SDMMCRA, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(HOST1XDMAR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(EQOSR, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SATAR, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(BPMPR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(HDAR, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SDMMCRAB, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(ETRR, NON_SECURE, OVERRIDE, DISABLE),
+ mc_make_sec_cfg(AONR, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SESRD, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(NVENCSRD, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(GPUSWR, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(TSECSWRB, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(ISPWB, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(GPUSRD2, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(APEDMAW, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(APER, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(APEW, NON_SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(APEDMAR, NON_SECURE, NO_OVERRIDE, DISABLE),
+};
+
+/*******************************************************************************
+ * Array to hold the transaction override configs
+ ******************************************************************************/
+static const mc_txn_override_cfg_t tegra186_txn_override_cfgs[] = {
+ mc_make_txn_override_cfg(BPMPW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(EQOSW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(NVJPGSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SDMMCWAA, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(MPCOREW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SCEDMAW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SDMMCW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(AXISW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(TSECSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(GPUSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(XUSB_HOSTW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(TSECSWRB, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(GPUSWR2, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(AONDMAW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(AONW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SESWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(BPMPDMAW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SDMMCWA, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(HDAW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(NVDECSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(UFSHCW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SATAW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(ETRW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(VICSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(NVENCSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SDMMCWAB, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(ISPWB, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(APEW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(XUSB_DEVW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(AFIW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SCEW, CGID_TAG_ADR),
+};
+
+static void tegra186_memctrl_reconfig_mss_clients(void)
+{
+#if ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS
+ uint32_t val, wdata_0, wdata_1;
+
+ /*
+ * Assert Memory Controller's HOTRESET_FLUSH_ENABLE signal for
+ * boot and strongly ordered MSS clients to flush existing memory
+ * traffic and stall future requests.
+ */
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
+ assert(val == MC_CLIENT_HOTRESET_CTRL0_RESET_VAL);
+
+ wdata_0 = MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB;
+ tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
+
+ /* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
+ do {
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
+ } while ((val & wdata_0) != wdata_0);
+
+ /* Wait one more time due to SW WAR for known legacy issue */
+ do {
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
+ } while ((val & wdata_0) != wdata_0);
+
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
+ assert(val == MC_CLIENT_HOTRESET_CTRL1_RESET_VAL);
+
+ wdata_1 = MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB;
+ tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
+
+ /* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
+ do {
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
+ } while ((val & wdata_1) != wdata_1);
+
+ /* Wait one more time due to SW WAR for known legacy issue */
+ do {
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
+ } while ((val & wdata_1) != wdata_1);
+
+ /*
+ * Change MEMTYPE_OVERRIDE from SO_DEV -> PASSTHRU for boot and
+ * strongly ordered MSS clients. ROC needs to be single point
+ * of control on overriding the memory type. So, remove TSA's
+ * memtype override.
+ *
+ * MC clients with default SO_DEV override still enabled at TSA:
+ * AONW, BPMPW, SCEW, APEW
+ */
+ mc_set_tsa_passthrough(AFIW);
+ mc_set_tsa_passthrough(HDAW);
+ mc_set_tsa_passthrough(SATAW);
+ mc_set_tsa_passthrough(XUSB_HOSTW);
+ mc_set_tsa_passthrough(XUSB_DEVW);
+ mc_set_tsa_passthrough(SDMMCWAB);
+ mc_set_tsa_passthrough(APEDMAW);
+ mc_set_tsa_passthrough(SESWR);
+ mc_set_tsa_passthrough(ETRW);
+ mc_set_tsa_passthrough(AXISW);
+ mc_set_tsa_passthrough(EQOSW);
+ mc_set_tsa_passthrough(UFSHCW);
+ mc_set_tsa_passthrough(BPMPDMAW);
+ mc_set_tsa_passthrough(AONDMAW);
+ mc_set_tsa_passthrough(SCEDMAW);
+
+	/* Parker has no IO Coherency support and needs the following:
+	 * Ordered MC Clients on Parker are AFI, EQOS, SATA, XUSB.
+	 * ISO clients (DISP, VI, EQOS) should never snoop caches and
+	 * don't need ROC/PCFIFO ordering.
+	 * ISO clients (EQOS) that need ordering should use PCFIFO ordering
+	 * and bypass ROC ordering by using the FORCE_NON_COHERENT path.
+	 * The FORCE_NON_COHERENT/FORCE_COHERENT configs take precedence
+	 * over SMMU attributes.
+	 * Force all Normal memory transactions from ISO and non-ISO to be
+	 * non-coherent (bypass ROC, avoid cache snoops to avoid perf hits).
+	 * Force the SO_DEV transactions from ordered ISO clients (EQOS) to
+	 * the non-coherent path and enable the MC PCFIFO interlock for
+	 * ordering.
+	 * Force the SO_DEV transactions from ordered non-ISO clients (PCIe,
+	 * XUSB, SATA) to coherent so that the transactions are
+	 * ordered by ROC.
+	 * PCFIFO ensures write ordering.
+	 * Read after Write ordering is maintained/enforced by MC clients.
+ * Clients that need PCIe type write ordering must
+ * go through ROC ordering.
+ * Ordering enable for Read clients is not necessary.
+ * R5's and A9 would get necessary ordering from AXI and
+ * don't need ROC ordering enable:
+ * - MMIO ordering is through dev mapping and MMIO
+ * accesses bypass SMMU.
+ * - Normal memory is accessed through SMMU and ordering is
+ * ensured by client and AXI.
+ * - Ack point for Normal memory is WCAM in MC.
+ * - MMIO's can be early acked and AXI ensures dev memory ordering,
+ * Client ensures read/write direction change ordering.
+ * - See Bug 200312466 for more details.
+ *
+ * CGID_TAG_ADR is only present from T186 A02. As this code is common
+ * between A01 and A02, tegra_memctrl_set_overrides() programs
+ * CGID_TAG_ADR for the necessary clients on A02.
+ */
+ mc_set_txn_override(HDAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(BPMPW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(PTCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(NVDISPLAYR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(EQOSW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(NVJPGSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(ISPRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SDMMCWAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(VICSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(MPCOREW, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
+ mc_set_txn_override(GPUSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(AXISR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SCEDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SDMMCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(EQOSR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ /* See bug 200131110 comment #35*/
+ mc_set_txn_override(APEDMAR, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(NVENCSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SDMMCRAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(VICSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(BPMPDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(VIW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SDMMCRAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(AXISW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(XUSB_DEVR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(UFSHCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(TSECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(GPUSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SATAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(XUSB_HOSTW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
+ mc_set_txn_override(TSECSWRB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(GPUSRD2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SCEDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(GPUSWR2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(AONDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ /* See bug 200131110 comment #35*/
+ mc_set_txn_override(APEDMAW, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(AONW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(HOST1XDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(ETRR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SESWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(NVJPGSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(NVDECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(TSECSRDB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(BPMPDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(APER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(NVDECSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(XUSB_HOSTR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(ISPWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SESRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SCER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(AONR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(MPCORER, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
+ mc_set_txn_override(SDMMCWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(HDAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(NVDECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(UFSHCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(AONDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SATAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
+ mc_set_txn_override(ETRW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(VICSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(NVENCSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ /* See bug 200131110 comment #35 */
+ mc_set_txn_override(AFIR, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SDMMCWAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SDMMCRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(NVDISPLAYR1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(ISPWB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(BPMPR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(APEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(SDMMCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ mc_set_txn_override(XUSB_DEVW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
+ mc_set_txn_override(TSECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+ /*
+ * See bug 200131110 comment #35 - there are no normal requests
+ * and AWID for SO/DEV requests is hardcoded in RTL for a
+ * particular PCIE controller
+ */
+ mc_set_txn_override(AFIW, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_COHERENT);
+ mc_set_txn_override(SCEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
+
+ /*
+ * At this point, ordering can occur at ROC. So, remove PCFIFO's
+ * control over ordering requests.
+ *
+ * Change PCFIFO_*_ORDERED_CLIENT from ORDERED -> UNORDERED for
+ * boot and strongly ordered MSS clients
+ */
+ val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL &
+ mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) &
+ mc_set_pcfifo_unordered_boot_so_mss(1, HDAW) &
+ mc_set_pcfifo_unordered_boot_so_mss(1, SATAW);
+ tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val);
+
+ val = MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL &
+ mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_HOSTW) &
+ mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_DEVW);
+ tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG2, val);
+
+ val = MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL &
+ mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCWAB);
+ tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG3, val);
+
+ val = MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL &
+ mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW);
+ /* EQOSW is the only client that has PCFIFO order enabled. */
+ val |= mc_set_pcfifo_ordered_boot_so_mss(4, EQOSW);
+ tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, val);
+
+ val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL &
+ mc_set_pcfifo_unordered_boot_so_mss(5, APEDMAW);
+ tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, val);
+
+ /*
+ * Deassert HOTRESET FLUSH_ENABLE for boot and strongly ordered MSS
+ * clients to allow memory traffic from all clients to start passing
+ * through ROC
+ */
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
+ assert(val == wdata_0);
+
+ wdata_0 = MC_CLIENT_HOTRESET_CTRL0_RESET_VAL;
+ tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
+
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
+ assert(val == wdata_1);
+
+ wdata_1 = MC_CLIENT_HOTRESET_CTRL1_RESET_VAL;
+ tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
+
+#endif
+}
+
+static void tegra186_memctrl_set_overrides(void)
+{
+ uint32_t i, val;
+
+ /*
+ * Set the MC_TXN_OVERRIDE registers for write clients.
+ */
+	if (tegra_chipid_is_t186() &&
+	    (!tegra_platform_is_silicon() ||
+	     (tegra_get_chipid_minor() == 1U))) {
+
+ /*
+ * GPU and NVENC settings for Tegra186 simulation and
+ * Silicon rev. A01
+ */
+ val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR);
+ val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR,
+ val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
+
+ val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2);
+ val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2,
+ val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
+
+ val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR);
+ val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR,
+ val | MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID);
+
+ } else {
+
+ /*
+ * Settings for Tegra186 silicon rev. A02 and onwards.
+ */
+ for (i = 0; i < ARRAY_SIZE(tegra186_txn_override_cfgs); i++) {
+ val = tegra_mc_read_32(tegra186_txn_override_cfgs[i].offset);
+ val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+ tegra_mc_write_32(tegra186_txn_override_cfgs[i].offset,
+ val | tegra186_txn_override_cfgs[i].cgid_tag);
+ }
+ }
+}
+
+/*******************************************************************************
+ * Array to hold MC context for Tegra186
+ ******************************************************************************/
+static __attribute__((aligned(16))) mc_regs_t tegra186_mc_context[] = {
+ _START_OF_TABLE_,
+ mc_make_sid_security_cfg(SCEW),
+ mc_make_sid_security_cfg(AFIR),
+ mc_make_sid_security_cfg(NVDISPLAYR1),
+ mc_make_sid_security_cfg(XUSB_DEVR),
+ mc_make_sid_security_cfg(VICSRD1),
+ mc_make_sid_security_cfg(NVENCSWR),
+ mc_make_sid_security_cfg(TSECSRDB),
+ mc_make_sid_security_cfg(AXISW),
+ mc_make_sid_security_cfg(SDMMCWAB),
+ mc_make_sid_security_cfg(AONDMAW),
+ mc_make_sid_security_cfg(GPUSWR2),
+ mc_make_sid_security_cfg(SATAW),
+ mc_make_sid_security_cfg(UFSHCW),
+ mc_make_sid_security_cfg(AFIW),
+ mc_make_sid_security_cfg(SDMMCR),
+ mc_make_sid_security_cfg(SCEDMAW),
+ mc_make_sid_security_cfg(UFSHCR),
+ mc_make_sid_security_cfg(SDMMCWAA),
+ mc_make_sid_security_cfg(APEDMAW),
+ mc_make_sid_security_cfg(SESWR),
+ mc_make_sid_security_cfg(MPCORER),
+ mc_make_sid_security_cfg(PTCR),
+ mc_make_sid_security_cfg(BPMPW),
+ mc_make_sid_security_cfg(ETRW),
+ mc_make_sid_security_cfg(GPUSRD),
+ mc_make_sid_security_cfg(VICSWR),
+ mc_make_sid_security_cfg(SCEDMAR),
+ mc_make_sid_security_cfg(HDAW),
+ mc_make_sid_security_cfg(ISPWA),
+ mc_make_sid_security_cfg(EQOSW),
+ mc_make_sid_security_cfg(XUSB_HOSTW),
+ mc_make_sid_security_cfg(TSECSWR),
+ mc_make_sid_security_cfg(SDMMCRAA),
+ mc_make_sid_security_cfg(APER),
+ mc_make_sid_security_cfg(VIW),
+ mc_make_sid_security_cfg(APEW),
+ mc_make_sid_security_cfg(AXISR),
+ mc_make_sid_security_cfg(SDMMCW),
+ mc_make_sid_security_cfg(BPMPDMAW),
+ mc_make_sid_security_cfg(ISPRA),
+ mc_make_sid_security_cfg(NVDECSWR),
+ mc_make_sid_security_cfg(XUSB_DEVW),
+ mc_make_sid_security_cfg(NVDECSRD),
+ mc_make_sid_security_cfg(MPCOREW),
+ mc_make_sid_security_cfg(NVDISPLAYR),
+ mc_make_sid_security_cfg(BPMPDMAR),
+ mc_make_sid_security_cfg(NVJPGSWR),
+ mc_make_sid_security_cfg(NVDECSRD1),
+ mc_make_sid_security_cfg(TSECSRD),
+ mc_make_sid_security_cfg(NVJPGSRD),
+ mc_make_sid_security_cfg(SDMMCWA),
+ mc_make_sid_security_cfg(SCER),
+ mc_make_sid_security_cfg(XUSB_HOSTR),
+ mc_make_sid_security_cfg(VICSRD),
+ mc_make_sid_security_cfg(AONDMAR),
+ mc_make_sid_security_cfg(AONW),
+ mc_make_sid_security_cfg(SDMMCRA),
+ mc_make_sid_security_cfg(HOST1XDMAR),
+ mc_make_sid_security_cfg(EQOSR),
+ mc_make_sid_security_cfg(SATAR),
+ mc_make_sid_security_cfg(BPMPR),
+ mc_make_sid_security_cfg(HDAR),
+ mc_make_sid_security_cfg(SDMMCRAB),
+ mc_make_sid_security_cfg(ETRR),
+ mc_make_sid_security_cfg(AONR),
+ mc_make_sid_security_cfg(APEDMAR),
+ mc_make_sid_security_cfg(SESRD),
+ mc_make_sid_security_cfg(NVENCSRD),
+ mc_make_sid_security_cfg(GPUSWR),
+ mc_make_sid_security_cfg(TSECSWRB),
+ mc_make_sid_security_cfg(ISPWB),
+ mc_make_sid_security_cfg(GPUSRD2),
+ mc_make_sid_override_cfg(APER),
+ mc_make_sid_override_cfg(VICSRD),
+ mc_make_sid_override_cfg(NVENCSRD),
+ mc_make_sid_override_cfg(NVJPGSWR),
+ mc_make_sid_override_cfg(AONW),
+ mc_make_sid_override_cfg(BPMPR),
+ mc_make_sid_override_cfg(BPMPW),
+ mc_make_sid_override_cfg(HDAW),
+ mc_make_sid_override_cfg(NVDISPLAYR1),
+ mc_make_sid_override_cfg(APEDMAR),
+ mc_make_sid_override_cfg(AFIR),
+ mc_make_sid_override_cfg(AXISR),
+ mc_make_sid_override_cfg(VICSRD1),
+ mc_make_sid_override_cfg(TSECSRD),
+ mc_make_sid_override_cfg(BPMPDMAW),
+ mc_make_sid_override_cfg(MPCOREW),
+ mc_make_sid_override_cfg(XUSB_HOSTR),
+ mc_make_sid_override_cfg(GPUSWR),
+ mc_make_sid_override_cfg(XUSB_DEVR),
+ mc_make_sid_override_cfg(UFSHCW),
+ mc_make_sid_override_cfg(XUSB_HOSTW),
+ mc_make_sid_override_cfg(SDMMCWAB),
+ mc_make_sid_override_cfg(SATAW),
+ mc_make_sid_override_cfg(SCEDMAR),
+ mc_make_sid_override_cfg(HOST1XDMAR),
+ mc_make_sid_override_cfg(SDMMCWA),
+ mc_make_sid_override_cfg(APEDMAW),
+ mc_make_sid_override_cfg(SESWR),
+ mc_make_sid_override_cfg(AXISW),
+ mc_make_sid_override_cfg(AONDMAW),
+ mc_make_sid_override_cfg(TSECSWRB),
+ mc_make_sid_override_cfg(MPCORER),
+ mc_make_sid_override_cfg(ISPWB),
+ mc_make_sid_override_cfg(AONR),
+ mc_make_sid_override_cfg(BPMPDMAR),
+ mc_make_sid_override_cfg(HDAR),
+ mc_make_sid_override_cfg(SDMMCRA),
+ mc_make_sid_override_cfg(ETRW),
+ mc_make_sid_override_cfg(GPUSWR2),
+ mc_make_sid_override_cfg(EQOSR),
+ mc_make_sid_override_cfg(TSECSWR),
+ mc_make_sid_override_cfg(ETRR),
+ mc_make_sid_override_cfg(NVDECSRD),
+ mc_make_sid_override_cfg(TSECSRDB),
+ mc_make_sid_override_cfg(SDMMCRAA),
+ mc_make_sid_override_cfg(NVDECSRD1),
+ mc_make_sid_override_cfg(SDMMCR),
+ mc_make_sid_override_cfg(NVJPGSRD),
+ mc_make_sid_override_cfg(SCEDMAW),
+ mc_make_sid_override_cfg(SDMMCWAA),
+ mc_make_sid_override_cfg(APEW),
+ mc_make_sid_override_cfg(AONDMAR),
+ mc_make_sid_override_cfg(PTCR),
+ mc_make_sid_override_cfg(SCER),
+ mc_make_sid_override_cfg(ISPRA),
+ mc_make_sid_override_cfg(ISPWA),
+ mc_make_sid_override_cfg(VICSWR),
+ mc_make_sid_override_cfg(SESRD),
+ mc_make_sid_override_cfg(SDMMCW),
+ mc_make_sid_override_cfg(SDMMCRAB),
+ mc_make_sid_override_cfg(EQOSW),
+ mc_make_sid_override_cfg(GPUSRD2),
+ mc_make_sid_override_cfg(SCEW),
+ mc_make_sid_override_cfg(GPUSRD),
+ mc_make_sid_override_cfg(NVDECSWR),
+ mc_make_sid_override_cfg(XUSB_DEVW),
+ mc_make_sid_override_cfg(SATAR),
+ mc_make_sid_override_cfg(NVDISPLAYR),
+ mc_make_sid_override_cfg(VIW),
+ mc_make_sid_override_cfg(UFSHCR),
+ mc_make_sid_override_cfg(NVENCSWR),
+ mc_make_sid_override_cfg(AFIW),
+ mc_smmu_bypass_cfg, /* TBU settings */
+ _END_OF_TABLE_,
+};
+
+/*******************************************************************************
+ * Handler to return the pointer to the MC's context struct
+ ******************************************************************************/
+mc_regs_t *plat_memctrl_get_sys_suspend_ctx(void)
+{
+ /* index of _END_OF_TABLE_ */
+ tegra186_mc_context[0].val = (uint32_t)(ARRAY_SIZE(tegra186_mc_context)) - 1U;
+
+ return tegra186_mc_context;
+}
+
+void plat_memctrl_setup(void)
+{
+ uint32_t val;
+ unsigned int i;
+
+ /* Program all the Stream ID overrides */
+ for (i = 0U; i < ARRAY_SIZE(tegra186_streamid_override_regs); i++) {
+ tegra_mc_streamid_write_32(tegra186_streamid_override_regs[i],
+ MC_STREAM_ID_MAX);
+ }
+
+ /* Program the security config settings for all Stream IDs */
+ for (i = 0U; i < ARRAY_SIZE(tegra186_streamid_sec_cfgs); i++) {
+ val = (tegra186_streamid_sec_cfgs[i].override_enable << 16) |
+ (tegra186_streamid_sec_cfgs[i].override_client_inputs << 8) |
+ (tegra186_streamid_sec_cfgs[i].override_client_ns_flag << 0);
+ tegra_mc_streamid_write_32(tegra186_streamid_sec_cfgs[i].offset, val);
+ }
+
+ /*
+ * Re-configure MSS to allow ROC to deal with ordering of the
+ * Memory Controller traffic. This is needed as the Memory Controller
+ * boots with MSS having all control, but ROC provides a performance
+ * boost as compared to MSS.
+ */
+ tegra186_memctrl_reconfig_mss_clients();
+
+ /* Program overrides for MC transactions */
+ tegra186_memctrl_set_overrides();
+}
+
+/*******************************************************************************
+ * Handler to restore platform specific settings to the memory controller
+ ******************************************************************************/
+void plat_memctrl_restore(void)
+{
+ /*
+ * Re-configure MSS to allow ROC to deal with ordering of the
+ * Memory Controller traffic. This is needed as the Memory Controller
+ * boots with MSS having all control, but ROC provides a performance
+ * boost as compared to MSS.
+ */
+ tegra186_memctrl_reconfig_mss_clients();
+
+ /* Program overrides for MC transactions */
+ tegra186_memctrl_set_overrides();
+}
+
+/*******************************************************************************
+ * Handler to program the scratch registers with TZDRAM settings for the
+ * resume firmware
+ ******************************************************************************/
+void plat_memctrl_tzdram_setup(uint64_t phys_base, uint64_t size_in_bytes)
+{
+ uint32_t val;
+
+ /*
+	 * Set up the Memory Controller to allow only secure accesses to
+ * the TZDRAM carveout
+ */
+ INFO("Configuring TrustZone DRAM Memory Carveout\n");
+
+ tegra_mc_write_32(MC_SECURITY_CFG0_0, (uint32_t)phys_base);
+ tegra_mc_write_32(MC_SECURITY_CFG3_0, (uint32_t)(phys_base >> 32));
+ tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);
+
+	/*
+	 * When TZ encryption is enabled, TZDRAM must be set up before
+	 * the CPU accesses the TZ carveout, otherwise the CPU will
+	 * fetch non-decrypted data. So save the TZDRAM settings for
+	 * the SC7 resume FW to restore.
+	 *
+	 * Scratch registers map:
+	 * RSV55_0 = CFG1[12:0] | CFG0[31:20]
+	 * RSV55_1 = CFG3[1:0]
+	 */
+ val = tegra_mc_read_32(MC_SECURITY_CFG1_0) & MC_SECURITY_SIZE_MB_MASK;
+ val |= tegra_mc_read_32(MC_SECURITY_CFG0_0) & MC_SECURITY_BOM_MASK;
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_TZDRAM_ADDR_LO, val);
+
+ val = tegra_mc_read_32(MC_SECURITY_CFG3_0) & MC_SECURITY_BOM_HI_MASK;
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_TZDRAM_ADDR_HI, val);
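+
+	/*
+	 * Illustrative example (values assumed, not from the original
+	 * source): for phys_base = 0x30000000 and a 64MB carveout,
+	 * CFG0 = 0x30000000 and CFG1 = 0x40 (size in MB), so RSV55_0
+	 * is written with 0x30000040 and RSV55_1 with 0.
+	 */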
+
+ /*
+ * MCE propagates the security configuration values across the
+ * CCPLEX.
+ */
+ (void)mce_update_gsc_tzdram();
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
new file mode 100644
index 0000000..8f88e28
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <context.h>
+#include <cortex_a57.h>
+#include <denver.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/psci/psci.h>
+#include <plat/common/platform.h>
+
+#include <bpmp_ipc.h>
+#include <mce.h>
+#include <memctrl_v2.h>
+#include <security_engine.h>
+#include <smmu.h>
+#include <t18x_ari.h>
+#include <tegra186_private.h>
+#include <tegra_private.h>
+
+extern void memcpy16(void *dest, const void *src, unsigned int length);
+
+/* state id mask */
+#define TEGRA186_STATE_ID_MASK 0xFU
+/* constants to get power state's wake time */
+#define TEGRA186_WAKE_TIME_MASK 0x0FFFFFF0U
+#define TEGRA186_WAKE_TIME_SHIFT 4U
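+/*
+ * Illustrative decode: bits [27:4] of the CPU_SUSPEND power_state carry
+ * the wake time; tegra_soc_validate_power_state() masks the field and
+ * shifts it by TEGRA186_WAKE_TIME_SHIFT before saving it in TSC ticks.
+ */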
+/* default core wake mask for CPU_SUSPEND */
+#define TEGRA186_CORE_WAKE_MASK 0x180cU
+/* context size to save during system suspend */
+#define TEGRA186_SE_CONTEXT_SIZE 3U
+
+static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
+static struct tegra_psci_percpu_data {
+ uint32_t wake_time;
+} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];
+
+int32_t tegra_soc_validate_power_state(uint32_t power_state,
+ psci_power_state_t *req_state)
+{
+ uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
+ uint32_t cpu = plat_my_core_pos();
+ int32_t ret = PSCI_E_SUCCESS;
+
+	/* save the core wake time (in TSC ticks) */
+ tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
+ << TEGRA186_WAKE_TIME_SHIFT;
+
+ /*
+ * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
+ * the correct value is read in tegra_soc_pwr_domain_suspend(), which
+ * is called with caches disabled. It is possible to read a stale value
+ * from DRAM in that function, because the L2 cache is not flushed
+ * unless the cluster is entering CC6/CC7.
+ */
+ clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
+ sizeof(tegra_percpu_data[cpu]));
+
+ /* Sanity check the requested state id */
+ switch (state_id) {
+ case PSTATE_ID_CORE_IDLE:
+ case PSTATE_ID_CORE_POWERDN:
+
+ if (psci_get_pstate_type(power_state) != PSTATE_TYPE_POWERDOWN) {
+ ret = PSCI_E_INVALID_PARAMS;
+ break;
+ }
+
+ /* Core powerdown request */
+ req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
+ req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
+
+ break;
+
+ default:
+ ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
+ ret = PSCI_E_INVALID_PARAMS;
+ break;
+ }
+
+ return ret;
+}
+
+int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
+{
+ (void)cpu_state;
+ return PSCI_E_SUCCESS;
+}
+
+int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ const plat_local_state_t *pwr_domain_state;
+ uint8_t stateid_afflvl0, stateid_afflvl2;
+ uint32_t cpu = plat_my_core_pos();
+ const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ mce_cstate_info_t cstate_info = { 0 };
+ uint64_t mc_ctx_base;
+ uint32_t val;
+
+ /* get the state ID */
+ pwr_domain_state = target_state->pwr_domain_state;
+ stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
+ TEGRA186_STATE_ID_MASK;
+ stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
+ TEGRA186_STATE_ID_MASK;
+
+ if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
+ (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {
+
+ /* Enter CPU idle/powerdown */
+ val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
+ (uint32_t)TEGRA_ARI_CORE_C6 : (uint32_t)TEGRA_ARI_CORE_C7;
+ (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
+ tegra_percpu_data[cpu].wake_time, 0U);
+
+ } else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+ /* save SE registers */
+ se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
+ SE_MUTEX_WATCHDOG_NS_LIMIT);
+ se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
+ RNG_MUTEX_WATCHDOG_NS_LIMIT);
+ se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
+ PKA_MUTEX_WATCHDOG_NS_LIMIT);
+
+ /* save 'Secure Boot' Processor Feature Config Register */
+ val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);
+
+ /* save MC context to TZDRAM */
+ mc_ctx_base = params_from_bl2->tzdram_base;
+ tegra_mc_save_context((uintptr_t)mc_ctx_base);
+
+ /* Prepare for system suspend */
+ cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
+ cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC7;
+ cstate_info.system_state_force = 1;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+
+ /* Loop until system suspend is allowed */
+ do {
+ val = (uint32_t)mce_command_handler(
+ (uint64_t)MCE_CMD_IS_SC7_ALLOWED,
+ (uint64_t)TEGRA_ARI_CORE_C7,
+ MCE_CORE_SLEEP_TIME_INFINITE,
+ 0U);
+ } while (val == 0U);
+
+ /* Instruct the MCE to enter system suspend state */
+ (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
+ (uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
+
+ } else {
+ ; /* do nothing */
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Helper function to check if this is the last ON CPU in the cluster
+ ******************************************************************************/
+static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,
+ uint32_t ncpu)
+{
+ plat_local_state_t target;
+ bool last_on_cpu = true;
+ uint32_t num_cpus = ncpu, pos = 0;
+
+ do {
+ target = states[pos];
+ if (target != PLAT_MAX_OFF_STATE) {
+ last_on_cpu = false;
+ }
+ --num_cpus;
+ pos++;
+ } while (num_cpus != 0U);
+
+ return last_on_cpu;
+}
+
+/*******************************************************************************
+ * Helper function to get target power state for the cluster
+ ******************************************************************************/
+static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
+ uint32_t ncpu)
+{
+ uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
+ uint32_t cpu = plat_my_core_pos();
+ int32_t ret;
+ plat_local_state_t target = states[core_pos];
+ mce_cstate_info_t cstate_info = { 0 };
+
+ /* CPU suspend */
+ if (target == PSTATE_ID_CORE_POWERDN) {
+ /* Program default wake mask */
+ cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+
+ /* Check if CCx state is allowed. */
+ ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
+ (uint64_t)TEGRA_ARI_CORE_C7,
+ tegra_percpu_data[cpu].wake_time,
+ 0U);
+ if (ret == 0) {
+ target = PSCI_LOCAL_STATE_RUN;
+ }
+ }
+
+ /* CPU off */
+ if (target == PLAT_MAX_OFF_STATE) {
+ /* Enable cluster powerdn from last CPU in the cluster */
+ if (tegra_last_cpu_in_cluster(states, ncpu)) {
+ /* Enable CC7 state and turn off wake mask */
+ cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+
+ /* Check if CCx state is allowed. */
+ ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
+ (uint64_t)TEGRA_ARI_CORE_C7,
+ MCE_CORE_SLEEP_TIME_INFINITE,
+ 0U);
+ if (ret == 0) {
+ target = PSCI_LOCAL_STATE_RUN;
+ }
+
+ } else {
+
+ /* Turn off wake_mask */
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+ target = PSCI_LOCAL_STATE_RUN;
+ }
+ }
+
+ return target;
+}
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
+ const plat_local_state_t *states,
+ uint32_t ncpu)
+{
+ plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
+ uint32_t cpu = plat_my_core_pos();
+
+ /* System Suspend */
+ if ((lvl == (uint32_t)MPIDR_AFFLVL2) &&
+ (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
+ target = PSTATE_ID_SOC_POWERDN;
+ }
+
+ /* CPU off, CPU suspend */
+ if (lvl == (uint32_t)MPIDR_AFFLVL1) {
+ target = tegra_get_afflvl1_pwr_state(states, ncpu);
+ }
+
+ /* target cluster/system state */
+ return target;
+}
+
+int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
+{
+ const plat_local_state_t *pwr_domain_state =
+ target_state->pwr_domain_state;
+ const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
+ TEGRA186_STATE_ID_MASK;
+ uint64_t val;
+ uint64_t src_len_in_bytes = (uint64_t)(((uintptr_t)(&__BL31_END__) -
+ (uintptr_t)BL31_BASE));
+ int32_t ret;
+
+ if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+		/* Initialise communication channel with BPMP */
+		ret = tegra_bpmp_ipc_init();
+		assert(ret == 0);
+
+ /* Enable SE clock */
+ ret = tegra_bpmp_ipc_enable_clock(TEGRA186_CLK_SE);
+ if (ret != 0) {
+ ERROR("Failed to enable clock\n");
+ return ret;
+ }
+
+ /*
+ * Generate/save SHA256 of ATF during SC7 entry
+ */
+ if (tegra_se_save_sha256_hash(BL31_BASE,
+ (uint32_t)src_len_in_bytes) != 0) {
+ ERROR("Hash calculation failed. Reboot\n");
+ (void)tegra_soc_prepare_system_reset();
+ }
+
+ /*
+ * The TZRAM loses power when we enter system suspend. To
+ * allow graceful exit from system suspend, we need to copy
+ * BL3-1 over to TZDRAM.
+ */
+ val = params_from_bl2->tzdram_base +
+ tegra186_get_mc_ctx_size();
+ memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
+ (uintptr_t)BL31_END - (uintptr_t)BL31_BASE);
+
+ /*
+		 * Save code base and size; these will be used by SC7-RF
+		 * to verify the binary
+ */
+ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV68_LO,
+ (uint32_t)val);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV0_HI,
+ (uint32_t)src_len_in_bytes);
+
+ ret = tegra_bpmp_ipc_disable_clock(TEGRA186_CLK_SE);
+ if (ret != 0) {
+ ERROR("Failed to disable clock\n");
+ return ret;
+ }
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
+{
+ return PSCI_E_NOT_SUPPORTED;
+}
+
+int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+ int32_t ret = PSCI_E_SUCCESS;
+ uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
+ uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
+ MPIDR_AFFINITY_BITS;
+
+ if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
+
+ ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
+ ret = PSCI_E_NOT_PRESENT;
+
+ } else {
+ /* construct the target CPU # */
+ target_cpu |= (target_cluster << 2);
+
+ (void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
+ }
+
+ return ret;
+}
+
+int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
+ uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
+ mce_cstate_info_t cstate_info = { 0 };
+ uint64_t impl, val;
+ const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+
+ impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+ /*
+ * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
+ * A02p and beyond).
+ */
+ if ((plat_params->l2_ecc_parity_prot_dis != 1) && (impl != DENVER_IMPL)) {
+
+ val = read_l2ctlr_el1();
+ val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+ write_l2ctlr_el1(val);
+ }
+
+ /*
+	 * Reset the power state info when a core comes online: the
+	 * deepest power state is set when a core is offlined, but that
+	 * may not be what the non-secure SW controlling idle states
+	 * requests. The non-secure software re-initialises this info
+	 * once the core is back online.
+ */
+ if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {
+
+ cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC1;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+ }
+
+ /*
+ * Check if we are exiting from deep sleep and restore SE
+ * context if we are.
+ */
+ if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+ mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
+ se_regs[0]);
+ mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
+ se_regs[1]);
+ mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
+ se_regs[2]);
+
+ /* Init SMMU */
+ tegra_smmu_init();
+
+ /*
+		 * Reset the power state info for the last core handling
+		 * SC7 entry and exit: the deepest power states (CC7 and
+		 * SC7) were set for SC7 entry, which may not be what the
+		 * non-secure SW controlling idle states requests.
+ */
+ cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
+ cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC1;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int32_t tegra_soc_pwr_domain_off_early(const psci_power_state_t *target_state)
+{
+ /* Do not power off the boot CPU */
+ if (plat_is_my_cpu_primary()) {
+ return PSCI_E_DENIED;
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+
+ (void)target_state;
+
+ /* Disable Denver's DCO operations */
+ if (impl == DENVER_IMPL) {
+ denver_disable_dco();
+ }
+
+ /* Turn off CPU */
+ (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
+ (uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
+
+ return PSCI_E_SUCCESS;
+}
+
+__dead2 void tegra_soc_prepare_system_off(void)
+{
+ /* power off the entire system */
+ mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);
+
+ wfi();
+
+ /* wait for the system to power down */
+ for (;;) {
+ ;
+ }
+}
+
+int32_t tegra_soc_prepare_system_reset(void)
+{
+ mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
+
+ return PSCI_E_SUCCESS;
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_secondary.c b/plat/nvidia/tegra/soc/t186/plat_secondary.c
new file mode 100644
index 0000000..fbb550a
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_secondary.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include <mce.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+#define SCRATCH_SECURE_RSV1_SCRATCH_0 0x658U
+#define SCRATCH_SECURE_RSV1_SCRATCH_1 0x65CU
+
+#define CPU_RESET_MODE_AA64 1U
+
+/*******************************************************************************
+ * Setup secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+ uint32_t addr_low, addr_high;
+
+ INFO("Setting up secondary CPU boot\n");
+
+ /* TZDRAM base will be used as the "resume" address */
+ addr_low = (uintptr_t)&tegra_secure_entrypoint | CPU_RESET_MODE_AA64;
+ addr_high = (uintptr_t)(((uintptr_t)&tegra_secure_entrypoint >> 32U) & 0x7ffU);
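+
+	/*
+	 * Illustrative split (entrypoint address assumed): an entrypoint
+	 * at 0x40000000 yields addr_low = 0x40000001 (bit 0 requests an
+	 * AArch64 reset) and addr_high = 0 (bits [42:32] of the address).
+	 */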
+
+ /* save reset vector to be used during SYSTEM_SUSPEND exit */
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_RESET_VECTOR_LO,
+ addr_low);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_RESET_VECTOR_HI,
+ addr_high);
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_setup.c b/plat/nvidia/tegra/soc/t186/plat_setup.c
new file mode 100644
index 0000000..b21faa3
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_setup.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/ep_info.h>
+#include <common/interrupt_props.h>
+#include <context.h>
+#include <cortex_a57.h>
+#include <denver.h>
+#include <drivers/arm/gic_common.h>
+#include <drivers/arm/gicv2.h>
+#include <drivers/console.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+
+#include <mce.h>
+#include <memctrl.h>
+#include <smmu.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+extern void memcpy16(void *dest, const void *src, unsigned int length);
+
+/*******************************************************************************
+ * Tegra186 CPU numbers in cluster #0
+ *******************************************************************************
+ */
+#define TEGRA186_CLUSTER0_CORE2 2U
+#define TEGRA186_CLUSTER0_CORE3 3U
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ *******************************************************************************
+ */
+static const uint8_t tegra_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ 1,
+ /* No of clusters */
+ PLATFORM_CLUSTER_COUNT,
+ /* No of CPU cores - cluster0 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER,
+ /* No of CPU cores - cluster1 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER
+};
+
+/*******************************************************************************
+ * This function returns the Tegra default topology tree information.
+ ******************************************************************************/
+const uint8_t *plat_get_power_domain_tree_desc(void)
+{
+ return tegra_power_domain_tree_desc;
+}
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+ MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000U, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_TSA_BASE, 0x20000U, /* 128KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000U, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000U, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000U, /* 128KB - UART A, B */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000U, /* 128KB - UART C, G */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000U, /* 192KB - UART D, E, F */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_FUSE_BASE, 0x10000U, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000U, /* 128KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x10000U, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x10000U, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x10000U, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000U, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_PMC_BASE, 0x40000U, /* 256KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_TMRUS_BASE, 0x1000U, /* 4KB */
+ MT_DEVICE | MT_RO | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x10000U, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000U, /* 384KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_ARM_ACTMON_CTR_BASE, 0x20000U, /* 128KB - ARM/Denver */
+ MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000U, /* 16MB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_HSP_DBELL_BASE, 0x10000U, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_BPMP_IPC_TX_PHYS_BASE, TEGRA_BPMP_IPC_CH_MAP_SIZE, /* 4KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_BPMP_IPC_RX_PHYS_BASE, TEGRA_BPMP_IPC_CH_MAP_SIZE, /* 4KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ {0}
+};
+
+/*******************************************************************************
+ * Return the platform memory map, used to set up the page tables and the MMU
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+ /* MMIO space */
+ return tegra_mmap;
+}
+
+/*******************************************************************************
+ * Handler to get the System Counter Frequency
+ ******************************************************************************/
+uint32_t plat_get_syscnt_freq2(void)
+{
+ return 31250000;
+}
+
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA186_MAX_UART_PORTS 7
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra186_uart_addresses[TEGRA186_MAX_UART_PORTS + 1] = {
+ 0, /* undefined - treated as an error case */
+ TEGRA_UARTA_BASE,
+ TEGRA_UARTB_BASE,
+ TEGRA_UARTC_BASE,
+ TEGRA_UARTD_BASE,
+ TEGRA_UARTE_BASE,
+ TEGRA_UARTF_BASE,
+ TEGRA_UARTG_BASE,
+};
+
+/*******************************************************************************
+ * Enable console corresponding to the console ID
+ ******************************************************************************/
+void plat_enable_console(int32_t id)
+{
+ static console_t uart_console;
+ uint32_t console_clock;
+
+	if ((id > 0) && (id <= TEGRA186_MAX_UART_PORTS)) {
+ /*
+ * Reference clock used by the FPGAs is a lot slower.
+ */
+ if (tegra_platform_is_fpga()) {
+ console_clock = TEGRA_BOOT_UART_CLK_13_MHZ;
+ } else {
+ console_clock = TEGRA_BOOT_UART_CLK_408_MHZ;
+ }
+
+ (void)console_16550_register(tegra186_uart_addresses[id],
+ console_clock,
+ TEGRA_CONSOLE_BAUDRATE,
+ &uart_console);
+ console_set_scope(&uart_console, CONSOLE_FLAG_BOOT |
+ CONSOLE_FLAG_RUNTIME | CONSOLE_FLAG_CRASH);
+ }
+}
+
+/*******************************************************************************
+ * Handler for early platform setup
+ ******************************************************************************/
+void plat_early_platform_setup(void)
+{
+ uint64_t impl, val;
+ const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+ const struct tegra_bl31_params *arg_from_bl2 = plat_get_bl31_params();
+
+ /* Verify chip id is t186 */
+ assert(tegra_chipid_is_t186());
+
+ /* sanity check MCE firmware compatibility */
+ mce_verify_firmware_version();
+
+ /*
+ * Do initial security configuration to allow DRAM/device access.
+ */
+ tegra_memctrl_tzdram_setup(plat_params->tzdram_base,
+ (uint32_t)plat_params->tzdram_size);
+
+ impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+
+ /*
+ * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
+ * A02p and beyond).
+ */
+ if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
+ (impl != (uint64_t)DENVER_IMPL)) {
+
+ val = read_l2ctlr_el1();
+ val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+ write_l2ctlr_el1(val);
+ }
+
+	/*
+	 * The previous bootloader might not have placed the BL32 image
+	 * inside TZDRAM, so relocate the BL32 image to TZDRAM memory if
+	 * required. This behaviour can differ per platform.
+	 */
+ plat_relocate_bl32_image(arg_from_bl2->bl32_image_info);
+}
+
+/*******************************************************************************
+ * Handler for late platform setup
+ ******************************************************************************/
+void plat_late_platform_setup(void)
+{
+ ; /* do nothing */
+}
+
+/* Secure IRQs for Tegra186 */
+static const interrupt_prop_t tegra186_interrupt_props[] = {
+ INTR_PROP_DESC(TEGRA_SDEI_SGI_PRIVATE, PLAT_SDEI_CRITICAL_PRI,
+ GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE),
+ INTR_PROP_DESC(TEGRA186_TOP_WDT_IRQ, PLAT_TEGRA_WDT_PRIO,
+ GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE),
+ INTR_PROP_DESC(TEGRA186_AON_WDT_IRQ, PLAT_TEGRA_WDT_PRIO,
+ GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE)
+};
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+ tegra_gic_setup(tegra186_interrupt_props, ARRAY_SIZE(tegra186_interrupt_props));
+ tegra_gic_init();
+
+ /*
+ * Initialize the FIQ handler only if the platform supports any
+ * FIQ interrupt sources.
+ */
+ tegra_fiq_handler_setup();
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 params from previous bootloader
+ ******************************************************************************/
+struct tegra_bl31_params *plat_get_bl31_params(void)
+{
+ uint32_t val;
+
+ val = mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_BL31_PARAMS_ADDR);
+
+ return (struct tegra_bl31_params *)(uintptr_t)val;
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 platform params from previous bootloader
+ ******************************************************************************/
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
+{
+ uint32_t val;
+
+ val = mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_BL31_PLAT_PARAMS_ADDR);
+
+ return (plat_params_from_bl2_t *)(uintptr_t)val;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int32_t plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+ u_register_t cluster_id, cpu_id, pos;
+ int32_t ret;
+
+ cluster_id = (mpidr >> (u_register_t)MPIDR_AFF1_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK;
+ cpu_id = (mpidr >> (u_register_t)MPIDR_AFF0_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK;
+
+ /*
+ * Validate cluster_id by checking whether it represents
+ * one of the two clusters present on the platform.
+ * Validate cpu_id by checking whether it represents a CPU in
+ * one of the two clusters present on the platform.
+ */
+ if ((cluster_id >= (u_register_t)PLATFORM_CLUSTER_COUNT) ||
+ (cpu_id >= (u_register_t)PLATFORM_MAX_CPUS_PER_CLUSTER)) {
+ ret = -1;
+ } else {
+ /* calculate the core position */
+ pos = cpu_id + (cluster_id << 2U);
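+		/* e.g. (cluster 1, cpu 2) -> position 6; (cluster 0, cpu 1) -> 1 */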
+
+ /* check for non-existent CPUs */
+ if ((pos == TEGRA186_CLUSTER0_CORE2) || (pos == TEGRA186_CLUSTER0_CORE3)) {
+ ret = -1;
+ } else {
+ ret = (int32_t)pos;
+ }
+ }
+
+ return ret;
+}
+
+/*******************************************************************************
+ * Handler to relocate BL32 image to TZDRAM
+ ******************************************************************************/
+void plat_relocate_bl32_image(const image_info_t *bl32_img_info)
+{
+ const plat_params_from_bl2_t *plat_bl31_params = plat_get_bl31_plat_params();
+ const entry_point_info_t *bl32_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ uint64_t tzdram_start, tzdram_end, bl32_start, bl32_end;
+
+ if ((bl32_img_info != NULL) && (bl32_ep_info != NULL)) {
+
+ /* Relocate BL32 if it resides outside of the TZDRAM */
+ tzdram_start = plat_bl31_params->tzdram_base;
+ tzdram_end = plat_bl31_params->tzdram_base +
+ plat_bl31_params->tzdram_size;
+ bl32_start = bl32_img_info->image_base;
+ bl32_end = bl32_img_info->image_base + bl32_img_info->image_size;
+
+ assert(tzdram_end > tzdram_start);
+ assert(bl32_end > bl32_start);
+ assert(bl32_ep_info->pc > tzdram_start);
+ assert(bl32_ep_info->pc < tzdram_end);
+
+ /* relocate BL32 */
+ if ((bl32_start >= tzdram_end) || (bl32_end <= tzdram_start)) {
+
+ INFO("Relocate BL32 to TZDRAM\n");
+
+ (void)memcpy16((void *)(uintptr_t)bl32_ep_info->pc,
+ (void *)(uintptr_t)bl32_start,
+ bl32_img_info->image_size);
+
+ /* clean up non-secure intermediate buffer */
+ zeromem((void *)(uintptr_t)bl32_start,
+ bl32_img_info->image_size);
+ }
+ }
+}
+
+/*******************************************************************************
+ * Handler to indicate support for System Suspend
+ ******************************************************************************/
+bool plat_supports_system_suspend(void)
+{
+ return true;
+}
+
+/*******************************************************************************
+ * Platform specific runtime setup.
+ ******************************************************************************/
+void plat_runtime_setup(void)
+{
+ /*
+	 * During cold boot, the arbitration bit is observed to be
+	 * set in the Memory Controller, leading to false error
+	 * interrupts in the non-secure world. To avoid this, clear
+	 * the interrupt status register before booting into the
+	 * non-secure world.
+ */
+ tegra_memctrl_clear_pending_interrupts();
+
+ /*
+ * During boot, USB3 and flash media (SDMMC/SATA) devices need
+ * access to IRAM. Because these clients connect to the MC and
+ * do not have a direct path to the IRAM, the MC implements AHB
+	 * redirection during boot to provide a path to IRAM. In this mode
+ * accesses to a programmed memory address aperture are directed
+ * to the AHB bus, allowing access to the IRAM. This mode must be
+ * disabled before we jump to the non-secure world.
+ */
+ tegra_memctrl_disable_ahb_redirection();
+
+ /*
+ * Verify the integrity of the previously configured SMMU(s)
+ * settings
+ */
+ tegra_smmu_verify();
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
new file mode 100644
index 0000000..4de8a9e
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <denver.h>
+#include <lib/el3_runtime/context_mgmt.h>
+
+#include <mce.h>
+#include <memctrl.h>
+#include <t18x_ari.h>
+#include <tegra_private.h>
+
+/*******************************************************************************
+ * Offset to read the ref_clk counter value
+ ******************************************************************************/
+#define REF_CLK_OFFSET 4ULL
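+
+/*
+ * Each core exposes an 8-byte counter pair in the ACTMON aperture: the
+ * core clock counter at (base + 8 * cpu) and the reference clock counter
+ * REF_CLK_OFFSET bytes above it (derived from the reads below).
+ */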
+
+/*******************************************************************************
+ * Tegra186 SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_GET_ACTMON_CLK_COUNTERS 0xC2FFFE02
+#define TEGRA_SIP_MCE_CMD_ENTER_CSTATE 0xC2FFFF00
+#define TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO 0xC2FFFF01
+#define TEGRA_SIP_MCE_CMD_UPDATE_CROSSOVER_TIME 0xC2FFFF02
+#define TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS 0xC2FFFF03
+#define TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS 0xC2FFFF04
+#define TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED 0xC2FFFF05
+
+#define TEGRA_SIP_MCE_CMD_CC3_CTRL 0xC2FFFF07
+#define TEGRA_SIP_MCE_CMD_ECHO_DATA 0xC2FFFF08
+#define TEGRA_SIP_MCE_CMD_READ_VERSIONS 0xC2FFFF09
+#define TEGRA_SIP_MCE_CMD_ENUM_FEATURES 0xC2FFFF0A
+#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE_TRBITS 0xC2FFFF0B
+#define TEGRA_SIP_MCE_CMD_ENUM_READ_MCA 0xC2FFFF0C
+#define TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA 0xC2FFFF0D
+#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE 0xC2FFFF0E
+#define TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE 0xC2FFFF0F
+#define TEGRA_SIP_MCE_CMD_ENABLE_LATIC 0xC2FFFF10
+#define TEGRA_SIP_MCE_CMD_UNCORE_PERFMON_REQ 0xC2FFFF11
+#define TEGRA_SIP_MCE_CMD_MISC_CCPLEX 0xC2FFFF12
+
+/*******************************************************************************
+ * This function is responsible for handling all T186 SiP calls
+ ******************************************************************************/
+int32_t plat_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ const void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int32_t mce_ret, ret = 0;
+ uint32_t impl, cpu;
+ uint32_t base, core_clk_ctr, ref_clk_ctr;
+ uint32_t local_smc_fid = smc_fid;
+ uint64_t local_x1 = x1, local_x2 = x2, local_x3 = x3;
+
+ (void)x4;
+ (void)cookie;
+ (void)flags;
+
+ if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
+ /* 32-bit function, clear top parameter bits */
+
+ local_x1 = (uint32_t)x1;
+ local_x2 = (uint32_t)x2;
+ local_x3 = (uint32_t)x3;
+ }
+
+ /*
+ * Convert SMC FID to SMC64, to support SMC32/SMC64 configurations
+ */
+ local_smc_fid |= (SMC_64 << FUNCID_CC_SHIFT);
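+	/* e.g. SMC32 FID 0x82FFFF00 becomes 0xC2FFFF00 after this step */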
+
+ switch (local_smc_fid) {
+ /*
+	 * Micro Coded Engine (MCE) commands reside in the 0x82FFFF00 -
+	 * 0x82FFFFFF (SMC32) / 0xC2FFFF00 - 0xC2FFFFFF (SMC64) SiP SMC space
+ */
+ case TEGRA_SIP_MCE_CMD_ENTER_CSTATE:
+ case TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO:
+ case TEGRA_SIP_MCE_CMD_UPDATE_CROSSOVER_TIME:
+ case TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS:
+ case TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS:
+ case TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED:
+ case TEGRA_SIP_MCE_CMD_CC3_CTRL:
+ case TEGRA_SIP_MCE_CMD_ECHO_DATA:
+ case TEGRA_SIP_MCE_CMD_READ_VERSIONS:
+ case TEGRA_SIP_MCE_CMD_ENUM_FEATURES:
+ case TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
+ case TEGRA_SIP_MCE_CMD_ENUM_READ_MCA:
+ case TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA:
+ case TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE:
+ case TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE:
+ case TEGRA_SIP_MCE_CMD_ENABLE_LATIC:
+ case TEGRA_SIP_MCE_CMD_UNCORE_PERFMON_REQ:
+ case TEGRA_SIP_MCE_CMD_MISC_CCPLEX:
+
+ /* clean up the high bits */
+ local_smc_fid &= MCE_CMD_MASK;
+
+ /* execute the command and store the result */
+ mce_ret = mce_command_handler(local_smc_fid, local_x1, local_x2, local_x3);
+ write_ctx_reg(get_gpregs_ctx(handle),
+ CTX_GPREG_X0, (uint64_t)(mce_ret));
+ break;
+
+ /*
+ * This function ID reads the Activity monitor's core/ref clock
+ * counter values for a core/cluster.
+ *
+ * x1 = MPIDR of the target core
+ * x2 = MIDR of the target core
+ */
+ case TEGRA_SIP_GET_ACTMON_CLK_COUNTERS:
+
+ cpu = (uint32_t)x1 & MPIDR_CPU_MASK;
+ impl = ((uint32_t)x2 >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+ /* sanity check target CPU number */
+		if (cpu >= (uint32_t)PLATFORM_MAX_CPUS_PER_CLUSTER) {
+ ret = -EINVAL;
+ } else {
+ /* get the base address for the current CPU */
+ base = (impl == DENVER_IMPL) ? TEGRA_DENVER_ACTMON_CTR_BASE :
+ TEGRA_ARM_ACTMON_CTR_BASE;
+
+ /* read the clock counter values */
+ core_clk_ctr = mmio_read_32(base + (8ULL * cpu));
+ ref_clk_ctr = mmio_read_32(base + (8ULL * cpu) + REF_CLK_OFFSET);
+
+ /* return the counter values as two different parameters */
+ write_ctx_reg(get_gpregs_ctx(handle),
+ CTX_GPREG_X1, (core_clk_ctr));
+ write_ctx_reg(get_gpregs_ctx(handle),
+ CTX_GPREG_X2, (ref_clk_ctr));
+ }
+
+ break;
+
+ default:
+ ret = -ENOTSUP;
+ break;
+ }
+
+ return ret;
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_smmu.c b/plat/nvidia/tegra/soc/t186/plat_smmu.c
new file mode 100644
index 0000000..f1bc235
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_smmu.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.h>
+
+#include <smmu.h>
+#include <tegra_def.h>
+#include <tegra_mc_def.h>
+
+#define MAX_NUM_SMMU_DEVICES U(1)
+
+/*******************************************************************************
+ * Handler to return the number of supported SMMU devices
+ ******************************************************************************/
+uint32_t plat_get_num_smmu_devices(void)
+{
+ return MAX_NUM_SMMU_DEVICES;
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_trampoline.S b/plat/nvidia/tegra/soc/t186/plat_trampoline.S
new file mode 100644
index 0000000..2fc2046
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_trampoline.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <memctrl_v2.h>
+#include <plat/common/common_def.h>
+#include <tegra_def.h>
+
+#define TEGRA186_MC_CTX_SIZE 0x93
+
+ .globl tegra186_get_mc_ctx_size
+
+ /*
+	 * Tegra186 reset data (offset 0x0 - 0x498)
+	 *
+	 * 0x000: MC context start
+	 * 0x498: MC context end (TEGRA186_MC_CTX_SIZE quad words)
+ */
+
+ .align 4
+__tegra186_mc_context:
+ .rept TEGRA186_MC_CTX_SIZE
+ .quad 0
+ .endr
+
+ .align 4
+__tegra186_mc_context_end:
+
+/* return the size of the MC context */
+func tegra186_get_mc_ctx_size
+ adr x0, __tegra186_mc_context_end
+ adr x1, __tegra186_mc_context
+ sub x0, x0, x1
+ ret
+endfunc tegra186_get_mc_ctx_size
diff --git a/plat/nvidia/tegra/soc/t186/platform_t186.mk b/plat/nvidia/tegra/soc/t186/platform_t186.mk
new file mode 100644
index 0000000..5275b8e
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/platform_t186.mk
@@ -0,0 +1,77 @@
+#
+# Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# platform configs
+ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS := 1
+$(eval $(call add_define,ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS))
+
+ENABLE_CHIP_VERIFICATION_HARNESS := 0
+$(eval $(call add_define,ENABLE_CHIP_VERIFICATION_HARNESS))
+
+RESET_TO_BL31 := 1
+
+PROGRAMMABLE_RESET_ADDRESS := 0
+
+COLD_BOOT_SINGLE_CPU := 1
+
+RELOCATE_BL32_IMAGE := 1
+
+# platform settings
+TZDRAM_BASE := 0x30000000
+$(eval $(call add_define,TZDRAM_BASE))
+
+PLATFORM_CLUSTER_COUNT := 2
+$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
+
+PLATFORM_MAX_CPUS_PER_CLUSTER := 4
+$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
+
+MAX_XLAT_TABLES := 25
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS := 30
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+# platform files
+PLAT_INCLUDES += -Iplat/nvidia/tegra/include/t186 \
+ -I${SOC_DIR}/drivers/include
+
+BL31_SOURCES += ${TEGRA_GICv2_SOURCES} \
+ drivers/ti/uart/aarch64/16550_console.S \
+ lib/cpus/aarch64/denver.S \
+ lib/cpus/aarch64/cortex_a57.S \
+ ${TEGRA_DRIVERS}/bpmp_ipc/intf.c \
+ ${TEGRA_DRIVERS}/bpmp_ipc/ivc.c \
+ ${TEGRA_DRIVERS}/gpcdma/gpcdma.c \
+ ${TEGRA_DRIVERS}/memctrl/memctrl_v2.c \
+ ${TEGRA_DRIVERS}/smmu/smmu.c \
+ ${SOC_DIR}/drivers/mce/mce.c \
+ ${SOC_DIR}/drivers/mce/ari.c \
+ ${SOC_DIR}/drivers/mce/nvg.c \
+ ${SOC_DIR}/drivers/mce/aarch64/nvg_helpers.S \
+ $(SOC_DIR)/drivers/se/se.c \
+ ${SOC_DIR}/plat_memctrl.c \
+ ${SOC_DIR}/plat_psci_handlers.c \
+ ${SOC_DIR}/plat_setup.c \
+ ${SOC_DIR}/plat_secondary.c \
+ ${SOC_DIR}/plat_sip_calls.c \
+ ${SOC_DIR}/plat_smmu.c \
+ ${SOC_DIR}/plat_trampoline.S
+
+# Enable workarounds for selected Cortex-A57 errata.
+A57_DISABLE_NON_TEMPORAL_HINT := 1
+ERRATA_A57_806969 := 1
+ERRATA_A57_813419 := 1
+ERRATA_A57_813420 := 1
+ERRATA_A57_826974 := 1
+ERRATA_A57_826977 := 1
+ERRATA_A57_828024 := 1
+ERRATA_A57_829520 := 1
+ERRATA_A57_833471 := 1
+
+# Enable higher performance Non-cacheable load forwarding
+A57_ENABLE_NONCACHEABLE_LOAD_FWD := 1
diff --git a/plat/nvidia/tegra/soc/t194/drivers/include/mce_private.h b/plat/nvidia/tegra/soc/t194/drivers/include/mce_private.h
new file mode 100644
index 0000000..ef16980
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/drivers/include/mce_private.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MCE_PRIVATE_H
+#define MCE_PRIVATE_H
+
+#include <stdbool.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * Macros to prepare CSTATE info request
+ ******************************************************************************/
+/* Description of the parameters for UPDATE_CSTATE_INFO request */
+#define CLUSTER_CSTATE_MASK 0x7U
+#define CLUSTER_CSTATE_SHIFT 0x0U
+#define CLUSTER_CSTATE_UPDATE_BIT (1U << 7)
+#define CCPLEX_CSTATE_MASK 0x7U
+#define CCPLEX_CSTATE_SHIFT 8U
+#define CCPLEX_CSTATE_UPDATE_BIT (1U << 15)
+#define SYSTEM_CSTATE_MASK 0xFU
+#define SYSTEM_CSTATE_SHIFT 16U
+#define SYSTEM_CSTATE_UPDATE_BIT (1U << 23)
+#define CSTATE_WAKE_MASK_UPDATE_BIT (1U << 31)
+#define CSTATE_WAKE_MASK_SHIFT 32U
+#define CSTATE_WAKE_MASK_CLEAR 0xFFFFFFFFU
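+
+/*
+ * Illustrative encoding (derived from the fields above): a request that
+ * updates the cluster state to CC6 and the wake mask would write
+ * ((uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT) |
+ * CSTATE_WAKE_MASK_UPDATE_BIT | CLUSTER_CSTATE_UPDATE_BIT |
+ * (6UL << CLUSTER_CSTATE_SHIFT) as the UPDATE_CSTATE_INFO data word.
+ */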
+
+/*******************************************************************************
+ * Core ID mask (bits 3:0 in the online request)
+ ******************************************************************************/
+#define MCE_CORE_ID_MASK 0xFU
+
+/*******************************************************************************
+ * C-state statistics macros
+ ******************************************************************************/
+#define MCE_STAT_ID_SHIFT 16U
+
+/*******************************************************************************
+ * Security config macros
+ ******************************************************************************/
+#define STRICT_CHECKING_ENABLED_SET (1UL << 0)
+#define STRICT_CHECKING_LOCKED_SET (1UL << 1)
+
+/* declarations for NVG handler functions */
+uint64_t nvg_get_version(void);
+void nvg_set_wake_time(uint32_t wake_time);
+void nvg_update_cstate_info(uint32_t cluster, uint32_t ccplex,
+ uint32_t system, uint32_t wake_mask, uint8_t update_wake_mask);
+int32_t nvg_set_cstate_stat_query_value(uint64_t data);
+uint64_t nvg_get_cstate_stat_query_value(void);
+int32_t nvg_is_sc7_allowed(void);
+int32_t nvg_online_core(uint32_t core);
+int32_t nvg_update_ccplex_gsc(uint32_t gsc_idx);
+int32_t nvg_enter_cstate(uint32_t state, uint32_t wake_time);
+int32_t nvg_roc_clean_cache_trbits(void);
+void nvg_enable_strict_checking_mode(void);
+void nvg_verify_strict_checking_mode(void);
+void nvg_system_shutdown(void);
+void nvg_system_reboot(void);
+void nvg_clear_hsm_corr_status(void);
+
+/* declarations for assembly functions */
+void nvg_set_request_data(uint64_t req, uint64_t data);
+void nvg_set_request(uint64_t req);
+uint64_t nvg_get_result(void);
+uint64_t nvg_cache_clean(void);
+uint64_t nvg_cache_clean_inval(void);
+uint64_t nvg_cache_inval_all(void);
+
+/* MCE helper functions */
+void mce_enable_strict_checking(void);
+void mce_verify_strict_checking(void);
+void mce_system_shutdown(void);
+void mce_system_reboot(void);
+void mce_clear_hsm_corr_status(void);
+
+#endif /* MCE_PRIVATE_H */
diff --git a/plat/nvidia/tegra/soc/t194/drivers/include/se.h b/plat/nvidia/tegra/soc/t194/drivers/include/se.h
new file mode 100644
index 0000000..7de55a7
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/drivers/include/se.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SE_H
+#define SE_H
+
+int32_t tegra_se_calculate_save_sha256(uint64_t src_addr,
+ uint32_t src_len_inbyte);
+int32_t tegra_se_suspend(void);
+void tegra_se_resume(void);
+
+#endif /* SE_H */
diff --git a/plat/nvidia/tegra/soc/t194/drivers/include/t194_nvg.h b/plat/nvidia/tegra/soc/t194/drivers/include/t194_nvg.h
new file mode 100644
index 0000000..ca74d2c
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/drivers/include/t194_nvg.h
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef T194_NVG_H
+#define T194_NVG_H
+
+#include <lib/utils_def.h>
+
+/**
+ * t194_nvg.h - Header for the NVIDIA Generic interface (NVG).
+ * Official documentation for this interface is included as part
+ * of the T194 TRM.
+ */
+
+/**
+ * Current version - Major version increments may break backwards
+ * compatibility and binary compatibility. Minor version increments
+ * occur when there is only new functionality.
+ */
+enum {
+ TEGRA_NVG_VERSION_MAJOR = U(6),
+ TEGRA_NVG_VERSION_MINOR = U(7)
+};
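+
+/*
+ * Note (derived from nvg_version_data_t below): the 64-bit version word
+ * packs the minor version in bits [31:0] and the major version in bits
+ * [63:32].
+ */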
+
+typedef enum {
+ TEGRA_NVG_CHANNEL_VERSION = U(0),
+ TEGRA_NVG_CHANNEL_POWER_PERF = U(1),
+ TEGRA_NVG_CHANNEL_POWER_MODES = U(2),
+ TEGRA_NVG_CHANNEL_WAKE_TIME = U(3),
+ TEGRA_NVG_CHANNEL_CSTATE_INFO = U(4),
+ TEGRA_NVG_CHANNEL_CROSSOVER_C6_LOWER_BOUND = U(5),
+ TEGRA_NVG_CHANNEL_CROSSOVER_CC6_LOWER_BOUND = U(6),
+ TEGRA_NVG_CHANNEL_CROSSOVER_CG7_LOWER_BOUND = U(8),
+ TEGRA_NVG_CHANNEL_CSTATE_STAT_QUERY_REQUEST = U(10),
+ TEGRA_NVG_CHANNEL_CSTATE_STAT_QUERY_VALUE = U(11),
+ TEGRA_NVG_CHANNEL_NUM_CORES = U(20),
+ TEGRA_NVG_CHANNEL_UNIQUE_LOGICAL_ID = U(21),
+ TEGRA_NVG_CHANNEL_LOGICAL_TO_PHYSICAL_MAPPING = U(22),
+ TEGRA_NVG_CHANNEL_LOGICAL_TO_MPIDR = U(23),
+ TEGRA_NVG_CHANNEL_SHUTDOWN = U(42),
+ TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED = U(43),
+ TEGRA_NVG_CHANNEL_ONLINE_CORE = U(44),
+ TEGRA_NVG_CHANNEL_CC3_CTRL = U(45),
+ TEGRA_NVG_CHANNEL_CCPLEX_CACHE_CONTROL = U(49),
+ TEGRA_NVG_CHANNEL_UPDATE_CCPLEX_GSC = U(50),
+ TEGRA_NVG_CHANNEL_HSM_ERROR_CTRL = U(53),
+ TEGRA_NVG_CHANNEL_SECURITY_CONFIG = U(54),
+ TEGRA_NVG_CHANNEL_DEBUG_CONFIG = U(55),
+ TEGRA_NVG_CHANNEL_DDA_SNOC_MCF = U(56),
+ TEGRA_NVG_CHANNEL_DDA_MCF_ORD1 = U(57),
+ TEGRA_NVG_CHANNEL_DDA_MCF_ORD2 = U(58),
+ TEGRA_NVG_CHANNEL_DDA_MCF_ORD3 = U(59),
+ TEGRA_NVG_CHANNEL_DDA_MCF_ISO = U(60),
+ TEGRA_NVG_CHANNEL_DDA_MCF_SISO = U(61),
+ TEGRA_NVG_CHANNEL_DDA_MCF_NISO = U(62),
+ TEGRA_NVG_CHANNEL_DDA_MCF_NISO_REMOTE = U(63),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_ISO = U(64),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_SISO = U(65),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_NISO = U(66),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_NISO_REMOTE = U(67),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_L3FILL = U(68),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_L3WR = U(69),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_RSP_L3RD_DMA = U(70),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_RSP_MCFRD_DMA = U(71),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_GLOBAL = U(72),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_LL = U(73),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_L3D = U(74),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_FCM_RD = U(75),
+ TEGRA_NVG_CHANNEL_DDA_L3CTRL_FCM_WR = U(76),
+ TEGRA_NVG_CHANNEL_DDA_SNOC_GLOBAL_CTRL = U(77),
+ TEGRA_NVG_CHANNEL_DDA_SNOC_CLIENT_REQ_CTRL = U(78),
+ TEGRA_NVG_CHANNEL_DDA_SNOC_CLIENT_REPLENTISH_CTRL = U(79),
+ TEGRA_NVG_CHANNEL_RT_SAFE_MASK = U(80),
+ TEGRA_NVG_CHANNEL_RT_WINDOW_US = U(81),
+ TEGRA_NVG_CHANNEL_RT_FWD_PROGRESS_US = U(82),
+
+ TEGRA_NVG_CHANNEL_LAST_INDEX
+} tegra_nvg_channel_id_t;
+
+typedef enum {
+ NVG_STAT_QUERY_SC7_ENTRIES = U(1),
+ NVG_STAT_QUERY_CC6_ENTRIES = U(6),
+ NVG_STAT_QUERY_CG7_ENTRIES = U(7),
+ NVG_STAT_QUERY_C6_ENTRIES = U(10),
+ NVG_STAT_QUERY_C7_ENTRIES = U(14),
+ NVG_STAT_QUERY_SC7_RESIDENCY_SUM = U(32),
+ NVG_STAT_QUERY_CC6_RESIDENCY_SUM = U(41),
+ NVG_STAT_QUERY_CG7_RESIDENCY_SUM = U(46),
+ NVG_STAT_QUERY_C6_RESIDENCY_SUM = U(51),
+ NVG_STAT_QUERY_C7_RESIDENCY_SUM = U(56),
+ NVG_STAT_QUERY_SC7_ENTRY_TIME_SUM = U(60),
+ NVG_STAT_QUERY_CC6_ENTRY_TIME_SUM = U(61),
+ NVG_STAT_QUERY_CG7_ENTRY_TIME_SUM = U(62),
+ NVG_STAT_QUERY_C6_ENTRY_TIME_SUM = U(63),
+ NVG_STAT_QUERY_C7_ENTRY_TIME_SUM = U(64),
+ NVG_STAT_QUERY_SC7_EXIT_TIME_SUM = U(70),
+ NVG_STAT_QUERY_CC6_EXIT_TIME_SUM = U(71),
+ NVG_STAT_QUERY_CG7_EXIT_TIME_SUM = U(72),
+ NVG_STAT_QUERY_C6_EXIT_TIME_SUM = U(73),
+ NVG_STAT_QUERY_C7_EXIT_TIME_SUM = U(74),
+ NVG_STAT_QUERY_SC7_ENTRY_LAST = U(80),
+ NVG_STAT_QUERY_CC6_ENTRY_LAST = U(81),
+ NVG_STAT_QUERY_CG7_ENTRY_LAST = U(82),
+ NVG_STAT_QUERY_C6_ENTRY_LAST = U(83),
+ NVG_STAT_QUERY_C7_ENTRY_LAST = U(84),
+ NVG_STAT_QUERY_SC7_EXIT_LAST = U(90),
+ NVG_STAT_QUERY_CC6_EXIT_LAST = U(91),
+ NVG_STAT_QUERY_CG7_EXIT_LAST = U(92),
+ NVG_STAT_QUERY_C6_EXIT_LAST = U(93),
+ NVG_STAT_QUERY_C7_EXIT_LAST = U(94)
+
+} tegra_nvg_stat_query_t;
+
+typedef enum {
+ TEGRA_NVG_CORE_C0 = U(0),
+ TEGRA_NVG_CORE_C1 = U(1),
+ TEGRA_NVG_CORE_C6 = U(6),
+ TEGRA_NVG_CORE_C7 = U(7),
+ TEGRA_NVG_CORE_WARMRSTREQ = U(8)
+} tegra_nvg_core_sleep_state_t;
+
+typedef enum {
+ TEGRA_NVG_SHUTDOWN = U(0),
+ TEGRA_NVG_REBOOT = U(1)
+} tegra_nvg_shutdown_reboot_state_t;
+
+typedef enum {
+ TEGRA_NVG_CLUSTER_CC0 = U(0),
+ TEGRA_NVG_CLUSTER_AUTO_CC1 = U(1),
+ TEGRA_NVG_CLUSTER_CC6 = U(6)
+} tegra_nvg_cluster_sleep_state_t;
+
+typedef enum {
+ TEGRA_NVG_CG_CG0 = U(0),
+ TEGRA_NVG_CG_CG7 = U(7)
+} tegra_nvg_cluster_group_sleep_state_t;
+
+typedef enum {
+ TEGRA_NVG_SYSTEM_SC0 = U(0),
+ TEGRA_NVG_SYSTEM_SC7 = U(7),
+ TEGRA_NVG_SYSTEM_SC8 = U(8)
+} tegra_nvg_system_sleep_state_t;
+
+// ---------------------------------------------------------------------------
+// NVG Data subformats
+// ---------------------------------------------------------------------------
+
+typedef union {
+ uint64_t flat;
+ struct nvg_version_channel_t {
+ uint32_t minor_version : U(32);
+ uint32_t major_version : U(32);
+ } bits;
+} nvg_version_data_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t perf_per_watt : U(1);
+ uint32_t reserved_31_1 : U(31);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_power_perf_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t low_battery : U(1);
+ uint32_t reserved_1_1 : U(1);
+ uint32_t battery_save : U(1);
+ uint32_t reserved_31_3 : U(29);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_power_modes_channel_t;
+
+typedef union nvg_channel_1_data_u {
+ uint64_t flat;
+ struct nvg_channel_1_data_s {
+ uint32_t perf_per_watt_mode : U(1);
+ uint32_t reserved_31_1 : U(31);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_channel_1_data_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t gpu_ways : U(5);
+ uint32_t reserved_7_5 : U(3);
+ uint32_t gpu_only_ways : U(5);
+ uint32_t reserved_31_13 : U(19);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_ccplex_cache_control_channel_t;
+
+typedef union nvg_channel_2_data_u {
+ uint64_t flat;
+ struct nvg_channel_2_data_s {
+ uint32_t reserved_1_0 : U(2);
+ uint32_t battery_saver_mode : U(1);
+ uint32_t reserved_31_3 : U(29);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_channel_2_data_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t wake_time : U(32);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_wake_time_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t cluster_state : U(3);
+ uint32_t reserved_6_3 : U(4);
+ uint32_t update_cluster : U(1);
+ uint32_t cg_cstate : U(3);
+ uint32_t reserved_14_11 : U(4);
+ uint32_t update_cg : U(1);
+ uint32_t system_cstate : U(4);
+ uint32_t reserved_22_20 : U(3);
+ uint32_t update_system : U(1);
+ uint32_t reserved_30_24 : U(7);
+ uint32_t update_wake_mask : U(1);
+ union {
+ uint32_t flat : U(32);
+ struct {
+ uint32_t vfiq : U(1);
+ uint32_t virq : U(1);
+ uint32_t fiq : U(1);
+ uint32_t irq : U(1);
+ uint32_t serror : U(1);
+ uint32_t reserved_10_5 : U(6);
+ uint32_t fiqout : U(1);
+ uint32_t irqout : U(1);
+ uint32_t reserved_31_13 : U(19);
+ } carmel;
+ } wake_mask;
+ } bits;
+} nvg_cstate_info_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t crossover_value : U(32);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_lower_bound_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t unit_id : U(4);
+ uint32_t reserved_15_4 : U(12);
+ uint32_t stat_id : U(16);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_cstate_stat_query_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t num_cores : U(4);
+ uint32_t reserved_31_4 : U(28);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_num_cores_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t unique_core_id : U(3);
+ uint32_t reserved_31_3 : U(29);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_unique_logical_id_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t lcore0_pcore_id : U(4);
+ uint32_t lcore1_pcore_id : U(4);
+ uint32_t lcore2_pcore_id : U(4);
+ uint32_t lcore3_pcore_id : U(4);
+ uint32_t lcore4_pcore_id : U(4);
+ uint32_t lcore5_pcore_id : U(4);
+ uint32_t lcore6_pcore_id : U(4);
+ uint32_t lcore7_pcore_id : U(4);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_logical_to_physical_mappings_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct nvg_logical_to_mpidr_channel_write_t {
+ uint32_t lcore_id : U(3);
+ uint32_t reserved_31_3 : U(29);
+ uint32_t reserved_63_32 : U(32);
+ } write;
+ struct nvg_logical_to_mpidr_channel_read_t {
+ uint32_t mpidr : U(32);
+ uint32_t reserved_63_32 : U(32);
+ } read;
+} nvg_logical_to_mpidr_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t is_sc7_allowed : U(1);
+ uint32_t reserved_31_1 : U(31);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_is_sc7_allowed_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t core_id : U(4);
+ uint32_t reserved_31_4 : U(28);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_core_online_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t freq_req : U(9);
+ uint32_t reserved_30_9 : U(22);
+ uint32_t enable : U(1);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_cc3_control_channel_t;
+
+typedef enum {
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_ALL = U(0),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_NVDEC = U(1),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_WPR1 = U(2),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_WPR2 = U(3),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_TSECA = U(4),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_TSECB = U(5),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_BPMP = U(6),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_APE = U(7),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_SPE = U(8),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_SCE = U(9),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_APR = U(10),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_TZRAM = U(11),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_IPC_SE_TSEC = U(12),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_BPMP_TO_RCE = U(13),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_BPMP_TO_MCE = U(14),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_SE_SC7 = U(15),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_BPMP_TO_SPE = U(16),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_RCE = U(17),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_CPU_TZ_TO_BPMP = U(18),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_VM_ENCR1 = U(19),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_CPU_NS_TO_BPMP = U(20),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_OEM_SC7 = U(21),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_IPC_SE_SPE_SCE_BPMP = U(22),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_SC7_RESUME_FW = U(23),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_CAMERA_TASKLIST = U(24),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_XUSB = U(25),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_CV = U(26),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_VM_ENCR2 = U(27),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_HYPERVISOR_SW = U(28),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_SMMU_PAGETABLES = U(29),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_30 = U(30),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_31 = U(31),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_TZ_DRAM = U(32),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_NVLINK = U(33),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_SBS = U(34),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_VPR = U(35),
+ TEGRA_NVG_CHANNEL_UPDATE_GSC_LAST_INDEX
+} tegra_nvg_channel_update_gsc_gsc_enum_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t gsc_enum : U(16);
+ uint32_t reserved_31_16 : U(16);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_update_ccplex_gsc_channel_t;
+
+typedef union {
+ uint64_t flat;
+ struct nvg_security_config_channel_t {
+ uint32_t strict_checking_enabled : U(1);
+ uint32_t strict_checking_locked : U(1);
+ uint32_t reserved_31_2 : U(30);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_security_config_t;
+
+typedef union {
+ uint64_t flat;
+ struct nvg_shutdown_channel_t {
+ uint32_t reboot : U(1);
+ uint32_t reserved_31_1 : U(31);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_shutdown_t;
+
+typedef union {
+ uint64_t flat;
+ struct nvg_debug_config_channel_t {
+ uint32_t enter_debug_state_on_mca : U(1);
+ uint32_t reserved_31_1 : U(31);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_debug_config_t;
+
+typedef union {
+ uint64_t flat;
+ struct {
+ uint32_t uncorr : U(1);
+ uint32_t corr : U(1);
+ uint32_t reserved_31_2 : U(30);
+ uint32_t reserved_63_32 : U(32);
+ } bits;
+} nvg_hsm_error_ctrl_channel_t;
+
+extern nvg_debug_config_t nvg_debug_config;
+
+#endif /* T194_NVG_H */
diff --git a/plat/nvidia/tegra/soc/t194/drivers/mce/aarch64/nvg_helpers.S b/plat/nvidia/tegra/soc/t194/drivers/mce/aarch64/nvg_helpers.S
new file mode 100644
index 0000000..3c47208
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/drivers/mce/aarch64/nvg_helpers.S
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl nvg_set_request_data
+ .globl nvg_set_request
+ .globl nvg_get_result
+ .globl nvg_cache_clean
+ .globl nvg_cache_clean_inval
+ .globl nvg_cache_inval_all
+
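+/*
+ * NVG requests are issued through implementation-defined system
+ * registers: the request index is written to s3_0_c15_c1_2 and the
+ * request data/result is exchanged through s3_0_c15_c1_3. The cache
+ * maintenance helpers below read their results from s3_0_c15_c3_5/6/7.
+ */
+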
+/* void nvg_set_request_data(uint64_t req, uint64_t data) */
+func nvg_set_request_data
+ msr s3_0_c15_c1_2, x0
+ msr s3_0_c15_c1_3, x1
+ ret
+endfunc nvg_set_request_data
+
+/* void nvg_set_request(uint64_t req) */
+func nvg_set_request
+ msr s3_0_c15_c1_2, x0
+ ret
+endfunc nvg_set_request
+
+/* uint64_t nvg_get_result(void) */
+func nvg_get_result
+ mrs x0, s3_0_c15_c1_3
+ ret
+endfunc nvg_get_result
+
+/* uint64_t nvg_cache_clean(void) */
+func nvg_cache_clean
+ mrs x0, s3_0_c15_c3_5
+ ret
+endfunc nvg_cache_clean
+
+/* uint64_t nvg_cache_clean_inval(void) */
+func nvg_cache_clean_inval
+ mrs x0, s3_0_c15_c3_6
+ ret
+endfunc nvg_cache_clean_inval
+
+/* uint64_t nvg_cache_inval_all(void) */
+func nvg_cache_inval_all
+ mrs x0, s3_0_c15_c3_7
+ ret
+endfunc nvg_cache_inval_all
\ No newline at end of file
diff --git a/plat/nvidia/tegra/soc/t194/drivers/mce/mce.c b/plat/nvidia/tegra/soc/t194/drivers/mce/mce.c
new file mode 100644
index 0000000..af1c0aa
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/drivers/mce/mce.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/bl_common.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <common/debug.h>
+#include <denver.h>
+#include <mce.h>
+#include <mce_private.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <t194_nvg.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/* Handler to check if MCE firmware is supported */
+static bool mce_firmware_not_supported(void)
+{
+ bool status;
+
+ /* these platforms do not load MCE firmware */
+ status = tegra_platform_is_linsim() || tegra_platform_is_qt() ||
+ tegra_platform_is_virt_dev_kit();
+
+ return status;
+}
+
+/*******************************************************************************
+ * Common handler for all MCE commands
+ ******************************************************************************/
+int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2)
+{
+ int32_t ret = 0;
+
+ switch (cmd) {
+ case (uint64_t)MCE_CMD_ENTER_CSTATE:
+ ret = nvg_enter_cstate((uint32_t)arg0, (uint32_t)arg1);
+ if (ret < 0) {
+ ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
+ }
+
+ break;
+
+ case (uint64_t)MCE_CMD_IS_SC7_ALLOWED:
+ ret = nvg_is_sc7_allowed();
+ if (ret < 0) {
+ ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
+ }
+
+ break;
+
+ case (uint64_t)MCE_CMD_ONLINE_CORE:
+ ret = nvg_online_core((uint32_t)arg0);
+ if (ret < 0) {
+ ERROR("%s: online_core failed(%d)\n", __func__, ret);
+ }
+
+ break;
+
+ default:
+ ERROR("unknown MCE command (%" PRIu64 ")\n", cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
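+
+/*
+ * Illustrative call from the PSCI layer (see plat_psci_handlers.c),
+ * requesting core power state C6 with a caller-provided wake time:
+ *
+ *   (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
+ *                             (uint64_t)TEGRA_NVG_CORE_C6, wake_time, 0U);
+ */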
+
+/*******************************************************************************
+ * Handler to update carveout values for Video Memory Carveout region
+ ******************************************************************************/
+int32_t mce_update_gsc_videomem(void)
+{
+ int32_t ret;
+
+ /*
+ * MCE firmware is not running on simulation platforms.
+ */
+ if (mce_firmware_not_supported()) {
+ ret = -EINVAL;
+ } else {
+ ret = nvg_update_ccplex_gsc((uint32_t)TEGRA_NVG_CHANNEL_UPDATE_GSC_VPR);
+ }
+
+ return ret;
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for TZDRAM aperture
+ ******************************************************************************/
+int32_t mce_update_gsc_tzdram(void)
+{
+ int32_t ret;
+
+ /*
+ * MCE firmware is not running on simulation platforms.
+ */
+ if (mce_firmware_not_supported()) {
+ ret = -EINVAL;
+ } else {
+ ret = nvg_update_ccplex_gsc((uint32_t)TEGRA_NVG_CHANNEL_UPDATE_GSC_TZ_DRAM);
+ }
+
+ return ret;
+}
+
+/*******************************************************************************
+ * Handler to issue the UPDATE_CSTATE_INFO request
+ ******************************************************************************/
+void mce_update_cstate_info(const mce_cstate_info_t *cstate)
+{
+ /* issue the UPDATE_CSTATE_INFO request */
+ nvg_update_cstate_info(cstate->cluster, cstate->ccplex, cstate->system,
+ cstate->wake_mask, cstate->update_wake_mask);
+}
+
+/*******************************************************************************
+ * Handler to read the MCE firmware version and check if it is compatible
+ * with the interface header that BL3-1 was compiled against
+ ******************************************************************************/
+void mce_verify_firmware_version(void)
+{
+ uint64_t version;
+ uint32_t major, minor;
+
+ /*
+ * MCE firmware is not running on simulation platforms.
+ */
+ if (mce_firmware_not_supported()) {
+ return;
+ }
+
+ /*
+ * Read the MCE firmware version and extract the major and minor
+ * version fields
+ */
+ version = nvg_get_version();
+ minor = (uint32_t)version;
+ major = (uint32_t)(version >> 32);
+
+ INFO("MCE Version - HW=%u:%u, SW=%u:%u\n", major, minor,
+ TEGRA_NVG_VERSION_MAJOR, TEGRA_NVG_VERSION_MINOR);
+
+ /*
+ * Verify that the MCE firmware version and the interface header
+ * match
+ */
+ if (major != (uint32_t)TEGRA_NVG_VERSION_MAJOR) {
+ ERROR("MCE major version mismatch\n");
+ panic();
+ }
+
+ if (minor < (uint32_t)TEGRA_NVG_VERSION_MINOR) {
+ ERROR("MCE minor version mismatch\n");
+ panic();
+ }
+}
+
+#if ENABLE_STRICT_CHECKING_MODE
+/*******************************************************************************
+ * Handler to enable the strict checking mode
+ ******************************************************************************/
+void mce_enable_strict_checking(void)
+{
+ uint64_t sctlr = read_sctlr_el3();
+ int32_t ret = 0;
+
+ if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
+ /*
+		 * Step 1: TZ-DRAM and TZRAM should be set up before the MMU is
+ * enabled.
+ *
+ * The common code makes sure that TZDRAM/TZRAM are already
+ * enabled before calling into this handler. If this is not the
+ * case, the following sequence must be executed before moving
+ * on to step 2.
+ *
+ * tlbialle1is();
+ * tlbialle3is();
+ * dsbsy();
+ * isb();
+ *
+ */
+ if ((sctlr & (uint64_t)SCTLR_M_BIT) == (uint64_t)SCTLR_M_BIT) {
+ tlbialle1is();
+ tlbialle3is();
+ dsbsy();
+ isb();
+ }
+
+ /*
+		 * Step 2: SCF flush - Clean and invalidate caches and clear the
+ * TR-bits
+ */
+ ret = nvg_roc_clean_cache_trbits();
+ if (ret < 0) {
+ ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
+ ret);
+ return;
+ }
+
+ /*
+		 * Step 3: Issue the SECURITY_CONFIG request to MCE to enable
+ * strict checking mode.
+ */
+ nvg_enable_strict_checking_mode();
+ }
+}
+
+void mce_verify_strict_checking(void)
+{
+ bool is_silicon = tegra_platform_is_silicon();
+ bool is_fpga = tegra_platform_is_fpga();
+
+ if (is_silicon || is_fpga) {
+ nvg_verify_strict_checking_mode();
+ }
+}
+#endif
+
+/*******************************************************************************
+ * Handler to power down the entire system
+ ******************************************************************************/
+void mce_system_shutdown(void)
+{
+ nvg_system_shutdown();
+}
+
+/*******************************************************************************
+ * Handler to reboot the entire system
+ ******************************************************************************/
+void mce_system_reboot(void)
+{
+ nvg_system_reboot();
+}
+
+/*******************************************************************************
+ * Handler to clear CCPLEX->HSM correctable RAS error signal.
+ ******************************************************************************/
+void mce_clear_hsm_corr_status(void)
+{
+ nvg_clear_hsm_corr_status();
+}
diff --git a/plat/nvidia/tegra/soc/t194/drivers/mce/nvg.c b/plat/nvidia/tegra/soc/t194/drivers/mce/nvg.c
new file mode 100644
index 0000000..f76ab14
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/drivers/mce/nvg.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <denver.h>
+#include <lib/mmio.h>
+
+#include <mce_private.h>
+#include <platform_def.h>
+#include <t194_nvg.h>
+#include <tegra_private.h>
+
+#define ID_AFR0_EL1_CACHE_OPS_SHIFT U(12)
+#define ID_AFR0_EL1_CACHE_OPS_MASK U(0xF)
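+
+/*
+ * Bits [15:12] of ID_AFR0_EL1 indicate whether cache maintenance
+ * operations through MTS are supported; nvg_roc_clean_cache_trbits()
+ * checks this field before issuing the cache clean and invalidate request.
+ */
+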
+/*
+ * Reports the major and minor version of this interface.
+ *
+ * NVGDATA[0:31]: SW(R) Minor Version
+ * NVGDATA[32:63]: SW(R) Major Version
+ */
+uint64_t nvg_get_version(void)
+{
+ nvg_set_request((uint64_t)TEGRA_NVG_CHANNEL_VERSION);
+
+ return (uint64_t)nvg_get_result();
+}
+
+/*
+ * Set the expected wake time in TSC ticks for the next low-power state the
+ * core enters.
+ *
+ * NVGDATA[0:31]: SW(RW), WAKE_TIME
+ */
+void nvg_set_wake_time(uint32_t wake_time)
+{
+ /* time (TSC ticks) until the core is expected to get a wake event */
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_WAKE_TIME, (uint64_t)wake_time);
+}
+
+/*
+ * This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
+ * SYSTEM_CSTATE values.
+ *
+ * NVGDATA[0:2]: SW(RW), CLUSTER_CSTATE
+ * NVGDATA[7]: SW(W), update cluster flag
+ * NVGDATA[8:10]: SW(RW), CG_CSTATE
+ * NVGDATA[15]: SW(W), update ccplex flag
+ * NVGDATA[16:19]: SW(RW), SYSTEM_CSTATE
+ * NVGDATA[23]: SW(W), update system flag
+ * NVGDATA[31]: SW(W), update wake mask flag
+ * NVGDATA[32:63]: SW(RW), WAKE_MASK
+ */
+void nvg_update_cstate_info(uint32_t cluster, uint32_t ccplex,
+ uint32_t system, uint32_t wake_mask, uint8_t update_wake_mask)
+{
+ uint64_t val = 0;
+
+ /* update CLUSTER_CSTATE? */
+ if (cluster != 0U) {
+ val |= ((uint64_t)cluster & CLUSTER_CSTATE_MASK) |
+ CLUSTER_CSTATE_UPDATE_BIT;
+ }
+
+ /* update CCPLEX_CSTATE? */
+ if (ccplex != 0U) {
+ val |= (((uint64_t)ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
+ CCPLEX_CSTATE_UPDATE_BIT;
+ }
+
+ /* update SYSTEM_CSTATE? */
+ if (system != 0U) {
+ val |= (((uint64_t)system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+ SYSTEM_CSTATE_UPDATE_BIT;
+ }
+
+ /* update wake mask value? */
+ if (update_wake_mask != 0U) {
+ val |= CSTATE_WAKE_MASK_UPDATE_BIT;
+ }
+
+ /* set the wake mask */
+ val |= ((uint64_t)wake_mask & CSTATE_WAKE_MASK_CLEAR) << CSTATE_WAKE_MASK_SHIFT;
+
+ /* set the updated cstate info */
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
+}
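+
+/*
+ * Illustrative usage (not part of the interface contract): to allow the
+ * cluster to enter CC6 with an updated wake mask, a caller would issue
+ *
+ *   nvg_update_cstate_info(TEGRA_NVG_CLUSTER_CC6, 0U, 0U, wake_mask, 1U);
+ *
+ * mce_update_cstate_info() in mce.c wraps this function for the PSCI
+ * handlers.
+ */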
+
+/*
+ * Return a non-zero value if the CCPLEX is able to enter SC7
+ *
+ * NVGDATA[0]: SW(R), Is allowed result
+ */
+int32_t nvg_is_sc7_allowed(void)
+{
+ /* issue command to check if SC7 is allowed */
+ nvg_set_request((uint64_t)TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED);
+
+ /* 1 = SC7 allowed, 0 = SC7 not allowed */
+ return (int32_t)nvg_get_result();
+}
+
+/*
+ * Wake an offlined logical core. Note that a core is offlined by entering
+ * a C-state where the WAKE_MASK is all 0.
+ *
+ * NVGDATA[0:3]: SW(W) logical core to online
+ */
+int32_t nvg_online_core(uint32_t core)
+{
+ int32_t ret = 0;
+
+ /* sanity check the core ID value */
+	if (core >= (uint32_t)PLATFORM_CORE_COUNT) {
+		ERROR("%s: unknown core id (%u)\n", __func__, core);
+ ret = -EINVAL;
+ } else {
+ /* get a core online */
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_ONLINE_CORE,
+ (uint64_t)core & MCE_CORE_ID_MASK);
+ }
+
+ return ret;
+}
+
+/*
+ * MC GSC (General Security Carveout) register values are expected to be
+ * changed by TrustZone ARM code after boot.
+ *
+ * NVGDATA[0:15] SW(R) GSC enum
+ */
+int32_t nvg_update_ccplex_gsc(uint32_t gsc_idx)
+{
+ int32_t ret = 0;
+
+ /* sanity check GSC ID */
+ if (gsc_idx > (uint32_t)TEGRA_NVG_CHANNEL_UPDATE_GSC_VPR) {
+ ERROR("%s: unknown gsc_idx (%u)\n", __func__, gsc_idx);
+ ret = -EINVAL;
+ } else {
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_UPDATE_CCPLEX_GSC,
+ (uint64_t)gsc_idx);
+ }
+
+ return ret;
+}
+
+/*
+ * Cache clean and invalidate, clear TR-bit operation for all CCPLEX caches.
+ */
+int32_t nvg_roc_clean_cache_trbits(void)
+{
+ int32_t ret = 0;
+
+ /* check if cache flush through mts is supported */
+ if (((read_id_afr0_el1() >> ID_AFR0_EL1_CACHE_OPS_SHIFT) &
+ ID_AFR0_EL1_CACHE_OPS_MASK) == 1U) {
+ if (nvg_cache_inval_all() == 0U) {
+ ERROR("%s: failed\n", __func__);
+ ret = -ENODEV;
+ }
+ } else {
+ ret = -ENOTSUP;
+ }
+
+ return ret;
+}
+
+/*
+ * Set the power state for a core
+ */
+int32_t nvg_enter_cstate(uint32_t state, uint32_t wake_time)
+{
+ int32_t ret = 0;
+ uint64_t val = 0ULL;
+
+ /* check for allowed power state */
+ if ((state != (uint32_t)TEGRA_NVG_CORE_C0) &&
+ (state != (uint32_t)TEGRA_NVG_CORE_C1) &&
+ (state != (uint32_t)TEGRA_NVG_CORE_C6) &&
+	    (state != (uint32_t)TEGRA_NVG_CORE_C7)) {
+ ERROR("%s: unknown cstate (%u)\n", __func__, state);
+ ret = -EINVAL;
+ } else {
+ /* time (TSC ticks) until the core is expected to get a wake event */
+ nvg_set_wake_time(wake_time);
+
+ /* set the core cstate */
+ val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;
+ write_actlr_el1(val | (uint64_t)state);
+ }
+
+ return ret;
+}
+
+#if ENABLE_STRICT_CHECKING_MODE
+/*
+ * Enable strict checking mode
+ *
+ * NVGDATA[3] strict_check ON + lock
+ */
+void nvg_enable_strict_checking_mode(void)
+{
+ uint64_t params = (uint64_t)(STRICT_CHECKING_ENABLED_SET |
+ STRICT_CHECKING_LOCKED_SET);
+
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_SECURITY_CONFIG, params);
+}
+
+void nvg_verify_strict_checking_mode(void)
+{
+ uint64_t params = (uint64_t)(STRICT_CHECKING_ENABLED_SET |
+ STRICT_CHECKING_LOCKED_SET);
+
+ nvg_set_request((uint64_t)TEGRA_NVG_CHANNEL_SECURITY_CONFIG);
+ assert(params == (uint64_t)nvg_get_result());
+}
+#endif
+
+/*
+ * Request a reboot
+ *
+ * NVGDATA[0]: reboot command
+ */
+void nvg_system_reboot(void)
+{
+ /* issue command for reboot */
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_SHUTDOWN,
+ (uint64_t)TEGRA_NVG_REBOOT);
+}
+
+/*
+ * Request a shutdown
+ *
+ * NVGDATA[0]: shutdown command
+ */
+void nvg_system_shutdown(void)
+{
+ /* issue command for shutdown */
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_SHUTDOWN,
+ (uint64_t)TEGRA_NVG_SHUTDOWN);
+}
+
+/*
+ * Request to clear CCPLEX->HSM correctable error signal.
+ * NVGDATA[1]: A write of 1 clears the CCPLEX->HSM correctable error signal,
+ * A write of 0 has no effect.
+ */
+void nvg_clear_hsm_corr_status(void)
+{
+ nvg_hsm_error_ctrl_channel_t status = { .bits = { .corr = 1U, }, };
+
+ nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_HSM_ERROR_CTRL, status.flat);
+}
diff --git a/plat/nvidia/tegra/soc/t194/drivers/se/se.c b/plat/nvidia/tegra/soc/t194/drivers/se/se.c
new file mode 100644
index 0000000..31b0e26
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/drivers/se/se.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <bpmp_ipc.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <se.h>
+#include <tegra_platform.h>
+
+#include "se_private.h"
+
+/*******************************************************************************
+ * Constants and Macros
+ ******************************************************************************/
+#define ERR_STATUS_SW_CLEAR U(0xFFFFFFFF)
+#define INT_STATUS_SW_CLEAR U(0xFFFFFFFF)
+#define MAX_TIMEOUT_MS U(1000) /* Max. timeout of 1s */
+#define NUM_SE_REGS_TO_SAVE U(4)
+
+#define BYTES_IN_WORD U(4)
+#define SHA256_MAX_HASH_RESULT U(7)
+#define SHA256_DST_SIZE U(32)
+#define SHA_FIRST_OP U(1)
+#define MAX_SHA_ENGINE_CHUNK_SIZE U(0xFFFFFF)
+#define SHA256_MSG_LENGTH_ONETIME U(0xFFFF)
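+
+/*
+ * A single SHA engine operation is limited to MAX_SHA_ENGINE_CHUNK_SIZE
+ * bytes: SE0_IN_HI_ADDR_HI packs the upper 8 bits of the source address
+ * together with a 24-bit byte count; see tegra_se_start_normal_operation().
+ */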
+
+/*******************************************************************************
+ * Data structure and global variables
+ ******************************************************************************/
+static uint32_t se_regs[NUM_SE_REGS_TO_SAVE];
+
+/*
+ * Check that SE operation has completed after kickoff.
+ *
+ * This function is invoked after an SE operation has been started,
+ * and it checks the following conditions:
+ *
+ * 1. SE_STATUS = IDLE
+ * 2. AHB bus data transfer is complete.
+ * 3. SE_ERR_STATUS is clean.
+ */
+static bool tegra_se_is_operation_complete(void)
+{
+ uint32_t val = 0, timeout = 0, sha_status, aes_status;
+ int32_t ret = 0;
+ bool se_is_busy, txn_has_errors, txn_successful;
+
+ /*
+ * Poll the status register to check if the operation
+ * completed.
+ */
+ do {
+ val = tegra_se_read_32(CTX_SAVE_AUTO_STATUS);
+ se_is_busy = ((val & CTX_SAVE_AUTO_SE_BUSY) != 0U);
+
+ /* sleep until SE finishes */
+ if (se_is_busy) {
+ mdelay(1);
+ timeout++;
+ }
+
+ } while (se_is_busy && (timeout < MAX_TIMEOUT_MS));
+
+ /* any transaction errors? */
+ txn_has_errors = (tegra_se_read_32(SHA_ERR_STATUS) != 0U) ||
+ (tegra_se_read_32(AES0_ERR_STATUS) != 0U);
+
+ /* transaction successful? */
+ sha_status = tegra_se_read_32(SHA_INT_STATUS) & SHA_SE_OP_DONE;
+ aes_status = tegra_se_read_32(AES0_INT_STATUS) & AES0_SE_OP_DONE;
+ txn_successful = (sha_status == SHA_SE_OP_DONE) &&
+ (aes_status == AES0_SE_OP_DONE);
+
+ if ((timeout == MAX_TIMEOUT_MS) || txn_has_errors || !txn_successful) {
+ ERROR("%s: Atomic context save operation failed!\n",
+ __func__);
+ ret = -ECANCELED;
+ }
+
+ return (ret == 0);
+}
+
+/*
+ * Wait for SE engine to be idle and clear any pending interrupts, before
+ * starting the next SE operation.
+ */
+static bool tegra_se_is_ready(void)
+{
+ int32_t ret = 0;
+ uint32_t val = 0, timeout = 0;
+ bool se_is_ready;
+
+ /* Wait for previous operation to finish */
+ do {
+ val = tegra_se_read_32(CTX_SAVE_AUTO_STATUS);
+ se_is_ready = (val == CTX_SAVE_AUTO_SE_READY);
+
+ /* sleep until SE is ready */
+ if (!se_is_ready) {
+ mdelay(1);
+ timeout++;
+ }
+
+ } while (!se_is_ready && (timeout < MAX_TIMEOUT_MS));
+
+ if (timeout == MAX_TIMEOUT_MS) {
+ ERROR("%s: SE is not ready!\n", __func__);
+ ret = -ETIMEDOUT;
+ }
+
+ /* Clear any pending interrupts from previous operation */
+ tegra_se_write_32(AES0_INT_STATUS, INT_STATUS_SW_CLEAR);
+ tegra_se_write_32(AES1_INT_STATUS, INT_STATUS_SW_CLEAR);
+ tegra_se_write_32(RSA_INT_STATUS, INT_STATUS_SW_CLEAR);
+ tegra_se_write_32(SHA_INT_STATUS, INT_STATUS_SW_CLEAR);
+
+ /* Clear error status for each engine seen from current port */
+ tegra_se_write_32(AES0_ERR_STATUS, ERR_STATUS_SW_CLEAR);
+ tegra_se_write_32(AES1_ERR_STATUS, ERR_STATUS_SW_CLEAR);
+ tegra_se_write_32(RSA_ERR_STATUS, ERR_STATUS_SW_CLEAR);
+ tegra_se_write_32(SHA_ERR_STATUS, ERR_STATUS_SW_CLEAR);
+
+ return (ret == 0);
+}
+
+/*
+ * During System Suspend, this handler triggers the hardware context
+ * save operation.
+ */
+static int32_t tegra_se_save_context(void)
+{
+ int32_t ret = -ECANCELED;
+
+ /*
+	 * 1. Ensure all SE drivers, including RNG1/PKA1, are shut down.
+ * TSEC/R5s are powergated/idle. All tasks on SE1~SE4, RNG1,
+ * PKA1 are wrapped up. SE0 is ready for use.
+ * 2. Clear interrupt/error in SE0 status register.
+ * 3. Scrub SE0 register to avoid false failure for illegal
+ * configuration. Probably not needed, dependent on HW
+ * implementation.
+ * 4. Check SE is ready for HW CTX_SAVE by polling
+ * SE_CTX_SAVE_AUTO_STATUS.SE_READY.
+ *
+ * Steps 1-4 are executed by tegra_se_is_ready().
+ *
+ * 5. Issue context save command.
+	 * 6. Check that SE is busy with CTX_SAVE, i.e. the command in step 5
+	 *	was not dropped due to ongoing traffic on any SE port/engine.
+ * 7. Poll SE register or wait for SE APB interrupt for task completion
+ * a. Polling: Read SE_CTX_SAVE_AUTO_STATUS.BUSY till it reports IDLE
+ * b. Interrupt: After receiving interrupt from SE APB, read
+ * SE_CTX_SAVE_AUTO_STATUS.BUSY till it reports IDLE.
+ * 8. Check AES0 and SHA ERR_STATUS to ensure no error case.
+ * 9. Check AES0 and SHA INT_STATUS to ensure operation has successfully
+ * completed.
+ *
+ * Steps 6-9 are executed by tegra_se_is_operation_complete().
+ */
+ if (tegra_se_is_ready()) {
+
+ /* Issue context save command */
+ tegra_se_write_32(AES0_OPERATION, SE_OP_CTX_SAVE);
+
+ /* Wait for operation to finish */
+ if (tegra_se_is_operation_complete()) {
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Check that SE operation has completed after kickoff
+ * This function is invoked after an SE operation has been started,
+ * and it checks the following conditions:
+ * 1. SE0_INT_STATUS = SE0_OP_DONE
+ * 2. SE0_STATUS = IDLE
+ * 3. SE0_ERR_STATUS is clean.
+ */
+static int32_t tegra_se_sha256_hash_operation_complete(void)
+{
+ uint32_t val = 0U;
+
+ /* Poll the SE interrupt register to ensure H/W operation complete */
+	do {
+		val = tegra_se_read_32(SE0_INT_STATUS_REG_OFFSET);
+	} while (SE0_INT_OP_DONE(val) == SE0_INT_OP_DONE_CLEAR);
+
+ /* Poll the SE status idle to ensure H/W operation complete */
+	do {
+		val = tegra_se_read_32(SE0_SHA_STATUS_0);
+	} while (val != SE0_SHA_STATUS_IDLE);
+
+ /* Ensure that no errors are thrown during operation */
+ val = tegra_se_read_32(SE0_ERR_STATUS_REG_OFFSET);
+ if (val != 0U) {
+		ERROR("%s: error during SE operation! 0x%x\n", __func__,
+ val);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+/*
+ * Security engine primitive normal operations
+ */
+static int32_t tegra_se_start_normal_operation(uint64_t src_addr,
+ uint32_t nbytes, uint32_t last_buf, uint32_t src_len_inbytes)
+{
+ uint32_t val = 0U;
+ uint32_t src_in_lo;
+ uint32_t src_in_msb;
+ uint32_t src_in_hi;
+ int32_t ret = 0;
+
+	if ((src_addr == 0ULL) || (nbytes == 0U)) {
+		return -EINVAL;
+	}
+
+ src_in_lo = (uint32_t)src_addr;
+ src_in_msb = (uint32_t)((src_addr >> 32U) & 0xFFU);
+ src_in_hi = ((src_in_msb << SE0_IN_HI_ADDR_HI_0_MSB_SHIFT) |
+ (nbytes & MAX_SHA_ENGINE_CHUNK_SIZE));
+
+ /* set SRC_IN_ADDR_LO and SRC_IN_ADDR_HI*/
+ tegra_se_write_32(SE0_IN_ADDR, src_in_lo);
+ tegra_se_write_32(SE0_IN_HI_ADDR_HI, src_in_hi);
+
+ val = tegra_se_read_32(SE0_INT_STATUS_REG_OFFSET);
+ if (val > 0U) {
+ tegra_se_write_32(SE0_INT_STATUS_REG_OFFSET, 0x0U);
+ }
+
+ /* Enable SHA interrupt for SE0 Operation */
+ tegra_se_write_32(SE0_SHA_INT_ENABLE, 0x1aU);
+
+ /* flush to DRAM for SE to use the updated contents */
+ flush_dcache_range(src_addr, src_len_inbytes);
+
+ /* Start SHA256 operation */
+ if (last_buf == 1U) {
+ tegra_se_write_32(SE0_OPERATION_REG_OFFSET, SE0_OP_START |
+ SE0_UNIT_OPERATION_PKT_LASTBUF_FIELD);
+ } else {
+ tegra_se_write_32(SE0_OPERATION_REG_OFFSET, SE0_OP_START);
+ }
+
+ return ret;
+}
+
+static int32_t tegra_se_calculate_sha256_hash(uint64_t src_addr,
+ uint32_t src_len_inbyte)
+{
+ uint32_t val, last_buf, i;
+ int32_t ret = 0;
+ uint32_t operations;
+ uint64_t src_len_inbits;
+ uint32_t len_bits_msb;
+ uint32_t len_bits_lsb;
+ uint32_t number_of_operations, max_bytes, bytes_left, remaining_bytes;
+
+ if (src_len_inbyte > MAX_SHA_ENGINE_CHUNK_SIZE) {
+ ERROR("SHA input chunk size too big: 0x%x\n", src_len_inbyte);
+ return -EINVAL;
+ }
+
+ if (src_addr == 0ULL) {
+ return -EINVAL;
+ }
+
+ /* number of bytes per operation */
+ max_bytes = (SHA256_HASH_SIZE_BYTES * SHA256_MSG_LENGTH_ONETIME);
+
+	src_len_inbits = (uint64_t)src_len_inbyte * 8U;
+ len_bits_msb = (uint32_t)(src_len_inbits >> 32U);
+ len_bits_lsb = (uint32_t)src_len_inbits;
+
+ /* program SE0_CONFIG for SHA256 operation */
+ val = (uint32_t)(SE0_CONFIG_ENC_ALG_SHA | SE0_CONFIG_ENC_MODE_SHA256 |
+ SE0_CONFIG_DEC_ALG_NOP | SE0_CONFIG_DST_HASHREG);
+ tegra_se_write_32(SE0_SHA_CONFIG, val);
+
+ /* set SE0_SHA_MSG_LENGTH registers */
+ tegra_se_write_32(SE0_SHA_MSG_LENGTH_0, len_bits_lsb);
+ tegra_se_write_32(SE0_SHA_MSG_LEFT_0, len_bits_lsb);
+ tegra_se_write_32(SE0_SHA_MSG_LENGTH_1, len_bits_msb);
+
+ /* zero out unused SE0_SHA_MSG_LENGTH and SE0_SHA_MSG_LEFT */
+ tegra_se_write_32(SE0_SHA_MSG_LENGTH_2, 0U);
+ tegra_se_write_32(SE0_SHA_MSG_LENGTH_3, 0U);
+ tegra_se_write_32(SE0_SHA_MSG_LEFT_1, 0U);
+ tegra_se_write_32(SE0_SHA_MSG_LEFT_2, 0U);
+ tegra_se_write_32(SE0_SHA_MSG_LEFT_3, 0U);
+
+ number_of_operations = (src_len_inbyte / max_bytes);
+ remaining_bytes = (src_len_inbyte % max_bytes);
+ if (remaining_bytes > 0U) {
+ number_of_operations += 1U;
+ }
+
+ /*
+ * 1. Operations == 1: program SE0_SHA_TASK register to initiate SHA256
+ * hash generation by setting
+ * 1(SE0_SHA_CONFIG_HW_INIT_HASH) to SE0_SHA_TASK
+ * and start SHA256-normal operation.
+ * 2. 1 < Operations < number_of_operations: program SE0_SHA_TASK to
+ * 0(SE0_SHA_CONFIG_HW_INIT_HASH_DISABLE) to load
+ * intermediate SHA256 digest result from
+ * HASH_RESULT register to continue SHA256
+ * generation and start SHA256-normal operation.
+ * 3. Operations == number_of_operations: continue with step 2 and set
+ * max_bytes to bytes_left to process final
+ * hash-result generation and start SHA256-normal
+ * operation.
+ */
+ bytes_left = src_len_inbyte;
+ for (operations = 1U; operations <= number_of_operations;
+ operations++) {
+ if (operations == SHA_FIRST_OP) {
+ val = SE0_SHA_CONFIG_HW_INIT_HASH;
+ } else {
+ /* Load intermediate SHA digest result to
+ * SHA:HASH_RESULT(0..7) to continue the SHA
+ * calculation and tell the SHA engine to use it.
+ */
+ for (i = 0U; (i / BYTES_IN_WORD) <=
+ SHA256_MAX_HASH_RESULT; i += BYTES_IN_WORD) {
+ val = tegra_se_read_32(SE0_SHA_HASH_RESULT_0 +
+ i);
+ tegra_se_write_32(SE0_SHA_HASH_RESULT_0 + i,
+ val);
+ }
+ val = SE0_SHA_CONFIG_HW_INIT_HASH_DISABLE;
+ if (len_bits_lsb <= (max_bytes * 8U)) {
+ len_bits_lsb = (remaining_bytes * 8U);
+ } else {
+ len_bits_lsb -= (max_bytes * 8U);
+ }
+ tegra_se_write_32(SE0_SHA_MSG_LEFT_0, len_bits_lsb);
+ }
+ tegra_se_write_32(SE0_SHA_TASK_CONFIG, val);
+
+ max_bytes = (SHA256_HASH_SIZE_BYTES *
+ SHA256_MSG_LENGTH_ONETIME);
+ if (bytes_left < max_bytes) {
+ max_bytes = bytes_left;
+ last_buf = 1U;
+ } else {
+ bytes_left = bytes_left - max_bytes;
+ last_buf = 0U;
+ }
+ /* start operation */
+ ret = tegra_se_start_normal_operation(src_addr, max_bytes,
+ last_buf, src_len_inbyte);
+ if (ret != 0) {
+			ERROR("Error during SE operation! 0x%x\n", ret);
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int32_t tegra_se_save_sha256_pmc_scratch(void)
+{
+ uint32_t val = 0U, hash_offset = 0U, scratch_offset = 0U;
+ int32_t ret;
+
+ /* Check SE0 operation status */
+ ret = tegra_se_sha256_hash_operation_complete();
+ if (ret != 0) {
+		ERROR("SE operation completion check failed! 0x%x\n", ret);
+ return ret;
+ }
+
+ for (scratch_offset = SECURE_SCRATCH_TZDRAM_SHA256_HASH_START;
+ scratch_offset <= SECURE_SCRATCH_TZDRAM_SHA256_HASH_END;
+ scratch_offset += BYTES_IN_WORD) {
+ val = tegra_se_read_32(SE0_SHA_HASH_RESULT_0 + hash_offset);
+ mmio_write_32((uint32_t)(TEGRA_SCRATCH_BASE + scratch_offset),
+ val);
+ hash_offset += BYTES_IN_WORD;
+ }
+ return 0;
+}
+
+/*
+ * Handler to generate SHA256 and save HASH-result to pmc-scratch register
+ */
+int32_t tegra_se_calculate_save_sha256(uint64_t src_addr,
+ uint32_t src_len_inbyte)
+{
+ uint32_t security;
+ int32_t val = 0;
+
+	/* Set SE_SOFT_SETTINGS=SE_SECURE to prevent non-secure software from
+	 * changing SE registers.
+ */
+ security = tegra_se_read_32(SE0_SECURITY);
+ tegra_se_write_32(SE0_SECURITY, security | SE0_SECURITY_SE_SOFT_SETTING);
+
+	/* The BootROM enables the IN_ID bit in the SE0_SHA_GSCID_0 register
+	 * during SC7 exit, causing SE0 to ignore SE0 operations and the second
+	 * iteration of the SC7 cycle to fail.
+ */
+ tegra_se_write_32(SE0_SHA_GSCID_0, 0x0U);
+
+ /* Calculate SHA256 of BL31 */
+ val = tegra_se_calculate_sha256_hash(src_addr, src_len_inbyte);
+ if (val != 0) {
+ ERROR("%s: SHA256 generation failed\n", __func__);
+ return val;
+ }
+
+ /*
+ * Reset SE_SECURE to previous value.
+ */
+ tegra_se_write_32(SE0_SECURITY, security);
+
+ /* copy sha256_dst to PMC Scratch register */
+ val = tegra_se_save_sha256_pmc_scratch();
+ if (val != 0) {
+ ERROR("%s: SE0 status Error.\n", __func__);
+ }
+
+ return val;
+}
+
+/*
+ * Handler to power down the SE hardware blocks - SE, RNG1 and PKA1. This
+ * needs to be called only during System Suspend.
+ */
+int32_t tegra_se_suspend(void)
+{
+ int32_t ret = 0;
+
+ /* initialise communication channel with BPMP */
+	ret = tegra_bpmp_ipc_init();
+	assert(ret == 0);
+
+ /* Enable SE clock before SE context save */
+ ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE);
+ assert(ret == 0);
+
+ /* save SE registers */
+ se_regs[0] = mmio_read_32(TEGRA_SE0_BASE + SE0_MUTEX_WATCHDOG_NS_LIMIT);
+ se_regs[1] = mmio_read_32(TEGRA_SE0_BASE + SE0_AES0_ENTROPY_SRC_AGE_CTRL);
+ se_regs[2] = mmio_read_32(TEGRA_RNG1_BASE + RNG1_MUTEX_WATCHDOG_NS_LIMIT);
+ se_regs[3] = mmio_read_32(TEGRA_PKA1_BASE + PKA1_MUTEX_WATCHDOG_NS_LIMIT);
+
+ /* Save SE context. The BootROM restores it during System Resume */
+ ret = tegra_se_save_context();
+ if (ret != 0) {
+ ERROR("%s: context save failed (%d)\n", __func__, ret);
+ }
+
+ /* Disable SE clock after SE context save */
+ ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE);
+ assert(ret == 0);
+
+ return ret;
+}
+
+/*
+ * Handler to power up the SE hardware block(s) during System Resume.
+ */
+void tegra_se_resume(void)
+{
+ int32_t ret = 0;
+
+ /* initialise communication channel with BPMP */
+	ret = tegra_bpmp_ipc_init();
+	assert(ret == 0);
+
+ /* Enable SE clock before SE context restore */
+ ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE);
+ assert(ret == 0);
+
+ /*
+ * When TZ takes over after System Resume, TZ should first reconfigure
+ * SE_MUTEX_WATCHDOG_NS_LIMIT, PKA1_MUTEX_WATCHDOG_NS_LIMIT,
+ * RNG1_MUTEX_WATCHDOG_NS_LIMIT and SE_ENTROPY_SRC_AGE_CTRL before
+ * other operations.
+ */
+ mmio_write_32(TEGRA_SE0_BASE + SE0_MUTEX_WATCHDOG_NS_LIMIT, se_regs[0]);
+ mmio_write_32(TEGRA_SE0_BASE + SE0_AES0_ENTROPY_SRC_AGE_CTRL, se_regs[1]);
+ mmio_write_32(TEGRA_RNG1_BASE + RNG1_MUTEX_WATCHDOG_NS_LIMIT, se_regs[2]);
+ mmio_write_32(TEGRA_PKA1_BASE + PKA1_MUTEX_WATCHDOG_NS_LIMIT, se_regs[3]);
+
+ /* Disable SE clock after SE context restore */
+ ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE);
+ assert(ret == 0);
+}
diff --git a/plat/nvidia/tegra/soc/t194/drivers/se/se_private.h b/plat/nvidia/tegra/soc/t194/drivers/se/se_private.h
new file mode 100644
index 0000000..fc118aa
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/drivers/se/se_private.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SE_PRIVATE_H
+#define SE_PRIVATE_H
+
+#include <lib/utils_def.h>
+#include <tegra_def.h>
+
+/* SE0 security register */
+#define SE0_SECURITY U(0x18)
+#define SE0_SECURITY_SE_SOFT_SETTING (((uint32_t)1) << 16U)
+
+/* SE0 SHA GSCID register */
+#define SE0_SHA_GSCID_0 U(0x100)
+
+/* SE0 config register */
+#define SE0_SHA_CONFIG U(0x104)
+#define SE0_SHA_TASK_CONFIG U(0x108)
+#define SE0_SHA_CONFIG_HW_INIT_HASH (((uint32_t)1) << 0U)
+#define SE0_SHA_CONFIG_HW_INIT_HASH_DISABLE U(0)
+
+#define SE0_CONFIG_ENC_ALG_SHIFT U(12)
+#define SE0_CONFIG_ENC_ALG_SHA \
+ (((uint32_t)3) << SE0_CONFIG_ENC_ALG_SHIFT)
+#define SE0_CONFIG_DEC_ALG_SHIFT U(8)
+#define SE0_CONFIG_DEC_ALG_NOP \
+ (((uint32_t)0) << SE0_CONFIG_DEC_ALG_SHIFT)
+#define SE0_CONFIG_DST_SHIFT U(2)
+#define SE0_CONFIG_DST_HASHREG \
+ (((uint32_t)1) << SE0_CONFIG_DST_SHIFT)
+#define SHA256_HASH_SIZE_BYTES U(256)
+
+#define SE0_CONFIG_ENC_MODE_SHIFT U(24)
+#define SE0_CONFIG_ENC_MODE_SHA256 \
+ (((uint32_t)5) << SE0_CONFIG_ENC_MODE_SHIFT)
+
+/* SHA input message length */
+#define SE0_IN_ADDR U(0x10c)
+#define SE0_IN_HI_ADDR_HI U(0x110)
+#define SE0_IN_HI_ADDR_HI_0_MSB_SHIFT U(24)
+
+/* SHA input message length */
+#define SE0_SHA_MSG_LENGTH_0 U(0x11c)
+#define SE0_SHA_MSG_LENGTH_1 U(0x120)
+#define SE0_SHA_MSG_LENGTH_2 U(0x124)
+#define SE0_SHA_MSG_LENGTH_3 U(0x128)
+
+/* SHA input message left */
+#define SE0_SHA_MSG_LEFT_0 U(0x12c)
+#define SE0_SHA_MSG_LEFT_1 U(0x130)
+#define SE0_SHA_MSG_LEFT_2 U(0x134)
+#define SE0_SHA_MSG_LEFT_3 U(0x138)
+
+/* SE HASH-RESULT */
+#define SE0_SHA_HASH_RESULT_0 U(0x13c)
+
+/* SE OPERATION */
+#define SE0_OPERATION_REG_OFFSET U(0x17c)
+#define SE0_UNIT_OPERATION_PKT_LASTBUF_SHIFT U(16)
+#define SE0_UNIT_OPERATION_PKT_LASTBUF_FIELD \
+ ((uint32_t)0x1 << SE0_UNIT_OPERATION_PKT_LASTBUF_SHIFT)
+#define SE0_OPERATION_SHIFT U(0)
+#define SE0_OP_START \
+ (((uint32_t)0x1) << SE0_OPERATION_SHIFT)
+
+/* SE Interrupt */
+#define SE0_SHA_INT_ENABLE U(0x180)
+
+#define SE0_INT_STATUS_REG_OFFSET U(0x184)
+#define SE0_INT_OP_DONE_SHIFT U(4)
+#define SE0_INT_OP_DONE_CLEAR \
+ (((uint32_t)0U) << SE0_INT_OP_DONE_SHIFT)
+#define SE0_INT_OP_DONE(x) \
+ ((x) & (((uint32_t)0x1U) << SE0_INT_OP_DONE_SHIFT))
+
+/* SE SHA Status */
+#define SE0_SHA_STATUS_0 U(0x188)
+#define SE0_SHA_STATUS_IDLE U(0)
+
+/* SE error status */
+#define SE0_ERR_STATUS_REG_OFFSET U(0x18c)
+#define SE0_ERR_STATUS_CLEAR U(0)
+
+/* Secure scratch registers used to save the SHA256 hash */
+#define SECURE_SCRATCH_TZDRAM_SHA256_HASH_START SECURE_SCRATCH_RSV68_LO
+#define SECURE_SCRATCH_TZDRAM_SHA256_HASH_END SECURE_SCRATCH_RSV71_HI
+
+/* SE0_INT_ENABLE_0 */
+#define SE0_INT_ENABLE U(0x88)
+#define SE0_DISABLE_ALL_INT U(0x0)
+
+/* SE0_INT_STATUS_0 */
+#define SE0_INT_STATUS U(0x8C)
+#define SE0_CLEAR_ALL_INT_STATUS U(0x3F)
+
+/* SE0_SHA_INT_STATUS_0 */
+#define SHA_INT_STATUS U(0x184)
+#define SHA_SE_OP_DONE (U(1) << 4)
+
+/* SE0_SHA_ERR_STATUS_0 */
+#define SHA_ERR_STATUS U(0x18C)
+
+/* SE0_AES0_INT_STATUS_0 */
+#define AES0_INT_STATUS U(0x2F0)
+#define AES0_SE_OP_DONE (U(1) << 4)
+
+/* SE0_AES0_ERR_STATUS_0 */
+#define AES0_ERR_STATUS U(0x2F8)
+
+/* SE0_AES1_INT_STATUS_0 */
+#define AES1_INT_STATUS U(0x4F0)
+
+/* SE0_AES1_ERR_STATUS_0 */
+#define AES1_ERR_STATUS U(0x4F8)
+
+/* SE0_RSA_INT_STATUS_0 */
+#define RSA_INT_STATUS U(0x758)
+
+/* SE0_RSA_ERR_STATUS_0 */
+#define RSA_ERR_STATUS U(0x760)
+
+/* SE0_AES0_OPERATION_0 */
+#define AES0_OPERATION U(0x238)
+#define OP_MASK_BITS U(0x7)
+#define SE_OP_CTX_SAVE U(0x3)
+
+/* SE0_AES0_CTX_SAVE_CONFIG_0 */
+#define CTX_SAVE_CONFIG U(0x2D4)
+
+/* SE0_AES0_CTX_SAVE_AUTO_STATUS_0 */
+#define CTX_SAVE_AUTO_STATUS U(0x300)
+#define CTX_SAVE_AUTO_SE_READY U(0xFF)
+#define CTX_SAVE_AUTO_SE_BUSY (U(0x1) << 31)
+
+/* SE0_AES0_CTX_SAVE_AUTO_CTRL_0 */
+#define CTX_SAVE_AUTO_CTRL U(0x304)
+#define SE_CTX_SAVE_AUTO_EN (U(0x1) << 0)
+#define SE_CTX_SAVE_AUTO_LOCK_EN (U(0x1) << 1)
+
+/* SE0_AES0_CTX_SAVE_AUTO_START_ADDR_0 */
+#define CTX_SAVE_AUTO_START_ADDR U(0x308)
+
+/* SE0_AES0_CTX_SAVE_AUTO_START_ADDR_HI_0 */
+#define CTX_SAVE_AUTO_START_ADDR_HI U(0x30C)
+
+/*******************************************************************************
+ * Inline functions definition
+ ******************************************************************************/
+
+static inline uint32_t tegra_se_read_32(uint32_t offset)
+{
+ return mmio_read_32((uint32_t)(TEGRA_SE0_BASE + offset));
+}
+
+static inline void tegra_se_write_32(uint32_t offset, uint32_t val)
+{
+ mmio_write_32((uint32_t)(TEGRA_SE0_BASE + offset), val);
+}
+
+#endif /* SE_PRIVATE_H */
diff --git a/plat/nvidia/tegra/soc/t194/plat_memctrl.c b/plat/nvidia/tegra/soc/t194/plat_memctrl.c
new file mode 100644
index 0000000..9ddcacf
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/plat_memctrl.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/bl_common.h>
+#include <mce.h>
+#include <memctrl_v2.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/*******************************************************************************
+ * Array to hold MC context for Tegra194
+ ******************************************************************************/
+static __attribute__((aligned(16))) mc_regs_t tegra194_mc_context[] = {
+ _START_OF_TABLE_,
+ mc_smmu_bypass_cfg, /* TBU settings */
+ _END_OF_TABLE_,
+};
+
+/*******************************************************************************
+ * Handler to return the pointer to the MC's context struct
+ ******************************************************************************/
+mc_regs_t *plat_memctrl_get_sys_suspend_ctx(void)
+{
+ /* index of _END_OF_TABLE_ */
+ tegra194_mc_context[0].val = (uint32_t)ARRAY_SIZE(tegra194_mc_context) - 1U;
+
+ return tegra194_mc_context;
+}
+
+/*******************************************************************************
+ * Handler to restore platform specific settings to the memory controller
+ ******************************************************************************/
+void plat_memctrl_restore(void)
+{
+ UNUSED_FUNC_NOP(); /* do nothing */
+}
+
+/*******************************************************************************
+ * Handler to program platform specific settings to the memory controller
+ ******************************************************************************/
+void plat_memctrl_setup(void)
+{
+ UNUSED_FUNC_NOP(); /* do nothing */
+}
+
+/*******************************************************************************
+ * Handler to program the scratch registers with TZDRAM settings for the
+ * resume firmware
+ ******************************************************************************/
+void plat_memctrl_tzdram_setup(uint64_t phys_base, uint64_t size_in_bytes)
+{
+ uint32_t sec_reg_ctrl = tegra_mc_read_32(MC_SECURITY_CFG_REG_CTRL_0);
+ uint32_t phys_base_lo = (uint32_t)phys_base & 0xFFF00000;
+ uint32_t phys_base_hi = (uint32_t)(phys_base >> 32);
+
+ /*
+ * Check TZDRAM carveout register access status. Setup TZDRAM fence
+ * only if access is enabled.
+ */
+ if ((sec_reg_ctrl & SECURITY_CFG_WRITE_ACCESS_BIT) ==
+ SECURITY_CFG_WRITE_ACCESS_ENABLE) {
+
+ /*
+ * Setup the Memory controller to allow only secure accesses to
+ * the TZDRAM carveout
+ */
+ INFO("Configuring TrustZone DRAM Memory Carveout\n");
+
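+		/* the carveout base is 1MB aligned; the size is programmed in MB */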
+ tegra_mc_write_32(MC_SECURITY_CFG0_0, phys_base_lo);
+ tegra_mc_write_32(MC_SECURITY_CFG3_0, phys_base_hi);
+ tegra_mc_write_32(MC_SECURITY_CFG1_0, (uint32_t)(size_in_bytes >> 20));
+
+ /*
+ * MCE propagates the security configuration values across the
+ * CCPLEX.
+ */
+ (void)mce_update_gsc_tzdram();
+ }
+}
diff --git a/plat/nvidia/tegra/soc/t194/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t194/plat_psci_handlers.c
new file mode 100644
index 0000000..83d815a
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/plat_psci_handlers.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <bpmp_ipc.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <context.h>
+#include <drivers/delay_timer.h>
+#include <denver.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/psci/psci.h>
+#include <mce.h>
+#include <mce_private.h>
+#include <memctrl_v2.h>
+#include <plat/common/platform.h>
+#include <se.h>
+#include <smmu.h>
+#include <t194_nvg.h>
+#include <tegra194_private.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+extern uint32_t __tegra194_cpu_reset_handler_data,
+ __tegra194_cpu_reset_handler_end;
+
+/* TZDRAM offset for saving SMMU context */
+#define TEGRA194_SMMU_CTX_OFFSET 16U
+
+/* state id mask */
+#define TEGRA194_STATE_ID_MASK 0xFU
+/* constants to get power state's wake time */
+#define TEGRA194_WAKE_TIME_MASK 0x0FFFFFF0U
+#define TEGRA194_WAKE_TIME_SHIFT 4U
+/* default core wake mask for CPU_SUSPEND */
+#define TEGRA194_CORE_WAKE_MASK 0x180cU
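+
+/*
+ * The NS caller encodes the requested state id in power_state[3:0] and
+ * the expected core wake time in power_state[27:4]; see
+ * tegra_soc_validate_power_state() below.
+ */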
+
+static struct t19x_psci_percpu_data {
+ uint32_t wake_time;
+} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];
+
+int32_t tegra_soc_validate_power_state(uint32_t power_state,
+ psci_power_state_t *req_state)
+{
+ uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
+ TEGRA194_STATE_ID_MASK;
+ uint32_t cpu = plat_my_core_pos();
+ int32_t ret = PSCI_E_SUCCESS;
+
+	/* save the core wake time (in TSC ticks) */
+ t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
+ << TEGRA194_WAKE_TIME_SHIFT;
+
+ /*
+ * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
+ * that the correct value is read in tegra_soc_pwr_domain_suspend(),
+ * which is called with caches disabled. It is possible to read a stale
+ * value from DRAM in that function, because the L2 cache is not flushed
+ * unless the cluster is entering CC6/CC7.
+ */
+ clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
+ sizeof(t19x_percpu_data[cpu]));
+
+ /* Sanity check the requested state id */
+ switch (state_id) {
+ case PSTATE_ID_CORE_IDLE:
+
+ if (psci_get_pstate_type(power_state) != PSTATE_TYPE_STANDBY) {
+ ret = PSCI_E_INVALID_PARAMS;
+ break;
+ }
+
+ /* Core idle request */
+ req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
+ req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
+ break;
+
+ default:
+ ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
+ ret = PSCI_E_INVALID_PARAMS;
+ break;
+ }
+
+ return ret;
+}
+
+int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
+{
+ uint32_t cpu = plat_my_core_pos();
+ mce_cstate_info_t cstate_info = { 0 };
+
+ /* Program default wake mask */
+ cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+
+ /* Enter CPU idle */
+ (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
+ (uint64_t)TEGRA_NVG_CORE_C6,
+ t19x_percpu_data[cpu].wake_time,
+ 0U);
+
+ return PSCI_E_SUCCESS;
+}
+
+int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ const plat_local_state_t *pwr_domain_state;
+ uint8_t stateid_afflvl2;
+ plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ uint64_t mc_ctx_base;
+ uint32_t val;
+ mce_cstate_info_t sc7_cstate_info = {
+ .cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
+ .ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
+ .system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
+ .system_state_force = 1U,
+ .update_wake_mask = 1U,
+ };
+ int32_t ret = 0;
+
+ /* get the state ID */
+ pwr_domain_state = target_state->pwr_domain_state;
+ stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
+ TEGRA194_STATE_ID_MASK;
+
+ if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+ /* save 'Secure Boot' Processor Feature Config Register */
+ val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);
+
+ /* save MC context */
+ mc_ctx_base = params_from_bl2->tzdram_base +
+ tegra194_get_mc_ctx_offset();
+ tegra_mc_save_context((uintptr_t)mc_ctx_base);
+
+ /*
+		 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
+		 * since the VDK does not support atomic SE context save
+ */
+ if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
+ ret = tegra_se_suspend();
+ assert(ret == 0);
+ }
+
+ /* Prepare for system suspend */
+ mce_update_cstate_info(&sc7_cstate_info);
+
+ do {
+ val = (uint32_t)mce_command_handler(
+				(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
+				(uint64_t)TEGRA_NVG_CORE_C7,
+ MCE_CORE_SLEEP_TIME_INFINITE,
+ 0U);
+ } while (val == 0U);
+
+ /* Instruct the MCE to enter system suspend state */
+ ret = mce_command_handler(
+ (uint64_t)MCE_CMD_ENTER_CSTATE,
+ (uint64_t)TEGRA_NVG_CORE_C7,
+ MCE_CORE_SLEEP_TIME_INFINITE,
+ 0U);
+ assert(ret == 0);
+
+ /* set system suspend state for house-keeping */
+ tegra194_set_system_suspend_entry();
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Helper function to check if this is the last ON CPU in the cluster
+ ******************************************************************************/
+static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
+ uint32_t ncpu)
+{
+ plat_local_state_t target;
+ bool last_on_cpu = true;
+ uint32_t num_cpus = ncpu, pos = 0;
+
+ do {
+ target = states[pos];
+ if (target != PLAT_MAX_OFF_STATE) {
+ last_on_cpu = false;
+ }
+ --num_cpus;
+ pos++;
+ } while (num_cpus != 0U);
+
+ return last_on_cpu;
+}
+
+/*******************************************************************************
+ * Helper function to get target power state for the cluster
+ ******************************************************************************/
+static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
+ uint32_t ncpu)
+{
+ uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
+ plat_local_state_t target = states[core_pos];
+ mce_cstate_info_t cstate_info = { 0 };
+
+ /* CPU off */
+ if (target == PLAT_MAX_OFF_STATE) {
+
+ /* Enable cluster powerdn from last CPU in the cluster */
+ if (tegra_last_on_cpu_in_cluster(states, ncpu)) {
+
+ /* Enable CC6 state and turn off wake mask */
+ cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
+ cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7;
+ cstate_info.system_state_force = 1;
+ cstate_info.update_wake_mask = 1U;
+ mce_update_cstate_info(&cstate_info);
+
+ } else {
+
+ /* Turn off wake_mask */
+ cstate_info.update_wake_mask = 1U;
+ mce_update_cstate_info(&cstate_info);
+ target = PSCI_LOCAL_STATE_RUN;
+ }
+ }
+
+ return target;
+}
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
+ const plat_local_state_t *states,
+ uint32_t ncpu)
+{
+ plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
+ uint32_t cpu = plat_my_core_pos();
+
+ /* System Suspend */
+ if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
+ target = PSTATE_ID_SOC_POWERDN;
+ }
+
+ /* CPU off, CPU suspend */
+ if (lvl == (uint32_t)MPIDR_AFFLVL1) {
+ target = tegra_get_afflvl1_pwr_state(states, ncpu);
+ }
+
+ /* target cluster/system state */
+ return target;
+}
+
+int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
+{
+ const plat_local_state_t *pwr_domain_state =
+ target_state->pwr_domain_state;
+ plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
+ TEGRA194_STATE_ID_MASK;
+ uint64_t src_len_in_bytes = (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE;
+ uint64_t val;
+ int32_t ret = PSCI_E_SUCCESS;
+
+ if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+ /* initialise communication channel with BPMP */
+ ret = tegra_bpmp_ipc_init();
+ assert(ret == 0);
+
+ /* Enable SE clock before SE context save */
+ ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE);
+ assert(ret == 0);
+
+ /*
+ * It is very unlikely that the BL31 image would be
+ * bigger than 2^32 bytes
+ */
+ assert(src_len_in_bytes < UINT32_MAX);
+
+ if (tegra_se_calculate_save_sha256(BL31_BASE,
+ (uint32_t)src_len_in_bytes) != 0) {
+ ERROR("Hash calculation failed. Reboot\n");
+ (void)tegra_soc_prepare_system_reset();
+ }
+
+ /*
+ * The TZRAM loses power when we enter system suspend. To
+ * allow graceful exit from system suspend, we need to copy
+ * BL3-1 over to TZDRAM.
+ */
+ val = params_from_bl2->tzdram_base +
+ tegra194_get_cpu_reset_handler_size();
+ memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
+ src_len_in_bytes);
+
+ /* Disable SE clock after SE context save */
+ ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE);
+ assert(ret == 0);
+ }
+
+ return ret;
+}
+
+int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
+{
+ return PSCI_E_NOT_SUPPORTED;
+}
+
+int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+ uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
+ uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
+ MPIDR_AFFINITY_BITS;
+ int32_t ret = 0;
+
+ if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
+		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
+ return PSCI_E_NOT_PRESENT;
+ }
+
+	/* construct the linear CPU number: each cluster has two cores */
+ target_cpu += (target_cluster << 1U);
+
+ ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
+ if (ret < 0) {
+ return PSCI_E_DENIED;
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ uint8_t enable_ccplex_lock_step = params_from_bl2->enable_ccplex_lock_step;
+ uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ uint64_t actlr_elx;
+
+ /*
+	 * Reset the power state info for CPUs when onlining: we set the
+	 * deepest power state when offlining a core, but that may not be
+	 * what the non-secure software controlling idle states requested.
+	 * Non-secure software re-initializes this info once the core
+	 * comes online.
+ */
+ actlr_elx = read_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1));
+ actlr_elx &= ~DENVER_CPU_PMSTATE_MASK;
+ actlr_elx |= DENVER_CPU_PMSTATE_C1;
+ write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
+
+ /*
+ * Check if we are exiting from deep sleep and restore SE
+ * context if we are.
+ */
+ if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+#if ENABLE_STRICT_CHECKING_MODE
+ /*
+ * Enable strict checking after programming the GSC for
+ * enabling TZSRAM and TZDRAM
+ */
+ mce_enable_strict_checking();
+#endif
+
+ /* Init SMMU */
+ tegra_smmu_init();
+
+ /* Resume SE, RNG1 and PKA1 */
+ tegra_se_resume();
+
+ /*
+ * Program XUSB STREAMIDs
+ * ======================
+		 * T19x XUSB supports virtualization, with one physical
+		 * function (PF) and four virtual functions (VFs).
+ *
+		 * Until T186, XUSB used the two SIDs below.
+ * 1) #define TEGRA_SID_XUSB_HOST 0x1bU
+ * 2) #define TEGRA_SID_XUSB_DEV 0x1cU
+ *
+		 * The following four new SIDs have been added for the VFs:
+ * 3) #define TEGRA_SID_XUSB_VF0 0x5dU
+ * 4) #define TEGRA_SID_XUSB_VF1 0x5eU
+ * 5) #define TEGRA_SID_XUSB_VF2 0x5fU
+ * 6) #define TEGRA_SID_XUSB_VF3 0x60U
+ *
+		 * When virtualization is enabled, the SID override must be
+		 * disabled and the SIDs above programmed into the newly added
+		 * SID registers in the XUSB PADCTL MMIO space below. These
+		 * registers are TZ protected, so this must be done in ATF.
+ *
+ * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
+ * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0 (0x139cU)
+ * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
+ * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
+ * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
+ * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
+ *
+		 * This change disables the SID override and programs the XUSB
+		 * SIDs in the above registers to support both virtualized and
+		 * non-virtualized platforms.
+ */
+ if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
+
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_HOST);
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_0) == TEGRA_SID_XUSB_VF0);
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_1) == TEGRA_SID_XUSB_VF1);
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_2) == TEGRA_SID_XUSB_VF2);
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_3) == TEGRA_SID_XUSB_VF3);
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_DEV_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_DEV);
+ }
+ }
+
+ /*
+ * Enable dual execution optimized translations for all ELx.
+ */
+ if (enable_ccplex_lock_step != 0U) {
+ actlr_elx = read_actlr_el3();
+ actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL3;
+ write_actlr_el3(actlr_elx);
+
+ actlr_elx = read_actlr_el2();
+ actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL2;
+ write_actlr_el2(actlr_elx);
+
+ actlr_elx = read_actlr_el1();
+ actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL1;
+ write_actlr_el1(actlr_elx);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int32_t tegra_soc_pwr_domain_off_early(const psci_power_state_t *target_state)
+{
+ /* Do not power off the boot CPU */
+ if (plat_is_my_cpu_primary()) {
+ return PSCI_E_DENIED;
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+ int32_t ret = 0;
+
+ (void)target_state;
+
+ /* Disable Denver's DCO operations */
+ if (impl == DENVER_IMPL) {
+ denver_disable_dco();
+ }
+
+ /* Turn off CPU */
+ ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
+ (uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
+ assert(ret == 0);
+
+ return PSCI_E_SUCCESS;
+}
+
+__dead2 void tegra_soc_prepare_system_off(void)
+{
+ /* System power off */
+ mce_system_shutdown();
+
+ wfi();
+
+ /* wait for the system to power down */
+ for (;;) {
+ ;
+ }
+}
+
+int32_t tegra_soc_prepare_system_reset(void)
+{
+ /* System reboot */
+ mce_system_reboot();
+
+ return PSCI_E_SUCCESS;
+}
diff --git a/plat/nvidia/tegra/soc/t194/plat_ras.c b/plat/nvidia/tegra/soc/t194/plat_ras.c
new file mode 100644
index 0000000..841d70b
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/plat_ras.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2020-2021, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <lib/bakery_lock.h>
+#include <lib/cassert.h>
+#include <lib/extensions/ras.h>
+#include <lib/utils_def.h>
+#include <services/sdei.h>
+
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <tegra194_ras_private.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/*
+ * ERR<n>FR bits[63:32], it indicates supported RAS errors which can be enabled
+ * by setting corresponding bits in ERR<n>CTLR
+ */
+#define ERR_FR_EN_BITS_MASK 0xFFFFFFFF00000000ULL
+
+/*
+ * Number of RAS errors cleared per 'tegra194_ras_corrected_err_clear'
+ * function call.
+ */
+#define RAS_ERRORS_PER_CALL 8
+
+/*
+ * The maximum possible RAS node index value.
+ */
+#define RAS_NODE_INDEX_MAX 0x1FFFFFFFU
+
+/* bakery lock for platform RAS handler. */
+static DEFINE_BAKERY_LOCK(ras_handler_lock);
+#define ras_lock() bakery_lock_get(&ras_handler_lock)
+#define ras_unlock() bakery_lock_release(&ras_handler_lock)
+
+/*
+ * Function to handle an External Abort received at EL3.
+ * This function is invoked by RAS framework.
+ */
+static void tegra194_ea_handler(unsigned int ea_reason, uint64_t syndrome,
+ void *cookie, void *handle, uint64_t flags)
+{
+ int32_t ret;
+
+ ras_lock();
+
+ ERROR("MPIDR 0x%lx: exception reason=%u syndrome=0x%" PRIx64 "\n",
+ read_mpidr(), ea_reason, syndrome);
+
+ /* Call RAS EA handler */
+ ret = ras_ea_handler(ea_reason, syndrome, cookie, handle, flags);
+ if (ret != 0) {
+ ERROR("RAS error handled!\n");
+ ret = sdei_dispatch_event(TEGRA_SDEI_EP_EVENT_0 +
+ plat_my_core_pos());
+		if (ret != 0) {
+			ERROR("sdei_dispatch_event returned %d\n", ret);
+		}
+ } else {
+ ERROR("Not a RAS error!\n");
+ }
+
+ ras_unlock();
+}
+
+/*
+ * Function to enable reporting of all supported RAS errors.
+ *
+ * Uncorrectable errors are set to report as External Aborts (SError);
+ * corrected errors are set to report as interrupts.
+ */
+void tegra194_ras_enable(void)
+{
+ VERBOSE("%s\n", __func__);
+
+ /* skip RAS enablement if not a silicon platform. */
+ if (!tegra_platform_is_silicon()) {
+ return;
+ }
+
+ /*
+	 * Iterate over each group (num_idx ERRSELRs starting from idx_start).
+	 * Use a normal for loop instead of for_each_err_record_info to avoid
+	 * MISRA violations.
+ */
+ for (uint32_t i = 0U; i < err_record_mappings.num_err_records; i++) {
+
+ const struct err_record_info *info = &err_record_mappings.err_records[i];
+
+ uint32_t idx_start = info->sysreg.idx_start;
+ uint32_t num_idx = info->sysreg.num_idx;
+ const struct ras_aux_data *aux_data = (const struct ras_aux_data *)info->aux_data;
+
+ assert(aux_data != NULL);
+
+ for (uint32_t j = 0; j < num_idx; j++) {
+
+ /* ERR<n>CTLR register value. */
+ uint64_t err_ctrl = 0ULL;
+ /* all supported errors for this node. */
+ uint64_t err_fr;
+ /* uncorrectable errors */
+ uint64_t uncorr_errs;
+ /* correctable errors */
+ uint64_t corr_errs;
+
+ /*
+			 * Catch a misconfigured RAS aux data record
+			 * table early.
+ */
+ assert(aux_data[j].err_ctrl != NULL);
+
+ /*
+ * Write to ERRSELR_EL1 to select the RAS error node.
+			 * Always program this first, before any other RAS
+			 * register read/write for the node.
+ */
+ ser_sys_select_record(idx_start + j);
+
+ err_fr = read_erxfr_el1() & ERR_FR_EN_BITS_MASK;
+ uncorr_errs = aux_data[j].err_ctrl();
+ corr_errs = ~uncorr_errs & err_fr;
+
+ /* enable error reporting */
+ ERR_CTLR_ENABLE_FIELD(err_ctrl, ED);
+
+ /* enable SError reporting for uncorrectable errors */
+ if ((uncorr_errs & err_fr) != 0ULL) {
+ ERR_CTLR_ENABLE_FIELD(err_ctrl, UE);
+ }
+
+ /* generate interrupt for corrected errors. */
+ if (corr_errs != 0ULL) {
+ ERR_CTLR_ENABLE_FIELD(err_ctrl, CFI);
+ }
+
+ /* enable the supported errors */
+ err_ctrl |= err_fr;
+
+ VERBOSE("errselr_el1:0x%x, erxfr:0x%" PRIx64 ", err_ctrl:0x%" PRIx64 "\n",
+ idx_start + j, err_fr, err_ctrl);
+
+			/* enable the specified errors, or write 0 if none are supported */
+ write_erxctlr_el1(err_ctrl);
+ }
+ }
+}
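The mask arithmetic above is easiest to see with concrete numbers; a worked example with hypothetical masks:

	/*
	 * Hypothetical example:
	 *   err_fr      = 0x0000000F00000000ULL   four supported errors
	 *   uncorr_errs = 0x0000000300000000ULL   two are uncorrectable
	 *   corr_errs   = ~uncorr_errs & err_fr
	 *               = 0x0000000C00000000ULL   the other two are corrected
	 *
	 * UE is enabled because (uncorr_errs & err_fr) != 0, CFI is enabled
	 * because corr_errs != 0, and err_ctrl |= err_fr enables all four.
	 */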
+
+/*
+ * Function to clear RAS ERR<n>STATUS for corrected RAS errors.
+ *
+ * This function clears at most 'RAS_ERRORS_PER_CALL' RAS errors per call.
+ * 'cookie' - in/out parameter holding the last visited RAS error record
+ *            index. It is set to '0' when no RAS error records remain to
+ *            be cleared.
+ */
+void tegra194_ras_corrected_err_clear(uint64_t *cookie)
+{
+ /*
+	 * 'last_node' and 'last_idx' hold the RAS node index last visited by
+	 * the previous call. Both are 0 on the first SMC call, and again once
+	 * all RAS errors have been visited by the subsequent SMC calls.
+ */
+ union prev_record {
+ struct record {
+ uint32_t last_node;
+ uint32_t last_idx;
+ } rec;
+ uint64_t value;
+ } prev;
+
+ uint64_t clear_ce_status = 0ULL;
+ int32_t nerrs_per_call = RAS_ERRORS_PER_CALL;
+ uint32_t i;
+
+ if (cookie == NULL) {
+ return;
+ }
+
+ prev.value = *cookie;
+
+ if ((prev.rec.last_node >= RAS_NODE_INDEX_MAX) ||
+ (prev.rec.last_idx >= RAS_NODE_INDEX_MAX)) {
+ return;
+ }
+
+ ERR_STATUS_SET_FIELD(clear_ce_status, AV, 0x1UL);
+ ERR_STATUS_SET_FIELD(clear_ce_status, V, 0x1UL);
+ ERR_STATUS_SET_FIELD(clear_ce_status, OF, 0x1UL);
+ ERR_STATUS_SET_FIELD(clear_ce_status, MV, 0x1UL);
+ ERR_STATUS_SET_FIELD(clear_ce_status, CE, 0x3UL);
+
+ for (i = prev.rec.last_node; i < err_record_mappings.num_err_records; i++) {
+
+ const struct err_record_info *info = &err_record_mappings.err_records[i];
+ uint32_t idx_start = info->sysreg.idx_start;
+ uint32_t num_idx = info->sysreg.num_idx;
+
+ uint32_t j;
+
+ j = (i == prev.rec.last_node && prev.value != 0UL) ?
+ (prev.rec.last_idx + 1U) : 0U;
+
+ for (; j < num_idx; j++) {
+
+ uint64_t status;
+ uint32_t err_idx = idx_start + j;
+
+ if (err_idx >= RAS_NODE_INDEX_MAX) {
+ return;
+ }
+
+ write_errselr_el1(err_idx);
+ status = read_erxstatus_el1();
+
+ if (ERR_STATUS_GET_FIELD(status, CE) != 0U) {
+ write_erxstatus_el1(clear_ce_status);
+ }
+
+ --nerrs_per_call;
+
+ /* only clear 'nerrs_per_call' errors each time. */
+ if (nerrs_per_call <= 0) {
+ prev.rec.last_idx = j;
+ prev.rec.last_node = i;
+				/*
+				 * Save the last visited error record index
+				 * into the cookie.
+				 */
+ *cookie = prev.value;
+
+ return;
+ }
+ }
+ }
+
+ /*
+	 * Finish when all RAS error records have been checked or the provided
+	 * index is out of range.
+ */
+ *cookie = 0ULL;
+}
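Callers drive this function in a loop, passing the opaque cookie back in until it reads zero. A minimal sketch of such a loop (the SMC transport is elided):

	/* Sketch: clear all corrected errors in batches of RAS_ERRORS_PER_CALL. */
	static void clear_all_corrected_errors(void)
	{
		uint64_t cookie = 0ULL;

		do {
			tegra194_ras_corrected_err_clear(&cookie);
		} while (cookie != 0ULL);
	}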
+
+/* Function to probe an error from error record group. */
+static int32_t tegra194_ras_record_probe(const struct err_record_info *info,
+ int *probe_data)
+{
+ /* Skip probing if not a silicon platform */
+ if (!tegra_platform_is_silicon()) {
+ return 0;
+ }
+
+ return ser_probe_sysreg(info->sysreg.idx_start, info->sysreg.num_idx, probe_data);
+}
+
+/* Function to handle error from one given node */
+static int32_t tegra194_ras_node_handler(uint32_t errselr, const char *name,
+ const struct ras_error *errors, uint64_t status)
+{
+ bool found = false;
+ uint32_t ierr = (uint32_t)ERR_STATUS_GET_FIELD(status, IERR);
+ uint32_t serr = (uint32_t)ERR_STATUS_GET_FIELD(status, SERR);
+ uint64_t val = 0;
+
+ /* not a valid error. */
+ if (ERR_STATUS_GET_FIELD(status, V) == 0U) {
+ return 0;
+ }
+
+ ERR_STATUS_SET_FIELD(val, V, 1);
+
+	/* Keep the log format the same as the Linux arm64_ras driver. */
+ ERROR("**************************************\n");
+ ERROR("RAS Error in %s, ERRSELR_EL1=0x%x:\n", name, errselr);
+ ERROR("\tStatus = 0x%" PRIx64 "\n", status);
+
+ /* Print uncorrectable error information. */
+ if (ERR_STATUS_GET_FIELD(status, UE) != 0U) {
+
+ ERR_STATUS_SET_FIELD(val, UE, 1);
+ ERR_STATUS_SET_FIELD(val, UET, 1);
+
+ /* IERR to error message */
+ for (uint32_t i = 0; errors[i].error_msg != NULL; i++) {
+ if (ierr == errors[i].error_code) {
+ ERROR("\tIERR = %s: 0x%x\n",
+ errors[i].error_msg, ierr);
+
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ ERROR("\tUnknown IERR: 0x%x\n", ierr);
+ }
+
+ ERROR("SERR = %s: 0x%x\n", ras_serr_to_str(serr), serr);
+
+ /* Overflow, multiple errors have been detected. */
+ if (ERR_STATUS_GET_FIELD(status, OF) != 0U) {
+ ERROR("\tOverflow (there may be more errors) - "
+ "Uncorrectable\n");
+ ERR_STATUS_SET_FIELD(val, OF, 1);
+ }
+
+ ERROR("\tUncorrectable (this is fatal)\n");
+
+ /* Miscellaneous Register Valid. */
+ if (ERR_STATUS_GET_FIELD(status, MV) != 0U) {
+ ERROR("\tMISC0 = 0x%lx\n", read_erxmisc0_el1());
+ ERROR("\tMISC1 = 0x%lx\n", read_erxmisc1_el1());
+ ERR_STATUS_SET_FIELD(val, MV, 1);
+ }
+
+ /* Address Valid. */
+ if (ERR_STATUS_GET_FIELD(status, AV) != 0U) {
+ ERROR("\tADDR = 0x%lx\n", read_erxaddr_el1());
+ ERR_STATUS_SET_FIELD(val, AV, 1);
+ }
+
+ /* Deferred error */
+ if (ERR_STATUS_GET_FIELD(status, DE) != 0U) {
+ ERROR("\tDeferred error\n");
+ ERR_STATUS_SET_FIELD(val, DE, 1);
+ }
+
+ } else {
+ /* For corrected error, simply clear it. */
+ VERBOSE("corrected RAS error is cleared: ERRSELR_EL1:0x%x, "
+ "IERR:0x%x, SERR:0x%x\n", errselr, ierr, serr);
+ ERR_STATUS_SET_FIELD(val, CE, 1);
+ }
+
+ ERROR("**************************************\n");
+
+ /* Write to clear reported errors. */
+ write_erxstatus_el1(val);
+
+ /* error handled */
+ return 0;
+}
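The branch structure above reduces to a three-way triage on ERR<n>STATUS; a compact sketch using the same ERR_STATUS_GET_FIELD accessor:

	/* Sketch: triage order used above — invalid, uncorrectable, corrected. */
	static const char *classify_status(uint64_t status)
	{
		if (ERR_STATUS_GET_FIELD(status, V) == 0U) {
			return "invalid";
		}
		if (ERR_STATUS_GET_FIELD(status, UE) != 0U) {
			return "uncorrectable";
		}
		return "corrected";
	}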
+
+/* Function to handle one error node from an error record group. */
+static int32_t tegra194_ras_record_handler(const struct err_record_info *info,
+ int probe_data, const struct err_handler_data *const data __unused)
+{
+ uint32_t num_idx = info->sysreg.num_idx;
+ uint32_t idx_start = info->sysreg.idx_start;
+ const struct ras_aux_data *aux_data = info->aux_data;
+ const struct ras_error *errors;
+ uint32_t offset;
+ const char *node_name;
+
+ uint64_t status = 0ULL;
+
+ VERBOSE("%s\n", __func__);
+
+ assert(probe_data >= 0);
+ assert((uint32_t)probe_data < num_idx);
+
+ offset = (uint32_t)probe_data;
+ errors = aux_data[offset].error_records;
+ node_name = aux_data[offset].name;
+
+ assert(errors != NULL);
+
+ /* Write to ERRSELR_EL1 to select the error record */
+ ser_sys_select_record(idx_start + offset);
+
+ /* Retrieve status register from the error record */
+ status = read_erxstatus_el1();
+
+ return tegra194_ras_node_handler(idx_start + offset, node_name,
+ errors, status);
+}
+
+/* Instantiate RAS nodes */
+PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
+PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
+SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
+CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
+
+/* Instantiate RAS node groups */
+static struct ras_aux_data per_core_ras_group[] = {
+ PER_CORE_RAS_GROUP_NODES
+};
+CASSERT(ARRAY_SIZE(per_core_ras_group) < RAS_NODE_INDEX_MAX,
+ assert_max_per_core_ras_group_size);
+
+static struct ras_aux_data per_cluster_ras_group[] = {
+ PER_CLUSTER_RAS_GROUP_NODES
+};
+CASSERT(ARRAY_SIZE(per_cluster_ras_group) < RAS_NODE_INDEX_MAX,
+ assert_max_per_cluster_ras_group_size);
+
+static struct ras_aux_data scf_l3_ras_group[] = {
+ SCF_L3_BANK_RAS_GROUP_NODES
+};
+CASSERT(ARRAY_SIZE(scf_l3_ras_group) < RAS_NODE_INDEX_MAX,
+ assert_max_scf_l3_ras_group_size);
+
+static struct ras_aux_data ccplex_ras_group[] = {
+ CCPLEX_RAS_GROUP_NODES
+};
+CASSERT(ARRAY_SIZE(ccplex_ras_group) < RAS_NODE_INDEX_MAX,
+ assert_max_ccplex_ras_group_size);
+
+/*
+ * We have the same probe and handler for each error record group; use a
+ * macro to simplify the record definitions.
+ */
+#define ADD_ONE_ERR_GROUP(errselr_start, group) \
+ ERR_RECORD_SYSREG_V1((errselr_start), (uint32_t)ARRAY_SIZE((group)), \
+ &tegra194_ras_record_probe, \
+ &tegra194_ras_record_handler, (group))
+
+/* RAS error record group information */
+static struct err_record_info carmel_ras_records[] = {
+ /*
+	 * Per-core RAS error records
+ * ERRSELR starts from 0*256 + Logical_CPU_ID*16 + 0 to
+ * 0*256 + Logical_CPU_ID*16 + 5 for each group.
+ * 8 cores/groups, 6 * 8 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x000, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x010, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x020, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x030, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x040, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x050, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x060, per_core_ras_group),
+ ADD_ONE_ERR_GROUP(0x070, per_core_ras_group),
+
+ /*
+	 * Per-cluster RAS error records
+ * ERRSELR starts from 2*256 + Logical_Cluster_ID*16 + 0 to
+ * 2*256 + Logical_Cluster_ID*16 + 3.
+ * 4 clusters/groups, 3 * 4 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x200, per_cluster_ras_group),
+ ADD_ONE_ERR_GROUP(0x210, per_cluster_ras_group),
+ ADD_ONE_ERR_GROUP(0x220, per_cluster_ras_group),
+ ADD_ONE_ERR_GROUP(0x230, per_cluster_ras_group),
+
+ /*
+	 * SCF L3_Bank RAS error records
+	 * ERRSELR: 3*256 + L3_Bank_ID, L3_Bank_ID: 0-3
+	 * 1 group, 4 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x300, scf_l3_ras_group),
+
+ /*
+	 * CCPLEX RAS error records
+	 * ERRSELR: 4*256 + Unit_ID, Unit_ID: 0 - 4
+	 * 1 group, 5 nodes in total.
+ */
+ ADD_ONE_ERR_GROUP(0x400, ccplex_ras_group),
+};
+
+CASSERT(ARRAY_SIZE(carmel_ras_records) < RAS_NODE_INDEX_MAX,
+ assert_max_carmel_ras_records_size);
+
+REGISTER_ERR_RECORD_INFO(carmel_ras_records);
+
+/* dummy RAS interrupt */
+static struct ras_interrupt carmel_ras_interrupts[] = {};
+REGISTER_RAS_INTERRUPTS(carmel_ras_interrupts);
+
+/*******************************************************************************
+ * RAS handler for the platform
+ ******************************************************************************/
+void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+ void *handle, uint64_t flags)
+{
+#if ENABLE_FEAT_RAS
+ tegra194_ea_handler(ea_reason, syndrome, cookie, handle, flags);
+#else
+ plat_default_ea_handler(ea_reason, syndrome, cookie, handle, flags);
+#endif
+}
diff --git a/plat/nvidia/tegra/soc/t194/plat_secondary.c b/plat/nvidia/tegra/soc/t194/plat_secondary.c
new file mode 100644
index 0000000..1cb14ad
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/plat_secondary.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include <mce.h>
+#include <tegra194_private.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+extern uint64_t tegra_bl31_phys_base;
+
+#define MISCREG_AA64_RST_LOW 0x2004U
+#define MISCREG_AA64_RST_HIGH 0x2008U
+
+#define CPU_RESET_MODE_AA64 1U
+
+/*******************************************************************************
+ * Set up secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+ uint32_t addr_low, addr_high;
+ plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ uint64_t cpu_reset_handler_base, cpu_reset_handler_size, tzdram_addr;
+ uint64_t src_len_bytes = BL_END - tegra_bl31_phys_base;
+
+ INFO("Setting up secondary CPU boot\n");
+
+ tzdram_addr = params_from_bl2->tzdram_base +
+ tegra194_get_cpu_reset_handler_size();
+
+ /*
+	 * The BL31 code resides in TZSRAM, which loses state
+ * when we enter System Suspend. Copy the wakeup trampoline
+ * code to TZDRAM to help us exit from System Suspend.
+ */
+ cpu_reset_handler_base = tegra194_get_cpu_reset_handler_base();
+ cpu_reset_handler_size = tegra194_get_cpu_reset_handler_size();
+ memcpy((void *)((uintptr_t)params_from_bl2->tzdram_base),
+ (void *)((uintptr_t)cpu_reset_handler_base),
+ cpu_reset_handler_size);
+
+ /* TZDRAM base will be used as the "resume" address */
+ addr_low = (uint32_t)params_from_bl2->tzdram_base | CPU_RESET_MODE_AA64;
+ addr_high = (uint32_t)((params_from_bl2->tzdram_base >> 32U) & 0x7ffU);
+
+ /* write lower 32 bits first, then the upper 11 bits */
+ mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, addr_low);
+ assert(mmio_read_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW) == addr_low);
+ mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_HIGH, addr_high);
+ assert(mmio_read_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_HIGH) == addr_high);
+
+ /* save reset vector to be used during SYSTEM_SUSPEND exit */
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_RESET_VECTOR_LO,
+ addr_low);
+ assert(mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_RESET_VECTOR_LO) == addr_low);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_RESET_VECTOR_HI,
+ addr_high);
+ assert(mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_RESET_VECTOR_HI) == addr_high);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV72_LO,
+ (uint32_t)tzdram_addr);
+ assert(mmio_read_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV72_LO) == (uint32_t)tzdram_addr);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV72_HI,
+ (uint32_t)src_len_bytes);
+ assert(mmio_read_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV72_HI) == (uint32_t)src_len_bytes);
+}
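The reset address is split across two registers: bits [31:0] go to the low register (with bit 0 reused as the CPU_RESET_MODE_AA64 flag) and bits [42:32] go to the high register. A sketch of reconstructing the address from the two values:

	/*
	 * Sketch: recombine the two MISCREG_AA64_RST_* values. Bit 0 of the
	 * low word is the mode flag, and the high register holds only
	 * address bits [42:32] (hence the 0x7ff mask).
	 */
	static uint64_t reset_vector_from_regs(uint32_t lo, uint32_t hi)
	{
		return ((uint64_t)(hi & 0x7ffU) << 32U) | (lo & ~1U);
	}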
diff --git a/plat/nvidia/tegra/soc/t194/plat_setup.c b/plat/nvidia/tegra/soc/t194/plat_setup.c
new file mode 100644
index 0000000..6850330
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/plat_setup.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31/bl31.h>
+#include <common/bl_common.h>
+#include <common/interrupt_props.h>
+#include <drivers/console.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <cortex_a57.h>
+#include <common/debug.h>
+#include <denver.h>
+#include <drivers/arm/gic_common.h>
+#include <drivers/arm/gicv2.h>
+#include <bl31/interrupt_mgmt.h>
+#include <mce.h>
+#include <mce_private.h>
+#include <memctrl.h>
+#include <plat/common/platform.h>
+#include <smmu.h>
+#include <spe.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+/* ID for spe-console */
+#define TEGRA_CONSOLE_SPE_ID 0xFE
+
+/*******************************************************************************
+ * Structure to store the SCR addresses and their expected settings.
+ *******************************************************************************
+ */
+typedef struct {
+ uint32_t scr_addr;
+ uint32_t scr_val;
+} scr_settings_t;
+
+static const scr_settings_t t194_scr_settings[] = {
+ { SCRATCH_RSV68_SCR, SCRATCH_RSV68_SCR_VAL },
+ { SCRATCH_RSV71_SCR, SCRATCH_RSV71_SCR_VAL },
+ { SCRATCH_RSV72_SCR, SCRATCH_RSV72_SCR_VAL },
+ { SCRATCH_RSV75_SCR, SCRATCH_RSV75_SCR_VAL },
+ { SCRATCH_RSV81_SCR, SCRATCH_RSV81_SCR_VAL },
+ { SCRATCH_RSV97_SCR, SCRATCH_RSV97_SCR_VAL },
+ { SCRATCH_RSV99_SCR, SCRATCH_RSV99_SCR_VAL },
+ { SCRATCH_RSV109_SCR, SCRATCH_RSV109_SCR_VAL },
+ { MISCREG_SCR_SCRTZWELCK, MISCREG_SCR_SCRTZWELCK_VAL }
+};
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ *******************************************************************************
+ */
+static const uint8_t tegra_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ 1,
+ /* No of clusters */
+ PLATFORM_CLUSTER_COUNT,
+ /* No of CPU cores - cluster0 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER,
+ /* No of CPU cores - cluster1 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER,
+ /* No of CPU cores - cluster2 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER,
+ /* No of CPU cores - cluster3 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER
+};
+
+/*******************************************************************************
+ * This function returns the Tegra default topology tree information.
+ ******************************************************************************/
+const uint8_t *plat_get_power_domain_tree_desc(void)
+{
+ return tegra_power_domain_tree_desc;
+}
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+ MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x4000U, /* 16KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_GPCDMA_BASE, 0x10000U, /* 64KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x8000U, /* 32KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_MC_BASE, 0x8000U, /* 32KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+#if !ENABLE_CONSOLE_SPE
+ MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000U, /* 128KB - UART A, B*/
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000U, /* 128KB - UART C, G */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000U, /* 192KB - UART D, E, F */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+#endif
+ MAP_REGION_FLAT(TEGRA_XUSB_PADCTL_BASE, 0x2000U, /* 8KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x1000, /* 4KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_GICC_BASE, 0x1000, /* 4KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x1000U, /* 4KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x1000U, /* 4KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x1000U, /* 4KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_HSP_DBELL_BASE, 0x1000U, /* 4KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+#if ENABLE_CONSOLE_SPE
+ MAP_REGION_FLAT(TEGRA_CONSOLE_SPE_BASE, 0x1000U, /* 4KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+#endif
+ MAP_REGION_FLAT(TEGRA_TMRUS_BASE, TEGRA_TMRUS_SIZE, /* 4KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x1000U, /* 4KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_SMMU2_BASE, 0x800000U, /* 8MB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_SMMU1_BASE, 0x800000U, /* 8MB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x800000U, /* 8MB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_BPMP_IPC_TX_PHYS_BASE, 0x10000U, /* 64KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000U, /* 64KB */
+ (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE),
+ {0}
+};
+
+/*******************************************************************************
+ * Return the MMIO map used by the common platform code to set up page tables
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+ /* MMIO space */
+ return tegra_mmap;
+}
+
+/*******************************************************************************
+ * Handler to get the System Counter Frequency
+ ******************************************************************************/
+uint32_t plat_get_syscnt_freq2(void)
+{
+ return 31250000;
+}
+
+#if !ENABLE_CONSOLE_SPE
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA194_MAX_UART_PORTS 7
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra194_uart_addresses[TEGRA194_MAX_UART_PORTS + 1] = {
+ 0, /* undefined - treated as an error case */
+ TEGRA_UARTA_BASE,
+ TEGRA_UARTB_BASE,
+ TEGRA_UARTC_BASE,
+ TEGRA_UARTD_BASE,
+ TEGRA_UARTE_BASE,
+ TEGRA_UARTF_BASE,
+ TEGRA_UARTG_BASE
+};
+#endif
+
+/*******************************************************************************
+ * Enable console corresponding to the console ID
+ ******************************************************************************/
+void plat_enable_console(int32_t id)
+{
+ uint32_t console_clock = 0U;
+
+#if ENABLE_CONSOLE_SPE
+ static console_t spe_console;
+
+ if (id == TEGRA_CONSOLE_SPE_ID) {
+ (void)console_spe_register(TEGRA_CONSOLE_SPE_BASE,
+ console_clock,
+ TEGRA_CONSOLE_BAUDRATE,
+ &spe_console);
+ console_set_scope(&spe_console, CONSOLE_FLAG_BOOT |
+ CONSOLE_FLAG_RUNTIME | CONSOLE_FLAG_CRASH);
+ }
+#else
+ static console_t uart_console;
+
+	if ((id > 0) && (id <= TEGRA194_MAX_UART_PORTS)) {
+ /*
+		 * The reference clock used by the FPGAs is a lot slower.
+ */
+ if (tegra_platform_is_fpga()) {
+ console_clock = TEGRA_BOOT_UART_CLK_13_MHZ;
+ } else {
+ console_clock = TEGRA_BOOT_UART_CLK_408_MHZ;
+ }
+
+ (void)console_16550_register(tegra194_uart_addresses[id],
+ console_clock,
+ TEGRA_CONSOLE_BAUDRATE,
+ &uart_console);
+ console_set_scope(&uart_console, CONSOLE_FLAG_BOOT |
+ CONSOLE_FLAG_RUNTIME | CONSOLE_FLAG_CRASH);
+ }
+#endif
+}
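As a usage sketch: the console ID indexes the address table directly, so (for example) ID 3 selects TEGRA_UARTC_BASE, clocked at 408 MHz on silicon and 13 MHz on FPGA:

	/* Hypothetical call site: bring up UART C as the boot/runtime console. */
	plat_enable_console(3);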
+
+/*******************************************************************************
+ * Verify SCR settings
+ ******************************************************************************/
+static inline bool tegra194_is_scr_valid(void)
+{
+ uint32_t scr_val;
+ bool ret = true;
+
+ for (uint8_t i = 0U; i < ARRAY_SIZE(t194_scr_settings); i++) {
+ scr_val = mmio_read_32((uintptr_t)t194_scr_settings[i].scr_addr);
+ if (scr_val != t194_scr_settings[i].scr_val) {
+ ERROR("Mismatch at SCR addr = 0x%x\n", t194_scr_settings[i].scr_addr);
+ ret = false;
+ }
+ }
+ return ret;
+}
+
+/*******************************************************************************
+ * Handler for early platform setup
+ ******************************************************************************/
+void plat_early_platform_setup(void)
+{
+ const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ uint8_t enable_ccplex_lock_step = params_from_bl2->enable_ccplex_lock_step;
+ uint64_t actlr_elx;
+
+ /* Verify chip id is t194 */
+ assert(tegra_chipid_is_t194());
+
+ /* Verify SCR settings */
+ if (tegra_platform_is_silicon()) {
+ assert(tegra194_is_scr_valid());
+ }
+
+ /* sanity check MCE firmware compatibility */
+ mce_verify_firmware_version();
+
+#if ENABLE_FEAT_RAS
+ /* Enable Uncorrectable RAS error */
+ tegra194_ras_enable();
+#endif
+
+ /*
+ * Program XUSB STREAMIDs
+ * ======================
+	 * T19x XUSB supports XUSB virtualization: it has one
+	 * physical function (PF) and four virtual functions (VFs).
+ *
+	 * Until T186 there were two SIDs for XUSB:
+ * 1) #define TEGRA_SID_XUSB_HOST 0x1bU
+ * 2) #define TEGRA_SID_XUSB_DEV 0x1cU
+ *
+	 * Four new SIDs have been added for the VFs:
+ * 3) #define TEGRA_SID_XUSB_VF0 0x5dU
+ * 4) #define TEGRA_SID_XUSB_VF1 0x5eU
+ * 5) #define TEGRA_SID_XUSB_VF2 0x5fU
+ * 6) #define TEGRA_SID_XUSB_VF3 0x60U
+ *
+	 * When virtualization is enabled, the SID override must be disabled
+	 * and the SIDs above programmed into the newly added SID registers
+	 * in the XUSB PADCTL MMIO space, listed below. These registers are
+	 * TZ-protected, so the programming must be done in ATF.
+ * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
+ * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0 (0x139cU)
+ * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
+ * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
+ * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
+ * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
+ *
+	 * This change disables the SID override and programs the XUSB
+	 * SIDs in the registers above, to support both virtualization
+	 * and non-virtualization platforms.
+ */
+ if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
+
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_HOST);
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_0) == TEGRA_SID_XUSB_VF0);
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_1) == TEGRA_SID_XUSB_VF1);
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_2) == TEGRA_SID_XUSB_VF2);
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_HOST_AXI_STREAMID_VF_3) == TEGRA_SID_XUSB_VF3);
+ mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
+ assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
+ XUSB_PADCTL_DEV_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_DEV);
+ }
+
+ /*
+ * Enable dual execution optimized translations for all ELx.
+ */
+ if (enable_ccplex_lock_step != 0U) {
+ actlr_elx = read_actlr_el3();
+ actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL3;
+ write_actlr_el3(actlr_elx);
+ /* check if the bit is actually set */
+ assert((read_actlr_el3() & DENVER_CPU_ENABLE_DUAL_EXEC_EL3) != 0ULL);
+
+ actlr_elx = read_actlr_el2();
+ actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL2;
+ write_actlr_el2(actlr_elx);
+ /* check if the bit is actually set */
+ assert((read_actlr_el2() & DENVER_CPU_ENABLE_DUAL_EXEC_EL2) != 0ULL);
+
+ actlr_elx = read_actlr_el1();
+ actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL1;
+ write_actlr_el1(actlr_elx);
+ /* check if the bit is actually set */
+ assert((read_actlr_el1() & DENVER_CPU_ENABLE_DUAL_EXEC_EL1) != 0ULL);
+ }
+}
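The three per-EL blocks above repeat one set-and-verify step. A sketch of that step for EL3 (the helper name is hypothetical; the EL2/EL1 variants differ only in the accessors used):

	/* Hypothetical helper: OR a mask into ACTLR_EL3 and confirm it stuck. */
	static void actlr_el3_set_bits(uint64_t mask)
	{
		write_actlr_el3(read_actlr_el3() | mask);
		assert((read_actlr_el3() & mask) == mask);
	}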
+
+/* Secure IRQs for Tegra194 */
+static const interrupt_prop_t tegra194_interrupt_props[] = {
+ INTR_PROP_DESC(TEGRA_SDEI_SGI_PRIVATE, PLAT_SDEI_CRITICAL_PRI,
+ GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE),
+ INTR_PROP_DESC(TEGRA194_TOP_WDT_IRQ, PLAT_TEGRA_WDT_PRIO,
+ GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE)
+};
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+ tegra_gic_setup(tegra194_interrupt_props, ARRAY_SIZE(tegra194_interrupt_props));
+ tegra_gic_init();
+
+ /*
+ * Initialize the FIQ handler
+ */
+ tegra_fiq_handler_setup();
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 params from previous bootloader
+ ******************************************************************************/
+struct tegra_bl31_params *plat_get_bl31_params(void)
+{
+ uint64_t val;
+
+ val = (mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_BL31_PARAMS_HI_ADDR) &
+ SCRATCH_BL31_PARAMS_HI_ADDR_MASK) >> SCRATCH_BL31_PARAMS_HI_ADDR_SHIFT;
+ val <<= 32;
+ val |= mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_BL31_PARAMS_LO_ADDR);
+
+ return (struct tegra_bl31_params *)(uintptr_t)val;
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 platform params from previous bootloader
+ ******************************************************************************/
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
+{
+ uint64_t val;
+
+ val = (mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_BL31_PLAT_PARAMS_HI_ADDR) &
+ SCRATCH_BL31_PLAT_PARAMS_HI_ADDR_MASK) >> SCRATCH_BL31_PLAT_PARAMS_HI_ADDR_SHIFT;
+ val <<= 32;
+ val |= mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_BL31_PLAT_PARAMS_LO_ADDR);
+
+ return (plat_params_from_bl2_t *)(uintptr_t)val;
+}
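Both getters decode the same packing: a 64-bit pointer split into a full low word and a masked, shifted high field. A generic sketch of the decode (parameter names are illustrative):

	/* Sketch: rebuild a 64-bit pointer from LO/HI scratch register values. */
	static inline uintptr_t ptr_from_scratch(uint32_t lo, uint32_t hi,
						 uint32_t hi_mask,
						 uint32_t hi_shift)
	{
		uint64_t val = (uint64_t)((hi & hi_mask) >> hi_shift);

		return (uintptr_t)((val << 32) | lo);
	}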
+
+/*******************************************************************************
+ * Handler for late platform setup
+ ******************************************************************************/
+void plat_late_platform_setup(void)
+{
+#if ENABLE_STRICT_CHECKING_MODE
+ /*
+ * Enable strict checking after programming the GSC for
+ * enabling TZSRAM and TZDRAM
+ */
+ mce_enable_strict_checking();
+ mce_verify_strict_checking();
+#endif
+}
+
+/*******************************************************************************
+ * Handler to indicate support for System Suspend
+ ******************************************************************************/
+bool plat_supports_system_suspend(void)
+{
+ return true;
+}
+
+/*******************************************************************************
+ * Platform specific runtime setup.
+ ******************************************************************************/
+void plat_runtime_setup(void)
+{
+ /*
+	 * During cold boot, the arbitration bit is observed to be
+	 * set in the Memory Controller, leading to false error
+	 * interrupts in the non-secure world. To avoid this, clear
+	 * the interrupt status register before booting into the
+	 * non-secure world.
+ */
+ tegra_memctrl_clear_pending_interrupts();
+
+ /*
+ * During boot, USB3 and flash media (SDMMC/SATA) devices need
+ * access to IRAM. Because these clients connect to the MC and
+	 * do not have a direct path to the IRAM, the MC implements AHB
+	 * redirection during boot to provide a path to the IRAM. In this mode
+ * accesses to a programmed memory address aperture are directed
+ * to the AHB bus, allowing access to the IRAM. This mode must be
+ * disabled before we jump to the non-secure world.
+ */
+ tegra_memctrl_disable_ahb_redirection();
+
+ /*
+ * Verify the integrity of the previously configured SMMU(s) settings
+ */
+ tegra_smmu_verify();
+}
diff --git a/plat/nvidia/tegra/soc/t194/plat_sip_calls.c b/plat/nvidia/tegra/soc/t194/plat_sip_calls.c
new file mode 100644
index 0000000..6e42e64
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/plat_sip_calls.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/bl_common.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <common/debug.h>
+#include <errno.h>
+#include <mce.h>
+#include <mce_private.h>
+#include <memctrl.h>
+#include <common/runtime_svc.h>
+#include <tegra_private.h>
+#include <tegra_platform.h>
+#include <smmu.h>
+#include <stdbool.h>
+
+/*******************************************************************************
+ * Tegra194 SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_GET_SMMU_PER 0xC200FF00U
+#define TEGRA_SIP_CLEAR_RAS_CORRECTED_ERRORS 0xC200FF01U
+
+/*******************************************************************************
+ * This function is responsible for handling all T194 SiP calls
+ ******************************************************************************/
+int32_t plat_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ const void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int32_t ret = 0;
+ uint32_t i, smmu_per[6] = {0};
+ uint32_t num_smmu_devices = plat_get_num_smmu_devices();
+ uint64_t per[3] = {0ULL};
+
+ (void)x1;
+ (void)x4;
+ (void)cookie;
+ (void)flags;
+
+ switch (smc_fid) {
+ case TEGRA_SIP_GET_SMMU_PER:
+
+		/* make sure we don't go past the array length */
+ assert(num_smmu_devices <= ARRAY_SIZE(smmu_per));
+
+ /* read all supported SMMU_PER records */
+ for (i = 0U; i < num_smmu_devices; i++) {
+ smmu_per[i] = tegra_smmu_read_32(i, SMMU_GSR0_PER);
+ }
+
+		/* pack the results into three 64-bit variables. */
+ per[0] = smmu_per[0] | ((uint64_t)smmu_per[1] << 32U);
+ per[1] = smmu_per[2] | ((uint64_t)smmu_per[3] << 32U);
+ per[2] = smmu_per[4] | ((uint64_t)smmu_per[5] << 32U);
+
+ /* provide the results via X1-X3 CPU registers */
+ write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1, per[0]);
+ write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X2, per[1]);
+ write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X3, per[2]);
+
+ break;
+
+#if ENABLE_FEAT_RAS
+ case TEGRA_SIP_CLEAR_RAS_CORRECTED_ERRORS:
+ {
+ /*
+		 * Clear all RAS error records for corrected errors first.
+		 * x1 shall be 0 for the first SMC call after FHI is
+		 * asserted.
+		 */
+ uint64_t local_x1 = x1;
+
+ tegra194_ras_corrected_err_clear(&local_x1);
+ if (local_x1 == 0ULL) {
+			/*
+			 * Clear the HSM corrected error status after all
+			 * corrected RAS errors have been cleared.
+			 */
+ mce_clear_hsm_corr_status();
+ }
+
+ write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1, local_x1);
+
+ break;
+ }
+#endif
+
+ default:
+ ret = -ENOTSUP;
+ break;
+ }
+
+ return ret;
+}
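From the non-secure side, TEGRA_SIP_CLEAR_RAS_CORRECTED_ERRORS is meant to be issued in a loop, feeding X1 back in until it returns zero. A sketch, assuming a hypothetical smc64() wrapper that issues the call and returns the resulting X1:

	/* Hypothetical NS-world caller: drain all corrected RAS errors. */
	static void clear_corrected_ras_errors(void)
	{
		uint64_t cookie = 0ULL;	/* must be 0 on the first call */

		do {
			cookie = smc64(TEGRA_SIP_CLEAR_RAS_CORRECTED_ERRORS,
				       cookie, 0ULL, 0ULL);
		} while (cookie != 0ULL);
	}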
diff --git a/plat/nvidia/tegra/soc/t194/plat_smmu.c b/plat/nvidia/tegra/soc/t194/plat_smmu.c
new file mode 100644
index 0000000..710d5c5
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/plat_smmu.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <smmu.h>
+#include <tegra_def.h>
+
+#define BOARD_SYSTEM_FPGA_BASE U(1)
+#define BASE_CONFIG_SMMU_DEVICES U(2)
+#define MAX_NUM_SMMU_DEVICES U(3)
+
+static uint32_t tegra_misc_read_32(uint32_t off)
+{
+ return mmio_read_32((uintptr_t)TEGRA_MISC_BASE + off);
+}
+
+/*******************************************************************************
+ * Handler to return the number of supported SMMU devices
+ ******************************************************************************/
+uint32_t plat_get_num_smmu_devices(void)
+{
+ uint32_t ret_num = MAX_NUM_SMMU_DEVICES;
+ uint32_t board_revid = ((tegra_misc_read_32(MISCREG_EMU_REVID) >>
+ BOARD_SHIFT_BITS) & BOARD_MASK_BITS);
+
+ if (board_revid == BOARD_SYSTEM_FPGA_BASE) {
+ ret_num = BASE_CONFIG_SMMU_DEVICES;
+ }
+
+ return ret_num;
+}
diff --git a/plat/nvidia/tegra/soc/t194/plat_trampoline.S b/plat/nvidia/tegra/soc/t194/plat_trampoline.S
new file mode 100644
index 0000000..0ff5407
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/plat_trampoline.S
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <plat/common/common_def.h>
+#include <memctrl_v2.h>
+#include <tegra_def.h>
+
+#define TEGRA194_STATE_SYSTEM_SUSPEND 0x5C7
+#define TEGRA194_STATE_SYSTEM_RESUME 0x600D
+#define TEGRA194_MC_CTX_SIZE 0xFB
+
+ .align 4
+ .globl tegra194_cpu_reset_handler
+
+/* CPU reset handler routine */
+func tegra194_cpu_reset_handler
+ /* check if we are exiting system suspend state */
+ adr x0, __tegra194_system_suspend_state
+ ldr x1, [x0]
+ mov x2, #TEGRA194_STATE_SYSTEM_SUSPEND
+ lsl x2, x2, #16
+ add x2, x2, #TEGRA194_STATE_SYSTEM_SUSPEND
+ cmp x1, x2
+ bne boot_cpu
+
+ /* set system resume state */
+ mov x1, #TEGRA194_STATE_SYSTEM_RESUME
+ lsl x1, x1, #16
+ mov x2, #TEGRA194_STATE_SYSTEM_RESUME
+ add x1, x1, x2
+ str x1, [x0]
+ dsb sy
+
+ /* prepare to relocate to TZSRAM */
+ mov x0, #BL31_BASE
+ adr x1, __tegra194_cpu_reset_handler_end
+ adr x2, __tegra194_cpu_reset_handler_data
+ ldr x2, [x2, #8]
+
+ /* memcpy16 */
+m_loop16:
+ cmp x2, #16
+ b.lt m_loop1
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ sub x2, x2, #16
+ b m_loop16
+ /* copy byte per byte */
+m_loop1:
+ cbz x2, boot_cpu
+ ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne m_loop1
+
+ /*
+ * Synchronization barriers to make sure that memory is flushed out
+ * before we start execution in SysRAM.
+ */
+ dsb sy
+ isb
+
+boot_cpu:
+ adr x0, __tegra194_cpu_reset_handler_data
+ ldr x0, [x0]
+ br x0
+endfunc tegra194_cpu_reset_handler
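For readability, a C rendering of the m_loop16/m_loop1 copy above (a sketch only: like the assembly, it assumes 16-byte-aligned source and destination; here dst is BL31_BASE, src the end of this handler, and n the BL31 size from the data block):

	static void copy16(uint64_t *dst, const uint64_t *src, uint64_t n)
	{
		uint8_t *d;
		const uint8_t *s;

		/* 16-byte chunks: the ldp/stp x3, x4 pair in the assembly */
		while (n >= 16U) {
			*dst++ = *src++;
			*dst++ = *src++;
			n -= 16U;
		}
		/* byte-wise tail: m_loop1 */
		d = (uint8_t *)dst;
		s = (const uint8_t *)src;
		while (n-- > 0U) {
			*d++ = *s++;
		}
	}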
+
+ /*
+ * Tegra194 reset data (offset 0x0 - 0x2490)
+ *
+ * 0x0000: secure world's entrypoint
+ * 0x0008: BL31 size (RO + RW)
+ * 0x0010: MC context start
+ * 0x2490: MC context end
+ */
+
+ .align 4
+ .type __tegra194_cpu_reset_handler_data, %object
+ .globl __tegra194_cpu_reset_handler_data
+__tegra194_cpu_reset_handler_data:
+ .quad tegra_secure_entrypoint
+ .quad __BL31_END__ - BL31_BASE
+ .globl __tegra194_system_suspend_state
+__tegra194_system_suspend_state:
+ .quad 0
+
+ .align 4
+__tegra194_mc_context:
+ .rept TEGRA194_MC_CTX_SIZE
+ .quad 0
+ .endr
+ .size __tegra194_cpu_reset_handler_data, \
+ . - __tegra194_cpu_reset_handler_data
+
+ .align 4
+ .globl __tegra194_cpu_reset_handler_end
+__tegra194_cpu_reset_handler_end:
+
+ .globl tegra194_get_cpu_reset_handler_size
+ .globl tegra194_get_cpu_reset_handler_base
+ .globl tegra194_get_mc_ctx_offset
+ .globl tegra194_set_system_suspend_entry
+
+/* return size of the CPU reset handler */
+func tegra194_get_cpu_reset_handler_size
+ adr x0, __tegra194_cpu_reset_handler_end
+ adr x1, tegra194_cpu_reset_handler
+ sub x0, x0, x1
+ ret
+endfunc tegra194_get_cpu_reset_handler_size
+
+/* return the start address of the CPU reset handler */
+func tegra194_get_cpu_reset_handler_base
+ adr x0, tegra194_cpu_reset_handler
+ ret
+endfunc tegra194_get_cpu_reset_handler_base
+
+/* return the offset of the MC context from the reset handler base */
+func tegra194_get_mc_ctx_offset
+ adr x0, __tegra194_mc_context
+ adr x1, tegra194_cpu_reset_handler
+ sub x0, x0, x1
+ ret
+endfunc tegra194_get_mc_ctx_offset
+
+/* set system suspend state before SC7 entry */
+func tegra194_set_system_suspend_entry
+ mov x0, #TEGRA_MC_BASE
+ mov x3, #MC_SECURITY_CFG3_0
+ ldr w1, [x0, x3]
+ lsl x1, x1, #32
+ mov x3, #MC_SECURITY_CFG0_0
+ ldr w2, [x0, x3]
+ orr x3, x1, x2 /* TZDRAM base */
+ adr x0, __tegra194_system_suspend_state
+ adr x1, tegra194_cpu_reset_handler
+ sub x2, x0, x1 /* offset in TZDRAM */
+ mov x0, #TEGRA194_STATE_SYSTEM_SUSPEND
+ lsl x0, x0, #16
+ add x0, x0, #TEGRA194_STATE_SYSTEM_SUSPEND
+ str x0, [x3, x2] /* set value in TZDRAM */
+ dsb sy
+ ret
+endfunc tegra194_set_system_suspend_entry
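The suspend/resume magic values are replicated into both halves of the state word, worked out here:

	/*
	 * Worked encoding: the 16-bit magic appears in both halves, e.g.
	 *   (0x5C7  << 16) + 0x5C7  = 0x05C705C7  (system suspend)
	 *   (0x600D << 16) + 0x600D = 0x600D600D  (system resume)
	 */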
diff --git a/plat/nvidia/tegra/soc/t194/platform_t194.mk b/plat/nvidia/tegra/soc/t194/platform_t194.mk
new file mode 100644
index 0000000..e6e0b5e
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t194/platform_t194.mk
@@ -0,0 +1,88 @@
+#
+# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include common/fdt_wrappers.mk
+
+ARM_ARCH_MAJOR := 8
+ARM_ARCH_MINOR := 2
+
+# platform configs
+ENABLE_CONSOLE_SPE := 1
+$(eval $(call add_define,ENABLE_CONSOLE_SPE))
+
+ENABLE_STRICT_CHECKING_MODE := 1
+$(eval $(call add_define,ENABLE_STRICT_CHECKING_MODE))
+
+USE_GPC_DMA := 1
+$(eval $(call add_define,USE_GPC_DMA))
+
+RESET_TO_BL31 := 1
+
+PROGRAMMABLE_RESET_ADDRESS := 1
+
+COLD_BOOT_SINGLE_CPU := 1
+
+# platform settings
+TZDRAM_BASE := 0x40000000
+$(eval $(call add_define,TZDRAM_BASE))
+
+MAX_XLAT_TABLES := 25
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS := 30
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+# enable RAS handling
+HANDLE_EA_EL3_FIRST_NS := 1
+ENABLE_FEAT_RAS := 1
+
+# platform files
+PLAT_INCLUDES += -Iplat/nvidia/tegra/include/t194 \
+ -I${SOC_DIR}/drivers/include
+
+BL31_SOURCES += ${TEGRA_GICv2_SOURCES} \
+ drivers/ti/uart/aarch64/16550_console.S \
+ lib/cpus/aarch64/denver.S \
+ ${TEGRA_DRIVERS}/bpmp_ipc/intf.c \
+ ${TEGRA_DRIVERS}/bpmp_ipc/ivc.c \
+ ${TEGRA_DRIVERS}/memctrl/memctrl_v2.c \
+ ${TEGRA_DRIVERS}/smmu/smmu.c \
+ ${SOC_DIR}/drivers/mce/mce.c \
+ ${SOC_DIR}/drivers/mce/nvg.c \
+ ${SOC_DIR}/drivers/mce/aarch64/nvg_helpers.S \
+ ${SOC_DIR}/drivers/se/se.c \
+ ${SOC_DIR}/plat_memctrl.c \
+ ${SOC_DIR}/plat_psci_handlers.c \
+ ${SOC_DIR}/plat_setup.c \
+ ${SOC_DIR}/plat_secondary.c \
+ ${SOC_DIR}/plat_sip_calls.c \
+ ${SOC_DIR}/plat_smmu.c \
+ ${SOC_DIR}/plat_trampoline.S
+
+ifeq (${USE_GPC_DMA}, 1)
+BL31_SOURCES += ${TEGRA_DRIVERS}/gpcdma/gpcdma.c
+endif
+
+ifeq (${ENABLE_CONSOLE_SPE},1)
+BL31_SOURCES += ${TEGRA_DRIVERS}/spe/shared_console.S
+endif
+
+# RAS sources
+ifeq (${ENABLE_FEAT_RAS}-${HANDLE_EA_EL3_FIRST_NS},1-1)
+BL31_SOURCES += lib/extensions/ras/std_err_record.c \
+ lib/extensions/ras/ras_common.c \
+ ${SOC_DIR}/plat_ras.c
+endif
+
+# SPM dispatcher
+ifeq (${SPD},spmd)
+include lib/libfdt/libfdt.mk
+# sources to support spmd
+BL31_SOURCES += plat/common/plat_spmd_manifest.c \
+ ${LIBFDT_SRCS}
+
+BL31_SOURCES += ${FDT_WRAPPERS_SOURCES}
+endif
diff --git a/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h b/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h
new file mode 100644
index 0000000..c44b0fc
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h
@@ -0,0 +1,663 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SE_PRIVATE_H
+#define SE_PRIVATE_H
+
+#include <stdbool.h>
+#include <security_engine.h>
+
+/*
+ * PMC registers
+ */
+
+/* SC7 context save scratch register for T210 */
+#define PMC_SCRATCH43_REG_OFFSET U(0x22C)
+
+/* Secure scratch registers */
+#define PMC_SECURE_SCRATCH4_OFFSET 0xC0U
+#define PMC_SECURE_SCRATCH5_OFFSET 0xC4U
+#define PMC_SECURE_SCRATCH6_OFFSET 0x224U
+#define PMC_SECURE_SCRATCH7_OFFSET 0x228U
+#define PMC_SECURE_SCRATCH116_OFFSET 0xB28U
+#define PMC_SECURE_SCRATCH117_OFFSET 0xB2CU
+#define PMC_SECURE_SCRATCH120_OFFSET 0xB38U
+#define PMC_SECURE_SCRATCH121_OFFSET 0xB3CU
+#define PMC_SECURE_SCRATCH122_OFFSET 0xB40U
+#define PMC_SECURE_SCRATCH123_OFFSET 0xB44U
+
+/*
+ * AHB arbitration memory write queue
+ */
+#define ARAHB_MEM_WRQUE_MST_ID_OFFSET 0xFCU
+#define ARAHB_MST_ID_SE2_MASK (0x1U << 13)
+#define ARAHB_MST_ID_SE_MASK (0x1U << 14)
+
+/*
+ * SE registers
+ */
+#define TEGRA_SE_AES_KEYSLOT_COUNT 16
+#define SE_MAX_LAST_BLOCK_SIZE 0xFFFFF
+
+/* SE Status register */
+#define SE_STATUS_OFFSET 0x800U
+#define SE_STATUS_SHIFT 0
+#define SE_STATUS_IDLE \
+ ((0U) << SE_STATUS_SHIFT)
+#define SE_STATUS_BUSY \
+ ((1U) << SE_STATUS_SHIFT)
+#define SE_STATUS(x) \
+ ((x) & ((0x3U) << SE_STATUS_SHIFT))
+
+#define SE_MEM_INTERFACE_SHIFT 2
+#define SE_MEM_INTERFACE_IDLE 0
+#define SE_MEM_INTERFACE_BUSY 1
+#define SE_MEM_INTERFACE(x)	((x) << SE_MEM_INTERFACE_SHIFT)
+
+/* SE register definitions */
+#define SE_SECURITY_REG_OFFSET 0x0
+#define SE_SECURITY_TZ_LOCK_SOFT_SHIFT 5
+#define SE_SECURE 0x0
+#define SE_SECURITY_TZ_LOCK_SOFT(x) ((x) << SE_SECURITY_TZ_LOCK_SOFT_SHIFT)
+
+#define SE_SEC_ENG_DIS_SHIFT 1
+#define SE_DISABLE_FALSE 0
+#define SE_DISABLE_TRUE 1
+#define SE_SEC_ENG_DISABLE(x)	((x) << SE_SEC_ENG_DIS_SHIFT)
+
+/* SE config register */
+#define SE_CONFIG_REG_OFFSET 0x14U
+#define SE_CONFIG_ENC_ALG_SHIFT 12
+#define SE_CONFIG_ENC_ALG_AES_ENC \
+ ((1U) << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_ENC_ALG_RNG \
+ ((2U) << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_ENC_ALG_SHA \
+ ((3U) << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_ENC_ALG_RSA \
+ ((4U) << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_ENC_ALG_NOP \
+ ((0U) << SE_CONFIG_ENC_ALG_SHIFT)
+#define SE_CONFIG_ENC_ALG(x) \
+ ((x) & ((0xFU) << SE_CONFIG_ENC_ALG_SHIFT))
+
+#define SE_CONFIG_DEC_ALG_SHIFT 8
+#define SE_CONFIG_DEC_ALG_AES \
+ ((1U) << SE_CONFIG_DEC_ALG_SHIFT)
+#define SE_CONFIG_DEC_ALG_NOP \
+ ((0U) << SE_CONFIG_DEC_ALG_SHIFT)
+#define SE_CONFIG_DEC_ALG(x) \
+ ((x) & ((0xFU) << SE_CONFIG_DEC_ALG_SHIFT))
+
+#define SE_CONFIG_DST_SHIFT 2
+#define SE_CONFIG_DST_MEMORY \
+ ((0U) << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_DST_HASHREG \
+ ((1U) << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_DST_KEYTAB \
+ ((2U) << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_DST_SRK \
+ ((3U) << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_DST_RSAREG \
+ ((4U) << SE_CONFIG_DST_SHIFT)
+#define SE_CONFIG_DST(x) \
+ ((x) & ((0x7U) << SE_CONFIG_DST_SHIFT))
+
+#define SE_CONFIG_ENC_MODE_SHIFT 24
+#define SE_CONFIG_ENC_MODE_KEY128 \
+ ((0UL) << SE_CONFIG_ENC_MODE_SHIFT)
+#define SE_CONFIG_ENC_MODE_KEY192 \
+ ((1UL) << SE_CONFIG_ENC_MODE_SHIFT)
+#define SE_CONFIG_ENC_MODE_KEY256 \
+ ((2UL) << SE_CONFIG_ENC_MODE_SHIFT)
+#define SE_CONFIG_ENC_MODE_SHA1 \
+ ((0UL) << SE_CONFIG_ENC_MODE_SHIFT)
+#define SE_CONFIG_ENC_MODE_SHA224 \
+ ((4UL) << SE_CONFIG_ENC_MODE_SHIFT)
+#define SE_CONFIG_ENC_MODE_SHA256 \
+ ((5UL) << SE_CONFIG_ENC_MODE_SHIFT)
+#define SE_CONFIG_ENC_MODE_SHA384 \
+ ((6UL) << SE_CONFIG_ENC_MODE_SHIFT)
+#define SE_CONFIG_ENC_MODE_SHA512 \
+ ((7UL) << SE_CONFIG_ENC_MODE_SHIFT)
+#define SE_CONFIG_ENC_MODE(x)\
+ ((x) & ((0xFFUL) << SE_CONFIG_ENC_MODE_SHIFT))
+
+#define SE_CONFIG_DEC_MODE_SHIFT 16
+#define SE_CONFIG_DEC_MODE_KEY128 \
+ ((0UL) << SE_CONFIG_DEC_MODE_SHIFT)
+#define SE_CONFIG_DEC_MODE_KEY192 \
+ ((1UL) << SE_CONFIG_DEC_MODE_SHIFT)
+#define SE_CONFIG_DEC_MODE_KEY256 \
+ ((2UL) << SE_CONFIG_DEC_MODE_SHIFT)
+#define SE_CONFIG_DEC_MODE_SHA1 \
+ ((0UL) << SE_CONFIG_DEC_MODE_SHIFT)
+#define SE_CONFIG_DEC_MODE_SHA224 \
+ ((4UL) << SE_CONFIG_DEC_MODE_SHIFT)
+#define SE_CONFIG_DEC_MODE_SHA256 \
+ ((5UL) << SE_CONFIG_DEC_MODE_SHIFT)
+#define SE_CONFIG_DEC_MODE_SHA384 \
+ ((6UL) << SE_CONFIG_DEC_MODE_SHIFT)
+#define SE_CONFIG_DEC_MODE_SHA512 \
+ ((7UL) << SE_CONFIG_DEC_MODE_SHIFT)
+#define SE_CONFIG_DEC_MODE(x)\
+ ((x) & ((0xFFUL) << SE_CONFIG_DEC_MODE_SHIFT))
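As a composition sketch (an illustrative combination, not lifted from driver code): an AES-256 encrypt with output to memory ORs the field encodings together:

	uint32_t se_config = SE_CONFIG_ENC_ALG_AES_ENC |	/* ENC_ALG = AES */
			     SE_CONFIG_ENC_MODE_KEY256 |	/* 256-bit key */
			     SE_CONFIG_DEC_ALG_NOP |		/* no decrypt */
			     SE_CONFIG_DST_MEMORY;		/* DST = memory */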
+
+/* DRBG random number generator config */
+#define SE_RNG_CONFIG_REG_OFFSET 0x340
+
+#define DRBG_MODE_SHIFT 0
+#define DRBG_MODE_NORMAL \
+ ((0U) << DRBG_MODE_SHIFT)
+#define DRBG_MODE_FORCE_INSTANTION \
+ ((1U) << DRBG_MODE_SHIFT)
+#define DRBG_MODE_FORCE_RESEED \
+ ((2U) << DRBG_MODE_SHIFT)
+#define SE_RNG_CONFIG_MODE(x) \
+ ((x) & ((0x3U) << DRBG_MODE_SHIFT))
+
+#define DRBG_SRC_SHIFT 2
+#define DRBG_SRC_NONE \
+ ((0U) << DRBG_SRC_SHIFT)
+#define DRBG_SRC_ENTROPY \
+ ((1U) << DRBG_SRC_SHIFT)
+#define DRBG_SRC_LFSR \
+ ((2U) << DRBG_SRC_SHIFT)
+#define SE_RNG_SRC_CONFIG_MODE(x) \
+ ((x) & ((0x3U) << DRBG_SRC_SHIFT))
+
+/* DRBG random number generator entropy config */
+
+#define SE_RNG_SRC_CONFIG_REG_OFFSET 0x344U
+
+#define DRBG_RO_ENT_SRC_SHIFT 1
+#define DRBG_RO_ENT_SRC_ENABLE \
+ ((1U) << DRBG_RO_ENT_SRC_SHIFT)
+#define DRBG_RO_ENT_SRC_DISABLE \
+ ((0U) << DRBG_RO_ENT_SRC_SHIFT)
+#define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x) \
+ ((x) & ((0x1U) << DRBG_RO_ENT_SRC_SHIFT))
+
+#define DRBG_RO_ENT_SRC_LOCK_SHIFT 0
+#define DRBG_RO_ENT_SRC_LOCK_ENABLE \
+ ((1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)
+#define DRBG_RO_ENT_SRC_LOCK_DISABLE \
+ ((0U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)
+#define SE_RNG_SRC_CONFIG_RO_ENT_SRC_LOCK(x) \
+ ((x) & ((0x1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT))
+
+#define DRBG_RO_ENT_IGNORE_MEM_SHIFT 12
+#define DRBG_RO_ENT_IGNORE_MEM_ENABLE \
+ ((1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)
+#define DRBG_RO_ENT_IGNORE_MEM_DISABLE \
+ ((0U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)
+#define SE_RNG_SRC_CONFIG_RO_ENT_IGNORE_MEM(x) \
+ ((x) & ((0x1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT))
+
+#define SE_RNG_RESEED_INTERVAL_REG_OFFSET 0x348
+
+/* SE CRYPTO */
+#define SE_CRYPTO_REG_OFFSET 0x304
+#define SE_CRYPTO_HASH_SHIFT 0
+#define SE_CRYPTO_HASH_DISABLE \
+ ((0U) << SE_CRYPTO_HASH_SHIFT)
+#define SE_CRYPTO_HASH_ENABLE \
+ ((1U) << SE_CRYPTO_HASH_SHIFT)
+
+#define SE_CRYPTO_XOR_POS_SHIFT 1
+#define SE_CRYPTO_XOR_BYPASS \
+ ((0U) << SE_CRYPTO_XOR_POS_SHIFT)
+#define SE_CRYPTO_XOR_TOP \
+ ((2U) << SE_CRYPTO_XOR_POS_SHIFT)
+#define SE_CRYPTO_XOR_BOTTOM \
+ ((3U) << SE_CRYPTO_XOR_POS_SHIFT)
+
+#define SE_CRYPTO_INPUT_SEL_SHIFT 3
+#define SE_CRYPTO_INPUT_AHB \
+ ((0U) << SE_CRYPTO_INPUT_SEL_SHIFT)
+#define SE_CRYPTO_INPUT_RANDOM \
+ ((1U) << SE_CRYPTO_INPUT_SEL_SHIFT)
+#define SE_CRYPTO_INPUT_AESOUT \
+ ((2U) << SE_CRYPTO_INPUT_SEL_SHIFT)
+#define SE_CRYPTO_INPUT_LNR_CTR \
+ ((3U) << SE_CRYPTO_INPUT_SEL_SHIFT)
+
+#define SE_CRYPTO_VCTRAM_SEL_SHIFT 5
+#define SE_CRYPTO_VCTRAM_AHB \
+ ((0U) << SE_CRYPTO_VCTRAM_SEL_SHIFT)
+#define SE_CRYPTO_VCTRAM_AESOUT \
+ ((2U) << SE_CRYPTO_VCTRAM_SEL_SHIFT)
+#define SE_CRYPTO_VCTRAM_PREVAHB \
+ ((3U) << SE_CRYPTO_VCTRAM_SEL_SHIFT)
+
+#define SE_CRYPTO_IV_SEL_SHIFT 7
+#define SE_CRYPTO_IV_ORIGINAL \
+ ((0U) << SE_CRYPTO_IV_SEL_SHIFT)
+#define SE_CRYPTO_IV_UPDATED \
+ ((1U) << SE_CRYPTO_IV_SEL_SHIFT)
+
+#define SE_CRYPTO_CORE_SEL_SHIFT 8
+#define SE_CRYPTO_CORE_DECRYPT \
+ ((0U) << SE_CRYPTO_CORE_SEL_SHIFT)
+#define SE_CRYPTO_CORE_ENCRYPT \
+ ((1U) << SE_CRYPTO_CORE_SEL_SHIFT)
+
+#define SE_CRYPTO_KEY_INDEX_SHIFT 24
+#define SE_CRYPTO_KEY_INDEX(x)	((x) << SE_CRYPTO_KEY_INDEX_SHIFT)
+
+#define SE_CRYPTO_MEMIF_SHIFT 31
+#define SE_CRYPTO_MEMIF_AHB \
+		((0U) << SE_CRYPTO_MEMIF_SHIFT)
+#define SE_CRYPTO_MEMIF_MCCIF \
+		((1U) << SE_CRYPTO_MEMIF_SHIFT)
+
+/* KEY TABLE */
+#define SE_KEYTABLE_REG_OFFSET 0x31C
+
+/* KEYIV PKT - key slot */
+#define SE_KEYTABLE_SLOT_SHIFT 4
+#define SE_KEYTABLE_SLOT(x) ((x) << SE_KEYTABLE_SLOT_SHIFT)
+
+/* KEYIV PKT - KEYIV select */
+#define SE_KEYIV_PKT_KEYIV_SEL_SHIFT 3
+#define SE_CRYPTO_KEYIV_KEY \
+ ((0U) << SE_KEYIV_PKT_KEYIV_SEL_SHIFT)
+#define SE_CRYPTO_KEYIV_IVS \
+ ((1U) << SE_KEYIV_PKT_KEYIV_SEL_SHIFT)
+
+/* KEYIV PKT - IV select */
+#define SE_KEYIV_PKT_IV_SEL_SHIFT 2
+#define SE_CRYPTO_KEYIV_IVS_OIV \
+ ((0U) << SE_KEYIV_PKT_IV_SEL_SHIFT)
+#define SE_CRYPTO_KEYIV_IVS_UIV \
+ ((1U) << SE_KEYIV_PKT_IV_SEL_SHIFT)
+
+/* KEYIV PKT - key word */
+#define SE_KEYIV_PKT_KEY_WORD_SHIFT 0
+#define SE_KEYIV_PKT_KEY_WORD(x) \
+ ((x) << SE_KEYIV_PKT_KEY_WORD_SHIFT)
+
+/* KEYIV PKT - iv word */
+#define SE_KEYIV_PKT_IV_WORD_SHIFT 0
+#define SE_KEYIV_PKT_IV_WORD(x) \
+ ((x) << SE_KEYIV_PKT_IV_WORD_SHIFT)
+
+/* SE OPERATION */
+#define SE_OPERATION_REG_OFFSET 0x8U
+#define SE_OPERATION_SHIFT 0
+#define SE_OP_ABORT \
+ ((0x0U) << SE_OPERATION_SHIFT)
+#define SE_OP_START \
+ ((0x1U) << SE_OPERATION_SHIFT)
+#define SE_OP_RESTART \
+ ((0x2U) << SE_OPERATION_SHIFT)
+#define SE_OP_CTX_SAVE \
+ ((0x3U) << SE_OPERATION_SHIFT)
+#define SE_OP_RESTART_IN \
+ ((0x4U) << SE_OPERATION_SHIFT)
+#define SE_OPERATION(x) \
+ ((x) & ((0x7U) << SE_OPERATION_SHIFT))
+
+/* SE CONTEXT */
+#define SE_CTX_SAVE_CONFIG_REG_OFFSET 0x70
+#define SE_CTX_SAVE_WORD_QUAD_SHIFT 0
+#define SE_CTX_SAVE_WORD_QUAD(x) \
+	((x) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
+#define SE_CTX_SAVE_WORD_QUAD_KEYS_0_3 \
+ ((0U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
+#define SE_CTX_SAVE_WORD_QUAD_KEYS_4_7 \
+ ((1U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
+#define SE_CTX_SAVE_WORD_QUAD_ORIG_IV \
+ ((2U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
+#define SE_CTX_SAVE_WORD_QUAD_UPD_IV \
+ ((3U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
+
+#define SE_CTX_SAVE_KEY_INDEX_SHIFT 8
+#define SE_CTX_SAVE_KEY_INDEX(x) ((x) << SE_CTX_SAVE_KEY_INDEX_SHIFT)
+
+#define SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT 24
+#define SE_CTX_SAVE_STICKY_WORD_QUAD_STICKY_0_3 \
+ ((0U) << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT)
+#define SE_CTX_SAVE_STICKY_WORD_QUAD_STICKY_4_7 \
+ ((1U) << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT)
+#define SE_CTX_SAVE_STICKY_WORD_QUAD(x) \
+	((x) << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT)
+
+#define SE_CTX_SAVE_SRC_SHIFT 29
+#define SE_CTX_SAVE_SRC_STICKY_BITS \
+ ((0U) << SE_CTX_SAVE_SRC_SHIFT)
+#define SE_CTX_SAVE_SRC_RSA_KEYTABLE \
+ ((1U) << SE_CTX_SAVE_SRC_SHIFT)
+#define SE_CTX_SAVE_SRC_AES_KEYTABLE \
+ ((2U) << SE_CTX_SAVE_SRC_SHIFT)
+#define SE_CTX_SAVE_SRC_PKA1_STICKY_BITS \
+ ((3U) << SE_CTX_SAVE_SRC_SHIFT)
+#define SE_CTX_SAVE_SRC_MEM \
+ ((4U) << SE_CTX_SAVE_SRC_SHIFT)
+#define SE_CTX_SAVE_SRC_SRK \
+ ((6U) << SE_CTX_SAVE_SRC_SHIFT)
+#define SE_CTX_SAVE_SRC_PKA1_KEYTABLE \
+ ((7U) << SE_CTX_SAVE_SRC_SHIFT)
+
+#define SE_CTX_STICKY_WORD_QUAD_SHIFT 24
+#define SE_CTX_STICKY_WORD_QUAD_WORDS_0_3 \
+ ((0U) << SE_CTX_STICKY_WORD_QUAD_SHIFT)
+#define SE_CTX_STICKY_WORD_QUAD_WORDS_4_7 \
+ ((1U) << SE_CTX_STICKY_WORD_QUAD_SHIFT)
+#define SE_CTX_STICKY_WORD_QUAD(x) ((x) << SE_CTX_STICKY_WORD_QUAD_SHIFT)
+
+#define SE_CTX_SAVE_RSA_KEY_INDEX_SHIFT 16
+#define SE_CTX_SAVE_RSA_KEY_INDEX(x) \
+	((x) << SE_CTX_SAVE_RSA_KEY_INDEX_SHIFT)
+
+#define SE_CTX_RSA_WORD_QUAD_SHIFT 12
+#define SE_CTX_RSA_WORD_QUAD(x) \
+	((x) << SE_CTX_RSA_WORD_QUAD_SHIFT)
+
+#define SE_CTX_PKA1_WORD_QUAD_L_SHIFT 0
+#define SE_CTX_PKA1_WORD_QUAD_L_SIZE \
+ ((true ? 4:0) - \
+ (false ? 4:0) + 1)
+#define SE_CTX_PKA1_WORD_QUAD_L(x)\
+ (((x) << SE_CTX_PKA1_WORD_QUAD_L_SHIFT) & 0x1f)
+
+#define SE_CTX_PKA1_WORD_QUAD_H_SHIFT 12
+#define SE_CTX_PKA1_WORD_QUAD_H(x)\
+ ((((x) >> SE_CTX_PKA1_WORD_QUAD_L_SIZE) & 0xf) \
+ << SE_CTX_PKA1_WORD_QUAD_H_SHIFT)
+
+#define SE_RSA_KEY_INDEX_SLOT0_EXP 0
+#define SE_RSA_KEY_INDEX_SLOT0_MOD 1
+#define SE_RSA_KEY_INDEX_SLOT1_EXP 2
+#define SE_RSA_KEY_INDEX_SLOT1_MOD 3
+
+/* SE_CTX_SAVE_AUTO */
+#define SE_CTX_SAVE_AUTO_REG_OFFSET 0x74U
+
+/* Enable */
+#define SE_CTX_SAVE_AUTO_ENABLE_SHIFT 0
+#define SE_CTX_SAVE_AUTO_DIS \
+ ((0U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)
+#define SE_CTX_SAVE_AUTO_EN \
+ ((1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)
+#define SE_CTX_SAVE_AUTO_ENABLE(x) \
+ ((x) & ((0x1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT))
+
+/* Lock */
+#define SE_CTX_SAVE_AUTO_LOCK_SHIFT 8
+#define SE_CTX_SAVE_AUTO_LOCK_EN \
+ ((1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)
+#define SE_CTX_SAVE_AUTO_LOCK_DIS \
+ ((0U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)
+#define SE_CTX_SAVE_AUTO_LOCK(x) \
+ ((x) & ((0x1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT))
+
+/* Current context save number of blocks */
+#define SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT 16
+#define SE_CTX_SAVE_AUTO_CURR_CNT_MASK 0x3FFU
+#define SE_CTX_SAVE_GET_BLK_COUNT(x) \
+ (((x) >> SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT) & \
+ SE_CTX_SAVE_AUTO_CURR_CNT_MASK)
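+
+/*
+ * Informative example: if SE_CTX_SAVE_AUTO reads back as 0x00850101,
+ * SE_CTX_SAVE_GET_BLK_COUNT() yields (0x00850101 >> 16) & 0x3FF =
+ * 0x85 = 133 blocks, i.e. SE_CTX_SAVE_SIZE_BLOCKS_SE1 below.
+ */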
+
+#define SE_CTX_SAVE_SIZE_BLOCKS_SE1 133
+#define SE_CTX_SAVE_SIZE_BLOCKS_SE2 646
+
+/* SE TZRAM OPERATION - only for SE1 */
+#define SE_TZRAM_OPERATION 0x540U
+
+#define SE_TZRAM_OP_MODE_SHIFT 1
+#define SE_TZRAM_OP_COMMAND_INIT 1
+#define SE_TZRAM_OP_COMMAND_SHIFT 0
+#define SE_TZRAM_OP_MODE_SAVE \
+ ((0U) << SE_TZRAM_OP_MODE_SHIFT)
+#define SE_TZRAM_OP_MODE_RESTORE \
+ ((1U) << SE_TZRAM_OP_MODE_SHIFT)
+#define SE_TZRAM_OP_MODE(x) \
+ ((x) & ((0x1U) << SE_TZRAM_OP_MODE_SHIFT))
+
+#define SE_TZRAM_OP_BUSY_SHIFT 2
+#define SE_TZRAM_OP_BUSY_OFF \
+ ((0U) << SE_TZRAM_OP_BUSY_SHIFT)
+#define SE_TZRAM_OP_BUSY_ON \
+ ((1U) << SE_TZRAM_OP_BUSY_SHIFT)
+#define SE_TZRAM_OP_BUSY(x) \
+ ((x) & ((0x1U) << SE_TZRAM_OP_BUSY_SHIFT))
+
+#define SE_TZRAM_OP_REQ_SHIFT 0
+#define SE_TZRAM_OP_REQ_IDLE \
+ ((0U) << SE_TZRAM_OP_REQ_SHIFT)
+#define SE_TZRAM_OP_REQ_INIT \
+ ((1U) << SE_TZRAM_OP_REQ_SHIFT)
+#define SE_TZRAM_OP_REQ(x) \
+ ((x) & ((0x1U) << SE_TZRAM_OP_REQ_SHIFT))
+
+/* SE Interrupt */
+#define SE_INT_ENABLE_REG_OFFSET U(0xC)
+#define SE_INT_STATUS_REG_OFFSET 0x10U
+#define SE_INT_OP_DONE_SHIFT 4
+#define SE_INT_OP_DONE_CLEAR \
+ ((0U) << SE_INT_OP_DONE_SHIFT)
+#define SE_INT_OP_DONE_ACTIVE \
+ ((1U) << SE_INT_OP_DONE_SHIFT)
+#define SE_INT_OP_DONE(x) \
+ ((x) & ((0x1U) << SE_INT_OP_DONE_SHIFT))
+
+/* SE TZRAM SECURITY */
+#define SE_TZRAM_SEC_REG_OFFSET 0x4
+
+#define SE_TZRAM_SEC_SETTING_SHIFT 0
+#define SE_TZRAM_SECURE \
+ ((0UL) << SE_TZRAM_SEC_SETTING_SHIFT)
+#define SE_TZRAM_NONSECURE \
+ ((1UL) << SE_TZRAM_SEC_SETTING_SHIFT)
+#define SE_TZRAM_SEC_SETTING(x) \
+ ((x) & ((0x1UL) << SE_TZRAM_SEC_SETTING_SHIFT))
+
+/* PKA1 KEY SLOTS */
+#define TEGRA_SE_PKA1_KEYSLOT_COUNT 4
+
+/* SE error status */
+#define SE_ERR_STATUS_REG_OFFSET 0x804U
+#define SE_CRYPTO_KEYTABLE_DST_REG_OFFSET 0x330
+#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT 0
+#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(x) \
+ ((x) << SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT)
+
+#define SE_KEY_INDEX_SHIFT 8
+#define SE_CRYPTO_KEYTABLE_DST_KEY_INDEX(x) ((x) << SE_KEY_INDEX_SHIFT)
+
+/* SE linked list (LL) register */
+#define SE_IN_LL_ADDR_REG_OFFSET 0x18U
+#define SE_OUT_LL_ADDR_REG_OFFSET 0x24U
+#define SE_BLOCK_COUNT_REG_OFFSET 0x318U
+
+/* AES data sizes */
+#define TEGRA_SE_KEY_256_SIZE 32
+#define TEGRA_SE_KEY_192_SIZE 24
+#define TEGRA_SE_KEY_128_SIZE 16
+#define TEGRA_SE_AES_BLOCK_SIZE 16
+#define TEGRA_SE_AES_MIN_KEY_SIZE 16
+#define TEGRA_SE_AES_MAX_KEY_SIZE 32
+#define TEGRA_SE_AES_IV_SIZE 16
+
+#define TEGRA_SE_RNG_IV_SIZE 16
+#define TEGRA_SE_RNG_DT_SIZE 16
+#define TEGRA_SE_RNG_KEY_SIZE 16
+#define TEGRA_SE_RNG_SEED_SIZE (TEGRA_SE_RNG_IV_SIZE + \
+ TEGRA_SE_RNG_KEY_SIZE + \
+ TEGRA_SE_RNG_DT_SIZE)
+#define TEGRA_SE_RSA512_DIGEST_SIZE 64
+#define TEGRA_SE_RSA1024_DIGEST_SIZE 128
+#define TEGRA_SE_RSA1536_DIGEST_SIZE 192
+#define TEGRA_SE_RSA2048_DIGEST_SIZE 256
+
+#define SE_KEY_TABLE_ACCESS_REG_OFFSET 0x284
+#define SE_KEY_READ_DISABLE_SHIFT 0
+
+#define SE_CTX_BUFER_SIZE 1072
+#define SE_CTX_DRBG_BUFER_SIZE 2112
+
+/* SE blobs size in bytes */
+#define SE_CTX_SAVE_RSA_KEY_LENGTH 1024
+#define SE_CTX_SAVE_RANDOM_DATA_SIZE 16
+#define SE_CTX_SAVE_STICKY_BITS_SIZE 16
+#define SE2_CONTEXT_SAVE_PKA1_STICKY_BITS_LENGTH 16
+#define SE2_CONTEXT_SAVE_PKA1_KEYS_LENGTH 8192
+#define SE_CTX_KNOWN_PATTERN_SIZE 16
+#define SE_CTX_KNOWN_PATTERN_SIZE_WORDS (SE_CTX_KNOWN_PATTERN_SIZE/4)
+
+/* SE RSA */
+#define TEGRA_SE_RSA_KEYSLOT_COUNT 2
+#define SE_RSA_KEY_SIZE_REG_OFFSET 0x404
+#define SE_RSA_EXP_SIZE_REG_OFFSET 0x408
+#define SE_RSA_MAX_EXP_BIT_SIZE 2048
+#define SE_RSA_MAX_EXP_SIZE32 \
+ (SE_RSA_MAX_EXP_BIT_SIZE >> 5)
+#define SE_RSA_MAX_MOD_BIT_SIZE 2048
+#define SE_RSA_MAX_MOD_SIZE32 \
+ (SE_RSA_MAX_MOD_BIT_SIZE >> 5)
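+
+/*
+ * Informative arithmetic: a 2048-bit exponent or modulus occupies
+ * 2048 >> 5 = 64 32-bit words, so one RSA key slot holds
+ * 64 + 64 = 128 words (see tegra_se_rsa_key_slot_t below).
+ */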
+
+/* SE_RSA_KEYTABLE_ADDR */
+#define SE_RSA_KEYTABLE_ADDR 0x420
+#define RSA_KEY_PKT_WORD_ADDR_SHIFT 0
+#define RSA_KEY_PKT_EXPMOD_SEL_SHIFT \
+ ((6U) << RSA_KEY_PKT_WORD_ADDR_SHIFT)
+#define RSA_KEY_MOD \
+ ((1U) << RSA_KEY_PKT_EXPMOD_SEL_SHIFT)
+#define RSA_KEY_EXP \
+ ((0U) << RSA_KEY_PKT_EXPMOD_SEL_SHIFT)
+#define RSA_KEY_PKT_SLOT_SHIFT 7
+#define RSA_KEY_SLOT_1 \
+ ((0U) << RSA_KEY_PKT_SLOT_SHIFT)
+#define RSA_KEY_SLOT_2 \
+ ((1U) << RSA_KEY_PKT_SLOT_SHIFT)
+#define RSA_KEY_PKT_INPUT_MODE_SHIFT 8
+#define RSA_KEY_REG_INPUT \
+ ((0U) << RSA_KEY_PKT_INPUT_MODE_SHIFT)
+#define RSA_KEY_DMA_INPUT \
+ ((1U) << RSA_KEY_PKT_INPUT_MODE_SHIFT)
+
+/* SE_RSA_KEYTABLE_DATA */
+#define SE_RSA_KEYTABLE_DATA 0x424
+
+/* SE_RSA_CONFIG register */
+#define SE_RSA_CONFIG 0x400
+#define RSA_KEY_SLOT_SHIFT 24
+#define RSA_KEY_SLOT(x) \
+ ((x) << RSA_KEY_SLOT_SHIFT)
+
+/*******************************************************************************
+ * Structure definition
+ ******************************************************************************/
+
+/* SE context blob */
+#pragma pack(push, 1)
+typedef struct tegra_aes_key_slot {
+ /* 0 - 7 AES key */
+ uint32_t key[8];
+ /* 8 - 11 Original IV */
+ uint32_t oiv[4];
+ /* 12 - 15 Updated IV */
+ uint32_t uiv[4];
+} tegra_se_aes_key_slot_t;
+#pragma pack(pop)
+
+#pragma pack(push, 1)
+typedef struct tegra_se_context {
+ /* random number */
+ unsigned char rand_data[SE_CTX_SAVE_RANDOM_DATA_SIZE];
+ /* Sticky bits */
+ unsigned char sticky_bits[SE_CTX_SAVE_STICKY_BITS_SIZE * 2];
+ /* AES key slots */
+ tegra_se_aes_key_slot_t key_slots[TEGRA_SE_AES_KEYSLOT_COUNT];
+ /* RSA key slots */
+ unsigned char rsa_keys[SE_CTX_SAVE_RSA_KEY_LENGTH];
+} tegra_se_context_t;
+#pragma pack(pop)
+
+/* PKA context blob */
+#pragma pack(push, 1)
+typedef struct tegra_pka_context {
+ unsigned char sticky_bits[SE2_CONTEXT_SAVE_PKA1_STICKY_BITS_LENGTH];
+ unsigned char pka_keys[SE2_CONTEXT_SAVE_PKA1_KEYS_LENGTH];
+} tegra_pka_context_t;
+#pragma pack(pop)
+
+/* SE context blob */
+#pragma pack(push, 1)
+typedef struct tegra_se_context_blob {
+ /* SE context */
+ tegra_se_context_t se_ctx;
+ /* Known Pattern */
+ unsigned char known_pattern[SE_CTX_KNOWN_PATTERN_SIZE];
+} tegra_se_context_blob_t;
+#pragma pack(pop)
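+
+/*
+ * Informative consistency check (a sketch, not part of the driver).
+ * Assuming TEGRA_SE_AES_KEYSLOT_COUNT is 16, as the context layout in
+ * security_engine.c indicates, the packed blob occupies
+ *   16 (random) + 32 (sticky) + 16*64 (AES slots) + 1024 (RSA keys)
+ *   + 16 (known pattern) = 2112 bytes = SE_CTX_DRBG_BUFER_SIZE.
+ * A build-time check could use TF-A's CASSERT macro from
+ * <lib/cassert.h>:
+ *
+ *   CASSERT(sizeof(tegra_se_context_blob_t) == SE_CTX_DRBG_BUFER_SIZE,
+ *           assert_se_ctx_blob_size);
+ */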
+
+/* SE2 and PKA1 context blob */
+#pragma pack(push, 1)
+typedef struct tegra_se2_context_blob {
+ /* SE2 context */
+ tegra_se_context_t se_ctx;
+ /* PKA1 context */
+ tegra_pka_context_t pka_ctx;
+ /* Known Pattern */
+ unsigned char known_pattern[SE_CTX_KNOWN_PATTERN_SIZE];
+} tegra_se2_context_blob_t;
+#pragma pack(pop)
+
+/* SE AES key type 128bit, 192bit, 256bit */
+typedef enum {
+ SE_AES_KEY128,
+ SE_AES_KEY192,
+ SE_AES_KEY256,
+} tegra_se_aes_key_type_t;
+
+/* SE RSA key slot */
+typedef struct tegra_se_rsa_key_slot {
+ /* 0 - 63 exponent key */
+ uint32_t exponent[SE_RSA_MAX_EXP_SIZE32];
+ /* 64 - 127 modulus key */
+ uint32_t modulus[SE_RSA_MAX_MOD_SIZE32];
+} tegra_se_rsa_key_slot_t;
+
+/*******************************************************************************
+ * Inline functions definition
+ ******************************************************************************/
+
+static inline uint32_t tegra_se_read_32(const tegra_se_dev_t *dev, uint32_t offset)
+{
+ return mmio_read_32(dev->se_base + offset);
+}
+
+static inline void tegra_se_write_32(const tegra_se_dev_t *dev, uint32_t offset, uint32_t val)
+{
+ mmio_write_32(dev->se_base + offset, val);
+}
+
+static inline uint32_t tegra_pka_read_32(tegra_pka_dev_t *dev, uint32_t offset)
+{
+ return mmio_read_32(dev->pka_base + offset);
+}
+
+static inline void tegra_pka_write_32(tegra_pka_dev_t *dev, uint32_t offset,
+uint32_t val)
+{
+ mmio_write_32(dev->pka_base + offset, val);
+}
+
+/*******************************************************************************
+ * Prototypes
+ ******************************************************************************/
+int tegra_se_start_normal_operation(const tegra_se_dev_t *, uint32_t);
+int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *, uint32_t);
+
+#endif /* SE_PRIVATE_H */
diff --git a/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c b/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c
new file mode 100644
index 0000000..4860858
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c
@@ -0,0 +1,1071 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <errno.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <se_private.h>
+#include <security_engine.h>
+#include <tegra_platform.h>
+
+/*******************************************************************************
+ * Constants and Macros
+ ******************************************************************************/
+
+#define TIMEOUT_100MS 100U /* 100 ms timeout, polled at 1 ms intervals */
+#define RNG_AES_KEY_INDEX 1
+
+/*******************************************************************************
+ * Data structure and global variables
+ ******************************************************************************/
+
+/* The security engine contexts are formatted as follows:
+ *
+ * SE1 CONTEXT:
+ * #--------------------------------#
+ * | Random Data 1 Block |
+ * #--------------------------------#
+ * | Sticky Bits 2 Blocks |
+ * #--------------------------------#
+ * | Key Table 64 Blocks |
+ * | For each Key (x16): |
+ * | Key: 2 Blocks |
+ * | Original-IV: 1 Block |
+ * | Updated-IV: 1 Block |
+ * #--------------------------------#
+ * | RSA Keys 64 Blocks |
+ * #--------------------------------#
+ * | Known Pattern 1 Block |
+ * #--------------------------------#
+ *
+ * SE2/PKA1 CONTEXT:
+ * #--------------------------------#
+ * | Random Data 1 Block |
+ * #--------------------------------#
+ * | Sticky Bits 2 Blocks |
+ * #--------------------------------#
+ * | Key Table 64 Blocks |
+ * | For each Key (x16): |
+ * | Key: 2 Blocks |
+ * | Original-IV: 1 Block |
+ * | Updated-IV: 1 Block |
+ * #--------------------------------#
+ * | RSA Keys 64 Blocks |
+ * #--------------------------------#
+ * | PKA sticky bits 1 Block |
+ * #--------------------------------#
+ * | PKA keys 512 Blocks |
+ * #--------------------------------#
+ * | Known Pattern 1 Block |
+ * #--------------------------------#
+ */
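+
+/*
+ * Informative arithmetic: the SE1 layout above sums to
+ * 1 + 2 + 64 + 64 + 1 = 132 blocks; at 16 bytes per AES block that is
+ * 2112 bytes, which matches SE_CTX_DRBG_BUFER_SIZE and the 132-block
+ * SE1 context buffer below.
+ */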
+
+/* Known pattern data for T210 */
+static const uint8_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE] = {
+ /* 128 bit AES block */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+ };
+
+/* SE input and output linked list buffers */
+static tegra_se_io_lst_t se1_src_ll_buf;
+static tegra_se_io_lst_t se1_dst_ll_buf;
+
+/* SE2 input and output linked list buffers */
+static tegra_se_io_lst_t se2_src_ll_buf;
+static tegra_se_io_lst_t se2_dst_ll_buf;
+
+/* SE1 context buffer, 132 blocks */
+static __aligned(64) uint8_t se1_ctx_buf[SE_CTX_DRBG_BUFER_SIZE];
+
+/* SE1 security engine device handle */
+static tegra_se_dev_t se_dev_1 = {
+ .se_num = 1,
+ /* Setup base address for se */
+ .se_base = TEGRA_SE1_BASE,
+ /* Setup context size in AES blocks */
+ .ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
+ /* Setup SRC buffers for SE operations */
+ .src_ll_buf = &se1_src_ll_buf,
+ /* Setup DST buffers for SE operations */
+ .dst_ll_buf = &se1_dst_ll_buf,
+ /* Setup context save destination */
+ .ctx_save_buf = (uint32_t *)&se1_ctx_buf
+};
+
+/* SE2 security engine device handle (T210B01 only) */
+static tegra_se_dev_t se_dev_2 = {
+ .se_num = 2,
+ /* Setup base address for se */
+ .se_base = TEGRA_SE2_BASE,
+ /* Setup context size in AES blocks */
+ .ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
+ /* Setup SRC buffers for SE operations */
+ .src_ll_buf = &se2_src_ll_buf,
+ /* Setup DST buffers for SE operations */
+ .dst_ll_buf = &se2_dst_ll_buf,
+ /* Setup context save destination */
+ .ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000)
+};
+
+static bool ecid_valid;
+
+/*******************************************************************************
+ * Functions Definition
+ ******************************************************************************/
+
+static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
+{
+ flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
+ sizeof(tegra_se_io_lst_t));
+ flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
+ sizeof(tegra_se_io_lst_t));
+}
+
+/*
+ * Check that the SE operation has completed after kickoff.
+ * This function is invoked after an SE operation has been started,
+ * and it checks the following conditions:
+ * 1. SE_INT_STATUS = SE_OP_DONE
+ * 2. SE_STATUS = IDLE
+ * 3. AHB bus data transfer is complete.
+ * 4. SE_ERR_STATUS is clean.
+ */
+static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
+{
+ uint32_t val = 0;
+ int32_t ret = 0;
+ uint32_t timeout;
+
+ /* Poll the SE interrupt register to ensure H/W operation complete */
+ val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
+ for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
+ (timeout < TIMEOUT_100MS); timeout++) {
+ mdelay(1);
+ val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
+ }
+
+ if (timeout == TIMEOUT_100MS) {
+ ERROR("%s: ERR: Atomic context save operation timeout!\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ }
+
+ /* Poll the SE status idle to ensure H/W operation complete */
+ if (ret == 0) {
+ val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
+ for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
+ timeout++) {
+ mdelay(1);
+ val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
+ }
+
+ if (timeout == TIMEOUT_100MS) {
+ ERROR("%s: ERR: MEM_INTERFACE and SE state "
+ "idle state timeout.\n", __func__);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ /* Check AHB bus transfer complete */
+ if (ret == 0) {
+ val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
+ for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
+ (timeout < TIMEOUT_100MS); timeout++) {
+ mdelay(1);
+ val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
+ }
+
+ if (timeout == TIMEOUT_100MS) {
+ ERROR("%s: SE write over AHB timeout.\n", __func__);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ /* Ensure that no errors are thrown during operation */
+ if (ret == 0) {
+ val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
+ if (val != 0U) {
+ ERROR("%s: error during SE operation! 0x%x", __func__, val);
+ ret = -ENOTSUP;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Wait for SE engine to be idle and clear pending interrupts before
+ * starting the next SE operation.
+ */
+static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
+{
+ int32_t ret = 0;
+ uint32_t val = 0;
+ uint32_t timeout;
+
+ /* disable SE interrupt to prevent interrupt issued by SE operation */
+ tegra_se_write_32(se_dev, SE_INT_ENABLE_REG_OFFSET, 0U);
+
+ /* Wait for previous operation to finish */
+ val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
+ for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
+ mdelay(1);
+ val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
+ }
+
+ if (timeout == TIMEOUT_100MS) {
+ ERROR("%s: ERR: SE status is not idle!\n", __func__);
+ ret = -ETIMEDOUT;
+ }
+
+ /* Clear any pending interrupts from previous operation */
+ val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
+ tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);
+ return ret;
+}
+
+/*
+ * SE atomic context save. At SC7 entry, the SE driver triggers the
+ * hardware to perform the context save operation automatically.
+ */
+static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
+{
+ int32_t ret = 0;
+ uint32_t val = 0;
+ uint32_t blk_count_limit = 0;
+ uint32_t block_count;
+
+ /* Check that previous operation is finalized */
+ ret = tegra_se_operation_prepare(se_dev);
+
+ /* Read the context save progress counter (block_count) and
+ * ensure that no previous context save has been triggered, i.e.
+ * SE_CTX_SAVE_AUTO.CURR_CNT == 0.
+ */
+ if (ret == 0) {
+ val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
+ block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
+ if (block_count != 0U) {
+ ERROR("%s: ctx_save triggered multiple times\n",
+ __func__);
+ ret = -EALREADY;
+ }
+ }
+
+ /* Set the destination block count for when the context save completes */
+ if (ret == 0) {
+ blk_count_limit = block_count + se_dev->ctx_size_blks;
+ }
+
+ /* Program SE_CONFIG register as for RNG operation
+ * SE_CONFIG.ENC_ALG = RNG
+ * SE_CONFIG.DEC_ALG = NOP
+ * SE_CONFIG.ENC_MODE is ignored
+ * SE_CONFIG.DEC_MODE is ignored
+ * SE_CONFIG.DST = MEMORY
+ */
+ if (ret == 0) {
+ val = (SE_CONFIG_ENC_ALG_RNG |
+ SE_CONFIG_DEC_ALG_NOP |
+ SE_CONFIG_DST_MEMORY);
+ tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
+
+ tegra_se_make_data_coherent(se_dev);
+
+ /* SE_CTX_SAVE operation */
+ tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
+ SE_OP_CTX_SAVE);
+
+ ret = tegra_se_operation_complete(se_dev);
+ }
+
+ /* Check that context has written the correct number of blocks */
+ if (ret == 0) {
+ val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
+ if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
+ ERROR("%s: expected %d blocks but %d were written\n",
+ __func__, blk_count_limit, val);
+ ret = -ECANCELED;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Security engine primitive operations, including normal operation
+ * and the context save operation.
+ */
+static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes,
+ bool context_save)
+{
+ uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
+ int ret = 0;
+
+ assert(se_dev);
+
+ /* Use device buffers for in and out */
+ tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
+ tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));
+
+ /* Check that previous operation is finalized */
+ ret = tegra_se_operation_prepare(se_dev);
+ if (ret != 0) {
+ goto op_error;
+ }
+
+ /* Program SE operation size */
+ if (nblocks != 0U) {
+ tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1U);
+ }
+
+ /* Make SE LL data coherent before the SE operation */
+ tegra_se_make_data_coherent(se_dev);
+
+ /* Start hardware operation */
+ if (context_save) {
+ tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_CTX_SAVE);
+ } else {
+ tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
+ }
+
+ /* Wait for operation to finish */
+ ret = tegra_se_operation_complete(se_dev);
+
+op_error:
+ return ret;
+}
+
+/*
+ * Normal security engine operations other than the context save
+ */
+int tegra_se_start_normal_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
+{
+ return tegra_se_perform_operation(se_dev, nbytes, false);
+}
+
+/*
+ * Security engine context save operation
+ */
+int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
+{
+ return tegra_se_perform_operation(se_dev, nbytes, true);
+}
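+
+/*
+ * Informative usage sketch (not part of the driver): every context
+ * save below follows the same pattern - program the source in
+ * SE_CTX_SAVE_CONFIG, point dst_ll_buf at the destination, then start
+ * a CTX_SAVE operation. For the first sticky-bits word quad:
+ *
+ *   se_dev->dst_ll_buf->buffer[0].addr = dst;
+ *   se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_STICKY_BITS_SIZE;
+ *   tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
+ *                     SE_CTX_SAVE_SRC_STICKY_BITS |
+ *                     SE_CTX_SAVE_STICKY_WORD_QUAD(0));
+ *   ret = tegra_se_start_ctx_save_operation(se_dev,
+ *                     SE_CTX_SAVE_STICKY_BITS_SIZE);
+ *
+ * where dst is assumed to be a flat physical address reachable by the
+ * SE DMA engine.
+ */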
+
+/*
+ * Security engine sequence to generate the SRK.
+ * SE and SE2 generate different SRKs from different
+ * entropy seeds.
+ */
+static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
+{
+ int ret = PSCI_E_INTERN_FAIL;
+ uint32_t val;
+
+ /* Configure the following hardware register settings:
+ * SE_CONFIG.DEC_ALG = NOP
+ * SE_CONFIG.ENC_ALG = RNG
+ * SE_CONFIG.DST = SRK
+ * SE_OPERATION.OP = START
+ * SE_CRYPTO_LAST_BLOCK = 0
+ */
+ se_dev->src_ll_buf->last_buff_num = 0;
+ se_dev->dst_ll_buf->last_buff_num = 0;
+
+ /* Configure random number generator */
+ if (ecid_valid) {
+ val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_ENTROPY);
+ } else {
+ val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
+ }
+ tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
+
+ /* Configure output destination = SRK */
+ val = (SE_CONFIG_ENC_ALG_RNG |
+ SE_CONFIG_DEC_ALG_NOP |
+ SE_CONFIG_DST_SRK);
+ tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
+
+ /* Perform hardware operation */
+ ret = tegra_se_start_normal_operation(se_dev, 0);
+
+ return ret;
+}
+
+/*
+ * Generate plain text random data to some memory location using
+ * SE/SE2's SP800-90 random number generator. The random data size
+ * must be some multiple of the AES block size (16 bytes).
+ */
+static int tegra_se_lp_generate_random_data(tegra_se_dev_t *se_dev)
+{
+ int ret = 0;
+ uint32_t val;
+
+ /* Store the random data in the rand_data field of the context save buffer */
+ se_dev->dst_ll_buf->last_buff_num = 0;
+ if (!se_dev->ctx_save_buf) {
+ ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
+ return PSCI_E_NOT_PRESENT;
+ }
+ se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
+ se_dev->ctx_save_buf)->rand_data)));
+ se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_RANDOM_DATA_SIZE;
+
+ /* Configure the following hardware register settings:
+ * SE_CONFIG.DEC_ALG = NOP
+ * SE_CONFIG.ENC_ALG = RNG
+ * SE_CONFIG.ENC_MODE = KEY192
+ * SE_CONFIG.DST = MEMORY
+ */
+ val = (SE_CONFIG_ENC_ALG_RNG |
+ SE_CONFIG_DEC_ALG_NOP |
+ SE_CONFIG_ENC_MODE_KEY192 |
+ SE_CONFIG_DST_MEMORY);
+ tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
+
+ /* Program the RNG options in SE_CRYPTO_CONFIG as follows:
+ * XOR_POS = BYPASS
+ * INPUT_SEL = RANDOM (Entropy or LFSR)
+ * HASH_ENB = DISABLE
+ */
+ val = (SE_CRYPTO_INPUT_RANDOM |
+ SE_CRYPTO_XOR_BYPASS |
+ SE_CRYPTO_CORE_ENCRYPT |
+ SE_CRYPTO_HASH_DISABLE |
+ SE_CRYPTO_KEY_INDEX(RNG_AES_KEY_INDEX) |
+ SE_CRYPTO_IV_ORIGINAL);
+ tegra_se_write_32(se_dev, SE_CRYPTO_REG_OFFSET, val);
+
+ /* Configure RNG */
+ if (ecid_valid) {
+ val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_LFSR);
+ } else {
+ val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_LFSR);
+ }
+ tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
+
+ /* SE normal operation */
+ ret = tegra_se_start_normal_operation(se_dev, SE_CTX_SAVE_RANDOM_DATA_SIZE);
+
+ return ret;
+}
+
+/*
+ * Encrypt memory blocks with SRK as part of the security engine context.
+ * The data blocks include: random data and the known pattern data, where
+ * the random data is the first block and known pattern is the last block.
+ */
+static int tegra_se_lp_data_context_save(tegra_se_dev_t *se_dev,
+ uint64_t src_addr, uint64_t dst_addr, uint32_t data_size)
+{
+ int ret = 0;
+
+ se_dev->src_ll_buf->last_buff_num = 0;
+ se_dev->dst_ll_buf->last_buff_num = 0;
+ se_dev->src_ll_buf->buffer[0].addr = src_addr;
+ se_dev->src_ll_buf->buffer[0].data_len = data_size;
+ se_dev->dst_ll_buf->buffer[0].addr = dst_addr;
+ se_dev->dst_ll_buf->buffer[0].data_len = data_size;
+
+ /* By setting the context source from memory and calling the context save
+ * operation, the SE encrypts the memory data with SRK.
+ */
+ tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, SE_CTX_SAVE_SRC_MEM);
+
+ ret = tegra_se_start_ctx_save_operation(se_dev, data_size);
+
+ return ret;
+}
+
+/*
+ * Context save the key table access control sticky bits and
+ * security status of each key-slot. The encrypted sticky-bits are
+ * 32 bytes (2 AES blocks) and formatted as the following structure:
+ * { bit in registers bit in context save
+ * SECURITY_0[4] 158
+ * SE_RSA_KEYTABLE_ACCESS_1[2:0] 157:155
+ * SE_RSA_KEYTABLE_ACCESS_0[2:0] 154:152
+ * SE_RSA_SECURITY_PERKEY_0[1:0] 151:150
+ * SE_CRYPTO_KEYTABLE_ACCESS_15[7:0] 149:142
+ * ...,
+ * SE_CRYPTO_KEYTABLE_ACCESS_0[7:0] 29:22
+ * SE_CRYPTO_SECURITY_PERKEY_0[15:0] 21:6
+ * SE_TZRAM_SECURITY_0[1:0] 5:4
+ * SE_SECURITY_0[16] 3:3
+ * SE_SECURITY_0[2:0] } 2:0
+ */
+static int tegra_se_lp_sticky_bits_context_save(tegra_se_dev_t *se_dev)
+{
+ int ret = PSCI_E_INTERN_FAIL;
+ uint32_t val = 0;
+
+ se_dev->dst_ll_buf->last_buff_num = 0;
+ if (!se_dev->ctx_save_buf) {
+ ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
+ return PSCI_E_NOT_PRESENT;
+ }
+ se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
+ se_dev->ctx_save_buf)->sticky_bits)));
+ se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_STICKY_BITS_SIZE;
+
+ /*
+ * The 1st AES block saves the sticky-bits context bytes 1 - 16 (words 0 - 3).
+ * The 2nd AES block saves the sticky-bits context bytes 17 - 32 (words 4 - 7).
+ */
+ for (int i = 0; i < 2; i++) {
+ val = SE_CTX_SAVE_SRC_STICKY_BITS |
+ SE_CTX_SAVE_STICKY_WORD_QUAD(i);
+ tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
+
+ /* SE context save operation */
+ ret = tegra_se_start_ctx_save_operation(se_dev,
+ SE_CTX_SAVE_STICKY_BITS_SIZE);
+ if (ret != 0) {
+ break;
+ }
+ se_dev->dst_ll_buf->buffer[0].addr += SE_CTX_SAVE_STICKY_BITS_SIZE;
+ }
+
+ return ret;
+}
+
+static int tegra_se_aeskeytable_context_save(tegra_se_dev_t *se_dev)
+{
+ uint32_t val = 0;
+ int ret = 0;
+
+ se_dev->dst_ll_buf->last_buff_num = 0;
+ if (!se_dev->ctx_save_buf) {
+ ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
+ ret = -EINVAL;
+ goto aes_keytable_save_err;
+ }
+
+ /* AES key context save */
+ for (int slot = 0; slot < TEGRA_SE_AES_KEYSLOT_COUNT; slot++) {
+ se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
+ ((tegra_se_context_t *)se_dev->
+ ctx_save_buf)->key_slots[slot].key)));
+ se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
+ for (int i = 0; i < 2; i++) {
+ val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
+ SE_CTX_SAVE_KEY_INDEX(slot) |
+ SE_CTX_SAVE_WORD_QUAD(i);
+ tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
+
+ /* SE context save operation */
+ ret = tegra_se_start_ctx_save_operation(se_dev,
+ TEGRA_SE_KEY_128_SIZE);
+ if (ret) {
+ ERROR("%s: ERR: AES key CTX_SAVE OP failed, "
+ "slot=%d, word_quad=%d.\n",
+ __func__, slot, i);
+ goto aes_keytable_save_err;
+ }
+ se_dev->dst_ll_buf->buffer[0].addr += TEGRA_SE_KEY_128_SIZE;
+ }
+
+ /* OIV context save */
+ se_dev->dst_ll_buf->last_buff_num = 0;
+ se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
+ ((tegra_se_context_t *)se_dev->
+ ctx_save_buf)->key_slots[slot].oiv)));
+ se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;
+
+ val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
+ SE_CTX_SAVE_KEY_INDEX(slot) |
+ SE_CTX_SAVE_WORD_QUAD_ORIG_IV;
+ tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
+
+ /* SE context save operation */
+ ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
+ if (ret) {
+ ERROR("%s: ERR: OIV CTX_SAVE OP failed, slot=%d.\n",
+ __func__, slot);
+ goto aes_keytable_save_err;
+ }
+
+ /* UIV context save */
+ se_dev->dst_ll_buf->last_buff_num = 0;
+ se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
+ ((tegra_se_context_t *)se_dev->
+ ctx_save_buf)->key_slots[slot].uiv)));
+ se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;
+
+ val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
+ SE_CTX_SAVE_KEY_INDEX(slot) |
+ SE_CTX_SAVE_WORD_QUAD_UPD_IV;
+ tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
+
+ /* SE context save operation */
+ ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
+ if (ret) {
+ ERROR("%s: ERR: UIV CTX_SAVE OP failed, slot=%d\n",
+ __func__, slot);
+ goto aes_keytable_save_err;
+ }
+ }
+
+aes_keytable_save_err:
+ return ret;
+}
+
+static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t *se_dev)
+{
+ uint32_t val = 0;
+ int ret = 0;
+ /* For T210, the modulus must be encrypted and saved first,
+ * followed by the exponent. This is repeated for slot 0
+ * and slot 1; hence the order:
+ * SLOT 0 modulus : RSA_KEY_INDEX : 1
+ * SLOT 0 exponent : RSA_KEY_INDEX : 0
+ * SLOT 1 modulus : RSA_KEY_INDEX : 3
+ * SLOT 1 exponent : RSA_KEY_INDEX : 2
+ */
+ const unsigned int key_index_mod[TEGRA_SE_RSA_KEYSLOT_COUNT][2] = {
+ /* RSA key slot 0 */
+ {SE_RSA_KEY_INDEX_SLOT0_MOD, SE_RSA_KEY_INDEX_SLOT0_EXP},
+ /* RSA key slot 1 */
+ {SE_RSA_KEY_INDEX_SLOT1_MOD, SE_RSA_KEY_INDEX_SLOT1_EXP},
+ };
+
+ se_dev->dst_ll_buf->last_buff_num = 0;
+ se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
+ ((tegra_se_context_t *)se_dev->
+ ctx_save_buf)->rsa_keys)));
+ se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
+
+ for (int slot = 0; slot < TEGRA_SE_RSA_KEYSLOT_COUNT; slot++) {
+ /* loop for modulus and exponent */
+ for (int index = 0; index < 2; index++) {
+ for (int word_quad = 0; word_quad < 16; word_quad++) {
+ val = SE_CTX_SAVE_SRC_RSA_KEYTABLE |
+ SE_CTX_SAVE_RSA_KEY_INDEX(
+ key_index_mod[slot][index]) |
+ SE_CTX_RSA_WORD_QUAD(word_quad);
+ tegra_se_write_32(se_dev,
+ SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
+
+ /* SE context save operation */
+ ret = tegra_se_start_ctx_save_operation(se_dev,
+ TEGRA_SE_KEY_128_SIZE);
+ if (ret) {
+ ERROR("%s: ERR: slot=%d.\n",
+ __func__, slot);
+ goto rsa_keytable_save_err;
+ }
+
+ /* Update the pointer to the next word quad */
+ se_dev->dst_ll_buf->buffer[0].addr +=
+ TEGRA_SE_KEY_128_SIZE;
+ }
+ }
+ }
+
+rsa_keytable_save_err:
+ return ret;
+}
+
+static int tegra_se_pkakeytable_sticky_bits_save(tegra_se_dev_t *se_dev)
+{
+ int ret = 0;
+
+ se_dev->dst_ll_buf->last_buff_num = 0;
+ se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
+ ((tegra_se2_context_blob_t *)se_dev->
+ ctx_save_buf)->pka_ctx.sticky_bits)));
+ se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE;
+
+ /* PKA1 sticky bits are 1 AES block (16 bytes) */
+ tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
+ SE_CTX_SAVE_SRC_PKA1_STICKY_BITS |
+ SE_CTX_STICKY_WORD_QUAD_WORDS_0_3);
+
+ /* SE context save operation */
+ ret = tegra_se_start_ctx_save_operation(se_dev, 0);
+ if (ret) {
+ ERROR("%s: ERR: PKA1 sticky bits CTX_SAVE OP failed\n",
+ __func__);
+ goto pka_sticky_bits_save_err;
+ }
+
+pka_sticky_bits_save_err:
+ return ret;
+}
+
+static int tegra_se_pkakeytable_context_save(tegra_se_dev_t *se_dev)
+{
+ uint32_t val = 0;
+ int ret = 0;
+
+ se_dev->dst_ll_buf->last_buff_num = 0;
+ se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
+ ((tegra_se2_context_blob_t *)se_dev->
+ ctx_save_buf)->pka_ctx.pka_keys)));
+ se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
+
+ /* for each slot, save word quad 0-127 */
+ for (int slot = 0; slot < TEGRA_SE_PKA1_KEYSLOT_COUNT; slot++) {
+ for (int word_quad = 0; word_quad < 512/4; word_quad++) {
+ val = SE_CTX_SAVE_SRC_PKA1_KEYTABLE |
+ SE_CTX_PKA1_WORD_QUAD_L((slot * 128) +
+ word_quad) |
+ SE_CTX_PKA1_WORD_QUAD_H((slot * 128) +
+ word_quad);
+ tegra_se_write_32(se_dev,
+ SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
+
+ /* SE context save operation */
+ ret = tegra_se_start_ctx_save_operation(se_dev,
+ TEGRA_SE_KEY_128_SIZE);
+ if (ret) {
+ ERROR("%s: ERR: pka1 keytable ctx save error\n",
+ __func__);
+ goto pka_keytable_save_err;
+ }
+
+ /* Update the pointer to the next word quad */
+ se_dev->dst_ll_buf->buffer[0].addr +=
+ TEGRA_SE_KEY_128_SIZE;
+ }
+ }
+
+pka_keytable_save_err:
+ return ret;
+}
+
+static int tegra_se_save_SRK(tegra_se_dev_t *se_dev)
+{
+ tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
+ SE_CTX_SAVE_SRC_SRK);
+
+ /* SE context save operation */
+ return tegra_se_start_ctx_save_operation(se_dev, 0);
+}
+
+/*
+ * Lock the given SE instance from non-TZ clients.
+ */
+static inline void tegra_se_lock(tegra_se_dev_t *se_dev)
+{
+ uint32_t val;
+
+ assert(se_dev);
+ val = tegra_se_read_32(se_dev, SE_SECURITY_REG_OFFSET);
+ val |= SE_SECURITY_TZ_LOCK_SOFT(SE_SECURE);
+ tegra_se_write_32(se_dev, SE_SECURITY_REG_OFFSET, val);
+}
+
+/*
+ * Use SRK to encrypt SE state and save to TZRAM carveout
+ */
+static int tegra_se_context_save_sw(tegra_se_dev_t *se_dev)
+{
+ int err = 0;
+
+ assert(se_dev);
+
+ /* Lock entire SE/SE2 as TZ protected */
+ tegra_se_lock(se_dev);
+
+ INFO("%s: generate SRK\n", __func__);
+ /* Generate SRK */
+ err = tegra_se_generate_srk(se_dev);
+ if (err) {
+ ERROR("%s: ERR: SRK generation failed\n", __func__);
+ return err;
+ }
+
+ INFO("%s: generate random data\n", __func__);
+ /* Generate random data */
+ err = tegra_se_lp_generate_random_data(se_dev);
+ if (err) {
+ ERROR("%s: ERR: LP random pattern generation failed\n", __func__);
+ return err;
+ }
+
+ INFO("%s: encrypt random data\n", __func__);
+ /* Encrypt the random data block */
+ err = tegra_se_lp_data_context_save(se_dev,
+ ((uint64_t)(&(((tegra_se_context_t *)se_dev->
+ ctx_save_buf)->rand_data))),
+ ((uint64_t)(&(((tegra_se_context_t *)se_dev->
+ ctx_save_buf)->rand_data))),
+ SE_CTX_SAVE_RANDOM_DATA_SIZE);
+ if (err) {
+ ERROR("%s: ERR: random pattern encryption failed\n", __func__);
+ return err;
+ }
+
+ INFO("%s: save SE sticky bits\n", __func__);
+ /* Save AES sticky bits context */
+ err = tegra_se_lp_sticky_bits_context_save(se_dev);
+ if (err) {
+ ERROR("%s: ERR: sticky bits context save failed\n", __func__);
+ return err;
+ }
+
+ INFO("%s: save AES keytables\n", __func__);
+ /* Save AES key table context */
+ err = tegra_se_aeskeytable_context_save(se_dev);
+ if (err) {
+ ERROR("%s: ERR: LP keytable save failed\n", __func__);
+ return err;
+ }
+
+ /* RSA key slot table context save */
+ INFO("%s: save RSA keytables\n", __func__);
+ err = tegra_se_lp_rsakeytable_context_save(se_dev);
+ if (err) {
+ ERROR("%s: ERR: rsa key table context save failed\n", __func__);
+ return err;
+ }
+
+ /* Only SE2 has an interface with PKA1; thus, PKA1's context is saved
+ * via SE2.
+ */
+ if (se_dev->se_num == 2) {
+ /* Encrypt PKA1 sticky bits on SE2 only */
+ INFO("%s: save PKA sticky bits\n", __func__);
+ err = tegra_se_pkakeytable_sticky_bits_save(se_dev);
+ if (err) {
+ ERROR("%s: ERR: PKA sticky bits context save failed\n", __func__);
+ return err;
+ }
+
+ /* Encrypt PKA1 keyslots on SE2 only */
+ INFO("%s: save PKA keytables\n", __func__);
+ err = tegra_se_pkakeytable_context_save(se_dev);
+ if (err) {
+ ERROR("%s: ERR: PKA key table context save failed\n", __func__);
+ return err;
+ }
+ }
+
+ /* Encrypt known pattern */
+ if (se_dev->se_num == 1) {
+ err = tegra_se_lp_data_context_save(se_dev,
+ ((uint64_t)(&se_ctx_known_pattern_data)),
+ ((uint64_t)(&(((tegra_se_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
+ SE_CTX_KNOWN_PATTERN_SIZE);
+ } else if (se_dev->se_num == 2) {
+ err = tegra_se_lp_data_context_save(se_dev,
+ ((uint64_t)(&se_ctx_known_pattern_data)),
+ ((uint64_t)(&(((tegra_se2_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
+ SE_CTX_KNOWN_PATTERN_SIZE);
+ }
+ if (err) {
+ ERROR("%s: ERR: save LP known pattern failure\n", __func__);
+ return err;
+ }
+
+ /* Write lp context buffer address into PMC scratch register */
+ if (se_dev->se_num == 1) {
+ /* SE context address, support T210 only */
+ mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SCRATCH43_REG_OFFSET,
+ ((uint64_t)(se_dev->ctx_save_buf)));
+ } else if (se_dev->se_num == 2) {
+ /* SE2 & PKA1 context address */
+ mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH116_OFFSET,
+ ((uint64_t)(se_dev->ctx_save_buf)));
+ }
+
+ /* Saves SRK to PMC secure scratch registers for BootROM, which
+ * verifies and restores the security engine context on warm boot.
+ */
+ err = tegra_se_save_SRK(se_dev);
+ if (err < 0) {
+ ERROR("%s: ERR: LP SRK save failure\n", __func__);
+ return err;
+ }
+
+ INFO("%s: SE context save done \n", __func__);
+
+ return err;
+}
+
+/*
+ * Initialize the SE engine handle
+ */
+void tegra_se_init(void)
+{
+ uint32_t val = 0;
+ INFO("%s: start SE init\n", __func__);
+
+ /* Generate random SRK to initialize DRBG */
+ tegra_se_generate_srk(&se_dev_1);
+
+ if (tegra_chipid_is_t210_b01()) {
+ tegra_se_generate_srk(&se_dev_2);
+ }
+
+ /* determine if ECID is valid */
+ val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_JTAG_SECUREID_VALID);
+ ecid_valid = (val == ECID_VALID);
+
+ INFO("%s: SE init done\n", __func__);
+}
+
+static void tegra_se_enable_clocks(void)
+{
+ uint32_t val = 0;
+
+ /* Enable entropy clock */
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
+ val |= ENTROPY_CLK_ENB_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);
+
+ /* De-Assert Entropy Reset */
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W);
+ val &= ~ENTROPY_RESET_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W, val);
+
+ /*
+ * Switch SE clock source to CLK_M, to make sure SE clock
+ * is on when saving SE context
+ */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_RST_CTL_CLK_SRC_SE,
+ SE_CLK_SRC_CLK_M);
+
+ /* Enable SE clock */
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
+ val |= SE_CLK_ENB_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
+
+ /* De-Assert SE Reset */
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V);
+ val &= ~SE_RESET_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V, val);
+}
+
+static void tegra_se_disable_clocks(void)
+{
+ uint32_t val = 0;
+
+ /* Disable entropy clock */
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
+ val &= ~ENTROPY_CLK_ENB_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);
+
+ /* Disable SE clock */
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
+ val &= ~SE_CLK_ENB_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
+}
+
+/*
+ * Security engine power suspend entry point.
+ * This function is invoked from PSCI power domain suspend handler.
+ */
+int32_t tegra_se_suspend(void)
+{
+ int32_t ret = 0;
+ uint32_t val = 0;
+
+ /* SE does not use the SMMU in EL3, so disable it here.
+ * The kernel re-enables it on resume. */
+ val = mmio_read_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0);
+ val &= ~PPCS_SMMU_ENABLE;
+ mmio_write_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0, val);
+
+ tegra_se_enable_clocks();
+
+ if (tegra_chipid_is_t210_b01()) {
+ /* T210 B01: atomic context save for SE2 and PKA1 */
+ INFO("%s: SE2/PKA1 atomic context save\n", __func__);
+ ret = tegra_se_context_save_atomic(&se_dev_2);
+ if (ret != 0) {
+ ERROR("%s: SE2 ctx save failed (%d)\n", __func__, ret);
+ }
+
+ ret = tegra_se_context_save_atomic(&se_dev_1);
+ if (ret != 0) {
+ ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret);
+ }
+ } else {
+ /* T210: legacy (SW) context save for SE */
+ INFO("%s: SE1 legacy(SW) context save\n", __func__);
+ ret = tegra_se_context_save_sw(&se_dev_1);
+ if (ret != 0) {
+ ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret);
+ }
+ }
+
+ tegra_se_disable_clocks();
+
+ return ret;
+}
+
+/*
+ * Save TZRAM to shadow TZRAM in AON
+ */
+int32_t tegra_se_save_tzram(void)
+{
+ uint32_t val = 0;
+ int32_t ret = 0;
+ uint32_t timeout;
+
+ INFO("%s: SE TZRAM save start\n", __func__);
+ tegra_se_enable_clocks();
+
+ val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
+ tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);
+
+ val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
+ for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
+ (timeout < TIMEOUT_100MS); timeout++) {
+ mdelay(1);
+ val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
+ }
+
+ if (timeout == TIMEOUT_100MS) {
+ ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret == 0) {
+ INFO("%s: SE TZRAM save done!\n", __func__);
+ }
+
+ tegra_se_disable_clocks();
+
+ return ret;
+}
+
+/*
+ * This function is invoked on SE warm boot resume.
+ */
+static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
+{
+ uint32_t val;
+
+ assert(se_dev);
+
+ /* Lock RNG source to ENTROPY on resume */
+ val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
+ DRBG_RO_ENT_SRC_LOCK_ENABLE |
+ DRBG_RO_ENT_SRC_ENABLE;
+ tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);
+
+ /* Set a random value to SRK to initialize DRBG */
+ tegra_se_generate_srk(se_dev);
+}
+
+/*
+ * The function is invoked on SC7 resume
+ */
+void tegra_se_resume(void)
+{
+ tegra_se_warm_boot_resume(&se_dev_1);
+
+ if (tegra_chipid_is_t210_b01()) {
+ tegra_se_warm_boot_resume(&se_dev_2);
+ }
+}
diff --git a/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
new file mode 100644
index 0000000..2ec044c
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
@@ -0,0 +1,619 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <cortex_a57.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <plat/common/platform.h>
+
+#include <bpmp.h>
+#include <flowctrl.h>
+#include <lib/utils.h>
+#include <memctrl.h>
+#include <pmc.h>
+#include <platform_def.h>
+#include <security_engine.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+#include <tegra_platform.h>
+
+/*
+ * Register used to clear CPU reset signals. Each CPU has two reset
+ * signals: CPU reset (3:0) and Core reset (19:16).
+ */
+#define CPU_CMPLX_RESET_CLR 0x454
+#define CPU_CORE_RESET_MASK 0x10001
+
+/* Clock and Reset controller registers for system clock's settings */
+#define SCLK_RATE 0x30
+#define SCLK_BURST_POLICY 0x28
+#define SCLK_BURST_POLICY_DEFAULT 0x10000000
+
+static int cpu_powergate_mask[PLATFORM_MAX_CPUS_PER_CLUSTER];
+static bool tegra_bpmp_available = true;
+
+int32_t tegra_soc_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ int state_id = psci_get_pstate_id(power_state);
+ const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+
+ /* Sanity check the requested state id */
+ switch (state_id) {
+ case PSTATE_ID_CORE_POWERDN:
+ /*
+ * Core powerdown request only for afflvl 0
+ */
+ req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id & 0xff;
+
+ break;
+
+ case PSTATE_ID_CLUSTER_IDLE:
+
+ /*
+ * Cluster idle request for afflvl 0
+ */
+ req_state->pwr_domain_state[MPIDR_AFFLVL0] = PSTATE_ID_CORE_POWERDN;
+ req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
+ break;
+
+ case PSTATE_ID_SOC_POWERDN:
+
+ /*
+ * When the BPMP firmware is not present, sc7entry-fw must be
+ * available in the system for System Suspend entry to
+ * succeed.
+ */
+ if (!tegra_bpmp_init() && !plat_params->sc7entry_fw_base)
+ return PSCI_E_NOT_SUPPORTED;
+
+ /*
+ * System powerdown request only for afflvl 2
+ */
+ for (uint32_t i = MPIDR_AFFLVL0; i < PLAT_MAX_PWR_LVL; i++)
+ req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+
+ req_state->pwr_domain_state[PLAT_MAX_PWR_LVL] =
+ PLAT_SYS_SUSPEND_STATE_ID;
+
+ break;
+
+ default:
+ ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ return PSCI_E_SUCCESS;
+}
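+
+/*
+ * Informative example: a CPU_SUSPEND request carrying
+ * PSTATE_ID_CLUSTER_IDLE is translated above into
+ * pwr_domain_state[MPIDR_AFFLVL0] = PSTATE_ID_CORE_POWERDN and
+ * pwr_domain_state[MPIDR_AFFLVL1] = PSTATE_ID_CLUSTER_IDLE, i.e. the
+ * core powers down while its cluster enters the idle state.
+ */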
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level.
+ ******************************************************************************/
+plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
+ const plat_local_state_t *states,
+ unsigned int ncpu)
+{
+ plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
+ int cpu = plat_my_core_pos();
+ int core_pos = read_mpidr() & MPIDR_CPU_MASK;
+ uint32_t bpmp_reply, data[3], val;
+ int ret;
+
+ /* get the power state at this level */
+ if (lvl == MPIDR_AFFLVL1)
+ target = *(states + core_pos);
+ if (lvl == MPIDR_AFFLVL2)
+ target = *(states + cpu);
+
+ if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CLUSTER_IDLE)) {
+
+ /* initialize the bpmp interface */
+ ret = tegra_bpmp_init();
+ if (ret != 0U) {
+
+ /*
+ * flag to indicate that BPMP firmware is not
+ * available and the CPU has to handle entry/exit
+ * for all power states
+ */
+ tegra_bpmp_available = false;
+
+ /* Cluster idle not allowed */
+ target = PSCI_LOCAL_STATE_RUN;
+
+ /*******************************************
+ * BPMP is not present, so handle CC6 entry
+ * from the CPU
+ ******************************************/
+
+ /* check if cluster idle state has been enabled */
+ val = mmio_read_32(TEGRA_CL_DVFS_BASE + DVFS_DFLL_CTRL);
+ if (val == ENABLE_CLOSED_LOOP) {
+ /*
+ * Acquire the cluster idle lock to stop
+ * other CPUs from powering up.
+ */
+ tegra_fc_ccplex_pgexit_lock();
+
+ /* Cluster idle only from the last standing CPU */
+ if (tegra_pmc_is_last_on_cpu() && tegra_fc_is_ccx_allowed()) {
+ /* Cluster idle allowed */
+ target = PSTATE_ID_CLUSTER_IDLE;
+ } else {
+ /* release cluster idle lock */
+ tegra_fc_ccplex_pgexit_unlock();
+ }
+ }
+ } else {
+
+ /* Cluster power-down */
+ data[0] = (uint32_t)cpu;
+ data[1] = TEGRA_PM_CC6;
+ data[2] = TEGRA_PM_SC1;
+ ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE,
+ (void *)&data, (int)sizeof(data),
+ (void *)&bpmp_reply,
+ (int)sizeof(bpmp_reply));
+
+ /* check if cluster power down is allowed */
+ if ((ret != 0L) || (bpmp_reply != BPMP_CCx_ALLOWED)) {
+
+ /* Cluster power down not allowed */
+ target = PSCI_LOCAL_STATE_RUN;
+ }
+ }
+
+ } else if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
+ (target == PSTATE_ID_SOC_POWERDN)) {
+
+ /* System Suspend */
+ target = PSTATE_ID_SOC_POWERDN;
+
+ } else {
+ ; /* do nothing */
+ }
+
+ return target;
+}
+
+int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
+{
+ (void)cpu_state;
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ u_register_t mpidr = read_mpidr();
+ const plat_local_state_t *pwr_domain_state =
+ target_state->pwr_domain_state;
+ unsigned int stateid_afflvl2 = pwr_domain_state[MPIDR_AFFLVL2];
+ unsigned int stateid_afflvl1 = pwr_domain_state[MPIDR_AFFLVL1];
+ unsigned int stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0];
+ uint32_t cfg;
+ int ret = PSCI_E_SUCCESS;
+ uint32_t val;
+
+ if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+ assert((stateid_afflvl0 == PLAT_MAX_OFF_STATE) ||
+ (stateid_afflvl0 == PSTATE_ID_SOC_POWERDN));
+ assert((stateid_afflvl1 == PLAT_MAX_OFF_STATE) ||
+ (stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
+
+ /* Suspend se/se2 and pka1 for T210 B01 and se for T210 */
+ if (tegra_se_suspend() != 0) {
+ ret = PSCI_E_INTERN_FAIL;
+ }
+
+ } else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_IDLE) {
+
+ assert(stateid_afflvl0 == PSTATE_ID_CORE_POWERDN);
+
+ if (!tegra_bpmp_available) {
+
+ /*
+ * When disabled, the DFLL loses its state. Enable
+ * open-loop mode for the DFLL, as we don't want
+ * garbage values being written to the PMIC
+ * when we enter the cluster idle state.
+ */
+ mmio_write_32(TEGRA_CL_DVFS_BASE + DVFS_DFLL_CTRL,
+ ENABLE_OPEN_LOOP);
+
+ /* Find if the platform uses OVR2/MAX77621 PMIC */
+ cfg = mmio_read_32(TEGRA_CL_DVFS_BASE + DVFS_DFLL_OUTPUT_CFG);
+ if (cfg & DFLL_OUTPUT_CFG_CLK_EN_BIT) {
+ /* OVR2 */
+
+ /* PWM tristate */
+ val = mmio_read_32(TEGRA_MISC_BASE + PINMUX_AUX_DVFS_PWM);
+ val |= PINMUX_PWM_TRISTATE;
+ mmio_write_32(TEGRA_MISC_BASE + PINMUX_AUX_DVFS_PWM, val);
+
+ /*
+ * SCRATCH201[1] is being used to identify CPU
+ * PMIC in warmboot code.
+ * 0 : OVR2
+ * 1 : MAX77621
+ */
+ tegra_pmc_write_32(PMC_SCRATCH201, 0x0);
+ } else {
+ /* MAX77621 */
+ tegra_pmc_write_32(PMC_SCRATCH201, 0x2);
+ }
+ }
+
+ /* Prepare for cluster idle */
+ tegra_fc_cluster_idle(mpidr);
+
+ } else if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {
+
+ /* Prepare for cpu powerdn */
+ tegra_fc_cpu_powerdn(mpidr);
+
+ } else {
+ ERROR("%s: Unknown state id (%d, %d, %d)\n", __func__,
+ stateid_afflvl2, stateid_afflvl1, stateid_afflvl0);
+ ret = PSCI_E_NOT_SUPPORTED;
+ }
+
+ return ret;
+}
+
+static void tegra_reset_all_dma_masters(void)
+{
+ uint32_t val, mask;
+
+ /*
+ * Reset all possible DMA masters in the system.
+ */
+ val = GPU_RESET_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_REG_OFFSET, val);
+
+ val = NVENC_RESET_BIT | TSECB_RESET_BIT | APE_RESET_BIT |
+ NVJPG_RESET_BIT | NVDEC_RESET_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_Y, val);
+
+ val = HOST1X_RESET_BIT | ISP_RESET_BIT | USBD_RESET_BIT |
+ VI_RESET_BIT | SDMMC4_RESET_BIT | SDMMC1_RESET_BIT |
+ SDMMC2_RESET_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_L, val);
+
+ val = USB2_RESET_BIT | APBDMA_RESET_BIT | AHBDMA_RESET_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_H, val);
+
+ val = XUSB_DEV_RESET_BIT | XUSB_HOST_RESET_BIT | TSEC_RESET_BIT |
+ PCIE_RESET_BIT | SDMMC3_RESET_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_U, val);
+
+ val = SE_RESET_BIT | HDA_RESET_BIT | SATA_RESET_BIT;
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_V, val);
+
+ /*
+ * If any of the DMA masters are still alive, assume
+ * that the system has been compromised and reboot.
+ */
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_REG_OFFSET);
+ mask = GPU_RESET_BIT;
+ if ((val & mask) != mask)
+ tegra_pmc_system_reset();
+
+ mask = NVENC_RESET_BIT | TSECB_RESET_BIT | APE_RESET_BIT |
+ NVJPG_RESET_BIT | NVDEC_RESET_BIT;
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_Y);
+ if ((val & mask) != mask)
+ tegra_pmc_system_reset();
+
+ mask = HOST1X_RESET_BIT | ISP_RESET_BIT | USBD_RESET_BIT |
+ VI_RESET_BIT | SDMMC4_RESET_BIT | SDMMC1_RESET_BIT |
+ SDMMC2_RESET_BIT;
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_L);
+ if ((val & mask) != mask)
+ tegra_pmc_system_reset();
+
+ mask = USB2_RESET_BIT | APBDMA_RESET_BIT | AHBDMA_RESET_BIT;
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_H);
+ if ((val & mask) != mask)
+ tegra_pmc_system_reset();
+
+ mask = XUSB_DEV_RESET_BIT | XUSB_HOST_RESET_BIT | TSEC_RESET_BIT |
+ PCIE_RESET_BIT | SDMMC3_RESET_BIT;
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_U);
+ if ((val & mask) != mask)
+ tegra_pmc_system_reset();
+
+ val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_SET_V);
+ mask = SE_RESET_BIT | HDA_RESET_BIT | SATA_RESET_BIT;
+ if ((val & mask) != mask)
+ tegra_pmc_system_reset();
+}
+
+int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
+{
+ u_register_t mpidr = read_mpidr();
+ const plat_local_state_t *pwr_domain_state =
+ target_state->pwr_domain_state;
+ unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL];
+ const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+ uint32_t val;
+
+ if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+ if (tegra_chipid_is_t210_b01()) {
+ /* Save tzram contents */
+ tegra_se_save_tzram();
+ }
+
+ /* de-init the interface */
+ tegra_bpmp_suspend();
+
+ /*
+ * The CPU needs to load the System suspend entry firmware
+ * if nothing is running on the BPMP.
+ */
+ if (!tegra_bpmp_available) {
+
+ /*
+ * BPMP firmware is not running on the co-processor, so
+ * we need to explicitly load the firmware to enable
+ * entry/exit to/from System Suspend and set the BPMP
+ * on its way.
+ */
+
+ /* Power off BPMP before we proceed */
+ tegra_fc_bpmp_off();
+
+ /* bond out IRAM banks B, C and D */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_BOND_OUT_U,
+ IRAM_B_LOCK_BIT | IRAM_C_LOCK_BIT |
+ IRAM_D_LOCK_BIT);
+
+ /* bond out APB/AHB DMAs */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_BOND_OUT_H,
+ APB_DMA_LOCK_BIT | AHB_DMA_LOCK_BIT);
+
+ /*
+ * Reset all the hardware blocks that can act as DMA
+ * masters on the bus.
+ */
+ tegra_reset_all_dma_masters();
+
+ /*
+ * Mark PMC as accessible to the non-secure world
+ * to allow the COP to execute System Suspend
+ * sequence
+ */
+ val = mmio_read_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE);
+ val &= ~PMC_SECURITY_EN_BIT;
+ mmio_write_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE, val);
+
+ /* clean up IRAM of any cruft */
+ zeromem((void *)(uintptr_t)TEGRA_IRAM_BASE,
+ TEGRA_IRAM_A_SIZE);
+
+ /* Copy the firmware to BPMP's internal RAM */
+ (void)memcpy((void *)(uintptr_t)TEGRA_IRAM_BASE,
+ (const void *)(plat_params->sc7entry_fw_base + SC7ENTRY_FW_HEADER_SIZE_BYTES),
+ plat_params->sc7entry_fw_size - SC7ENTRY_FW_HEADER_SIZE_BYTES);
+
+ /* Power on the BPMP and execute from IRAM base */
+ tegra_fc_bpmp_on(TEGRA_IRAM_BASE);
+
+ /* Wait until BPMP powers up */
+ do {
+ val = mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET);
+ } while (val != SIGN_OF_LIFE);
+ }
+
+ /* enter system suspend */
+ tegra_fc_soc_powerdn(mpidr);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
+{
+ return PSCI_E_NOT_SUPPORTED;
+}
+
+int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+ uint32_t cfg;
+ uint32_t val, entrypoint = 0;
+ uint64_t offset;
+
+ /* platform parameter passed by the previous bootloader */
+ if (plat_params->l2_ecc_parity_prot_dis != 1) {
+ /* Enable ECC Parity Protection for Cortex-A57 CPUs */
+ val = read_l2ctlr_el1();
+ val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+ write_l2ctlr_el1(val);
+ }
+
+ /*
+ * Check if we are exiting from SOC_POWERDN.
+ */
+ if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
+ PLAT_SYS_SUSPEND_STATE_ID) {
+
+ /*
+ * Security engine resume
+ */
+ if (tegra_chipid_is_t210_b01()) {
+ tegra_se_resume();
+ }
+
+ /*
+ * Lock scratch registers which hold the CPU vectors
+ */
+ tegra_pmc_lock_cpu_vectors();
+
+ /*
+ * Enable WRAP to INCR burst type conversions for
+ * incoming requests on the AXI slave ports.
+ */
+ val = mmio_read_32(TEGRA_MSELECT_BASE + MSELECT_CONFIG);
+ val &= ~ENABLE_UNSUP_TX_ERRORS;
+ val |= ENABLE_WRAP_TO_INCR_BURSTS;
+ mmio_write_32(TEGRA_MSELECT_BASE + MSELECT_CONFIG, val);
+
+ /*
+ * Restore Boot and Power Management Processor (BPMP) reset
+ * address and reset it, if it is supported by the platform.
+ */
+ if (!tegra_bpmp_available) {
+ tegra_fc_bpmp_off();
+ } else {
+ entrypoint = tegra_pmc_read_32(PMC_SCRATCH39);
+ tegra_fc_bpmp_on(entrypoint);
+
+ /* initialise the interface */
+ tegra_bpmp_resume();
+ }
+
+ if (plat_params->sc7entry_fw_base != 0U) {
+ /* sc7entry-fw is part of TZDRAM area */
+ offset = plat_params->tzdram_base - plat_params->sc7entry_fw_base;
+ tegra_memctrl_tzdram_setup(plat_params->sc7entry_fw_base,
+ plat_params->tzdram_size + offset);
+ }
+
+ if (!tegra_chipid_is_t210_b01()) {
+ /* restrict PMC access to secure world */
+ val = mmio_read_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE);
+ val |= PMC_SECURITY_EN_BIT;
+ mmio_write_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE, val);
+ }
+ }
+
+ /*
+ * Check if we are exiting cluster idle state
+ */
+ if (target_state->pwr_domain_state[MPIDR_AFFLVL1] ==
+ PSTATE_ID_CLUSTER_IDLE) {
+
+ if (!tegra_bpmp_available) {
+
+ /* PWM un-tristate */
+ cfg = mmio_read_32(TEGRA_CL_DVFS_BASE + DVFS_DFLL_OUTPUT_CFG);
+ if (cfg & DFLL_OUTPUT_CFG_CLK_EN_BIT) {
+ val = mmio_read_32(TEGRA_MISC_BASE + PINMUX_AUX_DVFS_PWM);
+ val &= ~PINMUX_PWM_TRISTATE;
+ mmio_write_32(TEGRA_MISC_BASE + PINMUX_AUX_DVFS_PWM, val);
+
+ /* make sure the setting took effect */
+ val = mmio_read_32(TEGRA_MISC_BASE + PINMUX_AUX_DVFS_PWM);
+ assert((val & PINMUX_PWM_TRISTATE) == 0U);
+ }
+
+ /*
+ * Restore operation mode for the DFLL ring
+ * oscillator
+ */
+ mmio_write_32(TEGRA_CL_DVFS_BASE + DVFS_DFLL_CTRL,
+ ENABLE_CLOSED_LOOP);
+
+ /* release cluster idle lock */
+ tegra_fc_ccplex_pgexit_unlock();
+ }
+ }
+
+ /*
+ * Mark this CPU as ON in the cpu_powergate_mask[],
+ * so that we use Flow Controller for all subsequent
+ * power ups.
+ */
+ cpu_powergate_mask[plat_my_core_pos()] = 1;
+
+ /*
+ * T210 has a dedicated ARMv7 boot and power management processor, the
+ * BPMP. Inform the BPMP that we have completed the cluster power up.
+ */
+ tegra_fc_lock_active_cluster();
+
+ /*
+ * Resume PMC hardware block for Tegra210 platforms
+ */
+ if (!tegra_chipid_is_t210_b01()) {
+ tegra_pmc_resume();
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+ uint32_t mask = CPU_CORE_RESET_MASK << cpu;
+
+ /* Deassert CPU reset signals */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + CPU_CMPLX_RESET_CLR, mask);
+
+ /* Turn on CPU using flow controller or PMC */
+ if (cpu_powergate_mask[cpu] == 0) {
+ tegra_pmc_cpu_on(cpu);
+ } else {
+ tegra_fc_cpu_on(cpu);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int32_t tegra_soc_pwr_domain_off_early(const psci_power_state_t *target_state)
+{
+ /* Do not power off the boot CPU */
+ if (plat_is_my_cpu_primary()) {
+ return PSCI_E_DENIED;
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ tegra_fc_cpu_off(read_mpidr() & MPIDR_CPU_MASK);
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_prepare_system_reset(void)
+{
+ /*
+ * Set System Clock (SCLK) to POR default so that the clock source
+ * for the PMC APB clock would not be changed due to system reset.
+ */
+ mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_BURST_POLICY,
+ SCLK_BURST_POLICY_DEFAULT);
+ mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_RATE, 0);
+
+ /* Wait 1 ms to make sure clock source/device logic is stabilized. */
+ mdelay(1);
+
+ /*
+ * Program the PMC in order to restart the system.
+ */
+ tegra_pmc_system_reset();
+
+ return PSCI_E_SUCCESS;
+}
+
+__dead2 void tegra_soc_prepare_system_off(void)
+{
+ ERROR("Tegra System Off: operation not handled.\n");
+ panic();
+}
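
Note on the cluster-idle exit path above: it clears the PWM tristate with a read-modify-write and then reads the register back to confirm the write reached the pinmux. Below is a minimal, self-contained sketch of that pattern, with the MMIO accessors stubbed out over an in-memory register and a placeholder bit position (both are assumptions, not the real hardware layout):

#include <assert.h>
#include <stdint.h>

#define PINMUX_PWM_TRISTATE	(1U << 4)	/* placeholder bit position */

/* in-memory stand-in for the PINMUX_AUX_DVFS_PWM register */
static uint32_t fake_pinmux_reg = PINMUX_PWM_TRISTATE;

static uint32_t mmio_read_32_stub(void)
{
	return fake_pinmux_reg;
}

static void mmio_write_32_stub(uint32_t v)
{
	fake_pinmux_reg = v;
}

int main(void)
{
	uint32_t val;

	/* clear the tristate bit without disturbing other fields */
	val = mmio_read_32_stub();
	val &= ~PINMUX_PWM_TRISTATE;
	mmio_write_32_stub(val);

	/* read back to make sure the setting took effect */
	val = mmio_read_32_stub();
	assert((val & PINMUX_PWM_TRISTATE) == 0U);
	return 0;
}

The read-back matters on device memory: the assert catches a write that was silently dropped, for example because the aperture was not yet accessible.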
diff --git a/plat/nvidia/tegra/soc/t210/plat_secondary.c b/plat/nvidia/tegra/soc/t210/plat_secondary.c
new file mode 100644
index 0000000..e0242cf
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_secondary.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include <pmc.h>
+#include <tegra_def.h>
+
+#define SB_CSR 0x0
+#define SB_CSR_NS_RST_VEC_WR_DIS (1 << 1)
+
+/* CPU reset vector */
+#define SB_AA64_RESET_LOW 0x30 /* width = 31:0 */
+#define SB_AA64_RESET_HI 0x34 /* width = 11:0 */
+
+extern void tegra_secure_entrypoint(void);
+
+/*******************************************************************************
+ * Setup secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+ uint32_t val;
+ uint64_t reset_addr = (uint64_t)tegra_secure_entrypoint;
+
+ INFO("Setting up secondary CPU boot\n");
+
+ /* setup secondary CPU vector */
+ mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_LOW,
+ (reset_addr & 0xFFFFFFFF) | 1);
+ val = reset_addr >> 32;
+ mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_HI, val & 0x7FF);
+
+ /* configure PMC */
+ tegra_pmc_cpu_setup(reset_addr);
+ tegra_pmc_lock_cpu_vectors();
+}
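
plat_secondary_setup() above splits the 64-bit entrypoint across the two reset-vector registers: bits 31:0 go into SB_AA64_RESET_LOW with bit 0 ORed in as a vector flag, and the upper bits, masked to the register width, go into SB_AA64_RESET_HI. A small sketch with a hypothetical, naturally aligned entrypoint, showing that the split is lossless:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical, naturally aligned secure entrypoint */
	uint64_t reset_addr = 0x170001000ULL;

	/* bits 31:0, with bit 0 ORed in as the vector flag */
	uint32_t lo = (uint32_t)(reset_addr & 0xFFFFFFFFULL) | 1U;
	/* upper bits, masked to the register's width */
	uint32_t hi = (uint32_t)(reset_addr >> 32) & 0x7FFU;

	/* reassemble, dropping the flag bit: no address bits are lost */
	uint64_t addr = ((uint64_t)hi << 32) | (lo & ~1ULL);
	assert(addr == reset_addr);
	return 0;
}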
diff --git a/plat/nvidia/tegra/soc/t210/plat_setup.c b/plat/nvidia/tegra/soc/t210/plat_setup.c
new file mode 100644
index 0000000..68cd38e
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_setup.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cortex_a57.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/interrupt_props.h>
+#include <drivers/console.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <drivers/arm/gic_common.h>
+#include <drivers/arm/gicv2.h>
+#include <bl31/interrupt_mgmt.h>
+
+#include <bpmp.h>
+#include <flowctrl.h>
+#include <memctrl.h>
+#include <plat/common/platform.h>
+#include <security_engine.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/* sets of MMIO ranges to map */
+#define MMIO_RANGE_0_ADDR 0x50000000
+#define MMIO_RANGE_1_ADDR 0x60000000
+#define MMIO_RANGE_2_ADDR 0x70000000
+#define MMIO_RANGE_SIZE 0x200000
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+ MAP_REGION_FLAT(TEGRA_IRAM_BASE, 0x40000, /* 256KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MMIO_RANGE_2_ADDR, MMIO_RANGE_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ {0}
+};
+
+/*******************************************************************************
+ * Set up the pagetables as per the platform memory map & initialize the MMU
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+ /* Add the map region for security engine SE2 */
+ if (tegra_chipid_is_t210_b01()) {
+ mmap_add_region((uint64_t)TEGRA_SE2_BASE,
+ (uint64_t)TEGRA_SE2_BASE,
+ (uint64_t)TEGRA_SE2_RANGE_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE);
+ }
+
+ /* MMIO space */
+ return tegra_mmap;
+}
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ ******************************************************************************/
+const unsigned char tegra_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ 1,
+ /* No of clusters */
+ PLATFORM_CLUSTER_COUNT,
+ /* No of CPU cores - cluster0 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER,
+ /* No of CPU cores - cluster1 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER
+};
+
+/*******************************************************************************
+ * This function returns the Tegra default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+ return tegra_power_domain_tree_desc;
+}
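
The descriptor above encodes the topology breadth-first: one root node, then the cluster count, then one core count per cluster. The sketch below shows how a consumer might walk it to derive the total core count; the values mirror the platform constants, but the walk itself is illustrative, not the PSCI library's actual parser:

#include <assert.h>
#include <stddef.h>

#define PLATFORM_CLUSTER_COUNT		2
#define PLATFORM_MAX_CPUS_PER_CLUSTER	4

static const unsigned char desc[] = {
	1,				/* number of root nodes */
	PLATFORM_CLUSTER_COUNT,		/* number of clusters */
	PLATFORM_MAX_CPUS_PER_CLUSTER,	/* cores in cluster0 */
	PLATFORM_MAX_CPUS_PER_CLUSTER,	/* cores in cluster1 */
};

int main(void)
{
	unsigned int total = 0U;

	/* per-cluster core counts start at index 2 */
	for (size_t i = 0; i < desc[1]; i++) {
		total += desc[2 + i];
	}
	assert(total == 8U);	/* 2 clusters x 4 cores */
	return 0;
}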
+
+/*******************************************************************************
+ * Handler to get the System Counter Frequency
+ ******************************************************************************/
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return 19200000; /* 19.2 MHz */
+}
+
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA210_MAX_UART_PORTS 5
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra210_uart_addresses[TEGRA210_MAX_UART_PORTS + 1] = {
+ 0, /* undefined - treated as an error case */
+ TEGRA_UARTA_BASE,
+ TEGRA_UARTB_BASE,
+ TEGRA_UARTC_BASE,
+ TEGRA_UARTD_BASE,
+ TEGRA_UARTE_BASE,
+};
+
+/*******************************************************************************
+ * Enable console corresponding to the console ID
+ ******************************************************************************/
+void plat_enable_console(int32_t id)
+{
+ static console_t uart_console;
+ uint32_t console_clock;
+
+	if ((id > 0) && (id <= TEGRA210_MAX_UART_PORTS)) {
+ /*
+ * Reference clock used by the FPGAs is a lot slower.
+ */
+ if (tegra_platform_is_fpga()) {
+ console_clock = TEGRA_BOOT_UART_CLK_13_MHZ;
+ } else {
+ console_clock = TEGRA_BOOT_UART_CLK_408_MHZ;
+ }
+
+ (void)console_16550_register(tegra210_uart_addresses[id],
+ console_clock,
+ TEGRA_CONSOLE_BAUDRATE,
+ &uart_console);
+ console_set_scope(&uart_console, CONSOLE_FLAG_BOOT |
+ CONSOLE_FLAG_RUNTIME | CONSOLE_FLAG_CRASH);
+ }
+}
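
Index 0 of the address table above is a deliberate error sentinel, so console IDs map 1:1 onto UARTA through UARTE and IDs 1..5 are valid. A sketch of the bounds check in isolation (the helper name is hypothetical):

#include <assert.h>
#include <stdint.h>

#define TEGRA210_MAX_UART_PORTS	5

/* index 0 is the error sentinel; valid IDs map to UARTA..UARTE */
static int uart_id_is_valid(int32_t id)
{
	return (id > 0) && (id <= TEGRA210_MAX_UART_PORTS);
}

int main(void)
{
	assert(!uart_id_is_valid(0));	/* sentinel */
	assert(uart_id_is_valid(1));	/* UARTA */
	assert(uart_id_is_valid(5));	/* UARTE */
	assert(!uart_id_is_valid(6));	/* out of range */
	return 0;
}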
+
+/*******************************************************************************
+ * Return pointer to the BL31 params from previous bootloader
+ ******************************************************************************/
+struct tegra_bl31_params *plat_get_bl31_params(void)
+{
+ return NULL;
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 platform params from previous bootloader
+ ******************************************************************************/
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
+{
+ return NULL;
+}
+
+/*******************************************************************************
+ * Handler for early platform setup
+ ******************************************************************************/
+void plat_early_platform_setup(void)
+{
+ const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+ uint64_t val;
+
+ /* Verify chip id is t210 */
+ assert(tegra_chipid_is_t210());
+
+ /*
+ * Do initial security configuration to allow DRAM/device access.
+ */
+ tegra_memctrl_tzdram_setup(plat_params->tzdram_base,
+ (uint32_t)plat_params->tzdram_size);
+
+ /* platform parameter passed by the previous bootloader */
+ if (plat_params->l2_ecc_parity_prot_dis != 1) {
+ /* Enable ECC Parity Protection for Cortex-A57 CPUs */
+ val = read_l2ctlr_el1();
+ val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+ write_l2ctlr_el1(val);
+ }
+
+ /* Initialize security engine driver */
+ tegra_se_init();
+}
+
+/* Secure IRQs for Tegra210 */
+static const interrupt_prop_t tegra210_interrupt_props[] = {
+ INTR_PROP_DESC(TEGRA_SDEI_SGI_PRIVATE, PLAT_SDEI_CRITICAL_PRI,
+ GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE),
+ INTR_PROP_DESC(TEGRA210_TIMER1_IRQ, PLAT_TEGRA_WDT_PRIO,
+ GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE),
+ INTR_PROP_DESC(TEGRA210_WDT_CPU_LEGACY_FIQ, PLAT_TEGRA_WDT_PRIO,
+ GICV2_INTR_GROUP0, GIC_INTR_CFG_EDGE),
+};
+
+/*******************************************************************************
+ * Handler for late platform setup
+ ******************************************************************************/
+void plat_late_platform_setup(void)
+{
+ const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+ uint64_t sc7entry_end, offset;
+ int ret;
+ uint32_t val;
+
+ /* memmap TZDRAM area containing the SC7 Entry Firmware */
+ if (plat_params->sc7entry_fw_base && plat_params->sc7entry_fw_size) {
+
+ assert(plat_params->sc7entry_fw_size <= TEGRA_IRAM_A_SIZE);
+
+		/*
+		 * Verify that the SC7 entry firmware resides inside the TZDRAM
+		 * aperture, sits entirely _before_ the BL31 code and starts
+		 * exactly 1MB below the BL31 base.
+		 */
+
+ /* sc7entry-fw must be _before_ BL31 base */
+ assert(plat_params->tzdram_base > plat_params->sc7entry_fw_base);
+
+ sc7entry_end = plat_params->sc7entry_fw_base +
+ plat_params->sc7entry_fw_size;
+ assert(sc7entry_end < plat_params->tzdram_base);
+
+ /* sc7entry-fw start must be exactly 1MB behind BL31 base */
+ offset = plat_params->tzdram_base - plat_params->sc7entry_fw_base;
+ assert(offset == 0x100000);
+
+ /* secure TZDRAM area */
+ tegra_memctrl_tzdram_setup(plat_params->sc7entry_fw_base,
+ plat_params->tzdram_size + offset);
+
+ /* power off BPMP processor until SC7 entry */
+ tegra_fc_bpmp_off();
+
+ /* memmap SC7 entry firmware code */
+ ret = mmap_add_dynamic_region(plat_params->sc7entry_fw_base,
+ plat_params->sc7entry_fw_base,
+ plat_params->sc7entry_fw_size,
+ MT_SECURE | MT_RO_DATA);
+ assert(ret == 0);
+
+ /* restrict PMC access to secure world */
+ val = mmio_read_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE);
+ val |= PMC_SECURITY_EN_BIT;
+ mmio_write_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE, val);
+ }
+
+ if (!tegra_chipid_is_t210_b01()) {
+ /* restrict PMC access to secure world */
+ val = mmio_read_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE);
+ val |= PMC_SECURITY_EN_BIT;
+ mmio_write_32(TEGRA_MISC_BASE + APB_SLAVE_SECURITY_ENABLE, val);
+ }
+}
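
The asserts above pin the sc7entry firmware to a fixed layout: wholly below the BL31/TZDRAM base and starting exactly 1MB before it. A worked sketch with example values; the TZDRAM base matches the platform makefile below, while the firmware size is hypothetical:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* TZDRAM base matches platform_t210.mk; the size is hypothetical */
	uint64_t tzdram_base      = 0xFF800000ULL;
	uint64_t sc7entry_fw_base = 0xFF700000ULL;
	uint64_t sc7entry_fw_size = 0x2000ULL;

	/* sc7entry-fw must sit wholly below the BL31/TZDRAM base ... */
	assert(tzdram_base > sc7entry_fw_base);
	assert(sc7entry_fw_base + sc7entry_fw_size < tzdram_base);

	/* ... and start exactly 1MB behind it */
	assert(tzdram_base - sc7entry_fw_base == 0x100000ULL);
	return 0;
}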
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+ tegra_gic_setup(tegra210_interrupt_props, ARRAY_SIZE(tegra210_interrupt_props));
+ tegra_gic_init();
+
+ /* Enable handling for FIQs */
+ tegra_fiq_handler_setup();
+
+ /*
+ * Enable routing watchdog FIQs from the flow controller to
+ * the GICD.
+ */
+ tegra_fc_enable_fiq_to_ccplex_routing();
+}
+
+/*******************************************************************************
+ * Handler to indicate support for System Suspend
+ ******************************************************************************/
+bool plat_supports_system_suspend(void)
+{
+ const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
+
+	/*
+	 * Tegra210 B01 SoCs support system suspend natively; earlier Tegra210
+	 * SoCs support it only when the sc7entry-fw is present.
+	 */
+ if (!tegra_chipid_is_t210_b01() && (plat_params->sc7entry_fw_base != 0U)) {
+ return true;
+ } else if (tegra_chipid_is_t210_b01()) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/*******************************************************************************
+ * Platform specific runtime setup.
+ ******************************************************************************/
+void plat_runtime_setup(void)
+{
+	/*
+	 * During cold boot, the arbitration bit is observed to be set in
+	 * the memory controller, leading to false error interrupts in the
+	 * non-secure world. To avoid this, clear the interrupt status
+	 * register before booting into the non-secure world.
+	 */
+ tegra_memctrl_clear_pending_interrupts();
+
+	/*
+	 * During boot, USB3 and flash media (SDMMC/SATA) devices need
+	 * access to the IRAM. Because these clients connect to the MC and
+	 * have no direct path to the IRAM, the MC implements AHB
+	 * redirection during boot to provide one. In this mode, accesses
+	 * to a programmed memory address aperture are directed to the
+	 * AHB bus, allowing access to the IRAM. This mode must be
+	 * disabled before we jump to the non-secure world.
+	 */
+ tegra_memctrl_disable_ahb_redirection();
+}
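
The branch ladder in plat_supports_system_suspend() above reduces to a single boolean expression. An equivalent sketch with the chip revision and firmware base as plain inputs (names are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* equivalent predicate, with the chip revision and fw base as inputs */
static bool supports_system_suspend(bool is_t210_b01,
				    uint64_t sc7entry_fw_base)
{
	/* B01 parts suspend natively; earlier T210s need sc7entry-fw */
	return is_t210_b01 || (sc7entry_fw_base != 0U);
}

int main(void)
{
	assert(supports_system_suspend(true, 0U));
	assert(supports_system_suspend(false, 0xFF700000ULL));
	assert(!supports_system_suspend(false, 0U));
	return 0;
}

Keeping the ladder in the driver makes each supported configuration explicit; the reduction is shown here only to make the decision table easy to verify.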
diff --git a/plat/nvidia/tegra/soc/t210/plat_sip_calls.c b/plat/nvidia/tegra/soc/t210/plat_sip_calls.c
new file mode 100644
index 0000000..f3ebd4b
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_sip_calls.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <errno.h>
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+
+#include <memctrl.h>
+#include <pmc.h>
+#include <tegra_private.h>
+#include <tegra_platform.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * PMC parameters
+ ******************************************************************************/
+#define PMC_READ U(0xaa)
+#define PMC_WRITE U(0xbb)
+
+/*******************************************************************************
+ * Tegra210 SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_PMC_COMMANDS U(0xC200FE00)
+
+/*******************************************************************************
+ * This function is responsible for handling all T210 SiP calls
+ ******************************************************************************/
+int plat_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ const void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint32_t val, ns;
+
+ /* Determine which security state this SMC originated from */
+ ns = is_caller_non_secure(flags);
+	if (!ns) {
+		SMC_RET1(handle, SMC_UNK);
+	}
+
+ if (smc_fid == TEGRA_SIP_PMC_COMMANDS) {
+
+		/* check that the offset is within the PMC range and is 4-byte aligned */
+ if ((x2 >= TEGRA_PMC_SIZE) || (x2 & 0x3)) {
+ return -EINVAL;
+ }
+
+ switch (x2) {
+		/* Blacklisted PMC registers */
+ case PMC_SCRATCH1:
+ case PMC_SCRATCH31 ... PMC_SCRATCH33:
+ case PMC_SCRATCH40:
+ case PMC_SCRATCH42:
+ case PMC_SCRATCH43 ... PMC_SCRATCH48:
+ case PMC_SCRATCH50 ... PMC_SCRATCH51:
+ case PMC_SCRATCH56 ... PMC_SCRATCH57:
+ /* PMC secure-only registers are not accessible */
+ case PMC_DPD_ENABLE_0:
+ case PMC_FUSE_CONTROL_0:
+ case PMC_CRYPTO_OP_0:
+ case PMC_TSC_MULT_0:
+ case PMC_STICKY_BIT:
+ ERROR("%s: error offset=0x%" PRIx64 "\n", __func__, x2);
+ return -EFAULT;
+ default:
+ /* Valid register */
+ break;
+ }
+
+ /* Perform PMC read/write */
+ if (x1 == PMC_READ) {
+ val = mmio_read_32((uint32_t)(TEGRA_PMC_BASE + x2));
+ write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1, val);
+ } else if (x1 == PMC_WRITE) {
+ mmio_write_32((uint32_t)(TEGRA_PMC_BASE + x2), (uint32_t)x3);
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ return -ENOTSUP;
+ }
+ return 0;
+}
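
plat_sip_handler() above gates PMC accesses three ways: the offset must fall inside the aperture, be 4-byte aligned, and not hit a blacklisted register. A self-contained sketch of that gating, with placeholder values for the aperture size and one blacklisted offset (both are assumptions, not the real register map):

#include <assert.h>
#include <errno.h>
#include <stdint.h>

#define TEGRA_PMC_SIZE	0x800U	/* placeholder aperture size */
#define PMC_SCRATCH40	0x138U	/* placeholder blacklisted offset */

static int pmc_offset_is_allowed(uint64_t off)
{
	/* must be inside the aperture and 4-byte aligned */
	if ((off >= TEGRA_PMC_SIZE) || ((off & 0x3U) != 0U)) {
		return -EINVAL;
	}
	/* blacklisted registers are not accessible */
	if (off == PMC_SCRATCH40) {
		return -EFAULT;
	}
	return 0;
}

int main(void)
{
	assert(pmc_offset_is_allowed(0x2U) == -EINVAL);		/* misaligned */
	assert(pmc_offset_is_allowed(0x1000U) == -EINVAL);	/* out of range */
	assert(pmc_offset_is_allowed(PMC_SCRATCH40) == -EFAULT);
	assert(pmc_offset_is_allowed(0x0U) == 0);
	return 0;
}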
diff --git a/plat/nvidia/tegra/soc/t210/platform_t210.mk b/plat/nvidia/tegra/soc/t210/platform_t210.mk
new file mode 100644
index 0000000..724cfc3
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/platform_t210.mk
@@ -0,0 +1,62 @@
+#
+# Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TZDRAM_BASE := 0xFF800000
+$(eval $(call add_define,TZDRAM_BASE))
+
+ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT := 1
+$(eval $(call add_define,ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT))
+
+PLATFORM_CLUSTER_COUNT := 2
+$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
+
+PLATFORM_MAX_CPUS_PER_CLUSTER := 4
+$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
+
+MAX_XLAT_TABLES := 10
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS := 16
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+ENABLE_TEGRA_WDT_LEGACY_FIQ_HANDLING := 1
+
+PLAT_INCLUDES += -Iplat/nvidia/tegra/include/t210 \
+ -I${SOC_DIR}/drivers/se
+
+BL31_SOURCES += ${TEGRA_GICv2_SOURCES} \
+ drivers/ti/uart/aarch64/16550_console.S \
+ lib/cpus/aarch64/cortex_a53.S \
+ lib/cpus/aarch64/cortex_a57.S \
+ ${TEGRA_DRIVERS}/bpmp/bpmp.c \
+ ${TEGRA_DRIVERS}/flowctrl/flowctrl.c \
+ ${TEGRA_DRIVERS}/memctrl/memctrl_v1.c \
+ ${TEGRA_DRIVERS}/pmc/pmc.c \
+ ${SOC_DIR}/plat_psci_handlers.c \
+ ${SOC_DIR}/plat_setup.c \
+ ${SOC_DIR}/drivers/se/security_engine.c \
+ ${SOC_DIR}/plat_secondary.c \
+ ${SOC_DIR}/plat_sip_calls.c
+
+# Enable workarounds for selected Cortex-A57 errata.
+A57_DISABLE_NON_TEMPORAL_HINT := 1
+ERRATA_A57_826974 := 1
+ERRATA_A57_826977 := 1
+ERRATA_A57_828024 := 1
+ERRATA_A57_833471 := 1
+
+# Enable workarounds for selected Cortex-A53 errata.
+A53_DISABLE_NON_TEMPORAL_HINT := 1
+ERRATA_A53_826319 := 1
+ERRATA_A53_836870 := 1
+ERRATA_A53_855873 := 1
+
+# Skip L1 $ flush when powering down Cortex-A57 CPUs
+SKIP_A57_L1_FLUSH_PWR_DWN := 1
+
+# Enable higher performance Non-cacheable load forwarding
+A57_ENABLE_NONCACHEABLE_LOAD_FWD := 1
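
Each NAME := value / $(eval $(call add_define,NAME)) pair above both sets a make variable and, via TF-A's add_define build helper, appends -DNAME=value to the compiler flags, so platform C code sees the value as an ordinary macro. A hypothetical consumer, with fallback definitions so the sketch also builds standalone:

#include <assert.h>

/* fallbacks so this sketch also builds without the -D flags */
#ifndef PLATFORM_CLUSTER_COUNT
#define PLATFORM_CLUSTER_COUNT		2
#endif
#ifndef PLATFORM_MAX_CPUS_PER_CLUSTER
#define PLATFORM_MAX_CPUS_PER_CLUSTER	4
#endif

int main(void)
{
	/* the build-time values are visible as ordinary macros */
	assert(PLATFORM_CLUSTER_COUNT * PLATFORM_MAX_CPUS_PER_CLUSTER == 8);
	return 0;
}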