author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:13:47 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:13:47 +0000
commit     102b0d2daa97dae68d3eed54d8fe37a9cc38a892 (patch)
tree       bcf648efac40ca6139842707f0eba5a4496a6dd2 /plat/intel/soc
parent     Initial commit. (diff)
Adding upstream version 2.8.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'plat/intel/soc')
-rw-r--r--  plat/intel/soc/agilex/bl2_plat_setup.c  189
-rw-r--r--  plat/intel/soc/agilex/bl31_plat_setup.c  160
-rw-r--r--  plat/intel/soc/agilex/include/agilex_clock_manager.h  131
-rw-r--r--  plat/intel/soc/agilex/include/agilex_memory_controller.h  163
-rw-r--r--  plat/intel/soc/agilex/include/agilex_mmc.h  7
-rw-r--r--  plat/intel/soc/agilex/include/agilex_pinmux.h  20
-rw-r--r--  plat/intel/soc/agilex/include/socfpga_plat_def.h  40
-rw-r--r--  plat/intel/soc/agilex/platform.mk  83
-rw-r--r--  plat/intel/soc/agilex/soc/agilex_clock_manager.c  399
-rw-r--r--  plat/intel/soc/agilex/soc/agilex_memory_controller.c  399
-rw-r--r--  plat/intel/soc/agilex/soc/agilex_mmc.c  19
-rw-r--r--  plat/intel/soc/agilex/soc/agilex_pinmux.c  225
-rw-r--r--  plat/intel/soc/common/aarch64/plat_helpers.S  150
-rw-r--r--  plat/intel/soc/common/aarch64/platform_common.c  60
-rw-r--r--  plat/intel/soc/common/bl2_plat_mem_params_desc.c  96
-rw-r--r--  plat/intel/soc/common/drivers/ccu/ncore_ccu.c  132
-rw-r--r--  plat/intel/soc/common/drivers/ccu/ncore_ccu.h  109
-rw-r--r--  plat/intel/soc/common/drivers/qspi/cadence_qspi.c  822
-rw-r--r--  plat/intel/soc/common/drivers/qspi/cadence_qspi.h  178
-rw-r--r--  plat/intel/soc/common/drivers/wdt/watchdog.c  52
-rw-r--r--  plat/intel/soc/common/drivers/wdt/watchdog.h  39
-rw-r--r--  plat/intel/soc/common/include/plat_macros.S  22
-rw-r--r--  plat/intel/soc/common/include/platform_def.h  242
-rw-r--r--  plat/intel/soc/common/include/socfpga_emac.h  24
-rw-r--r--  plat/intel/soc/common/include/socfpga_f2sdram_manager.h  39
-rw-r--r--  plat/intel/soc/common/include/socfpga_fcs.h  308
-rw-r--r--  plat/intel/soc/common/include/socfpga_handoff.h  135
-rw-r--r--  plat/intel/soc/common/include/socfpga_mailbox.h  240
-rw-r--r--  plat/intel/soc/common/include/socfpga_noc.h  95
-rw-r--r--  plat/intel/soc/common/include/socfpga_private.h  68
-rw-r--r--  plat/intel/soc/common/include/socfpga_reset_manager.h  125
-rw-r--r--  plat/intel/soc/common/include/socfpga_sip_svc.h  217
-rw-r--r--  plat/intel/soc/common/include/socfpga_system_manager.h  54
-rw-r--r--  plat/intel/soc/common/sip/socfpga_sip_ecc.c  46
-rw-r--r--  plat/intel/soc/common/sip/socfpga_sip_fcs.c  1739
-rw-r--r--  plat/intel/soc/common/soc/socfpga_emac.c  38
-rw-r--r--  plat/intel/soc/common/soc/socfpga_firewall.c  123
-rw-r--r--  plat/intel/soc/common/soc/socfpga_handoff.c  39
-rw-r--r--  plat/intel/soc/common/soc/socfpga_mailbox.c  647
-rw-r--r--  plat/intel/soc/common/soc/socfpga_reset_manager.c  331
-rw-r--r--  plat/intel/soc/common/socfpga_delay_timer.c  49
-rw-r--r--  plat/intel/soc/common/socfpga_image_load.c  58
-rw-r--r--  plat/intel/soc/common/socfpga_psci.c  229
-rw-r--r--  plat/intel/soc/common/socfpga_sip_svc.c  1131
-rw-r--r--  plat/intel/soc/common/socfpga_sip_svc_v2.c  174
-rw-r--r--  plat/intel/soc/common/socfpga_storage.c  193
-rw-r--r--  plat/intel/soc/common/socfpga_topology.c  51
-rw-r--r--  plat/intel/soc/n5x/bl31_plat_setup.c  165
-rw-r--r--  plat/intel/soc/n5x/include/socfpga_plat_def.h  42
-rw-r--r--  plat/intel/soc/n5x/platform.mk  52
-rw-r--r--  plat/intel/soc/stratix10/bl2_plat_setup.c  188
-rw-r--r--  plat/intel/soc/stratix10/bl31_plat_setup.c  173
-rw-r--r--  plat/intel/soc/stratix10/include/s10_clock_manager.h  99
-rw-r--r--  plat/intel/soc/stratix10/include/s10_memory_controller.h  160
-rw-r--r--  plat/intel/soc/stratix10/include/s10_mmc.h  12
-rw-r--r--  plat/intel/soc/stratix10/include/s10_pinmux.h  20
-rw-r--r--  plat/intel/soc/stratix10/include/socfpga_plat_def.h  40
-rw-r--r--  plat/intel/soc/stratix10/platform.mk  80
-rw-r--r--  plat/intel/soc/stratix10/soc/s10_clock_manager.c  322
-rw-r--r--  plat/intel/soc/stratix10/soc/s10_memory_controller.c  412
-rw-r--r--  plat/intel/soc/stratix10/soc/s10_mmc.c  19
-rw-r--r--  plat/intel/soc/stratix10/soc/s10_pinmux.c  217
62 files changed, 11891 insertions, 0 deletions
diff --git a/plat/intel/soc/agilex/bl2_plat_setup.c b/plat/intel/soc/agilex/bl2_plat_setup.c
new file mode 100644
index 0000000..211a7b7
--- /dev/null
+++ b/plat/intel/soc/agilex/bl2_plat_setup.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/desc_image_load.h>
+#include <drivers/generic_delay_timer.h>
+#include <drivers/synopsys/dw_mmc.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/xlat_tables/xlat_tables.h>
+
+#include "agilex_mmc.h"
+#include "agilex_clock_manager.h"
+#include "agilex_memory_controller.h"
+#include "agilex_pinmux.h"
+#include "ccu/ncore_ccu.h"
+#include "qspi/cadence_qspi.h"
+#include "socfpga_emac.h"
+#include "socfpga_f2sdram_manager.h"
+#include "socfpga_handoff.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_private.h"
+#include "socfpga_reset_manager.h"
+#include "socfpga_system_manager.h"
+#include "wdt/watchdog.h"
+
+static struct mmc_device_info mmc_info;
+
+const mmap_region_t agilex_plat_mmap[] = {
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE,
+ MT_MEMORY | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE,
+ MT_NON_CACHEABLE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ {0},
+};
+
+boot_source_type boot_source = BOOT_SOURCE;
+
+void bl2_el3_early_platform_setup(u_register_t x0, u_register_t x1,
+ u_register_t x2, u_register_t x4)
+{
+ static console_t console;
+ handoff reverse_handoff_ptr;
+
+ generic_delay_timer_init();
+
+ if (socfpga_get_handoff(&reverse_handoff_ptr))
+ return;
+ config_pinmux(&reverse_handoff_ptr);
+ config_clkmgr_handoff(&reverse_handoff_ptr);
+
+ enable_nonsecure_access();
+ deassert_peripheral_reset();
+ config_hps_hs_before_warm_reset();
+
+ watchdog_init(get_wdt_clk());
+
+ console_16550_register(PLAT_INTEL_UART_BASE, get_uart_clk(),
+ PLAT_BAUDRATE, &console);
+
+ socfpga_delay_timer_init();
+ init_ncore_ccu();
+ socfpga_emac_init();
+ init_hard_memory_controller();
+ mailbox_init();
+ agx_mmc_init();
+
+ if (!intel_mailbox_is_fpga_not_ready()) {
+ socfpga_bridges_enable(SOC2FPGA_MASK | LWHPS2FPGA_MASK |
+ FPGA2SOC_MASK);
+ }
+}
+
+
+void bl2_el3_plat_arch_setup(void)
+{
+
+ const mmap_region_t bl_regions[] = {
+ MAP_REGION_FLAT(BL2_BASE, BL2_END - BL2_BASE,
+ MT_MEMORY | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+ MT_CODE | MT_SECURE),
+ MAP_REGION_FLAT(BL_RO_DATA_BASE,
+ BL_RO_DATA_END - BL_RO_DATA_BASE,
+ MT_RO_DATA | MT_SECURE),
+#if USE_COHERENT_MEM_BAR
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+#endif
+ {0},
+ };
+
+ setup_page_tables(bl_regions, agilex_plat_mmap);
+
+ enable_mmu_el3(0);
+
+ dw_mmc_params_t params = EMMC_INIT_PARAMS(0x100000, get_mmc_clk());
+
+ mmc_info.mmc_dev_type = MMC_IS_SD;
+ mmc_info.ocr_voltage = OCR_3_3_3_4 | OCR_3_2_3_3;
+
+ /* Request ownership and direct access to QSPI */
+ mailbox_hps_qspi_enable();
+
+ switch (boot_source) {
+ case BOOT_SOURCE_SDMMC:
+ dw_mmc_init(&params, &mmc_info);
+ socfpga_io_setup(boot_source);
+ break;
+
+ case BOOT_SOURCE_QSPI:
+ cad_qspi_init(0, QSPI_CONFIG_CPHA, QSPI_CONFIG_CPOL,
+ QSPI_CONFIG_CSDA, QSPI_CONFIG_CSDADS,
+ QSPI_CONFIG_CSEOT, QSPI_CONFIG_CSSOT, 0);
+ socfpga_io_setup(boot_source);
+ break;
+
+ default:
+ ERROR("Unsupported boot source\n");
+ panic();
+ break;
+ }
+}
+
+uint32_t get_spsr_for_bl33_entry(void)
+{
+ unsigned long el_status;
+ unsigned int mode;
+ uint32_t spsr;
+
+ /* Figure out what mode we enter the non-secure world in */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+ /*
+ * TODO: Consider the possibility of specifying the SPSR in
+ * the FIP ToC and allowing the platform to have a say as
+ * well.
+ */
+ spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ return spsr;
+}
+
+
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+ bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+
+ assert(bl_mem_params);
+
+ switch (image_id) {
+ case BL33_IMAGE_ID:
+ bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+ bl_mem_params->ep_info.spsr = get_spsr_for_bl33_entry();
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Perform any BL2 platform setup code
+ ******************************************************************************/
+void bl2_platform_setup(void)
+{
+}
+
diff --git a/plat/intel/soc/agilex/bl31_plat_setup.c b/plat/intel/soc/agilex/bl31_plat_setup.c
new file mode 100644
index 0000000..b1b9514
--- /dev/null
+++ b/plat/intel/soc/agilex/bl31_plat_setup.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/bl_common.h>
+#include <drivers/arm/gicv2.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables.h>
+
+#include "ccu/ncore_ccu.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_private.h"
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+ entry_point_info_t *next_image_info;
+
+ next_image_info = (type == NON_SECURE) ?
+ &bl33_image_ep_info : &bl32_image_ep_info;
+
+ /* None of the images on this platform can have 0x0 as the entrypoint */
+ if (next_image_info->pc)
+ return next_image_info;
+ else
+ return NULL;
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+ u_register_t arg2, u_register_t arg3)
+{
+ static console_t console;
+
+ mmio_write_64(PLAT_SEC_ENTRY, PLAT_SEC_WARM_ENTRY);
+
+ console_16550_register(PLAT_INTEL_UART_BASE, PLAT_UART_CLOCK,
+ PLAT_BAUDRATE, &console);
+ /*
+	 * Check that the parameters passed from BL2 are not NULL.
+ */
+ void *from_bl2 = (void *) arg0;
+
+ bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+ assert(params_from_bl2 != NULL);
+
+ /*
+	 * Copy BL32 (if populated by BL2) and BL33 entry point information.
+ * They are stored in Secure RAM, in BL31's address space.
+ */
+
+ if (params_from_bl2->h.type == PARAM_BL_PARAMS &&
+ params_from_bl2->h.version >= VERSION_2) {
+
+ bl_params_node_t *bl_params = params_from_bl2->head;
+
+ while (bl_params) {
+ if (bl_params->image_id == BL33_IMAGE_ID)
+ bl33_image_ep_info = *bl_params->ep_info;
+
+ bl_params = bl_params->next_params_info;
+ }
+ } else {
+ struct socfpga_bl31_params *arg_from_bl2 =
+ (struct socfpga_bl31_params *) from_bl2;
+
+ assert(arg_from_bl2->h.type == PARAM_BL31);
+ assert(arg_from_bl2->h.version >= VERSION_1);
+
+ bl32_image_ep_info = *arg_from_bl2->bl32_ep_info;
+ bl33_image_ep_info = *arg_from_bl2->bl33_ep_info;
+ }
+ SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+}
+
+static const interrupt_prop_t s10_interrupt_props[] = {
+ PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
+ PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
+};
+
+static unsigned int target_mask_array[PLATFORM_CORE_COUNT];
+
+static const gicv2_driver_data_t plat_gicv2_gic_data = {
+ .gicd_base = PLAT_INTEL_SOCFPGA_GICD_BASE,
+ .gicc_base = PLAT_INTEL_SOCFPGA_GICC_BASE,
+ .interrupt_props = s10_interrupt_props,
+ .interrupt_props_num = ARRAY_SIZE(s10_interrupt_props),
+ .target_masks = target_mask_array,
+ .target_masks_num = ARRAY_SIZE(target_mask_array),
+};
+
+/*******************************************************************************
+ * Perform any BL3-1 platform setup code
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+ socfpga_delay_timer_init();
+
+ /* Initialize the gic cpu and distributor interfaces */
+ gicv2_driver_init(&plat_gicv2_gic_data);
+ gicv2_distif_init();
+ gicv2_pcpu_distif_init();
+ gicv2_cpuif_enable();
+
+ /* Signal secondary CPUs to jump to BL31 (BL2 = U-boot SPL) */
+ mmio_write_64(PLAT_CPU_RELEASE_ADDR,
+ (uint64_t)plat_secondary_cpus_bl31_entry);
+
+ mailbox_hps_stage_notify(HPS_EXECUTION_STATE_SSBL);
+
+ ncore_enable_ocram_firewall();
+}
+
+const mmap_region_t plat_agilex_mmap[] = {
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE, MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE,
+ MT_NON_CACHEABLE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ {0}
+};
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+ const mmap_region_t bl_regions[] = {
+ MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
+ MT_MEMORY | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+ MT_CODE | MT_SECURE),
+ MAP_REGION_FLAT(BL_RO_DATA_BASE,
+ BL_RO_DATA_END - BL_RO_DATA_BASE,
+ MT_RO_DATA | MT_SECURE),
+#if USE_COHERENT_MEM
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+#endif
+ {0}
+ };
+
+ setup_page_tables(bl_regions, plat_agilex_mmap);
+ enable_mmu_el3(0);
+}
+
diff --git a/plat/intel/soc/agilex/include/agilex_clock_manager.h b/plat/intel/soc/agilex/include/agilex_clock_manager.h
new file mode 100644
index 0000000..f39d475
--- /dev/null
+++ b/plat/intel/soc/agilex/include/agilex_clock_manager.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CLOCKMANAGER_H
+#define CLOCKMANAGER_H
+
+#include "socfpga_handoff.h"
+
+/* Clock Manager Registers */
+#define CLKMGR_OFFSET 0xffd10000
+
+#define CLKMGR_CTRL 0x0
+#define CLKMGR_STAT 0x4
+#define CLKMGR_INTRCLR 0x14
+
+/* Main PLL Group */
+#define CLKMGR_MAINPLL 0xffd10024
+#define CLKMGR_MAINPLL_EN 0x0
+#define CLKMGR_MAINPLL_BYPASS 0xc
+#define CLKMGR_MAINPLL_MPUCLK 0x18
+#define CLKMGR_MAINPLL_NOCCLK 0x1c
+#define CLKMGR_MAINPLL_NOCDIV 0x20
+#define CLKMGR_MAINPLL_PLLGLOB 0x24
+#define CLKMGR_MAINPLL_FDBCK 0x28
+#define CLKMGR_MAINPLL_MEM 0x2c
+#define CLKMGR_MAINPLL_MEMSTAT 0x30
+#define CLKMGR_MAINPLL_PLLC0 0x34
+#define CLKMGR_MAINPLL_PLLC1 0x38
+#define CLKMGR_MAINPLL_VCOCALIB 0x3c
+#define CLKMGR_MAINPLL_PLLC2 0x40
+#define CLKMGR_MAINPLL_PLLC3 0x44
+#define CLKMGR_MAINPLL_PLLM 0x48
+#define CLKMGR_MAINPLL_LOSTLOCK 0x54
+
+/* Peripheral PLL Group */
+#define CLKMGR_PERPLL 0xffd1007c
+#define CLKMGR_PERPLL_EN 0x0
+#define CLKMGR_PERPLL_BYPASS 0xc
+#define CLKMGR_PERPLL_EMACCTL 0x18
+#define CLKMGR_PERPLL_GPIODIV 0x1c
+#define CLKMGR_PERPLL_PLLGLOB 0x20
+#define CLKMGR_PERPLL_FDBCK 0x24
+#define CLKMGR_PERPLL_MEM 0x28
+#define CLKMGR_PERPLL_MEMSTAT 0x2c
+#define CLKMGR_PERPLL_PLLC0 0x30
+#define CLKMGR_PERPLL_PLLC1 0x34
+#define CLKMGR_PERPLL_VCOCALIB 0x38
+#define CLKMGR_PERPLL_PLLC2 0x3c
+#define CLKMGR_PERPLL_PLLC3 0x40
+#define CLKMGR_PERPLL_PLLM 0x44
+#define CLKMGR_PERPLL_LOSTLOCK 0x50
+
+/* Altera Group */
+#define CLKMGR_ALTERA 0xffd100d0
+#define CLKMGR_ALTERA_JTAG 0x0
+#define CLKMGR_ALTERA_EMACACTR 0x4
+#define CLKMGR_ALTERA_EMACBCTR 0x8
+#define CLKMGR_ALTERA_EMACPTPCTR 0xc
+#define CLKMGR_ALTERA_GPIODBCTR 0x10
+#define CLKMGR_ALTERA_SDMMCCTR 0x14
+#define CLKMGR_ALTERA_S2FUSER0CTR 0x18
+#define CLKMGR_ALTERA_S2FUSER1CTR 0x1c
+#define CLKMGR_ALTERA_PSIREFCTR 0x20
+#define CLKMGR_ALTERA_EXTCNTRST 0x24
+
+/* Membus */
+#define CLKMGR_MEM_REQ BIT(24)
+#define CLKMGR_MEM_WR BIT(25)
+#define CLKMGR_MEM_ERR BIT(26)
+#define CLKMGR_MEM_WDAT_OFFSET 16
+#define CLKMGR_MEM_ADDR 0x4027
+#define CLKMGR_MEM_WDAT 0x80
+
+/* Clock Manager Macros */
+#define CLKMGR_CTRL_BOOTMODE_SET_MSK 0x00000001
+#define CLKMGR_STAT_BUSY_E_BUSY 0x1
+#define CLKMGR_STAT_BUSY(x) (((x) & 0x00000001) >> 0)
+#define CLKMGR_STAT_MAINPLLLOCKED(x) (((x) & 0x00000100) >> 8)
+#define CLKMGR_STAT_PERPLLLOCKED(x) (((x) & 0x00010000) >> 16)
+#define CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK 0x00000004
+#define CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK 0x00000008
+#define CLKMGR_INTOSC_HZ 460000000
+
+/* Main PLL Macros */
+#define CLKMGR_MAINPLL_EN_RESET 0x000000ff
+
+/* Peripheral PLL Macros */
+#define CLKMGR_PERPLL_EN_RESET 0x00000fff
+#define CLKMGR_PERPLL_EN_SDMMCCLK BIT(5)
+#define CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET(x) (((x) << 0) & 0x0000ffff)
+
+/* Altera Macros */
+#define CLKMGR_ALTERA_EXTCNTRST_RESET 0xff
+
+/* Shared Macros */
+#define CLKMGR_PSRC(x) (((x) & 0x00030000) >> 16)
+#define CLKMGR_PSRC_MAIN 0
+#define CLKMGR_PSRC_PER 1
+
+#define CLKMGR_PLLGLOB_PSRC_EOSC1 0x0
+#define CLKMGR_PLLGLOB_PSRC_INTOSC 0x1
+#define CLKMGR_PLLGLOB_PSRC_F2S 0x2
+
+#define CLKMGR_PLLM_MDIV(x) ((x) & 0x000003ff)
+#define CLKMGR_PLLGLOB_PD_SET_MSK 0x00000001
+#define CLKMGR_PLLGLOB_RST_SET_MSK 0x00000002
+
+#define CLKMGR_PLLGLOB_REFCLKDIV(x) (((x) & 0x00003f00) >> 8)
+#define CLKMGR_PLLGLOB_AREFCLKDIV(x) (((x) & 0x00000f00) >> 8)
+#define CLKMGR_PLLGLOB_DREFCLKDIV(x) (((x) & 0x00003000) >> 12)
+
+#define CLKMGR_VCOCALIB_HSCNT_SET(x) (((x) << 0) & 0x000003ff)
+#define CLKMGR_VCOCALIB_MSCNT_SET(x) (((x) << 16) & 0x00ff0000)
+
+#define CLKMGR_CLR_LOSTLOCK_BYPASS 0x20000000
+
+typedef struct {
+ uint32_t clk_freq_of_eosc1;
+ uint32_t clk_freq_of_f2h_free;
+ uint32_t clk_freq_of_cb_intosc_ls;
+} CLOCK_SOURCE_CONFIG;
+
+void config_clkmgr_handoff(handoff *hoff_ptr);
+uint32_t get_wdt_clk(void);
+uint32_t get_uart_clk(void);
+uint32_t get_mmc_clk(void);
+
+#endif
diff --git a/plat/intel/soc/agilex/include/agilex_memory_controller.h b/plat/intel/soc/agilex/include/agilex_memory_controller.h
new file mode 100644
index 0000000..3746d92
--- /dev/null
+++ b/plat/intel/soc/agilex/include/agilex_memory_controller.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX_MEMORYCONTROLLER_H
+#define AGX_MEMORYCONTROLLER_H
+
+#define AGX_MPFE_IOHMC_REG_DRAMADDRW 0xf80100a8
+#define AGX_MPFE_IOHMC_CTRLCFG0 0xf8010028
+#define AGX_MPFE_IOHMC_CTRLCFG1 0xf801002c
+#define AGX_MPFE_IOHMC_CTRLCFG2 0xf8010030
+#define AGX_MPFE_IOHMC_CTRLCFG3 0xf8010034
+#define AGX_MPFE_IOHMC_DRAMADDRW 0xf80100a8
+#define AGX_MPFE_IOHMC_DRAMTIMING0 0xf8010050
+#define AGX_MPFE_IOHMC_CALTIMING0 0xf801007c
+#define AGX_MPFE_IOHMC_CALTIMING1 0xf8010080
+#define AGX_MPFE_IOHMC_CALTIMING2 0xf8010084
+#define AGX_MPFE_IOHMC_CALTIMING3 0xf8010088
+#define AGX_MPFE_IOHMC_CALTIMING4 0xf801008c
+#define AGX_MPFE_IOHMC_CALTIMING9 0xf80100a0
+#define AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(x) (((x) & 0x000000ff) >> 0)
+#define AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(value) \
+ (((value) & 0x00000060) >> 5)
+
+#define AGX_MPFE_HMC_ADP_ECCCTRL1 0xf8011100
+#define AGX_MPFE_HMC_ADP_ECCCTRL2 0xf8011104
+#define AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT 0xf8011218
+#define AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE 0x000000ff
+#define AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL 0xf8011214
+
+
+#define AGX_MPFE_IOHMC_REG_CTRLCFG1 0xf801002c
+
+#define AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST 0xf8010110
+
+#define IOHMC_DRAMADDRW_COL_ADDR_WIDTH(x) (((x) & 0x0000001f) >> 0)
+#define IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(x) (((x) & 0x000003e0) >> 5)
+#define IOHMC_DRAMADDRW_CS_ADDR_WIDTH(x) (((x) & 0x00070000) >> 16)
+#define IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(x) (((x) & 0x0000c000) >> 14)
+#define IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(x) (((x) & 0x00003c00) >> 10)
+
+#define AGX_MPFE_DDR(x)				(0xf8000000 + (x))
+#define AGX_MPFE_HMC_ADP_DDRCALSTAT 0xf801100c
+#define AGX_MPFE_DDR_MAIN_SCHED 0xf8000400
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF 0xf8000408
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING 0xf800040c
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK 0x0000001f
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRMODE 0xf8000410
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV 0xf800043c
+#define AGX_MPFE_DDR_MAIN_SCHED_READLATENCY 0xf8000414
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE 0xf8000438
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST 10
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST 4
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST 0
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(x) (((x) << 0) & 0x0000001f)
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST 0
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK (BIT(0) | BIT(1))
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST 2
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK (BIT(2) | BIT(3))
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST 4
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK (BIT(4) | BIT(5))
+
+#define AGX_MPFE_HMC_ADP(x) (0xf8011000 + (x))
+#define AGX_MPFE_HMC_ADP_HPSINTFCSEL 0xf8011210
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL 0xf8011008
+#define HMC_ADP_DDRIOCTRL 0x8
+#define HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00000003) >> 0)
+#define HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(x) (((x) & 0x00003e00) >> 9)
+#define ADP_DRAMADDRWIDTH 0xe0
+
+#define ACT_TO_ACT_DIFF_BANK(value) (((value) & 0x00fc0000) >> 18)
+#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12)
+#define ACT_TO_RDWR(value) (((value) & 0x0000003f) >> 0)
+#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12)
+
+/* timing 2 */
+#define RD_TO_RD_DIFF_CHIP(value) (((value) & 0x00000fc0) >> 6)
+#define RD_TO_WR_DIFF_CHIP(value) (((value) & 0x3f000000) >> 24)
+#define RD_TO_WR(value) (((value) & 0x00fc0000) >> 18)
+#define RD_TO_PCH(value) (((value) & 0x00000fc0) >> 6)
+
+/* timing 3 */
+#define CALTIMING3_WR_TO_RD_DIFF_CHIP(value) (((value) & 0x0003f000) >> 12)
+#define CALTIMING3_WR_TO_RD(value) (((value) & 0x00000fc0) >> 6)
+
+/* timing 4 */
+#define PCH_TO_VALID(value) (((value) & 0x00000fc0) >> 6)
+
+#define DDRTIMING_BWRATIO_OFST 31
+#define DDRTIMING_WRTORD_OFST 26
+#define DDRTIMING_RDTOWR_OFST 21
+#define DDRTIMING_BURSTLEN_OFST 18
+#define DDRTIMING_WRTOMISS_OFST 12
+#define DDRTIMING_RDTOMISS_OFST 6
+#define DDRTIMING_ACTTOACT_OFST 0
+
+#define ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x3) >> 0)
+
+#define DDRMODE_AUTOPRECHARGE_OFST 1
+#define DDRMODE_BWRATIOEXTENDED_OFST 0
+
+
+#define AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(x) (((x) & 0x7f) >> 0)
+#define AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(x) (((x) & 0x0f) >> 0)
+
+#define AGX_CCU_CPU0_MPRT_DDR 0xf7004400
+#define AGX_CCU_CPU0_MPRT_MEM0 0xf70045c0
+#define AGX_CCU_CPU0_MPRT_MEM1A 0xf70045e0
+#define AGX_CCU_CPU0_MPRT_MEM1B 0xf7004600
+#define AGX_CCU_CPU0_MPRT_MEM1C 0xf7004620
+#define AGX_CCU_CPU0_MPRT_MEM1D 0xf7004640
+#define AGX_CCU_CPU0_MPRT_MEM1E 0xf7004660
+#define AGX_CCU_IOM_MPRT_MEM0 0xf7018560
+#define AGX_CCU_IOM_MPRT_MEM1A 0xf7018580
+#define AGX_CCU_IOM_MPRT_MEM1B 0xf70185a0
+#define AGX_CCU_IOM_MPRT_MEM1C 0xf70185c0
+#define AGX_CCU_IOM_MPRT_MEM1D 0xf70185e0
+#define AGX_CCU_IOM_MPRT_MEM1E 0xf7018600
+
+#define AGX_NOC_FW_DDR_SCR 0xf8020200
+#define AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT 0xf802021c
+#define AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT 0xf8020218
+#define AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT 0xf802029c
+#define AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT 0xf8020298
+
+#define AGX_SOC_NOC_FW_DDR_SCR_ENABLE 0xf8020200
+#define AGX_SOC_NOC_FW_DDR_SCR_ENABLESET 0xf8020204
+#define AGX_CCU_NOC_DI_SET_MSK 0x10
+
+#define AGX_SYSMGR_CORE_HMC_CLK 0xffd120b4
+#define AGX_SYSMGR_CORE_HMC_CLK_STATUS 0x00000001
+
+#define AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(x) (((x) & 0xffff) >> 0)
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK 0x00000003
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST 0
+#define AGX_MPFE_HMC_ADP_HPSINTFCSEL_ENABLE 0x001f1f1f
+#define AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST 7
+
+#define AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK 0x00010000
+#define AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK 0x00000100
+#define AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK 0x00000001
+
+#define AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK 0x00000001
+#define AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK 0x00010000
+#define AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK 0x00000100
+#define AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(value) (((value) & 0x1) >> 0)
+
+
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00003) >> 0)
+#define IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(x) (((x) & 0x03c00) >> 10)
+#define IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(x) (((x) & 0x0c000) >> 14)
+#define IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(x) (((x) & 0x0001f) >> 0)
+#define IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(x) (((x) & 0x70000) >> 16)
+#define IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(x) (((x) & 0x003e0) >> 5)
+
+#define AGX_SDRAM_0_LB_ADDR 0x0
+#define AGX_DDR_SIZE 0x40000000
+
+int init_hard_memory_controller(void);
+
+#endif
diff --git a/plat/intel/soc/agilex/include/agilex_mmc.h b/plat/intel/soc/agilex/include/agilex_mmc.h
new file mode 100644
index 0000000..00f4ca5
--- /dev/null
+++ b/plat/intel/soc/agilex/include/agilex_mmc.h
@@ -0,0 +1,7 @@
+/*
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+void agx_mmc_init(void);
diff --git a/plat/intel/soc/agilex/include/agilex_pinmux.h b/plat/intel/soc/agilex/include/agilex_pinmux.h
new file mode 100644
index 0000000..fe01062
--- /dev/null
+++ b/plat/intel/soc/agilex/include/agilex_pinmux.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX_PINMUX_H
+#define AGX_PINMUX_H
+
+#define AGX_PINMUX_PIN0SEL 0xffd13000
+#define AGX_PINMUX_IO0CTRL 0xffd13130
+#define AGX_PINMUX_PINMUX_EMAC0_USEFPGA 0xffd13300
+#define AGX_PINMUX_IO0_DELAY 0xffd13400
+
+#include "socfpga_handoff.h"
+
+void config_pinmux(handoff *handoff);
+
+#endif
+
diff --git a/plat/intel/soc/agilex/include/socfpga_plat_def.h b/plat/intel/soc/agilex/include/socfpga_plat_def.h
new file mode 100644
index 0000000..b216ab1
--- /dev/null
+++ b/plat/intel/soc/agilex/include/socfpga_plat_def.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_SOCFPGA_DEF_H
+#define PLAT_SOCFPGA_DEF_H
+
+#include <platform_def.h>
+
+/* Platform Setting */
+#define PLATFORM_MODEL PLAT_SOCFPGA_AGILEX
+#define BOOT_SOURCE BOOT_SOURCE_SDMMC
+
+/* FPGA config helpers */
+#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR 0x400000
+#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE 0x2000000
+
+/* Register Mapping */
+#define SOCFPGA_CCU_NOC_REG_BASE 0xf7000000
+#define SOCFPGA_F2SDRAMMGR_REG_BASE U(0xf8024000)
+
+#define SOCFPGA_MMC_REG_BASE 0xff808000
+
+#define SOCFPGA_RSTMGR_REG_BASE 0xffd11000
+#define SOCFPGA_SYSMGR_REG_BASE 0xffd12000
+
+#define SOCFPGA_L4_PER_SCR_REG_BASE 0xffd21000
+#define SOCFPGA_L4_SYS_SCR_REG_BASE 0xffd21100
+#define SOCFPGA_SOC2FPGA_SCR_REG_BASE 0xffd21200
+#define SOCFPGA_LWSOC2FPGA_SCR_REG_BASE 0xffd21300
+
+/* Platform specific system counter */
+#define PLAT_SYS_COUNTER_FREQ_IN_MHZ get_cpu_clk()
+
+uint32_t get_cpu_clk(void);
+
+#endif /* PLAT_SOCFPGA_DEF_H */
diff --git a/plat/intel/soc/agilex/platform.mk b/plat/intel/soc/agilex/platform.mk
new file mode 100644
index 0000000..a1e58fc
--- /dev/null
+++ b/plat/intel/soc/agilex/platform.mk
@@ -0,0 +1,83 @@
+#
+# Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES := \
+ -Iplat/intel/soc/agilex/include/ \
+ -Iplat/intel/soc/common/drivers/ \
+ -Iplat/intel/soc/common/include/
+
+# Include GICv2 driver files
+include drivers/arm/gic/v2/gicv2.mk
+AGX_GICv2_SOURCES := \
+ ${GICV2_SOURCES} \
+ plat/common/plat_gicv2.c
+
+
+PLAT_BL_COMMON_SOURCES := \
+ ${AGX_GICv2_SOURCES} \
+ drivers/delay_timer/delay_timer.c \
+ drivers/delay_timer/generic_delay_timer.c \
+ drivers/ti/uart/aarch64/16550_console.S \
+ lib/xlat_tables/aarch64/xlat_tables.c \
+ lib/xlat_tables/xlat_tables_common.c \
+ plat/intel/soc/common/aarch64/platform_common.c \
+ plat/intel/soc/common/aarch64/plat_helpers.S \
+ plat/intel/soc/common/drivers/ccu/ncore_ccu.c \
+ plat/intel/soc/common/socfpga_delay_timer.c
+
+BL2_SOURCES += \
+ common/desc_image_load.c \
+ drivers/mmc/mmc.c \
+ drivers/intel/soc/stratix10/io/s10_memmap_qspi.c \
+ drivers/io/io_storage.c \
+ drivers/io/io_block.c \
+ drivers/io/io_fip.c \
+ drivers/partition/partition.c \
+ drivers/partition/gpt.c \
+ drivers/synopsys/emmc/dw_mmc.c \
+ lib/cpus/aarch64/cortex_a53.S \
+ plat/intel/soc/agilex/bl2_plat_setup.c \
+ plat/intel/soc/agilex/soc/agilex_clock_manager.c \
+ plat/intel/soc/agilex/soc/agilex_memory_controller.c \
+ plat/intel/soc/agilex/soc/agilex_mmc.c \
+ plat/intel/soc/agilex/soc/agilex_pinmux.c \
+ plat/intel/soc/common/bl2_plat_mem_params_desc.c \
+ plat/intel/soc/common/socfpga_image_load.c \
+ plat/intel/soc/common/socfpga_storage.c \
+ plat/intel/soc/common/soc/socfpga_emac.c \
+ plat/intel/soc/common/soc/socfpga_firewall.c \
+ plat/intel/soc/common/soc/socfpga_handoff.c \
+ plat/intel/soc/common/soc/socfpga_mailbox.c \
+ plat/intel/soc/common/soc/socfpga_reset_manager.c \
+ plat/intel/soc/common/drivers/qspi/cadence_qspi.c \
+ plat/intel/soc/common/drivers/wdt/watchdog.c
+
+include lib/zlib/zlib.mk
+PLAT_INCLUDES += -Ilib/zlib
+BL2_SOURCES += $(ZLIB_SOURCES)
+
+BL31_SOURCES += \
+ drivers/arm/cci/cci.c \
+ lib/cpus/aarch64/aem_generic.S \
+ lib/cpus/aarch64/cortex_a53.S \
+ plat/common/plat_psci_common.c \
+ plat/intel/soc/agilex/bl31_plat_setup.c \
+ plat/intel/soc/agilex/soc/agilex_clock_manager.c \
+ plat/intel/soc/common/socfpga_psci.c \
+ plat/intel/soc/common/socfpga_sip_svc.c \
+ plat/intel/soc/common/socfpga_sip_svc_v2.c \
+ plat/intel/soc/common/socfpga_topology.c \
+ plat/intel/soc/common/sip/socfpga_sip_ecc.c \
+ plat/intel/soc/common/sip/socfpga_sip_fcs.c \
+ plat/intel/soc/common/soc/socfpga_mailbox.c \
+ plat/intel/soc/common/soc/socfpga_reset_manager.c
+
+PROGRAMMABLE_RESET_ADDRESS := 0
+BL2_AT_EL3 := 1
+BL2_INV_DCACHE := 0
+MULTI_CONSOLE_API := 1
+USE_COHERENT_MEM := 1
diff --git a/plat/intel/soc/agilex/soc/agilex_clock_manager.c b/plat/intel/soc/agilex/soc/agilex_clock_manager.c
new file mode 100644
index 0000000..76b9937
--- /dev/null
+++ b/plat/intel/soc/agilex/soc/agilex_clock_manager.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <errno.h>
+#include <lib/mmio.h>
+
+#include "agilex_clock_manager.h"
+#include "socfpga_handoff.h"
+#include "socfpga_system_manager.h"
+
+
+uint32_t wait_pll_lock(void)
+{
+ uint32_t data;
+ uint32_t count = 0;
+
+ do {
+ data = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT);
+ count++;
+ if (count >= 1000)
+ return -ETIMEDOUT;
+
+ } while ((CLKMGR_STAT_MAINPLLLOCKED(data) == 0) ||
+ (CLKMGR_STAT_PERPLLLOCKED(data) == 0));
+ return 0;
+}
+
+uint32_t wait_fsm(void)
+{
+ uint32_t data;
+ uint32_t count = 0;
+
+ do {
+ data = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT);
+ count++;
+ if (count >= 1000)
+ return -ETIMEDOUT;
+
+ } while (CLKMGR_STAT_BUSY(data) == CLKMGR_STAT_BUSY_E_BUSY);
+
+ return 0;
+}
+
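+/*
+ * Issue a write request on the PLL membus interface and poll until the
+ * hardware clears the request bit or the retry limit is reached.
+ */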
+uint32_t pll_source_sync_config(uint32_t pll_mem_offset, uint32_t data)
+{
+ uint32_t val = 0;
+ uint32_t count = 0;
+ uint32_t req_status = 0;
+
+ val = (CLKMGR_MEM_WR | CLKMGR_MEM_REQ |
+ (data << CLKMGR_MEM_WDAT_OFFSET) | CLKMGR_MEM_ADDR);
+ mmio_write_32(pll_mem_offset, val);
+
+ do {
+ req_status = mmio_read_32(pll_mem_offset);
+ count++;
+ } while ((req_status & CLKMGR_MEM_REQ) && (count < 10));
+
+	if (count >= 10)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
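+/*
+ * Issue a read request on the PLL membus interface, wait for it to
+ * complete, then return the data latched in the read-back register.
+ */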
+uint32_t pll_source_sync_read(uint32_t pll_mem_offset)
+{
+ uint32_t val = 0;
+ uint32_t rdata = 0;
+ uint32_t count = 0;
+ uint32_t req_status = 0;
+
+ val = (CLKMGR_MEM_REQ | CLKMGR_MEM_ADDR);
+ mmio_write_32(pll_mem_offset, val);
+
+ do {
+ req_status = mmio_read_32(pll_mem_offset);
+ count++;
+ } while ((req_status & CLKMGR_MEM_REQ) && (count < 10));
+
+	if (count >= 10)
+ return -ETIMEDOUT;
+
+ rdata = mmio_read_32(pll_mem_offset + 0x4);
+ INFO("rdata (%x) = %x\n", pll_mem_offset + 0x4, rdata);
+
+ return rdata;
+}
+
+void config_clkmgr_handoff(handoff *hoff_ptr)
+{
+ uint32_t mdiv, mscnt, hscnt;
+ uint32_t drefclk_div, refclk_div, rdata;
+
+	/* Set clock manager into boot mode before running configuration */
+ mmio_setbits_32(CLKMGR_OFFSET + CLKMGR_CTRL,
+ CLKMGR_CTRL_BOOTMODE_SET_MSK);
+ /* Bypass all mainpllgrp's clocks */
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASS, 0x7);
+ wait_fsm();
+
+ /* Bypass all perpllgrp's clocks */
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0x7f);
+ wait_fsm();
+
+ /* Put both PLL in reset and power down */
+ mmio_clrbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
+ CLKMGR_PLLGLOB_PD_SET_MSK |
+ CLKMGR_PLLGLOB_RST_SET_MSK);
+ mmio_clrbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
+ CLKMGR_PLLGLOB_PD_SET_MSK |
+ CLKMGR_PLLGLOB_RST_SET_MSK);
+
+ /* Setup main PLL dividers */
+ mdiv = CLKMGR_PLLM_MDIV(hoff_ptr->main_pll_pllm);
+
+ drefclk_div = CLKMGR_PLLGLOB_DREFCLKDIV(
+ hoff_ptr->main_pll_pllglob);
+ refclk_div = CLKMGR_PLLGLOB_REFCLKDIV(
+ hoff_ptr->main_pll_pllglob);
+
+ mscnt = 100 / (mdiv * BIT(drefclk_div));
+ if (!mscnt)
+ mscnt = 1;
+ hscnt = (mdiv * mscnt * BIT(drefclk_div) / refclk_div) - 4;
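+	/*
+	 * Illustrative example (assumed handoff values): with mdiv = 25,
+	 * drefclk_div = 0 and refclk_div = 1, mscnt = 100 / (25 * 1) = 4
+	 * and hscnt = (25 * 4 * 1 / 1) - 4 = 96.
+	 */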
+
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
+ hoff_ptr->main_pll_pllglob &
+ ~CLKMGR_PLLGLOB_RST_SET_MSK);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_FDBCK,
+ hoff_ptr->main_pll_fdbck);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_VCOCALIB,
+ CLKMGR_VCOCALIB_HSCNT_SET(hscnt) |
+ CLKMGR_VCOCALIB_MSCNT_SET(mscnt));
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC0,
+ hoff_ptr->main_pll_pllc0);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC1,
+ hoff_ptr->main_pll_pllc1);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC2,
+ hoff_ptr->main_pll_pllc2);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC3,
+ hoff_ptr->main_pll_pllc3);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLM,
+ hoff_ptr->main_pll_pllm);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_MPUCLK,
+ hoff_ptr->main_pll_mpuclk);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCCLK,
+ hoff_ptr->main_pll_nocclk);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCDIV,
+ hoff_ptr->main_pll_nocdiv);
+
+ /* Setup peripheral PLL dividers */
+ mdiv = CLKMGR_PLLM_MDIV(hoff_ptr->per_pll_pllm);
+
+ drefclk_div = CLKMGR_PLLGLOB_DREFCLKDIV(
+ hoff_ptr->per_pll_pllglob);
+ refclk_div = CLKMGR_PLLGLOB_REFCLKDIV(
+ hoff_ptr->per_pll_pllglob);
+
+
+ mscnt = 100 / (mdiv * BIT(drefclk_div));
+ if (!mscnt)
+ mscnt = 1;
+ hscnt = (mdiv * mscnt * BIT(drefclk_div) / refclk_div) - 4;
+
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
+ hoff_ptr->per_pll_pllglob &
+ ~CLKMGR_PLLGLOB_RST_SET_MSK);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_FDBCK,
+ hoff_ptr->per_pll_fdbck);
+
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_VCOCALIB,
+ CLKMGR_VCOCALIB_HSCNT_SET(hscnt) |
+ CLKMGR_VCOCALIB_MSCNT_SET(mscnt));
+
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC0,
+ hoff_ptr->per_pll_pllc0);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC1,
+ hoff_ptr->per_pll_pllc1);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC2,
+ hoff_ptr->per_pll_pllc2);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC3,
+ hoff_ptr->per_pll_pllc3);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLM,
+ hoff_ptr->per_pll_pllm);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EMACCTL,
+ hoff_ptr->per_pll_emacctl);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_GPIODIV,
+ hoff_ptr->per_pll_gpiodiv);
+
+ /* Take both PLL out of reset and power up */
+ mmio_setbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
+ CLKMGR_PLLGLOB_PD_SET_MSK |
+ CLKMGR_PLLGLOB_RST_SET_MSK);
+ mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
+ CLKMGR_PLLGLOB_PD_SET_MSK |
+ CLKMGR_PLLGLOB_RST_SET_MSK);
+
+ rdata = pll_source_sync_read(CLKMGR_MAINPLL +
+ CLKMGR_MAINPLL_MEM);
+ pll_source_sync_config(CLKMGR_MAINPLL + CLKMGR_MAINPLL_MEM,
+ rdata | 0x80);
+
+ rdata = pll_source_sync_read(CLKMGR_PERPLL + CLKMGR_PERPLL_MEM);
+ pll_source_sync_config(CLKMGR_PERPLL + CLKMGR_PERPLL_MEM,
+ rdata | 0x80);
+
+ wait_pll_lock();
+
+	/* Configure ping pong counters in Altera group */
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACACTR,
+ hoff_ptr->alt_emacactr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACBCTR,
+ hoff_ptr->alt_emacbctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACPTPCTR,
+ hoff_ptr->alt_emacptpctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_GPIODBCTR,
+ hoff_ptr->alt_gpiodbctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_SDMMCCTR,
+ hoff_ptr->alt_sdmmcctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_S2FUSER0CTR,
+ hoff_ptr->alt_s2fuser0ctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_S2FUSER1CTR,
+ hoff_ptr->alt_s2fuser1ctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_PSIREFCTR,
+ hoff_ptr->alt_psirefctr);
+
+ /* Clear lost lock bypass mode */
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_LOSTLOCK, 0x1);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_LOSTLOCK, 0x1);
+
+ mmio_setbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
+ CLKMGR_CLR_LOSTLOCK_BYPASS);
+
+ mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
+ CLKMGR_CLR_LOSTLOCK_BYPASS);
+
+ /* Take all PLLs out of bypass */
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASS, 0);
+ wait_fsm();
+
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0);
+ wait_fsm();
+
+	/*
+	 * Clear the lost-lock interrupt status bits that might have been
+	 * set during configuration.
+	 */
+ mmio_clrbits_32(CLKMGR_OFFSET + CLKMGR_INTRCLR,
+ CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK |
+ CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK);
+
+ /* Take all ping pong counters out of reset */
+ mmio_clrbits_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EXTCNTRST,
+ CLKMGR_ALTERA_EXTCNTRST_RESET);
+
+ /* Set safe mode / out of boot mode */
+ mmio_clrbits_32(CLKMGR_OFFSET + CLKMGR_CTRL,
+ CLKMGR_CTRL_BOOTMODE_SET_MSK);
+ wait_fsm();
+
+ /* Enable mainpllgrp's software-managed clock */
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_EN,
+ CLKMGR_MAINPLL_EN_RESET);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN,
+ CLKMGR_PERPLL_EN_RESET);
+
+ /* Pass clock source frequency into scratch register */
+ mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1),
+ hoff_ptr->hps_osc_clk_h);
+ mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2),
+ hoff_ptr->fpga_clk_hz);
+}
+
+/* Extract reference clock from platform clock source */
+uint32_t get_ref_clk(uint32_t pllglob)
+{
+ uint32_t arefclkdiv, ref_clk;
+ uint32_t scr_reg;
+
+ switch (CLKMGR_PSRC(pllglob)) {
+ case CLKMGR_PLLGLOB_PSRC_EOSC1:
+ scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1);
+ ref_clk = mmio_read_32(scr_reg);
+ break;
+ case CLKMGR_PLLGLOB_PSRC_INTOSC:
+ ref_clk = CLKMGR_INTOSC_HZ;
+ break;
+ case CLKMGR_PLLGLOB_PSRC_F2S:
+ scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2);
+ ref_clk = mmio_read_32(scr_reg);
+ break;
+ default:
+ ref_clk = 0;
+ assert(0);
+ break;
+ }
+
+ arefclkdiv = CLKMGR_PLLGLOB_AREFCLKDIV(pllglob);
+ ref_clk /= arefclkdiv;
+
+ return ref_clk;
+}
+
+/* Calculate clock frequency based on parameter */
+uint32_t get_clk_freq(uint32_t psrc_reg, uint32_t main_pllc, uint32_t per_pllc)
+{
+ uint32_t clk_psrc, mdiv, ref_clk;
+ uint32_t pllm_reg, pllc_reg, pllc_div, pllglob_reg;
+
+ clk_psrc = mmio_read_32(CLKMGR_MAINPLL + psrc_reg);
+
+ switch (CLKMGR_PSRC(clk_psrc)) {
+ case CLKMGR_PSRC_MAIN:
+ pllm_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLM;
+ pllc_reg = CLKMGR_MAINPLL + main_pllc;
+ pllglob_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB;
+ break;
+ case CLKMGR_PSRC_PER:
+ pllm_reg = CLKMGR_PERPLL + CLKMGR_PERPLL_PLLM;
+ pllc_reg = CLKMGR_PERPLL + per_pllc;
+ pllglob_reg = CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB;
+ break;
+ default:
+ return 0;
+ }
+
+ ref_clk = get_ref_clk(mmio_read_32(pllglob_reg));
+ mdiv = CLKMGR_PLLM_MDIV(mmio_read_32(pllm_reg));
+ ref_clk *= mdiv;
+
+ pllc_div = mmio_read_32(pllc_reg) & 0x7ff;
+
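+	/*
+	 * Resulting frequency: f = (f_src / arefclkdiv) * mdiv / pllc_div.
+	 * As an illustration (assumed values): a 25 MHz EOSC1 with
+	 * arefclkdiv = 1, mdiv = 40 and pllc_div = 4 yields
+	 * (25 MHz * 40) / 4 = 250 MHz.
+	 */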
+ return ref_clk / pllc_div;
+}
+
+/* Return L3 interconnect clock */
+uint32_t get_l3_clk(void)
+{
+ uint32_t l3_clk;
+
+ l3_clk = get_clk_freq(CLKMGR_MAINPLL_NOCCLK, CLKMGR_MAINPLL_PLLC1,
+ CLKMGR_PERPLL_PLLC1);
+ return l3_clk;
+}
+
+/* Calculate clock frequency to be used for watchdog timer */
+uint32_t get_wdt_clk(void)
+{
+ uint32_t l3_clk, l4_sys_clk;
+
+ l3_clk = get_l3_clk();
+ l4_sys_clk = l3_clk / 4;
+
+ return l4_sys_clk;
+}
+
+/* Calculate clock frequency to be used for UART driver */
+uint32_t get_uart_clk(void)
+{
+ uint32_t data32, l3_clk, l4_sp_clk;
+
+ l3_clk = get_l3_clk();
+
+ data32 = mmio_read_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCDIV);
+ data32 = (data32 >> 16) & 0x3;
+
+ l4_sp_clk = l3_clk >> data32;
+
+ return l4_sp_clk;
+}
+
+/* Calculate clock frequency to be used for SDMMC driver */
+uint32_t get_mmc_clk(void)
+{
+ uint32_t data32, mmc_clk;
+
+ mmc_clk = get_clk_freq(CLKMGR_ALTERA_SDMMCCTR,
+ CLKMGR_MAINPLL_PLLC3, CLKMGR_PERPLL_PLLC3);
+
+ data32 = mmio_read_32(CLKMGR_ALTERA + CLKMGR_ALTERA_SDMMCCTR);
+ data32 = (data32 & 0x7ff) + 1;
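+	/* Effective SDMMC clock = (counter source clock / (cnt + 1)) / 4 */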
+ mmc_clk = (mmc_clk / data32) / 4;
+
+ return mmc_clk;
+}
+
+/* Get cpu freq clock */
+uint32_t get_cpu_clk(void)
+{
+ uint32_t cpu_clk;
+
+ cpu_clk = get_l3_clk()/PLAT_SYS_COUNTER_CONVERT_TO_MHZ;
+
+ return cpu_clk;
+}
diff --git a/plat/intel/soc/agilex/soc/agilex_memory_controller.c b/plat/intel/soc/agilex/soc/agilex_memory_controller.c
new file mode 100644
index 0000000..2aabe87
--- /dev/null
+++ b/plat/intel/soc/agilex/soc/agilex_memory_controller.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <platform_def.h>
+
+#include "agilex_memory_controller.h"
+
+#define ALT_CCU_NOC_DI_SET_MSK 0x10
+
+#define DDR_READ_LATENCY_DELAY 40
+#define MAX_MEM_CAL_RETRY 3
+#define PRE_CALIBRATION_DELAY 1
+#define POST_CALIBRATION_DELAY 1
+#define TIMEOUT_EMIF_CALIBRATION 1000
+#define CLEAR_EMIF_DELAY 1000
+#define CLEAR_EMIF_TIMEOUT 1000
+
+#define DDR_CONFIG(A, B, C, R) (((A) << 24) | ((B) << 16) | ((C) << 8) | (R))
+#define DDR_CONFIG_ELEMENTS (sizeof(ddr_config)/sizeof(uint32_t))
+
+/* tWR = min. 15 ns constant; see the JEDEC standard, e.g. JESD79-4 for DDR4 */
+#define tWR_IN_NS 15
+
+void configure_hmc_adaptor_regs(void);
+void configure_ddr_sched_ctrl_regs(void);
+
+/* The following are the supported configurations */
+uint32_t ddr_config[] = {
+ /* DDR_CONFIG(Address order,Bank,Column,Row) */
+ /* List for DDR3 or LPDDR3 (pinout order > chip, row, bank, column) */
+ DDR_CONFIG(0, 3, 10, 12),
+ DDR_CONFIG(0, 3, 9, 13),
+ DDR_CONFIG(0, 3, 10, 13),
+ DDR_CONFIG(0, 3, 9, 14),
+ DDR_CONFIG(0, 3, 10, 14),
+ DDR_CONFIG(0, 3, 10, 15),
+ DDR_CONFIG(0, 3, 11, 14),
+ DDR_CONFIG(0, 3, 11, 15),
+ DDR_CONFIG(0, 3, 10, 16),
+ DDR_CONFIG(0, 3, 11, 16),
+ DDR_CONFIG(0, 3, 12, 15), /* 0xa */
+ /* List for DDR4 only (pinout order > chip, bank, row, column) */
+ DDR_CONFIG(1, 3, 10, 14),
+ DDR_CONFIG(1, 4, 10, 14),
+ DDR_CONFIG(1, 3, 10, 15),
+ DDR_CONFIG(1, 4, 10, 15),
+ DDR_CONFIG(1, 3, 10, 16),
+ DDR_CONFIG(1, 4, 10, 16),
+ DDR_CONFIG(1, 3, 10, 17),
+ DDR_CONFIG(1, 4, 10, 17),
+};
+
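+/*
+ * Return the index of the matching entry in ddr_config[], or 0 when no
+ * entry matches (0 is also the index of the first DDR3 entry).
+ */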
+static int match_ddr_conf(uint32_t ddr_conf)
+{
+ int i;
+
+ for (i = 0; i < DDR_CONFIG_ELEMENTS; i++) {
+ if (ddr_conf == ddr_config[i])
+ return i;
+ }
+ return 0;
+}
+
+static int check_hmc_clk(void)
+{
+ unsigned long timeout = 0;
+ uint32_t hmc_clk;
+
+ do {
+ hmc_clk = mmio_read_32(AGX_SYSMGR_CORE_HMC_CLK);
+ if (hmc_clk & AGX_SYSMGR_CORE_HMC_CLK_STATUS)
+ break;
+ udelay(1);
+ } while (++timeout < 1000);
+ if (timeout >= 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int clear_emif(void)
+{
+ uint32_t data;
+ unsigned long timeout;
+
+ mmio_write_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL, 0);
+
+ timeout = 0;
+ do {
+ data = mmio_read_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT);
+ if ((data & AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE) == 0)
+ break;
+ udelay(CLEAR_EMIF_DELAY);
+ } while (++timeout < CLEAR_EMIF_TIMEOUT);
+ if (timeout >= CLEAR_EMIF_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mem_calibration(void)
+{
+ int status;
+ uint32_t data;
+ unsigned long timeout;
+ unsigned long retry = 0;
+
+ udelay(PRE_CALIBRATION_DELAY);
+
+ do {
+ if (retry != 0)
+ INFO("DDR: Retrying DRAM calibration\n");
+
+ timeout = 0;
+ do {
+ data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRCALSTAT);
+ if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 1)
+ break;
+ udelay(500);
+ } while (++timeout < TIMEOUT_EMIF_CALIBRATION);
+
+ if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) {
+ status = clear_emif();
+ if (status)
+ ERROR("Failed to clear Emif\n");
+ } else {
+ break;
+ }
+ } while (++retry < MAX_MEM_CAL_RETRY);
+
+ if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) {
+ ERROR("DDR: DRAM calibration failed.\n");
+ status = -EIO;
+ } else {
+ INFO("DDR: DRAM calibration success.\n");
+ status = 0;
+ }
+
+ udelay(POST_CALIBRATION_DELAY);
+
+ return status;
+}
+
+int init_hard_memory_controller(void)
+{
+ int status;
+
+ status = check_hmc_clk();
+ if (status) {
+ ERROR("DDR: Error, HMC clock not running\n");
+ return status;
+ }
+
+ status = mem_calibration();
+ if (status) {
+ ERROR("DDR: Memory Calibration Failed\n");
+ return status;
+ }
+
+ configure_hmc_adaptor_regs();
+
+ return 0;
+}
+
+void configure_ddr_sched_ctrl_regs(void)
+{
+ uint32_t data, dram_addr_order, ddr_conf, bank, row, col,
+ rd_to_miss, wr_to_miss, burst_len, burst_len_ddr_clk,
+ burst_len_sched_clk, act_to_act, rd_to_wr, wr_to_rd, bw_ratio,
+ t_rtp, t_rp, t_rcd, rd_latency, tw_rin_clk_cycles,
+ bw_ratio_extended, auto_precharge = 0, act_to_act_bank, faw,
+ faw_bank, bus_rd_to_rd, bus_rd_to_wr, bus_wr_to_rd;
+
+ INFO("Init HPS NOC's DDR Scheduler.\n");
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG1);
+ dram_addr_order = AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW);
+
+ col = IOHMC_DRAMADDRW_COL_ADDR_WIDTH(data);
+ row = IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(data);
+ bank = IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(data);
+
+ ddr_conf = match_ddr_conf(DDR_CONFIG(dram_addr_order, bank, col, row));
+
+ if (ddr_conf) {
+ mmio_clrsetbits_32(
+ AGX_MPFE_DDR_MAIN_SCHED_DDRCONF,
+ AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK,
+ AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(ddr_conf));
+ } else {
+ ERROR("DDR: Cannot find predefined ddrConf configuration.\n");
+ }
+
+ mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_DRAMTIMING0);
+ rd_latency = AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING0);
+ act_to_act = ACT_TO_ACT(data);
+ t_rcd = ACT_TO_RDWR(data);
+ act_to_act_bank = ACT_TO_ACT_DIFF_BANK(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING1);
+ rd_to_wr = RD_TO_WR(data);
+ bus_rd_to_rd = RD_TO_RD_DIFF_CHIP(data);
+ bus_rd_to_wr = RD_TO_WR_DIFF_CHIP(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING2);
+ t_rtp = RD_TO_PCH(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING3);
+ wr_to_rd = CALTIMING3_WR_TO_RD(data);
+ bus_wr_to_rd = CALTIMING3_WR_TO_RD_DIFF_CHIP(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING4);
+ t_rp = PCH_TO_VALID(data);
+
+ data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL));
+ bw_ratio = ((HMC_ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 0 : 1);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0);
+ burst_len = HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(data);
+ burst_len_ddr_clk = burst_len / 2;
+ burst_len_sched_clk = ((burst_len/2) / 2);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0);
+ switch (AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(data)) {
+ case 1:
+ /* DDR4 - 1333MHz */
+ /* 20 (19.995) clock cycles = 15ns */
+ /* Calculate with rounding */
+ tw_rin_clk_cycles = (((tWR_IN_NS * 1333) % 1000) >= 500) ?
+ ((tWR_IN_NS * 1333) / 1000) + 1 :
+ ((tWR_IN_NS * 1333) / 1000);
+ break;
+ default:
+ /* Others - 1066MHz or slower */
+ /* 16 (15.990) clock cycles = 15ns */
+ /* Calculate with rounding */
+ tw_rin_clk_cycles = (((tWR_IN_NS * 1066) % 1000) >= 500) ?
+ ((tWR_IN_NS * 1066) / 1000) + 1 :
+ ((tWR_IN_NS * 1066) / 1000);
+ break;
+ }
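+	/*
+	 * Worked example of the rounding above: at 1333 MHz, 15 ns * 1333 =
+	 * 19995 and 19995 % 1000 = 995 >= 500, so the result rounds up to
+	 * 19 + 1 = 20 clock cycles; at 1066 MHz, 15 * 1066 = 15990 rounds
+	 * up to 15 + 1 = 16 clock cycles, matching the comments above.
+	 */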
+
+ rd_to_miss = t_rtp + t_rp + t_rcd - burst_len_sched_clk;
+ wr_to_miss = ((rd_latency + burst_len_ddr_clk + 2 + tw_rin_clk_cycles)
+ / 2) - rd_to_wr + t_rp + t_rcd;
+
+ mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING,
+ bw_ratio << DDRTIMING_BWRATIO_OFST |
+ wr_to_rd << DDRTIMING_WRTORD_OFST|
+ rd_to_wr << DDRTIMING_RDTOWR_OFST |
+ burst_len_sched_clk << DDRTIMING_BURSTLEN_OFST |
+ wr_to_miss << DDRTIMING_WRTOMISS_OFST |
+ rd_to_miss << DDRTIMING_RDTOMISS_OFST |
+ act_to_act << DDRTIMING_ACTTOACT_OFST);
+
+ data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL));
+ bw_ratio_extended = ((ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 1 : 0);
+
+ mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRMODE,
+ bw_ratio_extended << DDRMODE_BWRATIOEXTENDED_OFST |
+ auto_precharge << DDRMODE_AUTOPRECHARGE_OFST);
+
+ mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_READLATENCY,
+ (rd_latency / 2) + DDR_READ_LATENCY_DELAY);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING9);
+ faw = AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(data);
+
+	faw_bank = 1;	/* Always 1 because we always have 4-bank DDR */
+
+ mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE,
+ faw_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST |
+ faw << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST |
+ act_to_act_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST);
+
+ mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV,
+ ((bus_rd_to_rd
+ << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST)
+ & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK) |
+ ((bus_rd_to_wr
+ << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST)
+ & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK) |
+ ((bus_wr_to_rd
+ << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST)
+ & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK));
+
+}
+
+unsigned long get_physical_dram_size(void)
+{
+ uint32_t data;
+ unsigned long ram_addr_width, ram_ext_if_io_width;
+
+ data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRIOCTRL);
+ switch (AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(data)) {
+ case 0:
+ ram_ext_if_io_width = 16;
+ break;
+ case 1:
+ ram_ext_if_io_width = 32;
+ break;
+ case 2:
+ ram_ext_if_io_width = 64;
+ break;
+ default:
+ ram_ext_if_io_width = 0;
+ break;
+ }
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_REG_DRAMADDRW);
+ ram_addr_width = IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(data);
+
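+	/*
+	 * For example, a hypothetical 32-bit interface with 10 column, 15 row,
+	 * 2 bank, 2 bank-group and 1 CS address bits would give
+	 * (1 << 30) * (32 / 8) = 4 GiB.
+	 */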
+ return (1 << ram_addr_width) * (ram_ext_if_io_width / 8);
+}
+
+
+
+void configure_hmc_adaptor_regs(void)
+{
+ uint32_t data;
+ uint32_t dram_io_width;
+
+ /* Configure DDR data rate */
+ dram_io_width = AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(
+ mmio_read_32(AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST));
+ dram_io_width = (dram_io_width & 0xFF) >> 5;
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG3);
+
+ dram_io_width |= (data & 0x4);
+
+ mmio_write_32(AGX_MPFE_HMC_ADP_DDRIOCTRL, dram_io_width);
+
+ /* Copy dram addr width from IOHMC to HMC ADP */
+ data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW);
+ mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data);
+
+ /* Enable nonsecure access to DDR */
+ data = get_physical_dram_size();
+
+ if (data < AGX_DDR_SIZE)
+ data = AGX_DDR_SIZE;
+
+ mmio_write_32(AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT, data - 1);
+ mmio_write_32(AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT, 0x1f);
+
+ mmio_write_32(AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT, data - 1);
+
+ mmio_write_32(AGX_SOC_NOC_FW_DDR_SCR_ENABLESET, BIT(0) | BIT(8));
+
+ /* ECC enablement */
+ data = mmio_read_32(AGX_MPFE_IOHMC_REG_CTRLCFG1);
+ if (data & (1 << AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST)) {
+ mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1,
+ AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK,
+ AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK);
+
+ mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL2,
+ AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK,
+ AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK);
+
+ mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1,
+ AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK,
+ AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK);
+ INFO("Scrubbing ECC\n");
+
+ /* ECC Scrubbing */
+ zeromem(DRAM_BASE, DRAM_SIZE);
+ } else {
+ INFO("ECC is disabled.\n");
+ }
+}
diff --git a/plat/intel/soc/agilex/soc/agilex_mmc.c b/plat/intel/soc/agilex/soc/agilex_mmc.c
new file mode 100644
index 0000000..e05d92a
--- /dev/null
+++ b/plat/intel/soc/agilex/soc/agilex_mmc.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <lib/mmio.h>
+
+#include "socfpga_system_manager.h"
+#include "agilex_clock_manager.h"
+
+void agx_mmc_init(void)
+{
+ mmio_clrbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN,
+ CLKMGR_PERPLL_EN_SDMMCCLK);
+ mmio_write_32(SOCFPGA_SYSMGR(SDMMC),
+ SYSMGR_SDMMC_SMPLSEL(0) | SYSMGR_SDMMC_DRVSEL(3));
+ mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN,
+ CLKMGR_PERPLL_EN_SDMMCCLK);
+}
diff --git a/plat/intel/soc/agilex/soc/agilex_pinmux.c b/plat/intel/soc/agilex/soc/agilex_pinmux.c
new file mode 100644
index 0000000..0b908cf
--- /dev/null
+++ b/plat/intel/soc/agilex/soc/agilex_pinmux.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+
+#include "agilex_pinmux.h"
+#include "socfpga_system_manager.h"
+
+const uint32_t sysmgr_pinmux_array_sel[] = {
+ 0x00000000, 0x00000001, /* usb */
+ 0x00000004, 0x00000001,
+ 0x00000008, 0x00000001,
+ 0x0000000c, 0x00000001,
+ 0x00000010, 0x00000001,
+ 0x00000014, 0x00000001,
+ 0x00000018, 0x00000001,
+ 0x0000001c, 0x00000001,
+ 0x00000020, 0x00000001,
+ 0x00000024, 0x00000001,
+ 0x00000028, 0x00000001,
+ 0x0000002c, 0x00000001,
+ 0x00000030, 0x00000000, /* emac0 */
+ 0x00000034, 0x00000000,
+ 0x00000038, 0x00000000,
+ 0x0000003c, 0x00000000,
+ 0x00000040, 0x00000000,
+ 0x00000044, 0x00000000,
+ 0x00000048, 0x00000000,
+ 0x0000004c, 0x00000000,
+ 0x00000050, 0x00000000,
+ 0x00000054, 0x00000000,
+ 0x00000058, 0x00000000,
+ 0x0000005c, 0x00000000,
+ 0x00000060, 0x00000008, /* gpio1 */
+ 0x00000064, 0x00000008,
+ 0x00000068, 0x00000005, /* uart0 tx */
+ 0x0000006c, 0x00000005, /* uart 0 rx */
+ 0x00000070, 0x00000008, /* gpio */
+ 0x00000074, 0x00000008,
+ 0x00000078, 0x00000004, /* i2c1 */
+ 0x0000007c, 0x00000004,
+ 0x00000080, 0x00000007, /* jtag */
+ 0x00000084, 0x00000007,
+ 0x00000088, 0x00000007,
+ 0x0000008c, 0x00000007,
+ 0x00000090, 0x00000001, /* sdmmc data0 */
+ 0x00000094, 0x00000001,
+ 0x00000098, 0x00000001,
+ 0x0000009c, 0x00000001,
+ 0x00000100, 0x00000001,
+ 0x00000104, 0x00000001, /* sdmmc.data3 */
+ 0x00000108, 0x00000008, /* loan */
+ 0x0000010c, 0x00000008, /* gpio */
+ 0x00000110, 0x00000008,
+ 0x00000114, 0x00000008, /* gpio1.io21 */
+ 0x00000118, 0x00000005, /* mdio0.mdio */
+ 0x0000011c, 0x00000005 /* mdio0.mdc */
+};
+
+const uint32_t sysmgr_pinmux_array_ctrl[] = {
+ 0x00000000, 0x00502c38, /* Q1_1 */
+ 0x00000004, 0x00102c38,
+ 0x00000008, 0x00502c38,
+ 0x0000000c, 0x00502c38,
+ 0x00000010, 0x00502c38,
+ 0x00000014, 0x00502c38,
+ 0x00000018, 0x00502c38,
+ 0x0000001c, 0x00502c38,
+ 0x00000020, 0x00502c38,
+ 0x00000024, 0x00502c38,
+ 0x00000028, 0x00502c38,
+ 0x0000002c, 0x00502c38,
+ 0x00000030, 0x00102c38, /* Q2_1 */
+ 0x00000034, 0x00102c38,
+ 0x00000038, 0x00502c38,
+ 0x0000003c, 0x00502c38,
+ 0x00000040, 0x00102c38,
+ 0x00000044, 0x00102c38,
+ 0x00000048, 0x00502c38,
+ 0x0000004c, 0x00502c38,
+ 0x00000050, 0x00102c38,
+ 0x00000054, 0x00102c38,
+ 0x00000058, 0x00502c38,
+ 0x0000005c, 0x00502c38,
+ 0x00000060, 0x00502c38, /* Q3_1 */
+ 0x00000064, 0x00502c38,
+ 0x00000068, 0x00102c38,
+ 0x0000006c, 0x00502c38,
+ 0x000000d0, 0x00502c38,
+ 0x000000d4, 0x00502c38,
+ 0x000000d8, 0x00542c38,
+ 0x000000dc, 0x00542c38,
+ 0x000000e0, 0x00502c38,
+ 0x000000e4, 0x00502c38,
+ 0x000000e8, 0x00102c38,
+ 0x000000ec, 0x00502c38,
+ 0x000000f0, 0x00502c38, /* Q4_1 */
+ 0x000000f4, 0x00502c38,
+ 0x000000f8, 0x00102c38,
+ 0x000000fc, 0x00502c38,
+ 0x00000100, 0x00502c38,
+ 0x00000104, 0x00502c38,
+ 0x00000108, 0x00102c38,
+ 0x0000010c, 0x00502c38,
+ 0x00000110, 0x00502c38,
+ 0x00000114, 0x00502c38,
+ 0x00000118, 0x00542c38,
+ 0x0000011c, 0x00102c38
+};
+
+const uint32_t sysmgr_pinmux_array_fpga[] = {
+ 0x00000000, 0x00000000,
+ 0x00000004, 0x00000000,
+ 0x00000008, 0x00000000,
+ 0x0000000c, 0x00000000,
+ 0x00000010, 0x00000000,
+ 0x00000014, 0x00000000,
+ 0x00000018, 0x00000000,
+ 0x0000001c, 0x00000000,
+ 0x00000020, 0x00000000,
+ 0x00000028, 0x00000000,
+ 0x0000002c, 0x00000000,
+ 0x00000030, 0x00000000,
+ 0x00000034, 0x00000000,
+ 0x00000038, 0x00000000,
+ 0x0000003c, 0x00000000,
+ 0x00000040, 0x00000000,
+ 0x00000044, 0x00000000,
+ 0x00000048, 0x00000000,
+ 0x00000050, 0x00000000,
+ 0x00000054, 0x00000000,
+ 0x00000058, 0x0000002a
+};
+
+const uint32_t sysmgr_pinmux_array_iodelay[] = {
+ 0x00000000, 0x00000000,
+ 0x00000004, 0x00000000,
+ 0x00000008, 0x00000000,
+ 0x0000000c, 0x00000000,
+ 0x00000010, 0x00000000,
+ 0x00000014, 0x00000000,
+ 0x00000018, 0x00000000,
+ 0x0000001c, 0x00000000,
+ 0x00000020, 0x00000000,
+ 0x00000024, 0x00000000,
+ 0x00000028, 0x00000000,
+ 0x0000002c, 0x00000000,
+ 0x00000030, 0x00000000,
+ 0x00000034, 0x00000000,
+ 0x00000038, 0x00000000,
+ 0x0000003c, 0x00000000,
+ 0x00000040, 0x00000000,
+ 0x00000044, 0x00000000,
+ 0x00000048, 0x00000000,
+ 0x0000004c, 0x00000000,
+ 0x00000050, 0x00000000,
+ 0x00000054, 0x00000000,
+ 0x00000058, 0x00000000,
+ 0x0000005c, 0x00000000,
+ 0x00000060, 0x00000000,
+ 0x00000064, 0x00000000,
+ 0x00000068, 0x00000000,
+ 0x0000006c, 0x00000000,
+ 0x00000070, 0x00000000,
+ 0x00000074, 0x00000000,
+ 0x00000078, 0x00000000,
+ 0x0000007c, 0x00000000,
+ 0x00000080, 0x00000000,
+ 0x00000084, 0x00000000,
+ 0x00000088, 0x00000000,
+ 0x0000008c, 0x00000000,
+ 0x00000090, 0x00000000,
+ 0x00000094, 0x00000000,
+ 0x00000098, 0x00000000,
+ 0x0000009c, 0x00000000,
+ 0x00000100, 0x00000000,
+ 0x00000104, 0x00000000,
+ 0x00000108, 0x00000000,
+ 0x0000010c, 0x00000000,
+ 0x00000110, 0x00000000,
+ 0x00000114, 0x00000000,
+ 0x00000118, 0x00000000,
+ 0x0000011c, 0x00000000
+};
+
+void config_fpgaintf_mod(void)
+{
+	mmio_write_32(SOCFPGA_SYSMGR(FPGAINTF_EN_2), 1 << 8);
+}
+
+
+void config_pinmux(handoff *hoff_ptr)
+{
+ unsigned int i;
+
+ for (i = 0; i < 96; i += 2) {
+ mmio_write_32(AGX_PINMUX_PIN0SEL +
+ hoff_ptr->pinmux_sel_array[i],
+ hoff_ptr->pinmux_sel_array[i+1]);
+ }
+
+ for (i = 0; i < 96; i += 2) {
+ mmio_write_32(AGX_PINMUX_IO0CTRL +
+ hoff_ptr->pinmux_io_array[i],
+ hoff_ptr->pinmux_io_array[i+1]);
+ }
+
+ for (i = 0; i < 42; i += 2) {
+ mmio_write_32(AGX_PINMUX_PINMUX_EMAC0_USEFPGA +
+ hoff_ptr->pinmux_fpga_array[i],
+ hoff_ptr->pinmux_fpga_array[i+1]);
+ }
+
+ for (i = 0; i < 96; i += 2) {
+ mmio_write_32(AGX_PINMUX_IO0_DELAY +
+ hoff_ptr->pinmux_iodelay_array[i],
+ hoff_ptr->pinmux_iodelay_array[i+1]);
+ }
+
+ config_fpgaintf_mod();
+}
+
diff --git a/plat/intel/soc/common/aarch64/plat_helpers.S b/plat/intel/soc/common/aarch64/plat_helpers.S
new file mode 100644
index 0000000..213fd3c
--- /dev/null
+++ b/plat/intel/soc/common/aarch64/plat_helpers.S
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <platform_def.h>
+#include <el3_common_macros.S>
+
+ .globl plat_secondary_cold_boot_setup
+ .globl platform_is_primary_cpu
+ .globl plat_is_my_cpu_primary
+ .globl plat_my_core_pos
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+ .globl platform_mem_init
+ .globl plat_secondary_cpus_bl31_entry
+
+ .globl plat_get_my_entrypoint
+
+ /* -----------------------------------------------------
+ * void plat_secondary_cold_boot_setup (void);
+ *
+ * This function performs any platform specific actions
+	 * needed for a secondary CPU after a cold reset, e.g.
+	 * marking the CPU's presence or placing it in a
+	 * holding pen.
+ * -----------------------------------------------------
+ */
+func plat_secondary_cold_boot_setup
+	/* Wait until the CPU release mailbox (entry point and CPU ID) gets populated */
+poll_mailbox:
+ wfi
+ mov_imm x0, PLAT_SEC_ENTRY
+ ldr x1, [x0]
+ mov_imm x2, PLAT_CPUID_RELEASE
+ ldr x3, [x2]
+ mrs x4, mpidr_el1
+ and x4, x4, #0xff
+ cmp x3, x4
+ b.ne poll_mailbox
+ br x1
+endfunc plat_secondary_cold_boot_setup
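The holding-pen protocol above is easier to follow as a C restatement: each secondary sleeps in WFI until the mailbox at PLAT_CPUID_RELEASE contains its own MPIDR (Aff0), then jumps to the address stored at PLAT_SEC_ENTRY. This is only a sketch of the assembly, not code that is built:

static void secondary_holding_pen(void)		/* illustrative only */
{
	uint64_t my_id = read_mpidr_el1() & 0xffULL;
	uintptr_t entry;

	do {
		wfi();					/* woken by event/SGI */
		entry = mmio_read_64(PLAT_SEC_ENTRY);
	} while (mmio_read_64(PLAT_CPUID_RELEASE) != my_id);

	((void (*)(void))entry)();			/* "br x1" equivalent */
}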
+
+func platform_is_primary_cpu
+ and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+ cmp x0, #PLAT_PRIMARY_CPU
+ cset x0, eq
+ ret
+endfunc platform_is_primary_cpu
+
+func plat_is_my_cpu_primary
+ mrs x0, mpidr_el1
+ b platform_is_primary_cpu
+endfunc plat_is_my_cpu_primary
+
+func plat_my_core_pos
+ mrs x0, mpidr_el1
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ add x0, x1, x0, LSR #6
+ ret
+endfunc plat_my_core_pos
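plat_my_core_pos folds the MPIDR affinity fields into a linear index: Aff0 is the CPU number inside a cluster and Aff1 (bits [15:8]) the cluster number, so shifting the masked cluster field right by 6 multiplies it by the four cores per cluster. A C sketch of the same arithmetic with a couple of worked values:

/*
 * MPIDR 0x0000 -> 0, MPIDR 0x0003 -> 3, MPIDR 0x0100 -> 4 (cluster 1,
 * which does not exist on this SoC but shows the scaling).
 */
static unsigned int core_pos_sketch(uint64_t mpidr)
{
	return (unsigned int)((mpidr & MPIDR_CPU_MASK) +
			      ((mpidr & MPIDR_CLUSTER_MASK) >> 6));
}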
+
+func warm_reset_req
+ str xzr, [x4]
+ bl plat_is_my_cpu_primary
+ cbz x0, cpu_in_wfi
+ mov_imm x1, PLAT_SEC_ENTRY
+ str xzr, [x1]
+ mrs x1, rmr_el3
+ orr x1, x1, #0x02
+ msr rmr_el3, x1
+ isb
+ dsb sy
+cpu_in_wfi:
+ wfi
+ b cpu_in_wfi
+endfunc warm_reset_req
+
+func plat_get_my_entrypoint
+ ldr x4, =L2_RESET_DONE_REG
+ ldr x5, [x4]
+ ldr x1, =L2_RESET_DONE_STATUS
+ cmp x1, x5
+ b.eq warm_reset_req
+ mov_imm x1, PLAT_SEC_ENTRY
+ ldr x0, [x1]
+ ret
+endfunc plat_get_my_entrypoint
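plat_get_my_entrypoint is what distinguishes cold, warm and L2-reset boots: if the scratch register holds the L2_RESET_DONE_STATUS magic, execution takes the warm-reset-request path; otherwise the function returns whatever is in the PLAT_SEC_ENTRY mailbox (zero on a genuine cold boot). A hedged C restatement of that decision, deliberately omitting the RMR_EL3 handling:

static uintptr_t entrypoint_sketch(void)	/* illustrative only */
{
	if (mmio_read_64(L2_RESET_DONE_REG) == L2_RESET_DONE_STATUS) {
		/*
		 * L2 reset flow: clear the magic word; the primary then
		 * requests a warm reset through RMR_EL3 while secondaries
		 * park in WFI (see warm_reset_req above).
		 */
		mmio_write_64(L2_RESET_DONE_REG, 0U);
		return 0U;
	}

	/* Zero means cold boot; non-zero is the warm-boot entry point. */
	return mmio_read_64(PLAT_SEC_ENTRY);
}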
+
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0, x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, CRASH_CONSOLE_BASE
+ mov_imm x1, PLAT_UART_CLOCK
+ mov_imm x2, PLAT_BAUDRATE
+ b console_16550_core_init
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(void)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, CRASH_CONSOLE_BASE
+ b console_16550_core_putc
+endfunc plat_crash_console_putc
+
+func plat_crash_console_flush
+ mov_imm x0, CRASH_CONSOLE_BASE
+ b console_16550_core_flush
+endfunc plat_crash_console_flush
+
+
+ /* --------------------------------------------------------
+ * void platform_mem_init (void);
+ *
+ * Any memory init, relocation to be done before the
+ * platform boots. Called very early in the boot process.
+ * --------------------------------------------------------
+ */
+func platform_mem_init
+ mov x0, #0
+ ret
+endfunc platform_mem_init
+
+func plat_secondary_cpus_bl31_entry
+ el3_entrypoint_common \
+ _init_sctlr=0 \
+ _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
+ _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
+ _init_memory=1 \
+ _init_c_runtime=1 \
+ _exception_vectors=runtime_exceptions \
+ _pie_fixup_size=BL31_LIMIT - BL31_BASE
+endfunc plat_secondary_cpus_bl31_entry
diff --git a/plat/intel/soc/common/aarch64/platform_common.c b/plat/intel/soc/common/aarch64/platform_common.c
new file mode 100644
index 0000000..b79a63c
--- /dev/null
+++ b/plat/intel/soc/common/aarch64/platform_common.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <platform_def.h>
+#include <plat/common/platform.h>
+
+#include "socfpga_private.h"
+
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return PLAT_SYS_COUNTER_FREQ_IN_TICKS;
+}
+
+unsigned long socfpga_get_ns_image_entrypoint(void)
+{
+ return PLAT_NS_IMAGE_OFFSET;
+}
+
+/******************************************************************************
+ * Gets SPSR for BL32 entry
+ *****************************************************************************/
+uint32_t socfpga_get_spsr_for_bl32_entry(void)
+{
+ /*
+ * The Secure Payload Dispatcher service is responsible for
+ * setting the SPSR prior to entry into the BL32 image.
+ */
+ return 0;
+}
+
+/******************************************************************************
+ * Gets SPSR for BL33 entry
+ *****************************************************************************/
+uint32_t socfpga_get_spsr_for_bl33_entry(void)
+{
+ unsigned long el_status;
+ unsigned int mode;
+ uint32_t spsr;
+
+ /* Figure out what mode we enter the non-secure world in */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+ /*
+ * TODO: Consider the possibility of specifying the SPSR in
+ * the FIP ToC and allowing the platform to have a say as
+ * well.
+ */
+ spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ return spsr;
+}
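With the standard AArch64 PSR encodings this helper typically resolves to one of two values; the numbers below are a worked example rather than something quoted from the source:

/*
 * SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS) == 0x3c9
 *   -> enter BL33 at EL2h with DAIF masked (EL2 implemented)
 * SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS) == 0x3c5
 *   -> enter BL33 at EL1h with DAIF masked (no EL2)
 */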
+
diff --git a/plat/intel/soc/common/bl2_plat_mem_params_desc.c b/plat/intel/soc/common/bl2_plat_mem_params_desc.c
new file mode 100644
index 0000000..187c53a
--- /dev/null
+++ b/plat/intel/soc/common/bl2_plat_mem_params_desc.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.h>
+#include <common/desc_image_load.h>
+#include <platform_def.h>
+#include <plat/common/platform.h>
+
+
+/*******************************************************************************
+ * Following descriptor provides BL image/ep information that gets used
+ * by BL2 to load the images and also subset of this information is
+ * passed to next BL image. The image loading sequence is managed by
+ * populating the images in required loading order. The image execution
+ * sequence is managed by populating the `next_handoff_image_id` with
+ * the next executable image id.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef SCP_BL2_BASE
+ /* Fill SCP_BL2 related information if it exists */
+ {
+ .image_id = SCP_BL2_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+ VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = SCP_BL2_BASE,
+ .image_info.image_max_size = SCP_BL2_SIZE,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+#endif /* SCP_BL2_BASE */
+
+#ifdef EL3_PAYLOAD_BASE
+ /* Fill EL3 payload related information (BL31 is EL3 payload)*/
+ {
+ .image_id = BL31_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = EL3_PAYLOAD_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t,
+ IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING),
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+
+#else /* EL3_PAYLOAD_BASE */
+
+ /* Fill BL31 related information */
+ {
+ .image_id = BL31_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = BL31_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+ .image_info.image_base = BL31_BASE,
+ .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+
+ .next_handoff_image_id = BL33_IMAGE_ID,
+ },
+#endif /* EL3_PAYLOAD_BASE */
+
+ {
+ .image_id = BL33_IMAGE_ID,
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+ .ep_info.pc = PLAT_NS_IMAGE_OFFSET,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = PLAT_NS_IMAGE_OFFSET,
+ .image_info.image_max_size =
+ 0x0 + 0x40000000 - PLAT_NS_IMAGE_OFFSET,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
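The next_handoff_image_id fields above form the execution chain BL31 -> BL33 -> INVALID_IMAGE_ID, which the generic descriptor code walks after BL2 has loaded every image in array order. A hypothetical helper (not part of the platform code) showing how such a chain is followed, using the types from desc_image_load.h:

static unsigned int next_image_sketch(const bl_mem_params_node_t *descs,
				      size_t count, unsigned int image_id)
{
	for (size_t i = 0U; i < count; i++) {
		if (descs[i].image_id == image_id)
			return descs[i].next_handoff_image_id;
	}

	return INVALID_IMAGE_ID;
}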
diff --git a/plat/intel/soc/common/drivers/ccu/ncore_ccu.c b/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
new file mode 100644
index 0000000..d9a238e
--- /dev/null
+++ b/plat/intel/soc/common/drivers/ccu/ncore_ccu.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <errno.h>
+#include <lib/mmio.h>
+
+#include "ncore_ccu.h"
+
+uint32_t poll_active_bit(uint32_t dir);
+
+static coh_ss_id_t subsystem_id;
+
+
+void get_subsystem_id(void)
+{
+ uint32_t snoop_filter, directory, coh_agent;
+
+ snoop_filter = CSIDR_NUM_SF(mmio_read_32(NCORE_CCU_CSR(NCORE_CSIDR)));
+ directory = CSUIDR_NUM_DIR(mmio_read_32(NCORE_CCU_CSR(NCORE_CSUIDR)));
+ coh_agent = CSUIDR_NUM_CAI(mmio_read_32(NCORE_CCU_CSR(NCORE_CSUIDR)));
+
+ subsystem_id.num_snoop_filter = snoop_filter + 1;
+ subsystem_id.num_directory = directory;
+ subsystem_id.num_coh_agent = coh_agent;
+}
+
+uint32_t directory_init(void)
+{
+ uint32_t dir_sf_mtn, dir_sf_en;
+ uint32_t dir, sf, ret;
+
+ for (dir = 0; dir < subsystem_id.num_directory; dir++) {
+ for (sf = 0; sf < subsystem_id.num_snoop_filter; sf++) {
+ dir_sf_mtn = DIRECTORY_UNIT(dir, NCORE_DIRUSFMCR);
+ dir_sf_en = DIRECTORY_UNIT(dir, NCORE_DIRUSFER);
+
+ /* Initialize All Entries */
+ mmio_write_32(dir_sf_mtn, SNOOP_FILTER_ID(dir));
+
+ /* Poll Active Bit */
+ ret = poll_active_bit(dir);
+ if (ret != 0) {
+				ERROR("Timeout during active bit polling\n");
+ return -ETIMEDOUT;
+ }
+
+			/* Snoop Filter Enable */
+ mmio_setbits_32(dir_sf_en, BIT(sf));
+ }
+ }
+
+ return 0;
+}
+
+uint32_t coherent_agent_intfc_init(void)
+{
+ uint32_t dir, ca, ca_id, ca_type, ca_snoop_en;
+
+ for (dir = 0; dir < subsystem_id.num_directory; dir++) {
+ for (ca = 0; ca < subsystem_id.num_coh_agent; ca++) {
+ ca_snoop_en = DIRECTORY_UNIT(ca, NCORE_DIRUCASER0);
+ ca_id = mmio_read_32(COH_AGENT_UNIT(ca, NCORE_CAIUIDR));
+
+ /* Coh Agent Snoop Enable */
+ if (CACHING_AGENT_BIT(ca_id))
+ mmio_write_32(ca_snoop_en, BIT(ca));
+
+ /* Coh Agent Snoop DVM Enable */
+ ca_type = CACHING_AGENT_TYPE(ca_id);
+ if (ca_type == ACE_W_DVM || ca_type == ACE_L_W_DVM)
+ mmio_write_32(NCORE_CCU_CSR(NCORE_CSADSER0),
+ BIT(ca));
+ }
+ }
+
+ return 0;
+}
+
+uint32_t poll_active_bit(uint32_t dir)
+{
+ uint32_t timeout = 80000;
+ uint32_t poll_dir = DIRECTORY_UNIT(dir, NCORE_DIRUSFMAR);
+
+ while (timeout > 0) {
+ if (mmio_read_32(poll_dir) == 0)
+ return 0;
+ timeout--;
+ }
+
+ return -1;
+}
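The poll above bounds the wait by raw loop iterations. If the generic delay timer has already been initialised, a time-bounded variant is also possible; this is only a sketch and assumes drivers/delay_timer.h (udelay) is available at this point of the boot:

static int poll_active_bit_timed(uint32_t dir, uint32_t timeout_us)
{
	uint32_t reg = DIRECTORY_UNIT(dir, NCORE_DIRUSFMAR);

	while (timeout_us-- > 0U) {
		if (mmio_read_32(reg) == 0U)
			return 0;
		udelay(1);	/* requires an initialised delay timer */
	}

	return -1;
}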
+
+void bypass_ocram_firewall(void)
+{
+ mmio_clrbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF1),
+ OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+ mmio_clrbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF2),
+ OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+ mmio_clrbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF3),
+ OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+ mmio_clrbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF4),
+ OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+}
+
+void ncore_enable_ocram_firewall(void)
+{
+ mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF1),
+ OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+ mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF2),
+ OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+ mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF3),
+ OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+ mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF4),
+ OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK);
+}
+
+uint32_t init_ncore_ccu(void)
+{
+ uint32_t status;
+
+	get_subsystem_id();
+
+	status = directory_init();
+	if (status != 0)
+		return status;
+
+	status = coherent_agent_intfc_init();
+	bypass_ocram_firewall();
+
+ return status;
+}
diff --git a/plat/intel/soc/common/drivers/ccu/ncore_ccu.h b/plat/intel/soc/common/drivers/ccu/ncore_ccu.h
new file mode 100644
index 0000000..3f662ff
--- /dev/null
+++ b/plat/intel/soc/common/drivers/ccu/ncore_ccu.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NCORE_CCU_H
+#define NCORE_CCU_H
+
+
+#define NCORE_CCU_OFFSET 0xf7000000
+
+
+/* Coherent Sub-System Address Map */
+#define NCORE_CAIU_OFFSET 0x00000
+#define NCORE_CAIU_SIZE 0x01000
+
+#define NCORE_NCBU_OFFSET 0x60000
+#define NCORE_NCBU_SIZE 0x01000
+
+#define NCORE_DIRU_OFFSET 0x80000
+#define NCORE_DIRU_SIZE 0x01000
+
+#define NCORE_CMIU_OFFSET 0xc0000
+#define NCORE_CMIU_SIZE 0x01000
+
+#define NCORE_CSR_OFFSET 0xff000
+#define NCORE_CSADSERO 0x00040
+#define NCORE_CSUIDR 0x00ff8
+#define NCORE_CSIDR 0x00ffc
+
+/* Directory Unit Register Map */
+#define NCORE_DIRUSFER 0x00010
+#define NCORE_DIRUMRHER 0x00070
+#define NCORE_DIRUSFMCR 0x00080
+#define NCORE_DIRUSFMAR 0x00084
+
+/* Coherent Agent Interface Unit Register Map */
+#define NCORE_CAIUIDR 0x00ffc
+
+/* Snoop Enable Register */
+#define NCORE_DIRUCASER0 0x00040
+#define NCORE_DIRUCASER1 0x00044
+#define NCORE_DIRUCASER2 0x00048
+#define NCORE_DIRUCASER3 0x0004c
+
+#define NCORE_CSADSER0 0x00040
+#define NCORE_CSADSER1 0x00044
+#define NCORE_CSADSER2 0x00048
+#define NCORE_CSADSER3 0x0004c
+
+/* Protocols Definition */
+#define ACE_W_DVM 0
+#define ACE_L_W_DVM 1
+#define ACE_WO_DVM 2
+#define ACE_L_WO_DVM 3
+
+/* Bypass OC Ram Firewall */
+#define NCORE_FW_OCRAM_BLK_BASE 0x100200
+#define NCORE_FW_OCRAM_BLK_CGF1 0x04
+#define NCORE_FW_OCRAM_BLK_CGF2 0x08
+#define NCORE_FW_OCRAM_BLK_CGF3 0x0c
+#define NCORE_FW_OCRAM_BLK_CGF4 0x10
+
+#define OCRAM_PRIVILEGED_MASK BIT(29)
+#define OCRAM_SECURE_MASK BIT(30)
+
+/* Macros */
+#define NCORE_CCU_REG(base) (NCORE_CCU_OFFSET + (base))
+#define NCORE_CCU_CSR(reg) (NCORE_CCU_REG(NCORE_CSR_OFFSET)\
+ + (reg))
+#define NCORE_CCU_DIR(reg) (NCORE_CCU_REG(NCORE_DIRU_OFFSET)\
+ + (reg))
+#define NCORE_CCU_CAI(reg) (NCORE_CCU_REG(NCORE_CAIU_OFFSET)\
+ + (reg))
+
+#define DIRECTORY_UNIT(x, reg) (NCORE_CCU_DIR(reg)\
+ + NCORE_DIRU_SIZE * (x))
+#define COH_AGENT_UNIT(x, reg) (NCORE_CCU_CAI(reg)\
+ + NCORE_CAIU_SIZE * (x))
+
+#define COH_CPU0_BYPASS_REG(reg) (NCORE_CCU_REG(NCORE_FW_OCRAM_BLK_BASE)\
+ + (reg))
+
+#define CSUIDR_NUM_CMI(x) (((x) & 0x3f000000) >> 24)
+#define CSUIDR_NUM_DIR(x) (((x) & 0x003f0000) >> 16)
+#define CSUIDR_NUM_NCB(x) (((x) & 0x00003f00) >> 8)
+#define CSUIDR_NUM_CAI(x) (((x) & 0x0000007f) >> 0)
+
+#define CSIDR_NUM_SF(x) (((x) & 0x007c0000) >> 18)
+
+#define SNOOP_FILTER_ID(x) (((x) << 16))
+
+#define CACHING_AGENT_BIT(x) (((x) & 0x08000) >> 15)
+#define CACHING_AGENT_TYPE(x) (((x) & 0xf0000) >> 16)
+
+
+typedef struct coh_ss_id {
+ uint8_t num_coh_mem;
+ uint8_t num_directory;
+ uint8_t num_non_coh_bridge;
+ uint8_t num_coh_agent;
+ uint8_t num_snoop_filter;
+} coh_ss_id_t;
+
+uint32_t init_ncore_ccu(void);
+void ncore_enable_ocram_firewall(void);
+
+#endif
diff --git a/plat/intel/soc/common/drivers/qspi/cadence_qspi.c b/plat/intel/soc/common/drivers/qspi/cadence_qspi.c
new file mode 100644
index 0000000..cecf560
--- /dev/null
+++ b/plat/intel/soc/common/drivers/qspi/cadence_qspi.c
@@ -0,0 +1,822 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <string.h>
+#include <drivers/delay_timer.h>
+#include <drivers/console.h>
+
+#include "cadence_qspi.h"
+
+#define LESS(a, b) (((a) < (b)) ? (a) : (b))
+#define MORE(a, b) (((a) > (b)) ? (a) : (b))
+
+
+uint32_t qspi_device_size;
+int cad_qspi_cs;
+
+int cad_qspi_idle(void)
+{
+ return (mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG)
+ & CAD_QSPI_CFG_IDLE) >> 31;
+}
+
+int cad_qspi_set_baudrate_div(uint32_t div)
+{
+ if (div > 0xf)
+ return CAD_INVALID;
+
+ mmio_clrsetbits_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG,
+ ~CAD_QSPI_CFG_BAUDDIV_MSK,
+ CAD_QSPI_CFG_BAUDDIV(div));
+
+ return 0;
+}
+
+int cad_qspi_configure_dev_size(uint32_t addr_bytes,
+ uint32_t bytes_per_dev, uint32_t bytes_per_block)
+{
+
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DEVSZ,
+ CAD_QSPI_DEVSZ_ADDR_BYTES(addr_bytes) |
+ CAD_QSPI_DEVSZ_BYTES_PER_PAGE(bytes_per_dev) |
+ CAD_QSPI_DEVSZ_BYTES_PER_BLOCK(bytes_per_block));
+ return 0;
+}
+
+int cad_qspi_set_read_config(uint32_t opcode, uint32_t instr_type,
+ uint32_t addr_type, uint32_t data_type,
+ uint32_t mode_bit, uint32_t dummy_clk_cycle)
+{
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DEVRD,
+ CAD_QSPI_DEV_OPCODE(opcode) |
+ CAD_QSPI_DEV_INST_TYPE(instr_type) |
+ CAD_QSPI_DEV_ADDR_TYPE(addr_type) |
+ CAD_QSPI_DEV_DATA_TYPE(data_type) |
+ CAD_QSPI_DEV_MODE_BIT(mode_bit) |
+ CAD_QSPI_DEV_DUMMY_CLK_CYCLE(dummy_clk_cycle));
+
+ return 0;
+}
+
+int cad_qspi_set_write_config(uint32_t opcode, uint32_t addr_type,
+ uint32_t data_type, uint32_t dummy_clk_cycle)
+{
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DEVWR,
+ CAD_QSPI_DEV_OPCODE(opcode) |
+ CAD_QSPI_DEV_ADDR_TYPE(addr_type) |
+ CAD_QSPI_DEV_DATA_TYPE(data_type) |
+ CAD_QSPI_DEV_DUMMY_CLK_CYCLE(dummy_clk_cycle));
+
+ return 0;
+}
+
+int cad_qspi_timing_config(uint32_t clkphase, uint32_t clkpol, uint32_t csda,
+ uint32_t csdads, uint32_t cseot, uint32_t cssot,
+ uint32_t rddatacap)
+{
+ uint32_t cfg = mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG);
+
+ cfg &= CAD_QSPI_CFG_SELCLKPHASE_CLR_MSK &
+ CAD_QSPI_CFG_SELCLKPOL_CLR_MSK;
+ cfg |= CAD_QSPI_SELCLKPHASE(clkphase) | CAD_QSPI_SELCLKPOL(clkpol);
+
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG, cfg);
+
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DELAY,
+ CAD_QSPI_DELAY_CSSOT(cssot) | CAD_QSPI_DELAY_CSEOT(cseot) |
+ CAD_QSPI_DELAY_CSDADS(csdads) | CAD_QSPI_DELAY_CSDA(csda));
+
+ return 0;
+}
+
+int cad_qspi_stig_cmd_helper(int cs, uint32_t cmd)
+{
+ uint32_t count = 0;
+
+ /* chip select */
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG,
+ (mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG)
+ & CAD_QSPI_CFG_CS_MSK) | CAD_QSPI_CFG_CS(cs));
+
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD, cmd);
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD,
+ cmd | CAD_QSPI_FLASHCMD_EXECUTE);
+
+ do {
+ uint32_t reg = mmio_read_32(CAD_QSPI_OFFSET +
+ CAD_QSPI_FLASHCMD);
+ if (!(reg & CAD_QSPI_FLASHCMD_EXECUTE_STAT))
+ break;
+ count++;
+ } while (count < CAD_QSPI_COMMAND_TIMEOUT);
+
+ if (count >= CAD_QSPI_COMMAND_TIMEOUT) {
+ ERROR("Error sending QSPI command %x, timed out\n",
+ cmd);
+ return CAD_QSPI_ERROR;
+ }
+
+ return 0;
+}
+
+int cad_qspi_stig_cmd(uint32_t opcode, uint32_t dummy)
+{
+ if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) {
+ ERROR("Faulty dummy bytes\n");
+ return -1;
+ }
+
+ return cad_qspi_stig_cmd_helper(cad_qspi_cs,
+ CAD_QSPI_FLASHCMD_OPCODE(opcode) |
+ CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES(dummy));
+}
+
+int cad_qspi_stig_read_cmd(uint32_t opcode, uint32_t dummy, uint32_t num_bytes,
+ uint32_t *output)
+{
+ if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) {
+		ERROR("Faulty dummy bytes\n");
+ return -1;
+ }
+
+ if ((num_bytes > 8) || (num_bytes == 0))
+ return -1;
+
+ uint32_t cmd =
+ CAD_QSPI_FLASHCMD_OPCODE(opcode) |
+ CAD_QSPI_FLASHCMD_ENRDDATA(1) |
+ CAD_QSPI_FLASHCMD_NUMRDDATABYTES(num_bytes - 1) |
+ CAD_QSPI_FLASHCMD_ENCMDADDR(0) |
+ CAD_QSPI_FLASHCMD_ENMODEBIT(0) |
+ CAD_QSPI_FLASHCMD_NUMADDRBYTES(0) |
+ CAD_QSPI_FLASHCMD_ENWRDATA(0) |
+ CAD_QSPI_FLASHCMD_NUMWRDATABYTES(0) |
+ CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy);
+
+ if (cad_qspi_stig_cmd_helper(cad_qspi_cs, cmd)) {
+ ERROR("failed to send stig cmd\n");
+ return -1;
+ }
+
+ output[0] = mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_RDDATA0);
+
+ if (num_bytes > 4) {
+ output[1] = mmio_read_32(CAD_QSPI_OFFSET +
+ CAD_QSPI_FLASHCMD_RDDATA1);
+ }
+
+ return 0;
+}
+
+int cad_qspi_stig_wr_cmd(uint32_t opcode, uint32_t dummy, uint32_t num_bytes,
+ uint32_t *input)
+{
+ if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) {
+		ERROR("Faulty dummy bytes\n");
+ return -1;
+ }
+
+ if ((num_bytes > 8) || (num_bytes == 0))
+ return -1;
+
+ uint32_t cmd = CAD_QSPI_FLASHCMD_OPCODE(opcode) |
+ CAD_QSPI_FLASHCMD_ENRDDATA(0) |
+ CAD_QSPI_FLASHCMD_NUMRDDATABYTES(0) |
+ CAD_QSPI_FLASHCMD_ENCMDADDR(0) |
+ CAD_QSPI_FLASHCMD_ENMODEBIT(0) |
+ CAD_QSPI_FLASHCMD_NUMADDRBYTES(0) |
+ CAD_QSPI_FLASHCMD_ENWRDATA(1) |
+ CAD_QSPI_FLASHCMD_NUMWRDATABYTES(num_bytes - 1) |
+ CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy);
+
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_WRDATA0, input[0]);
+
+ if (num_bytes > 4)
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_WRDATA1,
+ input[1]);
+
+ return cad_qspi_stig_cmd_helper(cad_qspi_cs, cmd);
+}
+
+int cad_qspi_stig_addr_cmd(uint32_t opcode, uint32_t dummy, uint32_t addr)
+{
+ uint32_t cmd;
+
+ if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1))
+ return -1;
+
+ cmd = CAD_QSPI_FLASHCMD_OPCODE(opcode) |
+ CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy) |
+ CAD_QSPI_FLASHCMD_ENCMDADDR(1) |
+ CAD_QSPI_FLASHCMD_NUMADDRBYTES(2);
+
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_ADDR, addr);
+
+ return cad_qspi_stig_cmd_helper(cad_qspi_cs, cmd);
+}
+
+int cad_qspi_device_bank_select(uint32_t bank)
+{
+ int status = 0;
+
+ status = cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WREN, 0);
+ if (status != 0)
+ return status;
+
+ status = cad_qspi_stig_wr_cmd(CAD_QSPI_STIG_OPCODE_WREN_EXT_REG,
+ 0, 1, &bank);
+ if (status != 0)
+ return status;
+
+ return cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WRDIS, 0);
+}
+
+int cad_qspi_device_status(uint32_t *status)
+{
+ return cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDSR, 0, 1, status);
+}
+
+#if CAD_QSPI_MICRON_N25Q_SUPPORT
+int cad_qspi_n25q_enable(void)
+{
+ cad_qspi_set_read_config(QSPI_FAST_READ, CAD_QSPI_INST_SINGLE,
+ CAD_QSPI_ADDR_FASTREAD, CAT_QSPI_ADDR_SINGLE_IO, 1,
+ 0);
+ cad_qspi_set_write_config(QSPI_WRITE, 0, 0, 0);
+
+ return 0;
+}
+
+int cad_qspi_n25q_wait_for_program_and_erase(int program_only)
+{
+ uint32_t status, flag_sr;
+ int count = 0;
+
+ while (count < CAD_QSPI_COMMAND_TIMEOUT) {
+		if (cad_qspi_device_status(&status) != 0) {
+			ERROR("Error getting device status\n");
+			return -1;
+		}
+ if (!CAD_QSPI_STIG_SR_BUSY(status))
+ break;
+ count++;
+ }
+
+ if (count >= CAD_QSPI_COMMAND_TIMEOUT) {
+ ERROR("Timed out waiting for idle\n");
+ return -1;
+ }
+
+ count = 0;
+
+ while (count < CAD_QSPI_COMMAND_TIMEOUT) {
+ status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDFLGSR,
+ 0, 1, &flag_sr);
+ if (status != 0) {
+			ERROR("Error waiting for program and erase\n");
+ return status;
+ }
+
+ if ((program_only &&
+ CAD_QSPI_STIG_FLAGSR_PROGRAMREADY(flag_sr)) ||
+ (!program_only &&
+ CAD_QSPI_STIG_FLAGSR_ERASEREADY(flag_sr)))
+			break;
+
+		count++;
+	}
+
+ if (count >= CAD_QSPI_COMMAND_TIMEOUT)
+ ERROR("Timed out waiting for program and erase\n");
+
+ if ((program_only && CAD_QSPI_STIG_FLAGSR_PROGRAMERROR(flag_sr)) ||
+ (!program_only &&
+ CAD_QSPI_STIG_FLAGSR_ERASEERROR(flag_sr))) {
+ ERROR("Error programming/erasing flash\n");
+ cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_CLFSR, 0);
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+int cad_qspi_indirect_read_start_bank(uint32_t flash_addr, uint32_t num_bytes)
+{
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDRDSTADDR, flash_addr);
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDRDCNT, num_bytes);
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDRD,
+ CAD_QSPI_INDRD_START |
+ CAD_QSPI_INDRD_IND_OPS_DONE);
+
+ return 0;
+}
+
+
+int cad_qspi_indirect_write_start_bank(uint32_t flash_addr,
+ uint32_t num_bytes)
+{
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDWRSTADDR, flash_addr);
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDWRCNT, num_bytes);
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDWR,
+ CAD_QSPI_INDWR_START |
+ CAD_QSPI_INDWR_INDDONE);
+
+ return 0;
+}
+
+int cad_qspi_indirect_write_finish(void)
+{
+#if CAD_QSPI_MICRON_N25Q_SUPPORT
+ return cad_qspi_n25q_wait_for_program_and_erase(1);
+#else
+ return 0;
+#endif
+
+}
+
+int cad_qspi_enable(void)
+{
+ int status;
+
+ mmio_setbits_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG, CAD_QSPI_CFG_ENABLE);
+
+#if CAD_QSPI_MICRON_N25Q_SUPPORT
+ status = cad_qspi_n25q_enable();
+ if (status != 0)
+ return status;
+#endif
+ return 0;
+}
+
+int cad_qspi_enable_subsector_bank(uint32_t addr)
+{
+ int status = 0;
+
+ status = cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WREN, 0);
+ if (status != 0)
+ return status;
+
+ status = cad_qspi_stig_addr_cmd(CAD_QSPI_STIG_OPCODE_SUBSEC_ERASE, 0,
+ addr);
+ if (status != 0)
+ return status;
+
+#if CAD_QSPI_MICRON_N25Q_SUPPORT
+ status = cad_qspi_n25q_wait_for_program_and_erase(0);
+#endif
+ return status;
+}
+
+int cad_qspi_erase_subsector(uint32_t addr)
+{
+ int status = 0;
+
+ status = cad_qspi_device_bank_select(addr >> 24);
+ if (status != 0)
+ return status;
+
+ return cad_qspi_enable_subsector_bank(addr);
+}
+
+int cad_qspi_erase_sector(uint32_t addr)
+{
+ int status = 0;
+
+ status = cad_qspi_device_bank_select(addr >> 24);
+ if (status != 0)
+ return status;
+
+ status = cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WREN, 0);
+ if (status != 0)
+ return status;
+
+ status = cad_qspi_stig_addr_cmd(CAD_QSPI_STIG_OPCODE_SEC_ERASE, 0,
+ addr);
+ if (status != 0)
+ return status;
+
+#if CAD_QSPI_MICRON_N25Q_SUPPORT
+ status = cad_qspi_n25q_wait_for_program_and_erase(0);
+#endif
+ return status;
+}
+
+void cad_qspi_calibration(uint32_t dev_clk, uint32_t qspi_clk_mhz)
+{
+ int status;
+ uint32_t dev_sclk_mhz = 27; /*min value to get biggest 0xF div factor*/
+ uint32_t data_cap_delay;
+ uint32_t sample_rdid;
+ uint32_t rdid;
+ uint32_t div_actual;
+ uint32_t div_bits;
+ int first_pass, last_pass;
+
+	/* 1. Set the divider to its largest value (slowest SCLK).
+	 * 2. Issue RDID and save the value.
+	 */
+ div_actual = (qspi_clk_mhz + (dev_sclk_mhz - 1)) / dev_sclk_mhz;
+ div_bits = (((div_actual + 1) / 2) - 1);
+ status = cad_qspi_set_baudrate_div(0xf);
+
+ status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID,
+ 0, 3, &sample_rdid);
+ if (status != 0)
+ return;
+
+	/* 3. Set the divider to the intended frequency.
+	 * 4. Set the read delay to 0.
+	 * 5. Issue RDID and check whether the value matches item 2.
+	 * 6. Increase the read delay and compare the value against item 2.
+	 * 7. Find the range of read delays that still match item 2 and
+	 *    pick the middle of that range.
+	 */
+ div_actual = (qspi_clk_mhz + (dev_clk - 1)) / dev_clk;
+ div_bits = (((div_actual + 1) / 2) - 1);
+ status = cad_qspi_set_baudrate_div(div_bits);
+ if (status != 0)
+ return;
+
+ data_cap_delay = 0;
+ first_pass = -1;
+ last_pass = -1;
+
+ do {
+ if (status != 0)
+ break;
+ status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID, 0,
+ 3, &rdid);
+ if (status != 0)
+ break;
+ if (rdid == sample_rdid) {
+ if (first_pass == -1)
+ first_pass = data_cap_delay;
+ else
+ last_pass = data_cap_delay;
+ }
+
+ data_cap_delay++;
+
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_RDDATACAP,
+ CAD_QSPI_RDDATACAP_BYP(1) |
+ CAD_QSPI_RDDATACAP_DELAY(data_cap_delay));
+
+ } while (data_cap_delay < 0x10);
+
+	if ((first_pass > 0) && (last_pass > first_pass)) {
+		int diff = last_pass - first_pass;
+
+ data_cap_delay = first_pass + diff / 2;
+ }
+
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_RDDATACAP,
+ CAD_QSPI_RDDATACAP_BYP(1) |
+ CAD_QSPI_RDDATACAP_DELAY(data_cap_delay));
+ status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID, 0, 3, &rdid);
+
+ if (status != 0)
+ return;
+}
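A worked example of the window search above: if RDID matches the reference value for read capture delays 2 through 8, then first_pass = 2, last_pass = 8 and the final setting lands in the middle of the passing window:

/* first_pass = 2, last_pass = 8 -> data_cap_delay = 2 + (8 - 2) / 2 = 5 */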
+
+int cad_qspi_int_disable(uint32_t mask)
+{
+ if (cad_qspi_idle() == 0)
+ return -1;
+
+ if ((CAD_QSPI_INT_STATUS_ALL & mask) == 0)
+ return -1;
+
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_IRQMSK, mask);
+ return 0;
+}
+
+void cad_qspi_set_chip_select(int cs)
+{
+ cad_qspi_cs = cs;
+}
+
+int cad_qspi_init(uint32_t desired_clk_freq, uint32_t clk_phase,
+ uint32_t clk_pol, uint32_t csda, uint32_t csdads,
+ uint32_t cseot, uint32_t cssot, uint32_t rddatacap)
+{
+ int status = 0;
+ uint32_t qspi_desired_clk_freq;
+ uint32_t rdid = 0;
+ uint32_t cap_code;
+
+	INFO("Initializing QSPI\n");
+
+ if (cad_qspi_idle() == 0) {
+ ERROR("device not idle\n");
+ return -1;
+ }
+
+
+ status = cad_qspi_timing_config(clk_phase, clk_pol, csda, csdads,
+ cseot, cssot, rddatacap);
+
+ if (status != 0) {
+ ERROR("config set timing failure\n");
+ return status;
+ }
+
+ mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_REMAPADDR,
+ CAD_QSPI_REMAPADDR_VALUE_SET(0));
+
+ status = cad_qspi_int_disable(CAD_QSPI_INT_STATUS_ALL);
+ if (status != 0) {
+ ERROR("failed disable\n");
+ return status;
+ }
+
+ cad_qspi_set_baudrate_div(0xf);
+ status = cad_qspi_enable();
+ if (status != 0) {
+ ERROR("failed enable\n");
+ return status;
+ }
+
+ qspi_desired_clk_freq = 100;
+ cad_qspi_calibration(qspi_desired_clk_freq, 50000000);
+
+ status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID, 0, 3,
+ &rdid);
+
+ if (status != 0) {
+ ERROR("Error reading RDID\n");
+ return status;
+ }
+
+ /*
+ * NOTE: The Size code seems to be a form of BCD (binary coded decimal).
+ * The first nibble is the 10's digit and the second nibble is the 1's
+ * digit in the number of bytes.
+ *
+ * Capacity ID samples:
+ * 0x15 : 16 Mb => 2 MiB => 1 << 21 ; BCD=15
+ * 0x16 : 32 Mb => 4 MiB => 1 << 22 ; BCD=16
+ * 0x17 : 64 Mb => 8 MiB => 1 << 23 ; BCD=17
+ * 0x18 : 128 Mb => 16 MiB => 1 << 24 ; BCD=18
+ * 0x19 : 256 Mb => 32 MiB => 1 << 25 ; BCD=19
+ * 0x1a
+ * 0x1b
+ * 0x1c
+ * 0x1d
+ * 0x1e
+ * 0x1f
+ * 0x20 : 512 Mb => 64 MiB => 1 << 26 ; BCD=20
+ * 0x21 : 1024 Mb => 128 MiB => 1 << 27 ; BCD=21
+ */
+
+ cap_code = CAD_QSPI_STIG_RDID_CAPACITYID(rdid);
+
+ if (!(((cap_code >> 4) > 0x9) || ((cap_code & 0xf) > 0x9))) {
+ uint32_t decoded_cap = ((cap_code >> 4) * 10) +
+ (cap_code & 0xf);
+ qspi_device_size = 1 << (decoded_cap + 6);
+ INFO("QSPI Capacity: %x\n\n", qspi_device_size);
+
+ } else {
+ ERROR("Invalid CapacityID encountered: 0x%02x\n",
+ cap_code);
+ return -1;
+ }
+
+ cad_qspi_configure_dev_size(INTEL_QSPI_ADDR_BYTES,
+ INTEL_QSPI_BYTES_PER_DEV,
+ INTEL_BYTES_PER_BLOCK);
+
+ INFO("Flash size: %d Bytes\n", qspi_device_size);
+
+ return status;
+}
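A worked example of the BCD capacity decode above, matching the table in the comment: a capacity ID of 0x18 decodes to 1 * 10 + 8 = 18, so the device size becomes:

/* qspi_device_size = 1 << (18 + 6) = 0x1000000 bytes (16 MiB) */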
+
+int cad_qspi_indirect_page_bound_write(uint32_t offset,
+ uint8_t *buffer, uint32_t len)
+{
+ int status = 0, i;
+ uint32_t write_count, write_capacity, *write_data, space,
+ write_fill_level, sram_partition;
+
+ status = cad_qspi_indirect_write_start_bank(offset, len);
+ if (status != 0)
+ return status;
+
+ write_count = 0;
+ sram_partition = CAD_QSPI_SRAMPART_ADDR(mmio_read_32(CAD_QSPI_OFFSET +
+ CAD_QSPI_SRAMPART));
+ write_capacity = (uint32_t) CAD_QSPI_SRAM_FIFO_ENTRY_COUNT -
+ sram_partition;
+
+ while (write_count < len) {
+ write_fill_level = CAD_QSPI_SRAMFILL_INDWRPART(
+ mmio_read_32(CAD_QSPI_OFFSET +
+ CAD_QSPI_SRAMFILL));
+ space = LESS(write_capacity - write_fill_level,
+ (len - write_count) / sizeof(uint32_t));
+ write_data = (uint32_t *)(buffer + write_count);
+ for (i = 0; i < space; ++i)
+ mmio_write_32(CAD_QSPIDATA_OFST, *write_data++);
+
+ write_count += space * sizeof(uint32_t);
+ }
+ return cad_qspi_indirect_write_finish();
+}
+
+int cad_qspi_read_bank(uint8_t *buffer, uint32_t offset, uint32_t size)
+{
+ int status;
+ uint32_t read_count = 0, *read_data;
+ int level = 1, count = 0, i;
+
+ status = cad_qspi_indirect_read_start_bank(offset, size);
+
+ if (status != 0)
+ return status;
+
+ while (read_count < size) {
+ do {
+ level = CAD_QSPI_SRAMFILL_INDRDPART(
+ mmio_read_32(CAD_QSPI_OFFSET +
+ CAD_QSPI_SRAMFILL));
+ read_data = (uint32_t *)(buffer + read_count);
+ for (i = 0; i < level; ++i)
+ *read_data++ = mmio_read_32(CAD_QSPIDATA_OFST);
+
+ read_count += level * sizeof(uint32_t);
+ count++;
+ } while (level > 0);
+ }
+
+ return 0;
+}
+
+int cad_qspi_write_bank(uint32_t offset, uint8_t *buffer, uint32_t size)
+{
+ int status = 0;
+ uint32_t page_offset = offset & (CAD_QSPI_PAGE_SIZE - 1);
+ uint32_t write_size = LESS(size, CAD_QSPI_PAGE_SIZE - page_offset);
+
+ while (size) {
+ status = cad_qspi_indirect_page_bound_write(offset, buffer,
+ write_size);
+ if (status != 0)
+ break;
+
+ offset += write_size;
+ buffer += write_size;
+ size -= write_size;
+ write_size = LESS(size, CAD_QSPI_PAGE_SIZE);
+ }
+ return status;
+}
+
+int cad_qspi_read(void *buffer, uint32_t offset, uint32_t size)
+{
+ uint32_t bank_count, bank_addr, bank_offset, copy_len;
+ uint8_t *read_data;
+ int i, status;
+
+ status = 0;
+
+ if ((offset >= qspi_device_size) ||
+ (offset + size - 1 >= qspi_device_size) ||
+ (size == 0)) {
+ ERROR("Invalid read parameter\n");
+ return -1;
+ }
+
+ if (CAD_QSPI_INDRD_RD_STAT(mmio_read_32(CAD_QSPI_OFFSET +
+ CAD_QSPI_INDRD))) {
+ ERROR("Read in progress\n");
+ return -1;
+ }
+
+ /*
+ * bank_count : Number of bank(s) affected, including partial banks.
+ * bank_addr : Aligned address of the first bank,
+ * including partial bank.
+	 * bank_offset : The offset within the bank to read.
+ * Only used when reading the first bank.
+ */
+ bank_count = CAD_QSPI_BANK_ADDR(offset + size - 1) -
+ CAD_QSPI_BANK_ADDR(offset) + 1;
+ bank_addr = offset & CAD_QSPI_BANK_ADDR_MSK;
+ bank_offset = offset & (CAD_QSPI_BANK_SIZE - 1);
+
+ read_data = (uint8_t *)buffer;
+
+ copy_len = LESS(size, CAD_QSPI_BANK_SIZE - bank_offset);
+
+ for (i = 0; i < bank_count; ++i) {
+ status = cad_qspi_device_bank_select(CAD_QSPI_BANK_ADDR(
+ bank_addr));
+ if (status != 0)
+ break;
+ status = cad_qspi_read_bank(read_data, bank_offset, copy_len);
+ if (status != 0)
+ break;
+
+ bank_addr += CAD_QSPI_BANK_SIZE;
+ read_data += copy_len;
+ size -= copy_len;
+ bank_offset = 0;
+ copy_len = LESS(size, CAD_QSPI_BANK_SIZE);
+ }
+
+ return status;
+}
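A worked example of the bank arithmetic above: an access at offset 0x00ffff00 with size 0x200 crosses a 16 MiB (CAD_QSPI_BANK_SIZE) boundary, so two banks are touched:

/*
 * Example: offset = 0x00ffff00, size = 0x200
 *   bank_count = BANK(0x010000ff) - BANK(0x00ffff00) + 1 = 2
 *   pass 1: bank 0, bank_offset = 0x00ffff00, copy_len = 0x100
 *   pass 2: bank 1, bank_offset = 0x00000000, copy_len = 0x100
 */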
+
+int cad_qspi_erase(uint32_t offset, uint32_t size)
+{
+ int status = 0;
+ uint32_t subsector_offset = offset & (CAD_QSPI_SUBSECTOR_SIZE - 1);
+ uint32_t erase_size = LESS(size,
+ CAD_QSPI_SUBSECTOR_SIZE - subsector_offset);
+
+ while (size) {
+ status = cad_qspi_erase_subsector(offset);
+ if (status != 0)
+ break;
+
+ offset += erase_size;
+ size -= erase_size;
+ erase_size = LESS(size, CAD_QSPI_SUBSECTOR_SIZE);
+ }
+ return status;
+}
+
+int cad_qspi_write(void *buffer, uint32_t offset, uint32_t size)
+{
+ int status, i;
+ uint32_t bank_count, bank_addr, bank_offset, copy_len;
+ uint8_t *write_data;
+
+ status = 0;
+
+ if ((offset >= qspi_device_size) ||
+ (offset + size - 1 >= qspi_device_size) ||
+ (size == 0)) {
+ return -2;
+ }
+
+ if (CAD_QSPI_INDWR_RDSTAT(mmio_read_32(CAD_QSPI_OFFSET +
+ CAD_QSPI_INDWR))) {
+ ERROR("QSPI Error: Write in progress\n");
+ return -1;
+ }
+
+ bank_count = CAD_QSPI_BANK_ADDR(offset + size - 1) -
+ CAD_QSPI_BANK_ADDR(offset) + 1;
+ bank_addr = offset & CAD_QSPI_BANK_ADDR_MSK;
+ bank_offset = offset & (CAD_QSPI_BANK_SIZE - 1);
+
+ write_data = buffer;
+
+ copy_len = LESS(size, CAD_QSPI_BANK_SIZE - bank_offset);
+
+ for (i = 0; i < bank_count; ++i) {
+ status = cad_qspi_device_bank_select(
+ CAD_QSPI_BANK_ADDR(bank_addr));
+ if (status != 0)
+ break;
+
+ status = cad_qspi_write_bank(bank_offset, write_data,
+ copy_len);
+ if (status != 0)
+ break;
+
+ bank_addr += CAD_QSPI_BANK_SIZE;
+ write_data += copy_len;
+ size -= copy_len;
+ bank_offset = 0;
+
+ copy_len = LESS(size, CAD_QSPI_BANK_SIZE);
+ }
+ return status;
+}
+
+int cad_qspi_update(void *Buffer, uint32_t offset, uint32_t size)
+{
+ int status = 0;
+
+ status = cad_qspi_erase(offset, size);
+ if (status != 0)
+ return status;
+
+ return cad_qspi_write(Buffer, offset, size);
+}
+
+void cad_qspi_reset(void)
+{
+ cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_RESET_EN, 0);
+ cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_RESET_MEM, 0);
+}
+
diff --git a/plat/intel/soc/common/drivers/qspi/cadence_qspi.h b/plat/intel/soc/common/drivers/qspi/cadence_qspi.h
new file mode 100644
index 0000000..cfef585
--- /dev/null
+++ b/plat/intel/soc/common/drivers/qspi/cadence_qspi.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CAD_QSPI_H
+#define CAD_QSPI_H
+
+#define CAD_QSPI_MICRON_N25Q_SUPPORT 1
+
+#define CAD_QSPI_OFFSET 0xff8d2000
+
+#define CAD_INVALID -1
+#define CAD_QSPI_ERROR -2
+
+#define CAD_QSPI_ADDR_FASTREAD 0
+#define CAD_QSPI_ADDR_FASTREAD_DUAL_IO 1
+#define CAD_QSPI_ADDR_FASTREAD_QUAD_IO 2
+#define CAT_QSPI_ADDR_SINGLE_IO 0
+#define CAT_QSPI_ADDR_DUAL_IO 1
+#define CAT_QSPI_ADDR_QUAD_IO 2
+
+#define CAD_QSPI_BANK_ADDR(x) ((x) >> 24)
+#define CAD_QSPI_BANK_ADDR_MSK 0xff000000
+
+#define CAD_QSPI_COMMAND_TIMEOUT 0x10000000
+
+#define CAD_QSPI_CFG 0x0
+#define CAD_QSPI_CFG_BAUDDIV_MSK 0xff87ffff
+#define CAD_QSPI_CFG_BAUDDIV(x) (((x) << 19) & 0x780000)
+#define CAD_QSPI_CFG_CS_MSK ~0x3c00
+#define CAD_QSPI_CFG_CS(x) (((x) << 11))
+#define CAD_QSPI_CFG_ENABLE (1 << 0)
+#define CAD_QSPI_CFG_ENDMA_CLR_MSK 0xffff7fff
+#define CAD_QSPI_CFG_IDLE (1U << 31)
+#define CAD_QSPI_CFG_SELCLKPHASE_CLR_MSK 0xfffffffb
+#define CAD_QSPI_CFG_SELCLKPOL_CLR_MSK 0xfffffffd
+
+#define CAD_QSPIDATA_OFST 0xff900000
+
+#define CAD_QSPI_DELAY 0xc
+#define CAD_QSPI_DELAY_CSSOT(x) (((x) & 0xff) << 0)
+#define CAD_QSPI_DELAY_CSEOT(x) (((x) & 0xff) << 8)
+#define CAD_QSPI_DELAY_CSDADS(x) (((x) & 0xff) << 16)
+#define CAD_QSPI_DELAY_CSDA(x) (((x) & 0xff) << 24)
+
+#define CAD_QSPI_DEVSZ 0x14
+#define CAD_QSPI_DEVSZ_ADDR_BYTES(x) ((x) << 0)
+#define CAD_QSPI_DEVSZ_BYTES_PER_PAGE(x) ((x) << 4)
+#define CAD_QSPI_DEVSZ_BYTES_PER_BLOCK(x) ((x) << 16)
+
+#define CAD_QSPI_DEVWR 0x8
+#define CAD_QSPI_DEVRD 0x4
+#define CAD_QSPI_DEV_OPCODE(x) (((x) & 0xff) << 0)
+#define CAD_QSPI_DEV_INST_TYPE(x) (((x) & 0x03) << 8)
+#define CAD_QSPI_DEV_ADDR_TYPE(x) (((x) & 0x03) << 12)
+#define CAD_QSPI_DEV_DATA_TYPE(x) (((x) & 0x03) << 16)
+#define CAD_QSPI_DEV_MODE_BIT(x) (((x) & 0x01) << 20)
+#define CAD_QSPI_DEV_DUMMY_CLK_CYCLE(x) (((x) & 0x0f) << 24)
+
+#define CAD_QSPI_FLASHCMD 0x90
+#define CAD_QSPI_FLASHCMD_ADDR 0x94
+#define CAD_QSPI_FLASHCMD_EXECUTE 0x1
+#define CAD_QSPI_FLASHCMD_EXECUTE_STAT 0x2
+#define CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX 5
+#define CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES(x) (((x) << 7) & 0x000f80)
+#define CAD_QSPI_FLASHCMD_OPCODE(x) (((x) & 0xff) << 24)
+#define CAD_QSPI_FLASHCMD_ENRDDATA(x) (((x) & 1) << 23)
+#define CAD_QSPI_FLASHCMD_NUMRDDATABYTES(x) (((x) & 0xf) << 20)
+#define CAD_QSPI_FLASHCMD_ENCMDADDR(x) (((x) & 1) << 19)
+#define CAD_QSPI_FLASHCMD_ENMODEBIT(x) (((x) & 1) << 18)
+#define CAD_QSPI_FLASHCMD_NUMADDRBYTES(x) (((x) & 0x3) << 16)
+#define CAD_QSPI_FLASHCMD_ENWRDATA(x) (((x) & 1) << 15)
+#define CAD_QSPI_FLASHCMD_NUMWRDATABYTES(x) (((x) & 0x7) << 12)
+#define CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(x) (((x) & 0x1f) << 7)
+#define CAD_QSPI_FLASHCMD_RDDATA0 0xa0
+#define CAD_QSPI_FLASHCMD_RDDATA1 0xa4
+#define CAD_QSPI_FLASHCMD_WRDATA0 0xa8
+#define CAD_QSPI_FLASHCMD_WRDATA1 0xac
+
+#define CAD_QSPI_RDDATACAP 0x10
+#define CAD_QSPI_RDDATACAP_BYP(x) (((x) & 1) << 0)
+#define CAD_QSPI_RDDATACAP_DELAY(x) (((x) & 0xf) << 1)
+
+#define CAD_QSPI_REMAPADDR 0x24
+#define CAD_QSPI_REMAPADDR_VALUE_SET(x) (((x) & 0xffffffff) << 0)
+
+#define CAD_QSPI_SRAMPART 0x18
+#define CAD_QSPI_SRAMFILL 0x2c
+#define CAD_QSPI_SRAMPART_ADDR(x) (((x) >> 0) & 0x3ff)
+#define CAD_QSPI_SRAM_FIFO_ENTRY_COUNT (512 / sizeof(uint32_t))
+#define CAD_QSPI_SRAMFILL_INDWRPART(x) (((x) >> 16) & 0x00ffff)
+#define CAD_QSPI_SRAMFILL_INDRDPART(x) (((x) >> 0) & 0x00ffff)
+
+#define CAD_QSPI_SELCLKPHASE(x) (((x) & 1) << 2)
+#define CAD_QSPI_SELCLKPOL(x) (((x) & 1) << 1)
+
+#define CAD_QSPI_STIG_FLAGSR_PROGRAMREADY(x) (((x) >> 7) & 1)
+#define CAD_QSPI_STIG_FLAGSR_ERASEREADY(x) (((x) >> 7) & 1)
+#define CAD_QSPI_STIG_FLAGSR_ERASEERROR(x) (((x) >> 5) & 1)
+#define CAD_QSPI_STIG_FLAGSR_PROGRAMERROR(x) (((x) >> 4) & 1)
+#define CAD_QSPI_STIG_OPCODE_CLFSR 0x50
+#define CAD_QSPI_STIG_OPCODE_RDID 0x9f
+#define CAD_QSPI_STIG_OPCODE_WRDIS 0x4
+#define CAD_QSPI_STIG_OPCODE_WREN 0x6
+#define CAD_QSPI_STIG_OPCODE_SUBSEC_ERASE 0x20
+#define CAD_QSPI_STIG_OPCODE_SEC_ERASE 0xd8
+#define CAD_QSPI_STIG_OPCODE_WREN_EXT_REG 0xc5
+#define CAD_QSPI_STIG_OPCODE_DIE_ERASE 0xc4
+#define CAD_QSPI_STIG_OPCODE_BULK_ERASE 0xc7
+#define CAD_QSPI_STIG_OPCODE_RDSR 0x5
+#define CAD_QSPI_STIG_OPCODE_RDFLGSR 0x70
+#define CAD_QSPI_STIG_OPCODE_RESET_EN 0x66
+#define CAD_QSPI_STIG_OPCODE_RESET_MEM 0x99
+#define CAD_QSPI_STIG_RDID_CAPACITYID(x) (((x) >> 16) & 0xff)
+#define CAD_QSPI_STIG_SR_BUSY(x) (((x) >> 0) & 1)
+
+
+#define CAD_QSPI_INST_SINGLE 0
+#define CAD_QSPI_INST_DUAL 1
+#define CAD_QSPI_INST_QUAD 2
+
+#define CAD_QSPI_INDRDSTADDR 0x68
+#define CAD_QSPI_INDRDCNT 0x6c
+#define CAD_QSPI_INDRD 0x60
+#define CAD_QSPI_INDRD_RD_STAT(x) (((x) >> 2) & 1)
+#define CAD_QSPI_INDRD_START 1
+#define CAD_QSPI_INDRD_IND_OPS_DONE 0x20
+
+#define CAD_QSPI_INDWR 0x70
+#define CAD_QSPI_INDWR_RDSTAT(x) (((x) >> 2) & 1)
+#define CAD_QSPI_INDWRSTADDR 0x78
+#define CAD_QSPI_INDWRCNT 0x7c
+#define CAD_QSPI_INDWR_START 0x1
+#define CAD_QSPI_INDWR_INDDONE 0x20
+
+#define CAD_QSPI_INT_STATUS_ALL 0x0000ffff
+
+#define CAD_QSPI_N25Q_DIE_SIZE 0x02000000
+#define CAD_QSPI_BANK_SIZE 0x01000000
+#define CAD_QSPI_PAGE_SIZE 0x00000100
+
+#define CAD_QSPI_IRQMSK 0x44
+
+#define CAD_QSPI_SUBSECTOR_SIZE 0x1000
+
+#define INTEL_QSPI_ADDR_BYTES 2
+#define INTEL_QSPI_BYTES_PER_DEV 256
+#define INTEL_BYTES_PER_BLOCK 16
+
+#define QSPI_FAST_READ 0xb
+
+#define QSPI_WRITE 0x2
+
+/* QSPI Configurations */
+
+#define QSPI_CONFIG_CPOL 1
+#define QSPI_CONFIG_CPHA 1
+
+#define QSPI_CONFIG_CSSOT 0x14
+#define QSPI_CONFIG_CSEOT 0x14
+#define QSPI_CONFIG_CSDADS 0xff
+#define QSPI_CONFIG_CSDA 0xc8
+
+int cad_qspi_init(uint32_t desired_clk_freq, uint32_t clk_phase,
+ uint32_t clk_pol, uint32_t csda, uint32_t csdads,
+ uint32_t cseot, uint32_t cssot, uint32_t rddatacap);
+void cad_qspi_set_chip_select(int cs);
+int cad_qspi_erase(uint32_t offset, uint32_t size);
+int cad_qspi_write(void *buffer, uint32_t offset, uint32_t size);
+int cad_qspi_read(void *buffer, uint32_t offset, uint32_t size);
+int cad_qspi_update(void *buffer, uint32_t offset, uint32_t size);
+
+#endif
+
diff --git a/plat/intel/soc/common/drivers/wdt/watchdog.c b/plat/intel/soc/common/drivers/wdt/watchdog.c
new file mode 100644
index 0000000..651189b
--- /dev/null
+++ b/plat/intel/soc/common/drivers/wdt/watchdog.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include "watchdog.h"
+
+
+/* Reset watchdog timer */
+void watchdog_sw_rst(void)
+{
+ mmio_write_32(WDT_CRR, WDT_SW_RST);
+}
+
+/* Print component information */
+void watchdog_info(void)
+{
+	INFO("Component Type    : %x\r\n", mmio_read_32(WDT_COMP_TYPE));
+	INFO("Component Version : %x\r\n", mmio_read_32(WDT_COMP_VERSION));
+}
+
+/* Check watchdog current status */
+void watchdog_status(void)
+{
+ if (mmio_read_32(WDT_CR) & 1) {
+ INFO("Watchdog Timer is currently enabled\n");
+ INFO("Current Counter : 0x%x\r\n", mmio_read_32(WDT_CCVR));
+ } else {
+ INFO("Watchdog Timer is currently disabled\n");
+ }
+}
+
+/* Initialize & enable watchdog */
+void watchdog_init(int watchdog_clk)
+{
+ uint8_t cycles_i = 0;
+ uint32_t wdt_cycles = WDT_MIN_CYCLES;
+ uint32_t top_init_cycles = WDT_PERIOD * watchdog_clk;
+
+ while ((cycles_i < 15) && (wdt_cycles < top_init_cycles)) {
+ wdt_cycles = (wdt_cycles << 1);
+ cycles_i++;
+ }
+
+ mmio_write_32(WDT_TORR, (cycles_i << 4) | cycles_i);
+
+	mmio_write_32(WDT_CR, WDT_CR_RMOD | WDT_CR_EN);
+}
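A worked example of the timeout selection above, assuming a 25 MHz watchdog clock (the clock value is an assumption for illustration, not taken from the source):

/*
 * top_init_cycles = 20 * 25,000,000 = 500,000,000
 * 65536 << 12 = 268,435,456 <  500,000,000
 * 65536 << 13 = 536,870,912 >= 500,000,000  -> cycles_i = 13
 * WDT_TORR is then written with (13 << 4) | 13.
 */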
diff --git a/plat/intel/soc/common/drivers/wdt/watchdog.h b/plat/intel/soc/common/drivers/wdt/watchdog.h
new file mode 100644
index 0000000..2c72463
--- /dev/null
+++ b/plat/intel/soc/common/drivers/wdt/watchdog.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CAD_WATCHDOG_H
+#define CAD_WATCHDOG_H
+
+#define WDT_BASE (0xFFD00200)
+#define WDT_REG_SIZE_OFFSET (0x4)
+#define WDT_MIN_CYCLES (65536)
+#define WDT_PERIOD (20)
+
+#define WDT_CR (WDT_BASE + 0x0)
+#define WDT_TORR (WDT_BASE + 0x4)
+
+#define WDT_CRR (WDT_BASE + 0xC)
+
+#define WDT_CCVR (WDT_BASE + 0x8)
+#define WDT_STAT (WDT_BASE + 0x10)
+#define WDT_EOI (WDT_BASE + 0x14)
+
+#define WDT_COMP_PARAM_1 (WDT_BASE + 0xF4)
+#define WDT_COMP_VERSION (WDT_BASE + 0xF8)
+#define WDT_COMP_TYPE (WDT_BASE + 0XFC)
+
+#define WDT_CR_RMOD (0x0)
+#define WDT_CR_EN (0x1)
+
+#define WDT_SW_RST (0x76)
+
+
+void watchdog_init(int watchdog_clk);
+void watchdog_info(void);
+void watchdog_status(void);
+void watchdog_sw_rst(void);
+
+#endif
diff --git a/plat/intel/soc/common/include/plat_macros.S b/plat/intel/soc/common/include/plat_macros.S
new file mode 100644
index 0000000..43db9a2
--- /dev/null
+++ b/plat/intel/soc/common/include/plat_macros.S
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_MACROS_S
+#define PLAT_MACROS_S
+
+#include <platform_def.h>
+
+ /* ---------------------------------------------
+ * The below required platform porting macro
+ * prints out relevant platform registers
+ * whenever an unhandled exception is taken in
+ * BL31.
+ * ---------------------------------------------
+ */
+ .macro plat_crash_print_regs
+ .endm
+
+#endif /* PLAT_MACROS_S */
diff --git a/plat/intel/soc/common/include/platform_def.h b/plat/intel/soc/common/include/platform_def.h
new file mode 100644
index 0000000..2b3f144
--- /dev/null
+++ b/plat/intel/soc/common/include/platform_def.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <arch.h>
+#include <common/interrupt_props.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <plat/common/common_def.h>
+
+/* Platform Type */
+#define PLAT_SOCFPGA_STRATIX10 1
+#define PLAT_SOCFPGA_AGILEX 2
+#define PLAT_SOCFPGA_N5X 3
+#define PLAT_SOCFPGA_EMULATOR 0
+
+/* sysmgr.boot_scratch_cold4 & 5 used for CPU release address for SPL */
+#define PLAT_CPU_RELEASE_ADDR 0xffd12210
+
+/*
+ * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate L2 reset
+ * is done and HPS should trigger warm reset via RMR_EL3.
+ */
+#define L2_RESET_DONE_REG 0xFFD12218
+
+/* Magic word to indicate L2 reset is completed */
+#define L2_RESET_DONE_STATUS 0x1228E5E7
+
+/* Define next boot image name and offset */
+#define PLAT_NS_IMAGE_OFFSET 0x10000000
+#define PLAT_HANDOFF_OFFSET 0xFFE3F000
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+/* SoCFPGA supports up to 124GB RAM */
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 39)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 39)
+
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+#define PLAT_PRIMARY_CPU 0
+#define PLAT_SECONDARY_ENTRY_BASE 0x01f78bf0
+
+/* Size of cacheable stacks */
+#define PLATFORM_STACK_SIZE 0x2000
+
+/* PSCI related constant */
+#define PLAT_NUM_POWER_DOMAINS 5
+#define PLAT_MAX_PWR_LVL 1
+#define PLAT_MAX_RET_STATE 1
+#define PLAT_MAX_OFF_STATE 2
+#define PLATFORM_SYSTEM_COUNT U(1)
+#define PLATFORM_CLUSTER_COUNT U(1)
+#define PLATFORM_CLUSTER0_CORE_COUNT U(4)
+#define PLATFORM_CLUSTER1_CORE_COUNT U(0)
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \
+ PLATFORM_CLUSTER0_CORE_COUNT)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER U(4)
+
+/* Interrupt related constant */
+
+#define INTEL_SOCFPGA_IRQ_SEC_PHY_TIMER 29
+
+#define INTEL_SOCFPGA_IRQ_SEC_SGI_0 8
+#define INTEL_SOCFPGA_IRQ_SEC_SGI_1 9
+#define INTEL_SOCFPGA_IRQ_SEC_SGI_2 10
+#define INTEL_SOCFPGA_IRQ_SEC_SGI_3 11
+#define INTEL_SOCFPGA_IRQ_SEC_SGI_4 12
+#define INTEL_SOCFPGA_IRQ_SEC_SGI_5 13
+#define INTEL_SOCFPGA_IRQ_SEC_SGI_6 14
+#define INTEL_SOCFPGA_IRQ_SEC_SGI_7 15
+
+#define TSP_IRQ_SEC_PHY_TIMER INTEL_SOCFPGA_IRQ_SEC_PHY_TIMER
+#define TSP_SEC_MEM_BASE BL32_BASE
+#define TSP_SEC_MEM_SIZE (BL32_LIMIT - BL32_BASE + 1)
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+#define DRAM_BASE (0x0)
+#define DRAM_SIZE (0x80000000)
+
+#define OCRAM_BASE (0xFFE00000)
+#define OCRAM_SIZE (0x00040000)
+
+#define MEM64_BASE (0x0100000000)
+#define MEM64_SIZE (0x1F00000000)
+
+#define DEVICE1_BASE (0x80000000)
+#define DEVICE1_SIZE (0x60000000)
+
+#define DEVICE2_BASE (0xF7000000)
+#define DEVICE2_SIZE (0x08E00000)
+
+#define DEVICE3_BASE (0xFFFC0000)
+#define DEVICE3_SIZE (0x00008000)
+
+#define DEVICE4_BASE (0x2000000000)
+#define DEVICE4_SIZE (0x0100000000)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL3-1 at the top of the Trusted SRAM (just below the shared memory, if
+ * present). BL31_BASE is calculated using the current BL3-1 debug size plus a
+ * little space for growth.
+ */
+
+
+#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n"
+
+#define BL1_RO_BASE (0xffe00000)
+#define BL1_RO_LIMIT (0xffe0f000)
+#define BL1_RW_BASE (0xffe10000)
+#define BL1_RW_LIMIT (0xffe1ffff)
+#define BL1_RW_SIZE (0x14000)
+
+#define BL2_BASE (0xffe00000)
+#define BL2_LIMIT (0xffe1b000)
+
+#define BL31_BASE (0x1000)
+#define BL31_LIMIT (0x81000)
+
+#define BL_DATA_LIMIT PLAT_HANDOFF_OFFSET
+
+#define PLAT_CPUID_RELEASE (BL_DATA_LIMIT - 16)
+#define PLAT_SEC_ENTRY (BL_DATA_LIMIT - 8)
+
+#define PLAT_SEC_WARM_ENTRY 0
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define MAX_XLAT_TABLES 8
+#define MAX_MMAP_REGIONS 16
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different cpus/clusters &
+ * get written while being protected by different locks causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
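A declaration honouring the alignment rule spelled out in the comment above might look like the following; the variable is hypothetical and only illustrates the use of CACHE_WRITEBACK_GRANULE:

static uint64_t cpu_release_mailbox[PLATFORM_CORE_COUNT]
		__aligned(CACHE_WRITEBACK_GRANULE);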
+
+#define PLAT_GIC_BASE (0xFFFC0000)
+#define PLAT_GICC_BASE (PLAT_GIC_BASE + 0x2000)
+#define PLAT_GICD_BASE (PLAT_GIC_BASE + 0x1000)
+#define PLAT_GICR_BASE 0
+
+/*******************************************************************************
+ * UART related constants
+ ******************************************************************************/
+#define PLAT_UART0_BASE (0xFFC02000)
+#define PLAT_UART1_BASE (0xFFC02100)
+
+#define CRASH_CONSOLE_BASE PLAT_UART0_BASE
+#define PLAT_INTEL_UART_BASE PLAT_UART0_BASE
+
+#if PLAT_SOCFPGA_EMULATOR
+#define PLAT_BAUDRATE (4800)
+#define PLAT_UART_CLOCK (76800)
+#else
+#define PLAT_BAUDRATE (115200)
+#define PLAT_UART_CLOCK (100000000)
+#endif
+
+/*******************************************************************************
+ * PHY related constants
+ ******************************************************************************/
+
+#define EMAC0_PHY_MODE PHY_INTERFACE_MODE_RGMII
+#define EMAC1_PHY_MODE PHY_INTERFACE_MODE_RGMII
+#define EMAC2_PHY_MODE PHY_INTERFACE_MODE_RGMII
+
+/*******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+#define PLAT_SYS_COUNTER_FREQ_IN_TICKS (400000000)
+#define PLAT_SYS_COUNTER_CONVERT_TO_MHZ (1000000)
+
+#define PLAT_INTEL_SOCFPGA_GICD_BASE PLAT_GICD_BASE
+#define PLAT_INTEL_SOCFPGA_GICC_BASE PLAT_GICC_BASE
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(grp) \
+ INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_PHY_TIMER, \
+ GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_LEVEL), \
+ INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_0, \
+ GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_1, \
+ GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_2, \
+ GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_3, \
+ GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_4, \
+ GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_5, \
+ GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_6, \
+ GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_7, \
+ GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE)
+
+#define PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(grp)
+
+#define MAX_IO_HANDLES 4
+#define MAX_IO_DEVICES 4
+#define MAX_IO_BLOCK_DEVICES 2
+
+#ifndef __ASSEMBLER__
+struct socfpga_bl31_params {
+ param_header_t h;
+ image_info_t *bl31_image_info;
+ entry_point_info_t *bl32_ep_info;
+ image_info_t *bl32_image_info;
+ entry_point_info_t *bl33_ep_info;
+ image_info_t *bl33_image_info;
+};
+#endif
+
+#endif /* PLATFORM_DEF_H */
+
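Editor's note: the DRAM/DEVICE/OCRAM window constants above ultimately feed the platform's translation tables. As a hedged illustration only (the real table lives in the common platform code of this patch and may use different attributes; the array name below is hypothetical), a flat mapping built from these definitions could look like this:

#include <lib/xlat_tables/xlat_tables_v2.h>
#include <platform_def.h>

/* Editor's sketch, not part of the patch: map a few of the windows above. */
static const mmap_region_t example_plat_mmap[] = {
	/* DDR window, normal memory visible to the non-secure world. */
	MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS),
	/* HPS peripheral window, mapped as device memory. */
	MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_NS),
	/* On-chip RAM used by the early boot stages. */
	MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE, MT_NON_CACHEABLE | MT_RW | MT_SECURE),
	{0}
};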
diff --git a/plat/intel/soc/common/include/socfpga_emac.h b/plat/intel/soc/common/include/socfpga_emac.h
new file mode 100644
index 0000000..5b98006
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_emac.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_EMAC_H
+#define SOCFPGA_EMAC_H
+
+/* EMAC PHY Mode */
+
+#define PHY_INTERFACE_MODE_GMII_MII 0
+#define PHY_INTERFACE_MODE_RGMII 1
+#define PHY_INTERFACE_MODE_RMII 2
+#define PHY_INTERFACE_MODE_RESET 3
+
+/* Mask Definitions */
+
+#define PHY_INTF_SEL_MSK 0x3
+#define FPGAINTF_EN_3_EMAC_MSK(x) (1 << (x * 8))
+
+void socfpga_emac_init(void);
+
+#endif /* SOCFPGA_EMAC_H */
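Editor's note: the mode and mask values above are consumed together with the System Manager EMAC registers declared in socfpga_system_manager.h later in this patch. A minimal, hedged sketch of selecting RGMII for EMAC0 (the function name is illustrative; socfpga_emac_init() is the real entry point):

#include <lib/mmio.h>
#include "socfpga_emac.h"
#include "socfpga_system_manager.h"

static void example_emac0_select_rgmii(void)
{
	/* Replace the PHY interface select field for EMAC0 with RGMII. */
	mmio_clrsetbits_32(SOCFPGA_SYSMGR(EMAC_0),
			   PHY_INTF_SEL_MSK, EMAC0_PHY_MODE);

	/* Route EMAC0 to HPS I/O instead of the FPGA fabric interface. */
	mmio_clrbits_32(SOCFPGA_SYSMGR(FPGAINTF_EN_3),
			FPGAINTF_EN_3_EMAC_MSK(0));
}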
diff --git a/plat/intel/soc/common/include/socfpga_f2sdram_manager.h b/plat/intel/soc/common/include/socfpga_f2sdram_manager.h
new file mode 100644
index 0000000..82bb6cb
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_f2sdram_manager.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_F2SDRAMMANAGER_H
+#define SOCFPGA_F2SDRAMMANAGER_H
+
+#include "socfpga_plat_def.h"
+
+/* FPGA2SDRAM Register Map */
+#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGINSTATUS0 0x14
+#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGOUTCLR0 0x54
+#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGOUTSET0 0x50
+
+#define FLAGOUTSETCLR_F2SDRAM0_ENABLE (BIT(1))
+#define FLAGOUTSETCLR_F2SDRAM1_ENABLE (BIT(4))
+#define FLAGOUTSETCLR_F2SDRAM2_ENABLE (BIT(7))
+
+#define FLAGOUTSETCLR_F2SDRAM0_IDLEREQ (BIT(0))
+#define FLAGOUTSETCLR_F2SDRAM1_IDLEREQ (BIT(3))
+#define FLAGOUTSETCLR_F2SDRAM2_IDLEREQ (BIT(6))
+#define FLAGINTSTATUS_F2SDRAM0_IDLEACK (BIT(1))
+#define FLAGINTSTATUS_F2SDRAM1_IDLEACK (BIT(5))
+#define FLAGINTSTATUS_F2SDRAM2_IDLEACK (BIT(9))
+#define FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN (BIT(2))
+#define FLAGOUTSETCLR_F2SDRAM1_FORCE_DRAIN (BIT(5))
+#define FLAGOUTSETCLR_F2SDRAM2_FORCE_DRAIN (BIT(8))
+
+#define FLAGINTSTATUS_F2SOC_RESPEMPTY (BIT(3))
+#define FLAGINTSTATUS_F2SDRAM0_RESPEMPTY (BIT(3))
+#define FLAGINTSTATUS_F2SDRAM1_RESPEMPTY (BIT(7))
+#define FLAGINTSTATUS_F2SDRAM2_RESPEMPTY (BIT(11))
+
+#define SOCFPGA_F2SDRAMMGR(_reg) (SOCFPGA_F2SDRAMMGR_REG_BASE \
+ + (SOCFPGA_F2SDRAMMGR_##_reg))
+
+#endif /* SOCFPGA_F2SDRAMMANAGER_H */
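Editor's note: the FLAGOUT/FLAGIN bits above implement an idle-request handshake per F2SDRAM interface. A hedged sketch of the request/acknowledge sequence for interface 0 (illustrative helper, no timeout handling; the production handshake lives in the reset manager code elsewhere in this patch):

#include <lib/mmio.h>
#include "socfpga_f2sdram_manager.h"

static void example_f2sdram0_request_idle(void)
{
	/* Raise the idle request flag for F2SDRAM interface 0. */
	mmio_write_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
		      FLAGOUTSETCLR_F2SDRAM0_IDLEREQ);

	/* Busy-wait for the matching idle acknowledge (no timeout here). */
	while ((mmio_read_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGINSTATUS0)) &
		FLAGINTSTATUS_F2SDRAM0_IDLEACK) == 0U) {
	}
}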
diff --git a/plat/intel/soc/common/include/socfpga_fcs.h b/plat/intel/soc/common/include/socfpga_fcs.h
new file mode 100644
index 0000000..893551d
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_fcs.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_FCS_H
+#define SOCFPGA_FCS_H
+
+/* FCS Definitions */
+
+#define FCS_RANDOM_WORD_SIZE 8U
+#define FCS_PROV_DATA_WORD_SIZE 44U
+#define FCS_SHA384_WORD_SIZE 12U
+
+#define FCS_RANDOM_BYTE_SIZE (FCS_RANDOM_WORD_SIZE * 4U)
+#define FCS_RANDOM_EXT_MAX_WORD_SIZE 1020U
+#define FCS_PROV_DATA_BYTE_SIZE (FCS_PROV_DATA_WORD_SIZE * 4U)
+#define FCS_SHA384_BYTE_SIZE (FCS_SHA384_WORD_SIZE * 4U)
+
+#define FCS_RANDOM_EXT_OFFSET 3
+
+#define FCS_MODE_DECRYPT 0x0
+#define FCS_MODE_ENCRYPT 0x1
+#define FCS_ENCRYPTION_DATA_0 0x10100
+#define FCS_DECRYPTION_DATA_0 0x10102
+#define FCS_OWNER_ID_OFFSET 0xC
+#define FCS_CRYPTION_CRYPTO_HEADER 0x07000000
+#define FCS_CRYPTION_RESP_WORD_SIZE 4U
+#define FCS_CRYPTION_RESP_SIZE_OFFSET 3U
+
+#define PSGSIGMA_TEARDOWN_MAGIC 0xB852E2A4
+#define PSGSIGMA_SESSION_ID_ONE 0x1
+#define PSGSIGMA_UNKNOWN_SESSION 0xFFFFFFFF
+
+#define RESERVED_AS_ZERO 0x0
+/* FCS Single cert */
+
+#define FCS_BIG_CNTR_SEL 0x1
+
+#define FCS_SVN_CNTR_0_SEL 0x2
+#define FCS_SVN_CNTR_1_SEL 0x3
+#define FCS_SVN_CNTR_2_SEL 0x4
+#define FCS_SVN_CNTR_3_SEL 0x5
+
+#define FCS_BIG_CNTR_VAL_MAX 495U
+#define FCS_SVN_CNTR_VAL_MAX 64U
+
+/* FCS Attestation Cert Request Parameter */
+
+#define FCS_ATTEST_FIRMWARE_CERT 0x01
+#define FCS_ATTEST_DEV_ID_SELF_SIGN_CERT 0x02
+#define FCS_ATTEST_DEV_ID_ENROLL_CERT 0x04
+#define FCS_ATTEST_ENROLL_SELF_SIGN_CERT 0x08
+#define FCS_ATTEST_ALIAS_CERT 0x10
+#define FCS_ATTEST_CERT_MAX_REQ_PARAM 0xFF
+
+/* FCS Crypto Service */
+
+#define FCS_CS_KEY_OBJ_MAX_WORD_SIZE 88U
+#define FCS_CS_KEY_INFO_MAX_WORD_SIZE 36U
+#define FCS_CS_KEY_RESP_STATUS_MASK 0xFF
+#define FCS_CS_KEY_RESP_STATUS_OFFSET 16U
+
+#define FCS_CS_FIELD_SIZE_MASK 0xFFFF
+#define FCS_CS_FIELD_FLAG_OFFSET 24
+#define FCS_CS_FIELD_FLAG_INIT BIT(0)
+#define FCS_CS_FIELD_FLAG_UPDATE BIT(1)
+#define FCS_CS_FIELD_FLAG_FINALIZE BIT(2)
+
+#define FCS_AES_MAX_DATA_SIZE 0x10000000 /* 256 MB */
+#define FCS_AES_MIN_DATA_SIZE 0x20 /* 32 Byte */
+#define FCS_AES_CMD_MAX_WORD_SIZE 15U
+
+#define FCS_GET_DIGEST_CMD_MAX_WORD_SIZE 7U
+#define FCS_GET_DIGEST_RESP_MAX_WORD_SIZE 19U
+#define FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE 23U
+#define FCS_MAC_VERIFY_RESP_MAX_WORD_SIZE 4U
+#define FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET 8U
+
+#define FCS_ECDSA_GET_PUBKEY_MAX_WORD_SIZE 5U
+#define FCS_ECDSA_SHA2_DATA_SIGN_CMD_MAX_WORD_SIZE 7U
+#define FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE 43U
+#define FCS_ECDSA_HASH_SIGN_CMD_MAX_WORD_SIZE 17U
+#define FCS_ECDSA_HASH_SIG_VERIFY_CMD_MAX_WORD_SIZE 52U
+#define FCS_ECDH_REQUEST_CMD_MAX_WORD_SIZE 29U
+/* FCS Payload Structure */
+typedef struct fcs_rng_payload_t {
+ uint32_t session_id;
+ uint32_t context_id;
+ uint32_t crypto_header;
+ uint32_t size;
+} fcs_rng_payload;
+
+typedef struct fcs_encrypt_payload_t {
+ uint32_t first_word;
+ uint32_t src_addr;
+ uint32_t src_size;
+ uint32_t dst_addr;
+ uint32_t dst_size;
+} fcs_encrypt_payload;
+
+typedef struct fcs_decrypt_payload_t {
+ uint32_t first_word;
+ uint32_t owner_id[2];
+ uint32_t src_addr;
+ uint32_t src_size;
+ uint32_t dst_addr;
+ uint32_t dst_size;
+} fcs_decrypt_payload;
+
+typedef struct fcs_encrypt_ext_payload_t {
+ uint32_t session_id;
+ uint32_t context_id;
+ uint32_t crypto_header;
+ uint32_t src_addr;
+ uint32_t src_size;
+ uint32_t dst_addr;
+ uint32_t dst_size;
+} fcs_encrypt_ext_payload;
+
+typedef struct fcs_decrypt_ext_payload_t {
+ uint32_t session_id;
+ uint32_t context_id;
+ uint32_t crypto_header;
+ uint32_t owner_id[2];
+ uint32_t src_addr;
+ uint32_t src_size;
+ uint32_t dst_addr;
+ uint32_t dst_size;
+} fcs_decrypt_ext_payload;
+
+typedef struct psgsigma_teardown_msg_t {
+ uint32_t reserved_word;
+ uint32_t magic_word;
+ uint32_t session_id;
+} psgsigma_teardown_msg;
+
+typedef struct fcs_cntr_set_preauth_payload_t {
+ uint32_t first_word;
+ uint32_t counter_value;
+} fcs_cntr_set_preauth_payload;
+
+typedef struct fcs_cs_key_payload_t {
+ uint32_t session_id;
+ uint32_t reserved0;
+ uint32_t reserved1;
+ uint32_t key_id;
+} fcs_cs_key_payload;
+
+typedef struct fcs_crypto_service_data_t {
+ uint32_t session_id;
+ uint32_t context_id;
+ uint32_t key_id;
+ uint32_t crypto_param_size;
+ uint64_t crypto_param;
+ uint8_t is_updated;
+} fcs_crypto_service_data;
+
+typedef struct fcs_crypto_service_aes_data_t {
+ uint32_t session_id;
+ uint32_t context_id;
+ uint32_t param_size;
+ uint32_t key_id;
+ uint32_t crypto_param[7];
+ uint8_t is_updated;
+} fcs_crypto_service_aes_data;
+
+/* Functions Definitions */
+
+uint32_t intel_fcs_random_number_gen(uint64_t addr, uint64_t *ret_size,
+ uint32_t *mbox_error);
+int intel_fcs_random_number_gen_ext(uint32_t session_id, uint32_t context_id,
+ uint32_t size, uint32_t *send_id);
+uint32_t intel_fcs_send_cert(uint64_t addr, uint64_t size,
+ uint32_t *send_id);
+uint32_t intel_fcs_get_provision_data(uint32_t *send_id);
+uint32_t intel_fcs_cntr_set_preauth(uint8_t counter_type,
+ int32_t counter_value,
+ uint32_t test_bit,
+ uint32_t *mbox_error);
+uint32_t intel_fcs_encryption(uint32_t src_addr, uint32_t src_size,
+ uint32_t dst_addr, uint32_t dst_size,
+ uint32_t *send_id);
+
+uint32_t intel_fcs_decryption(uint32_t src_addr, uint32_t src_size,
+ uint32_t dst_addr, uint32_t dst_size,
+ uint32_t *send_id);
+
+int intel_fcs_encryption_ext(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint32_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error);
+int intel_fcs_decryption_ext(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint32_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error);
+
+int intel_fcs_sigma_teardown(uint32_t session_id, uint32_t *mbox_error);
+int intel_fcs_chip_id(uint32_t *id_low, uint32_t *id_high, uint32_t *mbox_error);
+int intel_fcs_attestation_subkey(uint64_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error);
+int intel_fcs_get_measurement(uint64_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error);
+uint32_t intel_fcs_get_rom_patch_sha384(uint64_t addr, uint64_t *ret_size,
+ uint32_t *mbox_error);
+
+int intel_fcs_create_cert_on_reload(uint32_t cert_request,
+ uint32_t *mbox_error);
+int intel_fcs_get_attestation_cert(uint32_t cert_request, uint64_t dst_addr,
+ uint32_t *dst_size, uint32_t *mbox_error);
+
+int intel_fcs_open_crypto_service_session(uint32_t *session_id,
+ uint32_t *mbox_error);
+int intel_fcs_close_crypto_service_session(uint32_t session_id,
+ uint32_t *mbox_error);
+
+int intel_fcs_import_crypto_service_key(uint64_t src_addr, uint32_t src_size,
+ uint32_t *mbox_error);
+int intel_fcs_export_crypto_service_key(uint32_t session_id, uint32_t key_id,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error);
+int intel_fcs_remove_crypto_service_key(uint32_t session_id, uint32_t key_id,
+ uint32_t *mbox_error);
+int intel_fcs_get_crypto_service_key_info(uint32_t session_id, uint32_t key_id,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error);
+
+int intel_fcs_get_digest_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_get_digest_update_finalize(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint8_t is_finalised, uint32_t *mbox_error);
+
+int intel_fcs_mac_verify_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_mac_verify_update_finalize(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t data_size, uint8_t is_finalised,
+ uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_hash_sign_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdsa_hash_sign_finalize(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_hash_sig_verify_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdsa_hash_sig_verify_finalize(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_sha2_data_sign_init(uint32_t session_id,
+ uint32_t context_id, uint32_t key_id,
+ uint32_t param_size, uint64_t param_data,
+ uint32_t *mbox_error);
+int intel_fcs_ecdsa_sha2_data_sign_update_finalize(uint32_t session_id,
+ uint32_t context_id, uint32_t src_addr,
+ uint32_t src_size, uint64_t dst_addr,
+ uint32_t *dst_size, uint8_t is_finalised,
+ uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_sha2_data_sig_verify_init(uint32_t session_id,
+ uint32_t context_id, uint32_t key_id,
+ uint32_t param_size, uint64_t param_data,
+ uint32_t *mbox_error);
+int intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(uint32_t session_id,
+ uint32_t context_id, uint32_t src_addr,
+ uint32_t src_size, uint64_t dst_addr,
+ uint32_t *dst_size, uint32_t data_size,
+ uint8_t is_finalised, uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_get_pubkey_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdsa_get_pubkey_finalize(uint32_t session_id, uint32_t context_id,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error);
+
+int intel_fcs_ecdh_request_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdh_request_finalize(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error);
+
+int intel_fcs_aes_crypt_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint64_t param_addr,
+ uint32_t param_size, uint32_t *mbox_error);
+int intel_fcs_aes_crypt_update_finalize(uint32_t session_id,
+ uint32_t context_id, uint64_t src_addr,
+ uint32_t src_size, uint64_t dst_addr,
+ uint32_t dst_size, uint8_t is_finalised,
+ uint32_t *send_id);
+
+#endif /* SOCFPGA_FCS_H */
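Editor's note: as a usage illustration of the API above, the sketch below queries the device chip ID and reports the mailbox error code on failure. The wrapper name is hypothetical; the real callers are the SiP SMC handlers elsewhere in the platform port.

#include <common/debug.h>
#include "socfpga_fcs.h"
#include "socfpga_sip_svc.h"

static void example_query_chip_id(void)
{
	uint32_t id_low, id_high, mbox_err;

	if (intel_fcs_chip_id(&id_low, &id_high, &mbox_err) !=
	    INTEL_SIP_SMC_STATUS_OK) {
		/* mbox_err carries the SDM mailbox error code. */
		ERROR("FCS CHIP_ID failed, mailbox error 0x%x\n", mbox_err);
		return;
	}

	INFO("Chip ID: 0x%x 0x%x\n", id_high, id_low);
}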
diff --git a/plat/intel/soc/common/include/socfpga_handoff.h b/plat/intel/soc/common/include/socfpga_handoff.h
new file mode 100644
index 0000000..ba0f7f3
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_handoff.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef HANDOFF_H
+#define HANDOFF_H
+
+#define HANDOFF_MAGIC_HEADER 0x424f4f54 /* BOOT */
+#define HANDOFF_MAGIC_PINMUX_SEL 0x504d5558 /* PMUX */
+#define HANDOFF_MAGIC_IOCTLR 0x494f4354 /* IOCT */
+#define HANDOFF_MAGIC_FPGA 0x46504741 /* FPGA */
+#define HANDOFF_MAGIC_IODELAY 0x444c4159 /* DLAY */
+#define HANDOFF_MAGIC_CLOCK 0x434c4b53 /* CLKS */
+#define HANDOFF_MAGIC_MISC 0x4d495343 /* MISC */
+
+#include <socfpga_plat_def.h>
+
+typedef struct handoff_t {
+ /* header */
+ uint32_t header_magic;
+ uint32_t header_device;
+ uint32_t _pad_0x08_0x10[2];
+
+ /* pinmux configuration - select */
+ uint32_t pinmux_sel_magic;
+ uint32_t pinmux_sel_length;
+ uint32_t _pad_0x18_0x20[2];
+ uint32_t pinmux_sel_array[96]; /* offset, value */
+
+ /* pinmux configuration - io control */
+ uint32_t pinmux_io_magic;
+ uint32_t pinmux_io_length;
+ uint32_t _pad_0x1a8_0x1b0[2];
+ uint32_t pinmux_io_array[96]; /* offset, value */
+
+ /* pinmux configuration - use fpga switch */
+ uint32_t pinmux_fpga_magic;
+ uint32_t pinmux_fpga_length;
+ uint32_t _pad_0x338_0x340[2];
+ uint32_t pinmux_fpga_array[42]; /* offset, value */
+ uint32_t _pad_0x3e8_0x3f0[2];
+
+ /* pinmux configuration - io delay */
+ uint32_t pinmux_delay_magic;
+ uint32_t pinmux_delay_length;
+ uint32_t _pad_0x3f8_0x400[2];
+ uint32_t pinmux_iodelay_array[96]; /* offset, value */
+
+ /* clock configuration */
+
+#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
+ uint32_t clock_magic;
+ uint32_t clock_length;
+ uint32_t _pad_0x588_0x590[2];
+ uint32_t main_pll_mpuclk;
+ uint32_t main_pll_nocclk;
+ uint32_t main_pll_cntr2clk;
+ uint32_t main_pll_cntr3clk;
+ uint32_t main_pll_cntr4clk;
+ uint32_t main_pll_cntr5clk;
+ uint32_t main_pll_cntr6clk;
+ uint32_t main_pll_cntr7clk;
+ uint32_t main_pll_cntr8clk;
+ uint32_t main_pll_cntr9clk;
+ uint32_t main_pll_nocdiv;
+ uint32_t main_pll_pllglob;
+ uint32_t main_pll_fdbck;
+ uint32_t main_pll_pllc0;
+ uint32_t main_pll_pllc1;
+ uint32_t _pad_0x5cc_0x5d0[1];
+ uint32_t per_pll_cntr2clk;
+ uint32_t per_pll_cntr3clk;
+ uint32_t per_pll_cntr4clk;
+ uint32_t per_pll_cntr5clk;
+ uint32_t per_pll_cntr6clk;
+ uint32_t per_pll_cntr7clk;
+ uint32_t per_pll_cntr8clk;
+ uint32_t per_pll_cntr9clk;
+ uint32_t per_pll_emacctl;
+ uint32_t per_pll_gpiodiv;
+ uint32_t per_pll_pllglob;
+ uint32_t per_pll_fdbck;
+ uint32_t per_pll_pllc0;
+ uint32_t per_pll_pllc1;
+ uint32_t hps_osc_clk_h;
+ uint32_t fpga_clk_hz;
+#elif PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX
+ uint32_t clock_magic;
+ uint32_t clock_length;
+ uint32_t _pad_0x588_0x590[2];
+ uint32_t main_pll_mpuclk;
+ uint32_t main_pll_nocclk;
+ uint32_t main_pll_nocdiv;
+ uint32_t main_pll_pllglob;
+ uint32_t main_pll_fdbck;
+ uint32_t main_pll_pllc0;
+ uint32_t main_pll_pllc1;
+ uint32_t main_pll_pllc2;
+ uint32_t main_pll_pllc3;
+ uint32_t main_pll_pllm;
+ uint32_t per_pll_emacctl;
+ uint32_t per_pll_gpiodiv;
+ uint32_t per_pll_pllglob;
+ uint32_t per_pll_fdbck;
+ uint32_t per_pll_pllc0;
+ uint32_t per_pll_pllc1;
+ uint32_t per_pll_pllc2;
+ uint32_t per_pll_pllc3;
+ uint32_t per_pll_pllm;
+ uint32_t alt_emacactr;
+ uint32_t alt_emacbctr;
+ uint32_t alt_emacptpctr;
+ uint32_t alt_gpiodbctr;
+ uint32_t alt_sdmmcctr;
+ uint32_t alt_s2fuser0ctr;
+ uint32_t alt_s2fuser1ctr;
+ uint32_t alt_psirefctr;
+ uint32_t hps_osc_clk_h;
+ uint32_t fpga_clk_hz;
+ uint32_t _pad_0x604_0x610[3];
+#endif
+ /* misc configuration */
+ uint32_t misc_magic;
+ uint32_t misc_length;
+ uint32_t _pad_0x618_0x620[2];
+} handoff;
+
+int verify_handoff_image(handoff *hoff_ptr, handoff *reverse_hoff_ptr);
+int socfpga_get_handoff(handoff *hoff_ptr);
+
+#endif /* HANDOFF_H */
+
+
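Editor's note: a minimal sketch of how the magic words above can gate use of a handoff image (illustrative only; verify_handoff_image() declared above performs the real validation):

#include "socfpga_handoff.h"

static int example_handoff_looks_valid(const handoff *hoff)
{
	/* Reject images whose header or pinmux-select section is untagged. */
	if (hoff->header_magic != HANDOFF_MAGIC_HEADER) {
		return 0;
	}
	if (hoff->pinmux_sel_magic != HANDOFF_MAGIC_PINMUX_SEL) {
		return 0;
	}
	return 1;
}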
diff --git a/plat/intel/soc/common/include/socfpga_mailbox.h b/plat/intel/soc/common/include/socfpga_mailbox.h
new file mode 100644
index 0000000..1f4b2a4
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_mailbox.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_MBOX_H
+#define SOCFPGA_MBOX_H
+
+#include <lib/utils_def.h>
+
+
+#define MBOX_OFFSET 0xffa30000
+
+#define MBOX_ATF_CLIENT_ID 0x1U
+#define MBOX_MAX_JOB_ID 0xFU
+#define MBOX_MAX_IND_JOB_ID (MBOX_MAX_JOB_ID - 1U)
+#define MBOX_JOB_ID MBOX_MAX_JOB_ID
+#define MBOX_TEST_BIT BIT(31)
+
+/* Mailbox Shared Memory Register Map */
+#define MBOX_CIN 0x00
+#define MBOX_ROUT 0x04
+#define MBOX_URG 0x08
+#define MBOX_INT 0x0C
+#define MBOX_COUT 0x20
+#define MBOX_RIN 0x24
+#define MBOX_STATUS 0x2C
+#define MBOX_CMD_BUFFER 0x40
+#define MBOX_RESP_BUFFER 0xC0
+
+/* Mailbox SDM doorbell */
+#define MBOX_DOORBELL_TO_SDM 0x400
+#define MBOX_DOORBELL_FROM_SDM 0x480
+
+
+/* Mailbox commands */
+
+#define MBOX_CMD_NOOP 0x00
+#define MBOX_CMD_SYNC 0x01
+#define MBOX_CMD_RESTART 0x02
+#define MBOX_CMD_CANCEL 0x03
+#define MBOX_CMD_VAB_SRC_CERT 0x0B
+#define MBOX_CMD_GET_IDCODE 0x10
+#define MBOX_CMD_GET_USERCODE 0x13
+#define MBOX_CMD_GET_CHIPID 0x12
+#define MBOX_CMD_REBOOT_HPS 0x47
+
+/* Reconfiguration Commands */
+#define MBOX_CONFIG_STATUS 0x04
+#define MBOX_RECONFIG 0x06
+#define MBOX_RECONFIG_DATA 0x08
+#define MBOX_RECONFIG_STATUS 0x09
+
+/* HWMON Commands */
+#define MBOX_HWMON_READVOLT 0x18
+#define MBOX_HWMON_READTEMP 0x19
+
+
+/* QSPI Commands */
+#define MBOX_CMD_QSPI_OPEN 0x32
+#define MBOX_CMD_QSPI_CLOSE 0x33
+#define MBOX_CMD_QSPI_SET_CS 0x34
+#define MBOX_CMD_QSPI_DIRECT 0x3B
+
+/* RSU Commands */
+#define MBOX_GET_SUBPARTITION_TABLE 0x5A
+#define MBOX_RSU_STATUS 0x5B
+#define MBOX_RSU_UPDATE 0x5C
+#define MBOX_HPS_STAGE_NOTIFY 0x5D
+
+/* FCS Command */
+#define MBOX_FCS_GET_PROVISION 0x7B
+#define MBOX_FCS_CNTR_SET_PREAUTH 0x7C
+#define MBOX_FCS_ENCRYPT_REQ 0x7E
+#define MBOX_FCS_DECRYPT_REQ 0x7F
+#define MBOX_FCS_RANDOM_GEN 0x80
+#define MBOX_FCS_AES_CRYPT_REQ 0x81
+#define MBOX_FCS_GET_DIGEST_REQ 0x82
+#define MBOX_FCS_MAC_VERIFY_REQ 0x83
+#define MBOX_FCS_ECDSA_HASH_SIGN_REQ 0x84
+#define MBOX_FCS_ECDSA_SHA2_DATA_SIGN_REQ 0x85
+#define MBOX_FCS_ECDSA_HASH_SIG_VERIFY 0x86
+#define MBOX_FCS_ECDSA_SHA2_DATA_SIGN_VERIFY 0x87
+#define MBOX_FCS_ECDSA_GET_PUBKEY 0x88
+#define MBOX_FCS_ECDH_REQUEST 0x89
+#define MBOX_FCS_OPEN_CS_SESSION 0xA0
+#define MBOX_FCS_CLOSE_CS_SESSION 0xA1
+#define MBOX_FCS_IMPORT_CS_KEY 0xA5
+#define MBOX_FCS_EXPORT_CS_KEY 0xA6
+#define MBOX_FCS_REMOVE_CS_KEY 0xA7
+#define MBOX_FCS_GET_CS_KEY_INFO 0xA8
+
+/* PSG SIGMA Commands */
+#define MBOX_PSG_SIGMA_TEARDOWN 0xD5
+
+/* Attestation Commands */
+#define MBOX_CREATE_CERT_ON_RELOAD 0x180
+#define MBOX_GET_ATTESTATION_CERT 0x181
+#define MBOX_ATTESTATION_SUBKEY 0x182
+#define MBOX_GET_MEASUREMENT 0x183
+
+/* Miscellaneous commands */
+#define MBOX_GET_ROM_PATCH_SHA384 0x1B0
+
+/* Mailbox Definitions */
+
+#define CMD_DIRECT 0
+#define CMD_INDIRECT 1
+#define CMD_CASUAL 0
+#define CMD_URGENT 1
+
+#define MBOX_WORD_BYTE 4U
+#define MBOX_RESP_BUFFER_SIZE 16
+#define MBOX_CMD_BUFFER_SIZE 32
+#define MBOX_INC_HEADER_MAX_WORD_SIZE 1024U
+
+/* Execution states for HPS_STAGE_NOTIFY */
+#define HPS_EXECUTION_STATE_FSBL 0
+#define HPS_EXECUTION_STATE_SSBL 1
+#define HPS_EXECUTION_STATE_OS 2
+
+/* Status Response */
+#define MBOX_RET_OK 0
+#define MBOX_RET_ERROR -1
+#define MBOX_NO_RESPONSE -2
+#define MBOX_WRONG_ID -3
+#define MBOX_BUFFER_FULL -4
+#define MBOX_BUSY -5
+#define MBOX_TIMEOUT -2047
+
+/* Reconfig Status Response */
+#define RECONFIG_STATUS_STATE 0
+#define RECONFIG_STATUS_PIN_STATUS 2
+#define RECONFIG_STATUS_SOFTFUNC_STATUS 3
+#define PIN_STATUS_NSTATUS (U(1) << 31)
+#define SOFTFUNC_STATUS_SEU_ERROR (1 << 3)
+#define SOFTFUNC_STATUS_INIT_DONE (1 << 1)
+#define SOFTFUNC_STATUS_CONF_DONE (1 << 0)
+#define MBOX_CFGSTAT_STATE_IDLE 0x00000000
+#define MBOX_CFGSTAT_STATE_CONFIG 0x10000000
+#define MBOX_CFGSTAT_STATE_FAILACK 0x08000000
+#define MBOX_CFGSTAT_STATE_ERROR_INVALID 0xf0000001
+#define MBOX_CFGSTAT_STATE_ERROR_CORRUPT 0xf0000002
+#define MBOX_CFGSTAT_STATE_ERROR_AUTH 0xf0000003
+#define MBOX_CFGSTAT_STATE_ERROR_CORE_IO 0xf0000004
+#define MBOX_CFGSTAT_STATE_ERROR_HARDWARE 0xf0000005
+#define MBOX_CFGSTAT_STATE_ERROR_FAKE 0xf0000006
+#define MBOX_CFGSTAT_STATE_ERROR_BOOT_INFO 0xf0000007
+#define MBOX_CFGSTAT_STATE_ERROR_QSPI_ERROR 0xf0000008
+
+
+/* Mailbox Macros */
+
+#define MBOX_ENTRY_TO_ADDR(_buf, ptr) (MBOX_OFFSET + (MBOX_##_buf##_BUFFER) \
+ + MBOX_WORD_BYTE * (ptr))
+
+/* Mailbox interrupt flags and masks */
+#define MBOX_INT_FLAG_COE 0x1
+#define MBOX_INT_FLAG_RIE 0x2
+#define MBOX_INT_FLAG_UAE 0x100
+#define MBOX_COE_BIT(INTERRUPT) ((INTERRUPT) & 0x3)
+#define MBOX_UAE_BIT(INTERRUPT) (((INTERRUPT) & (1<<8)))
+
+/* Mailbox response and status */
+#define MBOX_RESP_ERR(BUFFER) ((BUFFER) & 0x000007ff)
+#define MBOX_RESP_LEN(BUFFER) (((BUFFER) & 0x007ff000) >> 12)
+#define MBOX_RESP_CLIENT_ID(BUFFER) (((BUFFER) & 0xf0000000) >> 28)
+#define MBOX_RESP_JOB_ID(BUFFER) (((BUFFER) & 0x0f000000) >> 24)
+#define MBOX_STATUS_UA_MASK (1<<8)
+
+/* Mailbox command and response */
+#define MBOX_CLIENT_ID_CMD(CLIENT_ID) ((CLIENT_ID) << 28)
+#define MBOX_JOB_ID_CMD(JOB_ID) (JOB_ID<<24)
+#define MBOX_CMD_LEN_CMD(CMD_LEN) ((CMD_LEN) << 12)
+#define MBOX_INDIRECT(val) ((val) << 11)
+#define MBOX_CMD_MASK(header) ((header) & 0x7ff)
+
+/* Mailbox payload */
+#define MBOX_DATA_MAX_LEN 0x3ff
+#define MBOX_PAYLOAD_FLAG_BUSY BIT(0)
+
+/* RSU Macros */
+#define RSU_VERSION_ACMF BIT(8)
+#define RSU_VERSION_ACMF_MASK 0xff00
+
+/* Config Status Macros */
+#define CONFIG_STATUS_WORD_SIZE 16U
+#define CONFIG_STATUS_FW_VER_OFFSET 1
+#define CONFIG_STATUS_FW_VER_MASK 0x00FFFFFF
+
+/* Data structure */
+
+typedef struct mailbox_payload {
+ uint32_t header;
+ uint32_t data[MBOX_DATA_MAX_LEN];
+} mailbox_payload_t;
+
+typedef struct mailbox_container {
+ uint32_t flag;
+ uint32_t index;
+ mailbox_payload_t *payload;
+} mailbox_container_t;
+
+/* Mailbox Function Definitions */
+
+void mailbox_set_int(uint32_t interrupt_input);
+int mailbox_init(void);
+void mailbox_set_qspi_close(void);
+void mailbox_hps_qspi_enable(void);
+
+int mailbox_send_cmd(uint32_t job_id, uint32_t cmd, uint32_t *args,
+ unsigned int len, uint32_t urgent, uint32_t *response,
+ unsigned int *resp_len);
+int mailbox_send_cmd_async(uint32_t *job_id, uint32_t cmd, uint32_t *args,
+ unsigned int len, unsigned int indirect);
+int mailbox_send_cmd_async_ext(uint32_t header_cmd, uint32_t *args,
+ unsigned int len);
+int mailbox_read_response(uint32_t *job_id, uint32_t *response,
+ unsigned int *resp_len);
+int mailbox_read_response_async(uint32_t *job_id, uint32_t *header,
+ uint32_t *response, unsigned int *resp_len,
+ uint8_t ignore_client_id);
+int iterate_resp(uint32_t mbox_resp_len, uint32_t *resp_buf,
+ unsigned int *resp_len);
+
+void mailbox_reset_cold(void);
+void mailbox_clear_response(void);
+
+int intel_mailbox_get_config_status(uint32_t cmd, bool init_done);
+int intel_mailbox_is_fpga_not_ready(void);
+
+int mailbox_rsu_get_spt_offset(uint32_t *resp_buf, uint32_t resp_buf_len);
+int mailbox_rsu_status(uint32_t *resp_buf, uint32_t resp_buf_len);
+int mailbox_rsu_update(uint32_t *flash_offset);
+int mailbox_hps_stage_notify(uint32_t execution_stage);
+int mailbox_hwmon_readtemp(uint32_t chan, uint32_t *resp_buf);
+int mailbox_hwmon_readvolt(uint32_t chan, uint32_t *resp_buf);
+
+#endif /* SOCFPGA_MBOX_H */
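Editor's note: the command-header macros above combine into a single 32-bit word sent to the SDM. A hedged sketch of the packing (helper name is illustrative; the mailbox driver in this patch performs the equivalent composition):

#include <stdint.h>
#include "socfpga_mailbox.h"

static uint32_t example_mbox_header(uint32_t cmd, uint32_t job_id,
				    uint32_t len, uint32_t indirect)
{
	/* Client ID, job ID, payload length, indirect flag and command code
	 * each occupy their own field of the header word. */
	return MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) |
	       MBOX_JOB_ID_CMD(job_id) |
	       MBOX_CMD_LEN_CMD(len) |
	       MBOX_INDIRECT(indirect) |
	       MBOX_CMD_MASK(cmd);
}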
diff --git a/plat/intel/soc/common/include/socfpga_noc.h b/plat/intel/soc/common/include/socfpga_noc.h
new file mode 100644
index 0000000..e3c0f73
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_noc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_NOC_H
+#define SOCFPGA_NOC_H
+
+/* Macros */
+#define SCR_AXI_AP_MASK BIT(24)
+#define SCR_FPGA2SOC_MASK BIT(16)
+#define SCR_MPU_MASK BIT(0)
+#define DISABLE_L4_FIREWALL (SCR_AXI_AP_MASK | SCR_FPGA2SOC_MASK \
+ | SCR_MPU_MASK)
+#define DISABLE_BRIDGE_FIREWALL 0x0ffe0101
+
+#define SOCFPGA_CCU_NOC(_ctrl, _dev) (SOCFPGA_CCU_NOC_REG_BASE \
+ + (SOCFPGA_CCU_NOC_##_ctrl##_##_dev))
+
+#define SOCFPGA_L4_PER_SCR(_reg) (SOCFPGA_L4_PER_SCR_REG_BASE \
+ + (SOCFPGA_NOC_FW_L4_PER_SCR_##_reg))
+
+#define SOCFPGA_L4_SYS_SCR(_reg) (SOCFPGA_L4_SYS_SCR_REG_BASE \
+ + (SOCFPGA_NOC_FW_L4_SYS_SCR_##_reg))
+
+/* L3 Interconnect Register Map */
+#define SOCFPGA_NOC_FW_L4_PER_SCR_NAND_REGISTER 0x0000
+#define SOCFPGA_NOC_FW_L4_PER_SCR_NAND_DATA 0x0004
+#define SOCFPGA_NOC_FW_L4_PER_SCR_USB0_REGISTER 0x000c
+#define SOCFPGA_NOC_FW_L4_PER_SCR_USB1_REGISTER 0x0010
+#define SOCFPGA_NOC_FW_L4_PER_SCR_SPI_MASTER0 0x001c
+#define SOCFPGA_NOC_FW_L4_PER_SCR_SPI_MASTER1 0x0020
+#define SOCFPGA_NOC_FW_L4_PER_SCR_SPI_SLAVE0 0x0024
+#define SOCFPGA_NOC_FW_L4_PER_SCR_SPI_SLAVE1 0x0028
+#define SOCFPGA_NOC_FW_L4_PER_SCR_EMAC0 0x002c
+#define SOCFPGA_NOC_FW_L4_PER_SCR_EMAC1 0x0030
+#define SOCFPGA_NOC_FW_L4_PER_SCR_EMAC2 0x0034
+#define SOCFPGA_NOC_FW_L4_PER_SCR_SDMMC 0x0040
+#define SOCFPGA_NOC_FW_L4_PER_SCR_GPIO0 0x0044
+#define SOCFPGA_NOC_FW_L4_PER_SCR_GPIO1 0x0048
+#define SOCFPGA_NOC_FW_L4_PER_SCR_I2C0 0x0050
+#define SOCFPGA_NOC_FW_L4_PER_SCR_I2C1 0x0054
+#define SOCFPGA_NOC_FW_L4_PER_SCR_I2C2 0x0058
+#define SOCFPGA_NOC_FW_L4_PER_SCR_I2C3 0x005c
+#define SOCFPGA_NOC_FW_L4_PER_SCR_I2C4 0x0060
+#define SOCFPGA_NOC_FW_L4_PER_SCR_SP_TIMER0 0x0064
+#define SOCFPGA_NOC_FW_L4_PER_SCR_SP_TIMER1 0x0068
+#define SOCFPGA_NOC_FW_L4_PER_SCR_UART0 0x006c
+#define SOCFPGA_NOC_FW_L4_PER_SCR_UART1 0x0070
+
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_DMA_ECC 0x0008
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC0RX_ECC 0x000c
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC0TX_ECC 0x0010
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC1RX_ECC 0x0014
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC1TX_ECC 0x0018
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC2RX_ECC 0x001c
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC2TX_ECC 0x0020
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_NAND_ECC 0x002c
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_NAND_READ_ECC 0x0030
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_NAND_WRITE_ECC 0x0034
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_OCRAM_ECC 0x0038
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_SDMMC_ECC 0x0040
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_USB0_ECC 0x0044
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_USB1_ECC 0x0048
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_CLK_MGR 0x004c
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_IO_MGR 0x0054
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_RST_MGR 0x0058
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_SYS_MGR 0x005c
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_OSC0_TIMER 0x0060
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_OSC1_TIMER 0x0064
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG0 0x0068
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG1 0x006c
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG2 0x0070
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG3 0x0074
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_DAP 0x0078
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_L4_NOC_PROBES 0x0090
+#define SOCFPGA_NOC_FW_L4_SYS_SCR_L4_NOC_QOS 0x0094
+
+/* CCU NOC Register Map */
+
+#define SOCFPGA_CCU_NOC_CPU0_RAM0 0x04688
+#define SOCFPGA_CCU_NOC_IOM_RAM0 0x18628
+
+#define SOCFPGA_CCU_NOC_ADMASK_P_MASK BIT(0)
+#define SOCFPGA_CCU_NOC_ADMASK_NS_MASK BIT(1)
+
+/* Function Definitions */
+
+void enable_ns_peripheral_access(void);
+void enable_ns_bridge_access(void);
+void enable_ns_ocram_access(void);
+void enable_ocram_firewall(void);
+
+#endif /* SOCFPGA_NOC_H */
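Editor's note: a hedged sketch of opening one L4 peripheral firewall with the macros above (function name illustrative; enable_ns_peripheral_access() declared above walks all peripherals in the actual port):

#include <lib/mmio.h>
#include "socfpga_plat_def.h"
#include "socfpga_noc.h"

static void example_open_uart0_firewall(void)
{
	/* Setting the MPU, FPGA2SOC and AXI-AP bits in the UART0 security
	 * control register lets those masters through the L4 firewall. */
	mmio_write_32(SOCFPGA_L4_PER_SCR(UART0), DISABLE_L4_FIREWALL);
}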
diff --git a/plat/intel/soc/common/include/socfpga_private.h b/plat/intel/soc/common/include/socfpga_private.h
new file mode 100644
index 0000000..9d389e3
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_private.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_PRIVATE_H
+#define SOCFPGA_PRIVATE_H
+
+#include "socfpga_plat_def.h"
+
+#define EMMC_DESC_SIZE (1<<20)
+
+#define EMMC_INIT_PARAMS(base, clk) \
+ { .bus_width = MMC_BUS_WIDTH_4, \
+ .clk_rate = (clk), \
+ .desc_base = (base), \
+ .desc_size = EMMC_DESC_SIZE, \
+ .flags = 0, \
+ .reg_base = SOCFPGA_MMC_REG_BASE \
+ }
+
+typedef enum {
+ BOOT_SOURCE_FPGA = 0,
+ BOOT_SOURCE_SDMMC,
+ BOOT_SOURCE_NAND,
+ BOOT_SOURCE_RSVD,
+ BOOT_SOURCE_QSPI
+} boot_source_type;
+
+/*******************************************************************************
+ * Function and variable prototypes
+ ******************************************************************************/
+
+void enable_nonsecure_access(void);
+
+void socfpga_io_setup(int boot_source);
+
+void socfgpa_configure_mmu_el3(unsigned long total_base,
+ unsigned long total_size,
+ unsigned long ro_start,
+ unsigned long ro_limit,
+ unsigned long coh_start,
+ unsigned long coh_limit);
+
+
+void socfpga_configure_mmu_el1(unsigned long total_base,
+ unsigned long total_size,
+ unsigned long ro_start,
+ unsigned long ro_limit,
+ unsigned long coh_start,
+ unsigned long coh_limit);
+
+void socfpga_delay_timer_init(void);
+
+void socfpga_gic_driver_init(void);
+
+void socfpga_delay_timer_init_args(void);
+
+uint32_t socfpga_get_spsr_for_bl32_entry(void);
+
+uint32_t socfpga_get_spsr_for_bl33_entry(void);
+
+unsigned long socfpga_get_ns_image_entrypoint(void);
+
+void plat_secondary_cpus_bl31_entry(void);
+
+#endif /* SOCFPGA_PRIVATE_H */
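Editor's note: EMMC_INIT_PARAMS builds a Synopsys DesignWare MMC parameter block. The sketch below shows one way it might be consumed, under the assumption that the generic TF-A dw_mmc driver and mmc_device_info structure are used; the wrapper name, its arguments, and the SD device type are illustrative, and socfpga_io_setup() owns the real boot-source initialisation.

#include <drivers/mmc.h>
#include <drivers/synopsys/dw_mmc.h>
#include "socfpga_private.h"

static void example_mmc_setup(uintptr_t desc_base, int clk_rate)
{
	/* Assumed device type; the platform's storage code decides the
	 * actual card type and descriptor placement. */
	static struct mmc_device_info mmc_info = {
		.mmc_dev_type = MMC_IS_SD,
	};
	dw_mmc_params_t params = EMMC_INIT_PARAMS(desc_base, clk_rate);

	dw_mmc_init(&params, &mmc_info);
}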
diff --git a/plat/intel/soc/common/include/socfpga_reset_manager.h b/plat/intel/soc/common/include/socfpga_reset_manager.h
new file mode 100644
index 0000000..cce16ab
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_reset_manager.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_RESETMANAGER_H
+#define SOCFPGA_RESETMANAGER_H
+
+#include "socfpga_plat_def.h"
+
+#define SOCFPGA_BRIDGE_ENABLE BIT(0)
+#define SOCFPGA_BRIDGE_HAS_MASK BIT(1)
+
+#define SOC2FPGA_MASK (1<<0)
+#define LWHPS2FPGA_MASK (1<<1)
+#define FPGA2SOC_MASK (1<<2)
+#define F2SDRAM0_MASK (1<<3)
+#define F2SDRAM1_MASK (1<<4)
+#define F2SDRAM2_MASK (1<<5)
+
+/* Register Mapping */
+
+#define SOCFPGA_RSTMGR_STAT 0x000
+#define SOCFPGA_RSTMGR_HDSKEN 0x010
+#define SOCFPGA_RSTMGR_HDSKREQ 0x014
+#define SOCFPGA_RSTMGR_HDSKACK 0x018
+#define SOCFPGA_RSTMGR_MPUMODRST 0x020
+#define SOCFPGA_RSTMGR_PER0MODRST 0x024
+#define SOCFPGA_RSTMGR_PER1MODRST 0x028
+#define SOCFPGA_RSTMGR_BRGMODRST 0x02c
+#define SOCFPGA_RSTMGR_COLDMODRST 0x034
+#define SOCFPGA_RSTMGR_HDSKTIMEOUT 0x064
+
+/* Field Mapping */
+
+#define RSTMGR_PER0MODRST_EMAC0 0x00000001
+#define RSTMGR_PER0MODRST_EMAC1 0x00000002
+#define RSTMGR_PER0MODRST_EMAC2 0x00000004
+#define RSTMGR_PER0MODRST_USB0 0x00000008
+#define RSTMGR_PER0MODRST_USB1 0x00000010
+#define RSTMGR_PER0MODRST_NAND 0x00000020
+#define RSTMGR_PER0MODRST_SDMMC 0x00000080
+#define RSTMGR_PER0MODRST_EMAC0OCP 0x00000100
+#define RSTMGR_PER0MODRST_EMAC1OCP 0x00000200
+#define RSTMGR_PER0MODRST_EMAC2OCP 0x00000400
+#define RSTMGR_PER0MODRST_USB0OCP 0x00000800
+#define RSTMGR_PER0MODRST_USB1OCP 0x00001000
+#define RSTMGR_PER0MODRST_NANDOCP 0x00002000
+#define RSTMGR_PER0MODRST_SDMMCOCP 0x00008000
+#define RSTMGR_PER0MODRST_DMA 0x00010000
+#define RSTMGR_PER0MODRST_SPIM0 0x00020000
+#define RSTMGR_PER0MODRST_SPIM1 0x00040000
+#define RSTMGR_PER0MODRST_SPIS0 0x00080000
+#define RSTMGR_PER0MODRST_SPIS1 0x00100000
+#define RSTMGR_PER0MODRST_DMAOCP 0x00200000
+#define RSTMGR_PER0MODRST_EMACPTP 0x00400000
+#define RSTMGR_PER0MODRST_DMAIF0 0x01000000
+#define RSTMGR_PER0MODRST_DMAIF1 0x02000000
+#define RSTMGR_PER0MODRST_DMAIF2 0x04000000
+#define RSTMGR_PER0MODRST_DMAIF3 0x08000000
+#define RSTMGR_PER0MODRST_DMAIF4 0x10000000
+#define RSTMGR_PER0MODRST_DMAIF5 0x20000000
+#define RSTMGR_PER0MODRST_DMAIF6 0x40000000
+#define RSTMGR_PER0MODRST_DMAIF7 0x80000000
+
+#define RSTMGR_PER1MODRST_WATCHDOG0 0x00000001
+#define RSTMGR_PER1MODRST_WATCHDOG1 0x00000002
+#define RSTMGR_PER1MODRST_WATCHDOG2 0x00000004
+#define RSTMGR_PER1MODRST_WATCHDOG3 0x00000008
+#define RSTMGR_PER1MODRST_L4SYSTIMER0 0x00000010
+#define RSTMGR_PER1MODRST_L4SYSTIMER1 0x00000020
+#define RSTMGR_PER1MODRST_SPTIMER0 0x00000040
+#define RSTMGR_PER1MODRST_SPTIMER1 0x00000080
+#define RSTMGR_PER1MODRST_I2C0 0x00000100
+#define RSTMGR_PER1MODRST_I2C1 0x00000200
+#define RSTMGR_PER1MODRST_I2C2 0x00000400
+#define RSTMGR_PER1MODRST_I2C3 0x00000800
+#define RSTMGR_PER1MODRST_I2C4 0x00001000
+#define RSTMGR_PER1MODRST_UART0 0x00010000
+#define RSTMGR_PER1MODRST_UART1 0x00020000
+#define RSTMGR_PER1MODRST_GPIO0 0x01000000
+#define RSTMGR_PER1MODRST_GPIO1 0x02000000
+
+#define RSTMGR_HDSKEN_FPGAHSEN 0x00000004
+#define RSTMGR_HDSKEN_ETRSTALLEN 0x00000008
+#define RSTMGR_HDSKEN_L2FLUSHEN 0x00000100
+#define RSTMGR_HDSKEN_L3NOC_DBG 0x00010000
+#define RSTMGR_HDSKEN_DEBUG_L3NOC 0x00020000
+#define RSTMGR_HDSKEN_SDRSELFREFEN 0x00000001
+
+#define RSTMGR_HDSKEQ_FPGAHSREQ 0x4
+
+#define RSTMGR_BRGMODRST_SOC2FPGA 0x1
+#define RSTMGR_BRGMODRST_LWHPS2FPGA 0x2
+#define RSTMGR_BRGMODRST_FPGA2SOC 0x4
+#define RSTMGR_BRGMODRST_F2SSDRAM0 0x8
+#define RSTMGR_BRGMODRST_F2SSDRAM1 0x10
+#define RSTMGR_BRGMODRST_F2SSDRAM2 0x20
+#define RSTMGR_BRGMODRST_MPFE 0x40
+#define RSTMGR_BRGMODRST_DDRSCH 0x40
+
+#define RSTMGR_HDSKREQ_FPGAHSREQ (BIT(2))
+#define RSTMGR_HDSKACK_FPGAHSACK_MASK (BIT(2))
+
+/* Definitions */
+
+#define RSTMGR_L2_MODRST 0x0100
+#define RSTMGR_HDSKEN_SET 0x010D
+
+/* Macros */
+
+#define SOCFPGA_RSTMGR(_reg) (SOCFPGA_RSTMGR_REG_BASE \
+ + (SOCFPGA_RSTMGR_##_reg))
+#define RSTMGR_FIELD(_reg, _field) (RSTMGR_##_reg##MODRST_##_field)
+
+/* Function Declarations */
+
+void deassert_peripheral_reset(void);
+void config_hps_hs_before_warm_reset(void);
+
+int socfpga_bridges_enable(uint32_t mask);
+int socfpga_bridges_disable(uint32_t mask);
+
+#endif /* SOCFPGA_RESETMANAGER_H */
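Editor's note: the register and field macros compose as in the hedged sketch below, which takes a single peripheral out of reset (illustrative helper; deassert_peripheral_reset() declared above is the real bulk release):

#include <lib/mmio.h>
#include "socfpga_reset_manager.h"

static void example_release_emac0_reset(void)
{
	/* RSTMGR_FIELD(PER0, EMAC0) expands to RSTMGR_PER0MODRST_EMAC0;
	 * clearing the bit releases the block from reset. */
	mmio_clrbits_32(SOCFPGA_RSTMGR(PER0MODRST),
			RSTMGR_FIELD(PER0, EMAC0));
}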
diff --git a/plat/intel/soc/common/include/socfpga_sip_svc.h b/plat/intel/soc/common/include/socfpga_sip_svc.h
new file mode 100644
index 0000000..0803eb5
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_sip_svc.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_SIP_SVC_H
+#define SOCFPGA_SIP_SVC_H
+
+
+/* SiP status response */
+#define INTEL_SIP_SMC_STATUS_OK 0
+#define INTEL_SIP_SMC_STATUS_BUSY 0x1
+#define INTEL_SIP_SMC_STATUS_REJECTED 0x2
+#define INTEL_SIP_SMC_STATUS_NO_RESPONSE 0x3
+#define INTEL_SIP_SMC_STATUS_ERROR 0x4
+#define INTEL_SIP_SMC_RSU_ERROR 0x7
+
+/* SiP mailbox error code */
+#define GENERIC_RESPONSE_ERROR 0x3FF
+
+/* SiP V2 command code range */
+#define INTEL_SIP_SMC_CMD_MASK 0xFFFF
+#define INTEL_SIP_SMC_CMD_V2_RANGE_BEGIN 0x400
+#define INTEL_SIP_SMC_CMD_V2_RANGE_END 0x4FF
+
+/* SiP V2 protocol header */
+#define INTEL_SIP_SMC_HEADER_JOB_ID_MASK 0xF
+#define INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET 0U
+#define INTEL_SIP_SMC_HEADER_CID_MASK 0xF
+#define INTEL_SIP_SMC_HEADER_CID_OFFSET 4U
+#define INTEL_SIP_SMC_HEADER_VERSION_MASK 0xF
+#define INTEL_SIP_SMC_HEADER_VERSION_OFFSET 60U
+
+/* SMC SiP service function identifier for version 1 */
+
+/* FPGA Reconfig */
+#define INTEL_SIP_SMC_FPGA_CONFIG_START 0xC2000001
+#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE 0x42000002
+#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE 0xC2000003
+#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE 0xC2000004
+#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM 0xC2000005
+
+/* FPGA Bitstream Flag */
+#define FLAG_PARTIAL_CONFIG BIT(0)
+#define FLAG_AUTHENTICATION BIT(1)
+#define CONFIG_TEST_FLAG(_flag, _type) (((flag) & FLAG_##_type) \
+ == FLAG_##_type)
+
+/* Secure Register Access */
+#define INTEL_SIP_SMC_REG_READ 0xC2000007
+#define INTEL_SIP_SMC_REG_WRITE 0xC2000008
+#define INTEL_SIP_SMC_REG_UPDATE 0xC2000009
+
+/* Remote System Update */
+#define INTEL_SIP_SMC_RSU_STATUS 0xC200000B
+#define INTEL_SIP_SMC_RSU_UPDATE 0xC200000C
+#define INTEL_SIP_SMC_RSU_NOTIFY 0xC200000E
+#define INTEL_SIP_SMC_RSU_RETRY_COUNTER 0xC200000F
+#define INTEL_SIP_SMC_RSU_DCMF_VERSION 0xC2000010
+#define INTEL_SIP_SMC_RSU_COPY_DCMF_VERSION 0xC2000011
+#define INTEL_SIP_SMC_RSU_MAX_RETRY 0xC2000012
+#define INTEL_SIP_SMC_RSU_COPY_MAX_RETRY 0xC2000013
+#define INTEL_SIP_SMC_RSU_DCMF_STATUS 0xC2000014
+#define INTEL_SIP_SMC_RSU_COPY_DCMF_STATUS 0xC2000015
+
+/* Hardware monitor */
+#define INTEL_SIP_SMC_HWMON_READTEMP 0xC2000020
+#define INTEL_SIP_SMC_HWMON_READVOLT 0xC2000021
+#define TEMP_CHANNEL_MAX (1 << 15)
+#define VOLT_CHANNEL_MAX (1 << 15)
+
+/* ECC */
+#define INTEL_SIP_SMC_ECC_DBE 0xC200000D
+
+/* Generic Command */
+#define INTEL_SIP_SMC_SERVICE_COMPLETED 0xC200001E
+#define INTEL_SIP_SMC_FIRMWARE_VERSION 0xC200001F
+#define INTEL_SIP_SMC_HPS_SET_BRIDGES 0xC2000032
+#define INTEL_SIP_SMC_GET_ROM_PATCH_SHA384 0xC2000040
+
+#define SERVICE_COMPLETED_MODE_ASYNC 0x00004F4E
+
+/* Mailbox Command */
+#define INTEL_SIP_SMC_MBOX_SEND_CMD 0xC200003C
+#define INTEL_SIP_SMC_GET_USERCODE 0xC200003D
+
+/* FPGA Crypto Services */
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER 0xC200005A
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT 0x4200008F
+#define INTEL_SIP_SMC_FCS_CRYPTION 0x4200005B
+#define INTEL_SIP_SMC_FCS_CRYPTION_EXT 0xC2000090
+#define INTEL_SIP_SMC_FCS_SERVICE_REQUEST 0x4200005C
+#define INTEL_SIP_SMC_FCS_SEND_CERTIFICATE 0x4200005D
+#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA 0x4200005E
+#define INTEL_SIP_SMC_FCS_CNTR_SET_PREAUTH 0xC200005F
+#define INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN 0xC2000064
+#define INTEL_SIP_SMC_FCS_CHIP_ID 0xC2000065
+#define INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY 0xC2000066
+#define INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS 0xC2000067
+#define INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERT 0xC2000068
+#define INTEL_SIP_SMC_FCS_CREATE_CERT_ON_RELOAD 0xC2000069
+#define INTEL_SIP_SMC_FCS_OPEN_CS_SESSION 0xC200006E
+#define INTEL_SIP_SMC_FCS_CLOSE_CS_SESSION 0xC200006F
+#define INTEL_SIP_SMC_FCS_IMPORT_CS_KEY 0x42000070
+#define INTEL_SIP_SMC_FCS_EXPORT_CS_KEY 0xC2000071
+#define INTEL_SIP_SMC_FCS_REMOVE_CS_KEY 0xC2000072
+#define INTEL_SIP_SMC_FCS_GET_CS_KEY_INFO 0xC2000073
+#define INTEL_SIP_SMC_FCS_AES_CRYPT_INIT 0xC2000074
+#define INTEL_SIP_SMC_FCS_AES_CRYPT_UPDATE 0x42000075
+#define INTEL_SIP_SMC_FCS_AES_CRYPT_FINALIZE 0x42000076
+#define INTEL_SIP_SMC_FCS_GET_DIGEST_INIT 0xC2000077
+#define INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE 0xC2000078
+#define INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE 0xC2000079
+#define INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT 0xC200007A
+#define INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE 0xC200007B
+#define INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE 0xC200007C
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_INIT 0xC200007D
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_FINALIZE 0xC200007F
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_INIT 0xC2000080
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE 0xC2000081
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE 0xC2000082
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_INIT 0xC2000083
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE 0xC2000085
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT 0xC2000086
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE 0xC2000087
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE 0xC2000088
+#define INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_INIT 0xC2000089
+#define INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_FINALIZE 0xC200008B
+#define INTEL_SIP_SMC_FCS_ECDH_REQUEST_INIT 0xC200008C
+#define INTEL_SIP_SMC_FCS_ECDH_REQUEST_FINALIZE 0xC200008E
+
+#define INTEL_SIP_SMC_FCS_SHA_MODE_MASK 0xF
+#define INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK 0xF
+#define INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET 4U
+#define INTEL_SIP_SMC_FCS_ECC_ALGO_MASK 0xF
+
+/* ECC DBE */
+#define WARM_RESET_WFI_FLAG BIT(31)
+#define SYSMGR_ECC_DBE_COLD_RST_MASK (SYSMGR_ECC_OCRAM_MASK |\
+ SYSMGR_ECC_DDR0_MASK |\
+ SYSMGR_ECC_DDR1_MASK)
+
+/* Non-mailbox SMC Call */
+#define INTEL_SIP_SMC_SVC_VERSION 0xC2000200
+
+/**
+ * SMC SiP service function identifier for version 2
+ * Command codes range from 0x400 to 0x4FF
+ */
+
+/* V2: Non-mailbox function identifier */
+#define INTEL_SIP_SMC_V2_GET_SVC_VERSION 0xC2000400
+#define INTEL_SIP_SMC_V2_REG_READ 0xC2000401
+#define INTEL_SIP_SMC_V2_REG_WRITE 0xC2000402
+#define INTEL_SIP_SMC_V2_REG_UPDATE 0xC2000403
+#define INTEL_SIP_SMC_V2_HPS_SET_BRIDGES 0xC2000404
+
+/* V2: Mailbox function identifier */
+#define INTEL_SIP_SMC_V2_MAILBOX_SEND_COMMAND 0xC2000420
+#define INTEL_SIP_SMC_V2_MAILBOX_POLL_RESPONSE 0xC2000421
+
+/* SMC function IDs for SiP Service queries */
+#define SIP_SVC_CALL_COUNT 0x8200ff00
+#define SIP_SVC_UID 0x8200ff01
+#define SIP_SVC_VERSION 0x8200ff03
+
+/* SiP Service Calls version numbers */
+#define SIP_SVC_VERSION_MAJOR 1
+#define SIP_SVC_VERSION_MINOR 0
+
+
+/* Structure Definitions */
+struct fpga_config_info {
+ uint32_t addr;
+ int size;
+ int size_written;
+ uint32_t write_requested;
+ int subblocks_sent;
+ int block_number;
+};
+
+typedef enum {
+ NO_REQUEST = 0,
+ RECONFIGURATION,
+ BITSTREAM_AUTH
+} config_type;
+
+/* Function Definitions */
+bool is_size_4_bytes_aligned(uint32_t size);
+bool is_address_in_ddr_range(uint64_t addr, uint64_t size);
+
+/* ECC DBE */
+bool cold_reset_for_ecc_dbe(void);
+uint32_t intel_ecc_dbe_notification(uint64_t dbe_value);
+
+/* Secure register access */
+uint32_t intel_secure_reg_read(uint64_t reg_addr, uint32_t *retval);
+uint32_t intel_secure_reg_write(uint64_t reg_addr, uint32_t val,
+ uint32_t *retval);
+uint32_t intel_secure_reg_update(uint64_t reg_addr, uint32_t mask,
+ uint32_t val, uint32_t *retval);
+
+/* Miscellaneous HPS services */
+uint32_t intel_hps_set_bridges(uint64_t enable, uint64_t mask);
+
+/* SiP Service handler for version 2 */
+uintptr_t sip_smc_handler_v2(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags);
+
+#endif /* SOCFPGA_SIP_SVC_H */
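Editor's note: the V2 header fields unpack with plain shifts and masks, as in this hedged sketch (decoder name is illustrative; sip_smc_handler_v2() declared above performs the real parsing):

#include <stdint.h>
#include "socfpga_sip_svc.h"

static void example_decode_v2_header(uint64_t header, uint32_t *job_id,
				     uint32_t *client_id, uint32_t *version)
{
	*job_id = (header >> INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET) &
		  INTEL_SIP_SMC_HEADER_JOB_ID_MASK;
	*client_id = (header >> INTEL_SIP_SMC_HEADER_CID_OFFSET) &
		     INTEL_SIP_SMC_HEADER_CID_MASK;
	*version = (header >> INTEL_SIP_SMC_HEADER_VERSION_OFFSET) &
		   INTEL_SIP_SMC_HEADER_VERSION_MASK;
}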
diff --git a/plat/intel/soc/common/include/socfpga_system_manager.h b/plat/intel/soc/common/include/socfpga_system_manager.h
new file mode 100644
index 0000000..7f67313
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_system_manager.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_SYSTEMMANAGER_H
+#define SOCFPGA_SYSTEMMANAGER_H
+
+#include "socfpga_plat_def.h"
+
+/* System Manager Register Map */
+
+#define SOCFPGA_SYSMGR_SDMMC 0x28
+
+#define SOCFPGA_SYSMGR_FPGAINTF_EN_2 0x6c
+
+#define SOCFPGA_SYSMGR_EMAC_0 0x44
+#define SOCFPGA_SYSMGR_EMAC_1 0x48
+#define SOCFPGA_SYSMGR_EMAC_2 0x4c
+#define SOCFPGA_SYSMGR_FPGAINTF_EN_3 0x70
+
+#define SOCFPGA_SYSMGR_NOC_TIMEOUT 0xc0
+#define SOCFPGA_SYSMGR_NOC_IDLEREQ_SET 0xc4
+#define SOCFPGA_SYSMGR_NOC_IDLEREQ_CLR 0xc8
+#define SOCFPGA_SYSMGR_NOC_IDLEREQ_VAL 0xcc
+#define SOCFPGA_SYSMGR_NOC_IDLEACK 0xd0
+#define SOCFPGA_SYSMGR_NOC_IDLESTATUS 0xd4
+
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_0 0x200
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_1 0x204
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_2 0x208
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_8 0x220
+#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_9 0x224
+
+/* Field Masking */
+
+#define SYSMGR_SDMMC_DRVSEL(x) (((x) & 0x7) << 0)
+#define SYSMGR_SDMMC_SMPLSEL(x) (((x) & 0x7) << 4)
+
+#define IDLE_DATA_LWSOC2FPGA BIT(4)
+#define IDLE_DATA_SOC2FPGA BIT(0)
+#define IDLE_DATA_MASK (IDLE_DATA_LWSOC2FPGA | IDLE_DATA_SOC2FPGA)
+
+#define SYSMGR_ECC_OCRAM_MASK BIT(1)
+#define SYSMGR_ECC_DDR0_MASK BIT(16)
+#define SYSMGR_ECC_DDR1_MASK BIT(17)
+
+/* Macros */
+
+#define SOCFPGA_SYSMGR(_reg) (SOCFPGA_SYSMGR_REG_BASE \
+ + (SOCFPGA_SYSMGR_##_reg))
+
+#endif /* SOCFPGA_SYSTEMMANAGER_H */
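Editor's note: a hedged sketch of the SDMMC field macros in use (helper name illustrative):

#include <lib/mmio.h>
#include "socfpga_system_manager.h"

static void example_sdmmc_clk_phase(uint32_t drvsel, uint32_t smplsel)
{
	/* Program drive and sample clock phase selects for the SD/MMC host. */
	mmio_write_32(SOCFPGA_SYSMGR(SDMMC),
		      SYSMGR_SDMMC_DRVSEL(drvsel) |
		      SYSMGR_SDMMC_SMPLSEL(smplsel));
}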
diff --git a/plat/intel/soc/common/sip/socfpga_sip_ecc.c b/plat/intel/soc/common/sip/socfpga_sip_ecc.c
new file mode 100644
index 0000000..c4e06a6
--- /dev/null
+++ b/plat/intel/soc/common/sip/socfpga_sip_ecc.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/mmio.h>
+#include <tools_share/uuid.h>
+
+#include "socfpga_fcs.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_reset_manager.h"
+#include "socfpga_sip_svc.h"
+#include "socfpga_system_manager.h"
+
+uint32_t intel_ecc_dbe_notification(uint64_t dbe_value)
+{
+ dbe_value &= WARM_RESET_WFI_FLAG;
+
+ /* Trap CPUs in WFI if warm reset flag is set */
+ if (dbe_value > 0) {
+ while (1) {
+ wfi();
+ }
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+bool cold_reset_for_ecc_dbe(void)
+{
+ uint32_t dbe_int_status;
+
+ dbe_int_status = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_8));
+
+ /* Trigger cold reset only for error in critical memory (DDR/OCRAM) */
+ dbe_int_status &= SYSMGR_ECC_DBE_COLD_RST_MASK;
+
+ if (dbe_int_status > 0) {
+ return true;
+ }
+
+ return false;
+}
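Editor's note: as a usage sketch, the snippet below shows one way a reset path might consult cold_reset_for_ecc_dbe() before asking the SDM for a cold reboot. This is an editor's illustration, not the platform's actual PSCI hook; mailbox_reset_cold() is declared in socfpga_mailbox.h above.

#include "socfpga_mailbox.h"
#include "socfpga_sip_svc.h"

static void example_reset_policy(void)
{
	/* Escalate to a cold reset when a double-bit error hit DDR/OCRAM. */
	if (cold_reset_for_ecc_dbe()) {
		mailbox_reset_cold();
	}
}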
diff --git a/plat/intel/soc/common/sip/socfpga_sip_fcs.c b/plat/intel/soc/common/sip/socfpga_sip_fcs.c
new file mode 100644
index 0000000..facee0f
--- /dev/null
+++ b/plat/intel/soc/common/sip/socfpga_sip_fcs.c
@@ -0,0 +1,1739 @@
+/*
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <lib/mmio.h>
+
+#include "socfpga_fcs.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_sip_svc.h"
+
+/* FCS static variables */
+static fcs_crypto_service_aes_data fcs_aes_init_payload;
+static fcs_crypto_service_data fcs_sha_get_digest_param;
+static fcs_crypto_service_data fcs_sha_mac_verify_param;
+static fcs_crypto_service_data fcs_ecdsa_hash_sign_param;
+static fcs_crypto_service_data fcs_ecdsa_hash_sig_verify_param;
+static fcs_crypto_service_data fcs_sha2_data_sign_param;
+static fcs_crypto_service_data fcs_sha2_data_sig_verify_param;
+static fcs_crypto_service_data fcs_ecdsa_get_pubkey_param;
+static fcs_crypto_service_data fcs_ecdh_request_param;
+
+bool is_size_4_bytes_aligned(uint32_t size)
+{
+ if ((size % MBOX_WORD_BYTE) != 0U) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static bool is_8_bytes_aligned(uint32_t data)
+{
+ if ((data % (MBOX_WORD_BYTE * 2U)) != 0U) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static bool is_32_bytes_aligned(uint32_t data)
+{
+ if ((data % (8U * MBOX_WORD_BYTE)) != 0U) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static int intel_fcs_crypto_service_init(uint32_t session_id,
+ uint32_t context_id, uint32_t key_id,
+ uint32_t param_size, uint64_t param_data,
+ fcs_crypto_service_data *data_addr,
+ uint32_t *mbox_error)
+{
+ if (mbox_error == NULL) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (param_size != 4) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ memset(data_addr, 0, sizeof(fcs_crypto_service_data));
+
+ data_addr->session_id = session_id;
+ data_addr->context_id = context_id;
+ data_addr->key_id = key_id;
+ data_addr->crypto_param_size = param_size;
+ data_addr->crypto_param = param_data;
+
+ data_addr->is_updated = 0;
+
+ *mbox_error = 0;
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_random_number_gen(uint64_t addr, uint64_t *ret_size,
+ uint32_t *mbox_error)
+{
+ int status;
+ unsigned int i;
+ unsigned int resp_len = FCS_RANDOM_WORD_SIZE;
+ uint32_t random_data[FCS_RANDOM_WORD_SIZE] = {0U};
+
+ if (!is_address_in_ddr_range(addr, FCS_RANDOM_BYTE_SIZE)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_RANDOM_GEN, NULL, 0U,
+ CMD_CASUAL, random_data, &resp_len);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ if (resp_len != FCS_RANDOM_WORD_SIZE) {
+ *mbox_error = GENERIC_RESPONSE_ERROR;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *ret_size = FCS_RANDOM_BYTE_SIZE;
+
+ for (i = 0U; i < FCS_RANDOM_WORD_SIZE; i++) {
+ mmio_write_32(addr, random_data[i]);
+ addr += MBOX_WORD_BYTE;
+ }
+
+ flush_dcache_range(addr - *ret_size, *ret_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_random_number_gen_ext(uint32_t session_id, uint32_t context_id,
+ uint32_t size, uint32_t *send_id)
+{
+ int status;
+ uint32_t payload_size;
+ uint32_t crypto_header;
+
+ if (size > (FCS_RANDOM_EXT_MAX_WORD_SIZE *
+ MBOX_WORD_BYTE) || size == 0U) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_size_4_bytes_aligned(size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ crypto_header = (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_FINALIZE) <<
+ FCS_CS_FIELD_FLAG_OFFSET;
+
+ fcs_rng_payload payload = {
+ session_id,
+ context_id,
+ crypto_header,
+ size
+ };
+
+ payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+ status = mailbox_send_cmd_async(send_id, MBOX_FCS_RANDOM_GEN,
+ (uint32_t *) &payload, payload_size,
+ CMD_INDIRECT);
+
+ if (status < 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_send_cert(uint64_t addr, uint64_t size,
+ uint32_t *send_id)
+{
+ int status;
+
+ if (!is_address_in_ddr_range(addr, size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_size_4_bytes_aligned(size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd_async(send_id, MBOX_CMD_VAB_SRC_CERT,
+ (uint32_t *)addr, size / MBOX_WORD_BYTE,
+ CMD_DIRECT);
+
+ flush_dcache_range(addr, size);
+
+ if (status < 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_get_provision_data(uint32_t *send_id)
+{
+ int status;
+
+ status = mailbox_send_cmd_async(send_id, MBOX_FCS_GET_PROVISION,
+ NULL, 0U, CMD_DIRECT);
+
+ if (status < 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_cntr_set_preauth(uint8_t counter_type, int32_t counter_value,
+ uint32_t test_bit, uint32_t *mbox_error)
+{
+ int status;
+ uint32_t first_word;
+ uint32_t payload_size;
+
+ if ((test_bit != MBOX_TEST_BIT) &&
+ (test_bit != 0)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if ((counter_type < FCS_BIG_CNTR_SEL) ||
+ (counter_type > FCS_SVN_CNTR_3_SEL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if ((counter_type == FCS_BIG_CNTR_SEL) &&
+ (counter_value > FCS_BIG_CNTR_VAL_MAX)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if ((counter_type >= FCS_SVN_CNTR_0_SEL) &&
+ (counter_type <= FCS_SVN_CNTR_3_SEL) &&
+ (counter_value > FCS_SVN_CNTR_VAL_MAX)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ first_word = test_bit | counter_type;
+ fcs_cntr_set_preauth_payload payload = {
+ first_word,
+ counter_value
+ };
+
+ payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_CNTR_SET_PREAUTH,
+ (uint32_t *) &payload, payload_size,
+ CMD_CASUAL, NULL, NULL);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_encryption(uint32_t src_addr, uint32_t src_size,
+ uint32_t dst_addr, uint32_t dst_size, uint32_t *send_id)
+{
+ int status;
+ uint32_t load_size;
+
+ fcs_encrypt_payload payload = {
+ FCS_ENCRYPTION_DATA_0,
+ src_addr,
+ src_size,
+ dst_addr,
+ dst_size };
+ load_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_size_4_bytes_aligned(src_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd_async(send_id, MBOX_FCS_ENCRYPT_REQ,
+ (uint32_t *) &payload, load_size,
+ CMD_INDIRECT);
+ inv_dcache_range(dst_addr, dst_size);
+
+ if (status < 0) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_decryption(uint32_t src_addr, uint32_t src_size,
+ uint32_t dst_addr, uint32_t dst_size, uint32_t *send_id)
+{
+ int status;
+ uint32_t load_size;
+ uintptr_t id_offset;
+
+ id_offset = src_addr + FCS_OWNER_ID_OFFSET;
+ fcs_decrypt_payload payload = {
+ FCS_DECRYPTION_DATA_0,
+ {mmio_read_32(id_offset),
+ mmio_read_32(id_offset + MBOX_WORD_BYTE)},
+ src_addr,
+ src_size,
+ dst_addr,
+ dst_size };
+ load_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_size_4_bytes_aligned(src_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd_async(send_id, MBOX_FCS_DECRYPT_REQ,
+ (uint32_t *) &payload, load_size,
+ CMD_INDIRECT);
+ inv_dcache_range(dst_addr, dst_size);
+
+ if (status < 0) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_encryption_ext(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint32_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+ int status;
+ uint32_t payload_size;
+ uint32_t resp_len = FCS_CRYPTION_RESP_WORD_SIZE;
+ uint32_t resp_data[FCS_CRYPTION_RESP_WORD_SIZE] = {0U};
+
+ if ((dst_size == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_size_4_bytes_aligned(src_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ fcs_encrypt_ext_payload payload = {
+ session_id,
+ context_id,
+ FCS_CRYPTION_CRYPTO_HEADER,
+ src_addr,
+ src_size,
+ dst_addr,
+ *dst_size
+ };
+
+ payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ENCRYPT_REQ,
+ (uint32_t *) &payload, payload_size,
+ CMD_CASUAL, resp_data, &resp_len);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ if (resp_len != FCS_CRYPTION_RESP_WORD_SIZE) {
+ *mbox_error = MBOX_RET_ERROR;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = resp_data[FCS_CRYPTION_RESP_SIZE_OFFSET];
+ inv_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
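+/*
+ * Session based (crypto service) decryption. The owner ID words are read
+ * from the source buffer and included in the payload; the SDM response
+ * carries the decrypted output size, which is returned in *dst_size.
+ */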
+int intel_fcs_decryption_ext(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint32_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+ int status;
+ uintptr_t id_offset;
+ uint32_t payload_size;
+ uint32_t resp_len = FCS_CRYPTION_RESP_WORD_SIZE;
+ uint32_t resp_data[FCS_CRYPTION_RESP_WORD_SIZE] = {0U};
+
+ if ((dst_size == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_size_4_bytes_aligned(src_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ id_offset = src_addr + FCS_OWNER_ID_OFFSET;
+ fcs_decrypt_ext_payload payload = {
+ session_id,
+ context_id,
+ FCS_CRYPTION_CRYPTO_HEADER,
+ {mmio_read_32(id_offset),
+ mmio_read_32(id_offset + MBOX_WORD_BYTE)},
+ src_addr,
+ src_size,
+ dst_addr,
+ *dst_size
+ };
+
+ payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_DECRYPT_REQ,
+ (uint32_t *) &payload, payload_size,
+ CMD_CASUAL, resp_data, &resp_len);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ if (resp_len != FCS_CRYPTION_RESP_WORD_SIZE) {
+ *mbox_error = MBOX_RET_ERROR;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = resp_data[FCS_CRYPTION_RESP_SIZE_OFFSET];
+ inv_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
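+/*
+ * Tear down a PSG SIGMA (attestation) session. Only session one or the
+ * unknown-session value is accepted.
+ */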
+int intel_fcs_sigma_teardown(uint32_t session_id, uint32_t *mbox_error)
+{
+ int status;
+
+ if ((session_id != PSGSIGMA_SESSION_ID_ONE) &&
+ (session_id != PSGSIGMA_UNKNOWN_SESSION)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ psgsigma_teardown_msg message = {
+ RESERVED_AS_ZERO,
+ PSGSIGMA_TEARDOWN_MAGIC,
+ session_id
+ };
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_PSG_SIGMA_TEARDOWN,
+ (uint32_t *) &message, sizeof(message) / MBOX_WORD_BYTE,
+ CMD_CASUAL, NULL, NULL);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
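+/*
+ * Read the device chip ID from the SDM and return the two response words
+ * as the low and high halves.
+ */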
+int intel_fcs_chip_id(uint32_t *id_low, uint32_t *id_high, uint32_t *mbox_error)
+{
+ int status;
+ uint32_t load_size;
+ uint32_t chip_id[2];
+
+ load_size = sizeof(chip_id) / MBOX_WORD_BYTE;
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_GET_CHIPID, NULL,
+ 0U, CMD_CASUAL, (uint32_t *) chip_id, &load_size);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *id_low = chip_id[0];
+ *id_high = chip_id[1];
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_attestation_subkey(uint64_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+ int status;
+ uint32_t send_size = src_size / MBOX_WORD_BYTE;
+ uint32_t ret_size = *dst_size / MBOX_WORD_BYTE;
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_ATTESTATION_SUBKEY,
+ (uint32_t *) src_addr, send_size, CMD_CASUAL,
+ (uint32_t *) dst_addr, &ret_size);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = ret_size * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_get_measurement(uint64_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+ int status;
+ uint32_t send_size = src_size / MBOX_WORD_BYTE;
+ uint32_t ret_size = *dst_size / MBOX_WORD_BYTE;
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_MEASUREMENT,
+ (uint32_t *) src_addr, send_size, CMD_CASUAL,
+ (uint32_t *) dst_addr, &ret_size);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = ret_size * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_get_rom_patch_sha384(uint64_t addr, uint64_t *ret_size,
+ uint32_t *mbox_error)
+{
+ int status;
+ unsigned int resp_len = FCS_SHA384_WORD_SIZE;
+
+ if (!is_address_in_ddr_range(addr, FCS_SHA384_BYTE_SIZE)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_ROM_PATCH_SHA384, NULL, 0U,
+ CMD_CASUAL, (uint32_t *) addr, &resp_len);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ if (resp_len != FCS_SHA384_WORD_SIZE) {
+ *mbox_error = GENERIC_RESPONSE_ERROR;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *ret_size = FCS_SHA384_BYTE_SIZE;
+
+ flush_dcache_range(addr, *ret_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_get_attestation_cert(uint32_t cert_request, uint64_t dst_addr,
+ uint32_t *dst_size, uint32_t *mbox_error)
+{
+ int status;
+ uint32_t ret_size = *dst_size / MBOX_WORD_BYTE;
+
+ if (mbox_error == NULL) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (cert_request < FCS_ATTEST_FIRMWARE_CERT ||
+ cert_request > FCS_ATTEST_CERT_MAX_REQ_PARAM) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_ATTESTATION_CERT,
+ (uint32_t *) &cert_request, 1U, CMD_CASUAL,
+ (uint32_t *) dst_addr, &ret_size);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = ret_size * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_create_cert_on_reload(uint32_t cert_request,
+ uint32_t *mbox_error)
+{
+ int status;
+
+ if (mbox_error == NULL) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (cert_request < FCS_ATTEST_FIRMWARE_CERT ||
+ cert_request > FCS_ATTEST_CERT_MAX_REQ_PARAM) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CREATE_CERT_ON_RELOAD,
+ (uint32_t *) &cert_request, 1U, CMD_CASUAL,
+ NULL, NULL);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
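+/*
+ * Open and close crypto service sessions. The SDM returns the new session
+ * ID for an open request; a close request only reports status.
+ */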
+int intel_fcs_open_crypto_service_session(uint32_t *session_id,
+ uint32_t *mbox_error)
+{
+ int status;
+ uint32_t resp_len = 1U;
+
+ if ((session_id == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_OPEN_CS_SESSION,
+ NULL, 0U, CMD_CASUAL, session_id, &resp_len);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_close_crypto_service_session(uint32_t session_id,
+ uint32_t *mbox_error)
+{
+ int status;
+
+ if (mbox_error == NULL) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_CLOSE_CS_SESSION,
+ &session_id, 1U, CMD_CASUAL, NULL, NULL);
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_import_crypto_service_key(uint64_t src_addr, uint32_t src_size,
+ uint32_t *send_id)
+{
+ int status;
+
+ if (src_size > (FCS_CS_KEY_OBJ_MAX_WORD_SIZE *
+ MBOX_WORD_BYTE)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(src_addr, src_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ status = mailbox_send_cmd_async(send_id, MBOX_FCS_IMPORT_CS_KEY,
+ (uint32_t *)src_addr, src_size / MBOX_WORD_BYTE,
+ CMD_INDIRECT);
+
+ if (status < 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
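+/*
+ * Export a crypto service key object. The first response word carries the
+ * operation status bits; the key object itself starts at the second word
+ * and is copied out to dst_addr.
+ */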
+int intel_fcs_export_crypto_service_key(uint32_t session_id, uint32_t key_id,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error)
+{
+ int status;
+ uint32_t i;
+ uint32_t payload_size;
+ uint32_t resp_len = FCS_CS_KEY_OBJ_MAX_WORD_SIZE;
+ uint32_t resp_data[FCS_CS_KEY_OBJ_MAX_WORD_SIZE] = {0U};
+ uint32_t op_status = 0U;
+
+ if ((dst_size == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ fcs_cs_key_payload payload = {
+ session_id,
+ RESERVED_AS_ZERO,
+ RESERVED_AS_ZERO,
+ key_id
+ };
+
+ payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_EXPORT_CS_KEY,
+ (uint32_t *) &payload, payload_size,
+ CMD_CASUAL, resp_data, &resp_len);
+
+ if (resp_len > 0) {
+ op_status = resp_data[0] & FCS_CS_KEY_RESP_STATUS_MASK;
+ }
+
+ if (status < 0) {
+ *mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET);
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ if (resp_len > 1) {
+
+		/* The exported key object starts at the second response data word */
+ *dst_size = (resp_len - 1) * MBOX_WORD_BYTE;
+
+ for (i = 1U; i < resp_len; i++) {
+ mmio_write_32(dst_addr, resp_data[i]);
+ dst_addr += MBOX_WORD_BYTE;
+ }
+
+ flush_dcache_range(dst_addr - *dst_size, *dst_size);
+
+ } else {
+
+		/* Unexpected response: the key object is missing from the response */
+ *mbox_error = MBOX_RET_ERROR;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_remove_crypto_service_key(uint32_t session_id, uint32_t key_id,
+ uint32_t *mbox_error)
+{
+ int status;
+ uint32_t payload_size;
+ uint32_t resp_len = 1U;
+ uint32_t resp_data = 0U;
+ uint32_t op_status = 0U;
+
+ if (mbox_error == NULL) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ fcs_cs_key_payload payload = {
+ session_id,
+ RESERVED_AS_ZERO,
+ RESERVED_AS_ZERO,
+ key_id
+ };
+
+ payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_REMOVE_CS_KEY,
+ (uint32_t *) &payload, payload_size,
+ CMD_CASUAL, &resp_data, &resp_len);
+
+ if (resp_len > 0) {
+ op_status = resp_data & FCS_CS_KEY_RESP_STATUS_MASK;
+ }
+
+ if (status < 0) {
+ *mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET);
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_get_crypto_service_key_info(uint32_t session_id, uint32_t key_id,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error)
+{
+ int status;
+ uint32_t payload_size;
+ uint32_t resp_len = FCS_CS_KEY_INFO_MAX_WORD_SIZE;
+ uint32_t op_status = 0U;
+
+ if ((dst_size == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ fcs_cs_key_payload payload = {
+ session_id,
+ RESERVED_AS_ZERO,
+ RESERVED_AS_ZERO,
+ key_id
+ };
+
+ payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_GET_CS_KEY_INFO,
+ (uint32_t *) &payload, payload_size,
+ CMD_CASUAL, (uint32_t *) dst_addr, &resp_len);
+
+ if (resp_len > 0) {
+ op_status = mmio_read_32(dst_addr) &
+ FCS_CS_KEY_RESP_STATUS_MASK;
+ }
+
+ if (status < 0) {
+ *mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET);
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = resp_len * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_get_digest_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error)
+{
+ return intel_fcs_crypto_service_init(session_id, context_id,
+ key_id, param_size, param_data,
+ (void *) &fcs_sha_get_digest_param,
+ mbox_error);
+}
+
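+/*
+ * SHA/HMAC digest update/finalize. The crypto header selects INIT on the
+ * first call, UPDATE on intermediate calls and FINALIZE on the last one;
+ * the key ID and crypto parameters are only sent together with INIT.
+ */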
+int intel_fcs_get_digest_update_finalize(uint32_t session_id,
+ uint32_t context_id, uint32_t src_addr,
+ uint32_t src_size, uint64_t dst_addr,
+ uint32_t *dst_size, uint8_t is_finalised,
+ uint32_t *mbox_error)
+{
+ int status;
+ uint32_t i;
+ uint32_t flag;
+ uint32_t crypto_header;
+ uint32_t resp_len;
+ uint32_t payload[FCS_GET_DIGEST_CMD_MAX_WORD_SIZE] = {0U};
+
+ if (dst_size == NULL || mbox_error == NULL) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (fcs_sha_get_digest_param.session_id != session_id ||
+ fcs_sha_get_digest_param.context_id != context_id) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ /* Source data must be 8 bytes aligned */
+ if (!is_8_bytes_aligned(src_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ resp_len = *dst_size / MBOX_WORD_BYTE;
+
+ /* Prepare crypto header */
+ flag = 0;
+
+ if (fcs_sha_get_digest_param.is_updated) {
+ fcs_sha_get_digest_param.crypto_param_size = 0;
+ } else {
+ flag |= FCS_CS_FIELD_FLAG_INIT;
+ }
+
+ if (is_finalised != 0U) {
+ flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+ } else {
+ flag |= FCS_CS_FIELD_FLAG_UPDATE;
+ fcs_sha_get_digest_param.is_updated = 1;
+ }
+
+ crypto_header = ((flag << FCS_CS_FIELD_FLAG_OFFSET) |
+ (fcs_sha_get_digest_param.crypto_param_size &
+ FCS_CS_FIELD_SIZE_MASK));
+
+ /* Prepare command payload */
+ i = 0;
+ payload[i] = fcs_sha_get_digest_param.session_id;
+ i++;
+ payload[i] = fcs_sha_get_digest_param.context_id;
+ i++;
+ payload[i] = crypto_header;
+ i++;
+
+ if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+ FCS_CS_FIELD_FLAG_INIT) {
+ payload[i] = fcs_sha_get_digest_param.key_id;
+ i++;
+ /* Crypto parameters */
+ payload[i] = fcs_sha_get_digest_param.crypto_param
+ & INTEL_SIP_SMC_FCS_SHA_MODE_MASK;
+ payload[i] |= ((fcs_sha_get_digest_param.crypto_param
+ >> INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET)
+ & INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK)
+ << FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET;
+ i++;
+ }
+ /* Data source address and size */
+ payload[i] = src_addr;
+ i++;
+ payload[i] = src_size;
+ i++;
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_GET_DIGEST_REQ,
+ payload, i, CMD_CASUAL,
+ (uint32_t *) dst_addr, &resp_len);
+
+ if (is_finalised != 0U) {
+ memset((void *)&fcs_sha_get_digest_param, 0,
+ sizeof(fcs_crypto_service_data));
+ }
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = resp_len * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_mac_verify_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error)
+{
+ return intel_fcs_crypto_service_init(session_id, context_id,
+ key_id, param_size, param_data,
+ (void *) &fcs_sha_mac_verify_param,
+ mbox_error);
+}
+
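+/*
+ * HMAC verification update/finalize. data_size covers the user data in
+ * the source buffer; on the FINALIZE call the trailing MAC bytes
+ * (src_size - data_size) are copied inline into the command payload.
+ */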
+int intel_fcs_mac_verify_update_finalize(uint32_t session_id,
+ uint32_t context_id, uint32_t src_addr,
+ uint32_t src_size, uint64_t dst_addr,
+ uint32_t *dst_size, uint32_t data_size,
+ uint8_t is_finalised, uint32_t *mbox_error)
+{
+ int status;
+ uint32_t i;
+ uint32_t flag;
+ uint32_t crypto_header;
+ uint32_t resp_len;
+ uint32_t payload[FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE] = {0U};
+ uintptr_t mac_offset;
+
+ if (dst_size == NULL || mbox_error == NULL) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (fcs_sha_mac_verify_param.session_id != session_id ||
+ fcs_sha_mac_verify_param.context_id != context_id) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (data_size > src_size) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_size_4_bytes_aligned(src_size) ||
+ !is_8_bytes_aligned(data_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ resp_len = *dst_size / MBOX_WORD_BYTE;
+
+ /* Prepare crypto header */
+ flag = 0;
+
+ if (fcs_sha_mac_verify_param.is_updated) {
+ fcs_sha_mac_verify_param.crypto_param_size = 0;
+ } else {
+ flag |= FCS_CS_FIELD_FLAG_INIT;
+ }
+
+ if (is_finalised) {
+ flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+ } else {
+ flag |= FCS_CS_FIELD_FLAG_UPDATE;
+ fcs_sha_mac_verify_param.is_updated = 1;
+ }
+
+ crypto_header = ((flag << FCS_CS_FIELD_FLAG_OFFSET) |
+ (fcs_sha_mac_verify_param.crypto_param_size &
+ FCS_CS_FIELD_SIZE_MASK));
+
+ /* Prepare command payload */
+ i = 0;
+ payload[i] = fcs_sha_mac_verify_param.session_id;
+ i++;
+ payload[i] = fcs_sha_mac_verify_param.context_id;
+ i++;
+ payload[i] = crypto_header;
+ i++;
+
+ if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+ FCS_CS_FIELD_FLAG_INIT) {
+ payload[i] = fcs_sha_mac_verify_param.key_id;
+ i++;
+ /* Crypto parameters */
+ payload[i] = ((fcs_sha_mac_verify_param.crypto_param
+ >> INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET)
+ & INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK)
+ << FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET;
+ i++;
+ }
+ /* Data source address and size */
+ payload[i] = src_addr;
+ i++;
+ payload[i] = data_size;
+ i++;
+
+ if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+ FCS_CS_FIELD_FLAG_FINALIZE) {
+ /* Copy mac data to command */
+ mac_offset = src_addr + data_size;
+ memcpy((uint8_t *) &payload[i], (uint8_t *) mac_offset,
+ src_size - data_size);
+
+ i += (src_size - data_size) / MBOX_WORD_BYTE;
+ }
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_MAC_VERIFY_REQ,
+ payload, i, CMD_CASUAL,
+ (uint32_t *) dst_addr, &resp_len);
+
+ if (is_finalised) {
+ memset((void *)&fcs_sha_mac_verify_param, 0,
+ sizeof(fcs_crypto_service_data));
+ }
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = resp_len * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_hash_sign_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error)
+{
+ return intel_fcs_crypto_service_init(session_id, context_id,
+ key_id, param_size, param_data,
+ (void *) &fcs_ecdsa_hash_sign_param,
+ mbox_error);
+}
+
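+/*
+ * ECDSA hash signing is a single-shot operation: INIT, UPDATE and
+ * FINALIZE are all set in one command and the hash is copied inline into
+ * the payload rather than referenced by address.
+ */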
+int intel_fcs_ecdsa_hash_sign_finalize(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error)
+{
+ int status;
+ uint32_t i;
+ uint32_t payload[FCS_ECDSA_HASH_SIGN_CMD_MAX_WORD_SIZE] = {0U};
+ uint32_t resp_len;
+ uintptr_t hash_data_addr;
+
+ if ((dst_size == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (fcs_ecdsa_hash_sign_param.session_id != session_id ||
+ fcs_ecdsa_hash_sign_param.context_id != context_id) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ resp_len = *dst_size / MBOX_WORD_BYTE;
+
+ /* Prepare command payload */
+ /* Crypto header */
+ i = 0;
+ payload[i] = fcs_ecdsa_hash_sign_param.session_id;
+ i++;
+ payload[i] = fcs_ecdsa_hash_sign_param.context_id;
+
+ i++;
+ payload[i] = fcs_ecdsa_hash_sign_param.crypto_param_size
+ & FCS_CS_FIELD_SIZE_MASK;
+ payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE
+ | FCS_CS_FIELD_FLAG_FINALIZE)
+ << FCS_CS_FIELD_FLAG_OFFSET;
+ i++;
+ payload[i] = fcs_ecdsa_hash_sign_param.key_id;
+
+ /* Crypto parameters */
+ i++;
+ payload[i] = fcs_ecdsa_hash_sign_param.crypto_param
+ & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+
+ /* Hash Data */
+ i++;
+ hash_data_addr = src_addr;
+ memcpy((uint8_t *) &payload[i], (uint8_t *) hash_data_addr,
+ src_size);
+
+ i += src_size / MBOX_WORD_BYTE;
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_HASH_SIGN_REQ,
+ payload, i, CMD_CASUAL, (uint32_t *) dst_addr,
+ &resp_len);
+
+ memset((void *) &fcs_ecdsa_hash_sign_param,
+ 0, sizeof(fcs_crypto_service_data));
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = resp_len * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_hash_sig_verify_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error)
+{
+ return intel_fcs_crypto_service_init(session_id, context_id,
+ key_id, param_size, param_data,
+ (void *) &fcs_ecdsa_hash_sig_verify_param,
+ mbox_error);
+}
+
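+/*
+ * ECDSA hash signature verification: the hash, signature and public key
+ * are packed inline into a single command payload.
+ */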
+int intel_fcs_ecdsa_hash_sig_verify_finalize(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error)
+{
+ int status;
+ uint32_t i = 0;
+ uint32_t payload[FCS_ECDSA_HASH_SIG_VERIFY_CMD_MAX_WORD_SIZE] = {0U};
+ uint32_t resp_len;
+ uintptr_t hash_sig_pubkey_addr;
+
+ if ((dst_size == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (fcs_ecdsa_hash_sig_verify_param.session_id != session_id ||
+ fcs_ecdsa_hash_sig_verify_param.context_id != context_id) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ resp_len = *dst_size / MBOX_WORD_BYTE;
+
+ /* Prepare command payload */
+ /* Crypto header */
+ i = 0;
+ payload[i] = fcs_ecdsa_hash_sig_verify_param.session_id;
+
+ i++;
+ payload[i] = fcs_ecdsa_hash_sig_verify_param.context_id;
+
+ i++;
+ payload[i] = fcs_ecdsa_hash_sig_verify_param.crypto_param_size
+ & FCS_CS_FIELD_SIZE_MASK;
+ payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE
+ | FCS_CS_FIELD_FLAG_FINALIZE)
+ << FCS_CS_FIELD_FLAG_OFFSET;
+
+ i++;
+ payload[i] = fcs_ecdsa_hash_sig_verify_param.key_id;
+
+ /* Crypto parameters */
+ i++;
+ payload[i] = fcs_ecdsa_hash_sig_verify_param.crypto_param
+ & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+
+	/* Hash data word, signature data word and public key data word */
+ i++;
+ hash_sig_pubkey_addr = src_addr;
+ memcpy((uint8_t *) &payload[i],
+ (uint8_t *) hash_sig_pubkey_addr, src_size);
+
+ i += (src_size / MBOX_WORD_BYTE);
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_HASH_SIG_VERIFY,
+ payload, i, CMD_CASUAL, (uint32_t *) dst_addr,
+ &resp_len);
+
+ memset((void *)&fcs_ecdsa_hash_sig_verify_param,
+ 0, sizeof(fcs_crypto_service_data));
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = resp_len * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_sha2_data_sign_init(uint32_t session_id,
+ uint32_t context_id, uint32_t key_id,
+ uint32_t param_size, uint64_t param_data,
+ uint32_t *mbox_error)
+{
+ return intel_fcs_crypto_service_init(session_id, context_id,
+ key_id, param_size, param_data,
+ (void *) &fcs_sha2_data_sign_param,
+ mbox_error);
+}
+
+int intel_fcs_ecdsa_sha2_data_sign_update_finalize(uint32_t session_id,
+ uint32_t context_id, uint32_t src_addr,
+ uint32_t src_size, uint64_t dst_addr,
+ uint32_t *dst_size, uint8_t is_finalised,
+ uint32_t *mbox_error)
+{
+ int status;
+ int i;
+ uint32_t flag;
+ uint32_t crypto_header;
+ uint32_t payload[FCS_ECDSA_SHA2_DATA_SIGN_CMD_MAX_WORD_SIZE] = {0U};
+ uint32_t resp_len;
+
+ if ((dst_size == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (fcs_sha2_data_sign_param.session_id != session_id ||
+ fcs_sha2_data_sign_param.context_id != context_id) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ /* Source data must be 8 bytes aligned */
+ if (!is_8_bytes_aligned(src_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ resp_len = *dst_size / MBOX_WORD_BYTE;
+
+ /* Prepare crypto header */
+ flag = 0;
+ if (fcs_sha2_data_sign_param.is_updated) {
+ fcs_sha2_data_sign_param.crypto_param_size = 0;
+ } else {
+ flag |= FCS_CS_FIELD_FLAG_INIT;
+ }
+
+ if (is_finalised != 0U) {
+ flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+ } else {
+ flag |= FCS_CS_FIELD_FLAG_UPDATE;
+ fcs_sha2_data_sign_param.is_updated = 1;
+ }
+ crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) |
+ fcs_sha2_data_sign_param.crypto_param_size;
+
+ /* Prepare command payload */
+ i = 0;
+ payload[i] = fcs_sha2_data_sign_param.session_id;
+ i++;
+ payload[i] = fcs_sha2_data_sign_param.context_id;
+ i++;
+ payload[i] = crypto_header;
+ i++;
+
+ if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+ FCS_CS_FIELD_FLAG_INIT) {
+ payload[i] = fcs_sha2_data_sign_param.key_id;
+ /* Crypto parameters */
+ i++;
+ payload[i] = fcs_sha2_data_sign_param.crypto_param
+ & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+ i++;
+ }
+
+ /* Data source address and size */
+ payload[i] = src_addr;
+ i++;
+ payload[i] = src_size;
+ i++;
+ status = mailbox_send_cmd(MBOX_JOB_ID,
+ MBOX_FCS_ECDSA_SHA2_DATA_SIGN_REQ, payload,
+ i, CMD_CASUAL, (uint32_t *) dst_addr,
+ &resp_len);
+
+ if (is_finalised != 0U) {
+ memset((void *)&fcs_sha2_data_sign_param, 0,
+ sizeof(fcs_crypto_service_data));
+ }
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = resp_len * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_sha2_data_sig_verify_init(uint32_t session_id,
+ uint32_t context_id, uint32_t key_id,
+ uint32_t param_size, uint64_t param_data,
+ uint32_t *mbox_error)
+{
+ return intel_fcs_crypto_service_init(session_id, context_id,
+ key_id, param_size, param_data,
+ (void *) &fcs_sha2_data_sig_verify_param,
+ mbox_error);
+}
+
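+/*
+ * ECDSA SHA2 data signature verification. The data is referenced by
+ * address; on the FINALIZE call the signature and public key that follow
+ * the data in the source buffer are copied inline into the payload.
+ */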
+int intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(uint32_t session_id,
+ uint32_t context_id, uint32_t src_addr,
+ uint32_t src_size, uint64_t dst_addr,
+ uint32_t *dst_size, uint32_t data_size,
+ uint8_t is_finalised, uint32_t *mbox_error)
+{
+ int status;
+ uint32_t i;
+ uint32_t flag;
+ uint32_t crypto_header;
+ uint32_t payload[FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE] = {0U};
+ uint32_t resp_len;
+ uintptr_t sig_pubkey_offset;
+
+ if ((dst_size == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (fcs_sha2_data_sig_verify_param.session_id != session_id ||
+ fcs_sha2_data_sig_verify_param.context_id != context_id) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_size_4_bytes_aligned(src_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_8_bytes_aligned(data_size) ||
+ !is_8_bytes_aligned(src_addr)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ resp_len = *dst_size / MBOX_WORD_BYTE;
+
+ /* Prepare crypto header */
+ flag = 0;
+	if (fcs_sha2_data_sig_verify_param.is_updated) {
+		fcs_sha2_data_sig_verify_param.crypto_param_size = 0;
+	} else {
+		flag |= FCS_CS_FIELD_FLAG_INIT;
+	}
+
+	if (is_finalised != 0U) {
+		flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+	} else {
+		flag |= FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_sha2_data_sig_verify_param.is_updated = 1;
+	}
+ crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) |
+ fcs_sha2_data_sig_verify_param.crypto_param_size;
+
+ /* Prepare command payload */
+ i = 0;
+ payload[i] = fcs_sha2_data_sig_verify_param.session_id;
+ i++;
+ payload[i] = fcs_sha2_data_sig_verify_param.context_id;
+ i++;
+ payload[i] = crypto_header;
+ i++;
+
+ if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+ FCS_CS_FIELD_FLAG_INIT) {
+ payload[i] = fcs_sha2_data_sig_verify_param.key_id;
+ i++;
+ /* Crypto parameters */
+ payload[i] = fcs_sha2_data_sig_verify_param.crypto_param
+ & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+ i++;
+ }
+
+ /* Data source address and size */
+ payload[i] = src_addr;
+ i++;
+ payload[i] = data_size;
+ i++;
+
+ if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+ FCS_CS_FIELD_FLAG_FINALIZE) {
+ /* Signature + Public Key Data */
+ sig_pubkey_offset = src_addr + data_size;
+ memcpy((uint8_t *) &payload[i], (uint8_t *) sig_pubkey_offset,
+ src_size - data_size);
+
+ i += (src_size - data_size) / MBOX_WORD_BYTE;
+ }
+
+ status = mailbox_send_cmd(MBOX_JOB_ID,
+ MBOX_FCS_ECDSA_SHA2_DATA_SIGN_VERIFY, payload, i,
+ CMD_CASUAL, (uint32_t *) dst_addr, &resp_len);
+
+ if (is_finalised != 0U) {
+ memset((void *) &fcs_sha2_data_sig_verify_param, 0,
+ sizeof(fcs_crypto_service_data));
+ }
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = resp_len * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_get_pubkey_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error)
+{
+ return intel_fcs_crypto_service_init(session_id, context_id,
+ key_id, param_size, param_data,
+ (void *) &fcs_ecdsa_get_pubkey_param,
+ mbox_error);
+}
+
+int intel_fcs_ecdsa_get_pubkey_finalize(uint32_t session_id, uint32_t context_id,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error)
+{
+ int status;
+ int i;
+ uint32_t crypto_header;
+ uint32_t ret_size;
+ uint32_t payload[FCS_ECDSA_GET_PUBKEY_MAX_WORD_SIZE] = {0U};
+
+ if ((dst_size == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (fcs_ecdsa_get_pubkey_param.session_id != session_id ||
+ fcs_ecdsa_get_pubkey_param.context_id != context_id) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ ret_size = *dst_size / MBOX_WORD_BYTE;
+
+ crypto_header = ((FCS_CS_FIELD_FLAG_INIT |
+ FCS_CS_FIELD_FLAG_UPDATE |
+ FCS_CS_FIELD_FLAG_FINALIZE) <<
+ FCS_CS_FIELD_FLAG_OFFSET) |
+ fcs_ecdsa_get_pubkey_param.crypto_param_size;
+ i = 0;
+ /* Prepare command payload */
+ payload[i] = session_id;
+ i++;
+ payload[i] = context_id;
+ i++;
+ payload[i] = crypto_header;
+ i++;
+ payload[i] = fcs_ecdsa_get_pubkey_param.key_id;
+ i++;
+ payload[i] = (uint32_t) fcs_ecdsa_get_pubkey_param.crypto_param &
+ INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+ i++;
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_GET_PUBKEY,
+ payload, i, CMD_CASUAL,
+ (uint32_t *) dst_addr, &ret_size);
+
+ memset((void *) &fcs_ecdsa_get_pubkey_param, 0,
+ sizeof(fcs_crypto_service_data));
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = ret_size * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdh_request_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint32_t param_size,
+ uint64_t param_data, uint32_t *mbox_error)
+{
+ return intel_fcs_crypto_service_init(session_id, context_id,
+ key_id, param_size, param_data,
+ (void *) &fcs_ecdh_request_param,
+ mbox_error);
+}
+
+int intel_fcs_ecdh_request_finalize(uint32_t session_id, uint32_t context_id,
+ uint32_t src_addr, uint32_t src_size,
+ uint64_t dst_addr, uint32_t *dst_size,
+ uint32_t *mbox_error)
+{
+ int status;
+ uint32_t i;
+ uint32_t payload[FCS_ECDH_REQUEST_CMD_MAX_WORD_SIZE] = {0U};
+ uint32_t resp_len;
+ uintptr_t pubkey;
+
+ if ((dst_size == NULL) || (mbox_error == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (fcs_ecdh_request_param.session_id != session_id ||
+ fcs_ecdh_request_param.context_id != context_id) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(src_addr, src_size) ||
+ !is_address_in_ddr_range(dst_addr, *dst_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ resp_len = *dst_size / MBOX_WORD_BYTE;
+
+ /* Prepare command payload */
+ i = 0;
+ /* Crypto header */
+ payload[i] = fcs_ecdh_request_param.session_id;
+ i++;
+ payload[i] = fcs_ecdh_request_param.context_id;
+ i++;
+ payload[i] = fcs_ecdh_request_param.crypto_param_size
+ & FCS_CS_FIELD_SIZE_MASK;
+ payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE
+ | FCS_CS_FIELD_FLAG_FINALIZE)
+ << FCS_CS_FIELD_FLAG_OFFSET;
+ i++;
+ payload[i] = fcs_ecdh_request_param.key_id;
+ i++;
+ /* Crypto parameters */
+ payload[i] = fcs_ecdh_request_param.crypto_param
+ & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+ i++;
+ /* Public key data */
+ pubkey = src_addr;
+ memcpy((uint8_t *) &payload[i], (uint8_t *) pubkey, src_size);
+ i += src_size / MBOX_WORD_BYTE;
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDH_REQUEST,
+ payload, i, CMD_CASUAL, (uint32_t *) dst_addr,
+ &resp_len);
+
+ memset((void *)&fcs_ecdh_request_param, 0,
+ sizeof(fcs_crypto_service_data));
+
+ if (status < 0) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *dst_size = resp_len * MBOX_WORD_BYTE;
+ flush_dcache_range(dst_addr, *dst_size);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
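+/*
+ * AES crypt is split into an init step, which only caches the session,
+ * key and crypto parameters locally, and an update/finalize step, which
+ * builds the mailbox command and issues it asynchronously.
+ */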
+int intel_fcs_aes_crypt_init(uint32_t session_id, uint32_t context_id,
+ uint32_t key_id, uint64_t param_addr,
+ uint32_t param_size, uint32_t *mbox_error)
+{
+ if (mbox_error == NULL) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ memset((void *)&fcs_aes_init_payload, 0U, sizeof(fcs_aes_init_payload));
+
+ fcs_aes_init_payload.session_id = session_id;
+ fcs_aes_init_payload.context_id = context_id;
+ fcs_aes_init_payload.param_size = param_size;
+ fcs_aes_init_payload.key_id = key_id;
+
+ memcpy((uint8_t *) fcs_aes_init_payload.crypto_param,
+ (uint8_t *) param_addr, param_size);
+
+ fcs_aes_init_payload.is_updated = 0;
+
+ *mbox_error = 0;
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_aes_crypt_update_finalize(uint32_t session_id,
+ uint32_t context_id, uint64_t src_addr,
+ uint32_t src_size, uint64_t dst_addr,
+ uint32_t dst_size, uint8_t is_finalised,
+ uint32_t *send_id)
+{
+ int status;
+ int i;
+ uint32_t flag;
+ uint32_t crypto_header;
+ uint32_t fcs_aes_crypt_payload[FCS_AES_CMD_MAX_WORD_SIZE];
+
+ if (fcs_aes_init_payload.session_id != session_id ||
+ fcs_aes_init_payload.context_id != context_id) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if ((!is_8_bytes_aligned(src_addr)) ||
+ (!is_32_bytes_aligned(src_size)) ||
+ (!is_address_in_ddr_range(src_addr, src_size))) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if ((!is_8_bytes_aligned(dst_addr)) ||
+ (!is_32_bytes_aligned(dst_size))) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if ((dst_size > FCS_AES_MAX_DATA_SIZE ||
+ dst_size < FCS_AES_MIN_DATA_SIZE) ||
+ (src_size > FCS_AES_MAX_DATA_SIZE ||
+ src_size < FCS_AES_MIN_DATA_SIZE)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+	/* Prepare crypto header */
+ flag = 0;
+ if (fcs_aes_init_payload.is_updated) {
+ fcs_aes_init_payload.param_size = 0;
+ } else {
+ flag |= FCS_CS_FIELD_FLAG_INIT;
+ }
+
+ if (is_finalised != 0U) {
+ flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+ } else {
+ flag |= FCS_CS_FIELD_FLAG_UPDATE;
+ fcs_aes_init_payload.is_updated = 1;
+ }
+ crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) |
+ fcs_aes_init_payload.param_size;
+
+ i = 0U;
+ fcs_aes_crypt_payload[i] = session_id;
+ i++;
+ fcs_aes_crypt_payload[i] = context_id;
+ i++;
+ fcs_aes_crypt_payload[i] = crypto_header;
+ i++;
+
+ if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+ FCS_CS_FIELD_FLAG_INIT) {
+ fcs_aes_crypt_payload[i] = fcs_aes_init_payload.key_id;
+ i++;
+
+ memcpy((uint8_t *) &fcs_aes_crypt_payload[i],
+ (uint8_t *) fcs_aes_init_payload.crypto_param,
+ fcs_aes_init_payload.param_size);
+
+ i += fcs_aes_init_payload.param_size / MBOX_WORD_BYTE;
+ }
+
+ fcs_aes_crypt_payload[i] = (uint32_t) src_addr;
+ i++;
+ fcs_aes_crypt_payload[i] = src_size;
+ i++;
+ fcs_aes_crypt_payload[i] = (uint32_t) dst_addr;
+ i++;
+ fcs_aes_crypt_payload[i] = dst_size;
+ i++;
+
+ status = mailbox_send_cmd_async(send_id, MBOX_FCS_AES_CRYPT_REQ,
+ fcs_aes_crypt_payload, i,
+ CMD_INDIRECT);
+
+ if (is_finalised != 0U) {
+ memset((void *)&fcs_aes_init_payload, 0,
+ sizeof(fcs_aes_init_payload));
+ }
+
+	if (status < 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
diff --git a/plat/intel/soc/common/soc/socfpga_emac.c b/plat/intel/soc/common/soc/socfpga_emac.c
new file mode 100644
index 0000000..cacfd53
--- /dev/null
+++ b/plat/intel/soc/common/soc/socfpga_emac.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#include "socfpga_emac.h"
+#include "socfpga_reset_manager.h"
+#include "socfpga_system_manager.h"
+
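+/*
+ * Hold the EMACs in reset while the PHY interface selection is programmed
+ * and the FPGA interface to the EMACs is disabled, then release them from
+ * reset.
+ */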
+void socfpga_emac_init(void)
+{
+ mmio_setbits_32(SOCFPGA_RSTMGR(PER0MODRST),
+ RSTMGR_PER0MODRST_EMAC0 |
+ RSTMGR_PER0MODRST_EMAC1 |
+ RSTMGR_PER0MODRST_EMAC2);
+
+ mmio_clrsetbits_32(SOCFPGA_SYSMGR(EMAC_0),
+ PHY_INTF_SEL_MSK, EMAC0_PHY_MODE);
+ mmio_clrsetbits_32(SOCFPGA_SYSMGR(EMAC_1),
+ PHY_INTF_SEL_MSK, EMAC1_PHY_MODE);
+ mmio_clrsetbits_32(SOCFPGA_SYSMGR(EMAC_2),
+ PHY_INTF_SEL_MSK, EMAC2_PHY_MODE);
+
+ mmio_clrbits_32(SOCFPGA_SYSMGR(FPGAINTF_EN_3),
+ FPGAINTF_EN_3_EMAC_MSK(0) |
+ FPGAINTF_EN_3_EMAC_MSK(1) |
+ FPGAINTF_EN_3_EMAC_MSK(2));
+
+ mmio_clrbits_32(SOCFPGA_RSTMGR(PER0MODRST),
+ RSTMGR_PER0MODRST_EMAC0 |
+ RSTMGR_PER0MODRST_EMAC1 |
+ RSTMGR_PER0MODRST_EMAC2);
+}
+
diff --git a/plat/intel/soc/common/soc/socfpga_firewall.c b/plat/intel/soc/common/soc/socfpga_firewall.c
new file mode 100644
index 0000000..515784b
--- /dev/null
+++ b/plat/intel/soc/common/soc/socfpga_firewall.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+
+#include "socfpga_noc.h"
+#include "socfpga_plat_def.h"
+#include "socfpga_system_manager.h"
+
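+/*
+ * Open the L4 peripheral/system firewalls and the SoC-to-FPGA bridge
+ * firewalls so that non-secure masters can access these peripherals.
+ */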
+void enable_nonsecure_access(void)
+{
+ enable_ns_peripheral_access();
+ enable_ns_bridge_access();
+}
+
+void enable_ns_peripheral_access(void)
+{
+ mmio_write_32(SOCFPGA_L4_PER_SCR(NAND_REGISTER), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(NAND_DATA), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(NAND_ECC), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(NAND_READ_ECC), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(NAND_WRITE_ECC),
+ DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_PER_SCR(USB0_REGISTER), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(USB1_REGISTER), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(USB0_ECC), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(USB1_ECC), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_PER_SCR(SPI_MASTER0), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(SPI_MASTER1), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(SPI_SLAVE0), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(SPI_SLAVE1), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_PER_SCR(EMAC0), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(EMAC1), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(EMAC2), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC0RX_ECC), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC0TX_ECC), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC1RX_ECC), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC1TX_ECC), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC2RX_ECC), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC2TX_ECC), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_PER_SCR(SDMMC), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(SDMMC_ECC), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_PER_SCR(GPIO0), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(GPIO1), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_PER_SCR(I2C0), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(I2C1), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(I2C2), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(I2C3), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(I2C4), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_PER_SCR(SP_TIMER1), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_PER_SCR(UART0), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_PER_SCR(UART1), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(DMA_ECC), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(OCRAM_ECC), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(CLK_MGR), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(IO_MGR), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(RST_MGR), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(SYS_MGR), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(OSC0_TIMER), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(OSC1_TIMER), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG0), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG1), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG2), DISABLE_L4_FIREWALL);
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG3), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(DAP), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(L4_NOC_PROBES), DISABLE_L4_FIREWALL);
+
+ mmio_write_32(SOCFPGA_L4_SYS_SCR(L4_NOC_QOS), DISABLE_L4_FIREWALL);
+
+#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
+ enable_ns_ocram_access();
+ mmio_write_32(SOCFPGA_SYSMGR(SDMMC), SYSMGR_SDMMC_DRVSEL(3));
+#endif
+
+}
+
+void enable_ns_ocram_access(void)
+{
+ mmio_clrbits_32(SOCFPGA_CCU_NOC(CPU0, RAM0),
+ SOCFPGA_CCU_NOC_ADMASK_P_MASK | SOCFPGA_CCU_NOC_ADMASK_NS_MASK);
+ mmio_clrbits_32(SOCFPGA_CCU_NOC(IOM, RAM0),
+ SOCFPGA_CCU_NOC_ADMASK_P_MASK | SOCFPGA_CCU_NOC_ADMASK_NS_MASK);
+}
+
+void enable_ns_bridge_access(void)
+{
+ mmio_write_32(SOCFPGA_SOC2FPGA_SCR_REG_BASE, DISABLE_BRIDGE_FIREWALL);
+ mmio_write_32(SOCFPGA_LWSOC2FPGA_SCR_REG_BASE, DISABLE_BRIDGE_FIREWALL);
+}
+
+void enable_ocram_firewall(void)
+{
+ mmio_setbits_32(SOCFPGA_CCU_NOC(CPU0, RAM0),
+ SOCFPGA_CCU_NOC_ADMASK_P_MASK | SOCFPGA_CCU_NOC_ADMASK_NS_MASK);
+ mmio_setbits_32(SOCFPGA_CCU_NOC(IOM, RAM0),
+ SOCFPGA_CCU_NOC_ADMASK_P_MASK | SOCFPGA_CCU_NOC_ADMASK_NS_MASK);
+}
diff --git a/plat/intel/soc/common/soc/socfpga_handoff.c b/plat/intel/soc/common/soc/socfpga_handoff.c
new file mode 100644
index 0000000..4bb3a96
--- /dev/null
+++ b/plat/intel/soc/common/soc/socfpga_handoff.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include "socfpga_handoff.h"
+
+#define SWAP_UINT32(x) (((x) >> 24) | (((x) & 0x00FF0000) >> 8) | \
+ (((x) & 0x0000FF00) << 8) | ((x) << 24))
+
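+/*
+ * Copy the handoff block from PLAT_HANDOFF_OFFSET into the caller's
+ * buffer, convert it from big endian and validate the magic value of
+ * each section.
+ */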
+int socfpga_get_handoff(handoff *reverse_hoff_ptr)
+{
+ int i;
+ uint32_t *buffer;
+ handoff *handoff_ptr = (handoff *) PLAT_HANDOFF_OFFSET;
+
+ memcpy(reverse_hoff_ptr, handoff_ptr, sizeof(handoff));
+ buffer = (uint32_t *)reverse_hoff_ptr;
+
+ /* convert big endian to little endian */
+ for (i = 0; i < sizeof(handoff) / 4; i++)
+ buffer[i] = SWAP_UINT32(buffer[i]);
+
+ if (reverse_hoff_ptr->header_magic != HANDOFF_MAGIC_HEADER)
+ return -1;
+ if (reverse_hoff_ptr->pinmux_sel_magic != HANDOFF_MAGIC_PINMUX_SEL)
+ return -1;
+ if (reverse_hoff_ptr->pinmux_io_magic != HANDOFF_MAGIC_IOCTLR)
+ return -1;
+ if (reverse_hoff_ptr->pinmux_fpga_magic != HANDOFF_MAGIC_FPGA)
+ return -1;
+ if (reverse_hoff_ptr->pinmux_delay_magic != HANDOFF_MAGIC_IODELAY)
+ return -1;
+
+ return 0;
+}
diff --git a/plat/intel/soc/common/soc/socfpga_mailbox.c b/plat/intel/soc/common/soc/socfpga_mailbox.c
new file mode 100644
index 0000000..79817e6
--- /dev/null
+++ b/plat/intel/soc/common/soc/socfpga_mailbox.c
@@ -0,0 +1,647 @@
+/*
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+
+#include "socfpga_mailbox.h"
+#include "socfpga_sip_svc.h"
+
+static mailbox_payload_t mailbox_resp_payload;
+static mailbox_container_t mailbox_resp_ctr = {0, 0, &mailbox_resp_payload};
+
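+/*
+ * The mailbox command and response rings are circular buffers indexed by
+ * CIN/COUT and RIN/ROUT; the command ring is treated as full when
+ * advancing CIN would catch up with COUT.
+ */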
+static bool is_mailbox_cmdbuf_full(uint32_t cin)
+{
+ uint32_t cout = mmio_read_32(MBOX_OFFSET + MBOX_COUT);
+
+ return (((cin + 1U) % MBOX_CMD_BUFFER_SIZE) == cout);
+}
+
+static bool is_mailbox_cmdbuf_empty(uint32_t cin)
+{
+ uint32_t cout = mmio_read_32(MBOX_OFFSET + MBOX_COUT);
+
+ return (((cout + 1U) % MBOX_CMD_BUFFER_SIZE) == cin);
+}
+
+static int wait_for_mailbox_cmdbuf_empty(uint32_t cin)
+{
+ unsigned int timeout = 200U;
+
+ do {
+ if (is_mailbox_cmdbuf_empty(cin)) {
+ break;
+ }
+ mdelay(10U);
+ } while (--timeout != 0U);
+
+ if (timeout == 0U) {
+ return MBOX_TIMEOUT;
+ }
+
+ return MBOX_RET_OK;
+}
+
+static int write_mailbox_cmd_buffer(uint32_t *cin, uint32_t cout,
+ uint32_t data,
+ bool *is_doorbell_triggered)
+{
+ unsigned int timeout = 100U;
+
+ do {
+ if (is_mailbox_cmdbuf_full(*cin)) {
+ if (!(*is_doorbell_triggered)) {
+ mmio_write_32(MBOX_OFFSET +
+ MBOX_DOORBELL_TO_SDM, 1U);
+ *is_doorbell_triggered = true;
+ }
+ mdelay(10U);
+ } else {
+ mmio_write_32(MBOX_ENTRY_TO_ADDR(CMD, (*cin)++), data);
+ *cin %= MBOX_CMD_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_CIN, *cin);
+ break;
+ }
+ } while (--timeout != 0U);
+
+ if (timeout == 0U) {
+ return MBOX_TIMEOUT;
+ }
+
+ if (*is_doorbell_triggered) {
+ int ret = wait_for_mailbox_cmdbuf_empty(*cin);
+ return ret;
+ }
+
+ return MBOX_RET_OK;
+}
+
+static int fill_mailbox_circular_buffer(uint32_t header_cmd, uint32_t *args,
+ unsigned int len)
+{
+ uint32_t sdm_read_offset, cmd_free_offset;
+ unsigned int i;
+ int ret;
+ bool is_doorbell_triggered = false;
+
+ cmd_free_offset = mmio_read_32(MBOX_OFFSET + MBOX_CIN);
+ sdm_read_offset = mmio_read_32(MBOX_OFFSET + MBOX_COUT);
+
+ ret = write_mailbox_cmd_buffer(&cmd_free_offset, sdm_read_offset,
+ header_cmd, &is_doorbell_triggered);
+ if (ret != 0) {
+ goto restart_mailbox;
+ }
+
+ for (i = 0U; i < len; i++) {
+ is_doorbell_triggered = false;
+ ret = write_mailbox_cmd_buffer(&cmd_free_offset,
+ sdm_read_offset, args[i],
+ &is_doorbell_triggered);
+ if (ret != 0) {
+ goto restart_mailbox;
+ }
+ }
+
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_TO_SDM, 1U);
+
+ return MBOX_RET_OK;
+
+restart_mailbox:
+	/*
+	 * Attempt to restart the mailbox if the driver was not able to write
+	 * into the mailbox command buffer.
+	 */
+ if (MBOX_CMD_MASK(header_cmd) != MBOX_CMD_RESTART) {
+ INFO("Mailbox timed out: Attempting mailbox reset\n");
+ ret = mailbox_init();
+
+ if (ret == MBOX_TIMEOUT) {
+			INFO("Error: Mailbox failed to restart\n");
+ }
+ }
+
+ return MBOX_TIMEOUT;
+}
+
+int mailbox_read_response(unsigned int *job_id, uint32_t *response,
+ unsigned int *resp_len)
+{
+ uint32_t rin;
+ uint32_t rout;
+ uint32_t resp_data;
+ unsigned int ret_resp_len;
+
+ if (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) == 1U) {
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0U);
+ }
+
+ rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+ rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT);
+
+ if (rout != rin) {
+ resp_data = mmio_read_32(MBOX_ENTRY_TO_ADDR(RESP, (rout)++));
+
+ rout %= MBOX_RESP_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
+
+ if (MBOX_RESP_CLIENT_ID(resp_data) != MBOX_ATF_CLIENT_ID) {
+ return MBOX_WRONG_ID;
+ }
+
+ *job_id = MBOX_RESP_JOB_ID(resp_data);
+
+ ret_resp_len = MBOX_RESP_LEN(resp_data);
+
+ if (iterate_resp(ret_resp_len, response, resp_len)
+ != MBOX_RET_OK) {
+ return MBOX_TIMEOUT;
+ }
+
+ if (MBOX_RESP_ERR(resp_data) > 0U) {
+ INFO("Error in response: %x\n", resp_data);
+ return -MBOX_RESP_ERR(resp_data);
+ }
+
+ return MBOX_RET_OK;
+ }
+ return MBOX_NO_RESPONSE;
+}
+
+int mailbox_read_response_async(unsigned int *job_id, uint32_t *header,
+ uint32_t *response, unsigned int *resp_len,
+ uint8_t ignore_client_id)
+{
+ uint32_t rin;
+ uint32_t rout;
+ uint32_t resp_data;
+ uint32_t ret_resp_len = 0;
+ uint8_t is_done = 0;
+
+ if ((mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) != 0) {
+ ret_resp_len = MBOX_RESP_LEN(
+ mailbox_resp_ctr.payload->header) -
+ mailbox_resp_ctr.index;
+ }
+
+ if (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) == 1U) {
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0U);
+ }
+
+ rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+ rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT);
+
+ while (rout != rin && !is_done) {
+
+ resp_data = mmio_read_32(MBOX_ENTRY_TO_ADDR(RESP, (rout)++));
+
+ rout %= MBOX_RESP_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
+ rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+
+ if ((mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) != 0) {
+ mailbox_resp_ctr.payload->data[mailbox_resp_ctr.index] = resp_data;
+ mailbox_resp_ctr.index++;
+ ret_resp_len--;
+ } else {
+ if (!ignore_client_id) {
+ if (MBOX_RESP_CLIENT_ID(resp_data) != MBOX_ATF_CLIENT_ID) {
+ *resp_len = 0;
+ return MBOX_WRONG_ID;
+ }
+ }
+
+ *job_id = MBOX_RESP_JOB_ID(resp_data);
+ ret_resp_len = MBOX_RESP_LEN(resp_data);
+ mailbox_resp_ctr.payload->header = resp_data;
+ mailbox_resp_ctr.flag |= MBOX_PAYLOAD_FLAG_BUSY;
+ }
+
+ if (ret_resp_len == 0) {
+ is_done = 1;
+ }
+ }
+
+ if (is_done != 0) {
+
+ /* copy header data to input address if applicable */
+ if (header != 0) {
+ *header = mailbox_resp_ctr.payload->header;
+ }
+
+ /* copy response data to input buffer if applicable */
+ ret_resp_len = MBOX_RESP_LEN(mailbox_resp_ctr.payload->header);
+ if ((ret_resp_len > 0) && (response != NULL) && (resp_len != NULL)) {
+ if (*resp_len > ret_resp_len) {
+ *resp_len = ret_resp_len;
+ }
+
+ memcpy((uint8_t *) response,
+ (uint8_t *) mailbox_resp_ctr.payload->data,
+ *resp_len * MBOX_WORD_BYTE);
+ }
+
+ /* reset async response param */
+ mailbox_resp_ctr.index = 0;
+ mailbox_resp_ctr.flag = 0;
+
+ if (MBOX_RESP_ERR(mailbox_resp_ctr.payload->header) > 0U) {
+ INFO("Error in async response: %x\n",
+ mailbox_resp_ctr.payload->header);
+ return -MBOX_RESP_ERR(mailbox_resp_ctr.payload->header);
+ }
+
+ return MBOX_RET_OK;
+ }
+
+ *resp_len = 0;
+ return (mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) ? MBOX_BUSY : MBOX_NO_RESPONSE;
+}
+
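+/*
+ * Wait for the SDM doorbell, then drain the response ring looking for an
+ * entry that matches this client and job ID. Urgent commands are
+ * acknowledged through the status register rather than the response ring.
+ */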
+int mailbox_poll_response(uint32_t job_id, uint32_t urgent, uint32_t *response,
+ unsigned int *resp_len)
+{
+ unsigned int timeout = 40U;
+ unsigned int sdm_loop = 255U;
+ unsigned int ret_resp_len;
+ uint32_t rin;
+ uint32_t rout;
+ uint32_t resp_data;
+
+ while (sdm_loop != 0U) {
+
+ do {
+ if (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM)
+ == 1U) {
+ break;
+ }
+ mdelay(10U);
+ } while (--timeout != 0U);
+
+ if (timeout == 0U) {
+ break;
+ }
+
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0U);
+
+ if ((urgent & 1U) != 0U) {
+ mdelay(5U);
+ if ((mmio_read_32(MBOX_OFFSET + MBOX_STATUS) &
+ MBOX_STATUS_UA_MASK) ^
+ (urgent & MBOX_STATUS_UA_MASK)) {
+ mmio_write_32(MBOX_OFFSET + MBOX_URG, 0U);
+ return MBOX_RET_OK;
+ }
+
+ mmio_write_32(MBOX_OFFSET + MBOX_URG, 0U);
+			INFO("Error: Mailbox did not get UA\n");
+ return MBOX_RET_ERROR;
+ }
+
+ rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+ rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT);
+
+ while (rout != rin) {
+ resp_data = mmio_read_32(MBOX_ENTRY_TO_ADDR(RESP,
+ (rout)++));
+
+ rout %= MBOX_RESP_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
+
+ if (MBOX_RESP_CLIENT_ID(resp_data) != MBOX_ATF_CLIENT_ID
+ || MBOX_RESP_JOB_ID(resp_data) != job_id) {
+ continue;
+ }
+
+ ret_resp_len = MBOX_RESP_LEN(resp_data);
+
+ if (iterate_resp(ret_resp_len, response, resp_len)
+ != MBOX_RET_OK) {
+ return MBOX_TIMEOUT;
+ }
+
+ if (MBOX_RESP_ERR(resp_data) > 0U) {
+ INFO("Error in response: %x\n", resp_data);
+ return -MBOX_RESP_ERR(resp_data);
+ }
+
+ return MBOX_RET_OK;
+ }
+
+ sdm_loop--;
+ }
+
+ INFO("Timed out waiting for SDM\n");
+ return MBOX_TIMEOUT;
+}
+
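+/*
+ * Drain mbox_resp_len words of response data, optionally copying them
+ * into resp_buf, and wait (with a timeout) for the SDM to publish more
+ * words if the ring runs dry mid-response.
+ */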
+int iterate_resp(uint32_t mbox_resp_len, uint32_t *resp_buf,
+ unsigned int *resp_len)
+{
+ unsigned int timeout, total_resp_len = 0U;
+ uint32_t resp_data;
+ uint32_t rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+ uint32_t rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT);
+
+ while (mbox_resp_len > 0U) {
+ timeout = 100U;
+ mbox_resp_len--;
+ resp_data = mmio_read_32(MBOX_ENTRY_TO_ADDR(RESP, (rout)++));
+
+ if ((resp_buf != NULL) && (resp_len != NULL)
+ && (*resp_len != 0U)) {
+ *(resp_buf + total_resp_len)
+ = resp_data;
+ *resp_len = *resp_len - 1;
+ total_resp_len++;
+ }
+ rout %= MBOX_RESP_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
+
+ do {
+ rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+ if (rout == rin) {
+ mdelay(10U);
+ } else {
+ break;
+ }
+ timeout--;
+ } while ((mbox_resp_len > 0U) && (timeout != 0U));
+
+ if (timeout == 0U) {
+ INFO("Timed out waiting for SDM\n");
+ return MBOX_TIMEOUT;
+ }
+ }
+
+	if (resp_len != NULL) {
+		*resp_len = total_resp_len;
+	}
+
+ return MBOX_RET_OK;
+}
+
+int mailbox_send_cmd_async_ext(uint32_t header_cmd, uint32_t *args,
+ unsigned int len)
+{
+ return fill_mailbox_circular_buffer(header_cmd, args, len);
+}
+
+int mailbox_send_cmd_async(uint32_t *job_id, uint32_t cmd, uint32_t *args,
+ unsigned int len, unsigned int indirect)
+{
+ int status;
+
+ status = fill_mailbox_circular_buffer(
+ MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) |
+ MBOX_JOB_ID_CMD(*job_id) |
+ MBOX_CMD_LEN_CMD(len) |
+ MBOX_INDIRECT(indirect) |
+ cmd, args, len);
+ if (status < 0) {
+ return status;
+ }
+
+ *job_id = (*job_id + 1U) % MBOX_MAX_IND_JOB_ID;
+
+ return MBOX_RET_OK;
+}
+
+int mailbox_send_cmd(uint32_t job_id, uint32_t cmd, uint32_t *args,
+ unsigned int len, uint32_t urgent, uint32_t *response,
+ unsigned int *resp_len)
+{
+ int status = 0;
+
+ if (urgent != 0U) {
+ urgent |= mmio_read_32(MBOX_OFFSET + MBOX_STATUS) &
+ MBOX_STATUS_UA_MASK;
+ mmio_write_32(MBOX_OFFSET + MBOX_URG, cmd);
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_TO_SDM, 1U);
+	} else {
+ status = fill_mailbox_circular_buffer(
+ MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) |
+ MBOX_JOB_ID_CMD(job_id) |
+ MBOX_CMD_LEN_CMD(len) |
+ cmd, args, len);
+ }
+
+ if (status != 0) {
+ return status;
+ }
+
+ status = mailbox_poll_response(job_id, urgent, response, resp_len);
+
+ return status;
+}
+
+void mailbox_clear_response(void)
+{
+ mmio_write_32(MBOX_OFFSET + MBOX_ROUT,
+ mmio_read_32(MBOX_OFFSET + MBOX_RIN));
+}
+
+void mailbox_set_int(uint32_t interrupt)
+{
+	mmio_write_32(MBOX_OFFSET + MBOX_INT, MBOX_COE_BIT(interrupt) |
+			MBOX_UAE_BIT(interrupt));
+}
+
+void mailbox_set_qspi_open(void)
+{
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_OPEN, NULL, 0U,
+ CMD_CASUAL, NULL, NULL);
+}
+
+void mailbox_set_qspi_direct(void)
+{
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_DIRECT, NULL, 0U,
+ CMD_CASUAL, NULL, NULL);
+}
+
+void mailbox_set_qspi_close(void)
+{
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_CLOSE, NULL, 0U,
+ CMD_CASUAL, NULL, NULL);
+}
+
+void mailbox_qspi_set_cs(uint32_t device_select)
+{
+ uint32_t cs_setting;
+
+ /* QSPI device select settings at 31:28 */
+ cs_setting = (device_select << 28);
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_SET_CS, &cs_setting,
+ 1U, CMD_CASUAL, NULL, NULL);
+}
+
+void mailbox_hps_qspi_enable(void)
+{
+ mailbox_set_qspi_open();
+ mailbox_set_qspi_direct();
+}
+
+void mailbox_reset_cold(void)
+{
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_REBOOT_HPS, NULL, 0U,
+ CMD_CASUAL, NULL, NULL);
+}
+
+int mailbox_rsu_get_spt_offset(uint32_t *resp_buf, unsigned int resp_buf_len)
+{
+ return mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_SUBPARTITION_TABLE,
+ NULL, 0U, CMD_CASUAL, resp_buf,
+ &resp_buf_len);
+}
+
+struct rsu_status_info {
+ uint64_t current_image;
+ uint64_t fail_image;
+ uint32_t state;
+ uint32_t version;
+ uint32_t error_location;
+ uint32_t error_details;
+ uint32_t retry_counter;
+};
+
+int mailbox_rsu_status(uint32_t *resp_buf, unsigned int resp_buf_len)
+{
+ int ret;
+ struct rsu_status_info *info = (struct rsu_status_info *)resp_buf;
+
+ info->retry_counter = ~0U;
+
+ ret = mailbox_send_cmd(MBOX_JOB_ID, MBOX_RSU_STATUS, NULL, 0U,
+ CMD_CASUAL, resp_buf,
+ &resp_buf_len);
+
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (info->retry_counter != ~0U) {
+ if ((info->version & RSU_VERSION_ACMF_MASK) == 0U) {
+ info->version |= RSU_VERSION_ACMF;
+ }
+ }
+
+ return ret;
+}
+
+int mailbox_rsu_update(uint32_t *flash_offset)
+{
+ return mailbox_send_cmd(MBOX_JOB_ID, MBOX_RSU_UPDATE,
+ flash_offset, 2U,
+ CMD_CASUAL, NULL, NULL);
+}
+
+int mailbox_hps_stage_notify(uint32_t execution_stage)
+{
+ return mailbox_send_cmd(MBOX_JOB_ID, MBOX_HPS_STAGE_NOTIFY,
+ &execution_stage, 1U, CMD_CASUAL,
+ NULL, NULL);
+}
+
+int mailbox_init(void)
+{
+ int status;
+
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE |
+ MBOX_INT_FLAG_UAE);
+ mmio_write_32(MBOX_OFFSET + MBOX_URG, 0U);
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0U);
+
+ status = mailbox_send_cmd(0U, MBOX_CMD_RESTART, NULL, 0U,
+ CMD_URGENT, NULL, NULL);
+
+ if (status != 0) {
+ return status;
+ }
+
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE |
+ MBOX_INT_FLAG_UAE);
+
+ return MBOX_RET_OK;
+}
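
A hedged sketch of the expected call order during early platform setup: mailbox_init() must succeed before any other mailbox traffic is attempted. The example_* name is hypothetical and panic() is the TF-A helper from common/debug.h.

#include <common/debug.h>
#include "socfpga_mailbox.h"

static void example_early_mailbox_setup(void)
{
        /* Restart the SDM command queue before issuing any other command. */
        if (mailbox_init() != MBOX_RET_OK) {
                /* The SDM did not acknowledge the urgent RESTART command. */
                panic();
        }
}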
+
+int intel_mailbox_get_config_status(uint32_t cmd, bool init_done)
+{
+ int status;
+ uint32_t res, response[6];
+ unsigned int resp_len = ARRAY_SIZE(response);
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, cmd, NULL, 0U, CMD_CASUAL,
+ response, &resp_len);
+
+ if (status < 0) {
+ return status;
+ }
+
+ res = response[RECONFIG_STATUS_STATE];
+ if ((res != 0U) && (res != MBOX_CFGSTAT_STATE_CONFIG)) {
+ return res;
+ }
+
+ res = response[RECONFIG_STATUS_PIN_STATUS];
+ if ((res & PIN_STATUS_NSTATUS) == 0U) {
+ return MBOX_CFGSTAT_STATE_ERROR_HARDWARE;
+ }
+
+ res = response[RECONFIG_STATUS_SOFTFUNC_STATUS];
+ if ((res & SOFTFUNC_STATUS_SEU_ERROR) != 0U) {
+ return MBOX_CFGSTAT_STATE_ERROR_HARDWARE;
+ }
+
+ if ((res & SOFTFUNC_STATUS_CONF_DONE) == 0U) {
+ return MBOX_CFGSTAT_STATE_CONFIG;
+ }
+
+ if (init_done && (res & SOFTFUNC_STATUS_INIT_DONE) == 0U) {
+ return MBOX_CFGSTAT_STATE_CONFIG;
+ }
+
+ return MBOX_RET_OK;
+}
+
+int intel_mailbox_is_fpga_not_ready(void)
+{
+ int ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS, true);
+
+ if ((ret != MBOX_RET_OK) && (ret != MBOX_CFGSTAT_STATE_CONFIG)) {
+ ret = intel_mailbox_get_config_status(MBOX_CONFIG_STATUS,
+ false);
+ }
+
+ return ret;
+}
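
A hedged sketch of how a caller could wait on this helper until the fabric is usable; the retry budget and the 1 ms step are arbitrary illustration values, not part of the driver.

#include <errno.h>
#include <drivers/delay_timer.h>
#include "socfpga_mailbox.h"

static int example_wait_fpga_ready(void)
{
        /* Roughly one second, polled in 1 ms steps (illustrative only). */
        for (int retries = 0; retries < 1000; retries++) {
                if (intel_mailbox_is_fpga_not_ready() == MBOX_RET_OK) {
                        return 0;       /* configured and initialized */
                }
                mdelay(1U);
        }

        return -ETIMEDOUT;
}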
+
+int mailbox_hwmon_readtemp(uint32_t chan, uint32_t *resp_buf)
+{
+ /* The temperature reading is returned as a single response word. */
+ unsigned int resp_len = 1U;
+
+ return mailbox_send_cmd(MBOX_JOB_ID, MBOX_HWMON_READTEMP, &chan, 1U,
+ CMD_CASUAL, resp_buf,
+ &resp_len);
+}
+
+int mailbox_hwmon_readvolt(uint32_t chan, uint32_t *resp_buf)
+{
+ /* The voltage reading is returned as a single response word. */
+ unsigned int resp_len = 1U;
+
+ return mailbox_send_cmd(MBOX_JOB_ID, MBOX_HWMON_READVOLT, &chan, 1U,
+ CMD_CASUAL, resp_buf,
+ &resp_len);
+}
diff --git a/plat/intel/soc/common/soc/socfpga_reset_manager.c b/plat/intel/soc/common/soc/socfpga_reset_manager.c
new file mode 100644
index 0000000..bb4efab
--- /dev/null
+++ b/plat/intel/soc/common/soc/socfpga_reset_manager.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include "socfpga_f2sdram_manager.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_reset_manager.h"
+#include "socfpga_system_manager.h"
+
+
+void deassert_peripheral_reset(void)
+{
+ mmio_clrbits_32(SOCFPGA_RSTMGR(PER1MODRST),
+ RSTMGR_FIELD(PER1, WATCHDOG0) |
+ RSTMGR_FIELD(PER1, WATCHDOG1) |
+ RSTMGR_FIELD(PER1, WATCHDOG2) |
+ RSTMGR_FIELD(PER1, WATCHDOG3) |
+ RSTMGR_FIELD(PER1, L4SYSTIMER0) |
+ RSTMGR_FIELD(PER1, L4SYSTIMER1) |
+ RSTMGR_FIELD(PER1, SPTIMER0) |
+ RSTMGR_FIELD(PER1, SPTIMER1) |
+ RSTMGR_FIELD(PER1, I2C0) |
+ RSTMGR_FIELD(PER1, I2C1) |
+ RSTMGR_FIELD(PER1, I2C2) |
+ RSTMGR_FIELD(PER1, I2C3) |
+ RSTMGR_FIELD(PER1, I2C4) |
+ RSTMGR_FIELD(PER1, UART0) |
+ RSTMGR_FIELD(PER1, UART1) |
+ RSTMGR_FIELD(PER1, GPIO0) |
+ RSTMGR_FIELD(PER1, GPIO1));
+
+ mmio_clrbits_32(SOCFPGA_RSTMGR(PER0MODRST),
+ RSTMGR_FIELD(PER0, EMAC0OCP) |
+ RSTMGR_FIELD(PER0, EMAC1OCP) |
+ RSTMGR_FIELD(PER0, EMAC2OCP) |
+ RSTMGR_FIELD(PER0, USB0OCP) |
+ RSTMGR_FIELD(PER0, USB1OCP) |
+ RSTMGR_FIELD(PER0, NANDOCP) |
+ RSTMGR_FIELD(PER0, SDMMCOCP) |
+ RSTMGR_FIELD(PER0, DMAOCP));
+
+ mmio_clrbits_32(SOCFPGA_RSTMGR(PER0MODRST),
+ RSTMGR_FIELD(PER0, EMAC0) |
+ RSTMGR_FIELD(PER0, EMAC1) |
+ RSTMGR_FIELD(PER0, EMAC2) |
+ RSTMGR_FIELD(PER0, USB0) |
+ RSTMGR_FIELD(PER0, USB1) |
+ RSTMGR_FIELD(PER0, NAND) |
+ RSTMGR_FIELD(PER0, SDMMC) |
+ RSTMGR_FIELD(PER0, DMA) |
+ RSTMGR_FIELD(PER0, SPIM0) |
+ RSTMGR_FIELD(PER0, SPIM1) |
+ RSTMGR_FIELD(PER0, SPIS0) |
+ RSTMGR_FIELD(PER0, SPIS1) |
+ RSTMGR_FIELD(PER0, EMACPTP) |
+ RSTMGR_FIELD(PER0, DMAIF0) |
+ RSTMGR_FIELD(PER0, DMAIF1) |
+ RSTMGR_FIELD(PER0, DMAIF2) |
+ RSTMGR_FIELD(PER0, DMAIF3) |
+ RSTMGR_FIELD(PER0, DMAIF4) |
+ RSTMGR_FIELD(PER0, DMAIF5) |
+ RSTMGR_FIELD(PER0, DMAIF6) |
+ RSTMGR_FIELD(PER0, DMAIF7));
+
+#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX
+ mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+ RSTMGR_FIELD(BRG, MPFE));
+#endif
+}
+
+void config_hps_hs_before_warm_reset(void)
+{
+ uint32_t or_mask = 0;
+
+ or_mask |= RSTMGR_HDSKEN_SDRSELFREFEN;
+ or_mask |= RSTMGR_HDSKEN_FPGAHSEN;
+ or_mask |= RSTMGR_HDSKEN_ETRSTALLEN;
+ or_mask |= RSTMGR_HDSKEN_L2FLUSHEN;
+ or_mask |= RSTMGR_HDSKEN_L3NOC_DBG;
+ or_mask |= RSTMGR_HDSKEN_DEBUG_L3NOC;
+
+ mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), or_mask);
+}
+
+static int poll_idle_status(uint32_t addr, uint32_t mask, uint32_t match)
+{
+ int time_out = 300;
+
+ while (time_out--) {
+ if ((mmio_read_32(addr) & mask) == match) {
+ return 0;
+ }
+ udelay(1000);
+ }
+ return -ETIMEDOUT;
+}
+
+static void socfpga_s2f_bridge_mask(uint32_t mask,
+ uint32_t *brg_mask,
+ uint32_t *noc_mask)
+{
+ *brg_mask = 0;
+ *noc_mask = 0;
+
+ if ((mask & SOC2FPGA_MASK) != 0U) {
+ *brg_mask |= RSTMGR_FIELD(BRG, SOC2FPGA);
+ *noc_mask |= IDLE_DATA_SOC2FPGA;
+ }
+
+ if ((mask & LWHPS2FPGA_MASK) != 0U) {
+ *brg_mask |= RSTMGR_FIELD(BRG, LWHPS2FPGA);
+ *noc_mask |= IDLE_DATA_LWSOC2FPGA;
+ }
+}
+
+static void socfpga_f2s_bridge_mask(uint32_t mask,
+ uint32_t *brg_mask,
+ uint32_t *f2s_idlereq,
+ uint32_t *f2s_force_drain,
+ uint32_t *f2s_en,
+ uint32_t *f2s_idleack,
+ uint32_t *f2s_respempty)
+{
+ *brg_mask = 0;
+ *f2s_idlereq = 0;
+ *f2s_force_drain = 0;
+ *f2s_en = 0;
+ *f2s_idleack = 0;
+ *f2s_respempty = 0;
+
+#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
+ if ((mask & FPGA2SOC_MASK) != 0U) {
+ *brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC);
+ }
+ if ((mask & F2SDRAM0_MASK) != 0U) {
+ *brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM0);
+ *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ;
+ *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN;
+ *f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE;
+ *f2s_idleack |= FLAGINTSTATUS_F2SDRAM0_IDLEACK;
+ *f2s_respempty |= FLAGINTSTATUS_F2SDRAM0_RESPEMPTY;
+ }
+ if ((mask & F2SDRAM1_MASK) != 0U) {
+ *brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM1);
+ *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM1_IDLEREQ;
+ *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM1_FORCE_DRAIN;
+ *f2s_en |= FLAGOUTSETCLR_F2SDRAM1_ENABLE;
+ *f2s_idleack |= FLAGINTSTATUS_F2SDRAM1_IDLEACK;
+ *f2s_respempty |= FLAGINTSTATUS_F2SDRAM1_RESPEMPTY;
+ }
+ if ((mask & F2SDRAM2_MASK) != 0U) {
+ *brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM2);
+ *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM2_IDLEREQ;
+ *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM2_FORCE_DRAIN;
+ *f2s_en |= FLAGOUTSETCLR_F2SDRAM2_ENABLE;
+ *f2s_idleack |= FLAGINTSTATUS_F2SDRAM2_IDLEACK;
+ *f2s_respempty |= FLAGINTSTATUS_F2SDRAM2_RESPEMPTY;
+ }
+#else
+ if ((mask & FPGA2SOC_MASK) != 0U) {
+ *brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC);
+ *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ;
+ *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN;
+ *f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE;
+ *f2s_idleack |= FLAGINTSTATUS_F2SDRAM0_IDLEACK;
+ *f2s_respempty |= FLAGINTSTATUS_F2SDRAM0_RESPEMPTY;
+ }
+#endif
+}
+
+int socfpga_bridges_enable(uint32_t mask)
+{
+ int ret = 0;
+ uint32_t brg_mask = 0;
+ uint32_t noc_mask = 0;
+ uint32_t f2s_idlereq = 0;
+ uint32_t f2s_force_drain = 0;
+ uint32_t f2s_en = 0;
+ uint32_t f2s_idleack = 0;
+ uint32_t f2s_respempty = 0;
+
+ /* Enable s2f bridge */
+ socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask);
+ if (brg_mask != 0U) {
+ /* Clear idle request */
+ mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_CLR),
+ noc_mask);
+
+ /* De-assert all bridges */
+ mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
+
+ /* Wait until idle ack becomes 0 */
+ ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
+ noc_mask, 0);
+ if (ret < 0) {
+ ERROR("S2F bridge enable: "
+ "Timeout waiting for idle ack\n");
+ }
+ }
+
+ /* Enable f2s bridge */
+ socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq,
+ &f2s_force_drain, &f2s_en,
+ &f2s_idleack, &f2s_respempty);
+ if (brg_mask != 0U) {
+ mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
+
+ mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+ f2s_idlereq);
+
+ ret = poll_idle_status(SOCFPGA_F2SDRAMMGR(
+ SIDEBANDMGR_FLAGINSTATUS0), f2s_idleack, 0);
+ if (ret < 0) {
+ ERROR("F2S bridge enable: "
+ "Timeout waiting for idle ack\n");
+ }
+
+ mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+ f2s_force_drain);
+ udelay(5);
+
+ mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+ f2s_en);
+ udelay(5);
+ }
+
+ return ret;
+}
+
+int socfpga_bridges_disable(uint32_t mask)
+{
+ int ret = 0;
+ int timeout = 300;
+ uint32_t brg_mask = 0;
+ uint32_t noc_mask = 0;
+ uint32_t f2s_idlereq = 0;
+ uint32_t f2s_force_drain = 0;
+ uint32_t f2s_en = 0;
+ uint32_t f2s_idleack = 0;
+ uint32_t f2s_respempty = 0;
+
+ /* Disable s2f bridge */
+ socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask);
+ if (brg_mask != 0U) {
+ mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_SET),
+ noc_mask);
+
+ mmio_write_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 1);
+
+ ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
+ noc_mask, noc_mask);
+ if (ret < 0) {
+ ERROR("S2F Bridge disable: "
+ "Timeout waiting for idle ack\n");
+ }
+
+ ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLESTATUS),
+ noc_mask, noc_mask);
+ if (ret < 0) {
+ ERROR("S2F Bridge disable: "
+ "Timeout waiting for idle status\n");
+ }
+
+ mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
+
+ mmio_write_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 0);
+ }
+
+ /* Disable f2s bridge */
+ socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq,
+ &f2s_force_drain, &f2s_en,
+ &f2s_idleack, &f2s_respempty);
+ if (brg_mask != 0U) {
+ mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN),
+ RSTMGR_HDSKEN_FPGAHSEN);
+
+ mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+ RSTMGR_HDSKREQ_FPGAHSREQ);
+
+ poll_idle_status(SOCFPGA_RSTMGR(HDSKACK),
+ RSTMGR_HDSKACK_FPGAHSACK_MASK,
+ RSTMGR_HDSKACK_FPGAHSACK_MASK);
+
+ mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+ f2s_en);
+ udelay(5);
+
+ mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+ f2s_force_drain);
+ udelay(5);
+
+ do {
+ /* Read response queue status to ensure it is empty */
+ uint32_t idle_status;
+
+ idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR(
+ SIDEBANDMGR_FLAGINSTATUS0));
+ if ((idle_status & f2s_respempty) != 0U) {
+ idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR(
+ SIDEBANDMGR_FLAGINSTATUS0));
+ if ((idle_status & f2s_respempty) != 0U) {
+ break;
+ }
+ }
+ udelay(1000);
+ } while (timeout-- > 0);
+
+#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
+ /* Software must never assert the FPGA2SOC reset bit in BRGMODRST */
+ mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+ brg_mask & ~RSTMGR_FIELD(BRG, FPGA2SOC));
+#else
+ mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+ brg_mask);
+#endif
+ mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+ RSTMGR_HDSKEQ_FPGAHSREQ);
+
+ mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0),
+ f2s_idlereq);
+ }
+
+ return ret;
+}
diff --git a/plat/intel/soc/common/socfpga_delay_timer.c b/plat/intel/soc/common/socfpga_delay_timer.c
new file mode 100644
index 0000000..dcd51e2
--- /dev/null
+++ b/plat/intel/soc/common/socfpga_delay_timer.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <arch_helpers.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include "socfpga_plat_def.h"
+
+#define SOCFPGA_GLOBAL_TIMER 0xffd01000
+#define SOCFPGA_GLOBAL_TIMER_EN 0x3
+
+static timer_ops_t plat_timer_ops;
+/********************************************************************
+ * The timer delay function
+ ********************************************************************/
+static uint32_t socfpga_get_timer_value(void)
+{
+ /*
+ * Generic delay timer implementation expects the timer to be a down
+ * counter. We apply bitwise NOT operator to the tick values returned
+ * by read_cntpct_el0() to simulate the down counter. The value is
+ * clipped from 64 to 32 bits.
+ */
+ return (uint32_t)(~read_cntpct_el0());
+}
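
The complement trick described above can be checked in isolation: as the physical count grows, its truncated complement shrinks, which is the shape the generic down-counting delay driver expects. A self-contained host-side illustration, not platform code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Simulated, monotonically increasing physical counter values. */
        uint64_t ticks[] = { 100U, 200U, 300U };

        for (int i = 0; i < 3; i++) {
                /* Same conversion as socfpga_get_timer_value(). */
                uint32_t down = (uint32_t)(~ticks[i]);

                printf("up=%llu down=0x%08x\n",
                       (unsigned long long)ticks[i], down);
        }
        return 0;
}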
+
+void socfpga_delay_timer_init_args(void)
+{
+ plat_timer_ops.get_timer_value = socfpga_get_timer_value;
+ plat_timer_ops.clk_mult = 1;
+ plat_timer_ops.clk_div = PLAT_SYS_COUNTER_FREQ_IN_MHZ;
+
+ timer_init(&plat_timer_ops);
+}
+
+void socfpga_delay_timer_init(void)
+{
+ socfpga_delay_timer_init_args();
+ mmio_write_32(SOCFPGA_GLOBAL_TIMER, SOCFPGA_GLOBAL_TIMER_EN);
+
+ asm volatile("msr cntp_ctl_el0, %0" : : "r" (SOCFPGA_GLOBAL_TIMER_EN));
+ asm volatile("msr cntp_tval_el0, %0" : : "r" (~0));
+}
diff --git a/plat/intel/soc/common/socfpga_image_load.c b/plat/intel/soc/common/socfpga_image_load.c
new file mode 100644
index 0000000..a5c3279
--- /dev/null
+++ b/plat/intel/soc/common/socfpga_image_load.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/desc_image_load.h>
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void plat_flush_next_bl_params(void)
+{
+ flush_bl_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of loadable images.
+ ******************************************************************************/
+bl_load_info_t *plat_get_bl_image_load_info(void)
+{
+ return get_bl_load_info_from_mem_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of executable images.
+ ******************************************************************************/
+bl_params_t *plat_get_next_bl_params(void)
+{
+ unsigned int count;
+ unsigned int img_id = 0U;
+ unsigned int link_index = 0U;
+ bl_params_node_t *bl_exec_node = NULL;
+ bl_mem_params_node_t *desc_ptr;
+
+ /* If there is no image to start with, return NULL */
+ if (bl_mem_params_desc_num == 0U)
+ return NULL;
+
+ /* Clean next_params_info in BL image node */
+ for (count = 0U; count < bl_mem_params_desc_num; count++) {
+
+ desc_ptr = &bl_mem_params_desc_ptr[link_index];
+ bl_exec_node = &desc_ptr->params_node_mem;
+ bl_exec_node->next_params_info = NULL;
+
+ /* If no next hand-off image then break out */
+ img_id = desc_ptr->next_handoff_image_id;
+ if (img_id == INVALID_IMAGE_ID)
+ break;
+
+ /* Get the index for the next hand-off image */
+ link_index = get_bl_params_node_index(img_id);
+ }
+
+ return get_next_bl_params_from_mem_params_desc();
+}
diff --git a/plat/intel/soc/common/socfpga_psci.c b/plat/intel/soc/common/socfpga_psci.c
new file mode 100644
index 0000000..5fd6559
--- /dev/null
+++ b/plat/intel/soc/common/socfpga_psci.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/gicv2.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <plat/common/platform.h>
+
+#include "socfpga_mailbox.h"
+#include "socfpga_plat_def.h"
+#include "socfpga_reset_manager.h"
+#include "socfpga_sip_svc.h"
+
+
+/*******************************************************************************
+ * plat handler called when a CPU is about to enter standby.
+ ******************************************************************************/
+void socfpga_cpu_standby(plat_local_state_t cpu_state)
+{
+ /*
+ * Enter standby state
+ * dsb is good practice before using wfi to enter low power states
+ */
+ VERBOSE("%s: cpu_state: 0x%x\n", __func__, cpu_state);
+ dsb();
+ wfi();
+}
+
+/*******************************************************************************
+ * plat handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ ******************************************************************************/
+int socfpga_pwr_domain_on(u_register_t mpidr)
+{
+ int cpu_id = plat_core_pos_by_mpidr(mpidr);
+
+ VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);
+
+ if (cpu_id == -1)
+ return PSCI_E_INTERN_FAIL;
+
+ mmio_write_64(PLAT_CPUID_RELEASE, cpu_id);
+
+ /* release core reset */
+ mmio_setbits_32(SOCFPGA_RSTMGR(MPUMODRST), 1 << cpu_id);
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * plat handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void socfpga_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+ VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+ __func__, i, target_state->pwr_domain_state[i]);
+
+ /* Prevent interrupts from spuriously waking up this cpu */
+ gicv2_cpuif_disable();
+}
+
+/*******************************************************************************
+ * plat handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void socfpga_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ unsigned int cpu_id = plat_my_core_pos();
+
+ for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+ VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+ __func__, i, target_state->pwr_domain_state[i]);
+
+ /* assert core reset */
+ mmio_setbits_32(SOCFPGA_RSTMGR(MPUMODRST), 1 << cpu_id);
+}
+
+/*******************************************************************************
+ * plat handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+void socfpga_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+ VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+ __func__, i, target_state->pwr_domain_state[i]);
+
+ /* Program the gic per-cpu distributor or re-distributor interface */
+ gicv2_pcpu_distif_init();
+ gicv2_set_pe_target_mask(plat_my_core_pos());
+
+ /* Enable the gic cpu interface */
+ gicv2_cpuif_enable();
+}
+
+/*******************************************************************************
+ * plat handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ * TODO: At the moment we reuse the on finisher and reinitialize the secure
+ * context. Need to implement a separate suspend finisher.
+ ******************************************************************************/
+void socfpga_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+ unsigned int cpu_id = plat_my_core_pos();
+
+ for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+ VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+ __func__, i, target_state->pwr_domain_state[i]);
+
+ /* release core reset */
+ mmio_clrbits_32(SOCFPGA_RSTMGR(MPUMODRST), 1 << cpu_id);
+}
+
+/*******************************************************************************
+ * plat handlers to shutdown/reboot the system
+ ******************************************************************************/
+static void __dead2 socfpga_system_off(void)
+{
+ wfi();
+ ERROR("System Off: operation not handled.\n");
+ panic();
+}
+
+extern uint64_t intel_rsu_update_address;
+
+static void __dead2 socfpga_system_reset(void)
+{
+ uint32_t addr_buf[2];
+
+ memcpy(addr_buf, &intel_rsu_update_address,
+ sizeof(intel_rsu_update_address));
+
+ if (intel_rsu_update_address)
+ mailbox_rsu_update(addr_buf);
+ else
+ mailbox_reset_cold();
+
+ while (1)
+ wfi();
+}
+
+static int socfpga_system_reset2(int is_vendor, int reset_type,
+ u_register_t cookie)
+{
+ if (cold_reset_for_ecc_dbe()) {
+ mailbox_reset_cold();
+ }
+ /* disable cpuif */
+ gicv2_cpuif_disable();
+
+ /* Store magic number */
+ mmio_write_32(L2_RESET_DONE_REG, L2_RESET_DONE_STATUS);
+
+ /* Increase timeout */
+ mmio_write_32(SOCFPGA_RSTMGR(HDSKTIMEOUT), 0xffffff);
+
+ /* Enable handshakes */
+ mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), RSTMGR_HDSKEN_SET);
+
+ /* Reset L2 module */
+ mmio_setbits_32(SOCFPGA_RSTMGR(COLDMODRST), 0x100);
+
+ while (1)
+ wfi();
+
+ /* Should not reach here */
+ return 0;
+}
+
+int socfpga_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ VERBOSE("%s: power_state: 0x%x\n", __func__, power_state);
+
+ return PSCI_E_SUCCESS;
+}
+
+int socfpga_validate_ns_entrypoint(unsigned long ns_entrypoint)
+{
+ VERBOSE("%s: ns_entrypoint: 0x%lx\n", __func__, ns_entrypoint);
+ return PSCI_E_SUCCESS;
+}
+
+void socfpga_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+ req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE;
+ req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE;
+}
+
+/*******************************************************************************
+ * Export the platform handlers via socfpga_psci_pm_ops. The generic PSCI
+ * framework registers these handlers through plat_setup_psci_ops() below.
+ ******************************************************************************/
+const plat_psci_ops_t socfpga_psci_pm_ops = {
+ .cpu_standby = socfpga_cpu_standby,
+ .pwr_domain_on = socfpga_pwr_domain_on,
+ .pwr_domain_off = socfpga_pwr_domain_off,
+ .pwr_domain_suspend = socfpga_pwr_domain_suspend,
+ .pwr_domain_on_finish = socfpga_pwr_domain_on_finish,
+ .pwr_domain_suspend_finish = socfpga_pwr_domain_suspend_finish,
+ .system_off = socfpga_system_off,
+ .system_reset = socfpga_system_reset,
+ .system_reset2 = socfpga_system_reset2,
+ .validate_power_state = socfpga_validate_power_state,
+ .validate_ns_entrypoint = socfpga_validate_ns_entrypoint,
+ .get_sys_suspend_power_state = socfpga_get_sys_suspend_power_state
+};
+
+/*******************************************************************************
+ * Export the platform specific power ops.
+ ******************************************************************************/
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const struct plat_psci_ops **psci_ops)
+{
+ /* Save warm boot entrypoint.*/
+ mmio_write_64(PLAT_SEC_ENTRY, sec_entrypoint);
+ *psci_ops = &socfpga_psci_pm_ops;
+
+ return 0;
+}
diff --git a/plat/intel/soc/common/socfpga_sip_svc.c b/plat/intel/soc/common/socfpga_sip_svc.c
new file mode 100644
index 0000000..f079349
--- /dev/null
+++ b/plat/intel/soc/common/socfpga_sip_svc.c
@@ -0,0 +1,1131 @@
+/*
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/mmio.h>
+#include <tools_share/uuid.h>
+
+#include "socfpga_fcs.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_reset_manager.h"
+#include "socfpga_sip_svc.h"
+
+
+/* Total number of configuration buffers the driver can hold */
+#define FPGA_CONFIG_BUFFER_SIZE 4
+
+static config_type request_type = NO_REQUEST;
+static int current_block, current_buffer;
+static int read_block, max_blocks;
+static uint32_t send_id, rcv_id;
+static uint32_t bytes_per_block, blocks_submitted;
+static bool bridge_disable;
+
+/* RSU static variables */
+static uint32_t rsu_dcmf_ver[4] = {0};
+static uint16_t rsu_dcmf_stat[4] = {0};
+static uint32_t rsu_max_retry;
+
+/* SiP Service UUID */
+DEFINE_SVC_UUID2(intl_svc_uid,
+ 0xa85273b0, 0xe85a, 0x4862, 0xa6, 0x2a,
+ 0xfa, 0x88, 0x88, 0x17, 0x68, 0x81);
+
+static uint64_t socfpga_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+}
+
+struct fpga_config_info fpga_config_buffers[FPGA_CONFIG_BUFFER_SIZE];
+
+static int intel_fpga_sdm_write_buffer(struct fpga_config_info *buffer)
+{
+ uint32_t args[3];
+
+ while (max_blocks > 0 && buffer->size > buffer->size_written) {
+ args[0] = (1<<8);
+ args[1] = buffer->addr + buffer->size_written;
+ if (buffer->size - buffer->size_written <= bytes_per_block) {
+ args[2] = buffer->size - buffer->size_written;
+ current_buffer++;
+ current_buffer %= FPGA_CONFIG_BUFFER_SIZE;
+ } else {
+ args[2] = bytes_per_block;
+ }
+
+ buffer->size_written += args[2];
+ mailbox_send_cmd_async(&send_id, MBOX_RECONFIG_DATA, args,
+ 3U, CMD_INDIRECT);
+
+ buffer->subblocks_sent++;
+ max_blocks--;
+ }
+
+ return !max_blocks;
+}
+
+static int intel_fpga_sdm_write_all(void)
+{
+ for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+ if (intel_fpga_sdm_write_buffer(
+ &fpga_config_buffers[current_buffer])) {
+ break;
+ }
+ }
+ return 0;
+}
+
+static uint32_t intel_mailbox_fpga_config_isdone(void)
+{
+ uint32_t ret;
+
+ switch (request_type) {
+ case RECONFIGURATION:
+ ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS,
+ true);
+ break;
+ case BITSTREAM_AUTH:
+ ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS,
+ false);
+ break;
+ default:
+ ret = intel_mailbox_get_config_status(MBOX_CONFIG_STATUS,
+ false);
+ break;
+ }
+
+ if (ret != 0U) {
+ if (ret == MBOX_CFGSTAT_STATE_CONFIG) {
+ return INTEL_SIP_SMC_STATUS_BUSY;
+ } else {
+ request_type = NO_REQUEST;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+ }
+
+ if (bridge_disable) {
+ socfpga_bridges_enable(~0); /* Re-enable all bridges */
+ bridge_disable = false;
+ }
+ request_type = NO_REQUEST;
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static int mark_last_buffer_xfer_completed(uint32_t *buffer_addr_completed)
+{
+ int i;
+
+ for (i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+ if (fpga_config_buffers[i].block_number == current_block) {
+ fpga_config_buffers[i].subblocks_sent--;
+ if (fpga_config_buffers[i].subblocks_sent == 0
+ && fpga_config_buffers[i].size <=
+ fpga_config_buffers[i].size_written) {
+ fpga_config_buffers[i].write_requested = 0;
+ current_block++;
+ *buffer_addr_completed =
+ fpga_config_buffers[i].addr;
+ return 0;
+ }
+ }
+ }
+
+ return -1;
+}
+
+static int intel_fpga_config_completed_write(uint32_t *completed_addr,
+ uint32_t *count, uint32_t *job_id)
+{
+ uint32_t resp[5];
+ unsigned int resp_len = ARRAY_SIZE(resp);
+ int status = INTEL_SIP_SMC_STATUS_OK;
+ int all_completed = 1;
+ *count = 0;
+
+ while (*count < 3) {
+
+ status = mailbox_read_response(job_id,
+ resp, &resp_len);
+
+ if (status < 0) {
+ break;
+ }
+
+ max_blocks++;
+
+ if (mark_last_buffer_xfer_completed(
+ &completed_addr[*count]) == 0) {
+ *count = *count + 1;
+ } else {
+ break;
+ }
+ }
+
+ if (*count == 0U) {
+ if (status != MBOX_NO_RESPONSE &&
+ status != MBOX_TIMEOUT && resp_len != 0) {
+ mailbox_clear_response();
+ request_type = NO_REQUEST;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *count = 0;
+ }
+
+ intel_fpga_sdm_write_all();
+
+ if (*count > 0) {
+ status = INTEL_SIP_SMC_STATUS_OK;
+ } else if (*count == 0) {
+ status = INTEL_SIP_SMC_STATUS_BUSY;
+ }
+
+ for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+ if (fpga_config_buffers[i].write_requested != 0) {
+ all_completed = 0;
+ break;
+ }
+ }
+
+ if (all_completed == 1) {
+ return INTEL_SIP_SMC_STATUS_OK;
+ }
+
+ return status;
+}
+
+static int intel_fpga_config_start(uint32_t flag)
+{
+ uint32_t argument = 0x1;
+ uint32_t response[3];
+ int status = 0;
+ unsigned int size = 0;
+ unsigned int resp_len = ARRAY_SIZE(response);
+
+ request_type = RECONFIGURATION;
+
+ if (!CONFIG_TEST_FLAG(flag, PARTIAL_CONFIG)) {
+ bridge_disable = true;
+ }
+
+ if (CONFIG_TEST_FLAG(flag, AUTHENTICATION)) {
+ size = 1;
+ bridge_disable = false;
+ request_type = BITSTREAM_AUTH;
+ }
+
+ mailbox_clear_response();
+
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_CANCEL, NULL, 0U,
+ CMD_CASUAL, NULL, NULL);
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_RECONFIG, &argument, size,
+ CMD_CASUAL, response, &resp_len);
+
+ if (status < 0) {
+ bridge_disable = false;
+ request_type = NO_REQUEST;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ max_blocks = response[0];
+ bytes_per_block = response[1];
+
+ for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+ fpga_config_buffers[i].size = 0;
+ fpga_config_buffers[i].size_written = 0;
+ fpga_config_buffers[i].addr = 0;
+ fpga_config_buffers[i].write_requested = 0;
+ fpga_config_buffers[i].block_number = 0;
+ fpga_config_buffers[i].subblocks_sent = 0;
+ }
+
+ blocks_submitted = 0;
+ current_block = 0;
+ read_block = 0;
+ current_buffer = 0;
+
+ /* Disable the bridges on full reconfiguration */
+ if (bridge_disable) {
+ socfpga_bridges_disable(~0);
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static bool is_fpga_config_buffer_full(void)
+{
+ for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+ if (!fpga_config_buffers[i].write_requested) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool is_address_in_ddr_range(uint64_t addr, uint64_t size)
+{
+ if (!addr && !size) {
+ return true;
+ }
+ if (size > (UINT64_MAX - addr)) {
+ return false;
+ }
+ if (addr < BL31_LIMIT) {
+ return false;
+ }
+ if (addr + size > DRAM_BASE + DRAM_SIZE) {
+ return false;
+ }
+
+ return true;
+}
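
The checks above amount to: an all-zero request is a wildcard, the window must not wrap around, it must start at or above BL31_LIMIT, and it must end within DRAM_BASE + DRAM_SIZE. A sketch of the same predicate with hypothetical bounds (the real values come from the platform headers), convenient for testing the logic off target:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical bounds for illustration only. */
#define EX_BL31_LIMIT   0x00100000ULL
#define EX_DRAM_BASE    0x00000000ULL
#define EX_DRAM_SIZE    0x80000000ULL

static bool ex_in_ddr_range(uint64_t addr, uint64_t size)
{
        if ((addr == 0U) && (size == 0U)) {
                return true;            /* wildcard request */
        }
        if (size > (UINT64_MAX - addr)) {
                return false;           /* addr + size would wrap */
        }
        if (addr < EX_BL31_LIMIT) {
                return false;           /* overlaps the secure region */
        }
        return (addr + size) <= (EX_DRAM_BASE + EX_DRAM_SIZE);
}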
+
+static uint32_t intel_fpga_config_write(uint64_t mem, uint64_t size)
+{
+ int i;
+
+ intel_fpga_sdm_write_all();
+
+ if (!is_address_in_ddr_range(mem, size) ||
+ is_fpga_config_buffer_full()) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ for (i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+ int j = (i + current_buffer) % FPGA_CONFIG_BUFFER_SIZE;
+
+ if (!fpga_config_buffers[j].write_requested) {
+ fpga_config_buffers[j].addr = mem;
+ fpga_config_buffers[j].size = size;
+ fpga_config_buffers[j].size_written = 0;
+ fpga_config_buffers[j].write_requested = 1;
+ fpga_config_buffers[j].block_number =
+ blocks_submitted++;
+ fpga_config_buffers[j].subblocks_sent = 0;
+ break;
+ }
+ }
+
+ if (is_fpga_config_buffer_full()) {
+ return INTEL_SIP_SMC_STATUS_BUSY;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static int is_out_of_sec_range(uint64_t reg_addr)
+{
+#if DEBUG
+ return 0;
+#endif
+
+ switch (reg_addr) {
+ case(0xF8011100): /* ECCCTRL1 */
+ case(0xF8011104): /* ECCCTRL2 */
+ case(0xF8011110): /* ERRINTEN */
+ case(0xF8011114): /* ERRINTENS */
+ case(0xF8011118): /* ERRINTENR */
+ case(0xF801111C): /* INTMODE */
+ case(0xF8011120): /* INTSTAT */
+ case(0xF8011124): /* DIAGINTTEST */
+ case(0xF801112C): /* DERRADDRA */
+ case(0xFFD12028): /* SDMMCGRP_CTRL */
+ case(0xFFD12044): /* EMAC0 */
+ case(0xFFD12048): /* EMAC1 */
+ case(0xFFD1204C): /* EMAC2 */
+ case(0xFFD12090): /* ECC_INT_MASK_VALUE */
+ case(0xFFD12094): /* ECC_INT_MASK_SET */
+ case(0xFFD12098): /* ECC_INT_MASK_CLEAR */
+ case(0xFFD1209C): /* ECC_INTSTATUS_SERR */
+ case(0xFFD120A0): /* ECC_INTSTATUS_DERR */
+ case(0xFFD120C0): /* NOC_TIMEOUT */
+ case(0xFFD120C4): /* NOC_IDLEREQ_SET */
+ case(0xFFD120C8): /* NOC_IDLEREQ_CLR */
+ case(0xFFD120D0): /* NOC_IDLEACK */
+ case(0xFFD120D4): /* NOC_IDLESTATUS */
+ case(0xFFD12200): /* BOOT_SCRATCH_COLD0 */
+ case(0xFFD12204): /* BOOT_SCRATCH_COLD1 */
+ case(0xFFD12220): /* BOOT_SCRATCH_COLD8 */
+ case(0xFFD12224): /* BOOT_SCRATCH_COLD9 */
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+/* Secure register access */
+uint32_t intel_secure_reg_read(uint64_t reg_addr, uint32_t *retval)
+{
+ if (is_out_of_sec_range(reg_addr)) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *retval = mmio_read_32(reg_addr);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_secure_reg_write(uint64_t reg_addr, uint32_t val,
+ uint32_t *retval)
+{
+ if (is_out_of_sec_range(reg_addr)) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ mmio_write_32(reg_addr, val);
+
+ return intel_secure_reg_read(reg_addr, retval);
+}
+
+uint32_t intel_secure_reg_update(uint64_t reg_addr, uint32_t mask,
+ uint32_t val, uint32_t *retval)
+{
+ if (!intel_secure_reg_read(reg_addr, retval)) {
+ *retval &= ~mask;
+ *retval |= val & mask;
+ return intel_secure_reg_write(reg_addr, *retval, retval);
+ }
+
+ return INTEL_SIP_SMC_STATUS_ERROR;
+}
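
intel_secure_reg_update() is a plain masked read-modify-write: new = (old & ~mask) | (val & mask). A worked example with concrete numbers, independent of the MMIO plumbing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t old = 0x12345678U;
        uint32_t mask = 0x0000FF00U;    /* update bits [15:8] only */
        uint32_t val = 0x0000AB00U;

        uint32_t updated = (old & ~mask) | (val & mask);

        /* Prints: 0x12345678 -> 0x1234ab78 */
        printf("0x%08x -> 0x%08x\n", old, updated);
        return 0;
}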
+
+/* Intel Remote System Update (RSU) services */
+uint64_t intel_rsu_update_address;
+
+static uint32_t intel_rsu_status(uint64_t *respbuf, unsigned int respbuf_sz)
+{
+ if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0) {
+ return INTEL_SIP_SMC_RSU_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static uint32_t intel_rsu_update(uint64_t update_address)
+{
+ intel_rsu_update_address = update_address;
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static uint32_t intel_rsu_notify(uint32_t execution_stage)
+{
+ if (mailbox_hps_stage_notify(execution_stage) < 0) {
+ return INTEL_SIP_SMC_RSU_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static uint32_t intel_rsu_retry_counter(uint32_t *respbuf, uint32_t respbuf_sz,
+ uint32_t *ret_stat)
+{
+ if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0) {
+ return INTEL_SIP_SMC_RSU_ERROR;
+ }
+
+ *ret_stat = respbuf[8]; /* retry_counter word of the RSU status response */
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static uint32_t intel_rsu_copy_dcmf_version(uint64_t dcmf_ver_1_0,
+ uint64_t dcmf_ver_3_2)
+{
+ rsu_dcmf_ver[0] = dcmf_ver_1_0;
+ rsu_dcmf_ver[1] = dcmf_ver_1_0 >> 32;
+ rsu_dcmf_ver[2] = dcmf_ver_3_2;
+ rsu_dcmf_ver[3] = dcmf_ver_3_2 >> 32;
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static uint32_t intel_rsu_copy_dcmf_status(uint64_t dcmf_stat)
+{
+ rsu_dcmf_stat[0] = 0xFFFF & (dcmf_stat >> (0 * 16));
+ rsu_dcmf_stat[1] = 0xFFFF & (dcmf_stat >> (1 * 16));
+ rsu_dcmf_stat[2] = 0xFFFF & (dcmf_stat >> (2 * 16));
+ rsu_dcmf_stat[3] = 0xFFFF & (dcmf_stat >> (3 * 16));
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
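
The SMC passes four 16-bit DCMF status values packed into one 64-bit register, and the helper above simply slices them back out. A self-contained round-trip example with hypothetical status values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical per-image DCMF status values. */
        uint16_t stat[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
        uint64_t packed = 0U;

        /* Caller side: pack the four statuses into a single register. */
        for (int i = 0; i < 4; i++) {
                packed |= (uint64_t)stat[i] << (i * 16);
        }

        /* Service side: the unpacking done by intel_rsu_copy_dcmf_status(). */
        for (int i = 0; i < 4; i++) {
                printf("dcmf%d status = 0x%04x\n", i,
                       (unsigned int)(0xFFFF & (packed >> (i * 16))));
        }
        return 0;
}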
+
+/* Intel HWMON services */
+static uint32_t intel_hwmon_readtemp(uint32_t chan, uint32_t *retval)
+{
+ if (chan > TEMP_CHANNEL_MAX) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ if (mailbox_hwmon_readtemp(chan, retval) < 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static uint32_t intel_hwmon_readvolt(uint32_t chan, uint32_t *retval)
+{
+ if (chan > VOLT_CHANNEL_MAX) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ if (mailbox_hwmon_readvolt(chan, retval) < 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+/* Mailbox services */
+static uint32_t intel_smc_fw_version(uint32_t *fw_version)
+{
+ int status;
+ unsigned int resp_len = CONFIG_STATUS_WORD_SIZE;
+ uint32_t resp_data[CONFIG_STATUS_WORD_SIZE] = {0U};
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CONFIG_STATUS, NULL, 0U,
+ CMD_CASUAL, resp_data, &resp_len);
+
+ if (status < 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ if (resp_len <= CONFIG_STATUS_FW_VER_OFFSET) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *fw_version = resp_data[CONFIG_STATUS_FW_VER_OFFSET] & CONFIG_STATUS_FW_VER_MASK;
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static uint32_t intel_mbox_send_cmd(uint32_t cmd, uint32_t *args,
+ unsigned int len, uint32_t urgent, uint64_t response,
+ unsigned int resp_len, int *mbox_status,
+ unsigned int *len_in_resp)
+{
+ *len_in_resp = 0;
+ *mbox_status = GENERIC_RESPONSE_ERROR;
+
+ if (!is_address_in_ddr_range((uint64_t)args, sizeof(uint32_t) * len)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ int status = mailbox_send_cmd(MBOX_JOB_ID, cmd, args, len, urgent,
+ (uint32_t *) response, &resp_len);
+
+ if (status < 0) {
+ *mbox_status = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *mbox_status = 0;
+ *len_in_resp = resp_len;
+
+ flush_dcache_range(response, resp_len * MBOX_WORD_BYTE);
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+static int intel_smc_get_usercode(uint32_t *user_code)
+{
+ int status;
+ unsigned int resp_len = 1U; /* the usercode is a single response word */
+
+ status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_GET_USERCODE, NULL,
+ 0U, CMD_CASUAL, user_code, &resp_len);
+
+ if (status < 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_smc_service_completed(uint64_t addr, uint32_t size,
+ uint32_t mode, uint32_t *job_id,
+ uint32_t *ret_size, uint32_t *mbox_error)
+{
+ int status = 0;
+ uint32_t resp_len = size / MBOX_WORD_BYTE;
+
+ if (resp_len > MBOX_DATA_MAX_LEN) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_address_in_ddr_range(addr, size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (mode == SERVICE_COMPLETED_MODE_ASYNC) {
+ status = mailbox_read_response_async(job_id,
+ NULL, (uint32_t *) addr, &resp_len, 0);
+ } else {
+ status = mailbox_read_response(job_id,
+ (uint32_t *) addr, &resp_len);
+
+ if (status == MBOX_NO_RESPONSE) {
+ status = MBOX_BUSY;
+ }
+ }
+
+ if (status == MBOX_NO_RESPONSE) {
+ return INTEL_SIP_SMC_STATUS_NO_RESPONSE;
+ }
+
+ if (status == MBOX_BUSY) {
+ return INTEL_SIP_SMC_STATUS_BUSY;
+ }
+
+ *ret_size = resp_len * MBOX_WORD_BYTE;
+ flush_dcache_range(addr, *ret_size);
+
+ if (status != MBOX_RET_OK) {
+ *mbox_error = -status;
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+/* Miscellaneous HPS services */
+uint32_t intel_hps_set_bridges(uint64_t enable, uint64_t mask)
+{
+ int status = 0;
+
+ if ((enable & SOCFPGA_BRIDGE_ENABLE) != 0U) {
+ if ((enable & SOCFPGA_BRIDGE_HAS_MASK) != 0U) {
+ status = socfpga_bridges_enable((uint32_t)mask);
+ } else {
+ status = socfpga_bridges_enable(~0);
+ }
+ } else {
+ if ((enable & SOCFPGA_BRIDGE_HAS_MASK) != 0U) {
+ status = socfpga_bridges_disable((uint32_t)mask);
+ } else {
+ status = socfpga_bridges_disable(~0);
+ }
+ }
+
+ if (status < 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ return INTEL_SIP_SMC_STATUS_OK;
+}
+
+/*
+ * This function is responsible for handling all SiP calls from the NS world
+ */
+
+uintptr_t sip_smc_handler_v1(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ uint32_t retval = 0, completed_addr[3];
+ uint32_t retval2 = 0;
+ uint32_t mbox_error = 0;
+ uint64_t retval64, rsu_respbuf[9];
+ int status = INTEL_SIP_SMC_STATUS_OK;
+ int mbox_status;
+ unsigned int len_in_resp;
+ u_register_t x5, x6, x7;
+
+ switch (smc_fid) {
+ case SIP_SVC_UID:
+ /* Return UID to the caller */
+ SMC_UUID_RET(handle, intl_svc_uid);
+
+ case INTEL_SIP_SMC_FPGA_CONFIG_ISDONE:
+ status = intel_mailbox_fpga_config_isdone();
+ SMC_RET4(handle, status, 0, 0, 0);
+
+ case INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM:
+ SMC_RET3(handle, INTEL_SIP_SMC_STATUS_OK,
+ INTEL_SIP_SMC_FPGA_CONFIG_ADDR,
+ INTEL_SIP_SMC_FPGA_CONFIG_SIZE -
+ INTEL_SIP_SMC_FPGA_CONFIG_ADDR);
+
+ case INTEL_SIP_SMC_FPGA_CONFIG_START:
+ status = intel_fpga_config_start(x1);
+ SMC_RET4(handle, status, 0, 0, 0);
+
+ case INTEL_SIP_SMC_FPGA_CONFIG_WRITE:
+ status = intel_fpga_config_write(x1, x2);
+ SMC_RET4(handle, status, 0, 0, 0);
+
+ case INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE:
+ status = intel_fpga_config_completed_write(completed_addr,
+ &retval, &rcv_id);
+ switch (retval) {
+ case 1:
+ SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK,
+ completed_addr[0], 0, 0);
+
+ case 2:
+ SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK,
+ completed_addr[0],
+ completed_addr[1], 0);
+
+ case 3:
+ SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK,
+ completed_addr[0],
+ completed_addr[1],
+ completed_addr[2]);
+
+ case 0:
+ SMC_RET4(handle, status, 0, 0, 0);
+
+ default:
+ mailbox_clear_response();
+ SMC_RET1(handle, INTEL_SIP_SMC_STATUS_ERROR);
+ }
+
+ case INTEL_SIP_SMC_REG_READ:
+ status = intel_secure_reg_read(x1, &retval);
+ SMC_RET3(handle, status, retval, x1);
+
+ case INTEL_SIP_SMC_REG_WRITE:
+ status = intel_secure_reg_write(x1, (uint32_t)x2, &retval);
+ SMC_RET3(handle, status, retval, x1);
+
+ case INTEL_SIP_SMC_REG_UPDATE:
+ status = intel_secure_reg_update(x1, (uint32_t)x2,
+ (uint32_t)x3, &retval);
+ SMC_RET3(handle, status, retval, x1);
+
+ case INTEL_SIP_SMC_RSU_STATUS:
+ status = intel_rsu_status(rsu_respbuf,
+ ARRAY_SIZE(rsu_respbuf));
+ if (status) {
+ SMC_RET1(handle, status);
+ } else {
+ SMC_RET4(handle, rsu_respbuf[0], rsu_respbuf[1],
+ rsu_respbuf[2], rsu_respbuf[3]);
+ }
+
+ case INTEL_SIP_SMC_RSU_UPDATE:
+ status = intel_rsu_update(x1);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_RSU_NOTIFY:
+ status = intel_rsu_notify(x1);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_RSU_RETRY_COUNTER:
+ status = intel_rsu_retry_counter((uint32_t *)rsu_respbuf,
+ ARRAY_SIZE(rsu_respbuf), &retval);
+ if (status) {
+ SMC_RET1(handle, status);
+ } else {
+ SMC_RET2(handle, status, retval);
+ }
+
+ case INTEL_SIP_SMC_RSU_DCMF_VERSION:
+ SMC_RET3(handle, INTEL_SIP_SMC_STATUS_OK,
+ ((uint64_t)rsu_dcmf_ver[1] << 32) | rsu_dcmf_ver[0],
+ ((uint64_t)rsu_dcmf_ver[3] << 32) | rsu_dcmf_ver[2]);
+
+ case INTEL_SIP_SMC_RSU_COPY_DCMF_VERSION:
+ status = intel_rsu_copy_dcmf_version(x1, x2);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_RSU_DCMF_STATUS:
+ SMC_RET2(handle, INTEL_SIP_SMC_STATUS_OK,
+ ((uint64_t)rsu_dcmf_stat[3] << 48) |
+ ((uint64_t)rsu_dcmf_stat[2] << 32) |
+ ((uint64_t)rsu_dcmf_stat[1] << 16) |
+ rsu_dcmf_stat[0]);
+
+ case INTEL_SIP_SMC_RSU_COPY_DCMF_STATUS:
+ status = intel_rsu_copy_dcmf_status(x1);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_RSU_MAX_RETRY:
+ SMC_RET2(handle, INTEL_SIP_SMC_STATUS_OK, rsu_max_retry);
+
+ case INTEL_SIP_SMC_RSU_COPY_MAX_RETRY:
+ rsu_max_retry = x1;
+ SMC_RET1(handle, INTEL_SIP_SMC_STATUS_OK);
+
+ case INTEL_SIP_SMC_ECC_DBE:
+ status = intel_ecc_dbe_notification(x1);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_SERVICE_COMPLETED:
+ status = intel_smc_service_completed(x1, x2, x3, &rcv_id,
+ &len_in_resp, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x1, len_in_resp);
+
+ case INTEL_SIP_SMC_FIRMWARE_VERSION:
+ status = intel_smc_fw_version(&retval);
+ SMC_RET2(handle, status, retval);
+
+ case INTEL_SIP_SMC_MBOX_SEND_CMD:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ status = intel_mbox_send_cmd(x1, (uint32_t *)x2, x3, x4, x5, x6,
+ &mbox_status, &len_in_resp);
+ SMC_RET3(handle, status, mbox_status, len_in_resp);
+
+ case INTEL_SIP_SMC_GET_USERCODE:
+ status = intel_smc_get_usercode(&retval);
+ SMC_RET2(handle, status, retval);
+
+ case INTEL_SIP_SMC_FCS_CRYPTION:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+
+ if (x1 == FCS_MODE_DECRYPT) {
+ status = intel_fcs_decryption(x2, x3, x4, x5, &send_id);
+ } else if (x1 == FCS_MODE_ENCRYPT) {
+ status = intel_fcs_encryption(x2, x3, x4, x5, &send_id);
+ } else {
+ status = INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ SMC_RET3(handle, status, x4, x5);
+
+ case INTEL_SIP_SMC_FCS_CRYPTION_EXT:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+ if (x3 == FCS_MODE_DECRYPT) {
+ status = intel_fcs_decryption_ext(x1, x2, x4, x5, x6,
+ (uint32_t *) &x7, &mbox_error);
+ } else if (x3 == FCS_MODE_ENCRYPT) {
+ status = intel_fcs_encryption_ext(x1, x2, x4, x5, x6,
+ (uint32_t *) &x7, &mbox_error);
+ } else {
+ status = INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ SMC_RET4(handle, status, mbox_error, x6, x7);
+
+ case INTEL_SIP_SMC_FCS_RANDOM_NUMBER:
+ status = intel_fcs_random_number_gen(x1, &retval64,
+ &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x1, retval64);
+
+ case INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT:
+ status = intel_fcs_random_number_gen_ext(x1, x2, x3,
+ &send_id);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_FCS_SEND_CERTIFICATE:
+ status = intel_fcs_send_cert(x1, x2, &send_id);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_FCS_GET_PROVISION_DATA:
+ status = intel_fcs_get_provision_data(&send_id);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_FCS_CNTR_SET_PREAUTH:
+ status = intel_fcs_cntr_set_preauth(x1, x2, x3,
+ &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_HPS_SET_BRIDGES:
+ status = intel_hps_set_bridges(x1, x2);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_HWMON_READTEMP:
+ status = intel_hwmon_readtemp(x1, &retval);
+ SMC_RET2(handle, status, retval);
+
+ case INTEL_SIP_SMC_HWMON_READVOLT:
+ status = intel_hwmon_readvolt(x1, &retval);
+ SMC_RET2(handle, status, retval);
+
+ case INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN:
+ status = intel_fcs_sigma_teardown(x1, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_CHIP_ID:
+ status = intel_fcs_chip_id(&retval, &retval2, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, retval, retval2);
+
+ case INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY:
+ status = intel_fcs_attestation_subkey(x1, x2, x3,
+ (uint32_t *) &x4, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x3, x4);
+
+ case INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS:
+ status = intel_fcs_get_measurement(x1, x2, x3,
+ (uint32_t *) &x4, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x3, x4);
+
+ case INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERT:
+ status = intel_fcs_get_attestation_cert(x1, x2,
+ (uint32_t *) &x3, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x2, x3);
+
+ case INTEL_SIP_SMC_FCS_CREATE_CERT_ON_RELOAD:
+ status = intel_fcs_create_cert_on_reload(x1, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_OPEN_CS_SESSION:
+ status = intel_fcs_open_crypto_service_session(&retval, &mbox_error);
+ SMC_RET3(handle, status, mbox_error, retval);
+
+ case INTEL_SIP_SMC_FCS_CLOSE_CS_SESSION:
+ status = intel_fcs_close_crypto_service_session(x1, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_IMPORT_CS_KEY:
+ status = intel_fcs_import_crypto_service_key(x1, x2, &send_id);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_FCS_EXPORT_CS_KEY:
+ status = intel_fcs_export_crypto_service_key(x1, x2, x3,
+ (uint32_t *) &x4, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x3, x4);
+
+ case INTEL_SIP_SMC_FCS_REMOVE_CS_KEY:
+ status = intel_fcs_remove_crypto_service_key(x1, x2,
+ &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_GET_CS_KEY_INFO:
+ status = intel_fcs_get_crypto_service_key_info(x1, x2, x3,
+ (uint32_t *) &x4, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x3, x4);
+
+ case INTEL_SIP_SMC_FCS_GET_DIGEST_INIT:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ status = intel_fcs_get_digest_init(x1, x2, x3,
+ x4, x5, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ status = intel_fcs_get_digest_update_finalize(x1, x2, x3,
+ x4, x5, (uint32_t *) &x6, false,
+ &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ status = intel_fcs_get_digest_update_finalize(x1, x2, x3,
+ x4, x5, (uint32_t *) &x6, true,
+ &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ status = intel_fcs_mac_verify_init(x1, x2, x3,
+ x4, x5, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+ status = intel_fcs_mac_verify_update_finalize(x1, x2, x3,
+ x4, x5, (uint32_t *) &x6, x7,
+ false, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+ status = intel_fcs_mac_verify_update_finalize(x1, x2, x3,
+ x4, x5, (uint32_t *) &x6, x7,
+ true, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_INIT:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ status = intel_fcs_ecdsa_sha2_data_sign_init(x1, x2, x3,
+ x4, x5, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ status = intel_fcs_ecdsa_sha2_data_sign_update_finalize(x1, x2,
+ x3, x4, x5, (uint32_t *) &x6, false,
+ &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ status = intel_fcs_ecdsa_sha2_data_sign_update_finalize(x1, x2,
+ x3, x4, x5, (uint32_t *) &x6, true,
+ &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_INIT:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ status = intel_fcs_ecdsa_hash_sign_init(x1, x2, x3,
+ x4, x5, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_FINALIZE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ status = intel_fcs_ecdsa_hash_sign_finalize(x1, x2, x3,
+ x4, x5, (uint32_t *) &x6, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_INIT:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ status = intel_fcs_ecdsa_hash_sig_verify_init(x1, x2, x3,
+ x4, x5, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ status = intel_fcs_ecdsa_hash_sig_verify_finalize(x1, x2, x3,
+ x4, x5, (uint32_t *) &x6, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ status = intel_fcs_ecdsa_sha2_data_sig_verify_init(x1, x2, x3,
+ x4, x5, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+ status = intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(
+ x1, x2, x3, x4, x5, (uint32_t *) &x6,
+ x7, false, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+ status = intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(
+ x1, x2, x3, x4, x5, (uint32_t *) &x6,
+ x7, true, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_INIT:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ status = intel_fcs_ecdsa_get_pubkey_init(x1, x2, x3,
+ x4, x5, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_FINALIZE:
+ status = intel_fcs_ecdsa_get_pubkey_finalize(x1, x2, x3,
+ (uint32_t *) &x4, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x3, x4);
+
+ case INTEL_SIP_SMC_FCS_ECDH_REQUEST_INIT:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ status = intel_fcs_ecdh_request_init(x1, x2, x3,
+ x4, x5, &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_ECDH_REQUEST_FINALIZE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ status = intel_fcs_ecdh_request_finalize(x1, x2, x3,
+ x4, x5, (uint32_t *) &x6, &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x5, x6);
+
+ case INTEL_SIP_SMC_FCS_AES_CRYPT_INIT:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ status = intel_fcs_aes_crypt_init(x1, x2, x3, x4, x5,
+ &mbox_error);
+ SMC_RET2(handle, status, mbox_error);
+
+ case INTEL_SIP_SMC_FCS_AES_CRYPT_UPDATE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ status = intel_fcs_aes_crypt_update_finalize(x1, x2, x3, x4,
+ x5, x6, false, &send_id);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_FCS_AES_CRYPT_FINALIZE:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ status = intel_fcs_aes_crypt_update_finalize(x1, x2, x3, x4,
+ x5, x6, true, &send_id);
+ SMC_RET1(handle, status);
+
+ case INTEL_SIP_SMC_GET_ROM_PATCH_SHA384:
+ status = intel_fcs_get_rom_patch_sha384(x1, &retval64,
+ &mbox_error);
+ SMC_RET4(handle, status, mbox_error, x1, retval64);
+
+ case INTEL_SIP_SMC_SVC_VERSION:
+ SMC_RET3(handle, INTEL_SIP_SMC_STATUS_OK,
+ SIP_SVC_VERSION_MAJOR,
+ SIP_SVC_VERSION_MINOR);
+
+ default:
+ return socfpga_sip_handler(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags);
+ }
+}
+
+uintptr_t sip_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ uint32_t cmd = smc_fid & INTEL_SIP_SMC_CMD_MASK;
+
+ if (cmd >= INTEL_SIP_SMC_CMD_V2_RANGE_BEGIN &&
+ cmd <= INTEL_SIP_SMC_CMD_V2_RANGE_END) {
+ return sip_smc_handler_v2(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags);
+ } else {
+ return sip_smc_handler_v1(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags);
+ }
+}
+
+DECLARE_RT_SVC(
+ socfpga_sip_svc,
+ OEN_SIP_START,
+ OEN_SIP_END,
+ SMC_TYPE_FAST,
+ NULL,
+ sip_smc_handler
+);
+
+DECLARE_RT_SVC(
+ socfpga_sip_svc_std,
+ OEN_SIP_START,
+ OEN_SIP_END,
+ SMC_TYPE_YIELD,
+ NULL,
+ sip_smc_handler
+);
diff --git a/plat/intel/soc/common/socfpga_sip_svc_v2.c b/plat/intel/soc/common/socfpga_sip_svc_v2.c
new file mode 100644
index 0000000..791c714
--- /dev/null
+++ b/plat/intel/soc/common/socfpga_sip_svc_v2.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/mmio.h>
+
+#include "socfpga_mailbox.h"
+#include "socfpga_sip_svc.h"
+
+static uint32_t intel_v2_mbox_send_cmd(uint32_t req_header,
+ uint32_t *data, uint32_t data_size)
+{
+ uint32_t value;
+ uint32_t len;
+
+ if ((data == NULL) || (data_size == 0)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (data_size > (MBOX_INC_HEADER_MAX_WORD_SIZE * MBOX_WORD_BYTE)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_size_4_bytes_aligned(data_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ /* The client ID in the SMC SiP V2 header must match the mailbox header */
+ value = (req_header >> INTEL_SIP_SMC_HEADER_CID_OFFSET) &
+ INTEL_SIP_SMC_HEADER_CID_MASK;
+
+ if (value != MBOX_RESP_CLIENT_ID(data[0])) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ /* The job ID in the SMC SiP V2 header must match the mailbox header */
+ value = (req_header >> INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET) &
+ INTEL_SIP_SMC_HEADER_JOB_ID_MASK;
+
+ if (value != MBOX_RESP_JOB_ID(data[0])) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ /*
+  * The data length passed through the SMC must match the length
+  * encoded in the mailbox header
+  */
+ len = (data_size / MBOX_WORD_BYTE) - 1;
+
+ if (len != MBOX_RESP_LEN(data[0])) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ return mailbox_send_cmd_async_ext(data[0], &data[1], len);
+}
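
Both headers carry the same client ID and job ID, and the length passed through the SMC must match the length field of the mailbox header, so the checks above reject any mismatch. A hedged sketch of how a caller could build the two headers consistently; it assumes the INTEL_SIP_SMC_HEADER_* and MBOX_*_CMD macros from the platform headers, and that command and response mailbox headers share the same field layout (which is what the MBOX_RESP_* extractors above rely on).

#include <stdint.h>
#include "socfpga_mailbox.h"
#include "socfpga_sip_svc.h"

/* SMC SiP V2 request header: client ID, job ID and protocol version. */
static uint64_t ex_smc_v2_header(uint32_t client_id, uint32_t job_id,
                                 uint32_t version)
{
        return ((uint64_t)(client_id & INTEL_SIP_SMC_HEADER_CID_MASK)
                        << INTEL_SIP_SMC_HEADER_CID_OFFSET) |
               ((uint64_t)(job_id & INTEL_SIP_SMC_HEADER_JOB_ID_MASK)
                        << INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET) |
               ((uint64_t)(version & INTEL_SIP_SMC_HEADER_VERSION_MASK)
                        << INTEL_SIP_SMC_HEADER_VERSION_OFFSET);
}

/* Mailbox header word (data[0]): the length counts payload words only. */
static uint32_t ex_mbox_header(uint32_t client_id, uint32_t job_id,
                               uint32_t cmd, uint32_t payload_words)
{
        return MBOX_CLIENT_ID_CMD(client_id) | MBOX_JOB_ID_CMD(job_id) |
               MBOX_CMD_LEN_CMD(payload_words) | cmd;
}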
+
+static uint32_t intel_v2_mbox_poll_resp(uint64_t req_header,
+ uint32_t *data, uint32_t *data_size,
+ uint64_t *resp_header)
+{
+ int status = 0;
+ uint32_t resp_len;
+ uint32_t job_id = 0;
+ uint32_t client_id = 0;
+ uint32_t version;
+
+ if ((data == NULL) || (data_size == NULL) || (resp_header == NULL)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ if (!is_size_4_bytes_aligned(*data_size)) {
+ return INTEL_SIP_SMC_STATUS_REJECTED;
+ }
+
+ resp_len = (*data_size / MBOX_WORD_BYTE) - 1;
+ status = mailbox_read_response_async(&job_id, &data[0], &data[1],
+ &resp_len, 1);
+
+ if (status == MBOX_BUSY) {
+ status = INTEL_SIP_SMC_STATUS_BUSY;
+ } else if (status == MBOX_NO_RESPONSE) {
+ status = INTEL_SIP_SMC_STATUS_NO_RESPONSE;
+ } else {
+ *data_size = 0;
+
+ if (resp_len > 0) {
+ /*
+  * Fill in the final response length; the length
+  * includes both the mailbox header and the payload
+  */
+ *data_size = (resp_len + 1) * MBOX_WORD_BYTE;
+
+ /* Extract the client id from mailbox header */
+ client_id = MBOX_RESP_CLIENT_ID(data[0]);
+ }
+
+ /*
+ * Extract SMC SiP V2 protocol version from
+ * SMC request header
+ */
+ version = (req_header >> INTEL_SIP_SMC_HEADER_VERSION_OFFSET) &
+ INTEL_SIP_SMC_HEADER_VERSION_MASK;
+
+ /* Fill in SMC SiP V2 protocol response header */
+ *resp_header = 0;
+ *resp_header |= (((uint64_t)job_id) &
+ INTEL_SIP_SMC_HEADER_JOB_ID_MASK) <<
+ INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET;
+ *resp_header |= (((uint64_t)client_id) &
+ INTEL_SIP_SMC_HEADER_CID_MASK) <<
+ INTEL_SIP_SMC_HEADER_CID_OFFSET;
+ *resp_header |= (((uint64_t)version) &
+ INTEL_SIP_SMC_HEADER_VERSION_MASK) <<
+ INTEL_SIP_SMC_HEADER_VERSION_OFFSET;
+ }
+
+ return status;
+}
+
+uintptr_t sip_smc_handler_v2(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ uint32_t retval = 0;
+ uint64_t retval64 = 0;
+ int status = INTEL_SIP_SMC_STATUS_OK;
+
+ switch (smc_fid) {
+ case INTEL_SIP_SMC_V2_GET_SVC_VERSION:
+ SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK, x1,
+ SIP_SVC_VERSION_MAJOR,
+ SIP_SVC_VERSION_MINOR);
+
+ case INTEL_SIP_SMC_V2_REG_READ:
+ status = intel_secure_reg_read(x2, &retval);
+ SMC_RET4(handle, status, x1, retval, x2);
+
+ case INTEL_SIP_SMC_V2_REG_WRITE:
+ status = intel_secure_reg_write(x2, (uint32_t)x3, &retval);
+ SMC_RET4(handle, status, x1, retval, x2);
+
+ case INTEL_SIP_SMC_V2_REG_UPDATE:
+ status = intel_secure_reg_update(x2, (uint32_t)x3,
+ (uint32_t)x4, &retval);
+ SMC_RET4(handle, status, x1, retval, x2);
+
+ case INTEL_SIP_SMC_V2_HPS_SET_BRIDGES:
+ status = intel_hps_set_bridges(x2, x3);
+ SMC_RET2(handle, status, x1);
+
+ case INTEL_SIP_SMC_V2_MAILBOX_SEND_COMMAND:
+ status = intel_v2_mbox_send_cmd(x1, (uint32_t *)x2, x3);
+ SMC_RET2(handle, status, x1);
+
+ case INTEL_SIP_SMC_V2_MAILBOX_POLL_RESPONSE:
+ status = intel_v2_mbox_poll_resp(x1, (uint32_t *)x2,
+ (uint32_t *) &x3, &retval64);
+ SMC_RET4(handle, status, retval64, x2, x3);
+
+ default:
+ ERROR("%s: unhandled SMC V2 (0x%x)\n", __func__, smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
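The V2 handler above expects the caller to pack the job ID, client ID and protocol version into the SMC header word passed in x1, using the same field layout that intel_v2_mbox_poll_resp() reproduces when it builds the response header. A minimal sketch of such a packing helper, reusing only the mask/offset macros already referenced in this file (the helper name is hypothetical and the code is illustrative, not part of this patch):

static uint64_t pack_smc_v2_header(uint32_t job_id, uint32_t client_id,
				   uint32_t version)
{
	uint64_t header = 0U;

	/* Same field layout as the response header built above */
	header |= ((uint64_t)(job_id & INTEL_SIP_SMC_HEADER_JOB_ID_MASK)) <<
		  INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET;
	header |= ((uint64_t)(client_id & INTEL_SIP_SMC_HEADER_CID_MASK)) <<
		  INTEL_SIP_SMC_HEADER_CID_OFFSET;
	header |= ((uint64_t)(version & INTEL_SIP_SMC_HEADER_VERSION_MASK)) <<
		  INTEL_SIP_SMC_HEADER_VERSION_OFFSET;

	return header;
}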
diff --git a/plat/intel/soc/common/socfpga_storage.c b/plat/intel/soc/common/socfpga_storage.c
new file mode 100644
index 0000000..a2f2c18
--- /dev/null
+++ b/plat/intel/soc/common/socfpga_storage.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/debug.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <drivers/io/io_block.h>
+#include <drivers/io/io_driver.h>
+#include <drivers/io/io_fip.h>
+#include <drivers/io/io_memmap.h>
+#include <drivers/io/io_storage.h>
+#include <drivers/mmc.h>
+#include <drivers/partition/partition.h>
+#include <lib/mmio.h>
+#include <tools_share/firmware_image_package.h>
+
+#include "socfpga_private.h"
+
+#define PLAT_FIP_BASE (0)
+#define PLAT_FIP_MAX_SIZE (0x1000000)
+#define PLAT_MMC_DATA_BASE (0xffe3c000)
+#define PLAT_MMC_DATA_SIZE (0x2000)
+#define PLAT_QSPI_DATA_BASE (0x3C00000)
+#define PLAT_QSPI_DATA_SIZE (0x1000000)
+
+
+static const io_dev_connector_t *fip_dev_con;
+static const io_dev_connector_t *boot_dev_con;
+
+static uintptr_t fip_dev_handle;
+static uintptr_t boot_dev_handle;
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+ .uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+ .uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+
+static const io_uuid_spec_t bl33_uuid_spec = {
+ .uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+uintptr_t a2_lba_offset;
+const char a2[] = {0xa2, 0x0};
+
+static const io_block_spec_t gpt_block_spec = {
+ .offset = 0,
+ .length = MMC_BLOCK_SIZE
+};
+
+static int check_fip(const uintptr_t spec);
+static int check_dev(const uintptr_t spec);
+
+static io_block_dev_spec_t boot_dev_spec;
+static int (*register_io_dev)(const io_dev_connector_t **);
+
+static io_block_spec_t fip_spec = {
+ .offset = PLAT_FIP_BASE,
+ .length = PLAT_FIP_MAX_SIZE,
+};
+
+struct plat_io_policy {
+ uintptr_t *dev_handle;
+ uintptr_t image_spec;
+ int (*check)(const uintptr_t spec);
+};
+
+static const struct plat_io_policy policies[] = {
+ [FIP_IMAGE_ID] = {
+ &boot_dev_handle,
+ (uintptr_t)&fip_spec,
+ check_dev
+ },
+ [BL2_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl2_uuid_spec,
+ check_fip
+ },
+ [BL31_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl31_uuid_spec,
+ check_fip
+ },
+ [BL33_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t) &bl33_uuid_spec,
+ check_fip
+ },
+ [GPT_IMAGE_ID] = {
+ &boot_dev_handle,
+ (uintptr_t) &gpt_block_spec,
+ check_dev
+ },
+};
+
+static int check_dev(const uintptr_t spec)
+{
+ int result;
+ uintptr_t local_handle;
+
+ result = io_dev_init(boot_dev_handle, (uintptr_t)NULL);
+ if (result == 0) {
+ result = io_open(boot_dev_handle, spec, &local_handle);
+ if (result == 0)
+ io_close(local_handle);
+ }
+ return result;
+}
+
+static int check_fip(const uintptr_t spec)
+{
+ int result;
+ uintptr_t local_image_handle;
+
+ result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+ if (result == 0) {
+ result = io_open(fip_dev_handle, spec, &local_image_handle);
+ if (result == 0)
+ io_close(local_image_handle);
+ }
+ return result;
+}
+
+void socfpga_io_setup(int boot_source)
+{
+ int result;
+
+ switch (boot_source) {
+ case BOOT_SOURCE_SDMMC:
+ register_io_dev = &register_io_dev_block;
+ boot_dev_spec.buffer.offset = PLAT_MMC_DATA_BASE;
+ boot_dev_spec.buffer.length = MMC_BLOCK_SIZE;
+ boot_dev_spec.ops.read = mmc_read_blocks;
+ boot_dev_spec.ops.write = mmc_write_blocks;
+ boot_dev_spec.block_size = MMC_BLOCK_SIZE;
+ break;
+
+ case BOOT_SOURCE_QSPI:
+ register_io_dev = &register_io_dev_memmap;
+ fip_spec.offset = fip_spec.offset + PLAT_QSPI_DATA_BASE;
+ break;
+
+ default:
+ ERROR("Unsupported boot source\n");
+ panic();
+ break;
+ }
+
+ result = (*register_io_dev)(&boot_dev_con);
+ assert(result == 0);
+
+ result = register_io_dev_fip(&fip_dev_con);
+ assert(result == 0);
+
+ result = io_dev_open(boot_dev_con, (uintptr_t)&boot_dev_spec,
+ &boot_dev_handle);
+ assert(result == 0);
+
+ result = io_dev_open(fip_dev_con, (uintptr_t)NULL, &fip_dev_handle);
+ assert(result == 0);
+
+ if (boot_source == BOOT_SOURCE_SDMMC) {
+ partition_init(GPT_IMAGE_ID);
+ fip_spec.offset = get_partition_entry(a2)->start;
+ }
+
+ (void)result;
+}
+
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+ uintptr_t *image_spec)
+{
+ int result;
+ const struct plat_io_policy *policy;
+
+ assert(image_id < ARRAY_SIZE(policies));
+
+ policy = &policies[image_id];
+ result = policy->check(policy->image_spec);
+ assert(result == 0);
+
+ *image_spec = policy->image_spec;
+ *dev_handle = *(policy->dev_handle);
+
+ return result;
+}
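The policies[] table above is the only platform knowledge the image loading path needs: generic code asks plat_get_image_source() for a device handle plus an image spec and then drives the io_storage API itself. A rough sketch of that flow, reusing only io_open()/io_read()/io_close() from drivers/io/io_storage.h; the helper name and error handling are illustrative and not part of this patch:

static int load_one_image(unsigned int image_id, uintptr_t buf, size_t len)
{
	uintptr_t dev_handle, image_spec, image_handle;
	size_t bytes_read;
	int ret;

	/* Resolve the backing device and spec from the policy table */
	ret = plat_get_image_source(image_id, &dev_handle, &image_spec);
	if (ret != 0)
		return ret;

	/* Open the image on that device and copy it into 'buf' */
	ret = io_open(dev_handle, image_spec, &image_handle);
	if (ret != 0)
		return ret;

	ret = io_read(image_handle, buf, len, &bytes_read);
	io_close(image_handle);

	return ret;
}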
diff --git a/plat/intel/soc/common/socfpga_topology.c b/plat/intel/soc/common/socfpga_topology.c
new file mode 100644
index 0000000..ca1a91e
--- /dev/null
+++ b/plat/intel/soc/common/socfpga_topology.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <platform_def.h>
+#include <lib/psci/psci.h>
+
+static const unsigned char plat_power_domain_tree_desc[] = {1, 4};
+
+/*******************************************************************************
+ * This function returns the default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+ return plat_power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+ unsigned int cluster_id, cpu_id;
+
+ mpidr &= MPIDR_AFFINITY_MASK;
+
+ if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
+ return -1;
+
+ cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+ cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+ if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+ return -1;
+
+	/*
+	 * Validate cpu_id by checking whether it represents a CPU in
+	 * one of the clusters present on the platform.
+	 */
+ if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
+ return -1;
+
+ return (cpu_id + (cluster_id * 4));
+}
+
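Worked example of the mapping implemented by plat_core_pos_by_mpidr(), assuming the single-cluster, four-core topology declared in plat_power_domain_tree_desc above (the MPIDR affinity values are illustrative):

	Aff1 = 0, Aff0 = 0  ->  linear index 0
	Aff1 = 0, Aff0 = 1  ->  linear index 1
	Aff1 = 0, Aff0 = 2  ->  linear index 2
	Aff1 = 0, Aff0 = 3  ->  linear index 3
	Aff1 = 1, Aff0 = 0  ->  -1 (cluster_id >= PLATFORM_CLUSTER_COUNT)
	Aff1 = 0, Aff0 = 4  ->  -1 (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)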
diff --git a/plat/intel/soc/n5x/bl31_plat_setup.c b/plat/intel/soc/n5x/bl31_plat_setup.c
new file mode 100644
index 0000000..5ca1a71
--- /dev/null
+++ b/plat/intel/soc/n5x/bl31_plat_setup.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <drivers/arm/gicv2.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables.h>
+
+#include "ccu/ncore_ccu.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_private.h"
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+ entry_point_info_t *next_image_info;
+
+ next_image_info = (type == NON_SECURE) ?
+ &bl33_image_ep_info : &bl32_image_ep_info;
+
+ /* None of the images on this platform can have 0x0 as the entrypoint */
+ if (next_image_info->pc) {
+ return next_image_info;
+ } else {
+ return NULL;
+ }
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+ u_register_t arg2, u_register_t arg3)
+{
+ static console_t console;
+
+ mmio_write_64(PLAT_SEC_ENTRY, 0);
+
+ console_16550_register(PLAT_INTEL_UART_BASE, PLAT_UART_CLOCK,
+ PLAT_BAUDRATE, &console);
+	/*
+	 * Check that the parameters passed in from BL2 are not NULL.
+	 */
+ void *from_bl2 = (void *) arg0;
+
+ bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+
+ assert(params_from_bl2 != NULL);
+
+ /*
+	 * Copy BL32 (if populated by BL2) and BL33 entry point information.
+ * They are stored in Secure RAM, in BL31's address space.
+ */
+
+ if (params_from_bl2->h.type == PARAM_BL_PARAMS &&
+ params_from_bl2->h.version >= VERSION_2) {
+
+ bl_params_node_t *bl_params = params_from_bl2->head;
+
+ while (bl_params != NULL) {
+ if (bl_params->image_id == BL33_IMAGE_ID)
+ bl33_image_ep_info = *bl_params->ep_info;
+
+ bl_params = bl_params->next_params_info;
+ }
+ } else {
+ struct socfpga_bl31_params *arg_from_bl2 =
+ (struct socfpga_bl31_params *) from_bl2;
+
+ assert(arg_from_bl2->h.type == PARAM_BL31);
+ assert(arg_from_bl2->h.version >= VERSION_1);
+
+ bl32_image_ep_info = *arg_from_bl2->bl32_ep_info;
+ bl33_image_ep_info = *arg_from_bl2->bl33_ep_info;
+ }
+ SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+}
+
+static const interrupt_prop_t s10_interrupt_props[] = {
+ PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
+ PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
+};
+
+static unsigned int target_mask_array[PLATFORM_CORE_COUNT];
+
+static const gicv2_driver_data_t plat_gicv2_gic_data = {
+ .gicd_base = PLAT_INTEL_SOCFPGA_GICD_BASE,
+ .gicc_base = PLAT_INTEL_SOCFPGA_GICC_BASE,
+ .interrupt_props = s10_interrupt_props,
+ .interrupt_props_num = ARRAY_SIZE(s10_interrupt_props),
+ .target_masks = target_mask_array,
+ .target_masks_num = ARRAY_SIZE(target_mask_array),
+};
+
+/*******************************************************************************
+ * Perform any BL3-1 platform setup code
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+ socfpga_delay_timer_init();
+
+ /* Initialize the gic cpu and distributor interfaces */
+ gicv2_driver_init(&plat_gicv2_gic_data);
+ gicv2_distif_init();
+ gicv2_pcpu_distif_init();
+ gicv2_cpuif_enable();
+
+	/* Signal secondary CPUs to jump to BL31 (BL2 = U-Boot SPL) */
+ mmio_write_64(PLAT_CPU_RELEASE_ADDR,
+ (uint64_t)plat_secondary_cpus_bl31_entry);
+
+ mailbox_hps_stage_notify(HPS_EXECUTION_STATE_SSBL);
+
+ ncore_enable_ocram_firewall();
+}
+
+const mmap_region_t plat_dm_mmap[] = {
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE,
+ MT_MEMORY | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE,
+ MT_NON_CACHEABLE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ {0}
+};
+
+/*******************************************************************************
+ * Perform the very early platform-specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+ const mmap_region_t bl_regions[] = {
+ MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
+ MT_MEMORY | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+ MT_CODE | MT_SECURE),
+ MAP_REGION_FLAT(BL_RO_DATA_BASE,
+ BL_RO_DATA_END - BL_RO_DATA_BASE,
+ MT_RO_DATA | MT_SECURE),
+#if USE_COHERENT_MEM
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+#endif
+ {0}
+ };
+
+ setup_page_tables(bl_regions, plat_dm_mmap);
+ enable_mmu_el3(0);
+}
diff --git a/plat/intel/soc/n5x/include/socfpga_plat_def.h b/plat/intel/soc/n5x/include/socfpga_plat_def.h
new file mode 100644
index 0000000..4c36f91
--- /dev/null
+++ b/plat/intel/soc/n5x/include/socfpga_plat_def.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_SOCFPGA_DEF_H
+#define PLAT_SOCFPGA_DEF_H
+
+#include <platform_def.h>
+
+/* Platform Setting */
+#define PLATFORM_MODEL PLAT_SOCFPGA_N5X
+#define BOOT_SOURCE BOOT_SOURCE_SDMMC
+
+/* FPGA config helpers */
+#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR 0x400000
+#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE 0x2000000
+
+/* Register Mapping */
+#define SOCFPGA_CCU_NOC_REG_BASE U(0xf7000000)
+#define SOCFPGA_F2SDRAMMGR_REG_BASE U(0xf8024000)
+
+#define SOCFPGA_MMC_REG_BASE U(0xff808000)
+
+#define SOCFPGA_RSTMGR_REG_BASE U(0xffd11000)
+#define SOCFPGA_SYSMGR_REG_BASE U(0xffd12000)
+
+#define SOCFPGA_L4_PER_SCR_REG_BASE U(0xffd21000)
+#define SOCFPGA_L4_SYS_SCR_REG_BASE U(0xffd21100)
+#define SOCFPGA_SOC2FPGA_SCR_REG_BASE U(0xffd21200)
+#define SOCFPGA_LWSOC2FPGA_SCR_REG_BASE U(0xffd21300)
+
+/* Platform specific system counter */
+/*
+ * In N5X the clock init is done in U-Boot SPL.
+ * BL31 bypasses the clock init and only provides the other APIs.
+ */
+#define PLAT_SYS_COUNTER_FREQ_IN_MHZ (400)
+
+#endif /* PLAT_SOCFPGA_DEF_H */
diff --git a/plat/intel/soc/n5x/platform.mk b/plat/intel/soc/n5x/platform.mk
new file mode 100644
index 0000000..be1ad8c
--- /dev/null
+++ b/plat/intel/soc/n5x/platform.mk
@@ -0,0 +1,52 @@
+#
+# Copyright (c) 2020-2022, Intel Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES := \
+ -Iplat/intel/soc/n5x/include/ \
+ -Iplat/intel/soc/common/drivers/ \
+ -Iplat/intel/soc/common/include/
+
+# Include GICv2 driver files
+include drivers/arm/gic/v2/gicv2.mk
+DM_GICv2_SOURCES := \
+ ${GICV2_SOURCES} \
+ plat/common/plat_gicv2.c
+
+
+PLAT_BL_COMMON_SOURCES := \
+ ${DM_GICv2_SOURCES} \
+ drivers/delay_timer/delay_timer.c \
+ drivers/delay_timer/generic_delay_timer.c \
+ drivers/ti/uart/aarch64/16550_console.S \
+ lib/xlat_tables/aarch64/xlat_tables.c \
+ lib/xlat_tables/xlat_tables_common.c \
+ plat/intel/soc/common/aarch64/platform_common.c \
+ plat/intel/soc/common/aarch64/plat_helpers.S \
+ plat/intel/soc/common/socfpga_delay_timer.c \
+ plat/intel/soc/common/drivers/ccu/ncore_ccu.c
+
+BL2_SOURCES +=
+
+BL31_SOURCES += \
+ drivers/arm/cci/cci.c \
+ lib/cpus/aarch64/aem_generic.S \
+ lib/cpus/aarch64/cortex_a53.S \
+ plat/common/plat_psci_common.c \
+ plat/intel/soc/n5x/bl31_plat_setup.c \
+ plat/intel/soc/common/socfpga_psci.c \
+ plat/intel/soc/common/socfpga_sip_svc.c \
+ plat/intel/soc/common/socfpga_sip_svc_v2.c \
+ plat/intel/soc/common/socfpga_topology.c \
+ plat/intel/soc/common/sip/socfpga_sip_ecc.c \
+ plat/intel/soc/common/sip/socfpga_sip_fcs.c \
+ plat/intel/soc/common/soc/socfpga_mailbox.c \
+ plat/intel/soc/common/soc/socfpga_reset_manager.c
+
+PROGRAMMABLE_RESET_ADDRESS := 0
+BL2_AT_EL3 := 1
+BL2_INV_DCACHE := 0
+MULTI_CONSOLE_API := 1
+USE_COHERENT_MEM := 1
diff --git a/plat/intel/soc/stratix10/bl2_plat_setup.c b/plat/intel/soc/stratix10/bl2_plat_setup.c
new file mode 100644
index 0000000..73e3216
--- /dev/null
+++ b/plat/intel/soc/stratix10/bl2_plat_setup.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/desc_image_load.h>
+#include <drivers/generic_delay_timer.h>
+#include <drivers/synopsys/dw_mmc.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/xlat_tables/xlat_tables.h>
+
+#include "qspi/cadence_qspi.h"
+#include "socfpga_emac.h"
+#include "socfpga_f2sdram_manager.h"
+#include "socfpga_handoff.h"
+#include "socfpga_mailbox.h"
+#include "socfpga_private.h"
+#include "socfpga_reset_manager.h"
+#include "socfpga_system_manager.h"
+#include "s10_clock_manager.h"
+#include "s10_memory_controller.h"
+#include "s10_mmc.h"
+#include "s10_pinmux.h"
+#include "wdt/watchdog.h"
+
+static struct mmc_device_info mmc_info;
+
+const mmap_region_t plat_stratix10_mmap[] = {
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE,
+ MT_MEMORY | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE,
+ MT_NON_CACHEABLE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ {0},
+};
+
+boot_source_type boot_source = BOOT_SOURCE;
+
+void bl2_el3_early_platform_setup(u_register_t x0, u_register_t x1,
+ u_register_t x2, u_register_t x4)
+{
+ static console_t console;
+ handoff reverse_handoff_ptr;
+
+ generic_delay_timer_init();
+
+ if (socfpga_get_handoff(&reverse_handoff_ptr))
+ return;
+ config_pinmux(&reverse_handoff_ptr);
+
+ config_clkmgr_handoff(&reverse_handoff_ptr);
+ enable_nonsecure_access();
+ deassert_peripheral_reset();
+ config_hps_hs_before_warm_reset();
+
+ watchdog_init(get_wdt_clk());
+
+ console_16550_register(PLAT_INTEL_UART_BASE, get_uart_clk(),
+ PLAT_BAUDRATE, &console);
+
+ socfpga_emac_init();
+ socfpga_delay_timer_init();
+ init_hard_memory_controller();
+ mailbox_init();
+ s10_mmc_init();
+
+ if (!intel_mailbox_is_fpga_not_ready()) {
+ socfpga_bridges_enable(SOC2FPGA_MASK | LWHPS2FPGA_MASK |
+ FPGA2SOC_MASK | F2SDRAM0_MASK | F2SDRAM1_MASK |
+ F2SDRAM2_MASK);
+ }
+}
+
+
+void bl2_el3_plat_arch_setup(void)
+{
+
+ const mmap_region_t bl_regions[] = {
+ MAP_REGION_FLAT(BL2_BASE, BL2_END - BL2_BASE,
+ MT_MEMORY | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+ MT_CODE | MT_SECURE),
+ MAP_REGION_FLAT(BL_RO_DATA_BASE,
+ BL_RO_DATA_END - BL_RO_DATA_BASE,
+ MT_RO_DATA | MT_SECURE),
+#if USE_COHERENT_MEM_BAR
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+#endif
+ {0},
+ };
+
+ setup_page_tables(bl_regions, plat_stratix10_mmap);
+
+ enable_mmu_el3(0);
+
+ dw_mmc_params_t params = EMMC_INIT_PARAMS(0x100000, get_mmc_clk());
+
+ mmc_info.mmc_dev_type = MMC_IS_SD;
+ mmc_info.ocr_voltage = OCR_3_3_3_4 | OCR_3_2_3_3;
+
+ /* Request ownership and direct access to QSPI */
+ mailbox_hps_qspi_enable();
+
+ switch (boot_source) {
+ case BOOT_SOURCE_SDMMC:
+ dw_mmc_init(&params, &mmc_info);
+ socfpga_io_setup(boot_source);
+ break;
+
+ case BOOT_SOURCE_QSPI:
+ cad_qspi_init(0, QSPI_CONFIG_CPHA, QSPI_CONFIG_CPOL,
+ QSPI_CONFIG_CSDA, QSPI_CONFIG_CSDADS,
+ QSPI_CONFIG_CSEOT, QSPI_CONFIG_CSSOT, 0);
+ socfpga_io_setup(boot_source);
+ break;
+
+ default:
+ ERROR("Unsupported boot source\n");
+ panic();
+ break;
+ }
+}
+
+uint32_t get_spsr_for_bl33_entry(void)
+{
+ unsigned long el_status;
+ unsigned int mode;
+ uint32_t spsr;
+
+ /* Figure out what mode we enter the non-secure world in */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+ /*
+ * TODO: Consider the possibility of specifying the SPSR in
+ * the FIP ToC and allowing the platform to have a say as
+ * well.
+ */
+ spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ return spsr;
+}
+
+
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+ bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+
+ assert(bl_mem_params);
+
+ switch (image_id) {
+ case BL33_IMAGE_ID:
+ bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+ bl_mem_params->ep_info.spsr = get_spsr_for_bl33_entry();
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Perform any BL2 platform setup code
+ ******************************************************************************/
+void bl2_platform_setup(void)
+{
+}
+
diff --git a/plat/intel/soc/stratix10/bl31_plat_setup.c b/plat/intel/soc/stratix10/bl31_plat_setup.c
new file mode 100644
index 0000000..be0fae5
--- /dev/null
+++ b/plat/intel/soc/stratix10/bl31_plat_setup.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/bl_common.h>
+#include <drivers/arm/gicv2.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/xlat_tables/xlat_tables.h>
+#include <lib/mmio.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+#include "socfpga_mailbox.h"
+#include "socfpga_noc.h"
+#include "socfpga_private.h"
+#include "socfpga_reset_manager.h"
+#include "socfpga_system_manager.h"
+#include "s10_memory_controller.h"
+#include "s10_pinmux.h"
+#include "s10_clock_manager.h"
+
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+ entry_point_info_t *next_image_info;
+
+ next_image_info = (type == NON_SECURE) ?
+ &bl33_image_ep_info : &bl32_image_ep_info;
+
+ /* None of the images on this platform can have 0x0 as the entrypoint */
+ if (next_image_info->pc)
+ return next_image_info;
+ else
+ return NULL;
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+ u_register_t arg2, u_register_t arg3)
+{
+ static console_t console;
+
+ mmio_write_64(PLAT_SEC_ENTRY, PLAT_SEC_WARM_ENTRY);
+
+ console_16550_register(PLAT_INTEL_UART_BASE, PLAT_UART_CLOCK,
+ PLAT_BAUDRATE, &console);
+	/*
+	 * Check that the parameters passed in from BL2 are not NULL.
+	 */
+ void *from_bl2 = (void *) arg0;
+
+ bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+ assert(params_from_bl2 != NULL);
+
+ /*
+	 * Copy BL32 (if populated by BL2) and BL33 entry point information.
+ * They are stored in Secure RAM, in BL31's address space.
+ */
+
+ if (params_from_bl2->h.type == PARAM_BL_PARAMS &&
+ params_from_bl2->h.version >= VERSION_2) {
+
+ bl_params_node_t *bl_params = params_from_bl2->head;
+
+ while (bl_params) {
+ if (bl_params->image_id == BL33_IMAGE_ID)
+ bl33_image_ep_info = *bl_params->ep_info;
+
+ bl_params = bl_params->next_params_info;
+ }
+ } else {
+ struct socfpga_bl31_params *arg_from_bl2 =
+ (struct socfpga_bl31_params *) from_bl2;
+
+ assert(arg_from_bl2->h.type == PARAM_BL31);
+ assert(arg_from_bl2->h.version >= VERSION_1);
+
+ bl32_image_ep_info = *arg_from_bl2->bl32_ep_info;
+ bl33_image_ep_info = *arg_from_bl2->bl33_ep_info;
+ }
+ SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+}
+
+static const interrupt_prop_t s10_interrupt_props[] = {
+ PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
+ PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
+};
+
+static unsigned int target_mask_array[PLATFORM_CORE_COUNT];
+
+static const gicv2_driver_data_t plat_gicv2_gic_data = {
+ .gicd_base = PLAT_INTEL_SOCFPGA_GICD_BASE,
+ .gicc_base = PLAT_INTEL_SOCFPGA_GICC_BASE,
+ .interrupt_props = s10_interrupt_props,
+ .interrupt_props_num = ARRAY_SIZE(s10_interrupt_props),
+ .target_masks = target_mask_array,
+ .target_masks_num = ARRAY_SIZE(target_mask_array),
+};
+
+/*******************************************************************************
+ * Perform any BL3-1 platform setup code
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+ socfpga_delay_timer_init();
+
+ /* Initialize the gic cpu and distributor interfaces */
+ gicv2_driver_init(&plat_gicv2_gic_data);
+ gicv2_distif_init();
+ gicv2_pcpu_distif_init();
+ gicv2_cpuif_enable();
+
+	/* Signal secondary CPUs to jump to BL31 (BL2 = U-Boot SPL) */
+ mmio_write_64(PLAT_CPU_RELEASE_ADDR,
+ (uint64_t)plat_secondary_cpus_bl31_entry);
+
+ mailbox_hps_stage_notify(HPS_EXECUTION_STATE_SSBL);
+
+ enable_ocram_firewall();
+}
+
+const mmap_region_t plat_stratix10_mmap[] = {
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE,
+ MT_MEMORY | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE,
+ MT_NON_CACHEABLE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ {0}
+};
+
+/*******************************************************************************
+ * Perform the very early platform-specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+ const mmap_region_t bl_regions[] = {
+ MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
+ MT_MEMORY | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+ MT_CODE | MT_SECURE),
+ MAP_REGION_FLAT(BL_RO_DATA_BASE,
+ BL_RO_DATA_END - BL_RO_DATA_BASE,
+ MT_RO_DATA | MT_SECURE),
+#if USE_COHERENT_MEM
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+#endif
+ {0}
+ };
+
+ setup_page_tables(bl_regions, plat_stratix10_mmap);
+ enable_mmu_el3(0);
+}
+
diff --git a/plat/intel/soc/stratix10/include/s10_clock_manager.h b/plat/intel/soc/stratix10/include/s10_clock_manager.h
new file mode 100644
index 0000000..cf57df3
--- /dev/null
+++ b/plat/intel/soc/stratix10/include/s10_clock_manager.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CLOCKMANAGER_H__
+#define __CLOCKMANAGER_H__
+
+#include "socfpga_handoff.h"
+
+#define ALT_CLKMGR 0xffd10000
+
+#define ALT_CLKMGR_CTRL 0x0
+#define ALT_CLKMGR_STAT 0x4
+#define ALT_CLKMGR_INTRCLR 0x14
+#define ALT_CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK 0x00000004
+#define ALT_CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK 0x00000008
+
+#define ALT_CLKMGR_CTRL_BOOTMODE_SET_MSK 0x00000001
+#define ALT_CLKMGR_STAT_BUSY_E_BUSY 0x1
+#define ALT_CLKMGR_STAT_BUSY(x) (((x) & 0x00000001) >> 0)
+#define ALT_CLKMGR_STAT_MAINPLLLOCKED(x) (((x) & 0x00000100) >> 8)
+#define ALT_CLKMGR_STAT_PERPLLLOCKED(x) (((x) & 0x00000200) >> 9)
+
+#define ALT_CLKMGR_MAINPLL 0xffd10030
+#define ALT_CLKMGR_MAINPLL_EN 0x0
+#define ALT_CLKMGR_MAINPLL_BYPASS 0xc
+#define ALT_CLKMGR_MAINPLL_MPUCLK 0x18
+#define ALT_CLKMGR_MAINPLL_NOCCLK 0x1c
+#define ALT_CLKMGR_MAINPLL_CNTR2CLK 0x20
+#define ALT_CLKMGR_MAINPLL_CNTR3CLK 0x24
+#define ALT_CLKMGR_MAINPLL_CNTR4CLK 0x28
+#define ALT_CLKMGR_MAINPLL_CNTR5CLK 0x2c
+#define ALT_CLKMGR_MAINPLL_CNTR6CLK 0x30
+#define ALT_CLKMGR_MAINPLL_CNTR7CLK 0x34
+#define ALT_CLKMGR_MAINPLL_CNTR8CLK 0x38
+#define ALT_CLKMGR_MAINPLL_CNTR9CLK 0x3c
+#define ALT_CLKMGR_MAINPLL_NOCDIV 0x40
+#define ALT_CLKMGR_MAINPLL_PLLGLOB 0x44
+#define ALT_CLKMGR_MAINPLL_FDBCK 0x48
+#define ALT_CLKMGR_MAINPLL_PLLC0 0x54
+#define ALT_CLKMGR_MAINPLL_PLLC1 0x58
+#define ALT_CLKMGR_MAINPLL_VCOCALIB 0x5c
+#define ALT_CLKMGR_MAINPLL_EN_RESET 0x000000ff
+#define ALT_CLKMGR_MAINPLL_FDBCK_MDIV(x) (((x) & 0xff000000) >> 24)
+#define ALT_CLKMGR_MAINPLL_PLLGLOB_PD_SET_MSK 0x00000001
+#define ALT_CLKMGR_MAINPLL_PLLGLOB_REFCLKDIV(x) (((x) & 0x00003f00) >> 8)
+#define ALT_CLKMGR_MAINPLL_PLLGLOB_RST_SET_MSK 0x00000002
+#define ALT_CLKMGR_MAINPLL_VCOCALIB_HSCNT_SET(x) (((x) << 0) & 0x000000ff)
+#define ALT_CLKMGR_MAINPLL_VCOCALIB_MSCNT_SET(x) (((x) << 9) & 0x0001fe00)
+
+#define ALT_CLKMGR_PSRC(x) (((x) & 0x00030000) >> 16)
+#define ALT_CLKMGR_SRC_MAIN 0
+#define ALT_CLKMGR_SRC_PER 1
+
+#define ALT_CLKMGR_PLLGLOB_PSRC_EOSC1 0x0
+#define ALT_CLKMGR_PLLGLOB_PSRC_INTOSC 0x1
+#define ALT_CLKMGR_PLLGLOB_PSRC_F2S 0x2
+
+#define ALT_CLKMGR_PERPLL 0xffd100a4
+#define ALT_CLKMGR_PERPLL_EN 0x0
+#define ALT_CLKMGR_PERPLL_EN_SDMMCCLK BIT(5)
+#define ALT_CLKMGR_PERPLL_BYPASS 0xc
+#define ALT_CLKMGR_PERPLL_CNTR2CLK 0x18
+#define ALT_CLKMGR_PERPLL_CNTR3CLK 0x1c
+#define ALT_CLKMGR_PERPLL_CNTR4CLK 0x20
+#define ALT_CLKMGR_PERPLL_CNTR5CLK 0x24
+#define ALT_CLKMGR_PERPLL_CNTR6CLK 0x28
+#define ALT_CLKMGR_PERPLL_CNTR7CLK 0x2c
+#define ALT_CLKMGR_PERPLL_CNTR8CLK 0x30
+#define ALT_CLKMGR_PERPLL_CNTR9CLK 0x34
+#define ALT_CLKMGR_PERPLL_GPIODIV 0x3c
+#define ALT_CLKMGR_PERPLL_EMACCTL 0x38
+#define ALT_CLKMGR_PERPLL_PLLGLOB 0x40
+#define ALT_CLKMGR_PERPLL_FDBCK 0x44
+#define ALT_CLKMGR_PERPLL_PLLC0 0x50
+#define ALT_CLKMGR_PERPLL_PLLC1 0x54
+#define ALT_CLKMGR_PERPLL_EN_RESET 0x00000fff
+#define ALT_CLKMGR_PERPLL_FDBCK_MDIV(x) (((x) & 0xff000000) >> 24)
+#define ALT_CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET(x) (((x) << 0) & 0x0000ffff)
+#define ALT_CLKMGR_PERPLL_PLLGLOB_PD_SET_MSK 0x00000001
+#define ALT_CLKMGR_PERPLL_PLLGLOB_REFCLKDIV(x) (((x) & 0x00003f00) >> 8)
+#define ALT_CLKMGR_PERPLL_PLLGLOB_REFCLKDIV_SET(x) (((x) << 8) & 0x00003f00)
+#define ALT_CLKMGR_PERPLL_PLLGLOB_RST_SET_MSK 0x00000002
+#define ALT_CLKMGR_PERPLL_VCOCALIB_HSCNT_SET(x) (((x) << 0) & 0x000000ff)
+#define ALT_CLKMGR_PERPLL_VCOCALIB_MSCNT_SET(x) (((x) << 9) & 0x0001fe00)
+#define ALT_CLKMGR_PERPLL_VCOCALIB 0x58
+
+#define ALT_CLKMGR_INTOSC_HZ 460000000
+
+void config_clkmgr_handoff(handoff *hoff_ptr);
+uint32_t get_wdt_clk(void);
+uint32_t get_uart_clk(void);
+uint32_t get_mmc_clk(void);
+uint32_t get_l3_clk(uint32_t ref_clk);
+uint32_t get_ref_clk(uint32_t pllglob);
+
+#endif
diff --git a/plat/intel/soc/stratix10/include/s10_memory_controller.h b/plat/intel/soc/stratix10/include/s10_memory_controller.h
new file mode 100644
index 0000000..155b279
--- /dev/null
+++ b/plat/intel/soc/stratix10/include/s10_memory_controller.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __S10_MEMORYCONTROLLER_H__
+#define __S10_MEMORYCONTROLLER_H__
+
+#define S10_MPFE_IOHMC_REG_DRAMADDRW 0xf80100a8
+#define S10_MPFE_IOHMC_CTRLCFG0 0xf8010028
+#define S10_MPFE_IOHMC_CTRLCFG1 0xf801002c
+#define S10_MPFE_IOHMC_DRAMADDRW 0xf80100a8
+#define S10_MPFE_IOHMC_DRAMTIMING0 0xf8010050
+#define S10_MPFE_IOHMC_CALTIMING0 0xf801007c
+#define S10_MPFE_IOHMC_CALTIMING1 0xf8010080
+#define S10_MPFE_IOHMC_CALTIMING2 0xf8010084
+#define S10_MPFE_IOHMC_CALTIMING3 0xf8010088
+#define S10_MPFE_IOHMC_CALTIMING4 0xf801008c
+#define S10_MPFE_IOHMC_CALTIMING9 0xf80100a0
+#define S10_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(x) (((x) & 0x000000ff) >> 0)
+#define S10_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(value) \
+ (((value) & 0x00000060) >> 5)
+
+
+#define S10_MPFE_HMC_ADP_ECCCTRL1 0xf8011100
+#define S10_MPFE_HMC_ADP_ECCCTRL2 0xf8011104
+#define S10_MPFE_HMC_ADP_RSTHANDSHAKESTAT 0xf8011218
+#define S10_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE 0x000000ff
+#define S10_MPFE_HMC_ADP_RSTHANDSHAKECTRL 0xf8011214
+
+
+#define S10_MPFE_IOHMC_REG_CTRLCFG1 0xf801002c
+
+#define S10_MPFE_IOHMC_REG_NIOSRESERVE0_OFST 0xf8010110
+
+#define IOHMC_DRAMADDRW_COL_ADDR_WIDTH(x) (((x) & 0x0000001f) >> 0)
+#define IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(x) (((x) & 0x000003e0) >> 5)
+#define IOHMC_DRAMADDRW_CS_ADDR_WIDTH(x) (((x) & 0x00070000) >> 16)
+#define IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(x) (((x) & 0x0000c000) >> 14)
+#define IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(x) (((x) & 0x00003c00) >> 10)
+
+#define S10_MPFE_DDR(x) (0xf8000000 + x)
+#define S10_MPFE_HMC_ADP_DDRCALSTAT 0xf801100c
+#define S10_MPFE_DDR_MAIN_SCHED 0xf8000400
+#define S10_MPFE_DDR_MAIN_SCHED_DDRCONF 0xf8000408
+#define S10_MPFE_DDR_MAIN_SCHED_DDRTIMING 0xf800040c
+#define S10_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK 0x0000001f
+#define S10_MPFE_DDR_MAIN_SCHED_DDRMODE 0xf8000410
+#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV 0xf800043c
+#define S10_MPFE_DDR_MAIN_SCHED_READLATENCY 0xf8000414
+#define S10_MPFE_DDR_MAIN_SCHED_ACTIVATE 0xf8000438
+#define S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST 10
+#define S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST 4
+#define S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST 0
+#define S10_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(x) (((x) << 0) & 0x0000001f)
+#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST 0
+#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK (BIT(0) | BIT(1))
+#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST 2
+#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK (BIT(2) | BIT(3))
+#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST 4
+#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK (BIT(4) | BIT(5))
+
+#define S10_MPFE_HMC_ADP(x) (0xf8011000 + (x))
+#define S10_MPFE_HMC_ADP_HPSINTFCSEL 0xf8011210
+#define S10_MPFE_HMC_ADP_DDRIOCTRL 0xf8011008
+#define HMC_ADP_DDRIOCTRL 0x8
+#define HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00000003) >> 0)
+#define HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(x) (((x) & 0x00003e00) >> 9)
+#define ADP_DRAMADDRWIDTH 0xe0
+
+#define ACT_TO_ACT_DIFF_BANK(value) (((value) & 0x00fc0000) >> 18)
+#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12)
+#define ACT_TO_RDWR(value) (((value) & 0x0000003f) >> 0)
+#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12)
+
+/* timing 2 */
+#define RD_TO_RD_DIFF_CHIP(value) (((value) & 0x00000fc0) >> 6)
+#define RD_TO_WR_DIFF_CHIP(value) (((value) & 0x3f000000) >> 24)
+#define RD_TO_WR(value) (((value) & 0x00fc0000) >> 18)
+#define RD_TO_PCH(value) (((value) & 0x00000fc0) >> 6)
+
+/* timing 3 */
+#define CALTIMING3_WR_TO_RD_DIFF_CHIP(value) (((value) & 0x0003f000) >> 12)
+#define CALTIMING3_WR_TO_RD(value) (((value) & 0x00000fc0) >> 6)
+
+/* timing 4 */
+#define PCH_TO_VALID(value) (((value) & 0x00000fc0) >> 6)
+
+#define DDRTIMING_BWRATIO_OFST 31
+#define DDRTIMING_WRTORD_OFST 26
+#define DDRTIMING_RDTOWR_OFST 21
+#define DDRTIMING_BURSTLEN_OFST 18
+#define DDRTIMING_WRTOMISS_OFST 12
+#define DDRTIMING_RDTOMISS_OFST 6
+#define DDRTIMING_ACTTOACT_OFST 0
+
+#define ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00000003) >> 0)
+
+#define DDRMODE_AUTOPRECHARGE_OFST 1
+#define DDRMODE_BWRATIOEXTENDED_OFST 0
+
+
+#define S10_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(x) (((x) & 0x0000007f) >> 0)
+#define S10_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(x) (((x) & 0x0000000f) >> 0)
+
+#define S10_CCU_CPU0_MPRT_DDR 0xf7004400
+#define S10_CCU_CPU0_MPRT_MEM0 0xf70045c0
+#define S10_CCU_CPU0_MPRT_MEM1A 0xf70045e0
+#define S10_CCU_CPU0_MPRT_MEM1B 0xf7004600
+#define S10_CCU_CPU0_MPRT_MEM1C 0xf7004620
+#define S10_CCU_CPU0_MPRT_MEM1D 0xf7004640
+#define S10_CCU_CPU0_MPRT_MEM1E 0xf7004660
+#define S10_CCU_IOM_MPRT_MEM0 0xf7018560
+#define S10_CCU_IOM_MPRT_MEM1A 0xf7018580
+#define S10_CCU_IOM_MPRT_MEM1B 0xf70185a0
+#define S10_CCU_IOM_MPRT_MEM1C 0xf70185c0
+#define S10_CCU_IOM_MPRT_MEM1D 0xf70185e0
+#define S10_CCU_IOM_MPRT_MEM1E 0xf7018600
+
+#define S10_NOC_FW_DDR_SCR 0xf8020100
+#define S10_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT 0xf802011c
+#define S10_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT 0xf8020118
+#define S10_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT 0xf802019c
+#define S10_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT 0xf8020198
+
+#define S10_SOC_NOC_FW_DDR_SCR_ENABLE 0xf8020100
+#define S10_CCU_NOC_DI_SET_MSK 0x10
+
+#define S10_SYSMGR_CORE_HMC_CLK 0xffd120b4
+#define S10_SYSMGR_CORE_HMC_CLK_STATUS 0x00000001
+
+#define S10_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(x) (((x) & 0x0000ffff) >> 0)
+#define S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK 0x00000003
+#define S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST 0
+#define S10_MPFE_HMC_ADP_HPSINTFCSEL_ENABLE 0x001f1f1f
+#define S10_IOHMC_CTRLCFG1_ENABLE_ECC_OFST 7
+
+#define S10_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK 0x00010000
+#define S10_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK 0x00000100
+#define S10_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK 0x00000001
+
+#define S10_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK 0x00000001
+#define S10_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK 0x00010000
+#define S10_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK 0x00000100
+#define S10_MPFE_HMC_ADP_DDRCALSTAT_CAL(value) (((value) & 0x00000001) >> 0)
+
+
+#define S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00000003) >> 0)
+#define IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(x) (((x) & 0x00003c00) >> 10)
+#define IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(x) (((x) & 0x0000c000) >> 14)
+#define IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(x) (((x) & 0x0000001f) >> 0)
+#define IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(x) (((x) & 0x00070000) >> 16)
+#define IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(x) (((x) & 0x000003e0) >> 5)
+
+#define S10_SDRAM_0_LB_ADDR 0x0
+
+int init_hard_memory_controller(void);
+
+#endif
diff --git a/plat/intel/soc/stratix10/include/s10_mmc.h b/plat/intel/soc/stratix10/include/s10_mmc.h
new file mode 100644
index 0000000..99f86f5
--- /dev/null
+++ b/plat/intel/soc/stratix10/include/s10_mmc.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __S10_MMC_H__
+#define __S10_MMC_H__
+
+void s10_mmc_init(void);
+
+#endif /* __S10_MMC_H__ */
diff --git a/plat/intel/soc/stratix10/include/s10_pinmux.h b/plat/intel/soc/stratix10/include/s10_pinmux.h
new file mode 100644
index 0000000..82367d7
--- /dev/null
+++ b/plat/intel/soc/stratix10/include/s10_pinmux.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __S10_PINMUX_H__
+#define __S10_PINMUX_H__
+
+#define S10_PINMUX_PIN0SEL 0xffd13000
+#define S10_PINMUX_IO0CTRL 0xffd13130
+#define S10_PINMUX_PINMUX_EMAC0_USEFPGA 0xffd13300
+#define S10_PINMUX_IO0_DELAY 0xffd13400
+
+#include "socfpga_handoff.h"
+
+void config_pinmux(handoff *handoff);
+
+#endif
+
diff --git a/plat/intel/soc/stratix10/include/socfpga_plat_def.h b/plat/intel/soc/stratix10/include/socfpga_plat_def.h
new file mode 100644
index 0000000..516cc75
--- /dev/null
+++ b/plat/intel/soc/stratix10/include/socfpga_plat_def.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_SOCFPGA_DEF_H
+#define PLAT_SOCFPGA_DEF_H
+
+#include <platform_def.h>
+
+/* Platform Setting */
+#define PLATFORM_MODEL PLAT_SOCFPGA_STRATIX10
+#define BOOT_SOURCE BOOT_SOURCE_SDMMC
+
+/* FPGA config helpers */
+#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR 0x400000
+#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE 0x1000000
+
+/* Register Mapping */
+#define SOCFPGA_CCU_NOC_REG_BASE 0xf7000000
+#define SOCFPGA_F2SDRAMMGR_REG_BASE U(0xf8024000)
+
+#define SOCFPGA_MMC_REG_BASE 0xff808000
+
+#define SOCFPGA_RSTMGR_REG_BASE 0xffd11000
+#define SOCFPGA_SYSMGR_REG_BASE 0xffd12000
+
+#define SOCFPGA_L4_PER_SCR_REG_BASE 0xffd21000
+#define SOCFPGA_L4_SYS_SCR_REG_BASE 0xffd21100
+#define SOCFPGA_SOC2FPGA_SCR_REG_BASE 0xffd21200
+#define SOCFPGA_LWSOC2FPGA_SCR_REG_BASE 0xffd21300
+
+/* Platform specific system counter */
+#define PLAT_SYS_COUNTER_FREQ_IN_MHZ get_cpu_clk()
+
+uint32_t get_cpu_clk(void);
+
+#endif /* PLAT_SOCFPGA_DEF_H */
+
diff --git a/plat/intel/soc/stratix10/platform.mk b/plat/intel/soc/stratix10/platform.mk
new file mode 100644
index 0000000..b7eb4bd
--- /dev/null
+++ b/plat/intel/soc/stratix10/platform.mk
@@ -0,0 +1,80 @@
+#
+# Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES := \
+ -Iplat/intel/soc/stratix10/include/ \
+ -Iplat/intel/soc/common/drivers/ \
+ -Iplat/intel/soc/common/include/
+
+# Include GICv2 driver files
+include drivers/arm/gic/v2/gicv2.mk
+AGX_GICv2_SOURCES := \
+ ${GICV2_SOURCES} \
+ plat/common/plat_gicv2.c
+
+
+PLAT_BL_COMMON_SOURCES := \
+ ${AGX_GICv2_SOURCES} \
+ drivers/delay_timer/delay_timer.c \
+ drivers/delay_timer/generic_delay_timer.c \
+ drivers/ti/uart/aarch64/16550_console.S \
+ lib/xlat_tables/aarch64/xlat_tables.c \
+ lib/xlat_tables/xlat_tables_common.c \
+ plat/intel/soc/common/aarch64/platform_common.c \
+ plat/intel/soc/common/aarch64/plat_helpers.S \
+ plat/intel/soc/common/socfpga_delay_timer.c \
+ plat/intel/soc/common/soc/socfpga_firewall.c
+
+BL2_SOURCES += \
+ common/desc_image_load.c \
+ drivers/mmc/mmc.c \
+ drivers/intel/soc/stratix10/io/s10_memmap_qspi.c \
+ drivers/io/io_storage.c \
+ drivers/io/io_block.c \
+ drivers/io/io_fip.c \
+ drivers/partition/partition.c \
+ drivers/partition/gpt.c \
+ drivers/synopsys/emmc/dw_mmc.c \
+ lib/cpus/aarch64/cortex_a53.S \
+ plat/intel/soc/stratix10/bl2_plat_setup.c \
+ plat/intel/soc/stratix10/soc/s10_clock_manager.c \
+ plat/intel/soc/stratix10/soc/s10_memory_controller.c \
+ plat/intel/soc/stratix10/soc/s10_mmc.c \
+ plat/intel/soc/stratix10/soc/s10_pinmux.c \
+ plat/intel/soc/common/bl2_plat_mem_params_desc.c \
+ plat/intel/soc/common/socfpga_image_load.c \
+ plat/intel/soc/common/socfpga_storage.c \
+ plat/intel/soc/common/soc/socfpga_emac.c \
+ plat/intel/soc/common/soc/socfpga_handoff.c \
+ plat/intel/soc/common/soc/socfpga_mailbox.c \
+ plat/intel/soc/common/soc/socfpga_reset_manager.c \
+ plat/intel/soc/common/drivers/qspi/cadence_qspi.c \
+ plat/intel/soc/common/drivers/wdt/watchdog.c
+
+include lib/zlib/zlib.mk
+PLAT_INCLUDES += -Ilib/zlib
+BL2_SOURCES += $(ZLIB_SOURCES)
+
+BL31_SOURCES += \
+ drivers/arm/cci/cci.c \
+ lib/cpus/aarch64/aem_generic.S \
+ lib/cpus/aarch64/cortex_a53.S \
+ plat/common/plat_psci_common.c \
+ plat/intel/soc/stratix10/soc/s10_clock_manager.c \
+ plat/intel/soc/stratix10/bl31_plat_setup.c \
+ plat/intel/soc/common/socfpga_psci.c \
+ plat/intel/soc/common/socfpga_sip_svc.c \
+ plat/intel/soc/common/socfpga_sip_svc_v2.c \
+ plat/intel/soc/common/socfpga_topology.c \
+ plat/intel/soc/common/sip/socfpga_sip_ecc.c \
+ plat/intel/soc/common/sip/socfpga_sip_fcs.c \
+ plat/intel/soc/common/soc/socfpga_mailbox.c \
+ plat/intel/soc/common/soc/socfpga_reset_manager.c
+
+PROGRAMMABLE_RESET_ADDRESS := 0
+BL2_AT_EL3 := 1
+USE_COHERENT_MEM := 1
diff --git a/plat/intel/soc/stratix10/soc/s10_clock_manager.c b/plat/intel/soc/stratix10/soc/s10_clock_manager.c
new file mode 100644
index 0000000..30009f7
--- /dev/null
+++ b/plat/intel/soc/stratix10/soc/s10_clock_manager.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#include "s10_clock_manager.h"
+#include "socfpga_handoff.h"
+#include "socfpga_system_manager.h"
+
+
+void wait_pll_lock(void)
+{
+ uint32_t data;
+
+ do {
+ data = mmio_read_32(ALT_CLKMGR + ALT_CLKMGR_STAT);
+ } while ((ALT_CLKMGR_STAT_MAINPLLLOCKED(data) == 0) ||
+ (ALT_CLKMGR_STAT_PERPLLLOCKED(data) == 0));
+}
+
+void wait_fsm(void)
+{
+ uint32_t data;
+
+ do {
+ data = mmio_read_32(ALT_CLKMGR + ALT_CLKMGR_STAT);
+ } while (ALT_CLKMGR_STAT_BUSY(data) == ALT_CLKMGR_STAT_BUSY_E_BUSY);
+}
+
+void config_clkmgr_handoff(handoff *hoff_ptr)
+{
+ uint32_t m_div, refclk_div, mscnt, hscnt;
+
+ /* Bypass all mainpllgrp's clocks */
+ mmio_write_32(ALT_CLKMGR_MAINPLL +
+ ALT_CLKMGR_MAINPLL_BYPASS,
+ 0x7);
+ wait_fsm();
+ /* Bypass all perpllgrp's clocks */
+ mmio_write_32(ALT_CLKMGR_PERPLL +
+ ALT_CLKMGR_PERPLL_BYPASS,
+ 0x7f);
+ wait_fsm();
+
+ /* Setup main PLL dividers */
+ m_div = ALT_CLKMGR_MAINPLL_FDBCK_MDIV(hoff_ptr->main_pll_fdbck);
+ refclk_div = ALT_CLKMGR_MAINPLL_PLLGLOB_REFCLKDIV(
+ hoff_ptr->main_pll_pllglob);
+ mscnt = 200 / ((6 + m_div) / refclk_div);
+ hscnt = (m_div + 6) * mscnt / refclk_div - 9;
+
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLGLOB,
+ hoff_ptr->main_pll_pllglob);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_FDBCK,
+ hoff_ptr->main_pll_fdbck);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_VCOCALIB,
+ ALT_CLKMGR_MAINPLL_VCOCALIB_HSCNT_SET(hscnt) |
+ ALT_CLKMGR_MAINPLL_VCOCALIB_MSCNT_SET(mscnt));
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLC0,
+ hoff_ptr->main_pll_pllc0);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLC1,
+ hoff_ptr->main_pll_pllc1);
+
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_NOCDIV,
+ hoff_ptr->main_pll_nocdiv);
+
+ /* Setup peripheral PLL dividers */
+ m_div = ALT_CLKMGR_PERPLL_FDBCK_MDIV(hoff_ptr->per_pll_fdbck);
+ refclk_div = ALT_CLKMGR_PERPLL_PLLGLOB_REFCLKDIV(
+ hoff_ptr->per_pll_pllglob);
+ mscnt = 200 / ((6 + m_div) / refclk_div);
+ hscnt = (m_div + 6) * mscnt / refclk_div - 9;
+
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLGLOB,
+ hoff_ptr->per_pll_pllglob);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_FDBCK,
+ hoff_ptr->per_pll_fdbck);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_VCOCALIB,
+ ALT_CLKMGR_PERPLL_VCOCALIB_HSCNT_SET(hscnt) |
+ ALT_CLKMGR_PERPLL_VCOCALIB_MSCNT_SET(mscnt));
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLC0,
+ hoff_ptr->per_pll_pllc0);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLC1,
+ hoff_ptr->per_pll_pllc1);
+
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_GPIODIV,
+ ALT_CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET(
+ hoff_ptr->per_pll_gpiodiv));
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_EMACCTL,
+ hoff_ptr->per_pll_emacctl);
+
+
+ /* Take both PLL out of reset and power up */
+ mmio_setbits_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLGLOB,
+ ALT_CLKMGR_MAINPLL_PLLGLOB_PD_SET_MSK |
+ ALT_CLKMGR_MAINPLL_PLLGLOB_RST_SET_MSK);
+ mmio_setbits_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLGLOB,
+ ALT_CLKMGR_PERPLL_PLLGLOB_PD_SET_MSK |
+ ALT_CLKMGR_PERPLL_PLLGLOB_RST_SET_MSK);
+
+ wait_pll_lock();
+
+	/* C2 to C9 dividers are only initialized after the PLLs are locked. */
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_MPUCLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_NOCCLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR2CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR3CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR4CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR5CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR6CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR7CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR8CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR9CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR2CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR3CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR4CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR5CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR6CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR7CLK, 0xff);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR8CLK, 0xff);
+
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_MPUCLK,
+ hoff_ptr->main_pll_mpuclk);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_NOCCLK,
+ hoff_ptr->main_pll_nocclk);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR2CLK,
+ hoff_ptr->main_pll_cntr2clk);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR3CLK,
+ hoff_ptr->main_pll_cntr3clk);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR4CLK,
+ hoff_ptr->main_pll_cntr4clk);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR5CLK,
+ hoff_ptr->main_pll_cntr5clk);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR6CLK,
+ hoff_ptr->main_pll_cntr6clk);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR7CLK,
+ hoff_ptr->main_pll_cntr7clk);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR8CLK,
+ hoff_ptr->main_pll_cntr8clk);
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR9CLK,
+ hoff_ptr->main_pll_cntr9clk);
+
+ /* Peripheral PLL Clock Source and Counters/Divider */
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR2CLK,
+ hoff_ptr->per_pll_cntr2clk);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR3CLK,
+ hoff_ptr->per_pll_cntr3clk);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR4CLK,
+ hoff_ptr->per_pll_cntr4clk);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR5CLK,
+ hoff_ptr->per_pll_cntr5clk);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR6CLK,
+ hoff_ptr->per_pll_cntr6clk);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR7CLK,
+ hoff_ptr->per_pll_cntr7clk);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR8CLK,
+ hoff_ptr->per_pll_cntr8clk);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR9CLK,
+ hoff_ptr->per_pll_cntr9clk);
+
+ /* Take all PLLs out of bypass */
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_BYPASS, 0);
+ wait_fsm();
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_BYPASS, 0);
+ wait_fsm();
+
+	/* Set safe mode / out of boot mode */
+ mmio_clrbits_32(ALT_CLKMGR + ALT_CLKMGR_CTRL,
+ ALT_CLKMGR_CTRL_BOOTMODE_SET_MSK);
+ wait_fsm();
+
+	/* Enable mainpllgrp's and perpllgrp's software-managed clocks */
+ mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_EN,
+ ALT_CLKMGR_MAINPLL_EN_RESET);
+ mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_EN,
+ ALT_CLKMGR_PERPLL_EN_RESET);
+
+	/* Clear the lock-loss interrupt status bits that */
+	/* might have been set during configuration */
+ mmio_write_32(ALT_CLKMGR + ALT_CLKMGR_INTRCLR,
+ ALT_CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK |
+ ALT_CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK);
+
+ /* Pass clock source frequency into scratch register */
+ mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1),
+ hoff_ptr->hps_osc_clk_h);
+ mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2),
+ hoff_ptr->fpga_clk_hz);
+
+}
+
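The mscnt/hscnt values programmed into the VCOCALIB registers above follow directly from the handoff's mdiv and refclkdiv fields. Worked example of the integer arithmetic with purely illustrative divider values (not taken from any real handoff image):

	m_div = 122, refclk_div = 2
	mscnt = 200 / ((6 + 122) / 2) = 200 / 64  = 3
	hscnt = (122 + 6) * 3 / 2 - 9 = 192 - 9   = 183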
+/* Extract reference clock from platform clock source */
+uint32_t get_ref_clk(uint32_t pllglob)
+{
+ uint32_t data32, mdiv, refclkdiv, ref_clk;
+ uint32_t scr_reg;
+
+ switch (ALT_CLKMGR_PSRC(pllglob)) {
+ case ALT_CLKMGR_PLLGLOB_PSRC_EOSC1:
+ scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1);
+ ref_clk = mmio_read_32(scr_reg);
+ break;
+ case ALT_CLKMGR_PLLGLOB_PSRC_INTOSC:
+ ref_clk = ALT_CLKMGR_INTOSC_HZ;
+ break;
+ case ALT_CLKMGR_PLLGLOB_PSRC_F2S:
+ scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2);
+ ref_clk = mmio_read_32(scr_reg);
+ break;
+ default:
+ ref_clk = 0;
+ assert(0);
+ break;
+ }
+
+ refclkdiv = ALT_CLKMGR_MAINPLL_PLLGLOB_REFCLKDIV(pllglob);
+ data32 = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_FDBCK);
+ mdiv = ALT_CLKMGR_MAINPLL_FDBCK_MDIV(data32);
+
+ ref_clk = (ref_clk / refclkdiv) * (6 + mdiv);
+
+ return ref_clk;
+}
+
+/* Calculate L3 interconnect main clock */
+uint32_t get_l3_clk(uint32_t ref_clk)
+{
+ uint32_t noc_base_clk, l3_clk, noc_clk, data32;
+ uint32_t pllc1_reg;
+
+ noc_clk = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_NOCCLK);
+
+ switch (ALT_CLKMGR_PSRC(noc_clk)) {
+ case ALT_CLKMGR_SRC_MAIN:
+ pllc1_reg = ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLC1;
+ break;
+ case ALT_CLKMGR_SRC_PER:
+ pllc1_reg = ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLC1;
+ break;
+ default:
+ pllc1_reg = 0;
+ assert(0);
+ break;
+ }
+
+ data32 = mmio_read_32(pllc1_reg);
+ noc_base_clk = ref_clk / (data32 & 0xff);
+ l3_clk = noc_base_clk / (noc_clk + 1);
+
+ return l3_clk;
+}
+
+/* Calculate clock frequency to be used for watchdog timer */
+uint32_t get_wdt_clk(void)
+{
+ uint32_t data32, ref_clk, l3_clk, l4_sys_clk;
+
+ data32 = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLGLOB);
+ ref_clk = get_ref_clk(data32);
+
+ l3_clk = get_l3_clk(ref_clk);
+
+ l4_sys_clk = l3_clk / 4;
+
+ return l4_sys_clk;
+}
+
+/* Calculate clock frequency to be used for UART driver */
+uint32_t get_uart_clk(void)
+{
+ uint32_t data32, ref_clk, l3_clk, l4_sp_clk;
+
+ data32 = mmio_read_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLGLOB);
+ ref_clk = get_ref_clk(data32);
+
+ l3_clk = get_l3_clk(ref_clk);
+
+ data32 = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_NOCDIV);
+ data32 = (data32 >> 16) & 0x3;
+ data32 = 1 << data32;
+
+ l4_sp_clk = (l3_clk / data32);
+
+ return l4_sp_clk;
+}
+
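The l4_sp divider used in get_uart_clk() above is encoded as a power of two in bits [17:16] of the main PLL NOCDIV register, so the possible outcomes are:

	NOCDIV[17:16] = 0  ->  l4_sp_clk = l3_clk / 1
	NOCDIV[17:16] = 1  ->  l4_sp_clk = l3_clk / 2
	NOCDIV[17:16] = 2  ->  l4_sp_clk = l3_clk / 4
	NOCDIV[17:16] = 3  ->  l4_sp_clk = l3_clk / 8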
+/* Calculate clock frequency to be used for SDMMC driver */
+uint32_t get_mmc_clk(void)
+{
+ uint32_t data32, ref_clk, l3_clk, mmc_clk;
+
+ data32 = mmio_read_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLGLOB);
+ ref_clk = get_ref_clk(data32);
+
+ l3_clk = get_l3_clk(ref_clk);
+
+ data32 = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR6CLK);
+ mmc_clk = (l3_clk / (data32 + 1)) / 4;
+
+ return mmc_clk;
+}
+
+/* Get the CPU clock frequency */
+uint32_t get_cpu_clk(void)
+{
+ uint32_t data32, ref_clk, cpu_clk;
+
+ data32 = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLGLOB);
+ ref_clk = get_ref_clk(data32);
+
+ cpu_clk = get_l3_clk(ref_clk)/PLAT_SYS_COUNTER_CONVERT_TO_MHZ;
+
+ return cpu_clk;
+}
diff --git a/plat/intel/soc/stratix10/soc/s10_memory_controller.c b/plat/intel/soc/stratix10/soc/s10_memory_controller.c
new file mode 100644
index 0000000..ac756ab
--- /dev/null
+++ b/plat/intel/soc/stratix10/soc/s10_memory_controller.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <errno.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <platform_def.h>
+#include <string.h>
+
+#include "s10_memory_controller.h"
+#include "socfpga_reset_manager.h"
+
+#define ALT_CCU_NOC_DI_SET_MSK 0x10
+
+#define DDR_READ_LATENCY_DELAY 40
+#define MAX_MEM_CAL_RETRY 3
+#define PRE_CALIBRATION_DELAY 1
+#define POST_CALIBRATION_DELAY 1
+#define TIMEOUT_EMIF_CALIBRATION 1000
+#define CLEAR_EMIF_DELAY 1000
+#define CLEAR_EMIF_TIMEOUT 1000
+
+#define DDR_CONFIG(A, B, C, R) (((A) << 24) | ((B) << 16) | ((C) << 8) | (R))
+#define DDR_CONFIG_ELEMENTS (sizeof(ddr_config)/sizeof(uint32_t))
+
+/* tWR = min. 15 ns constant, see the JEDEC standard, e.g. JESD79-4 for DDR4 */
+#define tWR_IN_NS 15
+
+void configure_hmc_adaptor_regs(void);
+void configure_ddr_sched_ctrl_regs(void);
+
+/* The following are the supported configurations */
+uint32_t ddr_config[] = {
+	/* DDR_CONFIG(Address order, Bank, Column, Row) */
+ /* List for DDR3 or LPDDR3 (pinout order > chip, row, bank, column) */
+ DDR_CONFIG(0, 3, 10, 12),
+ DDR_CONFIG(0, 3, 9, 13),
+ DDR_CONFIG(0, 3, 10, 13),
+ DDR_CONFIG(0, 3, 9, 14),
+ DDR_CONFIG(0, 3, 10, 14),
+ DDR_CONFIG(0, 3, 10, 15),
+ DDR_CONFIG(0, 3, 11, 14),
+ DDR_CONFIG(0, 3, 11, 15),
+ DDR_CONFIG(0, 3, 10, 16),
+ DDR_CONFIG(0, 3, 11, 16),
+ DDR_CONFIG(0, 3, 12, 15), /* 0xa */
+ /* List for DDR4 only (pinout order > chip, bank, row, column) */
+ DDR_CONFIG(1, 3, 10, 14),
+ DDR_CONFIG(1, 4, 10, 14),
+ DDR_CONFIG(1, 3, 10, 15),
+ DDR_CONFIG(1, 4, 10, 15),
+ DDR_CONFIG(1, 3, 10, 16),
+ DDR_CONFIG(1, 4, 10, 16),
+ DDR_CONFIG(1, 3, 10, 17),
+ DDR_CONFIG(1, 4, 10, 17),
+};
+
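+/*
+ * Return the index of the matching ddr_config[] entry, or 0 when there is
+ * no match (callers treat 0 as a lookup failure).
+ */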
+static int match_ddr_conf(uint32_t ddr_conf)
+{
+ int i;
+
+ for (i = 0; i < DDR_CONFIG_ELEMENTS; i++) {
+ if (ddr_conf == ddr_config[i])
+ return i;
+ }
+ return 0;
+}
+
+static int check_hmc_clk(void)
+{
+ unsigned long timeout = 0;
+ uint32_t hmc_clk;
+
+ do {
+ hmc_clk = mmio_read_32(S10_SYSMGR_CORE_HMC_CLK);
+ if (hmc_clk & S10_SYSMGR_CORE_HMC_CLK_STATUS)
+ break;
+ udelay(1);
+ } while (++timeout < 1000);
+ if (timeout >= 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int clear_emif(void)
+{
+ uint32_t data;
+ unsigned long timeout;
+
+ mmio_write_32(S10_MPFE_HMC_ADP_RSTHANDSHAKECTRL, 0);
+
+ timeout = 0;
+ do {
+ data = mmio_read_32(S10_MPFE_HMC_ADP_RSTHANDSHAKESTAT);
+ if ((data & S10_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE) == 0)
+ break;
+ udelay(CLEAR_EMIF_DELAY);
+ } while (++timeout < CLEAR_EMIF_TIMEOUT);
+ if (timeout >= CLEAR_EMIF_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mem_calibration(void)
+{
+ int status = 0;
+ uint32_t data;
+ unsigned long timeout;
+ unsigned long retry = 0;
+
+ udelay(PRE_CALIBRATION_DELAY);
+
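+	/*
+	 * Poll DDRCALSTAT until calibration completes. On timeout, clear
+	 * the EMIF and retry up to MAX_MEM_CAL_RETRY times.
+	 */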
+ do {
+ if (retry != 0)
+ INFO("DDR: Retrying DRAM calibration\n");
+
+ timeout = 0;
+ do {
+ data = mmio_read_32(S10_MPFE_HMC_ADP_DDRCALSTAT);
+ if (S10_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 1)
+ break;
+ udelay(500);
+ } while (++timeout < TIMEOUT_EMIF_CALIBRATION);
+
+ if (S10_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) {
+ status = clear_emif();
+ if (status)
+				ERROR("Failed to clear EMIF\n");
+ } else {
+ break;
+ }
+ } while (++retry < MAX_MEM_CAL_RETRY);
+
+ if (S10_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) {
+ ERROR("DDR: DRAM calibration failed.\n");
+ status = -EIO;
+ } else {
+		INFO("DDR: DRAM calibration succeeded.\n");
+ status = 0;
+ }
+
+ udelay(POST_CALIBRATION_DELAY);
+
+ return status;
+}
+
+int init_hard_memory_controller(void)
+{
+ int status;
+
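+	/* Clear the NOC DI bit on the CPU0 and IOM master ports for DDR/MEM */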
+ mmio_clrbits_32(S10_CCU_CPU0_MPRT_DDR, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM0, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM1A, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM1B, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM1C, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM1D, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM1E, S10_CCU_NOC_DI_SET_MSK);
+
+ mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM0, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM1A, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM1B, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM1C, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM1D, S10_CCU_NOC_DI_SET_MSK);
+ mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM1E, S10_CCU_NOC_DI_SET_MSK);
+
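+	/* Set region 0 address limits and enable the MPU/non-MPU DDR firewall */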
+ mmio_write_32(S10_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT, 0xFFFF0000);
+ mmio_write_32(S10_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT, 0x1F);
+
+ mmio_write_32(S10_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT, 0xFFFF0000);
+ mmio_write_32(S10_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT, 0x1F);
+ mmio_write_32(S10_SOC_NOC_FW_DDR_SCR_ENABLE, BIT(0) | BIT(8));
+
+ status = check_hmc_clk();
+ if (status) {
+ ERROR("DDR: Error, HMC clock not running\n");
+ return status;
+ }
+
+ mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), RSTMGR_FIELD(BRG, DDRSCH));
+
+ status = mem_calibration();
+ if (status) {
+ ERROR("DDR: Memory Calibration Failed\n");
+ return status;
+ }
+
+ configure_hmc_adaptor_regs();
+ configure_ddr_sched_ctrl_regs();
+
+ return 0;
+}
+
+void configure_ddr_sched_ctrl_regs(void)
+{
+ uint32_t data, dram_addr_order, ddr_conf, bank, row, col,
+ rd_to_miss, wr_to_miss, burst_len, burst_len_ddr_clk,
+ burst_len_sched_clk, act_to_act, rd_to_wr, wr_to_rd, bw_ratio,
+ t_rtp, t_rp, t_rcd, rd_latency, tw_rin_clk_cycles,
+ bw_ratio_extended, auto_precharge = 0, act_to_act_bank, faw,
+ faw_bank, bus_rd_to_rd, bus_rd_to_wr, bus_wr_to_rd;
+
+ INFO("Init HPS NOC's DDR Scheduler.\n");
+
+ data = mmio_read_32(S10_MPFE_IOHMC_CTRLCFG1);
+ dram_addr_order = S10_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(data);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_DRAMADDRW);
+
+ col = IOHMC_DRAMADDRW_COL_ADDR_WIDTH(data);
+ row = IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(data);
+ bank = IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(data);
+
+ ddr_conf = match_ddr_conf(DDR_CONFIG(dram_addr_order, bank, col, row));
+
+ if (ddr_conf) {
+ mmio_clrsetbits_32(
+ S10_MPFE_DDR_MAIN_SCHED_DDRCONF,
+ S10_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK,
+ S10_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(ddr_conf));
+ } else {
+ ERROR("DDR: Cannot find predefined ddrConf configuration.\n");
+ }
+
+ mmio_write_32(S10_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_DRAMTIMING0);
+ rd_latency = S10_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(data);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING0);
+ act_to_act = ACT_TO_ACT(data);
+ t_rcd = ACT_TO_RDWR(data);
+ act_to_act_bank = ACT_TO_ACT_DIFF_BANK(data);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING1);
+ rd_to_wr = RD_TO_WR(data);
+ bus_rd_to_rd = RD_TO_RD_DIFF_CHIP(data);
+ bus_rd_to_wr = RD_TO_WR_DIFF_CHIP(data);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING2);
+ t_rtp = RD_TO_PCH(data);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING3);
+ wr_to_rd = CALTIMING3_WR_TO_RD(data);
+ bus_wr_to_rd = CALTIMING3_WR_TO_RD_DIFF_CHIP(data);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING4);
+ t_rp = PCH_TO_VALID(data);
+
+ data = mmio_read_32(S10_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL));
+ bw_ratio = ((HMC_ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 0 : 1);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_CTRLCFG0);
+ burst_len = HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(data);
+ burst_len_ddr_clk = burst_len / 2;
+ burst_len_sched_clk = ((burst_len/2) / 2);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_CTRLCFG0);
+ switch (S10_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(data)) {
+ case 1:
+ /* DDR4 - 1333MHz */
+ /* 20 (19.995) clock cycles = 15ns */
+ /* Calculate with rounding */
+ tw_rin_clk_cycles = (((tWR_IN_NS * 1333) % 1000) >= 500) ?
+ ((tWR_IN_NS * 1333) / 1000) + 1 :
+ ((tWR_IN_NS * 1333) / 1000);
+ break;
+ default:
+ /* Others - 1066MHz or slower */
+ /* 16 (15.990) clock cycles = 15ns */
+ /* Calculate with rounding */
+ tw_rin_clk_cycles = (((tWR_IN_NS * 1066) % 1000) >= 500) ?
+ ((tWR_IN_NS * 1066) / 1000) + 1 :
+ ((tWR_IN_NS * 1066) / 1000);
+ break;
+ }
+
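+	/* Derive scheduler read/write-to-miss latencies from the timings above */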
+ rd_to_miss = t_rtp + t_rp + t_rcd - burst_len_sched_clk;
+ wr_to_miss = ((rd_latency + burst_len_ddr_clk + 2 + tw_rin_clk_cycles)
+ / 2) - rd_to_wr + t_rp + t_rcd;
+
+ mmio_write_32(S10_MPFE_DDR_MAIN_SCHED_DDRTIMING,
+ bw_ratio << DDRTIMING_BWRATIO_OFST |
+ wr_to_rd << DDRTIMING_WRTORD_OFST|
+ rd_to_wr << DDRTIMING_RDTOWR_OFST |
+ burst_len_sched_clk << DDRTIMING_BURSTLEN_OFST |
+ wr_to_miss << DDRTIMING_WRTOMISS_OFST |
+ rd_to_miss << DDRTIMING_RDTOMISS_OFST |
+ act_to_act << DDRTIMING_ACTTOACT_OFST);
+
+ data = mmio_read_32(S10_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL));
+ bw_ratio_extended = ((ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 1 : 0);
+
+ mmio_write_32(S10_MPFE_DDR_MAIN_SCHED_DDRMODE,
+ bw_ratio_extended << DDRMODE_BWRATIOEXTENDED_OFST |
+ auto_precharge << DDRMODE_AUTOPRECHARGE_OFST);
+
+ mmio_write_32(S10_MPFE_DDR_MAIN_SCHED_READLATENCY,
+ (rd_latency / 2) + DDR_READ_LATENCY_DELAY);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING9);
+ faw = S10_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(data);
+
+	faw_bank = 1;	/* Always 1 because we always have 4-bank DDR */
+
+ mmio_write_32(S10_MPFE_DDR_MAIN_SCHED_ACTIVATE,
+ faw_bank << S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST |
+ faw << S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST |
+ act_to_act_bank << S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST);
+
+ mmio_write_32(S10_MPFE_DDR_MAIN_SCHED_DEVTODEV,
+ ((bus_rd_to_rd
+ << S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST)
+ & S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK) |
+ ((bus_rd_to_wr
+ << S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST)
+ & S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK) |
+ ((bus_wr_to_rd
+ << S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST)
+ & S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK));
+
+}
+
+unsigned long get_physical_dram_size(void)
+{
+ uint32_t data;
+ unsigned long ram_addr_width, ram_ext_if_io_width;
+
+ data = mmio_read_32(S10_MPFE_HMC_ADP_DDRIOCTRL);
+ switch (S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(data)) {
+ case 0:
+ ram_ext_if_io_width = 16;
+ break;
+ case 1:
+ ram_ext_if_io_width = 32;
+ break;
+ case 2:
+ ram_ext_if_io_width = 64;
+ break;
+ default:
+ ram_ext_if_io_width = 0;
+ break;
+ }
+
+ data = mmio_read_32(S10_MPFE_IOHMC_REG_DRAMADDRW);
+ ram_addr_width = IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(data);
+
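+	/* Size in bytes = 2^(total address width) * interface width in bytes */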
+ return (1 << ram_addr_width) * (ram_ext_if_io_width / 8);
+}
+
+void configure_hmc_adaptor_regs(void)
+{
+ uint32_t data;
+ uint32_t dram_io_width;
+
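+	/* The DRAM IO width is encoded in bits [7:5] of NIOSRESERVE0 */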
+ dram_io_width = S10_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(
+ mmio_read_32(S10_MPFE_IOHMC_REG_NIOSRESERVE0_OFST));
+
+ dram_io_width = (dram_io_width & 0xFF) >> 5;
+
+ mmio_clrsetbits_32(S10_MPFE_HMC_ADP_DDRIOCTRL,
+ S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK,
+ dram_io_width << S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST);
+
+ mmio_write_32(S10_MPFE_HMC_ADP_HPSINTFCSEL,
+ S10_MPFE_HMC_ADP_HPSINTFCSEL_ENABLE);
+
+ data = mmio_read_32(S10_MPFE_IOHMC_REG_CTRLCFG1);
+ if (data & (1 << S10_IOHMC_CTRLCFG1_ENABLE_ECC_OFST)) {
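+		/*
+		 * Reset the ECC counters, enable RMW and auto write-back,
+		 * then enable ECC itself.
+		 */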
+ mmio_clrsetbits_32(S10_MPFE_HMC_ADP_ECCCTRL1,
+ S10_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+ S10_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK |
+ S10_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK,
+ S10_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+ S10_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK);
+
+ mmio_clrsetbits_32(S10_MPFE_HMC_ADP_ECCCTRL2,
+ S10_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK |
+ S10_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK |
+ S10_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK,
+ S10_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK |
+ S10_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK);
+
+ mmio_clrsetbits_32(S10_MPFE_HMC_ADP_ECCCTRL1,
+ S10_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+ S10_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK |
+ S10_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK,
+ S10_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK);
+ INFO("Scrubbing ECC\n");
+
+ /* ECC Scrubbing */
+ zeromem(DRAM_BASE, DRAM_SIZE);
+ } else {
+ INFO("ECC is disabled.\n");
+ }
+}
+
diff --git a/plat/intel/soc/stratix10/soc/s10_mmc.c b/plat/intel/soc/stratix10/soc/s10_mmc.c
new file mode 100644
index 0000000..333bdd6
--- /dev/null
+++ b/plat/intel/soc/stratix10/soc/s10_mmc.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <lib/mmio.h>
+
+#include "s10_clock_manager.h"
+#include "socfpga_system_manager.h"
+
+void s10_mmc_init(void)
+{
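+	/* Gate off the SDMMC clock, set sample/drive selects, then re-enable */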
+ mmio_clrbits_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_EN,
+ ALT_CLKMGR_PERPLL_EN_SDMMCCLK);
+ mmio_write_32(SOCFPGA_SYSMGR(SDMMC),
+ SYSMGR_SDMMC_SMPLSEL(2) | SYSMGR_SDMMC_DRVSEL(3));
+ mmio_setbits_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_EN,
+ ALT_CLKMGR_PERPLL_EN_SDMMCCLK);
+}
diff --git a/plat/intel/soc/stratix10/soc/s10_pinmux.c b/plat/intel/soc/stratix10/soc/s10_pinmux.c
new file mode 100644
index 0000000..7fb4711
--- /dev/null
+++ b/plat/intel/soc/stratix10/soc/s10_pinmux.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+
+#include "s10_pinmux.h"
+
+const uint32_t sysmgr_pinmux_array_sel[] = {
+ 0x00000000, 0x00000001, /* usb */
+ 0x00000004, 0x00000001,
+ 0x00000008, 0x00000001,
+ 0x0000000c, 0x00000001,
+ 0x00000010, 0x00000001,
+ 0x00000014, 0x00000001,
+ 0x00000018, 0x00000001,
+ 0x0000001c, 0x00000001,
+ 0x00000020, 0x00000001,
+ 0x00000024, 0x00000001,
+ 0x00000028, 0x00000001,
+ 0x0000002c, 0x00000001,
+ 0x00000030, 0x00000000, /* emac0 */
+ 0x00000034, 0x00000000,
+ 0x00000038, 0x00000000,
+ 0x0000003c, 0x00000000,
+ 0x00000040, 0x00000000,
+ 0x00000044, 0x00000000,
+ 0x00000048, 0x00000000,
+ 0x0000004c, 0x00000000,
+ 0x00000050, 0x00000000,
+ 0x00000054, 0x00000000,
+ 0x00000058, 0x00000000,
+ 0x0000005c, 0x00000000,
+ 0x00000060, 0x00000008, /* gpio1 */
+ 0x00000064, 0x00000008,
+ 0x00000068, 0x00000005, /* uart0 tx */
+	0x0000006c, 0x00000005, /* uart0 rx */
+ 0x00000070, 0x00000008, /* gpio */
+ 0x00000074, 0x00000008,
+ 0x00000078, 0x00000004, /* i2c1 */
+ 0x0000007c, 0x00000004,
+ 0x00000080, 0x00000007, /* jtag */
+ 0x00000084, 0x00000007,
+ 0x00000088, 0x00000007,
+ 0x0000008c, 0x00000007,
+ 0x00000090, 0x00000001, /* sdmmc data0 */
+ 0x00000094, 0x00000001,
+ 0x00000098, 0x00000001,
+ 0x0000009c, 0x00000001,
+ 0x00000100, 0x00000001,
+ 0x00000104, 0x00000001, /* sdmmc.data3 */
+ 0x00000108, 0x00000008, /* loan */
+ 0x0000010c, 0x00000008, /* gpio */
+ 0x00000110, 0x00000008,
+ 0x00000114, 0x00000008, /* gpio1.io21 */
+ 0x00000118, 0x00000005, /* mdio0.mdio */
+ 0x0000011c, 0x00000005 /* mdio0.mdc */
+};
+
+const uint32_t sysmgr_pinmux_array_ctrl[] = {
+ 0x00000000, 0x00502c38, /* Q1_1 */
+ 0x00000004, 0x00102c38,
+ 0x00000008, 0x00502c38,
+ 0x0000000c, 0x00502c38,
+ 0x00000010, 0x00502c38,
+ 0x00000014, 0x00502c38,
+ 0x00000018, 0x00502c38,
+ 0x0000001c, 0x00502c38,
+ 0x00000020, 0x00502c38,
+ 0x00000024, 0x00502c38,
+ 0x00000028, 0x00502c38,
+ 0x0000002c, 0x00502c38,
+ 0x00000030, 0x00102c38, /* Q2_1 */
+ 0x00000034, 0x00102c38,
+ 0x00000038, 0x00502c38,
+ 0x0000003c, 0x00502c38,
+ 0x00000040, 0x00102c38,
+ 0x00000044, 0x00102c38,
+ 0x00000048, 0x00502c38,
+ 0x0000004c, 0x00502c38,
+ 0x00000050, 0x00102c38,
+ 0x00000054, 0x00102c38,
+ 0x00000058, 0x00502c38,
+ 0x0000005c, 0x00502c38,
+ 0x00000060, 0x00502c38, /* Q3_1 */
+ 0x00000064, 0x00502c38,
+ 0x00000068, 0x00102c38,
+ 0x0000006c, 0x00502c38,
+ 0x000000d0, 0x00502c38,
+ 0x000000d4, 0x00502c38,
+ 0x000000d8, 0x00542c38,
+ 0x000000dc, 0x00542c38,
+ 0x000000e0, 0x00502c38,
+ 0x000000e4, 0x00502c38,
+ 0x000000e8, 0x00102c38,
+ 0x000000ec, 0x00502c38,
+ 0x000000f0, 0x00502c38, /* Q4_1 */
+ 0x000000f4, 0x00502c38,
+ 0x000000f8, 0x00102c38,
+ 0x000000fc, 0x00502c38,
+ 0x00000100, 0x00502c38,
+ 0x00000104, 0x00502c38,
+ 0x00000108, 0x00102c38,
+ 0x0000010c, 0x00502c38,
+ 0x00000110, 0x00502c38,
+ 0x00000114, 0x00502c38,
+ 0x00000118, 0x00542c38,
+ 0x0000011c, 0x00102c38
+};
+
+const uint32_t sysmgr_pinmux_array_fpga[] = {
+ 0x00000000, 0x00000000,
+ 0x00000004, 0x00000000,
+ 0x00000008, 0x00000000,
+ 0x0000000c, 0x00000000,
+ 0x00000010, 0x00000000,
+ 0x00000014, 0x00000000,
+ 0x00000018, 0x00000000,
+ 0x0000001c, 0x00000000,
+ 0x00000020, 0x00000000,
+ 0x00000028, 0x00000000,
+ 0x0000002c, 0x00000000,
+ 0x00000030, 0x00000000,
+ 0x00000034, 0x00000000,
+ 0x00000038, 0x00000000,
+ 0x0000003c, 0x00000000,
+ 0x00000040, 0x00000000,
+ 0x00000044, 0x00000000,
+ 0x00000048, 0x00000000,
+ 0x00000050, 0x00000000,
+ 0x00000054, 0x00000000,
+ 0x00000058, 0x0000002a
+};
+
+const uint32_t sysmgr_pinmux_array_iodelay[] = {
+ 0x00000000, 0x00000000,
+ 0x00000004, 0x00000000,
+ 0x00000008, 0x00000000,
+ 0x0000000c, 0x00000000,
+ 0x00000010, 0x00000000,
+ 0x00000014, 0x00000000,
+ 0x00000018, 0x00000000,
+ 0x0000001c, 0x00000000,
+ 0x00000020, 0x00000000,
+ 0x00000024, 0x00000000,
+ 0x00000028, 0x00000000,
+ 0x0000002c, 0x00000000,
+ 0x00000030, 0x00000000,
+ 0x00000034, 0x00000000,
+ 0x00000038, 0x00000000,
+ 0x0000003c, 0x00000000,
+ 0x00000040, 0x00000000,
+ 0x00000044, 0x00000000,
+ 0x00000048, 0x00000000,
+ 0x0000004c, 0x00000000,
+ 0x00000050, 0x00000000,
+ 0x00000054, 0x00000000,
+ 0x00000058, 0x00000000,
+ 0x0000005c, 0x00000000,
+ 0x00000060, 0x00000000,
+ 0x00000064, 0x00000000,
+ 0x00000068, 0x00000000,
+ 0x0000006c, 0x00000000,
+ 0x00000070, 0x00000000,
+ 0x00000074, 0x00000000,
+ 0x00000078, 0x00000000,
+ 0x0000007c, 0x00000000,
+ 0x00000080, 0x00000000,
+ 0x00000084, 0x00000000,
+ 0x00000088, 0x00000000,
+ 0x0000008c, 0x00000000,
+ 0x00000090, 0x00000000,
+ 0x00000094, 0x00000000,
+ 0x00000098, 0x00000000,
+ 0x0000009c, 0x00000000,
+ 0x00000100, 0x00000000,
+ 0x00000104, 0x00000000,
+ 0x00000108, 0x00000000,
+ 0x0000010c, 0x00000000,
+ 0x00000110, 0x00000000,
+ 0x00000114, 0x00000000,
+ 0x00000118, 0x00000000,
+ 0x0000011c, 0x00000000
+};
+
+void config_pinmux(handoff *hoff_ptr)
+{
+ unsigned int i;
+
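+	/* Each handoff table is a flat list of (register offset, value) pairs */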
+ for (i = 0; i < 96; i += 2) {
+ mmio_write_32(S10_PINMUX_PIN0SEL +
+ hoff_ptr->pinmux_sel_array[i],
+ hoff_ptr->pinmux_sel_array[i+1]);
+ }
+
+ for (i = 0; i < 96; i += 2) {
+ mmio_write_32(S10_PINMUX_IO0CTRL +
+ hoff_ptr->pinmux_io_array[i],
+ hoff_ptr->pinmux_io_array[i+1]);
+ }
+
+ for (i = 0; i < 42; i += 2) {
+ mmio_write_32(S10_PINMUX_PINMUX_EMAC0_USEFPGA +
+ hoff_ptr->pinmux_fpga_array[i],
+ hoff_ptr->pinmux_fpga_array[i+1]);
+ }
+
+ for (i = 0; i < 96; i += 2) {
+ mmio_write_32(S10_PINMUX_IO0_DELAY +
+ hoff_ptr->pinmux_iodelay_array[i],
+ hoff_ptr->pinmux_iodelay_array[i+1]);
+ }
+
+}
+