Diffstat (limited to 'plat/intel')
92 files changed, 19210 insertions, 0 deletions
diff --git a/plat/intel/soc/agilex/bl2_plat_setup.c b/plat/intel/soc/agilex/bl2_plat_setup.c new file mode 100644 index 0000000..211a7b7 --- /dev/null +++ b/plat/intel/soc/agilex/bl2_plat_setup.c @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <arch_helpers.h> +#include <assert.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <common/desc_image_load.h> +#include <drivers/generic_delay_timer.h> +#include <drivers/synopsys/dw_mmc.h> +#include <drivers/ti/uart/uart_16550.h> +#include <lib/xlat_tables/xlat_tables.h> + +#include "agilex_mmc.h" +#include "agilex_clock_manager.h" +#include "agilex_memory_controller.h" +#include "agilex_pinmux.h" +#include "ccu/ncore_ccu.h" +#include "qspi/cadence_qspi.h" +#include "socfpga_emac.h" +#include "socfpga_f2sdram_manager.h" +#include "socfpga_handoff.h" +#include "socfpga_mailbox.h" +#include "socfpga_private.h" +#include "socfpga_reset_manager.h" +#include "socfpga_system_manager.h" +#include "wdt/watchdog.h" + +static struct mmc_device_info mmc_info; + +const mmap_region_t agilex_plat_mmap[] = { + MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, + MT_MEMORY | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, + MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE, + MT_NON_CACHEABLE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE, + MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE, + MT_DEVICE | MT_RW | MT_NS), + {0}, +}; + +boot_source_type boot_source = BOOT_SOURCE; + +void bl2_el3_early_platform_setup(u_register_t x0, u_register_t x1, + u_register_t x2, u_register_t x4) +{ + static console_t console; + handoff reverse_handoff_ptr; + + generic_delay_timer_init(); + + if (socfpga_get_handoff(&reverse_handoff_ptr)) + return; + config_pinmux(&reverse_handoff_ptr); + config_clkmgr_handoff(&reverse_handoff_ptr); + + enable_nonsecure_access(); + deassert_peripheral_reset(); + config_hps_hs_before_warm_reset(); + + watchdog_init(get_wdt_clk()); + + console_16550_register(PLAT_INTEL_UART_BASE, get_uart_clk(), + PLAT_BAUDRATE, &console); + + socfpga_delay_timer_init(); + init_ncore_ccu(); + socfpga_emac_init(); + init_hard_memory_controller(); + mailbox_init(); + agx_mmc_init(); + + if (!intel_mailbox_is_fpga_not_ready()) { + socfpga_bridges_enable(SOC2FPGA_MASK | LWHPS2FPGA_MASK | + FPGA2SOC_MASK); + } +} + + +void bl2_el3_plat_arch_setup(void) +{ + + const mmap_region_t bl_regions[] = { + MAP_REGION_FLAT(BL2_BASE, BL2_END - BL2_BASE, + MT_MEMORY | MT_RW | MT_SECURE), + MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, + MT_CODE | MT_SECURE), + MAP_REGION_FLAT(BL_RO_DATA_BASE, + BL_RO_DATA_END - BL_RO_DATA_BASE, + MT_RO_DATA | MT_SECURE), +#if USE_COHERENT_MEM_BAR + MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, + BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, + MT_DEVICE | MT_RW | MT_SECURE), +#endif + {0}, + }; + + setup_page_tables(bl_regions, agilex_plat_mmap); + + enable_mmu_el3(0); + + dw_mmc_params_t params = EMMC_INIT_PARAMS(0x100000, get_mmc_clk()); + + mmc_info.mmc_dev_type = MMC_IS_SD; + mmc_info.ocr_voltage = OCR_3_3_3_4 | OCR_3_2_3_3; + + /* Request ownership and direct access to QSPI */ + 
mailbox_hps_qspi_enable(); + + switch (boot_source) { + case BOOT_SOURCE_SDMMC: + dw_mmc_init(¶ms, &mmc_info); + socfpga_io_setup(boot_source); + break; + + case BOOT_SOURCE_QSPI: + cad_qspi_init(0, QSPI_CONFIG_CPHA, QSPI_CONFIG_CPOL, + QSPI_CONFIG_CSDA, QSPI_CONFIG_CSDADS, + QSPI_CONFIG_CSEOT, QSPI_CONFIG_CSSOT, 0); + socfpga_io_setup(boot_source); + break; + + default: + ERROR("Unsupported boot source\n"); + panic(); + break; + } +} + +uint32_t get_spsr_for_bl33_entry(void) +{ + unsigned long el_status; + unsigned int mode; + uint32_t spsr; + + /* Figure out what mode we enter the non-secure world in */ + el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; + el_status &= ID_AA64PFR0_ELX_MASK; + + mode = (el_status) ? MODE_EL2 : MODE_EL1; + + /* + * TODO: Consider the possibility of specifying the SPSR in + * the FIP ToC and allowing the platform to have a say as + * well. + */ + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + + +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); + + assert(bl_mem_params); + + switch (image_id) { + case BL33_IMAGE_ID: + bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr(); + bl_mem_params->ep_info.spsr = get_spsr_for_bl33_entry(); + break; + default: + break; + } + + return 0; +} + +/******************************************************************************* + * Perform any BL3-1 platform setup code + ******************************************************************************/ +void bl2_platform_setup(void) +{ +} + diff --git a/plat/intel/soc/agilex/bl31_plat_setup.c b/plat/intel/soc/agilex/bl31_plat_setup.c new file mode 100644 index 0000000..b4e19de --- /dev/null +++ b/plat/intel/soc/agilex/bl31_plat_setup.c @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <arch_helpers.h> +#include <assert.h> +#include <common/bl_common.h> +#include <drivers/arm/gicv2.h> +#include <drivers/ti/uart/uart_16550.h> +#include <lib/mmio.h> +#include <lib/xlat_tables/xlat_tables.h> + +#include "ccu/ncore_ccu.h" +#include "socfpga_mailbox.h" +#include "socfpga_private.h" +#include "socfpga_sip_svc.h" + +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; + +entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type) +{ + entry_point_info_t *next_image_info; + + next_image_info = (type == NON_SECURE) ? 
+ &bl33_image_ep_info : &bl32_image_ep_info; + + /* None of the images on this platform can have 0x0 as the entrypoint */ + if (next_image_info->pc) + return next_image_info; + else + return NULL; +} + +void setup_smmu_secure_context(void) +{ + /* + * Program SCR0 register (0xFA000000) + * to set SMCFCFG bit[21] to 0x1 which raise stream match conflict fault + * to set CLIENTPD bit[0] to 0x0 which enables SMMU for secure context + */ + mmio_write_32(0xFA000000, 0x00200000); + + /* + * Program SCR1 register (0xFA000004) + * to set NSNUMSMRGO bit[14:8] to 0x4 which stream mapping register + * for non-secure context and the rest will be secure context + * to set NSNUMCBO bit[5:0] to 0x4 which allocate context bank + * for non-secure context and the rest will be secure context + */ + mmio_write_32(0xFA000004, 0x00000404); +} + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + static console_t console; + + mmio_write_64(PLAT_SEC_ENTRY, PLAT_SEC_WARM_ENTRY); + + console_16550_register(PLAT_INTEL_UART_BASE, PLAT_UART_CLOCK, + PLAT_BAUDRATE, &console); + /* + * Check params passed from BL31 should not be NULL, + */ + void *from_bl2 = (void *) arg0; + + bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2; + assert(params_from_bl2 != NULL); + + /* + * Copy BL32 (if populated by BL31) and BL33 entry point information. + * They are stored in Secure RAM, in BL31's address space. + */ + + if (params_from_bl2->h.type == PARAM_BL_PARAMS && + params_from_bl2->h.version >= VERSION_2) { + + bl_params_node_t *bl_params = params_from_bl2->head; + + while (bl_params) { + if (bl_params->image_id == BL33_IMAGE_ID) + bl33_image_ep_info = *bl_params->ep_info; + + bl_params = bl_params->next_params_info; + } + } else { + struct socfpga_bl31_params *arg_from_bl2 = + (struct socfpga_bl31_params *) from_bl2; + + assert(arg_from_bl2->h.type == PARAM_BL31); + assert(arg_from_bl2->h.version >= VERSION_1); + + bl32_image_ep_info = *arg_from_bl2->bl32_ep_info; + bl33_image_ep_info = *arg_from_bl2->bl33_ep_info; + } + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); +} + +static const interrupt_prop_t s10_interrupt_props[] = { + PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(GICV2_INTR_GROUP0), + PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(GICV2_INTR_GROUP0) +}; + +static unsigned int target_mask_array[PLATFORM_CORE_COUNT]; + +static const gicv2_driver_data_t plat_gicv2_gic_data = { + .gicd_base = PLAT_INTEL_SOCFPGA_GICD_BASE, + .gicc_base = PLAT_INTEL_SOCFPGA_GICC_BASE, + .interrupt_props = s10_interrupt_props, + .interrupt_props_num = ARRAY_SIZE(s10_interrupt_props), + .target_masks = target_mask_array, + .target_masks_num = ARRAY_SIZE(target_mask_array), +}; + +/******************************************************************************* + * Perform any BL3-1 platform setup code + ******************************************************************************/ +void bl31_platform_setup(void) +{ + socfpga_delay_timer_init(); + + /* Initialize the gic cpu and distributor interfaces */ + gicv2_driver_init(&plat_gicv2_gic_data); + gicv2_distif_init(); + gicv2_pcpu_distif_init(); + gicv2_cpuif_enable(); + setup_smmu_secure_context(); + + /* Signal secondary CPUs to jump to BL31 (BL2 = U-boot SPL) */ + mmio_write_64(PLAT_CPU_RELEASE_ADDR, + (uint64_t)plat_secondary_cpus_bl31_entry); + + mailbox_hps_stage_notify(HPS_EXECUTION_STATE_SSBL); + + ncore_enable_ocram_firewall(); +} + +const mmap_region_t plat_agilex_mmap[] = { + MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, MT_MEMORY | 
MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE, MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE, + MT_NON_CACHEABLE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE, MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE, MT_DEVICE | MT_RW | MT_NS), + {0} +}; + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. At the + * moment this is only initializes the mmu in a quick and dirty way. + ******************************************************************************/ +void bl31_plat_arch_setup(void) +{ + const mmap_region_t bl_regions[] = { + MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE, + MT_MEMORY | MT_RW | MT_SECURE), + MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, + MT_CODE | MT_SECURE), + MAP_REGION_FLAT(BL_RO_DATA_BASE, + BL_RO_DATA_END - BL_RO_DATA_BASE, + MT_RO_DATA | MT_SECURE), +#if USE_COHERENT_MEM + MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, + BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, + MT_DEVICE | MT_RW | MT_SECURE), +#endif + {0} + }; + + setup_page_tables(bl_regions, plat_agilex_mmap); + enable_mmu_el3(0); +} + diff --git a/plat/intel/soc/agilex/include/agilex_clock_manager.h b/plat/intel/soc/agilex/include/agilex_clock_manager.h new file mode 100644 index 0000000..ee22241 --- /dev/null +++ b/plat/intel/soc/agilex/include/agilex_clock_manager.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef CLOCKMANAGER_H +#define CLOCKMANAGER_H + +#include "socfpga_handoff.h" + +/* Clock Manager Registers */ +#define CLKMGR_OFFSET 0xffd10000 + +#define CLKMGR_CTRL 0x0 +#define CLKMGR_STAT 0x4 +#define CLKMGR_INTRCLR 0x14 + +/* Main PLL Group */ +#define CLKMGR_MAINPLL 0xffd10024 +#define CLKMGR_MAINPLL_EN 0x0 +#define CLKMGR_MAINPLL_BYPASS 0xc +#define CLKMGR_MAINPLL_MPUCLK 0x18 +#define CLKMGR_MAINPLL_NOCCLK 0x1c +#define CLKMGR_MAINPLL_NOCDIV 0x20 +#define CLKMGR_MAINPLL_PLLGLOB 0x24 +#define CLKMGR_MAINPLL_FDBCK 0x28 +#define CLKMGR_MAINPLL_MEM 0x2c +#define CLKMGR_MAINPLL_MEMSTAT 0x30 +#define CLKMGR_MAINPLL_PLLC0 0x34 +#define CLKMGR_MAINPLL_PLLC1 0x38 +#define CLKMGR_MAINPLL_VCOCALIB 0x3c +#define CLKMGR_MAINPLL_PLLC2 0x40 +#define CLKMGR_MAINPLL_PLLC3 0x44 +#define CLKMGR_MAINPLL_PLLM 0x48 +#define CLKMGR_MAINPLL_LOSTLOCK 0x54 + +/* Peripheral PLL Group */ +#define CLKMGR_PERPLL 0xffd1007c +#define CLKMGR_PERPLL_EN 0x0 +#define CLKMGR_PERPLL_BYPASS 0xc +#define CLKMGR_PERPLL_EMACCTL 0x18 +#define CLKMGR_PERPLL_GPIODIV 0x1c +#define CLKMGR_PERPLL_PLLGLOB 0x20 +#define CLKMGR_PERPLL_FDBCK 0x24 +#define CLKMGR_PERPLL_MEM 0x28 +#define CLKMGR_PERPLL_MEMSTAT 0x2c +#define CLKMGR_PERPLL_PLLC0 0x30 +#define CLKMGR_PERPLL_PLLC1 0x34 +#define CLKMGR_PERPLL_VCOCALIB 0x38 +#define CLKMGR_PERPLL_PLLC2 0x3c +#define CLKMGR_PERPLL_PLLC3 0x40 +#define CLKMGR_PERPLL_PLLM 0x44 +#define CLKMGR_PERPLL_LOSTLOCK 0x50 + +/* Altera Group */ +#define CLKMGR_ALTERA 0xffd100d0 +#define CLKMGR_ALTERA_JTAG 0x0 +#define CLKMGR_ALTERA_EMACACTR 0x4 +#define CLKMGR_ALTERA_EMACBCTR 0x8 +#define CLKMGR_ALTERA_EMACPTPCTR 0xc +#define CLKMGR_ALTERA_GPIODBCTR 0x10 +#define CLKMGR_ALTERA_SDMMCCTR 0x14 +#define CLKMGR_ALTERA_S2FUSER0CTR 0x18 +#define CLKMGR_ALTERA_S2FUSER1CTR 0x1c 
+#define CLKMGR_ALTERA_PSIREFCTR 0x20 +#define CLKMGR_ALTERA_EXTCNTRST 0x24 + +/* Membus */ +#define CLKMGR_MEM_REQ BIT(24) +#define CLKMGR_MEM_WR BIT(25) +#define CLKMGR_MEM_ERR BIT(26) +#define CLKMGR_MEM_WDAT_OFFSET 16 +#define CLKMGR_MEM_ADDR 0x4027 +#define CLKMGR_MEM_WDAT 0x80 + +/* Clock Manager Macros */ +#define CLKMGR_CTRL_BOOTMODE_SET_MSK 0x00000001 +#define CLKMGR_STAT_BUSY_E_BUSY 0x1 +#define CLKMGR_STAT_BUSY(x) (((x) & 0x00000001) >> 0) +#define CLKMGR_STAT_MAINPLLLOCKED(x) (((x) & 0x00000100) >> 8) +#define CLKMGR_STAT_PERPLLLOCKED(x) (((x) & 0x00010000) >> 16) +#define CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK 0x00000004 +#define CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK 0x00000008 +#define CLKMGR_INTOSC_HZ 460000000 + +/* Main PLL Macros */ +#define CLKMGR_MAINPLL_EN_RESET 0x000000ff + +/* Peripheral PLL Macros */ +#define CLKMGR_PERPLL_EN_RESET 0x00000fff +#define CLKMGR_PERPLL_EN_SDMMCCLK BIT(5) +#define CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET(x) (((x) << 0) & 0x0000ffff) + +/* Altera Macros */ +#define CLKMGR_ALTERA_EXTCNTRST_RESET 0xff + +/* Shared Macros */ +#define CLKMGR_PSRC(x) (((x) & 0x00030000) >> 16) +#define CLKMGR_PSRC_MAIN 0 +#define CLKMGR_PSRC_PER 1 + +#define CLKMGR_PLLGLOB_PSRC_EOSC1 0x0 +#define CLKMGR_PLLGLOB_PSRC_INTOSC 0x1 +#define CLKMGR_PLLGLOB_PSRC_F2S 0x2 + +#define CLKMGR_PLLM_MDIV(x) ((x) & 0x000003ff) +#define CLKMGR_PLLGLOB_PD_SET_MSK 0x00000001 +#define CLKMGR_PLLGLOB_RST_SET_MSK 0x00000002 + +#define CLKMGR_PLLGLOB_REFCLKDIV(x) (((x) & 0x00003f00) >> 8) +#define CLKMGR_PLLGLOB_AREFCLKDIV(x) (((x) & 0x00000f00) >> 8) +#define CLKMGR_PLLGLOB_DREFCLKDIV(x) (((x) & 0x00003000) >> 12) + +#define CLKMGR_VCOCALIB_HSCNT_SET(x) (((x) << 0) & 0x000003ff) +#define CLKMGR_VCOCALIB_MSCNT_SET(x) (((x) << 16) & 0x00ff0000) + +#define CLKMGR_CLR_LOSTLOCK_BYPASS 0x20000000 + +typedef struct { + uint32_t clk_freq_of_eosc1; + uint32_t clk_freq_of_f2h_free; + uint32_t clk_freq_of_cb_intosc_ls; +} CLOCK_SOURCE_CONFIG; + +void config_clkmgr_handoff(handoff *hoff_ptr); +uint32_t get_wdt_clk(void); +uint32_t get_uart_clk(void); +uint32_t get_mmc_clk(void); +uint32_t get_mpu_clk(void); +uint32_t get_cpu_clk(void); + +#endif diff --git a/plat/intel/soc/agilex/include/agilex_memory_controller.h b/plat/intel/soc/agilex/include/agilex_memory_controller.h new file mode 100644 index 0000000..9db4292 --- /dev/null +++ b/plat/intel/soc/agilex/include/agilex_memory_controller.h @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2019, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef AGX_MEMORYCONTROLLER_H +#define AGX_MEMORYCONTROLLER_H + +#define AGX_MPFE_IOHMC_REG_DRAMADDRW 0xf80100a8 +#define AGX_MPFE_IOHMC_CTRLCFG0 0xf8010028 +#define AGX_MPFE_IOHMC_CTRLCFG1 0xf801002c +#define AGX_MPFE_IOHMC_CTRLCFG2 0xf8010030 +#define AGX_MPFE_IOHMC_CTRLCFG3 0xf8010034 +#define AGX_MPFE_IOHMC_DRAMADDRW 0xf80100a8 +#define AGX_MPFE_IOHMC_DRAMTIMING0 0xf8010050 +#define AGX_MPFE_IOHMC_CALTIMING0 0xf801007c +#define AGX_MPFE_IOHMC_CALTIMING1 0xf8010080 +#define AGX_MPFE_IOHMC_CALTIMING2 0xf8010084 +#define AGX_MPFE_IOHMC_CALTIMING3 0xf8010088 +#define AGX_MPFE_IOHMC_CALTIMING4 0xf801008c +#define AGX_MPFE_IOHMC_CALTIMING9 0xf80100a0 +#define AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(x) (((x) & 0x000000ff) >> 0) +#define AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(value) \ + (((value) & 0x00000060) >> 5) + +#define AGX_MPFE_HMC_ADP_ECCCTRL1 0xf8011100 +#define AGX_MPFE_HMC_ADP_ECCCTRL2 0xf8011104 +#define AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT 0xf8011218 +#define AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE 0x000000ff +#define AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL 0xf8011214 + + +#define AGX_MPFE_IOHMC_REG_CTRLCFG1 0xf801002c + +#define AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST 0xf8010110 + +#define IOHMC_DRAMADDRW_COL_ADDR_WIDTH(x) (((x) & 0x0000001f) >> 0) +#define IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(x) (((x) & 0x000003e0) >> 5) +#define IOHMC_DRAMADDRW_CS_ADDR_WIDTH(x) (((x) & 0x00070000) >> 16) +#define IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(x) (((x) & 0x0000c000) >> 14) +#define IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(x) (((x) & 0x00003c00) >> 10) + +#define AGX_MPFE_DDR(x) (0xf8000000 + x) +#define AGX_MPFE_HMC_ADP_DDRCALSTAT 0xf801100c +#define AGX_MPFE_DDR_MAIN_SCHED 0xf8000400 +#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF 0xf8000408 +#define AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING 0xf800040c +#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK 0x0000001f +#define AGX_MPFE_DDR_MAIN_SCHED_DDRMODE 0xf8000410 +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV 0xf800043c +#define AGX_MPFE_DDR_MAIN_SCHED_READLATENCY 0xf8000414 +#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE 0xf8000438 +#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST 10 +#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST 4 +#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST 0 +#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(x) (((x) << 0) & 0x0000001f) +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST 0 +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK (BIT(0) | BIT(1)) +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST 2 +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK (BIT(2) | BIT(3)) +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST 4 +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK (BIT(4) | BIT(5)) + +#define AGX_MPFE_HMC_ADP(x) (0xf8011000 + (x)) +#define AGX_MPFE_HMC_ADP_HPSINTFCSEL 0xf8011210 +#define AGX_MPFE_HMC_ADP_DDRIOCTRL 0xf8011008 +#define HMC_ADP_DDRIOCTRL 0x8 +#define HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00000003) >> 0) +#define HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(x) (((x) & 0x00003e00) >> 9) +#define ADP_DRAMADDRWIDTH 0xe0 + +#define ACT_TO_ACT_DIFF_BANK(value) (((value) & 0x00fc0000) >> 18) +#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12) +#define ACT_TO_RDWR(value) (((value) & 0x0000003f) >> 0) +#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12) + +/* timing 2 */ +#define RD_TO_RD_DIFF_CHIP(value) (((value) & 0x00000fc0) >> 6) +#define RD_TO_WR_DIFF_CHIP(value) (((value) & 0x3f000000) >> 24) +#define RD_TO_WR(value) (((value) & 0x00fc0000) >> 
18) +#define RD_TO_PCH(value) (((value) & 0x00000fc0) >> 6) + +/* timing 3 */ +#define CALTIMING3_WR_TO_RD_DIFF_CHIP(value) (((value) & 0x0003f000) >> 12) +#define CALTIMING3_WR_TO_RD(value) (((value) & 0x00000fc0) >> 6) + +/* timing 4 */ +#define PCH_TO_VALID(value) (((value) & 0x00000fc0) >> 6) + +#define DDRTIMING_BWRATIO_OFST 31 +#define DDRTIMING_WRTORD_OFST 26 +#define DDRTIMING_RDTOWR_OFST 21 +#define DDRTIMING_BURSTLEN_OFST 18 +#define DDRTIMING_WRTOMISS_OFST 12 +#define DDRTIMING_RDTOMISS_OFST 6 +#define DDRTIMING_ACTTOACT_OFST 0 + +#define ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x3) >> 0) + +#define DDRMODE_AUTOPRECHARGE_OFST 1 +#define DDRMODE_BWRATIOEXTENDED_OFST 0 + + +#define AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(x) (((x) & 0x7f) >> 0) +#define AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(x) (((x) & 0x0f) >> 0) + +#define AGX_CCU_CPU0_MPRT_DDR 0xf7004400 +#define AGX_CCU_CPU0_MPRT_MEM0 0xf70045c0 +#define AGX_CCU_CPU0_MPRT_MEM1A 0xf70045e0 +#define AGX_CCU_CPU0_MPRT_MEM1B 0xf7004600 +#define AGX_CCU_CPU0_MPRT_MEM1C 0xf7004620 +#define AGX_CCU_CPU0_MPRT_MEM1D 0xf7004640 +#define AGX_CCU_CPU0_MPRT_MEM1E 0xf7004660 +#define AGX_CCU_IOM_MPRT_MEM0 0xf7018560 +#define AGX_CCU_IOM_MPRT_MEM1A 0xf7018580 +#define AGX_CCU_IOM_MPRT_MEM1B 0xf70185a0 +#define AGX_CCU_IOM_MPRT_MEM1C 0xf70185c0 +#define AGX_CCU_IOM_MPRT_MEM1D 0xf70185e0 +#define AGX_CCU_IOM_MPRT_MEM1E 0xf7018600 + +#define AGX_NOC_FW_DDR_SCR 0xf8020200 +#define AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT 0xf802021c +#define AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT 0xf8020218 +#define AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT 0xf802029c +#define AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT 0xf8020298 + +#define AGX_SOC_NOC_FW_DDR_SCR_ENABLE 0xf8020200 +#define AGX_SOC_NOC_FW_DDR_SCR_ENABLESET 0xf8020204 +#define AGX_CCU_NOC_DI_SET_MSK 0x10 + +#define AGX_SYSMGR_CORE_HMC_CLK 0xffd120b4 +#define AGX_SYSMGR_CORE_HMC_CLK_STATUS 0x00000001 + +#define AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(x) (((x) & 0xffff) >> 0) +#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK 0x00000003 +#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST 0 +#define AGX_MPFE_HMC_ADP_HPSINTFCSEL_ENABLE 0x001f1f1f +#define AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST 7 + +#define AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK 0x00010000 +#define AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK 0x00000100 +#define AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK 0x00000001 + +#define AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK 0x00000001 +#define AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK 0x00010000 +#define AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK 0x00000100 +#define AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(value) (((value) & 0x1) >> 0) + + +#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00003) >> 0) +#define IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(x) (((x) & 0x03c00) >> 10) +#define IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(x) (((x) & 0x0c000) >> 14) +#define IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(x) (((x) & 0x0001f) >> 0) +#define IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(x) (((x) & 0x70000) >> 16) +#define IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(x) (((x) & 0x003e0) >> 5) + +#define AGX_SDRAM_0_LB_ADDR 0x0 +#define AGX_DDR_SIZE 0x40000000 + +/* Macros */ +#define SOCFPGA_MEMCTRL_ECCCTRL1 0x008 +#define SOCFPGA_MEMCTRL_ERRINTEN 0x010 +#define SOCFPGA_MEMCTRL_ERRINTENS 0x014 +#define SOCFPGA_MEMCTRL_ERRINTENR 0x018 +#define SOCFPGA_MEMCTRL_INTMODE 0x01C +#define SOCFPGA_MEMCTRL_INTSTAT 0x020 +#define SOCFPGA_MEMCTRL_DIAGINTTEST 0x024 +#define SOCFPGA_MEMCTRL_DERRADDRA 0x02C + +#define SOCFPGA_MEMCTRL(_reg) 
(SOCFPGA_MEMCTRL_REG_BASE \ + + (SOCFPGA_MEMCTRL_##_reg)) + +int init_hard_memory_controller(void); + +#endif diff --git a/plat/intel/soc/agilex/include/agilex_mmc.h b/plat/intel/soc/agilex/include/agilex_mmc.h new file mode 100644 index 0000000..00f4ca5 --- /dev/null +++ b/plat/intel/soc/agilex/include/agilex_mmc.h @@ -0,0 +1,7 @@ +/* + * Copyright (c) 2020, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +void agx_mmc_init(void); diff --git a/plat/intel/soc/agilex/include/agilex_pinmux.h b/plat/intel/soc/agilex/include/agilex_pinmux.h new file mode 100644 index 0000000..0701208 --- /dev/null +++ b/plat/intel/soc/agilex/include/agilex_pinmux.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef AGX_PINMUX_H +#define AGX_PINMUX_H + +#define AGX_PINMUX_BASE 0xffd13000 +#define AGX_PINMUX_PIN0SEL (AGX_PINMUX_BASE + 0x000) +#define AGX_PINMUX_IO0CTRL (AGX_PINMUX_BASE + 0x130) +#define AGX_PINMUX_EMAC0_USEFPGA (AGX_PINMUX_BASE + 0x300) +#define AGX_PINMUX_EMAC1_USEFPGA (AGX_PINMUX_BASE + 0x304) +#define AGX_PINMUX_EMAC2_USEFPGA (AGX_PINMUX_BASE + 0x308) +#define AGX_PINMUX_NAND_USEFPGA (AGX_PINMUX_BASE + 0x320) +#define AGX_PINMUX_SPIM0_USEFPGA (AGX_PINMUX_BASE + 0x328) +#define AGX_PINMUX_SPIM1_USEFPGA (AGX_PINMUX_BASE + 0x32c) +#define AGX_PINMUX_SDMMC_USEFPGA (AGX_PINMUX_BASE + 0x354) +#define AGX_PINMUX_IO0_DELAY (AGX_PINMUX_BASE + 0x400) + +#define AGX_PINMUX_NAND_USEFPGA_VAL BIT(4) +#define AGX_PINMUX_SDMMC_USEFPGA_VAL BIT(8) +#define AGX_PINMUX_SPIM0_USEFPGA_VAL BIT(16) +#define AGX_PINMUX_SPIM1_USEFPGA_VAL BIT(24) +#define AGX_PINMUX_EMAC0_USEFPGA_VAL BIT(0) +#define AGX_PINMUX_EMAC1_USEFPGA_VAL BIT(8) +#define AGX_PINMUX_EMAC2_USEFPGA_VAL BIT(16) + +#include "socfpga_handoff.h" + +void config_pinmux(handoff *handoff); + +#endif + diff --git a/plat/intel/soc/agilex/include/agilex_system_manager.h b/plat/intel/soc/agilex/include/agilex_system_manager.h new file mode 100644 index 0000000..cb9222d --- /dev/null +++ b/plat/intel/soc/agilex/include/agilex_system_manager.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef AGX_SOCFPGA_SYSTEMMANAGER_H +#define AGX_SOCFPGA_SYSTEMMANAGER_H + +#include "socfpga_plat_def.h" + +/* System Manager Register Map */ +#define SOCFPGA_SYSMGR_SILICONID_1 0x00 +#define SOCFPGA_SYSMGR_SILICONID_2 0x04 +#define SOCFPGA_SYSMGR_WDDBG 0x08 +#define SOCFPGA_SYSMGR_MPU_STATUS 0x10 +#define SOCFPGA_SYSMGR_SDMMC_L3_MASTER 0x2C +#define SOCFPGA_SYSMGR_NAND_L3_MASTER 0x34 +#define SOCFPGA_SYSMGR_USB0_L3_MASTER 0x38 +#define SOCFPGA_SYSMGR_USB1_L3_MASTER 0x3C +#define SOCFPGA_SYSMGR_TSN_GLOBAL 0x40 +#define SOCFPGA_SYSMGR_EMAC_0 0x44 /* TSN_0 */ +#define SOCFPGA_SYSMGR_EMAC_1 0x48 /* TSN_1 */ +#define SOCFPGA_SYSMGR_EMAC_2 0x4C /* TSN_2 */ +#define SOCFPGA_SYSMGR_TSN_0_ACE 0x50 +#define SOCFPGA_SYSMGR_TSN_1_ACE 0x54 +#define SOCFPGA_SYSMGR_TSN_2_ACE 0x58 +#define SOCFPGA_SYSMGR_FPGAINTF_EN_1 0x68 +#define SOCFPGA_SYSMGR_FPGAINTF_EN_2 0x6C +#define SOCFPGA_SYSMGR_FPGAINTF_EN_3 0x70 +#define SOCFPGA_SYSMGR_DMAC0_L3_MASTER 0x74 +#define SOCFPGA_SYSMGR_ETR_L3_MASTER 0x78 +#define SOCFPGA_SYSMGR_DMAC1_L3_MASTER 0x7C +#define SOCFPGA_SYSMGR_SEC_CTRL_SLT 0x80 +#define SOCFPGA_SYSMGR_OSC_TRIM 0x84 +#define SOCFPGA_SYSMGR_DMAC0_CTRL_STATUS_REG 0x88 +#define SOCFPGA_SYSMGR_DMAC1_CTRL_STATUS_REG 0x8C +#define SOCFPGA_SYSMGR_ECC_INTMASK_VALUE 0x90 +#define SOCFPGA_SYSMGR_ECC_INTMASK_SET 0x94 +#define SOCFPGA_SYSMGR_ECC_INTMASK_CLR 0x98 +#define SOCFPGA_SYSMGR_ECC_INTMASK_SERR 0x9C +#define SOCFPGA_SYSMGR_ECC_INTMASK_DERR 0xA0 +/* NOC configuration value for Agilex5 */ +#define SOCFPGA_SYSMGR_NOC_TIMEOUT 0xC0 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_SET 0xC4 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_CLR 0xC8 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_VAL 0xCC +#define SOCFPGA_SYSMGR_NOC_IDLEACK 0xD0 +#define SOCFPGA_SYSMGR_NOC_IDLESTATUS 0xD4 +#define SOCFPGA_SYSMGR_FPGA2SOC_CTRL 0xD8 +#define SOCFPGA_SYSMGR_FPGA_CFG 0xDC +#define SOCFPGA_SYSMGR_GPO 0xE4 +#define SOCFPGA_SYSMGR_GPI 0xE8 +#define SOCFPGA_SYSMGR_MPU 0xF0 +#define SOCFPGA_SYSMGR_SDM_HPS_SPARE 0xF4 +#define SOCFPGA_SYSMGR_HPS_SDM_SPARE 0xF8 +#define SOCFPGA_SYSMGR_DFI_INTF 0xFC +#define SOCFPGA_SYSMGR_NAND_DD_CTRL 0x100 +#define SOCFPGA_SYSMGR_NAND_PHY_CTRL_REG 0x104 +#define SOCFPGA_SYSMGR_NAND_PHY_TSEL_REG 0x108 +#define SOCFPGA_SYSMGR_NAND_DQ_TIMING_REG 0x10C +#define SOCFPGA_SYSMGR_PHY_DQS_TIMING_REG 0x110 +#define SOCFPGA_SYSMGR_NAND_PHY_GATE_LPBK_CTRL_REG 0x114 +#define SOCFPGA_SYSMGR_NAND_PHY_DLL_MASTER_CTRL_REG 0x118 +#define SOCFPGA_SYSMGR_NAND_PHY_DLL_SLAVE_CTRL_REG 0x11C +#define SOCFPGA_SYSMGR_NAND_DD_DEFAULT_SETTING_REG0 0x120 +#define SOCFPGA_SYSMGR_NAND_DD_DEFAULT_SETTING_REG1 0x124 +#define SOCFPGA_SYSMGR_NAND_DD_STATUS_REG 0x128 +#define SOCFPGA_SYSMGR_NAND_DD_ID_LOW_REG 0x12C +#define SOCFPGA_SYSMGR_NAND_DD_ID_HIGH_REG 0x130 +#define SOCFPGA_SYSMGR_NAND_WRITE_PROT_EN_REG 0x134 +#define SOCFPGA_SYSMGR_SDMMC_CMD_QUEUE_SETTING_REG 0x138 +#define SOCFPGA_SYSMGR_I3C_SLV_PID_LOW 0x13C +#define SOCFPGA_SYSMGR_I3C_SLV_PID_HIGH 0x140 +#define SOCFPGA_SYSMGR_I3C_SLV_CTRL_0 0x144 +#define SOCFPGA_SYSMGR_I3C_SLV_CTRL_1 0x148 +#define SOCFPGA_SYSMGR_F2S_BRIDGE_CTRL 0x14C +#define SOCFPGA_SYSMGR_DMA_TBU_STASH_CTRL_REG_0_DMA0 0x150 +#define SOCFPGA_SYSMGR_DMA_TBU_STASH_CTRL_REG_0_DMA1 0x154 +#define SOCFPGA_SYSMGR_SDM_TBU_STASH_CTRL_REG_1_SDM 0x158 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_USB2 0x15C +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_USB3 0x160 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_SDMMC 0x164 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_NAND 0x168 +#define 
SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_ETR 0x16C +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN0 0x170 +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN1 0x174 +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN2 0x178 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_CTRL_REG_0_DMA0 0x17C +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_CTRL_REG_0_DMA1 0x180 +#define SOCFPGA_SYSMGR_SDM_TBU_STREAM_CTRL_REG_1_SDM 0x184 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_USB2 0x188 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_USB3 0x18C +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_SDMMC 0x190 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_NAND 0x194 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_ETR 0x198 +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN0 0x19C +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN1 0x1A0 +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN2 0x1A4 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_ID_AX_REG_0_DMA0 0x1A8 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_ID_AX_REG_0_DMA1 0x1AC +#define SOCFPGA_SYSMGR_SDM_TBU_STREAM_ID_AX_REG_1_SDM 0x1B0 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_USB2 0x1B4 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_USB3 0x1B8 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_SDMMC 0x1BC +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_NAND 0x1C0 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_ETR 0x1C4 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN0 0x1C8 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN1 0x1CC +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN2 0x1D0 +#define SOCFPGA_SYSMGR_USB3_MISC_CTRL_REG0 0x1F0 +#define SOCFPGA_SYSMGR_USB3_MISC_CTRL_REG1 0x1F4 + +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_0 0x200 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_1 0x204 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_2 0x208 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_3 0x20C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_4 0x210 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_5 0x214 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_6 0x218 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_7 0x21C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_8 0x220 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_9 0x224 +#define SOCFPGA_SYSMGR_MPFE_CONFIG 0x228 +#define SOCFPGA_SYSMGR_MPFE_status 0x22C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_0 0x230 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_1 0x234 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_2 0x238 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_3 0x23C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_4 0x240 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_5 0x244 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_6 0x248 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_7 0x24C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_8 0x250 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_9 0x254 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_0 0x258 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_1 0x25C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_2 0x260 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_3 0x264 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_4 0x268 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_5 0x26C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_6 0x270 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_7 0x274 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_8 0x278 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_9 0x27C + +#define DMA0_STREAM_CTRL_REG 0x10D1217C +#define DMA1_STREAM_CTRL_REG 0x10D12180 +#define SDM_STREAM_CTRL_REG 0x10D12184 +#define USB2_STREAM_CTRL_REG 0x10D12188 +#define USB3_STREAM_CTRL_REG 0x10D1218C +#define SDMMC_STREAM_CTRL_REG 0x10D12190 +#define NAND_STREAM_CTRL_REG 0x10D12194 +#define 
ETR_STREAM_CTRL_REG 0x10D12198 +#define TSN0_STREAM_CTRL_REG 0x10D1219C +#define TSN1_STREAM_CTRL_REG 0x10D121A0 +#define TSN2_STREAM_CTRL_REG 0x10D121A4 + +/* Stream ID configuration value for Agilex5 */ +#define TSN0 0x00010001 +#define TSN1 0x00020002 +#define TSN2 0x00030003 +#define NAND 0x00040004 +#define SDMMC 0x00050005 +#define USB0 0x00060006 +#define USB1 0x00070007 +#define DMA0 0x00080008 +#define DMA1 0x00090009 +#define SDM 0x000A000A +#define CORE_SIGHT_DEBUG 0x000B000B + +/* Field Masking */ +#define SYSMGR_SDMMC_DRVSEL(x) (((x) & 0x7) << 0) +#define SYSMGR_SDMMC_SMPLSEL(x) (((x) & 0x7) << 4) +#define IDLE_DATA_LWSOC2FPGA BIT(4) +#define IDLE_DATA_SOC2FPGA BIT(0) +#define IDLE_DATA_MASK (IDLE_DATA_LWSOC2FPGA | IDLE_DATA_SOC2FPGA) +#define SYSMGR_ECC_OCRAM_MASK BIT(1) +#define SYSMGR_ECC_DDR0_MASK BIT(16) +#define SYSMGR_ECC_DDR1_MASK BIT(17) +#define WSTREAMIDEN_REG_CTRL BIT(0) +#define RSTREAMIDEN_REG_CTRL BIT(1) +#define WMMUSECSID_REG_VAL BIT(4) +#define RMMUSECSID_REG_VAL BIT(5) + +/* Macros */ +#define SOCFPGA_SYSMGR(_reg) (SOCFPGA_SYSMGR_REG_BASE \ + + (SOCFPGA_SYSMGR_##_reg)) + +#define ENABLE_STREAMID WSTREAMIDEN_REG_CTRL | \ + RSTREAMIDEN_REG_CTRL +#define ENABLE_STREAMID_SECURE_TX WSTREAMIDEN_REG_CTRL | \ + RSTREAMIDEN_REG_CTRL | \ + WMMUSECSID_REG_VAL | RMMUSECSID_REG_VAL + +#endif /* AGX5_SOCFPGA_SYSTEMMANAGER_H */ diff --git a/plat/intel/soc/agilex/include/socfpga_plat_def.h b/plat/intel/soc/agilex/include/socfpga_plat_def.h new file mode 100644 index 0000000..a744d09 --- /dev/null +++ b/plat/intel/soc/agilex/include/socfpga_plat_def.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_SOCFPGA_DEF_H +#define PLAT_SOCFPGA_DEF_H + +#include "agilex_system_manager.h" +#include <platform_def.h> + +/* Platform Setting */ +#define PLATFORM_MODEL PLAT_SOCFPGA_AGILEX +#define BOOT_SOURCE BOOT_SOURCE_SDMMC +#define PLAT_PRIMARY_CPU 0 +#define PLAT_CLUSTER_ID_MPIDR_AFF_SHIFT MPIDR_AFF1_SHIFT +#define PLAT_CPU_ID_MPIDR_AFF_SHIFT MPIDR_AFF0_SHIFT + +/* FPGA config helpers */ +#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR 0x400000 +#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE 0x2000000 + +/* QSPI Setting */ +#define CAD_QSPIDATA_OFST 0xff900000 +#define CAD_QSPI_OFFSET 0xff8d2000 + +/* Register Mapping */ +#define SOCFPGA_CCU_NOC_REG_BASE 0xf7000000 +#define SOCFPGA_F2SDRAMMGR_REG_BASE U(0xf8024000) + +#define SOCFPGA_MMC_REG_BASE 0xff808000 +#define SOCFPGA_MEMCTRL_REG_BASE 0xf8011100 +#define SOCFPGA_RSTMGR_REG_BASE 0xffd11000 +#define SOCFPGA_SYSMGR_REG_BASE 0xffd12000 + +#define SOCFPGA_L4_PER_SCR_REG_BASE 0xffd21000 +#define SOCFPGA_L4_SYS_SCR_REG_BASE 0xffd21100 +#define SOCFPGA_SOC2FPGA_SCR_REG_BASE 0xffd21200 +#define SOCFPGA_LWSOC2FPGA_SCR_REG_BASE 0xffd21300 + +/******************************************************************************* + * Platform memory map related constants + ******************************************************************************/ +#define DRAM_BASE (0x0) +#define DRAM_SIZE (0x80000000) + +#define OCRAM_BASE (0xFFE00000) +#define OCRAM_SIZE (0x00040000) + +#define MEM64_BASE (0x0100000000) +#define MEM64_SIZE (0x1F00000000) + +#define DEVICE1_BASE (0x80000000) +#define DEVICE1_SIZE (0x60000000) + +#define DEVICE2_BASE (0xF7000000) +#define DEVICE2_SIZE (0x08E00000) + +#define DEVICE3_BASE (0xFFFC0000) +#define DEVICE3_SIZE (0x00008000) + +#define DEVICE4_BASE 
(0x2000000000) +#define DEVICE4_SIZE (0x0100000000) + +#define BL2_BASE (0xffe00000) +#define BL2_LIMIT (0xffe1b000) + +#define BL31_BASE (0x1000) +#define BL31_LIMIT (0x81000) + +/******************************************************************************* + * UART related constants + ******************************************************************************/ +#define PLAT_UART0_BASE (0xFFC02000) +#define PLAT_UART1_BASE (0xFFC02100) + +/******************************************************************************* + * GIC related constants + ******************************************************************************/ +#define PLAT_GIC_BASE (0xFFFC0000) +#define PLAT_GICC_BASE (PLAT_GIC_BASE + 0x2000) +#define PLAT_GICD_BASE (PLAT_GIC_BASE + 0x1000) +#define PLAT_GICR_BASE 0 + +#define PLAT_SYS_COUNTER_FREQ_IN_TICKS (400000000) +#define PLAT_HZ_CONVERT_TO_MHZ (1000000) + +/******************************************************************************* + * SDMMC related pointer function + ******************************************************************************/ +#define SDMMC_READ_BLOCKS mmc_read_blocks +#define SDMMC_WRITE_BLOCKS mmc_write_blocks + +/******************************************************************************* + * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate L2 reset + * is done and HPS should trigger warm reset via RMR_EL3. + ******************************************************************************/ +#define L2_RESET_DONE_REG 0xFFD12218 + +/* Platform specific system counter */ +#define PLAT_SYS_COUNTER_FREQ_IN_MHZ get_cpu_clk() + +#endif /* PLAT_SOCFPGA_DEF_H */ diff --git a/plat/intel/soc/agilex/platform.mk b/plat/intel/soc/agilex/platform.mk new file mode 100644 index 0000000..5c92f72 --- /dev/null +++ b/plat/intel/soc/agilex/platform.mk @@ -0,0 +1,82 @@ +# +# Copyright (c) 2019-2023, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2019-2022, Intel Corporation. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +PLAT_INCLUDES := \ + -Iplat/intel/soc/agilex/include/ \ + -Iplat/intel/soc/common/drivers/ \ + -Iplat/intel/soc/common/include/ + +# Include GICv2 driver files +include drivers/arm/gic/v2/gicv2.mk +AGX_GICv2_SOURCES := \ + ${GICV2_SOURCES} \ + plat/common/plat_gicv2.c + + +PLAT_BL_COMMON_SOURCES := \ + ${AGX_GICv2_SOURCES} \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + drivers/ti/uart/aarch64/16550_console.S \ + lib/xlat_tables/aarch64/xlat_tables.c \ + lib/xlat_tables/xlat_tables_common.c \ + plat/intel/soc/common/aarch64/platform_common.c \ + plat/intel/soc/common/aarch64/plat_helpers.S \ + plat/intel/soc/common/drivers/ccu/ncore_ccu.c \ + plat/intel/soc/common/socfpga_delay_timer.c + +BL2_SOURCES += \ + common/desc_image_load.c \ + drivers/mmc/mmc.c \ + drivers/intel/soc/stratix10/io/s10_memmap_qspi.c \ + drivers/io/io_storage.c \ + drivers/io/io_block.c \ + drivers/io/io_fip.c \ + drivers/partition/partition.c \ + drivers/partition/gpt.c \ + drivers/synopsys/emmc/dw_mmc.c \ + lib/cpus/aarch64/cortex_a53.S \ + plat/intel/soc/agilex/bl2_plat_setup.c \ + plat/intel/soc/agilex/soc/agilex_clock_manager.c \ + plat/intel/soc/agilex/soc/agilex_memory_controller.c \ + plat/intel/soc/agilex/soc/agilex_mmc.c \ + plat/intel/soc/agilex/soc/agilex_pinmux.c \ + plat/intel/soc/common/bl2_plat_mem_params_desc.c \ + plat/intel/soc/common/socfpga_image_load.c \ + plat/intel/soc/common/socfpga_storage.c \ + plat/intel/soc/common/soc/socfpga_emac.c \ + plat/intel/soc/common/soc/socfpga_firewall.c \ + plat/intel/soc/common/soc/socfpga_handoff.c \ + plat/intel/soc/common/soc/socfpga_mailbox.c \ + plat/intel/soc/common/soc/socfpga_reset_manager.c \ + plat/intel/soc/common/drivers/qspi/cadence_qspi.c \ + plat/intel/soc/common/drivers/wdt/watchdog.c + +include lib/zlib/zlib.mk +PLAT_INCLUDES += -Ilib/zlib +BL2_SOURCES += $(ZLIB_SOURCES) + +BL31_SOURCES += \ + drivers/arm/cci/cci.c \ + lib/cpus/aarch64/aem_generic.S \ + lib/cpus/aarch64/cortex_a53.S \ + plat/common/plat_psci_common.c \ + plat/intel/soc/agilex/bl31_plat_setup.c \ + plat/intel/soc/agilex/soc/agilex_clock_manager.c \ + plat/intel/soc/common/socfpga_psci.c \ + plat/intel/soc/common/socfpga_sip_svc.c \ + plat/intel/soc/common/socfpga_sip_svc_v2.c \ + plat/intel/soc/common/socfpga_topology.c \ + plat/intel/soc/common/sip/socfpga_sip_ecc.c \ + plat/intel/soc/common/sip/socfpga_sip_fcs.c \ + plat/intel/soc/common/soc/socfpga_mailbox.c \ + plat/intel/soc/common/soc/socfpga_reset_manager.c + +PROGRAMMABLE_RESET_ADDRESS := 0 +RESET_TO_BL2 := 1 +BL2_INV_DCACHE := 0 +USE_COHERENT_MEM := 1 diff --git a/plat/intel/soc/agilex/soc/agilex_clock_manager.c b/plat/intel/soc/agilex/soc/agilex_clock_manager.c new file mode 100644 index 0000000..d32c3f1 --- /dev/null +++ b/plat/intel/soc/agilex/soc/agilex_clock_manager.c @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <errno.h> +#include <lib/mmio.h> + +#include "agilex_clock_manager.h" +#include "agilex_system_manager.h" +#include "socfpga_handoff.h" + + +uint32_t wait_pll_lock(void) +{ + uint32_t data; + uint32_t count = 0; + + do { + data = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT); + count++; + if (count >= 1000) + return -ETIMEDOUT; + + } while ((CLKMGR_STAT_MAINPLLLOCKED(data) == 0) || + (CLKMGR_STAT_PERPLLLOCKED(data) == 0)); + return 0; +} + +uint32_t wait_fsm(void) +{ + uint32_t data; + uint32_t count = 0; + + do { + data = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT); + count++; + if (count >= 1000) + return -ETIMEDOUT; + + } while (CLKMGR_STAT_BUSY(data) == CLKMGR_STAT_BUSY_E_BUSY); + + return 0; +} + +uint32_t pll_source_sync_config(uint32_t pll_mem_offset, uint32_t data) +{ + uint32_t val = 0; + uint32_t count = 0; + uint32_t req_status = 0; + + val = (CLKMGR_MEM_WR | CLKMGR_MEM_REQ | + (data << CLKMGR_MEM_WDAT_OFFSET) | CLKMGR_MEM_ADDR); + mmio_write_32(pll_mem_offset, val); + + do { + req_status = mmio_read_32(pll_mem_offset); + count++; + } while ((req_status & CLKMGR_MEM_REQ) && (count < 10)); + + if (count >= 100) + return -ETIMEDOUT; + + return 0; +} + +uint32_t pll_source_sync_read(uint32_t pll_mem_offset) +{ + uint32_t val = 0; + uint32_t rdata = 0; + uint32_t count = 0; + uint32_t req_status = 0; + + val = (CLKMGR_MEM_REQ | CLKMGR_MEM_ADDR); + mmio_write_32(pll_mem_offset, val); + + do { + req_status = mmio_read_32(pll_mem_offset); + count++; + } while ((req_status & CLKMGR_MEM_REQ) && (count < 10)); + + if (count >= 100) + return -ETIMEDOUT; + + rdata = mmio_read_32(pll_mem_offset + 0x4); + INFO("rdata (%x) = %x\n", pll_mem_offset + 0x4, rdata); + + return rdata; +} + +void config_clkmgr_handoff(handoff *hoff_ptr) +{ + uint32_t mdiv, mscnt, hscnt; + uint32_t drefclk_div, refclk_div, rdata; + + /* Set clock maanger into boot mode before running configuration */ + mmio_setbits_32(CLKMGR_OFFSET + CLKMGR_CTRL, + CLKMGR_CTRL_BOOTMODE_SET_MSK); + /* Bypass all mainpllgrp's clocks */ + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASS, 0x7); + wait_fsm(); + + /* Bypass all perpllgrp's clocks */ + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0x7f); + wait_fsm(); + + /* Put both PLL in reset and power down */ + mmio_clrbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB, + CLKMGR_PLLGLOB_PD_SET_MSK | + CLKMGR_PLLGLOB_RST_SET_MSK); + mmio_clrbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB, + CLKMGR_PLLGLOB_PD_SET_MSK | + CLKMGR_PLLGLOB_RST_SET_MSK); + + /* Setup main PLL dividers */ + mdiv = CLKMGR_PLLM_MDIV(hoff_ptr->main_pll_pllm); + + drefclk_div = CLKMGR_PLLGLOB_DREFCLKDIV( + hoff_ptr->main_pll_pllglob); + refclk_div = CLKMGR_PLLGLOB_REFCLKDIV( + hoff_ptr->main_pll_pllglob); + + mscnt = 100 / (mdiv * BIT(drefclk_div)); + if (!mscnt) + mscnt = 1; + hscnt = (mdiv * mscnt * BIT(drefclk_div) / refclk_div) - 4; + + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB, + hoff_ptr->main_pll_pllglob & + ~CLKMGR_PLLGLOB_RST_SET_MSK); + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_FDBCK, + hoff_ptr->main_pll_fdbck); + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_VCOCALIB, + CLKMGR_VCOCALIB_HSCNT_SET(hscnt) | + CLKMGR_VCOCALIB_MSCNT_SET(mscnt)); + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC0, + hoff_ptr->main_pll_pllc0); + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC1, + hoff_ptr->main_pll_pllc1); + mmio_write_32(CLKMGR_MAINPLL + 
CLKMGR_MAINPLL_PLLC2, + hoff_ptr->main_pll_pllc2); + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC3, + hoff_ptr->main_pll_pllc3); + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLM, + hoff_ptr->main_pll_pllm); + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_MPUCLK, + hoff_ptr->main_pll_mpuclk); + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCCLK, + hoff_ptr->main_pll_nocclk); + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCDIV, + hoff_ptr->main_pll_nocdiv); + + /* Setup peripheral PLL dividers */ + mdiv = CLKMGR_PLLM_MDIV(hoff_ptr->per_pll_pllm); + + drefclk_div = CLKMGR_PLLGLOB_DREFCLKDIV( + hoff_ptr->per_pll_pllglob); + refclk_div = CLKMGR_PLLGLOB_REFCLKDIV( + hoff_ptr->per_pll_pllglob); + + + mscnt = 100 / (mdiv * BIT(drefclk_div)); + if (!mscnt) + mscnt = 1; + hscnt = (mdiv * mscnt * BIT(drefclk_div) / refclk_div) - 4; + + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB, + hoff_ptr->per_pll_pllglob & + ~CLKMGR_PLLGLOB_RST_SET_MSK); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_FDBCK, + hoff_ptr->per_pll_fdbck); + + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_VCOCALIB, + CLKMGR_VCOCALIB_HSCNT_SET(hscnt) | + CLKMGR_VCOCALIB_MSCNT_SET(mscnt)); + + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC0, + hoff_ptr->per_pll_pllc0); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC1, + hoff_ptr->per_pll_pllc1); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC2, + hoff_ptr->per_pll_pllc2); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC3, + hoff_ptr->per_pll_pllc3); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLM, + hoff_ptr->per_pll_pllm); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EMACCTL, + hoff_ptr->per_pll_emacctl); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_GPIODIV, + hoff_ptr->per_pll_gpiodiv); + + /* Take both PLL out of reset and power up */ + mmio_setbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB, + CLKMGR_PLLGLOB_PD_SET_MSK | + CLKMGR_PLLGLOB_RST_SET_MSK); + mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB, + CLKMGR_PLLGLOB_PD_SET_MSK | + CLKMGR_PLLGLOB_RST_SET_MSK); + + rdata = pll_source_sync_read(CLKMGR_MAINPLL + + CLKMGR_MAINPLL_MEM); + pll_source_sync_config(CLKMGR_MAINPLL + CLKMGR_MAINPLL_MEM, + rdata | 0x80); + + rdata = pll_source_sync_read(CLKMGR_PERPLL + CLKMGR_PERPLL_MEM); + pll_source_sync_config(CLKMGR_PERPLL + CLKMGR_PERPLL_MEM, + rdata | 0x80); + + wait_pll_lock(); + + /*Configure Ping Pong counters in altera group */ + mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACACTR, + hoff_ptr->alt_emacactr); + mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACBCTR, + hoff_ptr->alt_emacbctr); + mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACPTPCTR, + hoff_ptr->alt_emacptpctr); + mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_GPIODBCTR, + hoff_ptr->alt_gpiodbctr); + mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_SDMMCCTR, + hoff_ptr->alt_sdmmcctr); + mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_S2FUSER0CTR, + hoff_ptr->alt_s2fuser0ctr); + mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_S2FUSER1CTR, + hoff_ptr->alt_s2fuser1ctr); + mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_PSIREFCTR, + hoff_ptr->alt_psirefctr); + + /* Clear lost lock bypass mode */ + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_LOSTLOCK, 0x1); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_LOSTLOCK, 0x1); + + mmio_setbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB, + CLKMGR_CLR_LOSTLOCK_BYPASS); + + mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB, + CLKMGR_CLR_LOSTLOCK_BYPASS); + + /* Take all PLLs out of bypass */ + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASS, 0); + 
wait_fsm(); + + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0); + wait_fsm(); + + /* Clear loss lock interrupt status register that */ + /* might be set during configuration */ + mmio_clrbits_32(CLKMGR_OFFSET + CLKMGR_INTRCLR, + CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK | + CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK); + + /* Take all ping pong counters out of reset */ + mmio_clrbits_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EXTCNTRST, + CLKMGR_ALTERA_EXTCNTRST_RESET); + + /* Set safe mode / out of boot mode */ + mmio_clrbits_32(CLKMGR_OFFSET + CLKMGR_CTRL, + CLKMGR_CTRL_BOOTMODE_SET_MSK); + wait_fsm(); + + /* Enable mainpllgrp's software-managed clock */ + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_EN, + CLKMGR_MAINPLL_EN_RESET); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN, + CLKMGR_PERPLL_EN_RESET); + + /* Pass clock source frequency into scratch register */ + mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1), + hoff_ptr->hps_osc_clk_h); + mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2), + hoff_ptr->fpga_clk_hz); +} + +/* Extract reference clock from platform clock source */ +uint32_t get_ref_clk(uint32_t pllglob) +{ + uint32_t arefclkdiv, ref_clk; + uint32_t scr_reg; + + switch (CLKMGR_PSRC(pllglob)) { + case CLKMGR_PLLGLOB_PSRC_EOSC1: + scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1); + ref_clk = mmio_read_32(scr_reg); + break; + case CLKMGR_PLLGLOB_PSRC_INTOSC: + ref_clk = CLKMGR_INTOSC_HZ; + break; + case CLKMGR_PLLGLOB_PSRC_F2S: + scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2); + ref_clk = mmio_read_32(scr_reg); + break; + default: + ref_clk = 0; + assert(0); + break; + } + + arefclkdiv = CLKMGR_PLLGLOB_AREFCLKDIV(pllglob); + ref_clk /= arefclkdiv; + + return ref_clk; +} + +/* Calculate clock frequency based on parameter */ +uint32_t get_clk_freq(uint32_t psrc_reg, uint32_t main_pllc, uint32_t per_pllc) +{ + uint32_t clk_psrc, mdiv, ref_clk; + uint32_t pllm_reg, pllc_reg, pllc_div, pllglob_reg; + + clk_psrc = mmio_read_32(CLKMGR_MAINPLL + psrc_reg); + + switch (CLKMGR_PSRC(clk_psrc)) { + case CLKMGR_PSRC_MAIN: + pllm_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLM; + pllc_reg = CLKMGR_MAINPLL + main_pllc; + pllglob_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB; + break; + case CLKMGR_PSRC_PER: + pllm_reg = CLKMGR_PERPLL + CLKMGR_PERPLL_PLLM; + pllc_reg = CLKMGR_PERPLL + per_pllc; + pllglob_reg = CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB; + break; + default: + return 0; + } + + ref_clk = get_ref_clk(mmio_read_32(pllglob_reg)); + mdiv = CLKMGR_PLLM_MDIV(mmio_read_32(pllm_reg)); + ref_clk *= mdiv; + + pllc_div = mmio_read_32(pllc_reg) & 0x7ff; + + return ref_clk / pllc_div; +} + +/* Return L3 interconnect clock */ +uint32_t get_l3_clk(void) +{ + uint32_t l3_clk; + + l3_clk = get_clk_freq(CLKMGR_MAINPLL_NOCCLK, CLKMGR_MAINPLL_PLLC1, + CLKMGR_PERPLL_PLLC1); + return l3_clk; +} + +/* Calculate clock frequency to be used for watchdog timer */ +uint32_t get_wdt_clk(void) +{ + uint32_t l3_clk, l4_sys_clk; + + l3_clk = get_l3_clk(); + l4_sys_clk = l3_clk / 4; + + return l4_sys_clk; +} + +/* Calculate clock frequency to be used for UART driver */ +uint32_t get_uart_clk(void) +{ + uint32_t data32, l3_clk, l4_sp_clk; + + l3_clk = get_l3_clk(); + + data32 = mmio_read_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCDIV); + data32 = (data32 >> 16) & 0x3; + + l4_sp_clk = l3_clk >> data32; + + return l4_sp_clk; +} + +/* Calculate clock frequency to be used for SDMMC driver */ +uint32_t get_mmc_clk(void) +{ + uint32_t data32, mmc_clk; + + mmc_clk = get_clk_freq(CLKMGR_ALTERA_SDMMCCTR, + CLKMGR_MAINPLL_PLLC3, 
CLKMGR_PERPLL_PLLC3); + + data32 = mmio_read_32(CLKMGR_ALTERA + CLKMGR_ALTERA_SDMMCCTR); + data32 = (data32 & 0x7ff) + 1; + mmc_clk = (mmc_clk / data32) / 4; + + return mmc_clk; +} + +/* Return MPU clock */ +uint32_t get_mpu_clk(void) +{ + uint32_t mpu_clk; + + mpu_clk = get_clk_freq(CLKMGR_MAINPLL_NOCCLK, CLKMGR_MAINPLL_PLLC0, + CLKMGR_PERPLL_PLLC0); + return mpu_clk; +} + +/* Get cpu freq clock */ +uint32_t get_cpu_clk(void) +{ + uint32_t cpu_clk; + + cpu_clk = get_mpu_clk()/PLAT_HZ_CONVERT_TO_MHZ; + + return cpu_clk; +} diff --git a/plat/intel/soc/agilex/soc/agilex_memory_controller.c b/plat/intel/soc/agilex/soc/agilex_memory_controller.c new file mode 100644 index 0000000..2aabe87 --- /dev/null +++ b/plat/intel/soc/agilex/soc/agilex_memory_controller.c @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2019, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <errno.h> +#include <lib/mmio.h> +#include <lib/utils.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <platform_def.h> + +#include "agilex_memory_controller.h" + +#define ALT_CCU_NOC_DI_SET_MSK 0x10 + +#define DDR_READ_LATENCY_DELAY 40 +#define MAX_MEM_CAL_RETRY 3 +#define PRE_CALIBRATION_DELAY 1 +#define POST_CALIBRATION_DELAY 1 +#define TIMEOUT_EMIF_CALIBRATION 1000 +#define CLEAR_EMIF_DELAY 1000 +#define CLEAR_EMIF_TIMEOUT 1000 + +#define DDR_CONFIG(A, B, C, R) (((A) << 24) | ((B) << 16) | ((C) << 8) | (R)) +#define DDR_CONFIG_ELEMENTS (sizeof(ddr_config)/sizeof(uint32_t)) + +/* tWR = Min. 15ns constant, see JEDEC standard eg. DDR4 is JESD79-4.pdf */ +#define tWR_IN_NS 15 + +void configure_hmc_adaptor_regs(void); +void configure_ddr_sched_ctrl_regs(void); + +/* The followring are the supported configurations */ +uint32_t ddr_config[] = { + /* DDR_CONFIG(Address order,Bank,Column,Row) */ + /* List for DDR3 or LPDDR3 (pinout order > chip, row, bank, column) */ + DDR_CONFIG(0, 3, 10, 12), + DDR_CONFIG(0, 3, 9, 13), + DDR_CONFIG(0, 3, 10, 13), + DDR_CONFIG(0, 3, 9, 14), + DDR_CONFIG(0, 3, 10, 14), + DDR_CONFIG(0, 3, 10, 15), + DDR_CONFIG(0, 3, 11, 14), + DDR_CONFIG(0, 3, 11, 15), + DDR_CONFIG(0, 3, 10, 16), + DDR_CONFIG(0, 3, 11, 16), + DDR_CONFIG(0, 3, 12, 15), /* 0xa */ + /* List for DDR4 only (pinout order > chip, bank, row, column) */ + DDR_CONFIG(1, 3, 10, 14), + DDR_CONFIG(1, 4, 10, 14), + DDR_CONFIG(1, 3, 10, 15), + DDR_CONFIG(1, 4, 10, 15), + DDR_CONFIG(1, 3, 10, 16), + DDR_CONFIG(1, 4, 10, 16), + DDR_CONFIG(1, 3, 10, 17), + DDR_CONFIG(1, 4, 10, 17), +}; + +static int match_ddr_conf(uint32_t ddr_conf) +{ + int i; + + for (i = 0; i < DDR_CONFIG_ELEMENTS; i++) { + if (ddr_conf == ddr_config[i]) + return i; + } + return 0; +} + +static int check_hmc_clk(void) +{ + unsigned long timeout = 0; + uint32_t hmc_clk; + + do { + hmc_clk = mmio_read_32(AGX_SYSMGR_CORE_HMC_CLK); + if (hmc_clk & AGX_SYSMGR_CORE_HMC_CLK_STATUS) + break; + udelay(1); + } while (++timeout < 1000); + if (timeout >= 1000) + return -ETIMEDOUT; + + return 0; +} + +static int clear_emif(void) +{ + uint32_t data; + unsigned long timeout; + + mmio_write_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL, 0); + + timeout = 0; + do { + data = mmio_read_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT); + if ((data & AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE) == 0) + break; + udelay(CLEAR_EMIF_DELAY); + } while (++timeout < CLEAR_EMIF_TIMEOUT); + if (timeout >= CLEAR_EMIF_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + +static int mem_calibration(void) +{ + int status; + uint32_t data; + unsigned long timeout; + unsigned long 
retry = 0; + + udelay(PRE_CALIBRATION_DELAY); + + do { + if (retry != 0) + INFO("DDR: Retrying DRAM calibration\n"); + + timeout = 0; + do { + data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRCALSTAT); + if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 1) + break; + udelay(500); + } while (++timeout < TIMEOUT_EMIF_CALIBRATION); + + if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) { + status = clear_emif(); + if (status) + ERROR("Failed to clear Emif\n"); + } else { + break; + } + } while (++retry < MAX_MEM_CAL_RETRY); + + if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) { + ERROR("DDR: DRAM calibration failed.\n"); + status = -EIO; + } else { + INFO("DDR: DRAM calibration success.\n"); + status = 0; + } + + udelay(POST_CALIBRATION_DELAY); + + return status; +} + +int init_hard_memory_controller(void) +{ + int status; + + status = check_hmc_clk(); + if (status) { + ERROR("DDR: Error, HMC clock not running\n"); + return status; + } + + status = mem_calibration(); + if (status) { + ERROR("DDR: Memory Calibration Failed\n"); + return status; + } + + configure_hmc_adaptor_regs(); + + return 0; +} + +void configure_ddr_sched_ctrl_regs(void) +{ + uint32_t data, dram_addr_order, ddr_conf, bank, row, col, + rd_to_miss, wr_to_miss, burst_len, burst_len_ddr_clk, + burst_len_sched_clk, act_to_act, rd_to_wr, wr_to_rd, bw_ratio, + t_rtp, t_rp, t_rcd, rd_latency, tw_rin_clk_cycles, + bw_ratio_extended, auto_precharge = 0, act_to_act_bank, faw, + faw_bank, bus_rd_to_rd, bus_rd_to_wr, bus_wr_to_rd; + + INFO("Init HPS NOC's DDR Scheduler.\n"); + + data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG1); + dram_addr_order = AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW); + + col = IOHMC_DRAMADDRW_COL_ADDR_WIDTH(data); + row = IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(data); + bank = IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(data); + + ddr_conf = match_ddr_conf(DDR_CONFIG(dram_addr_order, bank, col, row)); + + if (ddr_conf) { + mmio_clrsetbits_32( + AGX_MPFE_DDR_MAIN_SCHED_DDRCONF, + AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK, + AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(ddr_conf)); + } else { + ERROR("DDR: Cannot find predefined ddrConf configuration.\n"); + } + + mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data); + + data = mmio_read_32(AGX_MPFE_IOHMC_DRAMTIMING0); + rd_latency = AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING0); + act_to_act = ACT_TO_ACT(data); + t_rcd = ACT_TO_RDWR(data); + act_to_act_bank = ACT_TO_ACT_DIFF_BANK(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING1); + rd_to_wr = RD_TO_WR(data); + bus_rd_to_rd = RD_TO_RD_DIFF_CHIP(data); + bus_rd_to_wr = RD_TO_WR_DIFF_CHIP(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING2); + t_rtp = RD_TO_PCH(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING3); + wr_to_rd = CALTIMING3_WR_TO_RD(data); + bus_wr_to_rd = CALTIMING3_WR_TO_RD_DIFF_CHIP(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING4); + t_rp = PCH_TO_VALID(data); + + data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL)); + bw_ratio = ((HMC_ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 
0 : 1); + + data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0); + burst_len = HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(data); + burst_len_ddr_clk = burst_len / 2; + burst_len_sched_clk = ((burst_len/2) / 2); + + data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0); + switch (AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(data)) { + case 1: + /* DDR4 - 1333MHz */ + /* 20 (19.995) clock cycles = 15ns */ + /* Calculate with rounding */ + tw_rin_clk_cycles = (((tWR_IN_NS * 1333) % 1000) >= 500) ? + ((tWR_IN_NS * 1333) / 1000) + 1 : + ((tWR_IN_NS * 1333) / 1000); + break; + default: + /* Others - 1066MHz or slower */ + /* 16 (15.990) clock cycles = 15ns */ + /* Calculate with rounding */ + tw_rin_clk_cycles = (((tWR_IN_NS * 1066) % 1000) >= 500) ? + ((tWR_IN_NS * 1066) / 1000) + 1 : + ((tWR_IN_NS * 1066) / 1000); + break; + } + + rd_to_miss = t_rtp + t_rp + t_rcd - burst_len_sched_clk; + wr_to_miss = ((rd_latency + burst_len_ddr_clk + 2 + tw_rin_clk_cycles) + / 2) - rd_to_wr + t_rp + t_rcd; + + mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING, + bw_ratio << DDRTIMING_BWRATIO_OFST | + wr_to_rd << DDRTIMING_WRTORD_OFST| + rd_to_wr << DDRTIMING_RDTOWR_OFST | + burst_len_sched_clk << DDRTIMING_BURSTLEN_OFST | + wr_to_miss << DDRTIMING_WRTOMISS_OFST | + rd_to_miss << DDRTIMING_RDTOMISS_OFST | + act_to_act << DDRTIMING_ACTTOACT_OFST); + + data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL)); + bw_ratio_extended = ((ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 1 : 0); + + mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRMODE, + bw_ratio_extended << DDRMODE_BWRATIOEXTENDED_OFST | + auto_precharge << DDRMODE_AUTOPRECHARGE_OFST); + + mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_READLATENCY, + (rd_latency / 2) + DDR_READ_LATENCY_DELAY); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING9); + faw = AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(data); + + faw_bank = 1; // always 1 because we always have 4 bank DDR. 
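The rounded-division idiom used for tw_rin_clk_cycles above converts the constant 15 ns write-recovery time (tWR_IN_NS) into whole memory-clock cycles, which is where the "20 (19.995)" and "16 (15.990)" figures in the comments come from: 15 * 1333 = 19995, and 19995 / 1000 = 19 with remainder 995 >= 500, so the result rounds up to 20; likewise 15 * 1066 = 15990 rounds up from 15 (remainder 990) to 16. A minimal stand-alone sketch of the same calculation, for illustration only (the helper name is not part of the driver):

    #include <stdint.h>

    /* Convert a delay in ns into clock cycles at freq_mhz, rounding to nearest. */
    static uint32_t ns_to_clk_cycles(uint32_t ns, uint32_t freq_mhz)
    {
        uint32_t scaled = ns * freq_mhz;        /* e.g. 15 * 1333 = 19995 */

        return ((scaled % 1000U) >= 500U) ? (scaled / 1000U) + 1U :
                                            (scaled / 1000U);
    }

    /* ns_to_clk_cycles(15U, 1333U) == 20, ns_to_clk_cycles(15U, 1066U) == 16 */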
+ + mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE, + faw_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST | + faw << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST | + act_to_act_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST); + + mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV, + ((bus_rd_to_rd + << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST) + & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK) | + ((bus_rd_to_wr + << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST) + & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK) | + ((bus_wr_to_rd + << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST) + & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK)); + +} + +unsigned long get_physical_dram_size(void) +{ + uint32_t data; + unsigned long ram_addr_width, ram_ext_if_io_width; + + data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRIOCTRL); + switch (AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(data)) { + case 0: + ram_ext_if_io_width = 16; + break; + case 1: + ram_ext_if_io_width = 32; + break; + case 2: + ram_ext_if_io_width = 64; + break; + default: + ram_ext_if_io_width = 0; + break; + } + + data = mmio_read_32(AGX_MPFE_IOHMC_REG_DRAMADDRW); + ram_addr_width = IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(data); + + return (1 << ram_addr_width) * (ram_ext_if_io_width / 8); +} + + + +void configure_hmc_adaptor_regs(void) +{ + uint32_t data; + uint32_t dram_io_width; + + /* Configure DDR data rate */ + dram_io_width = AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0( + mmio_read_32(AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST)); + dram_io_width = (dram_io_width & 0xFF) >> 5; + + data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG3); + + dram_io_width |= (data & 0x4); + + mmio_write_32(AGX_MPFE_HMC_ADP_DDRIOCTRL, dram_io_width); + + /* Copy dram addr width from IOHMC to HMC ADP */ + data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW); + mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data); + + /* Enable nonsecure access to DDR */ + data = get_physical_dram_size(); + + if (data < AGX_DDR_SIZE) + data = AGX_DDR_SIZE; + + mmio_write_32(AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT, data - 1); + mmio_write_32(AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT, 0x1f); + + mmio_write_32(AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT, data - 1); + + mmio_write_32(AGX_SOC_NOC_FW_DDR_SCR_ENABLESET, BIT(0) | BIT(8)); + + /* ECC enablement */ + data = mmio_read_32(AGX_MPFE_IOHMC_REG_CTRLCFG1); + if (data & (1 << AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST)) { + mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1, + AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK, + AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK); + + mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL2, + AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK, + AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK); + + mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1, + AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK, + AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK); + INFO("Scrubbing ECC\n"); + + /* ECC Scrubbing */ + zeromem(DRAM_BASE, DRAM_SIZE); + } else { + INFO("ECC is disabled.\n"); + } +} diff --git 
a/plat/intel/soc/agilex/soc/agilex_mmc.c b/plat/intel/soc/agilex/soc/agilex_mmc.c new file mode 100644 index 0000000..e05d92a --- /dev/null +++ b/plat/intel/soc/agilex/soc/agilex_mmc.c @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2020, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include <lib/mmio.h> + +#include "socfpga_system_manager.h" +#include "agilex_clock_manager.h" + +void agx_mmc_init(void) +{ + mmio_clrbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN, + CLKMGR_PERPLL_EN_SDMMCCLK); + mmio_write_32(SOCFPGA_SYSMGR(SDMMC), + SYSMGR_SDMMC_SMPLSEL(0) | SYSMGR_SDMMC_DRVSEL(3)); + mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN, + CLKMGR_PERPLL_EN_SDMMCCLK); +} diff --git a/plat/intel/soc/agilex/soc/agilex_pinmux.c b/plat/intel/soc/agilex/soc/agilex_pinmux.c new file mode 100644 index 0000000..d2a06fb --- /dev/null +++ b/plat/intel/soc/agilex/soc/agilex_pinmux.c @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> + +#include "agilex_pinmux.h" +#include "agilex_system_manager.h" + +const uint32_t sysmgr_pinmux_array_sel[] = { + 0x00000000, 0x00000001, /* usb */ + 0x00000004, 0x00000001, + 0x00000008, 0x00000001, + 0x0000000c, 0x00000001, + 0x00000010, 0x00000001, + 0x00000014, 0x00000001, + 0x00000018, 0x00000001, + 0x0000001c, 0x00000001, + 0x00000020, 0x00000001, + 0x00000024, 0x00000001, + 0x00000028, 0x00000001, + 0x0000002c, 0x00000001, + 0x00000030, 0x00000000, /* emac0 */ + 0x00000034, 0x00000000, + 0x00000038, 0x00000000, + 0x0000003c, 0x00000000, + 0x00000040, 0x00000000, + 0x00000044, 0x00000000, + 0x00000048, 0x00000000, + 0x0000004c, 0x00000000, + 0x00000050, 0x00000000, + 0x00000054, 0x00000000, + 0x00000058, 0x00000000, + 0x0000005c, 0x00000000, + 0x00000060, 0x00000008, /* gpio1 */ + 0x00000064, 0x00000008, + 0x00000068, 0x00000005, /* uart0 tx */ + 0x0000006c, 0x00000005, /* uart 0 rx */ + 0x00000070, 0x00000008, /* gpio */ + 0x00000074, 0x00000008, + 0x00000078, 0x00000004, /* i2c1 */ + 0x0000007c, 0x00000004, + 0x00000080, 0x00000007, /* jtag */ + 0x00000084, 0x00000007, + 0x00000088, 0x00000007, + 0x0000008c, 0x00000007, + 0x00000090, 0x00000001, /* sdmmc data0 */ + 0x00000094, 0x00000001, + 0x00000098, 0x00000001, + 0x0000009c, 0x00000001, + 0x00000100, 0x00000001, + 0x00000104, 0x00000001, /* sdmmc.data3 */ + 0x00000108, 0x00000008, /* loan */ + 0x0000010c, 0x00000008, /* gpio */ + 0x00000110, 0x00000008, + 0x00000114, 0x00000008, /* gpio1.io21 */ + 0x00000118, 0x00000005, /* mdio0.mdio */ + 0x0000011c, 0x00000005 /* mdio0.mdc */ +}; + +const uint32_t sysmgr_pinmux_array_ctrl[] = { + 0x00000000, 0x00502c38, /* Q1_1 */ + 0x00000004, 0x00102c38, + 0x00000008, 0x00502c38, + 0x0000000c, 0x00502c38, + 0x00000010, 0x00502c38, + 0x00000014, 0x00502c38, + 0x00000018, 0x00502c38, + 0x0000001c, 0x00502c38, + 0x00000020, 0x00502c38, + 0x00000024, 0x00502c38, + 0x00000028, 0x00502c38, + 0x0000002c, 0x00502c38, + 0x00000030, 0x00102c38, /* Q2_1 */ + 0x00000034, 0x00102c38, + 0x00000038, 0x00502c38, + 0x0000003c, 0x00502c38, + 0x00000040, 0x00102c38, + 0x00000044, 0x00102c38, + 0x00000048, 0x00502c38, + 0x0000004c, 0x00502c38, + 0x00000050, 0x00102c38, + 0x00000054, 0x00102c38, + 0x00000058, 0x00502c38, + 0x0000005c, 0x00502c38, + 0x00000060, 0x00502c38, /* Q3_1 */ + 0x00000064, 0x00502c38, + 0x00000068, 0x00102c38, + 0x0000006c, 0x00502c38, + 0x000000d0, 0x00502c38, + 0x000000d4, 0x00502c38, + 0x000000d8, 0x00542c38, + 0x000000dc, 
0x00542c38, + 0x000000e0, 0x00502c38, + 0x000000e4, 0x00502c38, + 0x000000e8, 0x00102c38, + 0x000000ec, 0x00502c38, + 0x000000f0, 0x00502c38, /* Q4_1 */ + 0x000000f4, 0x00502c38, + 0x000000f8, 0x00102c38, + 0x000000fc, 0x00502c38, + 0x00000100, 0x00502c38, + 0x00000104, 0x00502c38, + 0x00000108, 0x00102c38, + 0x0000010c, 0x00502c38, + 0x00000110, 0x00502c38, + 0x00000114, 0x00502c38, + 0x00000118, 0x00542c38, + 0x0000011c, 0x00102c38 +}; + +const uint32_t sysmgr_pinmux_array_fpga[] = { + 0x00000000, 0x00000000, + 0x00000004, 0x00000000, + 0x00000008, 0x00000000, + 0x0000000c, 0x00000000, + 0x00000010, 0x00000000, + 0x00000014, 0x00000000, + 0x00000018, 0x00000000, + 0x0000001c, 0x00000000, + 0x00000020, 0x00000000, + 0x00000028, 0x00000000, + 0x0000002c, 0x00000000, + 0x00000030, 0x00000000, + 0x00000034, 0x00000000, + 0x00000038, 0x00000000, + 0x0000003c, 0x00000000, + 0x00000040, 0x00000000, + 0x00000044, 0x00000000, + 0x00000048, 0x00000000, + 0x00000050, 0x00000000, + 0x00000054, 0x00000000, + 0x00000058, 0x0000002a +}; + +const uint32_t sysmgr_pinmux_array_iodelay[] = { + 0x00000000, 0x00000000, + 0x00000004, 0x00000000, + 0x00000008, 0x00000000, + 0x0000000c, 0x00000000, + 0x00000010, 0x00000000, + 0x00000014, 0x00000000, + 0x00000018, 0x00000000, + 0x0000001c, 0x00000000, + 0x00000020, 0x00000000, + 0x00000024, 0x00000000, + 0x00000028, 0x00000000, + 0x0000002c, 0x00000000, + 0x00000030, 0x00000000, + 0x00000034, 0x00000000, + 0x00000038, 0x00000000, + 0x0000003c, 0x00000000, + 0x00000040, 0x00000000, + 0x00000044, 0x00000000, + 0x00000048, 0x00000000, + 0x0000004c, 0x00000000, + 0x00000050, 0x00000000, + 0x00000054, 0x00000000, + 0x00000058, 0x00000000, + 0x0000005c, 0x00000000, + 0x00000060, 0x00000000, + 0x00000064, 0x00000000, + 0x00000068, 0x00000000, + 0x0000006c, 0x00000000, + 0x00000070, 0x00000000, + 0x00000074, 0x00000000, + 0x00000078, 0x00000000, + 0x0000007c, 0x00000000, + 0x00000080, 0x00000000, + 0x00000084, 0x00000000, + 0x00000088, 0x00000000, + 0x0000008c, 0x00000000, + 0x00000090, 0x00000000, + 0x00000094, 0x00000000, + 0x00000098, 0x00000000, + 0x0000009c, 0x00000000, + 0x00000100, 0x00000000, + 0x00000104, 0x00000000, + 0x00000108, 0x00000000, + 0x0000010c, 0x00000000, + 0x00000110, 0x00000000, + 0x00000114, 0x00000000, + 0x00000118, 0x00000000, + 0x0000011c, 0x00000000 +}; + +void config_fpgaintf_mod(void) +{ + uint32_t val; + + val = 0; + if (mmio_read_32(AGX_PINMUX_NAND_USEFPGA) & 1) + val |= AGX_PINMUX_NAND_USEFPGA_VAL; + if (mmio_read_32(AGX_PINMUX_SDMMC_USEFPGA) & 1) + val |= AGX_PINMUX_SDMMC_USEFPGA_VAL; + if (mmio_read_32(AGX_PINMUX_SPIM0_USEFPGA) & 1) + val |= AGX_PINMUX_SPIM0_USEFPGA_VAL; + if (mmio_read_32(AGX_PINMUX_SPIM1_USEFPGA) & 1) + val |= AGX_PINMUX_SPIM1_USEFPGA_VAL; + mmio_write_32(SOCFPGA_SYSMGR(FPGAINTF_EN_2), val); + + val = 0; + if (mmio_read_32(AGX_PINMUX_EMAC0_USEFPGA) & 1) + val |= AGX_PINMUX_EMAC0_USEFPGA_VAL; + if (mmio_read_32(AGX_PINMUX_EMAC1_USEFPGA) & 1) + val |= AGX_PINMUX_EMAC1_USEFPGA_VAL; + if (mmio_read_32(AGX_PINMUX_EMAC2_USEFPGA) & 1) + val |= AGX_PINMUX_EMAC2_USEFPGA_VAL; + mmio_write_32(SOCFPGA_SYSMGR(FPGAINTF_EN_3), val); +} + + +void config_pinmux(handoff *hoff_ptr) +{ + unsigned int i; + + for (i = 0; i < 96; i += 2) { + mmio_write_32(AGX_PINMUX_PIN0SEL + + hoff_ptr->pinmux_sel_array[i], + hoff_ptr->pinmux_sel_array[i+1]); + } + + for (i = 0; i < 96; i += 2) { + mmio_write_32(AGX_PINMUX_IO0CTRL + + hoff_ptr->pinmux_io_array[i], + hoff_ptr->pinmux_io_array[i+1]); + } + + for (i = 0; i < 40; i += 2) { + 
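    /*
     * As with the PIN0SEL and IO0CTRL loops above, the handoff data here is a
     * flat array of (register offset, value) pairs -- hence the i += 2 stride:
     * each iteration writes pinmux_fpga_array[i + 1] into the USEFPGA register
     * block at AGX_PINMUX_EMAC0_USEFPGA + pinmux_fpga_array[i]. The IO0_DELAY
     * loop below follows the same pattern.
     */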
mmio_write_32(AGX_PINMUX_EMAC0_USEFPGA + + hoff_ptr->pinmux_fpga_array[i], + hoff_ptr->pinmux_fpga_array[i+1]); + } + + for (i = 0; i < 96; i += 2) { + mmio_write_32(AGX_PINMUX_IO0_DELAY + + hoff_ptr->pinmux_iodelay_array[i], + hoff_ptr->pinmux_iodelay_array[i+1]); + } + + config_fpgaintf_mod(); +} + diff --git a/plat/intel/soc/agilex5/bl2_plat_setup.c b/plat/intel/soc/agilex5/bl2_plat_setup.c new file mode 100644 index 0000000..a2fafd2 --- /dev/null +++ b/plat/intel/soc/agilex5/bl2_plat_setup.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2019-2021, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <arch.h> +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <common/desc_image_load.h> +#include <drivers/cadence/cdns_sdmmc.h> +#include <drivers/generic_delay_timer.h> +#include <drivers/synopsys/dw_mmc.h> +#include <drivers/ti/uart/uart_16550.h> +#include <lib/mmio.h> +#include <lib/xlat_tables/xlat_tables_v2.h> + +#include "agilex5_clock_manager.h" +#include "agilex5_memory_controller.h" +#include "agilex5_mmc.h" +#include "agilex5_pinmux.h" +#include "agilex5_system_manager.h" +#include "ccu/ncore_ccu.h" +#include "combophy/combophy.h" +#include "nand/nand.h" +#include "qspi/cadence_qspi.h" +#include "sdmmc/sdmmc.h" +#include "socfpga_emac.h" +#include "socfpga_f2sdram_manager.h" +#include "socfpga_handoff.h" +#include "socfpga_mailbox.h" +#include "socfpga_private.h" +#include "socfpga_reset_manager.h" +#include "wdt/watchdog.h" + + +/* Declare mmc_info */ +static struct mmc_device_info mmc_info; + +/* Declare cadence idmac descriptor */ +extern struct cdns_idmac_desc cdns_desc[8] __aligned(32); + +const mmap_region_t agilex_plat_mmap[] = { + MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, + MT_MEMORY | MT_RW | MT_NS), + MAP_REGION_FLAT(PSS_BASE, PSS_SIZE, + MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(MPFE_BASE, MPFE_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE, + MT_NON_CACHEABLE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(CCU_BASE, CCU_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE, + MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(GIC_BASE, GIC_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + {0}, +}; + +boot_source_type boot_source = BOOT_SOURCE; + +void bl2_el3_early_platform_setup(u_register_t x0, u_register_t x1, + u_register_t x2, u_register_t x4) +{ + static console_t console; + + handoff reverse_handoff_ptr = { 0 }; + + generic_delay_timer_init(); + config_clkmgr_handoff(&reverse_handoff_ptr); + mailbox_init(); + enable_nonsecure_access(); + + deassert_peripheral_reset(); + if (combo_phy_init(&reverse_handoff_ptr) != 0) { + ERROR("Combo Phy initialization failed\n"); + } + + console_16550_register(PLAT_INTEL_UART_BASE, PLAT_UART_CLOCK, + PLAT_BAUDRATE, &console); + + /* Store magic number */ + mmio_write_32(L2_RESET_DONE_REG, PLAT_L2_RESET_REQ); +} + +void bl2_el3_plat_arch_setup(void) +{ + handoff reverse_handoff_ptr; + + struct cdns_sdmmc_params params = EMMC_INIT_PARAMS((uintptr_t) &cdns_desc, get_mmc_clk()); + + mmc_info.mmc_dev_type = MMC_DEVICE_TYPE; + mmc_info.ocr_voltage = OCR_3_3_3_4 | OCR_3_2_3_3; + + /* Request ownership and direct access to QSPI */ + mailbox_hps_qspi_enable(); + + switch (boot_source) { + case BOOT_SOURCE_SDMMC: + NOTICE("SDMMC boot\n"); + sdmmc_init(&reverse_handoff_ptr, ¶ms, &mmc_info); + 
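    /*
     * sdmmc_init() brings up the Cadence SD/MMC controller using the handoff
     * data and the parameters prepared above; the socfpga_io_setup() call that
     * follows is expected to register the IO layer for the selected boot
     * source so the remaining boot images can be loaded from it.
     */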
socfpga_io_setup(boot_source); + break; + + case BOOT_SOURCE_QSPI: + NOTICE("QSPI boot\n"); + cad_qspi_init(0, QSPI_CONFIG_CPHA, QSPI_CONFIG_CPOL, + QSPI_CONFIG_CSDA, QSPI_CONFIG_CSDADS, + QSPI_CONFIG_CSEOT, QSPI_CONFIG_CSSOT, 0); + socfpga_io_setup(boot_source); + break; + + case BOOT_SOURCE_NAND: + NOTICE("NAND boot\n"); + nand_init(&reverse_handoff_ptr); + socfpga_io_setup(boot_source); + break; + + default: + ERROR("Unsupported boot source\n"); + panic(); + break; + } +} + +uint32_t get_spsr_for_bl33_entry(void) +{ + unsigned long el_status; + unsigned int mode; + uint32_t spsr; + + /* Figure out what mode we enter the non-secure world in */ + el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; + el_status &= ID_AA64PFR0_ELX_MASK; + + mode = (el_status) ? MODE_EL2 : MODE_EL1; + + /* + * TODO: Consider the possibility of specifying the SPSR in + * the FIP ToC and allowing the platform to have a say as + * well. + */ + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); + + assert(bl_mem_params); + + switch (image_id) { + case BL33_IMAGE_ID: + bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr(); + bl_mem_params->ep_info.spsr = get_spsr_for_bl33_entry(); + break; + default: + break; + } + + return 0; +} + +/******************************************************************************* + * Perform any BL3-1 platform setup code + ******************************************************************************/ +void bl2_platform_setup(void) +{ +} diff --git a/plat/intel/soc/agilex5/bl31_plat_setup.c b/plat/intel/soc/agilex5/bl31_plat_setup.c new file mode 100644 index 0000000..5ae4bf7 --- /dev/null +++ b/plat/intel/soc/agilex5/bl31_plat_setup.c @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <arch.h> +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <drivers/arm/gic_common.h> +#include <drivers/arm/gicv3.h> +#include <drivers/ti/uart/uart_16550.h> +#include <lib/mmio.h> +#include <lib/xlat_tables/xlat_mmu_helpers.h> +#include <lib/xlat_tables/xlat_tables_v2.h> +#include <plat/common/platform.h> + +#include "agilex5_power_manager.h" +#include "ccu/ncore_ccu.h" +#include "socfpga_mailbox.h" +#include "socfpga_private.h" +#include "socfpga_reset_manager.h" + +/* Get non-secure SPSR for BL33. Zephyr and Linux */ +uint32_t arm_get_spsr_for_bl33_entry(void); + +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; + +/* The GICv3 driver only needs to be initialized in EL3 */ +static uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT]; + +#define SMMU_SDMMC + +entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type) +{ + entry_point_info_t *next_image_info; + + next_image_info = (type == NON_SECURE) ? 
+ &bl33_image_ep_info : &bl32_image_ep_info; + + /* None of the images on this platform can have 0x0 as the entrypoint */ + if (next_image_info->pc) + return next_image_info; + else + return NULL; +} + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + static console_t console; + + mmio_write_64(PLAT_SEC_ENTRY, PLAT_SEC_WARM_ENTRY); + + console_16550_register(PLAT_INTEL_UART_BASE, PLAT_UART_CLOCK, + PLAT_BAUDRATE, &console); + + init_ncore_ccu(); + setup_smmu_stream_id(); + + /* + * Check params passed from BL31 should not be NULL, + */ + void *from_bl2 = (void *) arg0; + +#if RESET_TO_BL31 + /* There are no parameters from BL2 if BL31 is a reset vector */ + assert(from_bl2 == NULL); + void *plat_params_from_bl2 = (void *) arg3; + + assert(plat_params_from_bl2 == NULL); + + /* Populate entry point information for BL33 */ + SET_PARAM_HEAD(&bl33_image_ep_info, + PARAM_EP, + VERSION_1, + 0); + +# if ARM_LINUX_KERNEL_AS_BL33 + /* + * According to the file ``Documentation/arm64/booting.txt`` of the + * Linux kernel tree, Linux expects the physical address of the device + * tree blob (DTB) in x0, while x1-x3 are reserved for future use and + * must be 0. + */ + bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE; + bl33_image_ep_info.args.arg1 = 0U; + bl33_image_ep_info.args.arg2 = 0U; + bl33_image_ep_info.args.arg3 = 0U; +# endif + +#else /* RESET_TO_BL31 */ + bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2; + + assert(params_from_bl2 != NULL); + + /* + * Copy BL32 (if populated by BL31) and BL33 entry point information. + * They are stored in Secure RAM, in BL31's address space. + */ + + if (params_from_bl2->h.type == PARAM_BL_PARAMS && + params_from_bl2->h.version >= VERSION_2) { + + bl_params_node_t *bl_params = params_from_bl2->head; + + while (bl_params) { + if (bl_params->image_id == BL33_IMAGE_ID) { + bl33_image_ep_info = *bl_params->ep_info; + } + bl_params = bl_params->next_params_info; + } + } else { + struct socfpga_bl31_params *arg_from_bl2 = + (struct socfpga_bl31_params *) from_bl2; + + assert(arg_from_bl2->h.type == PARAM_BL31); + assert(arg_from_bl2->h.version >= VERSION_1); + + bl32_image_ep_info = *arg_from_bl2->bl32_ep_info; + bl33_image_ep_info = *arg_from_bl2->bl33_ep_info; + } + + bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE; + bl33_image_ep_info.args.arg1 = 0U; + bl33_image_ep_info.args.arg2 = 0U; + bl33_image_ep_info.args.arg3 = 0U; +#endif + + /* + * Tell BL31 where the non-trusted software image + * is located and the entry state information + */ + bl33_image_ep_info.pc = plat_get_ns_image_entrypoint(); + bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry(); + + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); +} + +static const interrupt_prop_t agx5_interrupt_props[] = { + PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(INTR_GROUP1S), + PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(INTR_GROUP0) +}; + +static const gicv3_driver_data_t plat_gicv3_gic_data = { + .gicd_base = PLAT_INTEL_SOCFPGA_GICD_BASE, + .gicr_base = PLAT_INTEL_SOCFPGA_GICR_BASE, + .interrupt_props = agx5_interrupt_props, + .interrupt_props_num = ARRAY_SIZE(agx5_interrupt_props), + .rdistif_num = PLATFORM_CORE_COUNT, + .rdistif_base_addrs = rdistif_base_addrs, +}; + +/******************************************************************************* + * Perform any BL3-1 platform setup code + ******************************************************************************/ +void bl31_platform_setup(void) +{ + 
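    /*
     * This function re-initializes the delay timer and then brings the GICv3
     * up in the usual order -- driver, distributor, this core's redistributor,
     * CPU interface -- before signalling over the mailbox that the HPS has
     * reached the SSBL execution state.
     */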
socfpga_delay_timer_init(); + + /* Initialize the gic cpu and distributor interfaces */ + gicv3_driver_init(&plat_gicv3_gic_data); + gicv3_distif_init(); + gicv3_rdistif_init(plat_my_core_pos()); + gicv3_cpuif_enable(plat_my_core_pos()); + mailbox_hps_stage_notify(HPS_EXECUTION_STATE_SSBL); +#if !defined(SIMICS_RUN) + ncore_enable_ocram_firewall(); +#endif + +} + +const mmap_region_t plat_agilex_mmap[] = { + MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS), + MAP_REGION_FLAT(PSS_BASE, PSS_SIZE, MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(MPFE_BASE, MPFE_SIZE, MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE, MT_NON_CACHEABLE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(CCU_BASE, CCU_SIZE, MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE, MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(GIC_BASE, GIC_SIZE, MT_DEVICE | MT_RW | MT_SECURE), + {0} +}; + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. At the + * moment this is only intializes the mmu in a quick and dirty way. + ******************************************************************************/ +void bl31_plat_arch_setup(void) +{ + uint32_t boot_core = 0x00; + uint32_t cpuid = 0x00; + + cpuid = read_mpidr(); + boot_core = (mmio_read_32(AGX5_PWRMGR(MPU_BOOTCONFIG)) & 0xC00); + NOTICE("BL31: Boot Core = %x\n", boot_core); + NOTICE("BL31: CPU ID = %x\n", cpuid); + +} + +/* Get non-secure image entrypoint for BL33. Zephyr and Linux */ +uintptr_t plat_get_ns_image_entrypoint(void) +{ +#ifdef PRELOADED_BL33_BASE + return PRELOADED_BL33_BASE; +#else + return PLAT_NS_IMAGE_OFFSET; +#endif +} + +/* Get non-secure SPSR for BL33. Zephyr and Linux */ +uint32_t arm_get_spsr_for_bl33_entry(void) +{ + unsigned int mode; + uint32_t spsr; + + /* Figure out what mode we enter the non-secure world in */ + mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1; + + /* + * TODO: Consider the possibility of specifying the SPSR in + * the FIP ToC and allowing the platform to have a say as + * well. 
+ */ + spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + +/* SMP: Secondary cores BL31 setup reset vector */ +void bl31_plat_set_secondary_cpu_entrypoint(unsigned int cpu_id) +{ + unsigned int pch_cpu = 0x00; + unsigned int pchctlr_old = 0x00; + unsigned int pchctlr_new = 0x00; + uint32_t boot_core = 0x00; + + boot_core = (mmio_read_32(AGX5_PWRMGR(MPU_BOOTCONFIG)) & 0xC00); + /* Update the p-channel based on cpu id */ + pch_cpu = 1 << cpu_id; + + if (boot_core == 0x00) { + /* Update reset vector to 0x00 */ + mmio_write_64(RSTMGR_CPUxRESETBASELOW_CPU2, +(uint64_t) plat_secondary_cpus_bl31_entry >> 2); + } else { + /* Update reset vector to 0x00 */ + mmio_write_64(RSTMGR_CPUxRESETBASELOW_CPU0, +(uint64_t) plat_secondary_cpus_bl31_entry >> 2); + } + + /* Update reset vector to 0x00 */ + mmio_write_64(RSTMGR_CPUxRESETBASELOW_CPU1, (uint64_t) plat_secondary_cpus_bl31_entry >> 2); + mmio_write_64(RSTMGR_CPUxRESETBASELOW_CPU3, (uint64_t) plat_secondary_cpus_bl31_entry >> 2); + + /* On all cores - temporary */ + pchctlr_old = mmio_read_32(AGX5_PWRMGR(MPU_PCHCTLR)); + pchctlr_new = pchctlr_old | (pch_cpu<<1); + mmio_write_32(AGX5_PWRMGR(MPU_PCHCTLR), pchctlr_new); + + /* We will only release the target secondary CPUs */ + /* Bit mask for each CPU BIT0-3 */ + mmio_write_32(RSTMGR_CPUSTRELEASE_CPUx, pch_cpu); +} + +void bl31_plat_set_secondary_cpu_off(void) +{ + unsigned int pch_cpu = 0x00; + unsigned int pch_cpu_off = 0x00; + unsigned int cpu_id = plat_my_core_pos(); + + pch_cpu_off = 1 << cpu_id; + + pch_cpu = mmio_read_32(AGX5_PWRMGR(MPU_PCHCTLR)); + pch_cpu = pch_cpu & ~(pch_cpu_off << 1); + + mmio_write_32(AGX5_PWRMGR(MPU_PCHCTLR), pch_cpu); +} + +void bl31_plat_enable_mmu(uint32_t flags) +{ + /* TODO: Enable mmu when needed */ +} diff --git a/plat/intel/soc/agilex5/include/agilex5_clock_manager.h b/plat/intel/soc/agilex5/include/agilex5_clock_manager.h new file mode 100644 index 0000000..566a80d --- /dev/null +++ b/plat/intel/soc/agilex5/include/agilex5_clock_manager.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef CLOCKMANAGER_H +#define CLOCKMANAGER_H + +#include "socfpga_handoff.h" + +/* Clock Manager Registers */ +#define CLKMGR_OFFSET 0x10d10000 + +#define CLKMGR_CTRL 0x0 +#define CLKMGR_STAT 0x4 +#define CLKMGR_TESTIOCTROL 0x8 +#define CLKMGR_INTRGEN 0xc +#define CLKMGR_INTRMSK 0x10 +#define CLKMGR_INTRCLR 0x14 +#define CLKMGR_INTRSTS 0x18 +#define CLKMGR_INTRSTK 0x1c +#define CLKMGR_INTRRAW 0x20 + +/* Main PLL Group */ +#define CLKMGR_MAINPLL 0x10d10024 +#define CLKMGR_MAINPLL_EN 0x0 +#define CLKMGR_MAINPLL_ENS 0x4 +#define CLKMGR_MAINPLL_BYPASS 0xc +#define CLKMGR_MAINPLL_BYPASSS 0x10 +#define CLKMGR_MAINPLL_BYPASSR 0x14 +#define CLKMGR_MAINPLL_NOCCLK 0x1c +#define CLKMGR_MAINPLL_NOCDIV 0x20 +#define CLKMGR_MAINPLL_PLLGLOB 0x24 +#define CLKMGR_MAINPLL_FDBCK 0x28 +#define CLKMGR_MAINPLL_MEM 0x2c +#define CLKMGR_MAINPLL_MEMSTAT 0x30 +#define CLKMGR_MAINPLL_VCOCALIB 0x34 +#define CLKMGR_MAINPLL_PLLC0 0x38 +#define CLKMGR_MAINPLL_PLLC1 0x3c +#define CLKMGR_MAINPLL_PLLC2 0x40 +#define CLKMGR_MAINPLL_PLLC3 0x44 +#define CLKMGR_MAINPLL_PLLM 0x48 +#define CLKMGR_MAINPLL_FHOP 0x4c +#define CLKMGR_MAINPLL_SSC 0x50 +#define CLKMGR_MAINPLL_LOSTLOCK 0x54 + +/* Peripheral PLL Group */ +#define CLKMGR_PERPLL 0x10d1007c +#define CLKMGR_PERPLL_EN 0x0 +#define CLKMGR_PERPLL_ENS 0x4 +#define CLKMGR_PERPLL_BYPASS 0xc +#define CLKMGR_PERPLL_EMACCTL 0x18 +#define CLKMGR_PERPLL_GPIODIV 0x1c +#define CLKMGR_PERPLL_PLLGLOB 0x20 +#define CLKMGR_PERPLL_FDBCK 0x24 +#define CLKMGR_PERPLL_MEM 0x28 +#define CLKMGR_PERPLL_MEMSTAT 0x2c +#define CLKMGR_PERPLL_PLLC0 0x30 +#define CLKMGR_PERPLL_PLLC1 0x34 +#define CLKMGR_PERPLL_VCOCALIB 0x38 +#define CLKMGR_PERPLL_PLLC2 0x3c +#define CLKMGR_PERPLL_PLLC3 0x40 +#define CLKMGR_PERPLL_PLLM 0x44 +#define CLKMGR_PERPLL_LOSTLOCK 0x50 + +/* Altera Group */ +#define CLKMGR_ALTERA 0x10d100d0 +#define CLKMGR_ALTERA_JTAG 0x0 +#define CLKMGR_ALTERA_EMACACTR 0x4 +#define CLKMGR_ALTERA_EMACBCTR 0x8 +#define CLKMGR_ALTERA_EMACPTPCTR 0xc +#define CLKMGR_ALTERA_GPIODBCTR 0x10 +#define CLKMGR_ALTERA_S2FUSER0CTR 0x18 +#define CLKMGR_ALTERA_S2FUSER1CTR 0x1c +#define CLKMGR_ALTERA_PSIREFCTR 0x20 +#define CLKMGR_ALTERA_EXTCNTRST 0x24 +#define CLKMGR_ALTERA_USB31CTR 0x28 +#define CLKMGR_ALTERA_DSUCTR 0x2c +#define CLKMGR_ALTERA_CORE01CTR 0x30 +#define CLKMGR_ALTERA_CORE23CTR 0x34 +#define CLKMGR_ALTERA_CORE2CTR 0x38 +#define CLKMGR_ALTERA_CORE3CTR 0x3c + +/* Membus */ +#define CLKMGR_MEM_REQ BIT(24) +#define CLKMGR_MEM_WR BIT(25) +#define CLKMGR_MEM_ERR BIT(26) +#define CLKMGR_MEM_WDAT_OFFSET 16 +#define CLKMGR_MEM_ADDR 0x4027 +#define CLKMGR_MEM_WDAT 0x80 + +/* Clock Manager Macros */ +#define CLKMGR_CTRL_BOOTMODE_SET_MSK 0x00000001 +#define CLKMGR_STAT_BUSY_E_BUSY 0x1 +#define CLKMGR_STAT_BUSY(x) (((x) & 0x00000001) >> 0) +#define CLKMGR_STAT_MAINPLLLOCKED(x) (((x) & 0x00000100) >> 8) +#define CLKMGR_STAT_PERPLLLOCKED(x) (((x) & 0x00010000) >> 16) +#define CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK 0x00000004 +#define CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK 0x00000008 +#define CLKMGR_INTOSC_HZ 460000000 + +/* Main PLL Macros */ +#define CLKMGR_MAINPLL_EN_RESET 0x0000005e +#define CLKMGR_MAINPLL_ENS_RESET 0x0000005e + +/* Peripheral PLL Macros */ +#define CLKMGR_PERPLL_EN_RESET 0x040007FF +#define CLKMGR_PERPLL_ENS_RESET 0x040007FF + +#define CLKMGR_PERPLL_EN_SDMMCCLK BIT(5) +#define CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET(x) (((x) << 0) & 0x0000ffff) + +/* Altera Macros */ +#define CLKMGR_ALTERA_EXTCNTRST_RESET 0xff + +/* Shared Macros */ +#define 
CLKMGR_PSRC(x) (((x) & 0x00030000) >> 16) +#define CLKMGR_PSRC_MAIN 0 +#define CLKMGR_PSRC_PER 1 + +#define CLKMGR_PLLGLOB_PSRC_EOSC1 0x0 +#define CLKMGR_PLLGLOB_PSRC_INTOSC 0x1 +#define CLKMGR_PLLGLOB_PSRC_F2S 0x2 + +#define CLKMGR_PLLM_MDIV(x) ((x) & 0x000003ff) +#define CLKMGR_PLLGLOB_PD_SET_MSK 0x00000001 +#define CLKMGR_PLLGLOB_RST_SET_MSK 0x00000002 + +#define CLKMGR_PLLGLOB_REFCLKDIV(x) (((x) & 0x00003f00) >> 8) +#define CLKMGR_PLLGLOB_AREFCLKDIV(x) (((x) & 0x00000f00) >> 8) +#define CLKMGR_PLLGLOB_DREFCLKDIV(x) (((x) & 0x00003000) >> 12) + +#define CLKMGR_VCOCALIB_HSCNT_SET(x) (((x) << 0) & 0x000003ff) +#define CLKMGR_VCOCALIB_MSCNT_SET(x) (((x) << 16) & 0x00ff0000) + +#define CLKMGR_CLR_LOSTLOCK_BYPASS 0x20000000 + +typedef struct { + uint32_t clk_freq_of_eosc1; + uint32_t clk_freq_of_f2h_free; + uint32_t clk_freq_of_cb_intosc_ls; +} CLOCK_SOURCE_CONFIG; + +void config_clkmgr_handoff(handoff *hoff_ptr); +uint32_t get_wdt_clk(void); +uint32_t get_uart_clk(void); +uint32_t get_mmc_clk(void); + +#endif diff --git a/plat/intel/soc/agilex5/include/agilex5_memory_controller.h b/plat/intel/soc/agilex5/include/agilex5_memory_controller.h new file mode 100644 index 0000000..1708488 --- /dev/null +++ b/plat/intel/soc/agilex5/include/agilex5_memory_controller.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef AGX_MEMORYCONTROLLER_H +#define AGX_MEMORYCONTROLLER_H + +#include "socfpga_plat_def.h" + +#define AGX_MPFE_IOHMC_REG_DRAMADDRW 0xf80100a8 +#define AGX_MPFE_IOHMC_CTRLCFG0 0xf8010028 +#define AGX_MPFE_IOHMC_CTRLCFG1 0xf801002c +#define AGX_MPFE_IOHMC_CTRLCFG2 0xf8010030 +#define AGX_MPFE_IOHMC_CTRLCFG3 0xf8010034 +#define AGX_MPFE_IOHMC_DRAMADDRW 0xf80100a8 +#define AGX_MPFE_IOHMC_DRAMTIMING0 0xf8010050 +#define AGX_MPFE_IOHMC_CALTIMING0 0xf801007c +#define AGX_MPFE_IOHMC_CALTIMING1 0xf8010080 +#define AGX_MPFE_IOHMC_CALTIMING2 0xf8010084 +#define AGX_MPFE_IOHMC_CALTIMING3 0xf8010088 +#define AGX_MPFE_IOHMC_CALTIMING4 0xf801008c +#define AGX_MPFE_IOHMC_CALTIMING9 0xf80100a0 +#define AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(x) (((x) & 0x000000ff) >> 0) +#define AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(value) (((value) & 0x00000060) >> 5) + +#define AGX_MPFE_HMC_ADP_ECCCTRL1 0xf8011100 +#define AGX_MPFE_HMC_ADP_ECCCTRL2 0xf8011104 +#define AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT 0xf8011218 +#define AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE 0x000000ff +#define AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL 0xf8011214 + + +#define AGX_MPFE_IOHMC_REG_CTRLCFG1 0xf801002c + +#define AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST 0xf8010110 + +#define IOHMC_DRAMADDRW_COL_ADDR_WIDTH(x) (((x) & 0x0000001f) >> 0) +#define IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(x) (((x) & 0x000003e0) >> 5) +#define IOHMC_DRAMADDRW_CS_ADDR_WIDTH(x) (((x) & 0x00070000) >> 16) +#define IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(x) (((x) & 0x0000c000) >> 14) +#define IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(x) (((x) & 0x00003c00) >> 10) + +#define AGX_MPFE_DDR(x) (0xf8000000 + x) +#define AGX_MPFE_HMC_ADP_DDRCALSTAT 0xf801100c +#define AGX_MPFE_DDR_MAIN_SCHED 0xf8000400 +#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF 0xf8000408 +#define AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING 0xf800040c +#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK 0x0000001f +#define AGX_MPFE_DDR_MAIN_SCHED_DDRMODE 0xf8000410 +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV 0xf800043c +#define AGX_MPFE_DDR_MAIN_SCHED_READLATENCY 0xf8000414 +#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE 0xf8000438 +#define 
AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST 10 +#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST 4 +#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST 0 +#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(x) (((x) << 0) & 0x0000001f) +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST 0 +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK (BIT(0) | BIT(1)) +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST 2 +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK (BIT(2) | BIT(3)) +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST 4 +#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK (BIT(4) | BIT(5)) + +#define AGX_MPFE_HMC_ADP(x) (0xf8011000 + (x)) +#define AGX_MPFE_HMC_ADP_HPSINTFCSEL 0xf8011210 +#define AGX_MPFE_HMC_ADP_DDRIOCTRL 0xf8011008 +#define HMC_ADP_DDRIOCTRL 0x8 +#define HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00000003) >> 0) +#define HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(x) (((x) & 0x00003e00) >> 9) +#define ADP_DRAMADDRWIDTH 0xe0 + +#define ACT_TO_ACT_DIFF_BANK(value) (((value) & 0x00fc0000) >> 18) +#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12) +#define ACT_TO_RDWR(value) (((value) & 0x0000003f) >> 0) +#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12) + +/* timing 2 */ +#define RD_TO_RD_DIFF_CHIP(value) (((value) & 0x00000fc0) >> 6) +#define RD_TO_WR_DIFF_CHIP(value) (((value) & 0x3f000000) >> 24) +#define RD_TO_WR(value) (((value) & 0x00fc0000) >> 18) +#define RD_TO_PCH(value) (((value) & 0x00000fc0) >> 6) + +/* timing 3 */ +#define CALTIMING3_WR_TO_RD_DIFF_CHIP(value) (((value) & 0x0003f000) >> 12) +#define CALTIMING3_WR_TO_RD(value) (((value) & 0x00000fc0) >> 6) + +/* timing 4 */ +#define PCH_TO_VALID(value) (((value) & 0x00000fc0) >> 6) + +#define DDRTIMING_BWRATIO_OFST 31 +#define DDRTIMING_WRTORD_OFST 26 +#define DDRTIMING_RDTOWR_OFST 21 +#define DDRTIMING_BURSTLEN_OFST 18 +#define DDRTIMING_WRTOMISS_OFST 12 +#define DDRTIMING_RDTOMISS_OFST 6 +#define DDRTIMING_ACTTOACT_OFST 0 + +#define ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x3) >> 0) + +#define DDRMODE_AUTOPRECHARGE_OFST 1 +#define DDRMODE_BWRATIOEXTENDED_OFST 0 + + +#define AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(x) (((x) & 0x7f) >> 0) +#define AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(x) (((x) & 0x0f) >> 0) + +#define AGX_CCU_CPU0_MPRT_DDR 0xf7004400 +#define AGX_CCU_CPU0_MPRT_MEM0 0xf70045c0 +#define AGX_CCU_CPU0_MPRT_MEM1A 0xf70045e0 +#define AGX_CCU_CPU0_MPRT_MEM1B 0xf7004600 +#define AGX_CCU_CPU0_MPRT_MEM1C 0xf7004620 +#define AGX_CCU_CPU0_MPRT_MEM1D 0xf7004640 +#define AGX_CCU_CPU0_MPRT_MEM1E 0xf7004660 +#define AGX_CCU_IOM_MPRT_MEM0 0xf7018560 +#define AGX_CCU_IOM_MPRT_MEM1A 0xf7018580 +#define AGX_CCU_IOM_MPRT_MEM1B 0xf70185a0 +#define AGX_CCU_IOM_MPRT_MEM1C 0xf70185c0 +#define AGX_CCU_IOM_MPRT_MEM1D 0xf70185e0 +#define AGX_CCU_IOM_MPRT_MEM1E 0xf7018600 + +#define AGX_NOC_FW_DDR_SCR 0xf8020200 +#define AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT 0xf802021c +#define AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT 0xf8020218 +#define AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT 0xf802029c +#define AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT 0xf8020298 + +#define AGX_SOC_NOC_FW_DDR_SCR_ENABLE 0xf8020200 +#define AGX_SOC_NOC_FW_DDR_SCR_ENABLESET 0xf8020204 +#define AGX_CCU_NOC_DI_SET_MSK 0x10 + +#define AGX_SYSMGR_CORE_HMC_CLK 0xffd120b4 +#define AGX_SYSMGR_CORE_HMC_CLK_STATUS 0x00000001 + +#define AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(x) (((x) & 0xffff) >> 0) +#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK 0x00000003 +#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST 0 +#define 
AGX_MPFE_HMC_ADP_HPSINTFCSEL_ENABLE 0x001f1f1f +#define AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST 7 + +#define AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK 0x00010000 +#define AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK 0x00000100 +#define AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK 0x00000001 + +#define AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK 0x00000001 +#define AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK 0x00010000 +#define AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK 0x00000100 +#define AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(value) (((value) & 0x1) >> 0) + + +#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00003) >> 0) +#define IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(x) (((x) & 0x03c00) >> 10) +#define IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(x) (((x) & 0x0c000) >> 14) +#define IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(x) (((x) & 0x0001f) >> 0) +#define IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(x) (((x) & 0x70000) >> 16) +#define IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(x) (((x) & 0x003e0) >> 5) + +#define AGX_SDRAM_0_LB_ADDR 0x0 +#define AGX_DDR_SIZE 0x40000000 + +/* Macros */ +#define SOCFPGA_MEMCTRL_ECCCTRL1 0x008 +#define SOCFPGA_MEMCTRL_ERRINTEN 0x010 +#define SOCFPGA_MEMCTRL_ERRINTENS 0x014 +#define SOCFPGA_MEMCTRL_ERRINTENR 0x018 +#define SOCFPGA_MEMCTRL_INTMODE 0x01C +#define SOCFPGA_MEMCTRL_INTSTAT 0x020 +#define SOCFPGA_MEMCTRL_DIAGINTTEST 0x024 +#define SOCFPGA_MEMCTRL_DERRADDRA 0x02C + +#define SOCFPGA_MEMCTRL(_reg) (SOCFPGA_MEMCTRL_REG_BASE \ + + (SOCFPGA_MEMCTRL_##_reg)) + +#endif diff --git a/plat/intel/soc/agilex5/include/agilex5_mmc.h b/plat/intel/soc/agilex5/include/agilex5_mmc.h new file mode 100644 index 0000000..c8a5fba --- /dev/null +++ b/plat/intel/soc/agilex5/include/agilex5_mmc.h @@ -0,0 +1,7 @@ +/* + * Copyright (c) 2020-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +void agx5_mmc_init(void); diff --git a/plat/intel/soc/agilex5/include/agilex5_pinmux.h b/plat/intel/soc/agilex5/include/agilex5_pinmux.h new file mode 100644 index 0000000..8a8e8c7 --- /dev/null +++ b/plat/intel/soc/agilex5/include/agilex5_pinmux.h @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef AGX5_PINMUX_H +#define AGX5_PINMUX_H + +/* PINMUX REGISTER ADDRESS */ +#define AGX5_PINMUX_PIN0SEL 0x10d13000 +#define AGX5_PINMUX_IO0CTRL 0x10d13130 +#define AGX5_PINMUX_EMAC0_USEFPGA 0x10d13300 +#define AGX5_PINMUX_IO0_DELAY 0x10d13400 +#define AGX5_PERIPHERAL 0x10d14044 + +#include "socfpga_handoff.h" + +/* PINMUX DEFINE */ +#define PINMUX_HANDOFF_ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#define PINMUX_HANDOFF_CONFIG_ADDR 0xbeec +#define PINMUX_HANDOFF_CONFIG_VAL 0x7e000 + +/* Macros */ +#define SOCFPGA_PINMUX_SEL_NAND (0x03) +#define SOCFPGA_PINMUX_PIN0SEL (0x00) +#define SOCFPGA_PINMUX_PIN1SEL (0x04) +#define SOCFPGA_PINMUX_PIN2SEL (0x08) +#define SOCFPGA_PINMUX_PIN3SEL (0x0C) +#define SOCFPGA_PINMUX_PIN4SEL (0x10) +#define SOCFPGA_PINMUX_PIN5SEL (0x14) +#define SOCFPGA_PINMUX_PIN6SEL (0x18) +#define SOCFPGA_PINMUX_PIN7SEL (0x1C) +#define SOCFPGA_PINMUX_PIN8SEL (0x20) +#define SOCFPGA_PINMUX_PIN9SEL (0x24) +#define SOCFPGA_PINMUX_PIN10SEL (0x28) +#define SOCFPGA_PINMUX_PIN11SEL (0x2C) +#define SOCFPGA_PINMUX_PIN12SEL (0x30) +#define SOCFPGA_PINMUX_PIN13SEL (0x34) +#define SOCFPGA_PINMUX_PIN14SEL (0x38) +#define SOCFPGA_PINMUX_PIN15SEL (0x3C) +#define SOCFPGA_PINMUX_PIN16SEL (0x40) +#define SOCFPGA_PINMUX_PIN17SEL (0x44) +#define SOCFPGA_PINMUX_PIN18SEL (0x48) +#define SOCFPGA_PINMUX_PIN19SEL (0x4C) +#define SOCFPGA_PINMUX_PIN20SEL (0x50) +#define SOCFPGA_PINMUX_PIN21SEL (0x54) +#define SOCFPGA_PINMUX_PIN22SEL (0x58) +#define SOCFPGA_PINMUX_PIN23SEL (0x5C) +#define SOCFPGA_PINMUX_PIN24SEL (0x60) +#define SOCFPGA_PINMUX_PIN25SEL (0x64) +#define SOCFPGA_PINMUX_PIN26SEL (0x68) +#define SOCFPGA_PINMUX_PIN27SEL (0x6C) +#define SOCFPGA_PINMUX_PIN28SEL (0x70) +#define SOCFPGA_PINMUX_PIN29SEL (0x74) +#define SOCFPGA_PINMUX_PIN30SEL (0x78) +#define SOCFPGA_PINMUX_PIN31SEL (0x7C) +#define SOCFPGA_PINMUX_PIN32SEL (0x80) +#define SOCFPGA_PINMUX_PIN33SEL (0x84) +#define SOCFPGA_PINMUX_PIN34SEL (0x88) +#define SOCFPGA_PINMUX_PIN35SEL (0x8C) +#define SOCFPGA_PINMUX_PIN36SEL (0x90) +#define SOCFPGA_PINMUX_PIN37SEL (0x94) +#define SOCFPGA_PINMUX_PIN38SEL (0x98) +#define SOCFPGA_PINMUX_PIN39SEL (0x9C) +#define SOCFPGA_PINMUX_PIN40SEL (0x100) +#define SOCFPGA_PINMUX_PIN41SEL (0x104) +#define SOCFPGA_PINMUX_PIN42SEL (0x108) +#define SOCFPGA_PINMUX_PIN43SEL (0x10C) +#define SOCFPGA_PINMUX_PIN44SEL (0x110) +#define SOCFPGA_PINMUX_PIN45SEL (0x114) +#define SOCFPGA_PINMUX_PIN46SEL (0x118) +#define SOCFPGA_PINMUX_PIN47SEL (0x11C) + +#define SOCFPGA_PINMUX_IO0CTRL (0x00) +#define SOCFPGA_PINMUX_IO1CTRL (0x04) +#define SOCFPGA_PINMUX_IO2CTRL (0x08) +#define SOCFPGA_PINMUX_IO3CTRL (0x0C) +#define SOCFPGA_PINMUX_IO4CTRL (0x10) +#define SOCFPGA_PINMUX_IO5CTRL (0x14) +#define SOCFPGA_PINMUX_IO6CTRL (0x18) +#define SOCFPGA_PINMUX_IO7CTRL (0x1C) +#define SOCFPGA_PINMUX_IO8CTRL (0x20) +#define SOCFPGA_PINMUX_IO9CTRL (0x24) +#define SOCFPGA_PINMUX_IO10CTRL (0x28) +#define SOCFPGA_PINMUX_IO11CTRL (0x2C) +#define SOCFPGA_PINMUX_IO12CTRL (0x30) +#define SOCFPGA_PINMUX_IO13CTRL (0x34) +#define SOCFPGA_PINMUX_IO14CTRL (0x38) +#define SOCFPGA_PINMUX_IO15CTRL (0x3C) +#define SOCFPGA_PINMUX_IO16CTRL (0x40) +#define SOCFPGA_PINMUX_IO17CTRL (0x44) +#define SOCFPGA_PINMUX_IO18CTRL (0x48) +#define SOCFPGA_PINMUX_IO19CTRL (0x4C) +#define SOCFPGA_PINMUX_IO20CTRL (0x50) +#define SOCFPGA_PINMUX_IO21CTRL (0x54) +#define SOCFPGA_PINMUX_IO22CTRL (0x58) +#define SOCFPGA_PINMUX_IO23CTRL (0x5C) +#define SOCFPGA_PINMUX_IO24CTRL (0x60) +#define SOCFPGA_PINMUX_IO25CTRL (0x64) 
+#define SOCFPGA_PINMUX_IO26CTRL (0x68) +#define SOCFPGA_PINMUX_IO27CTRL (0x6C) +#define SOCFPGA_PINMUX_IO28CTRL (0xD0) +#define SOCFPGA_PINMUX_IO29CTRL (0xD4) +#define SOCFPGA_PINMUX_IO30CTRL (0xD8) +#define SOCFPGA_PINMUX_IO31CTRL (0xDC) +#define SOCFPGA_PINMUX_IO32CTRL (0xE0) +#define SOCFPGA_PINMUX_IO33CTRL (0xE4) +#define SOCFPGA_PINMUX_IO34CTRL (0xE8) +#define SOCFPGA_PINMUX_IO35CTRL (0xEC) +#define SOCFPGA_PINMUX_IO36CTRL (0xF0) +#define SOCFPGA_PINMUX_IO37CTRL (0xF4) +#define SOCFPGA_PINMUX_IO38CTRL (0xF8) +#define SOCFPGA_PINMUX_IO39CTRL (0xFC) +#define SOCFPGA_PINMUX_IO40CTRL (0x100) +#define SOCFPGA_PINMUX_IO41CTRL (0x104) +#define SOCFPGA_PINMUX_IO42CTRL (0x108) +#define SOCFPGA_PINMUX_IO43CTRL (0x10C) +#define SOCFPGA_PINMUX_IO44CTRL (0x110) +#define SOCFPGA_PINMUX_IO45CTRL (0x114) +#define SOCFPGA_PINMUX_IO46CTRL (0x118) +#define SOCFPGA_PINMUX_IO47CTRL (0x11C) + +#define SOCFPGA_PINMUX_EMAC0_USEFPGA (0x00) +#define SOCFPGA_PINMUX_EMAC1_USEFPGA (0x04) +#define SOCFPGA_PINMUX_EMAC2_USEFPGA (0x08) +#define SOCFPGA_PINMUX_I2C0_USEFPGA (0x0C) +#define SOCFPGA_PINMUX_I2C1_USEFPGA (0x10) +#define SOCFPGA_PINMUX_I2C_EMAC0_USEFPGA (0x14) +#define SOCFPGA_PINMUX_I2C_EMAC1_USEFPGA (0x18) +#define SOCFPGA_PINMUX_I2C_EMAC2_USEFPGA (0x1C) +#define SOCFPGA_PINMUX_NAND_USEFPGA (0x20) +#define SOCFPGA_PINMUX_SPIM0_USEFPGA (0x28) +#define SOCFPGA_PINMUX_SPIM1_USEFPGA (0x2C) +#define SOCFPGA_PINMUX_SPIS0_USEFPGA (0x30) +#define SOCFPGA_PINMUX_SPIS1_USEFPGA (0x34) +#define SOCFPGA_PINMUX_UART0_USEFPGA (0x38) +#define SOCFPGA_PINMUX_UART1_USEFPGA (0x3C) +#define SOCFPGA_PINMUX_MDIO0_USEFPGA (0x40) +#define SOCFPGA_PINMUX_MDIO1_USEFPGA (0x44) +#define SOCFPGA_PINMUX_MDIO2_USEFPGA (0x48) +#define SOCFPGA_PINMUX_JTAG_USEFPGA (0x50) +#define SOCFPGA_PINMUX_SDMMC_USEFPGA (0x54) + +#define SOCFPGA_PINMUX_IO0DELAY (0x00) +#define SOCFPGA_PINMUX_IO1DELAY (0x04) +#define SOCFPGA_PINMUX_IO2DELAY (0x08) +#define SOCFPGA_PINMUX_IO3DELAY (0x0C) +#define SOCFPGA_PINMUX_IO4DELAY (0x10) +#define SOCFPGA_PINMUX_IO5DELAY (0x14) +#define SOCFPGA_PINMUX_IO6DELAY (0x18) +#define SOCFPGA_PINMUX_IO7DELAY (0x1C) +#define SOCFPGA_PINMUX_IO8DELAY (0x20) +#define SOCFPGA_PINMUX_IO9DELAY (0x24) +#define SOCFPGA_PINMUX_IO10DELAY (0x28) +#define SOCFPGA_PINMUX_IO11DELAY (0x2C) +#define SOCFPGA_PINMUX_IO12DELAY (0x30) +#define SOCFPGA_PINMUX_IO13DELAY (0x34) +#define SOCFPGA_PINMUX_IO14DELAY (0x38) +#define SOCFPGA_PINMUX_IO15DELAY (0x3C) +#define SOCFPGA_PINMUX_IO16DELAY (0x40) +#define SOCFPGA_PINMUX_IO17DELAY (0x44) +#define SOCFPGA_PINMUX_IO18DELAY (0x48) +#define SOCFPGA_PINMUX_IO19DELAY (0x4C) +#define SOCFPGA_PINMUX_IO20DELAY (0x50) +#define SOCFPGA_PINMUX_IO21DELAY (0x54) +#define SOCFPGA_PINMUX_IO22DELAY (0x58) +#define SOCFPGA_PINMUX_IO23DELAY (0x5C) +#define SOCFPGA_PINMUX_IO24DELAY (0x60) +#define SOCFPGA_PINMUX_IO25DELAY (0x64) +#define SOCFPGA_PINMUX_IO26DELAY (0x68) +#define SOCFPGA_PINMUX_IO27DELAY (0x6C) +#define SOCFPGA_PINMUX_IO28DELAY (0x70) +#define SOCFPGA_PINMUX_IO29DELAY (0x74) +#define SOCFPGA_PINMUX_IO30DELAY (0x78) +#define SOCFPGA_PINMUX_IO31DELAY (0x7C) +#define SOCFPGA_PINMUX_IO32DELAY (0x80) +#define SOCFPGA_PINMUX_IO33DELAY (0x84) +#define SOCFPGA_PINMUX_IO34DELAY (0x88) +#define SOCFPGA_PINMUX_IO35DELAY (0x8C) +#define SOCFPGA_PINMUX_IO36DELAY (0x90) +#define SOCFPGA_PINMUX_IO37DELAY (0x94) +#define SOCFPGA_PINMUX_IO38DELAY (0x98) +#define SOCFPGA_PINMUX_IO39DELAY (0x9C) +#define SOCFPGA_PINMUX_IO40DELAY (0xA0) +#define SOCFPGA_PINMUX_IO41DELAY (0xA4) +#define SOCFPGA_PINMUX_IO42DELAY (0xA8) 
+#define SOCFPGA_PINMUX_IO43DELAY (0xAC) +#define SOCFPGA_PINMUX_IO44DELAY (0xB0) +#define SOCFPGA_PINMUX_IO45DELAY (0xB4) +#define SOCFPGA_PINMUX_IO46DELAY (0xB8) +#define SOCFPGA_PINMUX_IO47DELAY (0xBC) + +#define SOCFPGA_PINMUX_I3C0_USEFPGA (0xC0) +#define SOCFPGA_PINMUX_I3C1_USEFPGA (0xC4) + +#define SOCFPGA_PINMUX(_reg) (SOCFPGA_PINMUX_REG_BASE \ + + (SOCFPGA_PINMUX_##_reg)) + +void config_pinmux(handoff *handoff); +void config_peripheral(handoff *handoff); +#endif diff --git a/plat/intel/soc/agilex5/include/agilex5_power_manager.h b/plat/intel/soc/agilex5/include/agilex5_power_manager.h new file mode 100644 index 0000000..1bba74b --- /dev/null +++ b/plat/intel/soc/agilex5/include/agilex5_power_manager.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2022-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef POWERMANAGER_H +#define POWERMANAGER_H + +#include "socfpga_handoff.h" + +#define AGX5_PWRMGR_BASE 0x10d14000 + +/* DSU */ +#define AGX5_PWRMGR_DSU_FWENCTL 0x0 +#define AGX5_PWRMGR_DSU_PGENCTL 0x4 +#define AGX5_PWRMGR_DSU_PGSTAT 0x8 +#define AGX5_PWRMGR_DSU_PWRCTLR 0xc +#define AGX5_PWRMGR_DSU_PWRSTAT0 0x10 +#define AGX5_PWRMGR_DSU_PWRSTAT1 0x14 + +/* DSU Macros*/ +#define AGX5_PWRMGR_DSU_FWEN(x) ((x) & 0xf) +#define AGX5_PWRMGR_DSU_PGEN(x) ((x) & 0xf) +#define AGX5_PWRMGR_DSU_PGEN_OUT(x) ((x) & 0xf) +#define AGX5_PWRMGR_DSU_SINGLE_PACCEPT(x) ((x) & 0x1) +#define AGX5_PWRMGR_DSU_SINGLE_PDENY(x) (((x) & 0x1) << 1) +#define AGX5_PWRMGR_DSU_SINGLE_FSM_STATE(x) (((x) & 0xff) << 8) +#define AGX5_PWRMGR_DSU_SINGLE_PCH_DONE(x) (((x) & 0x1) << 31) +#define AGX5_PWRMGR_DSU_MULTI_PACTIVE_IN(x) ((x) & 0xff) +#define AGX5_PWRMGR_DSU_MULTI_PACCEPT(x) (((x) & 0xff) << 8) +#define AGX5_PWRMGR_DSU_MULTI_PDENY(x) (((x) & 0xff) << 16) +#define AGX5_PWRMGR_DSU_MULTI_PCH_DONE(x) (((x) & 0x1) << 31) + +/* CPU */ +#define AGX5_PWRMGR_CPU_PWRCTLR0 0x18 +#define AGX5_PWRMGR_CPU_PWRCTLR1 0x20 +#define AGX5_PWRMGR_CPU_PWRCTLR2 0x28 +#define AGX5_PWRMGR_CPU_PWRCTLR3 0x30 +#define AGX5_PWRMGR_CPU_PWRSTAT0 0x1c +#define AGX5_PWRMGR_CPU_PWRSTAT1 0x24 +#define AGX5_PWRMGR_CPU_PWRSTAT2 0x2c +#define AGX5_PWRMGR_CPU_PWRSTAT3 0x34 + +/* APS */ +#define AGX5_PWRMGR_APS_FWENCTL 0x38 +#define AGX5_PWRMGR_APS_PGENCTL 0x3C +#define AGX5_PWRMGR_APS_PGSTAT 0x40 + +/* PSS */ +#define AGX5_PWRMGR_PSS_FWENCTL 0x44 +#define AGX5_PWRMGR_PSS_PGENCTL 0x48 +#define AGX5_PWRMGR_PSS_PGSTAT 0x4c + +/* PSS Macros*/ +#define AGX5_PWRMGR_PSS_FWEN(x) ((x) & 0xff) +#define AGX5_PWRMGR_PSS_PGEN(x) ((x) & 0xff) +#define AGX5_PWRMGR_PSS_PGEN_OUT(x) ((x) & 0xff) + +/* MPU */ +#define AGX5_PWRMGR_MPU_PCHCTLR 0x50 +#define AGX5_PWRMGR_MPU_PCHSTAT 0x54 +#define AGX5_PWRMGR_MPU_BOOTCONFIG 0x58 +#define AGX5_PWRMGR_CPU_POWER_STATE_MASK 0x1E + +/* MPU Macros*/ +#define AGX5_PWRMGR_MPU_TRIGGER_PCH_DSU(x) ((x) & 0x1) +#define AGX5_PWRMGR_MPU_TRIGGER_PCH_CPU(x) (((x) & 0xf) << 1) +#define AGX5_PWRMGR_MPU_STATUS_PCH_CPU(x) (((x) & 0xf) << 1) + +/* Shared Macros */ +#define AGX5_PWRMGR(_reg) (AGX5_PWRMGR_BASE + \ + (AGX5_PWRMGR_##_reg)) + +/* POWER MANAGER ERROR CODE */ +#define AGX5_PWRMGR_HANDOFF_PERIPHERAL -1 +#define AGX5_PWRMGR_PSS_STAT_BUSY_E_BUSY 0x0 +#define AGX5_PWRMGR_PSS_STAT_BUSY(x) (((x) & 0x000000FF) >> 0) + +int pss_sram_power_off(handoff *hoff_ptr); +int wait_verify_fsm(uint16_t timeout, uint32_t peripheral_handoff); + +#endif diff --git a/plat/intel/soc/agilex5/include/agilex5_system_manager.h b/plat/intel/soc/agilex5/include/agilex5_system_manager.h new file mode 100644 index 
0000000..9a58cdb --- /dev/null +++ b/plat/intel/soc/agilex5/include/agilex5_system_manager.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef AGX5_SOCFPGA_SYSTEMMANAGER_H +#define AGX5_SOCFPGA_SYSTEMMANAGER_H + +#include "socfpga_plat_def.h" + +/* System Manager Register Map */ +#define SOCFPGA_SYSMGR_SILICONID_1 0x00 +#define SOCFPGA_SYSMGR_SILICONID_2 0x04 +#define SOCFPGA_SYSMGR_WDDBG 0x08 +#define SOCFPGA_SYSMGR_MPU_STATUS 0x10 +#define SOCFPGA_SYSMGR_SDMMC_L3_MASTER 0x2C +#define SOCFPGA_SYSMGR_NAND_L3_MASTER 0x34 +#define SOCFPGA_SYSMGR_USB0_L3_MASTER 0x38 +#define SOCFPGA_SYSMGR_USB1_L3_MASTER 0x3C +#define SOCFPGA_SYSMGR_TSN_GLOBAL 0x40 +#define SOCFPGA_SYSMGR_EMAC_0 0x44 /* TSN_0 */ +#define SOCFPGA_SYSMGR_EMAC_1 0x48 /* TSN_1 */ +#define SOCFPGA_SYSMGR_EMAC_2 0x4C /* TSN_2 */ +#define SOCFPGA_SYSMGR_TSN_0_ACE 0x50 +#define SOCFPGA_SYSMGR_TSN_1_ACE 0x54 +#define SOCFPGA_SYSMGR_TSN_2_ACE 0x58 +#define SOCFPGA_SYSMGR_FPGAINTF_EN_1 0x68 +#define SOCFPGA_SYSMGR_FPGAINTF_EN_2 0x6C +#define SOCFPGA_SYSMGR_FPGAINTF_EN_3 0x70 +#define SOCFPGA_SYSMGR_DMAC0_L3_MASTER 0x74 +#define SOCFPGA_SYSMGR_ETR_L3_MASTER 0x78 +#define SOCFPGA_SYSMGR_DMAC1_L3_MASTER 0x7C +#define SOCFPGA_SYSMGR_SEC_CTRL_SLT 0x80 +#define SOCFPGA_SYSMGR_OSC_TRIM 0x84 +#define SOCFPGA_SYSMGR_DMAC0_CTRL_STATUS_REG 0x88 +#define SOCFPGA_SYSMGR_DMAC1_CTRL_STATUS_REG 0x8C +#define SOCFPGA_SYSMGR_ECC_INTMASK_VALUE 0x90 +#define SOCFPGA_SYSMGR_ECC_INTMASK_SET 0x94 +#define SOCFPGA_SYSMGR_ECC_INTMASK_CLR 0x98 +#define SOCFPGA_SYSMGR_ECC_INTMASK_SERR 0x9C +#define SOCFPGA_SYSMGR_ECC_INTMASK_DERR 0xA0 +/* NOC configuration value */ +#define SOCFPGA_SYSMGR_NOC_TIMEOUT 0xC0 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_SET 0xC4 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_CLR 0xC8 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_VAL 0xCC +#define SOCFPGA_SYSMGR_NOC_IDLEACK 0xD0 +#define SOCFPGA_SYSMGR_NOC_IDLESTATUS 0xD4 +#define SOCFPGA_SYSMGR_FPGA2SOC_CTRL 0xD8 +#define SOCFPGA_SYSMGR_FPGA_CFG 0xDC +#define SOCFPGA_SYSMGR_GPO 0xE4 +#define SOCFPGA_SYSMGR_GPI 0xE8 +#define SOCFPGA_SYSMGR_MPU 0xF0 +#define SOCFPGA_SYSMGR_SDM_HPS_SPARE 0xF4 +#define SOCFPGA_SYSMGR_HPS_SDM_SPARE 0xF8 +#define SOCFPGA_SYSMGR_DFI_INTF 0xFC +#define SOCFPGA_SYSMGR_NAND_DD_CTRL 0x100 +#define SOCFPGA_SYSMGR_NAND_PHY_CTRL_REG 0x104 +#define SOCFPGA_SYSMGR_NAND_PHY_TSEL_REG 0x108 +#define SOCFPGA_SYSMGR_NAND_DQ_TIMING_REG 0x10C +#define SOCFPGA_SYSMGR_PHY_DQS_TIMING_REG 0x110 +#define SOCFPGA_SYSMGR_NAND_PHY_GATE_LPBK_CTRL_REG 0x114 +#define SOCFPGA_SYSMGR_NAND_PHY_DLL_MASTER_CTRL_REG 0x118 +#define SOCFPGA_SYSMGR_NAND_PHY_DLL_SLAVE_CTRL_REG 0x11C +#define SOCFPGA_SYSMGR_NAND_DD_DEFAULT_SETTING_REG0 0x120 +#define SOCFPGA_SYSMGR_NAND_DD_DEFAULT_SETTING_REG1 0x124 +#define SOCFPGA_SYSMGR_NAND_DD_STATUS_REG 0x128 +#define SOCFPGA_SYSMGR_NAND_DD_ID_LOW_REG 0x12C +#define SOCFPGA_SYSMGR_NAND_DD_ID_HIGH_REG 0x130 +#define SOCFPGA_SYSMGR_NAND_WRITE_PROT_EN_REG 0x134 +#define SOCFPGA_SYSMGR_SDMMC_CMD_QUEUE_SETTING_REG 0x138 +#define SOCFPGA_SYSMGR_I3C_SLV_PID_LOW 0x13C +#define SOCFPGA_SYSMGR_I3C_SLV_PID_HIGH 0x140 +#define SOCFPGA_SYSMGR_I3C_SLV_CTRL_0 0x144 +#define SOCFPGA_SYSMGR_I3C_SLV_CTRL_1 0x148 +#define SOCFPGA_SYSMGR_F2S_BRIDGE_CTRL 0x14C +#define SOCFPGA_SYSMGR_DMA_TBU_STASH_CTRL_REG_0_DMA0 0x150 +#define SOCFPGA_SYSMGR_DMA_TBU_STASH_CTRL_REG_0_DMA1 0x154 +#define SOCFPGA_SYSMGR_SDM_TBU_STASH_CTRL_REG_1_SDM 0x158 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_USB2 0x15C +#define 
SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_USB3 0x160 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_SDMMC 0x164 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_NAND 0x168 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_ETR 0x16C +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN0 0x170 +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN1 0x174 +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN2 0x178 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_CTRL_REG_0_DMA0 0x17C +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_CTRL_REG_0_DMA1 0x180 +#define SOCFPGA_SYSMGR_SDM_TBU_STREAM_CTRL_REG_1_SDM 0x184 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_USB2 0x188 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_USB3 0x18C +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_SDMMC 0x190 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_NAND 0x194 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_ETR 0x198 +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN0 0x19C +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN1 0x1A0 +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN2 0x1A4 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_ID_AX_REG_0_DMA0 0x1A8 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_ID_AX_REG_0_DMA1 0x1AC +#define SOCFPGA_SYSMGR_SDM_TBU_STREAM_ID_AX_REG_1_SDM 0x1B0 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_USB2 0x1B4 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_USB3 0x1B8 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_SDMMC 0x1BC +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_NAND 0x1C0 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_ETR 0x1C4 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN0 0x1C8 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN1 0x1CC +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN2 0x1D0 +#define SOCFPGA_SYSMGR_USB3_MISC_CTRL_REG0 0x1F0 +#define SOCFPGA_SYSMGR_USB3_MISC_CTRL_REG1 0x1F4 + +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_0 0x200 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_1 0x204 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_2 0x208 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_3 0x20C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_4 0x210 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_5 0x214 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_6 0x218 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_7 0x21C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_8 0x220 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_9 0x224 +#define SOCFPGA_SYSMGR_MPFE_CONFIG 0x228 +#define SOCFPGA_SYSMGR_MPFE_status 0x22C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_0 0x230 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_1 0x234 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_2 0x238 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_3 0x23C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_4 0x240 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_5 0x244 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_6 0x248 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_7 0x24C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_8 0x250 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_9 0x254 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_0 0x258 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_1 0x25C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_2 0x260 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_3 0x264 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_4 0x268 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_5 0x26C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_6 0x270 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_7 0x274 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_8 0x278 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_9 0x27C + +#define DMA0_STREAM_CTRL_REG 0x10D1217C +#define DMA1_STREAM_CTRL_REG 0x10D12180 +#define SDM_STREAM_CTRL_REG 0x10D12184 +#define 
USB2_STREAM_CTRL_REG 0x10D12188 +#define USB3_STREAM_CTRL_REG 0x10D1218C +#define SDMMC_STREAM_CTRL_REG 0x10D12190 +#define NAND_STREAM_CTRL_REG 0x10D12194 +#define ETR_STREAM_CTRL_REG 0x10D12198 +#define TSN0_STREAM_CTRL_REG 0x10D1219C +#define TSN1_STREAM_CTRL_REG 0x10D121A0 +#define TSN2_STREAM_CTRL_REG 0x10D121A4 + +/* Stream ID configuration value for Agilex5 */ +#define TSN0 0x00010001 +#define TSN1 0x00020002 +#define TSN2 0x00030003 +#define NAND 0x00040004 +#define SDMMC 0x00050005 +#define USB0 0x00060006 +#define USB1 0x00070007 +#define DMA0 0x00080008 +#define DMA1 0x00090009 +#define SDM 0x000A000A +#define CORE_SIGHT_DEBUG 0x000B000B + +/* Field Masking */ +#define SYSMGR_SDMMC_DRVSEL(x) (((x) & 0x7) << 0) +#define SYSMGR_SDMMC_SMPLSEL(x) (((x) & 0x7) << 4) + +#define SYSMGR_F2S_BRIDGE_CTRL_EN BIT(0) +#define IDLE_DATA_LWSOC2FPGA BIT(4) +#define IDLE_DATA_SOC2FPGA BIT(0) +#define IDLE_DATA_MASK (IDLE_DATA_LWSOC2FPGA \ + | IDLE_DATA_SOC2FPGA) +#define SYSMGR_ECC_OCRAM_MASK BIT(1) +#define SYSMGR_ECC_DDR0_MASK BIT(16) +#define SYSMGR_ECC_DDR1_MASK BIT(17) + +#define WSTREAMIDEN_REG_CTRL BIT(0) +#define RSTREAMIDEN_REG_CTRL BIT(1) +#define WMMUSECSID_REG_VAL BIT(4) +#define RMMUSECSID_REG_VAL BIT(5) + +/* Macros */ +#define SOCFPGA_SYSMGR(_reg) (SOCFPGA_SYSMGR_REG_BASE \ + + (SOCFPGA_SYSMGR_##_reg)) + +#define ENABLE_STREAMID WSTREAMIDEN_REG_CTRL \ + | RSTREAMIDEN_REG_CTRL +#define ENABLE_STREAMID_SECURE_TX WSTREAMIDEN_REG_CTRL \ + | RSTREAMIDEN_REG_CTRL \ + | WMMUSECSID_REG_VAL \ + | RMMUSECSID_REG_VAL + +#endif /* AGX5_SOCFPGA_SYSTEMMANAGER_H */ diff --git a/plat/intel/soc/agilex5/include/socfpga_plat_def.h b/plat/intel/soc/agilex5/include/socfpga_plat_def.h new file mode 100644 index 0000000..8a49d61 --- /dev/null +++ b/plat/intel/soc/agilex5/include/socfpga_plat_def.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_SOCFPGA_DEF_H +#define PLAT_SOCFPGA_DEF_H + +#include "agilex5_memory_controller.h" +#include "agilex5_system_manager.h" +#include <platform_def.h> + +/* Platform Setting */ +#define PLATFORM_MODEL PLAT_SOCFPGA_AGILEX5 +#define BOOT_SOURCE BOOT_SOURCE_SDMMC +#define MMC_DEVICE_TYPE 1 /* MMC = 0, SD = 1 */ +#define XLAT_TABLES_V2 U(1) +#define PLAT_PRIMARY_CPU_A55 0x000 +#define PLAT_PRIMARY_CPU_A76 0x200 +#define PLAT_CLUSTER_ID_MPIDR_AFF_SHIFT MPIDR_AFF2_SHIFT +#define PLAT_CPU_ID_MPIDR_AFF_SHIFT MPIDR_AFF1_SHIFT +#define PLAT_L2_RESET_REQ 0xB007C0DE + +/* System Counter */ /* TODO: Update back to 400MHz */ +#define PLAT_SYS_COUNTER_FREQ_IN_TICKS (80000000) +#define PLAT_SYS_COUNTER_FREQ_IN_MHZ (80) + +/* FPGA config helpers */ +#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR 0x400000 +#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE 0x2000000 + +/* QSPI Setting */ +#define CAD_QSPIDATA_OFST 0x10900000 +#define CAD_QSPI_OFFSET 0x108d2000 + +/* Register Mapping */ +#define SOCFPGA_CCU_NOC_REG_BASE 0x1c000000 +#define SOCFPGA_F2SDRAMMGR_REG_BASE 0x18001000 + +#define SOCFPGA_MMC_REG_BASE 0x10808000 +#define SOCFPGA_MEMCTRL_REG_BASE 0x108CC000 +#define SOCFPGA_RSTMGR_REG_BASE 0x10d11000 +#define SOCFPGA_SYSMGR_REG_BASE 0x10d12000 +#define SOCFPGA_PINMUX_REG_BASE 0x10d13000 +#define SOCFPGA_NAND_REG_BASE 0x10B80000 + +#define SOCFPGA_L4_PER_SCR_REG_BASE 0x10d21000 +#define SOCFPGA_L4_SYS_SCR_REG_BASE 0x10d21100 +#define SOCFPGA_SOC2FPGA_SCR_REG_BASE 0x10d21200 +#define SOCFPGA_LWSOC2FPGA_SCR_REG_BASE 0x10d21300 + +/* Define maximum page size for NAND flash devices */ +#define PLATFORM_MTD_MAX_PAGE_SIZE U(0x1000) + +/******************************************************************************* + * Platform memory map related constants + ******************************************************************************/ +#define DRAM_BASE (0x80000000) +#define DRAM_SIZE (0x80000000) + +#define OCRAM_BASE (0x00000000) +#define OCRAM_SIZE (0x00080000) + +#define MEM64_BASE (0x0080000000) +#define MEM64_SIZE (0x0080000000) + +//128MB PSS +#define PSS_BASE (0x10000000) +#define PSS_SIZE (0x08000000) + +//64MB MPFE +#define MPFE_BASE (0x18000000) +#define MPFE_SIZE (0x04000000) + +//16MB CCU +#define CCU_BASE (0x1C000000) +#define CCU_SIZE (0x01000000) + +//1MB GIC +#define GIC_BASE (0x1D000000) +#define GIC_SIZE (0x00100000) + +#define BL2_BASE (0x00000000) +#define BL2_LIMIT (0x0001b000) + +#define BL31_BASE (0x80000000) +#define BL31_LIMIT (0x82000000) + +/******************************************************************************* + * UART related constants + ******************************************************************************/ +#define PLAT_UART0_BASE (0x10C02000) +#define PLAT_UART1_BASE (0x10C02100) + +/******************************************************************************* + * GIC related constants + ******************************************************************************/ +#define PLAT_GIC_BASE (0x1D000000) +#define PLAT_GICC_BASE (PLAT_GIC_BASE + 0x20000) +#define PLAT_GICD_BASE (PLAT_GIC_BASE + 0x00000) +#define PLAT_GICR_BASE (PLAT_GIC_BASE + 0x60000) + +#define PLAT_INTEL_SOCFPGA_GICR_BASE PLAT_GICR_BASE + +/******************************************************************************* + * SDMMC related pointer function + ******************************************************************************/ +#define SDMMC_READ_BLOCKS sdmmc_read_blocks +#define SDMMC_WRITE_BLOCKS sdmmc_write_blocks + 
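+/*
+ * Note: the two SDMMC hooks above let the shared storage glue
+ * (socfpga_storage.c, listed in platform.mk) plug in this platform's
+ * block I/O routines, e.g. in an io_block device spec. Illustrative
+ * fragment only, assuming the generic io_block driver's field names:
+ *
+ *   .ops = {
+ *           .read  = SDMMC_READ_BLOCKS,
+ *           .write = SDMMC_WRITE_BLOCKS,
+ *   },
+ */
+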
+/******************************************************************************* + * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate L2 reset + * is done and HPS should trigger warm reset via RMR_EL3. + ******************************************************************************/ +#define L2_RESET_DONE_REG 0x10D12218 + +#endif /* PLAT_SOCFPGA_DEF_H */ diff --git a/plat/intel/soc/agilex5/platform.mk b/plat/intel/soc/agilex5/platform.mk new file mode 100644 index 0000000..546bc2e --- /dev/null +++ b/plat/intel/soc/agilex5/platform.mk @@ -0,0 +1,106 @@ +# +# Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2019-2023, Intel Corporation. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# +include lib/xlat_tables_v2/xlat_tables.mk +PLAT_INCLUDES := \ + -Iplat/intel/soc/agilex5/include/ \ + -Iplat/intel/soc/common/drivers/ \ + -Iplat/intel/soc/common/include/ + +# GIC-600 configuration +GICV3_SUPPORT_GIC600 := 1 +# Include GICv3 driver files +include drivers/arm/gic/v3/gicv3.mk +AGX5_GICv3_SOURCES := \ + ${GICV3_SOURCES} \ + plat/common/plat_gicv3.c + +PLAT_BL_COMMON_SOURCES := \ + ${AGX5_GICv3_SOURCES} \ + drivers/cadence/combo_phy/cdns_combo_phy.c \ + drivers/cadence/emmc/cdns_sdmmc.c \ + drivers/cadence/nand/cdns_nand.c \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + drivers/ti/uart/aarch64/16550_console.S \ + plat/intel/soc/common/aarch64/platform_common.c \ + plat/intel/soc/common/aarch64/plat_helpers.S \ + plat/intel/soc/common/drivers/ccu/ncore_ccu.c \ + plat/intel/soc/common/drivers/combophy/combophy.c \ + plat/intel/soc/common/drivers/sdmmc/sdmmc.c \ + plat/intel/soc/common/drivers/ddr/ddr.c \ + plat/intel/soc/common/drivers/nand/nand.c \ + plat/intel/soc/common/socfpga_delay_timer.c + +BL2_SOURCES += \ + common/desc_image_load.c \ + lib/xlat_tables_v2/aarch64/enable_mmu.S \ + lib/xlat_tables_v2/xlat_tables_context.c \ + lib/xlat_tables_v2/xlat_tables_core.c \ + lib/xlat_tables_v2/aarch64/xlat_tables_arch.c \ + lib/xlat_tables_v2/xlat_tables_utils.c \ + drivers/mmc/mmc.c \ + drivers/intel/soc/stratix10/io/s10_memmap_qspi.c \ + drivers/io/io_storage.c \ + drivers/io/io_block.c \ + drivers/io/io_fip.c \ + drivers/io/io_mtd.c \ + drivers/partition/partition.c \ + drivers/partition/gpt.c \ + drivers/synopsys/emmc/dw_mmc.c \ + lib/cpus/aarch64/cortex_a55.S \ + lib/cpus/aarch64/cortex_a76.S \ + plat/intel/soc/agilex5/soc/agilex5_clock_manager.c \ + plat/intel/soc/agilex5/soc/agilex5_memory_controller.c \ + plat/intel/soc/agilex5/soc/agilex5_mmc.c \ + plat/intel/soc/agilex5/soc/agilex5_pinmux.c \ + plat/intel/soc/agilex5/soc/agilex5_power_manager.c \ + plat/intel/soc/common/bl2_plat_mem_params_desc.c \ + plat/intel/soc/common/socfpga_image_load.c \ + plat/intel/soc/common/socfpga_storage.c \ + plat/intel/soc/common/socfpga_vab.c \ + plat/intel/soc/common/soc/socfpga_emac.c \ + plat/intel/soc/common/soc/socfpga_firewall.c \ + plat/intel/soc/common/soc/socfpga_handoff.c \ + plat/intel/soc/common/soc/socfpga_mailbox.c \ + plat/intel/soc/common/soc/socfpga_reset_manager.c \ + plat/intel/soc/common/drivers/qspi/cadence_qspi.c \ + plat/intel/soc/agilex5/bl2_plat_setup.c \ + plat/intel/soc/common/drivers/wdt/watchdog.c + +include lib/zlib/zlib.mk +PLAT_INCLUDES += -Ilib/zlib +BL2_SOURCES += $(ZLIB_SOURCES) + +BL31_SOURCES += \ + drivers/arm/cci/cci.c \ + ${XLAT_TABLES_LIB_SRCS} \ + lib/cpus/aarch64/aem_generic.S \ + lib/cpus/aarch64/cortex_a55.S \ + lib/cpus/aarch64/cortex_a76.S \ + 
plat/common/plat_psci_common.c \ + plat/intel/soc/agilex5/bl31_plat_setup.c \ + plat/intel/soc/agilex5/soc/agilex5_power_manager.c \ + plat/intel/soc/common/socfpga_psci.c \ + plat/intel/soc/common/socfpga_sip_svc.c \ + plat/intel/soc/common/socfpga_sip_svc_v2.c \ + plat/intel/soc/common/socfpga_topology.c \ + plat/intel/soc/common/sip/socfpga_sip_ecc.c \ + plat/intel/soc/common/sip/socfpga_sip_fcs.c \ + plat/intel/soc/common/soc/socfpga_mailbox.c \ + plat/intel/soc/common/soc/socfpga_reset_manager.c + +# Configs for A76 and A55 +HW_ASSISTED_COHERENCY := 1 +USE_COHERENT_MEM := 0 +CTX_INCLUDE_AARCH32_REGS := 0 +ERRATA_A55_1530923 := 1 + +$(eval $(call add_define,ARM_PRELOADED_DTB_BASE)) + +PROGRAMMABLE_RESET_ADDRESS := 0 +RESET_TO_BL2 := 1 +BL2_INV_DCACHE := 0 diff --git a/plat/intel/soc/agilex5/soc/agilex5_clock_manager.c b/plat/intel/soc/agilex5/soc/agilex5_clock_manager.c new file mode 100644 index 0000000..cc68153 --- /dev/null +++ b/plat/intel/soc/agilex5/soc/agilex5_clock_manager.c @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> + +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include "agilex5_clock_manager.h" +#include "agilex5_system_manager.h" +#include "socfpga_handoff.h" + +uint32_t wait_pll_lock(void) +{ + uint32_t data; + uint32_t count = 0; + + do { + data = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT); + count++; + if (count >= 1000) + return -ETIMEDOUT; + + } while ((CLKMGR_STAT_MAINPLLLOCKED(data) == 0) || + (CLKMGR_STAT_PERPLLLOCKED(data) == 0)); + return 0; +} + +uint32_t wait_fsm(void) +{ + uint32_t data; + uint32_t count = 0; + + do { + data = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT); + count++; + if (count >= 1000) + return -ETIMEDOUT; + + } while (CLKMGR_STAT_BUSY(data) == CLKMGR_STAT_BUSY_E_BUSY); + + return 0; +} + +uint32_t pll_source_sync_config(uint32_t pll_mem_offset, uint32_t data) +{ + uint32_t val = 0; + uint32_t count = 0; + uint32_t req_status = 0; + + val = (CLKMGR_MEM_WR | CLKMGR_MEM_REQ | + (data << CLKMGR_MEM_WDAT_OFFSET) | CLKMGR_MEM_ADDR); + mmio_write_32(pll_mem_offset, val); + + do { + req_status = mmio_read_32(pll_mem_offset); + count++; + } while ((req_status & CLKMGR_MEM_REQ) && (count < 10)); + + if (count >= 10) + return -ETIMEDOUT; + + return 0; +} + +uint32_t pll_source_sync_read(uint32_t pll_mem_offset) +{ + uint32_t val = 0; + uint32_t rdata = 0; + uint32_t count = 0; + uint32_t req_status = 0; + + val = (CLKMGR_MEM_REQ | CLKMGR_MEM_ADDR); + mmio_write_32(pll_mem_offset, val); + + do { + req_status = mmio_read_32(pll_mem_offset); + count++; + } while ((req_status & CLKMGR_MEM_REQ) && (count < 10)); + + if (count >= 10) + return -ETIMEDOUT; + + rdata = mmio_read_32(pll_mem_offset + 0x4); + INFO("rdata (%x) = %x\n", pll_mem_offset + 0x4, rdata); + + return rdata; +} + +void config_clkmgr_handoff(handoff *hoff_ptr) +{ + /* Take both PLL out of reset and power up */ + + mmio_setbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB, + CLKMGR_PLLGLOB_PD_SET_MSK | + CLKMGR_PLLGLOB_RST_SET_MSK); + mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB, + CLKMGR_PLLGLOB_PD_SET_MSK | + CLKMGR_PLLGLOB_RST_SET_MSK); + + /* PLL lock */ + wait_pll_lock(); + + /* Bypass all mainpllgrp's clocks to input clock ref */ + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASSS, 0xff); + /* Bypass all perpllgrp's clocks to input clock ref */ + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 
0xff); + + /* Pass clock source frequency into scratch register */ + mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1), + hoff_ptr->hps_osc_clk_hz); + mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2), + hoff_ptr->fpga_clk_hz); + + /* Take all PLLs out of bypass */ + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASS, 0); + wait_fsm(); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0); + wait_fsm(); + + /* Enable mainpllgrp's software-managed clock */ + mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_EN, + CLKMGR_MAINPLL_EN_RESET); + mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN, + CLKMGR_PERPLL_EN_RESET); +} + +/* Extract reference clock from platform clock source */ +uint32_t get_ref_clk(uint32_t pllglob) +{ + uint32_t arefclkdiv, ref_clk; + uint32_t scr_reg; + + switch (CLKMGR_PSRC(pllglob)) { + case CLKMGR_PLLGLOB_PSRC_EOSC1: + scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1); + ref_clk = mmio_read_32(scr_reg); + break; + case CLKMGR_PLLGLOB_PSRC_INTOSC: + ref_clk = CLKMGR_INTOSC_HZ; + break; + case CLKMGR_PLLGLOB_PSRC_F2S: + scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2); + ref_clk = mmio_read_32(scr_reg); + break; + default: + ref_clk = 0; + assert(0); + break; + } + + arefclkdiv = CLKMGR_PLLGLOB_AREFCLKDIV(pllglob); + ref_clk /= arefclkdiv; + + return ref_clk; +} + +/* Calculate clock frequency based on parameter */ +uint32_t get_clk_freq(uint32_t psrc_reg, uint32_t main_pllc, uint32_t per_pllc) +{ + uint32_t ref_clk = 0; + + uint32_t clk_psrc, mdiv; + uint32_t pllm_reg, pllc_reg, pllc_div, pllglob_reg; + + + clk_psrc = mmio_read_32(CLKMGR_MAINPLL + psrc_reg); + clk_psrc = 0; + + switch (clk_psrc) { + case 0: + pllm_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLM; + pllc_reg = CLKMGR_MAINPLL + main_pllc; + pllglob_reg = CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB; + break; + } + + ref_clk = get_ref_clk(mmio_read_32(pllglob_reg)); + mdiv = CLKMGR_PLLM_MDIV(mmio_read_32(pllm_reg)); + ref_clk *= mdiv; + + pllc_div = mmio_read_32(pllc_reg) & 0x7ff; + NOTICE("return = %d Hz\n", (ref_clk / pllc_div)); + + ref_clk = 200000000; + return (uint32_t) ref_clk; + +} + +/* Return L3 interconnect clock */ +uint32_t get_l3_clk(void) +{ + uint32_t l3_clk; + + l3_clk = get_clk_freq(CLKMGR_MAINPLL_NOCCLK, CLKMGR_MAINPLL_PLLC1, + CLKMGR_PERPLL_PLLC1); + return l3_clk; +} + +/* Calculate clock frequency to be used for watchdog timer */ +uint32_t get_wdt_clk(void) +{ + uint32_t l3_clk, l4_sys_clk; + + l3_clk = get_l3_clk(); + l4_sys_clk = l3_clk / 4; + + return l4_sys_clk; +} + +/* Calculate clock frequency to be used for UART driver */ +uint32_t get_uart_clk(void) +{ + uint32_t data32, l3_clk, l4_sp_clk; + + l3_clk = get_l3_clk(); + + data32 = mmio_read_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCDIV); + data32 = (data32 >> 16) & 0x3; + + l4_sp_clk = l3_clk >> data32; + + return l4_sp_clk; +} + +/* Calculate clock frequency to be used for SDMMC driver */ +uint32_t get_mmc_clk(void) +{ + uint32_t mmc_clk; + + //TODO: To update when handoff data is ready + //uint32_t data32; + + //mmc_clk = get_clk_freq(CLKMGR_ALTERA_SDMMCCTR, CLKMGR_MAINPLL_PLLC3, CLKMGR_PERPLL_PLLC3); + + //data32 = mmio_read_32(CLKMGR_ALTERA + CLKMGR_ALTERA_SDMMCCTR); + //data32 = (data32 & 0x7ff) + 1; + //mmc_clk = (mmc_clk / data32) / 4; + + + mmc_clk = get_clk_freq(CLKMGR_MAINPLL_NOCCLK, CLKMGR_MAINPLL_PLLC3, + CLKMGR_PERPLL_PLLC3); + + // TODO: To update when handoff data is ready + NOTICE("mmc_clk = %d Hz\n", mmc_clk); + + return mmc_clk; +} diff --git a/plat/intel/soc/agilex5/soc/agilex5_memory_controller.c 
b/plat/intel/soc/agilex5/soc/agilex5_memory_controller.c new file mode 100644 index 0000000..0ddff4a --- /dev/null +++ b/plat/intel/soc/agilex5/soc/agilex5_memory_controller.c @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <errno.h> + +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/utils.h> + +#include "agilex5_memory_controller.h" +#include <platform_def.h> + +#define ALT_CCU_NOC_DI_SET_MSK 0x10 + +#define DDR_READ_LATENCY_DELAY 40 +#define MAX_MEM_CAL_RETRY 3 +#define PRE_CALIBRATION_DELAY 1 +#define POST_CALIBRATION_DELAY 1 +#define TIMEOUT_EMIF_CALIBRATION 1000 +#define CLEAR_EMIF_DELAY 1000 +#define CLEAR_EMIF_TIMEOUT 1000 + +#define DDR_CONFIG(A, B, C, R) (((A) << 24) | ((B) << 16) | ((C) << 8) | (R)) +#define DDR_CONFIG_ELEMENTS (ARRAY_SIZE(ddr_config)) + +/* tWR = Min. 15ns constant, see JEDEC standard eg. DDR4 is JESD79-4.pdf */ +#define tWR_IN_NS 15 + +void configure_hmc_adaptor_regs(void); +void configure_ddr_sched_ctrl_regs(void); + +/* The followring are the supported configurations */ +uint32_t ddr_config[] = { + /* DDR_CONFIG(Address order,Bank,Column,Row) */ + /* List for DDR3 or LPDDR3 (pinout order > chip, row, bank, column) */ + DDR_CONFIG(0, 3, 10, 12), + DDR_CONFIG(0, 3, 9, 13), + DDR_CONFIG(0, 3, 10, 13), + DDR_CONFIG(0, 3, 9, 14), + DDR_CONFIG(0, 3, 10, 14), + DDR_CONFIG(0, 3, 10, 15), + DDR_CONFIG(0, 3, 11, 14), + DDR_CONFIG(0, 3, 11, 15), + DDR_CONFIG(0, 3, 10, 16), + DDR_CONFIG(0, 3, 11, 16), + DDR_CONFIG(0, 3, 12, 15), /* 0xa */ + /* List for DDR4 only (pinout order > chip, bank, row, column) */ + DDR_CONFIG(1, 3, 10, 14), + DDR_CONFIG(1, 4, 10, 14), + DDR_CONFIG(1, 3, 10, 15), + DDR_CONFIG(1, 4, 10, 15), + DDR_CONFIG(1, 3, 10, 16), + DDR_CONFIG(1, 4, 10, 16), + DDR_CONFIG(1, 3, 10, 17), + DDR_CONFIG(1, 4, 10, 17), +}; + +static int match_ddr_conf(uint32_t ddr_conf) +{ + int i; + + for (i = 0; i < DDR_CONFIG_ELEMENTS; i++) { + if (ddr_conf == ddr_config[i]) + return i; + } + return 0; +} + +static int check_hmc_clk(void) +{ + unsigned long timeout = 0; + uint32_t hmc_clk; + + do { + hmc_clk = mmio_read_32(AGX_SYSMGR_CORE_HMC_CLK); + if (hmc_clk & AGX_SYSMGR_CORE_HMC_CLK_STATUS) + break; + udelay(1); + } while (++timeout < 1000); + if (timeout >= 1000) + return -ETIMEDOUT; + + return 0; +} + +static int clear_emif(void) +{ + uint32_t data; + unsigned long timeout; + + mmio_write_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL, 0); + + timeout = 0; + do { + data = mmio_read_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT); + if ((data & AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE) == 0) + break; + udelay(CLEAR_EMIF_DELAY); + } while (++timeout < CLEAR_EMIF_TIMEOUT); + if (timeout >= CLEAR_EMIF_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + +static int mem_calibration(void) +{ + int status; + uint32_t data; + unsigned long timeout; + unsigned long retry = 0; + + udelay(PRE_CALIBRATION_DELAY); + + do { + if (retry != 0) + INFO("DDR: Retrying DRAM calibration\n"); + + timeout = 0; + do { + data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRCALSTAT); + if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 1) + break; + udelay(500); + } while (++timeout < TIMEOUT_EMIF_CALIBRATION); + + if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) { + status = clear_emif(); + if (status) + ERROR("Failed to clear Emif\n"); + } else { + break; + } + } while (++retry < MAX_MEM_CAL_RETRY); + + if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) { + ERROR("DDR: DRAM calibration 
failed.\n"); + status = -EIO; + } else { + INFO("DDR: DRAM calibration success.\n"); + status = 0; + } + + udelay(POST_CALIBRATION_DELAY); + + return status; +} + +int init_hard_memory_controller(void) +{ + int status; + + status = check_hmc_clk(); + if (status) { + ERROR("DDR: Error, HMC clock not running\n"); + return status; + } + + status = mem_calibration(); + if (status) { + ERROR("DDR: Memory Calibration Failed\n"); + return status; + } + + configure_hmc_adaptor_regs(); + + return 0; +} + +void configure_ddr_sched_ctrl_regs(void) +{ + uint32_t data, dram_addr_order, ddr_conf, bank, row, col, + rd_to_miss, wr_to_miss, burst_len, burst_len_ddr_clk, + burst_len_sched_clk, act_to_act, rd_to_wr, wr_to_rd, bw_ratio, + t_rtp, t_rp, t_rcd, rd_latency, tw_rin_clk_cycles, + bw_ratio_extended, auto_precharge = 0, act_to_act_bank, faw, + faw_bank, bus_rd_to_rd, bus_rd_to_wr, bus_wr_to_rd; + + INFO("Init HPS NOC's DDR Scheduler.\n"); + + data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG1); + dram_addr_order = AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW); + + col = IOHMC_DRAMADDRW_COL_ADDR_WIDTH(data); + row = IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(data); + bank = IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(data); + + ddr_conf = match_ddr_conf(DDR_CONFIG(dram_addr_order, bank, col, row)); + + if (ddr_conf) { + mmio_clrsetbits_32( + AGX_MPFE_DDR_MAIN_SCHED_DDRCONF, + AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK, + AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(ddr_conf)); + } else { + ERROR("DDR: Cannot find predefined ddrConf configuration.\n"); + } + + mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data); + + data = mmio_read_32(AGX_MPFE_IOHMC_DRAMTIMING0); + rd_latency = AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING0); + act_to_act = ACT_TO_ACT(data); + t_rcd = ACT_TO_RDWR(data); + act_to_act_bank = ACT_TO_ACT_DIFF_BANK(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING1); + rd_to_wr = RD_TO_WR(data); + bus_rd_to_rd = RD_TO_RD_DIFF_CHIP(data); + bus_rd_to_wr = RD_TO_WR_DIFF_CHIP(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING2); + t_rtp = RD_TO_PCH(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING3); + wr_to_rd = CALTIMING3_WR_TO_RD(data); + bus_wr_to_rd = CALTIMING3_WR_TO_RD_DIFF_CHIP(data); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING4); + t_rp = PCH_TO_VALID(data); + + data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL)); + bw_ratio = ((HMC_ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 0 : 1); + + data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0); + burst_len = HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(data); + burst_len_ddr_clk = burst_len / 2; + burst_len_sched_clk = ((burst_len/2) / 2); + + data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0); + switch (AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(data)) { + case 1: + /* DDR4 - 1333MHz */ + /* 20 (19.995) clock cycles = 15ns */ + /* Calculate with rounding */ + tw_rin_clk_cycles = (((tWR_IN_NS * 1333) % 1000) >= 500) ? + ((tWR_IN_NS * 1333) / 1000) + 1 : + ((tWR_IN_NS * 1333) / 1000); + break; + default: + /* Others - 1066MHz or slower */ + /* 16 (15.990) clock cycles = 15ns */ + /* Calculate with rounding */ + tw_rin_clk_cycles = (((tWR_IN_NS * 1066) % 1000) >= 500) ? 
+ ((tWR_IN_NS * 1066) / 1000) + 1 : + ((tWR_IN_NS * 1066) / 1000); + break; + } + + rd_to_miss = t_rtp + t_rp + t_rcd - burst_len_sched_clk; + wr_to_miss = ((rd_latency + burst_len_ddr_clk + 2 + tw_rin_clk_cycles) + / 2) - rd_to_wr + t_rp + t_rcd; + + mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING, + bw_ratio << DDRTIMING_BWRATIO_OFST | + wr_to_rd << DDRTIMING_WRTORD_OFST| + rd_to_wr << DDRTIMING_RDTOWR_OFST | + burst_len_sched_clk << DDRTIMING_BURSTLEN_OFST | + wr_to_miss << DDRTIMING_WRTOMISS_OFST | + rd_to_miss << DDRTIMING_RDTOMISS_OFST | + act_to_act << DDRTIMING_ACTTOACT_OFST); + + data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL)); + bw_ratio_extended = ((ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 1 : 0); + + mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRMODE, + bw_ratio_extended << DDRMODE_BWRATIOEXTENDED_OFST | + auto_precharge << DDRMODE_AUTOPRECHARGE_OFST); + + mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_READLATENCY, + (rd_latency / 2) + DDR_READ_LATENCY_DELAY); + + data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING9); + faw = AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(data); + + faw_bank = 1; // always 1 because we always have 4 bank DDR. + + mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE, + faw_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST | + faw << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST | + act_to_act_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST); + + mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV, + ((bus_rd_to_rd + << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST) + & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK) | + ((bus_rd_to_wr + << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST) + & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK) | + ((bus_wr_to_rd + << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST) + & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK)); + +} + +unsigned long get_physical_dram_size(void) +{ + uint32_t data; + unsigned long ram_addr_width, ram_ext_if_io_width; + + data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRIOCTRL); + switch (AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(data)) { + case 0: + ram_ext_if_io_width = 16; + break; + case 1: + ram_ext_if_io_width = 32; + break; + case 2: + ram_ext_if_io_width = 64; + break; + default: + ram_ext_if_io_width = 0; + break; + } + + data = mmio_read_32(AGX_MPFE_IOHMC_REG_DRAMADDRW); + ram_addr_width = IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(data); + + return (1 << ram_addr_width) * (ram_ext_if_io_width / 8); +} + + + +void configure_hmc_adaptor_regs(void) +{ + uint32_t data; + uint32_t dram_io_width; + + /* Configure DDR data rate */ + dram_io_width = AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0( + mmio_read_32(AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST)); + dram_io_width = (dram_io_width & 0xFF) >> 5; + + data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG3); + + dram_io_width |= (data & 0x4); + + mmio_write_32(AGX_MPFE_HMC_ADP_DDRIOCTRL, dram_io_width); + + /* Copy dram addr width from IOHMC to HMC ADP */ + data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW); + mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data); + + /* Enable nonsecure access to DDR */ + data = get_physical_dram_size(); + + if (data < AGX_DDR_SIZE) + data = AGX_DDR_SIZE; + + mmio_write_32(AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT, data - 1); + mmio_write_32(AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT, 0x1f); + + mmio_write_32(AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT, data - 1); + + 
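/* Enable the MPU and non-MPU DDR firewall region 0 windows sized above. */ +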
mmio_write_32(AGX_SOC_NOC_FW_DDR_SCR_ENABLESET, BIT(0) | BIT(8)); + + /* ECC enablement */ + data = mmio_read_32(AGX_MPFE_IOHMC_REG_CTRLCFG1); + if (data & (1 << AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST)) { + mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1, + AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK, + AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK); + + mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL2, + AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK, + AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK); + + mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1, + AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK | + AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK, + AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK); + INFO("Scrubbing ECC\n"); + + /* ECC Scrubbing */ + zeromem((void *)DRAM_BASE, DRAM_SIZE); + } else { + INFO("ECC is disabled.\n"); + } +} diff --git a/plat/intel/soc/agilex5/soc/agilex5_mmc.c b/plat/intel/soc/agilex5/soc/agilex5_mmc.c new file mode 100644 index 0000000..48f7341 --- /dev/null +++ b/plat/intel/soc/agilex5/soc/agilex5_mmc.c @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2020-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include <lib/mmio.h> + +#include "agilex5_clock_manager.h" +#include "agilex5_system_manager.h" + +void agx5_mmc_init(void) +{ +// TODO: To update when handoff data is ready + + //mmio_clrbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN, + // CLKMGR_PERPLL_EN_SDMMCCLK); + //mmio_write_32(SOCFPGA_SYSMGR(SDMMC), + // SYSMGR_SDMMC_SMPLSEL(0) | SYSMGR_SDMMC_DRVSEL(3)); + //mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN, + // CLKMGR_PERPLL_EN_SDMMCCLK); + +} diff --git a/plat/intel/soc/agilex5/soc/agilex5_pinmux.c b/plat/intel/soc/agilex5/soc/agilex5_pinmux.c new file mode 100644 index 0000000..50d9e36 --- /dev/null +++ b/plat/intel/soc/agilex5/soc/agilex5_pinmux.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> + +#include "agilex5_pinmux.h" +#include "agilex5_system_manager.h" + +const uint32_t sysmgr_pinmux_array_sel[] = { + 0x00000000, 0x00000001, /* usb */ + 0x00000004, 0x00000001, + 0x00000008, 0x00000001, + 0x0000000c, 0x00000001, + 0x00000010, 0x00000001, + 0x00000014, 0x00000001, + 0x00000018, 0x00000001, + 0x0000001c, 0x00000001, + 0x00000020, 0x00000001, + 0x00000024, 0x00000001, + 0x00000028, 0x00000001, + 0x0000002c, 0x00000001, + 0x00000030, 0x00000000, /* emac0 */ + 0x00000034, 0x00000000, + 0x00000038, 0x00000000, + 0x0000003c, 0x00000000, + 0x00000040, 0x00000000, + 0x00000044, 0x00000000, + 0x00000048, 0x00000000, + 0x0000004c, 0x00000000, + 0x00000050, 0x00000000, + 0x00000054, 0x00000000, + 0x00000058, 0x00000000, + 0x0000005c, 0x00000000, + 0x00000060, 0x00000008, /* gpio1 */ + 0x00000064, 0x00000008, + 0x00000068, 0x00000005, /* uart0 tx */ + 0x0000006c, 0x00000005, /* uart 0 rx */ + 0x00000070, 0x00000008, /* gpio */ + 0x00000074, 0x00000008, + 0x00000078, 0x00000004, /* i2c1 */ + 0x0000007c, 0x00000004, + 0x00000080, 0x00000007, /* jtag */ + 0x00000084, 0x00000007, + 0x00000088, 0x00000007, + 0x0000008c, 0x00000007, + 0x00000090, 0x00000001, /* sdmmc data0 */ + 0x00000094, 0x00000001, + 0x00000098, 0x00000001, + 0x0000009c, 0x00000001, + 0x00000100, 0x00000001, + 0x00000104, 0x00000001, /* sdmmc.data3 */ + 0x00000108, 0x00000008, /* loan */ + 0x0000010c, 0x00000008, /* gpio */ + 0x00000110, 0x00000008, + 0x00000114, 0x00000008, /* gpio1.io21 */ + 0x00000118, 0x00000005, /* mdio0.mdio */ + 0x0000011c, 0x00000005 /* mdio0.mdc */ +}; + +const uint32_t sysmgr_pinmux_array_ctrl[] = { + 0x00000000, 0x00502c38, /* Q1_1 */ + 0x00000004, 0x00102c38, + 0x00000008, 0x00502c38, + 0x0000000c, 0x00502c38, + 0x00000010, 0x00502c38, + 0x00000014, 0x00502c38, + 0x00000018, 0x00502c38, + 0x0000001c, 0x00502c38, + 0x00000020, 0x00502c38, + 0x00000024, 0x00502c38, + 0x00000028, 0x00502c38, + 0x0000002c, 0x00502c38, + 0x00000030, 0x00102c38, /* Q2_1 */ + 0x00000034, 0x00102c38, + 0x00000038, 0x00502c38, + 0x0000003c, 0x00502c38, + 0x00000040, 0x00102c38, + 0x00000044, 0x00102c38, + 0x00000048, 0x00502c38, + 0x0000004c, 0x00502c38, + 0x00000050, 0x00102c38, + 0x00000054, 0x00102c38, + 0x00000058, 0x00502c38, + 0x0000005c, 0x00502c38, + 0x00000060, 0x00502c38, /* Q3_1 */ + 0x00000064, 0x00502c38, + 0x00000068, 0x00102c38, + 0x0000006c, 0x00502c38, + 0x000000d0, 0x00502c38, + 0x000000d4, 0x00502c38, + 0x000000d8, 0x00542c38, + 0x000000dc, 0x00542c38, + 0x000000e0, 0x00502c38, + 0x000000e4, 0x00502c38, + 0x000000e8, 0x00102c38, + 0x000000ec, 0x00502c38, + 0x000000f0, 0x00502c38, /* Q4_1 */ + 0x000000f4, 0x00502c38, + 0x000000f8, 0x00102c38, + 0x000000fc, 0x00502c38, + 0x00000100, 0x00502c38, + 0x00000104, 0x00502c38, + 0x00000108, 0x00102c38, + 0x0000010c, 0x00502c38, + 0x00000110, 0x00502c38, + 0x00000114, 0x00502c38, + 0x00000118, 0x00542c38, + 0x0000011c, 0x00102c38 +}; + +const uint32_t sysmgr_pinmux_array_fpga[] = { + 0x00000000, 0x00000000, + 0x00000004, 0x00000000, + 0x00000008, 0x00000000, + 0x0000000c, 0x00000000, + 0x00000010, 0x00000000, + 0x00000014, 0x00000000, + 0x00000018, 0x00000000, + 0x0000001c, 0x00000000, + 0x00000020, 0x00000000, + 0x00000028, 0x00000000, + 0x0000002c, 0x00000000, + 0x00000030, 0x00000000, + 0x00000034, 0x00000000, + 0x00000038, 0x00000000, + 0x0000003c, 0x00000000, + 0x00000040, 0x00000000, + 0x00000044, 0x00000000, + 0x00000048, 0x00000000, + 0x00000050, 0x00000000, + 0x00000054, 
0x00000000, + 0x00000058, 0x0000002a +}; + +const uint32_t sysmgr_pinmux_array_iodelay[] = { + 0x00000000, 0x00000000, + 0x00000004, 0x00000000, + 0x00000008, 0x00000000, + 0x0000000c, 0x00000000, + 0x00000010, 0x00000000, + 0x00000014, 0x00000000, + 0x00000018, 0x00000000, + 0x0000001c, 0x00000000, + 0x00000020, 0x00000000, + 0x00000024, 0x00000000, + 0x00000028, 0x00000000, + 0x0000002c, 0x00000000, + 0x00000030, 0x00000000, + 0x00000034, 0x00000000, + 0x00000038, 0x00000000, + 0x0000003c, 0x00000000, + 0x00000040, 0x00000000, + 0x00000044, 0x00000000, + 0x00000048, 0x00000000, + 0x0000004c, 0x00000000, + 0x00000050, 0x00000000, + 0x00000054, 0x00000000, + 0x00000058, 0x00000000, + 0x0000005c, 0x00000000, + 0x00000060, 0x00000000, + 0x00000064, 0x00000000, + 0x00000068, 0x00000000, + 0x0000006c, 0x00000000, + 0x00000070, 0x00000000, + 0x00000074, 0x00000000, + 0x00000078, 0x00000000, + 0x0000007c, 0x00000000, + 0x00000080, 0x00000000, + 0x00000084, 0x00000000, + 0x00000088, 0x00000000, + 0x0000008c, 0x00000000, + 0x00000090, 0x00000000, + 0x00000094, 0x00000000, + 0x00000098, 0x00000000, + 0x0000009c, 0x00000000, + 0x00000100, 0x00000000, + 0x00000104, 0x00000000, + 0x00000108, 0x00000000, + 0x0000010c, 0x00000000, + 0x00000110, 0x00000000, + 0x00000114, 0x00000000, + 0x00000118, 0x00000000, + 0x0000011c, 0x00000000 +}; + +void config_fpgaintf_mod(void) +{ + mmio_write_32(SOCFPGA_SYSMGR(FPGAINTF_EN_2), 1<<8); +} + +void config_pinmux(handoff *hoff_ptr) +{ + unsigned int i; + + mmio_write_32(PINMUX_HANDOFF_CONFIG_ADDR, PINMUX_HANDOFF_CONFIG_VAL); + for (i = 0; i < PINMUX_HANDOFF_ARRAY_SIZE(hoff_ptr->pinmux_sel_array); i += 2) { + mmio_write_32(AGX5_PINMUX_PIN0SEL + + hoff_ptr->pinmux_sel_array[i], + hoff_ptr->pinmux_sel_array[i + 1]); + } + + config_fpgaintf_mod(); +} + +void config_peripheral(handoff *hoff_ptr) +{ + + // TODO: This need to be update due to peripheral_pwr_gate_array handoff change + // Pending SDM to pass over handoff data + // unsigned int i; + + // for (i = 0; i < 4; i += 2) { + // mmio_write_32(AGX_EDGE_PERIPHERAL + + // hoff_ptr->peripheral_pwr_gate_array[i], + // hoff_ptr->peripheral_pwr_gate_array[i+1]); + // } + + + // TODO: This need to be update due to peripheral_pwr_gate_array handoff change + mmio_write_32(AGX5_PERIPHERAL, + hoff_ptr->peripheral_pwr_gate_array); +} diff --git a/plat/intel/soc/agilex5/soc/agilex5_power_manager.c b/plat/intel/soc/agilex5/soc/agilex5_power_manager.c new file mode 100644 index 0000000..0d81970 --- /dev/null +++ b/plat/intel/soc/agilex5/soc/agilex5_power_manager.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2022-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include "agilex5_power_manager.h" +#include "socfpga_reset_manager.h" + +int wait_verify_fsm(uint16_t timeout, uint32_t peripheral_handoff) +{ + uint32_t data = 0; + uint32_t count = 0; + uint32_t pgstat = 0; + + /* Wait FSM ready */ + do { + data = mmio_read_32(AGX5_PWRMGR(PSS_PGSTAT)); + count++; + if (count >= 1000) { + return -ETIMEDOUT; + } + + } while (AGX5_PWRMGR_PSS_STAT_BUSY(data) == AGX5_PWRMGR_PSS_STAT_BUSY_E_BUSY); + + /* Verify PSS SRAM power gated */ + pgstat = mmio_read_32(AGX5_PWRMGR(PSS_PGSTAT)); + if (pgstat != (AGX5_PWRMGR_PSS_PGEN_OUT(peripheral_handoff))) { + return AGX5_PWRMGR_HANDOFF_PERIPHERAL; + } + + return 0; +} + +int pss_sram_power_off(handoff *hoff_ptr) +{ + int ret = 0; + uint32_t peripheral_handoff = 0; + + /* Get PSS SRAM handoff data */ + peripheral_handoff = hoff_ptr->peripheral_pwr_gate_array; + + /* Enable firewall for PSS SRAM */ + mmio_write_32(AGX5_PWRMGR(PSS_FWENCTL), + AGX5_PWRMGR_PSS_FWEN(peripheral_handoff)); + + /* Wait */ + udelay(1); + + /* Power gating PSS SRAM */ + mmio_write_32(AGX5_PWRMGR(PSS_PGENCTL), + AGX5_PWRMGR_PSS_PGEN(peripheral_handoff)); + + ret = wait_verify_fsm(1000, peripheral_handoff); + + return ret; +} + +void config_pwrmgr_handoff(handoff *hoff_ptr) +{ + int ret = 0; + + switch (hoff_ptr->header_magic) { + case HANDOFF_MAGIC_PERIPHERAL: + ret = pss_sram_power_off(hoff_ptr); + break; + default: + break; + } + + if (ret != 0) { + ERROR("Config PwrMgr handoff failed. error %d\n", ret); + assert(false); + } +} diff --git a/plat/intel/soc/common/aarch64/plat_helpers.S b/plat/intel/soc/common/aarch64/plat_helpers.S new file mode 100644 index 0000000..cbd0121 --- /dev/null +++ b/plat/intel/soc/common/aarch64/plat_helpers.S @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2019-2023, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <cpu_macros.S> +#include <platform_def.h> +#include <el3_common_macros.S> + + .globl plat_secondary_cold_boot_setup + .globl platform_is_primary_cpu + .globl plat_is_my_cpu_primary + .globl plat_my_core_pos + .globl plat_crash_console_init + .globl plat_crash_console_putc + .globl plat_crash_console_flush + .globl platform_mem_init + .globl plat_secondary_cpus_bl31_entry + + .globl plat_get_my_entrypoint + + /* ----------------------------------------------------- + * void plat_secondary_cold_boot_setup (void); + * + * This function performs any platform specific actions + * needed for a secondary cpu after a cold reset e.g + * mark the cpu's presence, mechanism to place it in a + * holding pen etc. 
+ * ----------------------------------------------------- + */ +func plat_secondary_cold_boot_setup + /* Wait until the it gets reset signal from rstmgr gets populated */ +poll_mailbox: +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + mov_imm x0, PLAT_SEC_ENTRY + cbz x0, poll_mailbox + br x0 +#else + wfi + mov_imm x0, PLAT_SEC_ENTRY + ldr x1, [x0] + mov_imm x2, PLAT_CPUID_RELEASE + ldr x3, [x2] + mrs x4, mpidr_el1 + and x4, x4, #0xff + cmp x3, x4 + b.ne poll_mailbox + br x1 +#endif +endfunc plat_secondary_cold_boot_setup + +#if ((PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10) || \ + (PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX) || \ + (PLATFORM_MODEL == PLAT_SOCFPGA_N5X)) + +func platform_is_primary_cpu + and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK) + cmp x0, #PLAT_PRIMARY_CPU + cset x0, eq + ret +endfunc platform_is_primary_cpu + +#else + +func platform_is_primary_cpu + and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK) + cmp x0, #(PLAT_PRIMARY_CPU_A76) + b.eq primary_cpu + cmp x0, #(PLAT_PRIMARY_CPU_A55) + b.eq primary_cpu +primary_cpu: + cset x0, eq + ret +endfunc platform_is_primary_cpu + +#endif + +func plat_is_my_cpu_primary + mrs x0, mpidr_el1 + b platform_is_primary_cpu +endfunc plat_is_my_cpu_primary + +func plat_my_core_pos + mrs x0, mpidr_el1 + and x1, x0, #MPIDR_CPU_MASK + and x0, x0, #MPIDR_CLUSTER_MASK +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + add x0, x1, x0, LSR #8 +#else + add x0, x1, x0, LSR #6 +#endif + ret +endfunc plat_my_core_pos + +func warm_reset_req +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + bl plat_is_my_cpu_primary + cbnz x0, warm_reset +warm_reset: + mov_imm x1, PLAT_SEC_ENTRY + str xzr, [x1] + mrs x1, rmr_el3 + orr x1, x1, #0x02 + msr rmr_el3, x1 + isb + dsb sy +#else + str xzr, [x4] + bl plat_is_my_cpu_primary + cbz x0, cpu_in_wfi + mov_imm x1, PLAT_SEC_ENTRY + str xzr, [x1] + mrs x1, rmr_el3 + orr x1, x1, #0x02 + msr rmr_el3, x1 + isb + dsb sy +cpu_in_wfi: + wfi + b cpu_in_wfi +#endif +endfunc warm_reset_req + +/* TODO: Zephyr warm reset test */ +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 +func plat_get_my_entrypoint + ldr x4, =L2_RESET_DONE_REG + ldr x5, [x4] + ldr x1, =PLAT_L2_RESET_REQ + cmp x1, x5 + b.eq zephyr_reset_req + mov_imm x1, PLAT_SEC_ENTRY + ldr x0, [x1] + ret +zephyr_reset_req: + ldr x0, =0x00 + ret +endfunc plat_get_my_entrypoint +#else +func plat_get_my_entrypoint + ldr x4, =L2_RESET_DONE_REG + ldr x5, [x4] + ldr x1, =L2_RESET_DONE_STATUS + cmp x1, x5 + b.eq warm_reset_req + mov_imm x1, PLAT_SEC_ENTRY + ldr x0, [x1] + ret +endfunc plat_get_my_entrypoint +#endif + + /* --------------------------------------------- + * int plat_crash_console_init(void) + * Function to initialize the crash console + * without a C Runtime to print crash report. + * Clobber list : x0, x1, x2 + * --------------------------------------------- + */ +func plat_crash_console_init + mov_imm x0, CRASH_CONSOLE_BASE + mov_imm x1, PLAT_UART_CLOCK + mov_imm x2, PLAT_BAUDRATE + b console_16550_core_init +endfunc plat_crash_console_init + + /* --------------------------------------------- + * int plat_crash_console_putc(void) + * Function to print a character on the crash + * console without a C Runtime. 
+ * Clobber list : x1, x2 + * --------------------------------------------- + */ +func plat_crash_console_putc + mov_imm x1, CRASH_CONSOLE_BASE + b console_16550_core_putc +endfunc plat_crash_console_putc + +func plat_crash_console_flush + mov_imm x0, CRASH_CONSOLE_BASE + b console_16550_core_flush +endfunc plat_crash_console_flush + + + /* -------------------------------------------------------- + * void platform_mem_init (void); + * + * Any memory init, relocation to be done before the + * platform boots. Called very early in the boot process. + * -------------------------------------------------------- + */ +func platform_mem_init + mov x0, #0 + ret +endfunc platform_mem_init + + /* -------------------------------------------------------- + * macro plat_secondary_cpus_bl31_entry; + * + * el3_entrypoint_common init param configuration. + * Called very early in the secondary cores boot process. + * -------------------------------------------------------- + */ +func plat_secondary_cpus_bl31_entry + el3_entrypoint_common \ + _init_sctlr=0 \ + _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \ + _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \ + _init_memory=1 \ + _init_c_runtime=1 \ + _exception_vectors=runtime_exceptions \ + _pie_fixup_size=BL31_LIMIT - BL31_BASE +endfunc plat_secondary_cpus_bl31_entry diff --git a/plat/intel/soc/common/aarch64/platform_common.c b/plat/intel/soc/common/aarch64/platform_common.c new file mode 100644 index 0000000..b79a63c --- /dev/null +++ b/plat/intel/soc/common/aarch64/platform_common.c @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <arch_helpers.h> +#include <platform_def.h> +#include <plat/common/platform.h> + +#include "socfpga_private.h" + + +unsigned int plat_get_syscnt_freq2(void) +{ + return PLAT_SYS_COUNTER_FREQ_IN_TICKS; +} + +unsigned long socfpga_get_ns_image_entrypoint(void) +{ + return PLAT_NS_IMAGE_OFFSET; +} + +/****************************************************************************** + * Gets SPSR for BL32 entry + *****************************************************************************/ +uint32_t socfpga_get_spsr_for_bl32_entry(void) +{ + /* + * The Secure Payload Dispatcher service is responsible for + * setting the SPSR prior to entry into the BL32 image. + */ + return 0; +} + +/****************************************************************************** + * Gets SPSR for BL33 entry + *****************************************************************************/ +uint32_t socfpga_get_spsr_for_bl33_entry(void) +{ + unsigned long el_status; + unsigned int mode; + uint32_t spsr; + + /* Figure out what mode we enter the non-secure world in */ + el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; + el_status &= ID_AA64PFR0_ELX_MASK; + + mode = (el_status) ? MODE_EL2 : MODE_EL1; + + /* + * TODO: Consider the possibility of specifying the SPSR in + * the FIP ToC and allowing the platform to have a say as + * well. + */ + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + diff --git a/plat/intel/soc/common/bl2_plat_mem_params_desc.c b/plat/intel/soc/common/bl2_plat_mem_params_desc.c new file mode 100644 index 0000000..187c53a --- /dev/null +++ b/plat/intel/soc/common/bl2_plat_mem_params_desc.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/bl_common.h> +#include <common/desc_image_load.h> +#include <platform_def.h> +#include <plat/common/platform.h> + + +/******************************************************************************* + * Following descriptor provides BL image/ep information that gets used + * by BL2 to load the images and also subset of this information is + * passed to next BL image. The image loading sequence is managed by + * populating the images in required loading order. The image execution + * sequence is managed by populating the `next_handoff_image_id` with + * the next executable image id. + ******************************************************************************/ +static bl_mem_params_node_t bl2_mem_params_descs[] = { +#ifdef SCP_BL2_BASE + /* Fill SCP_BL2 related information if it exists */ + { + .image_id = SCP_BL2_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY, + VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, 0), + .image_info.image_base = SCP_BL2_BASE, + .image_info.image_max_size = SCP_BL2_SIZE, + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, +#endif /* SCP_BL2_BASE */ + +#ifdef EL3_PAYLOAD_BASE + /* Fill EL3 payload related information (BL31 is EL3 payload)*/ + { + .image_id = BL31_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = EL3_PAYLOAD_BASE, + .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, + IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING), + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + +#else /* EL3_PAYLOAD_BASE */ + + /* Fill BL31 related information */ + { + .image_id = BL31_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = BL31_BASE, + .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP), + .image_info.image_base = BL31_BASE, + .image_info.image_max_size = BL31_LIMIT - BL31_BASE, + + .next_handoff_image_id = BL33_IMAGE_ID, + }, +#endif /* EL3_PAYLOAD_BASE */ + + { + .image_id = BL33_IMAGE_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE), + .ep_info.pc = PLAT_NS_IMAGE_OFFSET, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = PLAT_NS_IMAGE_OFFSET, + .image_info.image_max_size = + 0x0 + 0x40000000 - PLAT_NS_IMAGE_OFFSET, + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, +}; + +REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs) diff --git a/plat/intel/soc/common/drivers/ccu/ncore_ccu.c b/plat/intel/soc/common/drivers/ccu/ncore_ccu.c new file mode 100644 index 0000000..684a625 --- /dev/null +++ b/plat/intel/soc/common/drivers/ccu/ncore_ccu.c @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include <assert.h> +#include <common/debug.h> +#include <errno.h> +#include <lib/mmio.h> +#include <platform_def.h> + +#include "ncore_ccu.h" +#include "socfpga_plat_def.h" +#include "socfpga_system_manager.h" + +uint32_t poll_active_bit(uint32_t dir); + +#define SMMU_DMI 1 + + +static coh_ss_id_t subsystem_id; +void get_subsystem_id(void) +{ + uint32_t snoop_filter, directory, coh_agent; + snoop_filter = CSIDR_NUM_SF(mmio_read_32(NCORE_CCU_CSR(NCORE_CSIDR))); + directory = CSUIDR_NUM_DIR(mmio_read_32(NCORE_CCU_CSR(NCORE_CSUIDR))); + coh_agent = CSUIDR_NUM_CAI(mmio_read_32(NCORE_CCU_CSR(NCORE_CSUIDR))); + subsystem_id.num_snoop_filter = snoop_filter + 1; + subsystem_id.num_directory = directory; + subsystem_id.num_coh_agent = coh_agent; +} +uint32_t directory_init(void) +{ + uint32_t dir_sf_mtn, dir_sf_en; + uint32_t dir, sf, ret; + for (dir = 0; dir < subsystem_id.num_directory; dir++) { + for (sf = 0; sf < subsystem_id.num_snoop_filter; sf++) { + dir_sf_mtn = DIRECTORY_UNIT(dir, NCORE_DIRUSFMCR); + dir_sf_en = DIRECTORY_UNIT(dir, NCORE_DIRUSFER); + /* Initialize All Entries */ + mmio_write_32(dir_sf_mtn, SNOOP_FILTER_ID(sf)); + /* Poll Active Bit */ + ret = poll_active_bit(dir); + if (ret != 0) { + ERROR("Timeout during active bit polling"); + return -ETIMEDOUT; + } + /* Disable snoop filter, a bit per snoop filter */ + mmio_clrbits_32(dir_sf_en, BIT(sf)); + } + } + return 0; +} +uint32_t coherent_agent_intfc_init(void) +{ + uint32_t dir, ca, ca_id, ca_type, ca_snoop_en; + for (dir = 0; dir < subsystem_id.num_directory; dir++) { + for (ca = 0; ca < subsystem_id.num_coh_agent; ca++) { + ca_snoop_en = DIRECTORY_UNIT(ca, NCORE_DIRUCASER0); + ca_id = mmio_read_32(COH_AGENT_UNIT(ca, NCORE_CAIUIDR)); + /* Coh Agent Snoop Enable */ + if (CACHING_AGENT_BIT(ca_id)) + mmio_setbits_32(ca_snoop_en, BIT(ca)); + /* Coh Agent Snoop DVM Enable */ + ca_type = CACHING_AGENT_TYPE(ca_id); + if (ca_type == ACE_W_DVM || ca_type == ACE_L_W_DVM) + mmio_setbits_32(NCORE_CCU_CSR(NCORE_CSADSER0), + BIT(ca)); + } + } + return 0; +} +uint32_t poll_active_bit(uint32_t dir) +{ + uint32_t timeout = 80000; + uint32_t poll_dir = DIRECTORY_UNIT(dir, NCORE_DIRUSFMAR); + while (timeout > 0) { + if (mmio_read_32(poll_dir) == 0) + return 0; + timeout--; + } + return -1; +} +void bypass_ocram_firewall(void) +{ + mmio_clrbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF1), + OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK); + mmio_clrbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF2), + OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK); + mmio_clrbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF3), + OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK); + mmio_clrbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF4), + OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK); +} +void ncore_enable_ocram_firewall(void) +{ + mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF1), + OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK); + mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF2), + OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK); + mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF3), + OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK); + mmio_setbits_32(COH_CPU0_BYPASS_REG(NCORE_FW_OCRAM_BLK_CGF4), + OCRAM_PRIVILEGED_MASK | OCRAM_SECURE_MASK); +} +uint32_t init_ncore_ccu(void) +{ + uint32_t status; + get_subsystem_id(); + status = directory_init(); + status = coherent_agent_intfc_init(); + bypass_ocram_firewall(); + return status; +} + +void setup_smmu_stream_id(void) +{ + /* Configure Stream ID for 
Agilex5 */ + mmio_write_32(SOCFPGA_SYSMGR(DMA_TBU_STREAM_ID_AX_REG_0_DMA0), DMA0); + mmio_write_32(SOCFPGA_SYSMGR(DMA_TBU_STREAM_ID_AX_REG_0_DMA1), DMA1); + mmio_write_32(SOCFPGA_SYSMGR(SDM_TBU_STREAM_ID_AX_REG_1_SDM), SDM); + /* Reg map showing USB2 but Linux USB0? */ + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_USB2), USB0); + /* Reg map showing USB3 but Linux USB1? */ + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_USB3), USB1); + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_SDMMC), SDMMC); + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_NAND), NAND); + /* To confirm ETR - core sight debug*/ + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_ETR), CORE_SIGHT_DEBUG); + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_TSN0), TSN0); + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_TSN1), TSN1); + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_ID_AX_REG_2_TSN2), TSN2); + + /* Enabled Stream ctrl register for Agilex5 */ + mmio_write_32(SOCFPGA_SYSMGR(DMA_TBU_STREAM_CTRL_REG_0_DMA0), ENABLE_STREAMID); + mmio_write_32(SOCFPGA_SYSMGR(DMA_TBU_STREAM_CTRL_REG_0_DMA1), ENABLE_STREAMID); + mmio_write_32(SOCFPGA_SYSMGR(SDM_TBU_STREAM_CTRL_REG_1_SDM), ENABLE_STREAMID_SECURE_TX); + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_CTRL_REG_2_USB2), ENABLE_STREAMID); + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_CTRL_REG_2_USB3), ENABLE_STREAMID); + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_CTRL_REG_2_SDMMC), ENABLE_STREAMID); + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_CTRL_REG_2_NAND), ENABLE_STREAMID); + mmio_write_32(SOCFPGA_SYSMGR(IO_TBU_STREAM_CTRL_REG_2_ETR), ENABLE_STREAMID); + mmio_write_32(SOCFPGA_SYSMGR(TSN_TBU_STREAM_CTRL_REG_3_TSN0), ENABLE_STREAMID); + mmio_write_32(SOCFPGA_SYSMGR(TSN_TBU_STREAM_CTRL_REG_3_TSN1), ENABLE_STREAMID); + mmio_write_32(SOCFPGA_SYSMGR(TSN_TBU_STREAM_CTRL_REG_3_TSN2), ENABLE_STREAMID); +} diff --git a/plat/intel/soc/common/drivers/ccu/ncore_ccu.h b/plat/intel/soc/common/drivers/ccu/ncore_ccu.h new file mode 100644 index 0000000..6cdbeb8 --- /dev/null +++ b/plat/intel/soc/common/drivers/ccu/ncore_ccu.h @@ -0,0 +1,431 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef NCORE_CCU_H +#define NCORE_CCU_H + +#include <stdbool.h> +#include <stdint.h> + +#ifndef CCU_ACTIVATE_COH_FPGA +#define CCU_ACTIVATE_COH_FPGA 0 +#endif +// Address map for ccu init +#define addr_CAIUIDR1 (0x1C000000) +#define addr_GRBUNRRUCR (0x1c0ffff8) +#define base_addr_NRS_CAIU0 (0x1c000000) +#define base_addr_NRS_NCAIU0 (0x1c001000) +#define base_addr_NRS_NCAIU1 (0x1c002000) +#define base_addr_NRS_NCAIU2 (0x1c003000) +#define base_addr_NRS_NCAIU3 (0x1c004000) +#define base_addr_NRS_DCE0 (0x1c005000) +#define base_addr_NRS_DCE1 (0x1c006000) +//#define base_addr_NRS_DMI0 (0x1c007000) +//#define base_addr_NRS_DMI1 (0x1c008000) +//DMI +#define ALT_CCU_CCU_DMI0_DMIUSMCTCR_ADDR 0x1C007300 +#define ALT_CCU_CCU_DMI1_DMIUSMCTCR_ADDR 0x1C008300 +//DSU +#define ALT_CCU_DSU_CAIUAMIGR_ADDR 0x1C0003C0 +#define ALT_CCU_DSU_CAIUMIFSR_ADDR 0x1C0003C4 +#define ALT_CCU_DSU_CAIUGPRBLR1_ADDR 0x1C000414 +#define ALT_CCU_DSU_CAIUGPRBHR1_ADDR 0x1C000418 +#define ALT_CCU_DSU_CAIUGPRAR1_ADDR 0x1C000410 +#define ALT_CCU_DSU_CAIUGPRBLR2_ADDR 0x1C000424 +#define ALT_CCU_DSU_CAIUGPRBHR2_ADDR 0x1C000428 +#define ALT_CCU_DSU_CAIUGPRAR2_ADDR 0x1C000420 +#define ALT_CCU_DSU_CAIUGPRBLR4_ADDR 0x1C000444 +#define ALT_CCU_DSU_CAIUGPRBHR4_ADDR 0x1C000448 +#define ALT_CCU_DSU_CAIUGPRAR4_ADDR 0x1C000440 +#define ALT_CCU_DSU_CAIUGPRBLR5_ADDR 0x1C000454 +#define ALT_CCU_DSU_CAIUGPRBHR5_ADDR 0x1C000458 +#define ALT_CCU_DSU_CAIUGPRAR5_ADDR 0x1C000450 +#define ALT_CCU_DSU_CAIUGPRBLR6_ADDR 0x1C000464 +#define ALT_CCU_DSU_CAIUGPRBHR6_ADDR 0x1C000468 +#define ALT_CCU_DSU_CAIUGPRAR6_ADDR 0x1C000460 +#define ALT_CCU_DSU_CAIUGPRBLR7_ADDR 0x1C000474 +#define ALT_CCU_DSU_CAIUGPRBHR7_ADDR 0x1C000478 +#define ALT_CCU_DSU_CAIUGPRAR7_ADDR 0x1C000470 +#define ALT_CCU_DSU_CAIUGPRBLR8_ADDR 0x1C000484 +#define ALT_CCU_DSU_CAIUGPRBHR8_ADDR 0x1C000488 +#define ALT_CCU_DSU_CAIUGPRAR8_ADDR 0x1C000480 +#define ALT_CCU_DSU_CAIUGPRBLR9_ADDR 0x1C000494 +#define ALT_CCU_DSU_CAIUGPRBHR9_ADDR 0x1C000498 +#define ALT_CCU_DSU_CAIUGPRAR9_ADDR 0x1C000490 +#define ALT_CCU_DSU_CAIUGPRBLR10_ADDR 0x1C0004A4 +#define ALT_CCU_DSU_CAIUGPRBHR10_ADDR 0x1C0004A8 +#define ALT_CCU_DSU_CAIUGPRAR10_ADDR 0x1C0004A0 +//GIC +#define ALT_CCU_GIC_M_XAIUAMIGR_ADDR 0x1C0023C0 +#define ALT_CCU_GIC_M_XAIUMIFSR_ADDR 0x1C0023C4 +#define ALT_CCU_GIC_M_XAIUGPRBLR1_ADDR 0x1C002414 +#define ALT_CCU_GIC_M_XAIUGPRBHR1_ADDR 0x1C002418 +#define ALT_CCU_GIC_M_XAIUGPRAR1_ADDR 0x1C002410 +#define ALT_CCU_GIC_M_XAIUGPRBLR6_ADDR 0x1C002464 +#define ALT_CCU_GIC_M_XAIUGPRBHR6_ADDR 0x1C002468 +#define ALT_CCU_GIC_M_XAIUGPRAR6_ADDR 0x1C002460 +#define ALT_CCU_GIC_M_XAIUGPRBLR8_ADDR 0x1C002484 +#define ALT_CCU_GIC_M_XAIUGPRBHR8_ADDR 0x1C002488 +#define ALT_CCU_GIC_M_XAIUGPRAR8_ADDR 0x1C002480 +#define ALT_CCU_GIC_M_XAIUGPRBLR10_ADDR 0x1C0024A4 +#define ALT_CCU_GIC_M_XAIUGPRBHR10_ADDR 0x1C0024A8 +#define ALT_CCU_GIC_M_XAIUGPRAR10_ADDR 0x1C0024A0 +//FPGA2SOC +#define ALT_CCU_FPGA2SOC_XAIUAMIGR_ADDR 0x1C0013C0 +#define ALT_CCU_FPGA2SOC_XAIUMIFSR_ADDR 0x1C0013C4 +#define ALT_CCU_FPGA2SOC_XAIUGPRBLR1_ADDR 0x1C001414 +#define ALT_CCU_FPGA2SOC_XAIUGPRBHR1_ADDR 0x1C001418 +#define ALT_CCU_FPGA2SOC_XAIUGPRAR1_ADDR 0x1C001410 +#define ALT_CCU_FPGA2SOC_XAIUGPRBLR6_ADDR 0x1C001464 +#define ALT_CCU_FPGA2SOC_XAIUGPRBHR6_ADDR 0x1C001468 +#define ALT_CCU_FPGA2SOC_XAIUGPRAR6_ADDR 0x1C001460 +#define ALT_CCU_FPGA2SOC_XAIUGPRBLR8_ADDR 0x1C001484 +#define ALT_CCU_FPGA2SOC_XAIUGPRBHR8_ADDR 0x1C001488 +#define ALT_CCU_FPGA2SOC_XAIUGPRAR8_ADDR 0x1C001480 +#define 
ALT_CCU_FPGA2SOC_XAIUGPRBLR10_ADDR 0x1C0014A4 +#define ALT_CCU_FPGA2SOC_XAIUGPRBHR10_ADDR 0x1C0014A8 +#define ALT_CCU_FPGA2SOC_XAIUGPRAR10_ADDR 0x1C0014A0 +//TCU +#define ALT_CCU_TCU_BASE 0x1C003000 +#define ALT_CCU_TCU_XAIUAMIGR_ADDR ALT_CCU_TCU_BASE + 0x03C0 +#define ALT_CCU_TCU_XAIUMIFSR_ADDR ALT_CCU_TCU_BASE + 0x03C4 +#define ALT_CCU_TCU_XAIUGPRBLR0_ADDR ALT_CCU_TCU_BASE + 0x0404 +#define ALT_CCU_TCU_XAIUGPRBHR0_ADDR ALT_CCU_TCU_BASE + 0x0408 +#define ALT_CCU_TCU_XAIUGPRAR0_ADDR ALT_CCU_TCU_BASE + 0x0400 +#define ALT_CCU_TCU_XAIUGPRBLR1_ADDR ALT_CCU_TCU_BASE + 0x0414 +#define ALT_CCU_TCU_XAIUGPRBHR1_ADDR ALT_CCU_TCU_BASE + 0x0418 +#define ALT_CCU_TCU_XAIUGPRAR1_ADDR ALT_CCU_TCU_BASE + 0x0410 +#define ALT_CCU_TCU_XAIUGPRBLR2_ADDR ALT_CCU_TCU_BASE + 0x0424 +#define ALT_CCU_TCU_XAIUGPRBHR2_ADDR ALT_CCU_TCU_BASE + 0x0428 +#define ALT_CCU_TCU_XAIUGPRAR2_ADDR ALT_CCU_TCU_BASE + 0x0420 +#define ALT_CCU_TCU_XAIUGPRBLR6_ADDR 0x1C003464 +#define ALT_CCU_TCU_XAIUGPRBHR6_ADDR 0x1C003468 +#define ALT_CCU_TCU_XAIUGPRAR6_ADDR 0x1C003460 +#define ALT_CCU_TCU_XAIUGPRBLR8_ADDR 0x1C003484 +#define ALT_CCU_TCU_XAIUGPRBHR8_ADDR 0x1C003488 +#define ALT_CCU_TCU_XAIUGPRAR8_ADDR 0x1C003480 +#define ALT_CCU_TCU_XAIUGPRBLR10_ADDR 0x1C0034A4 +#define ALT_CCU_TCU_XAIUGPRBHR10_ADDR 0x1C0034A8 +#define ALT_CCU_TCU_XAIUGPRAR10_ADDR 0x1C0034A0 +//IOM +#define ALT_CCU_CCU_IOM_XAIUAMIGR_ADDR 0x1C0043C0 +#define ALT_CCU_CCU_IOM_XAIUMIFSR_ADDR 0x1C0013C4 +#define ALT_CCU_IOM_XAIUGPRBLR1_ADDR 0x1C001414 +#define ALT_CCU_IOM_XAIUGPRBHR1_ADDR 0x1C001418 +#define ALT_CCU_IOM_XAIUGPRAR1_ADDR 0x1C001410 +#define ALT_CCU_CCU_IOM_XAIUGPRBLR6_ADDR 0x1C001464 +#define ALT_CCU_CCU_IOM_XAIUGPRBHR6_ADDR 0x1C001468 +#define ALT_CCU_CCU_IOM_XAIUGPRAR6_ADDR 0x1C001460 +#define ALT_CCU_CCU_IOM_XAIUGPRBLR8_ADDR 0x1C001484 +#define ALT_CCU_CCU_IOM_XAIUGPRBHR8_ADDR 0x1C001488 +#define ALT_CCU_CCU_IOM_XAIUGPRAR8_ADDR 0x1C001480 +#define ALT_CCU_CCU_IOM_XAIUGPRBLR10_ADDR 0x1C0014A4 +#define ALT_CCU_CCU_IOM_XAIUGPRBHR10_ADDR 0x1C0014A8 +#define ALT_CCU_CCU_IOM_XAIUGPRAR10_ADDR 0x1C0014A0 +//DCE +#define ALT_CCU_DCE0_DCEUAMIGR_ADDR 0x1C0053C0 +#define ALT_CCU_DCE0_DCEUMIFSR_ADDR 0x1C0053C4 +#define ALT_CCU_DCE0_DCEUGPRBLR6_ADDR 0x1C005464 +#define ALT_CCU_DCE0_DCEUGPRBHR6_ADDR 0x1C005468 +#define ALT_CCU_DCE0_DCEUGPRAR6_ADDR 0x1C005460 +#define ALT_CCU_DCE0_DCEUGPRBLR8_ADDR 0x1C005484 +#define ALT_CCU_DCE0_DCEUGPRBHR8_ADDR 0x1C005488 +#define ALT_CCU_DCE0_DCEUGPRAR8_ADDR 0x1C005480 +#define ALT_CCU_DCE0_DCEUGPRBLR10_ADDR 0x1C0054A4 +#define ALT_CCU_DCE0_DCEUGPRBHR10_ADDR 0x1C0054A8 +#define ALT_CCU_DCE0_DCEUGPRAR10_ADDR 0x1C0054A0 +#define ALT_CCU_DCE1_DCEUAMIGR_ADDR 0x1C0063C0 +#define ALT_CCU_DCE1_DCEUMIFSR_ADDR 0x1C0063C4 +#define ALT_CCU_DCE1_DCEUGPRBLR6_ADDR 0x1C006464 +#define ALT_CCU_DCE1_DCEUGPRBHR6_ADDR 0x1C006468 +#define ALT_CCU_DCE1_DCEUGPRAR6_ADDR 0x1C006460 +#define ALT_CCU_DCE1_DCEUGPRBLR8_ADDR 0x1C006484 +#define ALT_CCU_DCE1_DCEUGPRBHR8_ADDR 0x1C006488 +#define ALT_CCU_DCE1_DCEUGPRAR8_ADDR 0x1C006480 +#define ALT_CCU_DCE1_DCEUGPRBLR10_ADDR 0x1C0064A4 +#define ALT_CCU_DCE1_DCEUGPRBHR10_ADDR 0x1C0064A8 +#define ALT_CCU_DCE1_DCEUGPRAR10_ADDR 0x1C0064A0 +#define offset_NRS_GPRAR0 (0x400) +#define offset_NRS_GPRBLR0 (0x404) +#define offset_NRS_GPRBHR0 (0x408) +#define offset_NRS_GPRAR1 (0x410) +#define offset_NRS_GPRBLR1 (0x414) +#define offset_NRS_GPRBHR1 (0x418) +#define offset_NRS_GPRAR2 (0x420) +#define offset_NRS_GPRBLR2 (0x424) +#define offset_NRS_GPRBHR2 (0x428) +#define offset_NRS_GPRAR3 (0x430) +#define offset_NRS_GPRBLR3 
(0x434) +#define offset_NRS_GPRBHR3 (0x438) +#define offset_NRS_GPRAR4 (0x440) +#define offset_NRS_GPRBLR4 (0x444) +#define offset_NRS_GPRBHR4 (0x448) +#define offset_NRS_GPRAR5 (0x450) +#define offset_NRS_GPRBLR5 (0x454) +#define offset_NRS_GPRBHR5 (0x458) +#define offset_NRS_GPRAR6 (0x460) +#define offset_NRS_GPRBLR6 (0x464) +#define offset_NRS_GPRBHR6 (0x468) +#define offset_NRS_GPRAR7 (0x470) +#define offset_NRS_GPRBLR7 (0x474) +#define offset_NRS_GPRBHR7 (0x478) +#define offset_NRS_GPRAR8 (0x480) +#define offset_NRS_GPRBLR8 (0x484) +#define offset_NRS_GPRBHR8 (0x488) +#define offset_NRS_GPRAR9 (0x490) +#define offset_NRS_GPRBLR9 (0x494) +#define offset_NRS_GPRBHR9 (0x498) +#define offset_NRS_GPRAR10 (0x4a0) +#define offset_NRS_GPRBLR10 (0x4a4) +#define offset_NRS_GPRBHR10 (0x4a8) +#define offset_NRS_AMIGR (0x3c0) +#define offset_NRS_MIFSR (0x3c4) +#define offset_NRS_DMIUSMCTCR (0x300) +#define base_addr_DII0_PSSPERIPHS (0x10000) +#define base_addr_DII0_LWHPS2FPGA (0x20000) +#define base_addr_DII0_HPS2FPGA_1G (0x40000) +#define base_addr_DII0_HPS2FPGA_15G (0x400000) +#define base_addr_DII0_HPS2FPGA_240G (0x4000000) +#define base_addr_DII1_MPFEREGS (0x18000) +#define base_addr_DII2_GICREGS (0x1D000) +#define base_addr_DII3_OCRAM (0x0) +#define base_addr_BHR (0x0) +#define base_addr_DMI_SDRAM_2G (0x80000) +#define base_addr_DMI_SDRAM_30G (0x800000) +#define base_addr_DMI_SDRAM_480G (0x8000000) +// ((0x0<<9) | (0xf<<20) | (0x1<<30) | (0x1<<31)) +#define wr_DII0_PSSPERIPHS 0xC0F00000 +// ((0x0<<9) | (0x11<<20) | (0x1<<30) | (0x1<<31)) +#define wr_DII0_LWHPS2FPGA 0xC1100000 +// ((0x0<<9) | (0x12<<20) | (0x1<<30) | (0x1<<31)) +#define wr_DII0_HPS2FPGA_1G 0xC1200000 +// ((0x0<<9) | (0x16<<20) | (0x1<<30) | (0x1<<31)) +#define wr_DII0_HPS2FPGA_15G 0xC1600000 +// ((0x0<<9) | (0x1a<<20) | (0x1<<30) | (0x1<<31)) +#define wr_DII0_HPS2FPGA_240G 0xC1A00000 +// ((0x1<<9) | (0xe<<20) | (0x1<<30) | (0x1<<31)) +#define wr_DII1_MPFEREGS 0xC0E00200 +// ((0x2<<9) | (0x8<<20) | (0x1<<30) | (0x1<<31)) +#define wr_DII2_GICREGS 0xC0800400 +// ((0x3<<9) | (0x9<<20) | (0x1<<30) | (0x1<<31)) +#define wr_DII3_OCRAM 0xC0900600 +// ((0x0<<9) | (0x12<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_1G_ORDERED 0x81200000 +// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x12<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_1G 0x81200006 +// ((0x0<<9) | (0x13<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_2G_ORDERED 0x81300000 +// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x13<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_2G 0x81300006 +// ((0x0<<9) | (0x16<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_15G_ORDERED 0x81600000 +// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x16<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_15G 0x81600006 +// ((0x0<<9) | (0x17<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_30G_ORDERED 0x81700000 +// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x17<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_30G 0x81700006 +// ((0x0<<9) | (0x1a<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_240G_ORDERED 0x81a00000 +// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x1a<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_240G 0x81a00006 +// ((0x0<<9) | (0x1b<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_480G_ORDERED 0x81b00000 +// ((0x1<<1) | (0x1<<2) | (0x0<<9) | (0x1b<<20) | (0x0<<30) | (0x1<<31)) +#define wr_DMI_SDRAM_480G 0x81b00006 + +typedef enum CCU_REGION_SECURITY_e { + // + // Allow secure accesses only. + // + CCU_REGION_SECURITY_SECURE_ONLY, + // + // Allow non-secure accesses only. 
+ // + CCU_REGION_SECURITY_NON_SECURE_ONLY, + // + // Allow accesses of any security state. + // + CCU_REGION_SECURITY_DONT_CARE +} CCU_REGION_SECURITY_t; +typedef enum CCU_REGION_PRIVILEGE_e { + // + // Allow privileged accesses only. + // + CCU_REGION_PRIVILEGE_PRIVILEGED_ONLY, + // + // Allow unprivileged accesses only. + // + CCU_REGION_PRIVILEGE_NON_PRIVILEGED_ONLY, + // + // Allow accesses of any privilege. + // + CCU_REGION_PRIVILEGE_DONT_CARE +} CCU_REGION_PRIVILEGE_t; +// +// Initializes the CCU by enabling all regions except RAM 1 - 5. +// This is needed because of an RTL change around 2016.02.24. +// +// Runtime measurement: +// - arm : 14,830,000 ps (2016.05.31; sanity/printf_aarch32) +// - aarch64 : 14,837,500 ps (2016.05.31; sanity/printf) +// +// Runtime history: +// - arm : 20,916,668 ps (2016.05.30; sanity/printf_aarch32) +// - aarch64 : 20,924,168 ps (2016.05.30; sanity/printf) +// +int ccu_hps_init(void); + +typedef enum ccu_hps_ram_region_e { + ccu_hps_ram_region_ramspace0 = 0, + ccu_hps_ram_region_ramspace1 = 1, + ccu_hps_ram_region_ramspace2 = 2, + ccu_hps_ram_region_ramspace3 = 3, + ccu_hps_ram_region_ramspace4 = 4, + ccu_hps_ram_region_ramspace5 = 5, +} ccu_hps_ram_region_t; + +// Disables a RAM (OCRAM) region with the given ID. +int ccu_hps_ram_region_disable(int id); + +// Enables a RAM (OCRAM) region with the given ID. +int ccu_hps_ram_region_enable(int id); + +// Attempts to remap a RAM (OCRAM) region with the given ID to span the given +// start and end address. It also assigns the security and privilege policy. +// Regions must be a power-of-two in size with a minimum size of 64B. +int ccu_hps_ram_region_remap(int id, uintptr_t start, uintptr_t end, +CCU_REGION_SECURITY_t security, CCU_REGION_PRIVILEGE_t privilege); + +// Verifies that all enabled RAM (OCRAM) regions do not overlap. +int ccu_hps_ram_validate(void); + +typedef enum ccu_hps_mem_region_e { + ccu_hps_mem_region_ddrspace0 = 0, + ccu_hps_mem_region_memspace0 = 1, + ccu_hps_mem_region_memspace1a = 2, + ccu_hps_mem_region_memspace1b = 3, + ccu_hps_mem_region_memspace1c = 4, + ccu_hps_mem_region_memspace1d = 5, + ccu_hps_mem_region_memspace1e = 6, +} ccu_hps_mem_region_t; + +// Disables mem0 (DDR) region with the given ID. +int ccu_hps_mem0_region_disable(int id); + +// Enables mem0 (DDR) region with the given ID. +int ccu_hps_mem0_region_enable(int id); + +// Attempts to remap mem0 (DDR) region with the given ID to span the given +// start and end address. It also assigns the security and privilege policy. +// Regions must be a power-of-two in size with a minimum size of 64B. +int ccu_hps_mem0_region_remap(int id, uintptr_t start, uintptr_t end, +CCU_REGION_SECURITY_t security, CCU_REGION_PRIVILEGE_t privilege); + +// Verifies that all enabled mem0 (DDR) regions do not overlap. +int ccu_hps_mem0_validate(void); + +typedef enum ccu_hps_ios_region_e { + ccu_hps_ios_region_iospace0a = 0, + ccu_hps_ios_region_iospace0b = 1, + ccu_hps_ios_region_iospace1a = 2, + ccu_hps_ios_region_iospace1b = 3, + ccu_hps_ios_region_iospace1c = 4, + ccu_hps_ios_region_iospace1d = 5, + ccu_hps_ios_region_iospace1e = 6, + ccu_hps_ios_region_iospace1f = 7, + ccu_hps_ios_region_iospace1g = 8, + ccu_hps_ios_region_iospace2a = 9, + ccu_hps_ios_region_iospace2b = 10, + ccu_hps_ios_region_iospace2c = 11, +} ccu_hps_ios_region_t; + +// Disables the IOS (IO Slave) region with the given ID. +int ccu_hps_ios_region_disable(int id); +
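The ccu_hps_*_region_remap() helpers above all follow the same pattern: pick a region ID from the matching enum, describe a power-of-two window, attach a security and privilege policy, enable the region, then validate for overlaps. A minimal usage sketch, assuming the end address is inclusive as the comments imply; the function name and addresses are illustrative only and not part of this patch:

static int example_secure_ddr_window(void)
{
	int ret;

	/* 1 MiB power-of-two window in mem0, restricted to secure, privileged accesses. */
	ret = ccu_hps_mem0_region_remap(ccu_hps_mem_region_memspace0,
					0x00100000, 0x001fffff,
					CCU_REGION_SECURITY_SECURE_ONLY,
					CCU_REGION_PRIVILEGE_PRIVILEGED_ONLY);
	if (ret != 0)
		return ret;

	ret = ccu_hps_mem0_region_enable(ccu_hps_mem_region_memspace0);
	if (ret != 0)
		return ret;

	/* Reject configurations where the enabled mem0 regions overlap. */
	return ccu_hps_mem0_validate();
}

+// Enables the IOS (IO Slave) region with the given ID.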
+int ccu_hps_ios_region_enable(int id); + + +#define NCORE_CCU_OFFSET 0xf7000000 + +/* Coherent Sub-System Address Map */ +#define NCORE_CAIU_OFFSET 0x00000 +#define NCORE_CAIU_SIZE 0x01000 +#define NCORE_NCBU_OFFSET 0x60000 +#define NCORE_NCBU_SIZE 0x01000 +#define NCORE_DIRU_OFFSET 0x80000 +#define NCORE_DIRU_SIZE 0x01000 +#define NCORE_CMIU_OFFSET 0xc0000 +#define NCORE_CMIU_SIZE 0x01000 +#define NCORE_CSR_OFFSET 0xff000 +#define NCORE_CSADSERO 0x00040 +#define NCORE_CSUIDR 0x00ff8 +#define NCORE_CSIDR 0x00ffc +/* Directory Unit Register Map */ +#define NCORE_DIRUSFER 0x00010 +#define NCORE_DIRUMRHER 0x00070 +#define NCORE_DIRUSFMCR 0x00080 +#define NCORE_DIRUSFMAR 0x00084 +/* Coherent Agent Interface Unit Register Map */ +#define NCORE_CAIUIDR 0x00ffc +/* Snoop Enable Register */ +#define NCORE_DIRUCASER0 0x00040 +#define NCORE_DIRUCASER1 0x00044 +#define NCORE_DIRUCASER2 0x00048 +#define NCORE_DIRUCASER3 0x0004c +#define NCORE_CSADSER0 0x00040 +#define NCORE_CSADSER1 0x00044 +#define NCORE_CSADSER2 0x00048 +#define NCORE_CSADSER3 0x0004c +/* Protocols Definition */ +#define ACE_W_DVM 0 +#define ACE_L_W_DVM 1 +#define ACE_WO_DVM 2 +#define ACE_L_WO_DVM 3 +/* Bypass OC Ram Firewall */ +#define NCORE_FW_OCRAM_BLK_BASE 0x100200 +#define NCORE_FW_OCRAM_BLK_CGF1 0x04 +#define NCORE_FW_OCRAM_BLK_CGF2 0x08 +#define NCORE_FW_OCRAM_BLK_CGF3 0x0c +#define NCORE_FW_OCRAM_BLK_CGF4 0x10 +#define OCRAM_PRIVILEGED_MASK BIT(29) +#define OCRAM_SECURE_MASK BIT(30) +/* Macros */ +#define NCORE_CCU_REG(base) (NCORE_CCU_OFFSET + (base)) +#define NCORE_CCU_CSR(reg) (NCORE_CCU_REG(NCORE_CSR_OFFSET)\ + + (reg)) +#define NCORE_CCU_DIR(reg) (NCORE_CCU_REG(NCORE_DIRU_OFFSET)\ + + (reg)) +#define NCORE_CCU_CAI(reg) (NCORE_CCU_REG(NCORE_CAIU_OFFSET)\ + + (reg)) +#define DIRECTORY_UNIT(x, reg) (NCORE_CCU_DIR(reg)\ + + NCORE_DIRU_SIZE * (x)) +#define COH_AGENT_UNIT(x, reg) (NCORE_CCU_CAI(reg)\ + + NCORE_CAIU_SIZE * (x)) +#define COH_CPU0_BYPASS_REG(reg) (NCORE_CCU_REG(NCORE_FW_OCRAM_BLK_BASE)\ + + (reg)) +#define CSUIDR_NUM_CMI(x) (((x) & 0x3f000000) >> 24) +#define CSUIDR_NUM_DIR(x) (((x) & 0x003f0000) >> 16) +#define CSUIDR_NUM_NCB(x) (((x) & 0x00003f00) >> 8) +#define CSUIDR_NUM_CAI(x) (((x) & 0x0000007f) >> 0) +#define CSIDR_NUM_SF(x) (((x) & 0x007c0000) >> 18) +#define SNOOP_FILTER_ID(x) (((x) << 16)) +#define CACHING_AGENT_BIT(x) (((x) & 0x08000) >> 15) +#define CACHING_AGENT_TYPE(x) (((x) & 0xf0000) >> 16) + +typedef struct coh_ss_id { + uint8_t num_coh_mem; + uint8_t num_directory; + uint8_t num_non_coh_bridge; + uint8_t num_coh_agent; + uint8_t num_snoop_filter; +} coh_ss_id_t; + +uint32_t init_ncore_ccu(void); +void ncore_enable_ocram_firewall(void); +void setup_smmu_stream_id(void); + +#endif diff --git a/plat/intel/soc/common/drivers/combophy/combophy.c b/plat/intel/soc/common/drivers/combophy/combophy.c new file mode 100644 index 0000000..6c53bc1 --- /dev/null +++ b/plat/intel/soc/common/drivers/combophy/combophy.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2022-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> +#include <stdbool.h> +#include <string.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/cadence/cdns_sdmmc.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/utils.h> + +#include "combophy.h" +#include "sdmmc/sdmmc.h" + +/* Temp assigned handoff data, need to remove when SDM up and run. 
 */ +void config_nand(handoff *hoff_ptr) +{ + /* This is a hardcoded input value for the Combo PHY and SD host controller. */ + hoff_ptr->peripheral_pwr_gate_array = 0x40; + +} + +/* DFI configuration */ +int dfi_select(handoff *hoff_ptr) +{ + uint32_t data = 0; + + /* Temp assigned handoff data, need to remove when SDM up and run. */ + handoff reverse_handoff_ptr; + + /* Temp assigned handoff data, need to remove when SDM up and run. */ + config_nand(&reverse_handoff_ptr); + + if (((reverse_handoff_ptr.peripheral_pwr_gate_array) & PERIPHERAL_SDMMC_MASK) == 0U) { + ERROR("SDMMC/NAND is not set properly\n"); + return -ENXIO; + } + + mmio_setbits_32(SOCFPGA_SYSMGR(DFI_INTF), + (((reverse_handoff_ptr.peripheral_pwr_gate_array) & + PERIPHERAL_SDMMC_MASK) >> PERIPHERAL_SDMMC_OFFSET)); + data = mmio_read_32(SOCFPGA_SYSMGR(DFI_INTF)); + if ((data & DFI_INTF_MASK) != (((reverse_handoff_ptr.peripheral_pwr_gate_array) & + PERIPHERAL_SDMMC_MASK) >> PERIPHERAL_SDMMC_OFFSET)) { + ERROR("DFI is not set properly\n"); + return -ENXIO; + } + + return 0; +} + +int combo_phy_init(handoff *hoff_ptr) +{ + /* SDMMC/NAND DFI selection based on system manager DFI register */ + int ret = dfi_select(hoff_ptr); + + if (ret != 0) { + ERROR("DFI configuration failed\n"); + return ret; + } + + return 0; +} diff --git a/plat/intel/soc/common/drivers/combophy/combophy.h b/plat/intel/soc/common/drivers/combophy/combophy.h new file mode 100644 index 0000000..ca571f7 --- /dev/null +++ b/plat/intel/soc/common/drivers/combophy/combophy.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2022-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef COMBOPHY_H +#define COMBOPHY_H + +#include <lib/mmio.h> + +#include "socfpga_handoff.h" + +#define PERIPHERAL_SDMMC_MASK 0x60 +#define PERIPHERAL_SDMMC_OFFSET 6 +#define DFI_INTF_MASK 0x1 + +/* FUNCTION DEFINITION */ +/* + * @brief Combo PHY initialization function + * + * @hoff_ptr: Pointer to the hand-off data + * Return: 0 on success, a negative errno on failure + */ +int combo_phy_init(handoff *hoff_ptr); +int dfi_select(handoff *hoff_ptr); + +#endif diff --git a/plat/intel/soc/common/drivers/ddr/ddr.c b/plat/intel/soc/common/drivers/ddr/ddr.c new file mode 100644 index 0000000..188302f --- /dev/null +++ b/plat/intel/soc/common/drivers/ddr/ddr.c @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> +#include <common/debug.h> +#include "ddr.h" +#include <lib/mmio.h> +#include "socfpga_handoff.h" + +int ddr_calibration_check(void) +{ + // DDR calibration check + int status = 0; + uint32_t u32data_read = 0; + + NOTICE("DDR: Access address 0x%x:...\n", IO96B_0_REG_BASE); + u32data_read = mmio_read_32(IO96B_0_REG_BASE); + NOTICE("DDR: Access address 0x%x: read 0x%04x\n", IO96B_0_REG_BASE, u32data_read); + + if (u32data_read == -EPERM) { + status = -EPERM; + assert(u32data_read); + } + + u32data_read = 0x0; + NOTICE("DDR: Access address 0x%x: ...\n", IO96B_1_REG_BASE); + u32data_read = mmio_read_32(IO96B_1_REG_BASE); + NOTICE("DDR: Access address 0x%x: read 0x%04x\n", IO96B_1_REG_BASE, u32data_read); + + if (u32data_read == -EPERM) { + status = -EPERM; + assert(u32data_read); + } + + return status; +} + +int iossm_mb_init(void) +{ + // int status; + + // Update according to IOSSM mailbox spec + + // if (status) { + // return status; + // } + + return 0; +} + +int wait_respond(uint16_t timeout) +{ + uint32_t status = 0; + uint32_t count = 0; + uint32_t data = 0; + + /* Wait status command response ready */ + do { + data = mmio_read_32(IO96B_CSR_REG(CMD_RESPONSE_STATUS)); + count++; + if (count >= timeout) { + return -ETIMEDOUT; + } + + } while (STATUS_COMMAND_RESPONSE(data) != STATUS_COMMAND_RESPONSE_READY); + + status = (data & STATUS_GENERAL_ERROR_MASK) >> STATUS_GENERAL_ERROR_OFFSET; + if (status != 0) { + return status; + } + + status = (data & STATUS_CMD_RESPONSE_ERROR_MASK) >> STATUS_CMD_RESPONSE_ERROR_OFFSET; + if (status != 0) { + return status; + } + + return status; +} + +int iossm_mb_read_response(void) +{ + uint32_t status = 0; + unsigned int i; + uint32_t resp_data[IOSSM_RESP_MAX_WORD_SIZE]; + uint32_t resp_param_reg; + + // Check STATUS_CMD_RESPONSE_DATA_PTR_VALID in + // STATUS_COMMAND_RESPONSE to ensure data pointer response + + /* Read CMD_RESPONSE_STATUS and CMD_RESPONSE_DATA_* */ + resp_data[0] = mmio_read_32(IO96B_CSR_REG(CMD_RESPONSE_STATUS)); + resp_data[0] = (resp_data[0] & CMD_RESPONSE_DATA_SHORT_MASK) >> + CMD_RESPONSE_DATA_SHORT_OFFSET; + resp_param_reg = CMD_RESPONSE_STATUS; + for (i = 1; i < IOSSM_RESP_MAX_WORD_SIZE; i++) { + resp_param_reg = resp_param_reg - CMD_RESPONSE_OFFSET; + resp_data[i] = mmio_read_32(IO96B_CSR_REG(resp_param_reg)); + } + + /* Wait for STATUS_COMMAND_RESPONSE_READY*/ + status = wait_respond(1000); + + /* Read CMD_RESPONSE_STATUS and CMD_RESPONSE_DATA_* */ + mmio_setbits_32(STATUS_COMMAND_RESPONSE(IO96B_CSR_REG( + CMD_RESPONSE_STATUS)), + STATUS_COMMAND_RESPONSE_READY_CLEAR); + + return status; +} + +int iossm_mb_send(uint32_t cmd_target_ip_type, uint32_t cmd_target_ip_instance_id, + uint32_t cmd_type, uint32_t cmd_opcode, uint32_t *args, + unsigned int len) +{ + unsigned int i; + uint32_t status = 0; + uint32_t cmd_req; + uint32_t cmd_param_reg; + + cmd_target_ip_type = (cmd_target_ip_type & CMD_TARGET_IP_TYPE_MASK) << + CMD_TARGET_IP_TYPE_OFFSET; + cmd_target_ip_instance_id = (cmd_target_ip_instance_id & + CMD_TARGET_IP_INSTANCE_ID_MASK) << + CMD_TARGET_IP_INSTANCE_ID_OFFSET; + cmd_type = (cmd_type & CMD_TYPE_MASK) << CMD_TYPE_OFFSET; + cmd_opcode = (cmd_opcode & CMD_OPCODE_MASK) << CMD_OPCODE_OFFSET; + cmd_req = cmd_target_ip_type | cmd_target_ip_instance_id | cmd_type | + cmd_opcode; + + /* send mailbox request */ + IOSSM_MB_WRITE(IO96B_CSR_REG(CMD_REQ), cmd_req); + if (len != 0) { + cmd_param_reg = CMD_REQ; + for (i = 0; i < len; i++) { + 
cmd_param_reg = cmd_param_reg - CMD_PARAM_OFFSET; + IOSSM_MB_WRITE(IO96B_CSR_REG(cmd_param_reg), args[i]); + } + } + + status = iossm_mb_read_response(); + if (status != 0) { + return status; + } + + return status; +} + +int ddr_iossm_mailbox_cmd(uint32_t cmd_opcode) +{ + // IOSSM + uint32_t status = 0; + unsigned int i = 0; + uint32_t payload[IOSSM_CMD_MAX_WORD_SIZE] = {0U}; + + switch (cmd_opcode) { + case CMD_INIT: + status = iossm_mb_init(); + break; + + case OPCODE_GET_MEM_INTF_INFO: + status = iossm_mb_send(0, 0, MBOX_CMD_GET_SYS_INFO, + OPCODE_GET_MEM_INTF_INFO, payload, i); + break; + + case OPCODE_GET_MEM_TECHNOLOGY: + status = iossm_mb_send(0, 0, MBOX_CMD_GET_MEM_INFO, + OPCODE_GET_MEM_TECHNOLOGY, payload, i); + break; + + case OPCODE_GET_MEM_WIDTH_INFO: + status = iossm_mb_send(0, 0, MBOX_CMD_GET_MEM_INFO, + OPCODE_GET_MEM_WIDTH_INFO, payload, i); + break; + + case OPCODE_ECC_ENABLE_STATUS: + status = iossm_mb_send(0, 0, + MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_ECC_ENABLE_STATUS, + payload, i); + break; + + case OPCODE_ECC_INTERRUPT_MASK: + // payload[i] = CMD_PARAM_0 [16:0]: ECC_INTERRUPT_MASK + status = iossm_mb_send(0, 0, + MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_ECC_INTERRUPT_MASK, + payload, i); + break; + + case OPCODE_ECC_SCRUB_MODE_0_START: + // payload[i] = CMD_PARAM_0 [15:0]: ECC_SCRUB_INTERVAL + //i++; + // payload[i] = CMD_PARAM_1 [11:0]: ECC_SCRUB_LEN + //i++; + // payload[i] = CMD_PARAM_2 [0:0]: ECC_SCRUB_FULL_MEM + //i++; + // payload[i]= CMD_PARAM_3 [31:0]: ECC_SCRUB_START_ADDR [31:0] + //i++; + // payload[i] = CMD_PARAM_4 [5:0]: ECC_SCRUB_START_ADDR [36:32] + //i++; + // payload[i] = CMD_PARAM_5 [31:0]: ECC_SCRUB_END_ADDR [31:0] + //i++; + // payload[i] = CMD_PARAM_6 [5:0]: ECC_SCRUB_END_ADDR [36:32] + //i++; + status = iossm_mb_send(0, 0, + MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_ECC_SCRUB_MODE_0_START, + payload, i); + break; + + case OPCODE_ECC_SCRUB_MODE_1_START: + // payload[i] = CMD_PARAM_0 [15:0]: ECC_SCRUB_IDLE_CNT + //i++; + // payload[i] = CMD_PARAM_1 [11:0]: ECC_SCRUB_LEN + //i++; + // payload[i] = CMD_PARAM_2 [0:0]: ECC_SCRUB_FULL_MEM + //i++; + // payload[i] = CMD_PARAM_3 [31:0]: ECC_SCRUB_START_ADDR [31:0] + //i++; + // payload[i] = CMD_PARAM_4 [5:0]: ECC_SCRUB_START_ADDR [36:32] + //i++; + // payload[i] = CMD_PARAM_5 [31:0]: ECC_SCRUB_END_ADDR [31:0] + //i++; + // payload[i] = CMD_PARAM_6 [5:0]: ECC_SCRUB_END_ADDR [36:32] + //i++; + status = iossm_mb_send(0, 0, + MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_ECC_SCRUB_MODE_1_START, + payload, i); + break; + + case OPCODE_BIST_RESULTS_STATUS: + status = iossm_mb_send(0, 0, + MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_BIST_RESULTS_STATUS, + payload, i); + break; + + case OPCODE_BIST_MEM_INIT_START: + status = iossm_mb_send(0, 0, + MBOX_CMD_TRIG_CONTROLLER_OP, OPCODE_BIST_MEM_INIT_START, + payload, i); + break; + + case OPCODE_TRIG_MEM_CAL: + status = iossm_mb_send(0, 0, MBOX_CMD_TRIG_MEM_CAL_OP, + OPCODE_TRIG_MEM_CAL, payload, i); + break; + + default: + break; + } + + if (status == -EPERM) { + assert(status); + } + + return status; +} + +int ddr_config_handoff(handoff *hoff_ptr) +{ + /* Populate DDR handoff data */ + /* TODO: To add in DDR handoff configuration once available */ + return 0; +} + +// DDR firewall and non secure access +void ddr_enable_ns_access(void) +{ + /* Please set the ddr non secure registers accordingly */ + + mmio_setbits_32(CCU_REG(DMI0_DMIUSMCTCR), + CCU_DMI_ALLOCEN | CCU_DMI_LOOKUPEN); + mmio_setbits_32(CCU_REG(DMI1_DMIUSMCTCR), + CCU_DMI_ALLOCEN | CCU_DMI_LOOKUPEN); + + /* TODO: To add in CCU NCORE 
OCRAM bypass mask for non secure registers */ + NOTICE("DDR non secure configured\n"); +} + +void ddr_enable_firewall(void) +{ + /* Please set the ddr firewall registers accordingly */ + /* TODO: To add in CCU NCORE OCRAM bypass mask for firewall registers */ + NOTICE("DDR firewall enabled\n"); +} + +bool is_ddr_init_in_progress(void) +{ + uint32_t reg = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0)); + + if (reg & SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_0_MASK) { + return true; + } + return false; +} + +int ddr_init(void) +{ + // DDR driver initialization + int status = -EPERM; + uint32_t cmd_opcode = 0; + + // Check and set Boot Scratch Register + if (is_ddr_init_in_progress()) { + return status; + } + mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0), 0x01); + + // Populate DDR handoff data + handoff reverse_handoff_ptr; + + if (!socfpga_get_handoff(&reverse_handoff_ptr)) { + assert(status); + } + status = ddr_config_handoff(&reverse_handoff_ptr); + if (status == -EPERM) { + assert(status); + } + + // CCU and firewall setup + ddr_enable_ns_access(); + ddr_enable_firewall(); + + // DDR calibration check + status = ddr_calibration_check(); + if (status == -EPERM) { + assert(status); + } + + // DDR mailbox command + status = ddr_iossm_mailbox_cmd(cmd_opcode); + if (status != 0) { + assert(status); + } + + // Check and set Boot Scratch Register + mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0), 0x00); + + NOTICE("DDR init successfully\n"); + return status; +} diff --git a/plat/intel/soc/common/drivers/ddr/ddr.h b/plat/intel/soc/common/drivers/ddr/ddr.h new file mode 100644 index 0000000..416b64e --- /dev/null +++ b/plat/intel/soc/common/drivers/ddr/ddr.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2022-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef DDR_H +#define DDR_H + +#include <lib/mmio.h> +#include "socfpga_handoff.h" + +/* MACRO DEFINATION */ +#define IO96B_0_REG_BASE 0x18400000 +#define IO96B_1_REG_BASE 0x18800000 +#define IO96B_CSR_BASE 0x05000000 +#define IO96B_CSR_REG(reg) (IO96B_CSR_BASE + reg) + +#define IOSSM_CMD_MAX_WORD_SIZE 7U +#define IOSSM_RESP_MAX_WORD_SIZE 4U + +#define CCU_REG_BASE 0x1C000000 +#define DMI0_DMIUSMCTCR 0x7300 +#define DMI1_DMIUSMCTCR 0x8300 +#define CCU_DMI_ALLOCEN BIT(1) +#define CCU_DMI_LOOKUPEN BIT(2) +#define CCU_REG(reg) (CCU_REG_BASE + reg) + +// CMD_RESPONSE_STATUS Register +#define CMD_RESPONSE_STATUS 0x45C +#define CMD_RESPONSE_OFFSET 0x4 +#define CMD_RESPONSE_DATA_SHORT_MASK GENMASK(31, 16) +#define CMD_RESPONSE_DATA_SHORT_OFFSET 16 +#define STATUS_CMD_RESPONSE_ERROR_MASK GENMASK(7, 5) +#define STATUS_CMD_RESPONSE_ERROR_OFFSET 5 +#define STATUS_GENERAL_ERROR_MASK GENMASK(4, 1) +#define STATUS_GENERAL_ERROR_OFFSET 1 +#define STATUS_COMMAND_RESPONSE_READY 0x1 +#define STATUS_COMMAND_RESPONSE_READY_CLEAR 0x0 +#define STATUS_COMMAND_RESPONSE_READY_MASK 0x1 +#define STATUS_COMMAND_RESPONSE_READY_OFFSET 0 +#define STATUS_COMMAND_RESPONSE(x) (((x) & \ + STATUS_COMMAND_RESPONSE_READY_MASK) >> \ + STATUS_COMMAND_RESPONSE_READY_OFFSET) + +// CMD_REQ Register +#define CMD_STATUS 0x400 +#define CMD_PARAM 0x438 +#define CMD_REQ 0x43C +#define CMD_PARAM_OFFSET 0x4 +#define CMD_TARGET_IP_TYPE_MASK GENMASK(31, 29) +#define CMD_TARGET_IP_TYPE_OFFSET 29 +#define CMD_TARGET_IP_INSTANCE_ID_MASK GENMASK(28, 24) +#define CMD_TARGET_IP_INSTANCE_ID_OFFSET 24 +#define CMD_TYPE_MASK GENMASK(23, 16) +#define CMD_TYPE_OFFSET 16 +#define CMD_OPCODE_MASK GENMASK(15, 0) +#define CMD_OPCODE_OFFSET 0 + +#define CMD_INIT 0 + 
+#define OPCODE_GET_MEM_INTF_INFO 0x0001 +#define OPCODE_GET_MEM_TECHNOLOGY 0x0002 +#define OPCODE_GET_MEM_WIDTH_INFO 0x0004 +#define OPCODE_TRIG_MEM_CAL 0x000A +#define OPCODE_ECC_ENABLE_STATUS 0x0102 +#define OPCODE_ECC_INTERRUPT_MASK 0x0105 +#define OPCODE_ECC_SCRUB_MODE_0_START 0x0202 +#define OPCODE_ECC_SCRUB_MODE_1_START 0x0203 +#define OPCODE_BIST_RESULTS_STATUS 0x0302 +#define OPCODE_BIST_MEM_INIT_START 0x0303 +// Please update according to IOSSM mailbox spec +#define MBOX_ID_IOSSM 0x00 +#define MBOX_CMD_GET_SYS_INFO 0x01 +// Please update according to IOSSM mailbox spec +#define MBOX_CMD_GET_MEM_INFO 0x02 +#define MBOX_CMD_TRIG_CONTROLLER_OP 0x04 +#define MBOX_CMD_TRIG_MEM_CAL_OP 0x05 +#define MBOX_CMD_POKE_REG 0xFD +#define MBOX_CMD_PEEK_REG 0xFE +#define MBOX_CMD_GET_DEBUG_LOG 0xFF +// Please update according to IOSSM mailbox spec +#define MBOX_CMD_DIRECT 0x00 + +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_0_MASK 0x01 + +#define IOSSM_MB_WRITE(addr, data) mmio_write_32(addr, data) + +/* FUNCTION DEFINATION */ +int ddr_calibration_check(void); + +int iossm_mb_init(void); + +int iossm_mb_read_response(void); + +int iossm_mb_send(uint32_t cmd_target_ip_type, uint32_t cmd_target_ip_instance_id, + uint32_t cmd_type, uint32_t cmd_opcode, uint32_t *args, + unsigned int len); + +int ddr_iossm_mailbox_cmd(uint32_t cmd); + +int ddr_init(void); + +int ddr_config_handoff(handoff *hoff_ptr); + +void ddr_enable_ns_access(void); + +void ddr_enable_firewall(void); + +bool is_ddr_init_in_progress(void); + +#endif diff --git a/plat/intel/soc/common/drivers/nand/nand.c b/plat/intel/soc/common/drivers/nand/nand.c new file mode 100644 index 0000000..c6acbe3 --- /dev/null +++ b/plat/intel/soc/common/drivers/nand/nand.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022-2023, Intel Corporation. All rights reserved. 
 + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> +#include <stdbool.h> +#include <string.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/cadence/cdns_nand.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/utils.h> +#include "nand.h" + +#include "agilex5_pinmux.h" +#include "combophy/combophy.h" + +/* Pinmux configuration */ +static void nand_pinmux_config(void) +{ + mmio_write_32(SOCFPGA_PINMUX(PIN0SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN1SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN2SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN3SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN4SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN5SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN6SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN7SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN8SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN9SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN10SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN11SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN12SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN13SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN14SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN16SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN17SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN18SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN19SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN20SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN21SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN22SEL), SOCFPGA_PINMUX_SEL_NAND); + mmio_write_32(SOCFPGA_PINMUX(PIN23SEL), SOCFPGA_PINMUX_SEL_NAND); +} + +int nand_init(handoff *hoff_ptr) +{ + /* NAND pin mux configuration */ + nand_pinmux_config(); + + return cdns_nand_host_init(); +} diff --git a/plat/intel/soc/common/drivers/nand/nand.h b/plat/intel/soc/common/drivers/nand/nand.h new file mode 100644 index 0000000..b060a72 --- /dev/null +++ b/plat/intel/soc/common/drivers/nand/nand.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2022-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef NAND_H +#define NAND_H + +#include <lib/mmio.h> +#include "socfpga_handoff.h" + +/* FUNCTION DEFINITION */ +/* + * @brief Nand controller initialization function + * + * @hoff_ptr: Pointer to the hand-off data + * Return: 0 on success, a negative errno on failure + */ +int nand_init(handoff *hoff_ptr); + +#endif diff --git a/plat/intel/soc/common/drivers/qspi/cadence_qspi.c b/plat/intel/soc/common/drivers/qspi/cadence_qspi.c new file mode 100644 index 0000000..da8a8bd --- /dev/null +++ b/plat/intel/soc/common/drivers/qspi/cadence_qspi.c @@ -0,0 +1,823 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <common/debug.h> +#include <lib/mmio.h> +#include <string.h> +#include <drivers/delay_timer.h> +#include <drivers/console.h> + +#include "cadence_qspi.h" +#include "socfpga_plat_def.h" + +#define LESS(a, b) (((a) < (b)) ? 
(a) : (b)) +#define MORE(a, b) (((a) > (b)) ? (a) : (b)) + + +uint32_t qspi_device_size; +int cad_qspi_cs; + +int cad_qspi_idle(void) +{ + return (mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG) + & CAD_QSPI_CFG_IDLE) >> 31; +} + +int cad_qspi_set_baudrate_div(uint32_t div) +{ + if (div > 0xf) + return CAD_INVALID; + + mmio_clrsetbits_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG, + ~CAD_QSPI_CFG_BAUDDIV_MSK, + CAD_QSPI_CFG_BAUDDIV(div)); + + return 0; +} + +int cad_qspi_configure_dev_size(uint32_t addr_bytes, + uint32_t bytes_per_dev, uint32_t bytes_per_block) +{ + + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DEVSZ, + CAD_QSPI_DEVSZ_ADDR_BYTES(addr_bytes) | + CAD_QSPI_DEVSZ_BYTES_PER_PAGE(bytes_per_dev) | + CAD_QSPI_DEVSZ_BYTES_PER_BLOCK(bytes_per_block)); + return 0; +} + +int cad_qspi_set_read_config(uint32_t opcode, uint32_t instr_type, + uint32_t addr_type, uint32_t data_type, + uint32_t mode_bit, uint32_t dummy_clk_cycle) +{ + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DEVRD, + CAD_QSPI_DEV_OPCODE(opcode) | + CAD_QSPI_DEV_INST_TYPE(instr_type) | + CAD_QSPI_DEV_ADDR_TYPE(addr_type) | + CAD_QSPI_DEV_DATA_TYPE(data_type) | + CAD_QSPI_DEV_MODE_BIT(mode_bit) | + CAD_QSPI_DEV_DUMMY_CLK_CYCLE(dummy_clk_cycle)); + + return 0; +} + +int cad_qspi_set_write_config(uint32_t opcode, uint32_t addr_type, + uint32_t data_type, uint32_t dummy_clk_cycle) +{ + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DEVWR, + CAD_QSPI_DEV_OPCODE(opcode) | + CAD_QSPI_DEV_ADDR_TYPE(addr_type) | + CAD_QSPI_DEV_DATA_TYPE(data_type) | + CAD_QSPI_DEV_DUMMY_CLK_CYCLE(dummy_clk_cycle)); + + return 0; +} + +int cad_qspi_timing_config(uint32_t clkphase, uint32_t clkpol, uint32_t csda, + uint32_t csdads, uint32_t cseot, uint32_t cssot, + uint32_t rddatacap) +{ + uint32_t cfg = mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG); + + cfg &= CAD_QSPI_CFG_SELCLKPHASE_CLR_MSK & + CAD_QSPI_CFG_SELCLKPOL_CLR_MSK; + cfg |= CAD_QSPI_SELCLKPHASE(clkphase) | CAD_QSPI_SELCLKPOL(clkpol); + + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG, cfg); + + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_DELAY, + CAD_QSPI_DELAY_CSSOT(cssot) | CAD_QSPI_DELAY_CSEOT(cseot) | + CAD_QSPI_DELAY_CSDADS(csdads) | CAD_QSPI_DELAY_CSDA(csda)); + + return 0; +} + +int cad_qspi_stig_cmd_helper(int cs, uint32_t cmd) +{ + uint32_t count = 0; + + /* chip select */ + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG, + (mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG) + & CAD_QSPI_CFG_CS_MSK) | CAD_QSPI_CFG_CS(cs)); + + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD, cmd); + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD, + cmd | CAD_QSPI_FLASHCMD_EXECUTE); + + do { + uint32_t reg = mmio_read_32(CAD_QSPI_OFFSET + + CAD_QSPI_FLASHCMD); + if (!(reg & CAD_QSPI_FLASHCMD_EXECUTE_STAT)) + break; + count++; + } while (count < CAD_QSPI_COMMAND_TIMEOUT); + + if (count >= CAD_QSPI_COMMAND_TIMEOUT) { + ERROR("Error sending QSPI command %x, timed out\n", + cmd); + return CAD_QSPI_ERROR; + } + + return 0; +} + +int cad_qspi_stig_cmd(uint32_t opcode, uint32_t dummy) +{ + if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) { + ERROR("Faulty dummy bytes\n"); + return -1; + } + + return cad_qspi_stig_cmd_helper(cad_qspi_cs, + CAD_QSPI_FLASHCMD_OPCODE(opcode) | + CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES(dummy)); +} + +int cad_qspi_stig_read_cmd(uint32_t opcode, uint32_t dummy, uint32_t num_bytes, + uint32_t *output) +{ + if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) { + ERROR("Faulty dummy byes\n"); + return -1; + } + + if ((num_bytes > 8) || (num_bytes == 0)) + return -1; + + uint32_t cmd = + 
CAD_QSPI_FLASHCMD_OPCODE(opcode) | + CAD_QSPI_FLASHCMD_ENRDDATA(1) | + CAD_QSPI_FLASHCMD_NUMRDDATABYTES(num_bytes - 1) | + CAD_QSPI_FLASHCMD_ENCMDADDR(0) | + CAD_QSPI_FLASHCMD_ENMODEBIT(0) | + CAD_QSPI_FLASHCMD_NUMADDRBYTES(0) | + CAD_QSPI_FLASHCMD_ENWRDATA(0) | + CAD_QSPI_FLASHCMD_NUMWRDATABYTES(0) | + CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy); + + if (cad_qspi_stig_cmd_helper(cad_qspi_cs, cmd)) { + ERROR("failed to send stig cmd\n"); + return -1; + } + + output[0] = mmio_read_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_RDDATA0); + + if (num_bytes > 4) { + output[1] = mmio_read_32(CAD_QSPI_OFFSET + + CAD_QSPI_FLASHCMD_RDDATA1); + } + + return 0; +} + +int cad_qspi_stig_wr_cmd(uint32_t opcode, uint32_t dummy, uint32_t num_bytes, + uint32_t *input) +{ + if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) { + ERROR("Faulty dummy byes\n"); + return -1; + } + + if ((num_bytes > 8) || (num_bytes == 0)) + return -1; + + uint32_t cmd = CAD_QSPI_FLASHCMD_OPCODE(opcode) | + CAD_QSPI_FLASHCMD_ENRDDATA(0) | + CAD_QSPI_FLASHCMD_NUMRDDATABYTES(0) | + CAD_QSPI_FLASHCMD_ENCMDADDR(0) | + CAD_QSPI_FLASHCMD_ENMODEBIT(0) | + CAD_QSPI_FLASHCMD_NUMADDRBYTES(0) | + CAD_QSPI_FLASHCMD_ENWRDATA(1) | + CAD_QSPI_FLASHCMD_NUMWRDATABYTES(num_bytes - 1) | + CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy); + + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_WRDATA0, input[0]); + + if (num_bytes > 4) + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_WRDATA1, + input[1]); + + return cad_qspi_stig_cmd_helper(cad_qspi_cs, cmd); +} + +int cad_qspi_stig_addr_cmd(uint32_t opcode, uint32_t dummy, uint32_t addr) +{ + uint32_t cmd; + + if (dummy > ((1 << CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX) - 1)) + return -1; + + cmd = CAD_QSPI_FLASHCMD_OPCODE(opcode) | + CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(dummy) | + CAD_QSPI_FLASHCMD_ENCMDADDR(1) | + CAD_QSPI_FLASHCMD_NUMADDRBYTES(2); + + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_FLASHCMD_ADDR, addr); + + return cad_qspi_stig_cmd_helper(cad_qspi_cs, cmd); +} + +int cad_qspi_device_bank_select(uint32_t bank) +{ + int status = 0; + + status = cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WREN, 0); + if (status != 0) + return status; + + status = cad_qspi_stig_wr_cmd(CAD_QSPI_STIG_OPCODE_WREN_EXT_REG, + 0, 1, &bank); + if (status != 0) + return status; + + return cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WRDIS, 0); +} + +int cad_qspi_device_status(uint32_t *status) +{ + return cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDSR, 0, 1, status); +} + +#if CAD_QSPI_MICRON_N25Q_SUPPORT +int cad_qspi_n25q_enable(void) +{ + cad_qspi_set_read_config(QSPI_FAST_READ, CAD_QSPI_INST_SINGLE, + CAD_QSPI_ADDR_FASTREAD, CAT_QSPI_ADDR_SINGLE_IO, 1, + 0); + cad_qspi_set_write_config(QSPI_WRITE, 0, 0, 0); + + return 0; +} + +int cad_qspi_n25q_wait_for_program_and_erase(int program_only) +{ + uint32_t status, flag_sr; + int count = 0; + + while (count < CAD_QSPI_COMMAND_TIMEOUT) { + status = cad_qspi_device_status(&status); + if (status != 0) { + ERROR("Error getting device status\n"); + return -1; + } + if (!CAD_QSPI_STIG_SR_BUSY(status)) + break; + count++; + } + + if (count >= CAD_QSPI_COMMAND_TIMEOUT) { + ERROR("Timed out waiting for idle\n"); + return -1; + } + + count = 0; + + while (count < CAD_QSPI_COMMAND_TIMEOUT) { + status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDFLGSR, + 0, 1, &flag_sr); + if (status != 0) { + ERROR("Error waiting program and erase.\n"); + return status; + } + + if ((program_only && + CAD_QSPI_STIG_FLAGSR_PROGRAMREADY(flag_sr)) || + (!program_only && + CAD_QSPI_STIG_FLAGSR_ERASEREADY(flag_sr))) + 
break; + } + + if (count >= CAD_QSPI_COMMAND_TIMEOUT) + ERROR("Timed out waiting for program and erase\n"); + + if ((program_only && CAD_QSPI_STIG_FLAGSR_PROGRAMERROR(flag_sr)) || + (!program_only && + CAD_QSPI_STIG_FLAGSR_ERASEERROR(flag_sr))) { + ERROR("Error programming/erasing flash\n"); + cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_CLFSR, 0); + return -1; + } + + return 0; +} +#endif + +int cad_qspi_indirect_read_start_bank(uint32_t flash_addr, uint32_t num_bytes) +{ + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDRDSTADDR, flash_addr); + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDRDCNT, num_bytes); + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDRD, + CAD_QSPI_INDRD_START | + CAD_QSPI_INDRD_IND_OPS_DONE); + + return 0; +} + + +int cad_qspi_indirect_write_start_bank(uint32_t flash_addr, + uint32_t num_bytes) +{ + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDWRSTADDR, flash_addr); + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDWRCNT, num_bytes); + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_INDWR, + CAD_QSPI_INDWR_START | + CAD_QSPI_INDWR_INDDONE); + + return 0; +} + +int cad_qspi_indirect_write_finish(void) +{ +#if CAD_QSPI_MICRON_N25Q_SUPPORT + return cad_qspi_n25q_wait_for_program_and_erase(1); +#else + return 0; +#endif + +} + +int cad_qspi_enable(void) +{ + int status; + + mmio_setbits_32(CAD_QSPI_OFFSET + CAD_QSPI_CFG, CAD_QSPI_CFG_ENABLE); + +#if CAD_QSPI_MICRON_N25Q_SUPPORT + status = cad_qspi_n25q_enable(); + if (status != 0) + return status; +#endif + return 0; +} + +int cad_qspi_enable_subsector_bank(uint32_t addr) +{ + int status = 0; + + status = cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WREN, 0); + if (status != 0) + return status; + + status = cad_qspi_stig_addr_cmd(CAD_QSPI_STIG_OPCODE_SUBSEC_ERASE, 0, + addr); + if (status != 0) + return status; + +#if CAD_QSPI_MICRON_N25Q_SUPPORT + status = cad_qspi_n25q_wait_for_program_and_erase(0); +#endif + return status; +} + +int cad_qspi_erase_subsector(uint32_t addr) +{ + int status = 0; + + status = cad_qspi_device_bank_select(addr >> 24); + if (status != 0) + return status; + + return cad_qspi_enable_subsector_bank(addr); +} + +int cad_qspi_erase_sector(uint32_t addr) +{ + int status = 0; + + status = cad_qspi_device_bank_select(addr >> 24); + if (status != 0) + return status; + + status = cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_WREN, 0); + if (status != 0) + return status; + + status = cad_qspi_stig_addr_cmd(CAD_QSPI_STIG_OPCODE_SEC_ERASE, 0, + addr); + if (status != 0) + return status; + +#if CAD_QSPI_MICRON_N25Q_SUPPORT + status = cad_qspi_n25q_wait_for_program_and_erase(0); +#endif + return status; +} + +void cad_qspi_calibration(uint32_t dev_clk, uint32_t qspi_clk_mhz) +{ + int status; + uint32_t dev_sclk_mhz = 27; /*min value to get biggest 0xF div factor*/ + uint32_t data_cap_delay; + uint32_t sample_rdid; + uint32_t rdid; + uint32_t div_actual; + uint32_t div_bits; + int first_pass, last_pass; + + /*1. Set divider to bigger value (slowest SCLK) + *2. RDID and save the value + */ + div_actual = (qspi_clk_mhz + (dev_sclk_mhz - 1)) / dev_sclk_mhz; + div_bits = (((div_actual + 1) / 2) - 1); + status = cad_qspi_set_baudrate_div(0xf); + + status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID, + 0, 3, &sample_rdid); + if (status != 0) + return; + + /*3. Set divider to the intended frequency + *4. Set the read delay = 0 + *5. RDID and check whether the value is same as item 2 + *6. Increase read delay and compared the value against item 2 + *7. 
Find the range of read delay that have same as + * item 2 and divide it to 2 + */ + div_actual = (qspi_clk_mhz + (dev_clk - 1)) / dev_clk; + div_bits = (((div_actual + 1) / 2) - 1); + status = cad_qspi_set_baudrate_div(div_bits); + if (status != 0) + return; + + data_cap_delay = 0; + first_pass = -1; + last_pass = -1; + + do { + if (status != 0) + break; + status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID, 0, + 3, &rdid); + if (status != 0) + break; + if (rdid == sample_rdid) { + if (first_pass == -1) + first_pass = data_cap_delay; + else + last_pass = data_cap_delay; + } + + data_cap_delay++; + + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_RDDATACAP, + CAD_QSPI_RDDATACAP_BYP(1) | + CAD_QSPI_RDDATACAP_DELAY(data_cap_delay)); + + } while (data_cap_delay < 0x10); + + if (first_pass > 0) { + int diff = first_pass - last_pass; + + data_cap_delay = first_pass + diff / 2; + } + + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_RDDATACAP, + CAD_QSPI_RDDATACAP_BYP(1) | + CAD_QSPI_RDDATACAP_DELAY(data_cap_delay)); + status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID, 0, 3, &rdid); + + if (status != 0) + return; +} + +int cad_qspi_int_disable(uint32_t mask) +{ + if (cad_qspi_idle() == 0) + return -1; + + if ((CAD_QSPI_INT_STATUS_ALL & mask) == 0) + return -1; + + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_IRQMSK, mask); + return 0; +} + +void cad_qspi_set_chip_select(int cs) +{ + cad_qspi_cs = cs; +} + +int cad_qspi_init(uint32_t desired_clk_freq, uint32_t clk_phase, + uint32_t clk_pol, uint32_t csda, uint32_t csdads, + uint32_t cseot, uint32_t cssot, uint32_t rddatacap) +{ + int status = 0; + uint32_t qspi_desired_clk_freq; + uint32_t rdid = 0; + uint32_t cap_code; + + INFO("Initializing Qspi\n"); + + if (cad_qspi_idle() == 0) { + ERROR("device not idle\n"); + return -1; + } + + + status = cad_qspi_timing_config(clk_phase, clk_pol, csda, csdads, + cseot, cssot, rddatacap); + + if (status != 0) { + ERROR("config set timing failure\n"); + return status; + } + + mmio_write_32(CAD_QSPI_OFFSET + CAD_QSPI_REMAPADDR, + CAD_QSPI_REMAPADDR_VALUE_SET(0)); + + status = cad_qspi_int_disable(CAD_QSPI_INT_STATUS_ALL); + if (status != 0) { + ERROR("failed disable\n"); + return status; + } + + cad_qspi_set_baudrate_div(0xf); + status = cad_qspi_enable(); + if (status != 0) { + ERROR("failed enable\n"); + return status; + } + + qspi_desired_clk_freq = 100; + cad_qspi_calibration(qspi_desired_clk_freq, 50000000); + + status = cad_qspi_stig_read_cmd(CAD_QSPI_STIG_OPCODE_RDID, 0, 3, + &rdid); + + if (status != 0) { + ERROR("Error reading RDID\n"); + return status; + } + + /* + * NOTE: The Size code seems to be a form of BCD (binary coded decimal). + * The first nibble is the 10's digit and the second nibble is the 1's + * digit in the number of bytes. 
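+ * For example, a capacity ID of 0x18 decodes to (1 * 10) + 8 = 18, which the code below turns into a device size of 1 << (18 + 6) bytes = 16 MiB.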
+ * + * Capacity ID samples: + * 0x15 : 16 Mb => 2 MiB => 1 << 21 ; BCD=15 + * 0x16 : 32 Mb => 4 MiB => 1 << 22 ; BCD=16 + * 0x17 : 64 Mb => 8 MiB => 1 << 23 ; BCD=17 + * 0x18 : 128 Mb => 16 MiB => 1 << 24 ; BCD=18 + * 0x19 : 256 Mb => 32 MiB => 1 << 25 ; BCD=19 + * 0x1a + * 0x1b + * 0x1c + * 0x1d + * 0x1e + * 0x1f + * 0x20 : 512 Mb => 64 MiB => 1 << 26 ; BCD=20 + * 0x21 : 1024 Mb => 128 MiB => 1 << 27 ; BCD=21 + */ + + cap_code = CAD_QSPI_STIG_RDID_CAPACITYID(rdid); + + if (!(((cap_code >> 4) > 0x9) || ((cap_code & 0xf) > 0x9))) { + uint32_t decoded_cap = ((cap_code >> 4) * 10) + + (cap_code & 0xf); + qspi_device_size = 1 << (decoded_cap + 6); + INFO("QSPI Capacity: %x\n\n", qspi_device_size); + + } else { + ERROR("Invalid CapacityID encountered: 0x%02x\n", + cap_code); + return -1; + } + + cad_qspi_configure_dev_size(INTEL_QSPI_ADDR_BYTES, + INTEL_QSPI_BYTES_PER_DEV, + INTEL_BYTES_PER_BLOCK); + + INFO("Flash size: %d Bytes\n", qspi_device_size); + + return status; +} + +int cad_qspi_indirect_page_bound_write(uint32_t offset, + uint8_t *buffer, uint32_t len) +{ + int status = 0, i; + uint32_t write_count, write_capacity, *write_data, space, + write_fill_level, sram_partition; + + status = cad_qspi_indirect_write_start_bank(offset, len); + if (status != 0) + return status; + + write_count = 0; + sram_partition = CAD_QSPI_SRAMPART_ADDR(mmio_read_32(CAD_QSPI_OFFSET + + CAD_QSPI_SRAMPART)); + write_capacity = (uint32_t) CAD_QSPI_SRAM_FIFO_ENTRY_COUNT - + sram_partition; + + while (write_count < len) { + write_fill_level = CAD_QSPI_SRAMFILL_INDWRPART( + mmio_read_32(CAD_QSPI_OFFSET + + CAD_QSPI_SRAMFILL)); + space = LESS(write_capacity - write_fill_level, + (len - write_count) / sizeof(uint32_t)); + write_data = (uint32_t *)(buffer + write_count); + for (i = 0; i < space; ++i) + mmio_write_32(CAD_QSPIDATA_OFST, *write_data++); + + write_count += space * sizeof(uint32_t); + } + return cad_qspi_indirect_write_finish(); +} + +int cad_qspi_read_bank(uint8_t *buffer, uint32_t offset, uint32_t size) +{ + int status; + uint32_t read_count = 0, *read_data; + int level = 1, count = 0, i; + + status = cad_qspi_indirect_read_start_bank(offset, size); + + if (status != 0) + return status; + + while (read_count < size) { + do { + level = CAD_QSPI_SRAMFILL_INDRDPART( + mmio_read_32(CAD_QSPI_OFFSET + + CAD_QSPI_SRAMFILL)); + read_data = (uint32_t *)(buffer + read_count); + for (i = 0; i < level; ++i) + *read_data++ = mmio_read_32(CAD_QSPIDATA_OFST); + + read_count += level * sizeof(uint32_t); + count++; + } while (level > 0); + } + + return 0; +} + +int cad_qspi_write_bank(uint32_t offset, uint8_t *buffer, uint32_t size) +{ + int status = 0; + uint32_t page_offset = offset & (CAD_QSPI_PAGE_SIZE - 1); + uint32_t write_size = LESS(size, CAD_QSPI_PAGE_SIZE - page_offset); + + while (size) { + status = cad_qspi_indirect_page_bound_write(offset, buffer, + write_size); + if (status != 0) + break; + + offset += write_size; + buffer += write_size; + size -= write_size; + write_size = LESS(size, CAD_QSPI_PAGE_SIZE); + } + return status; +} + +int cad_qspi_read(void *buffer, uint32_t offset, uint32_t size) +{ + uint32_t bank_count, bank_addr, bank_offset, copy_len; + uint8_t *read_data; + int i, status; + + status = 0; + + if ((offset >= qspi_device_size) || + (offset + size - 1 >= qspi_device_size) || + (size == 0)) { + ERROR("Invalid read parameter\n"); + return -1; + } + + if (CAD_QSPI_INDRD_RD_STAT(mmio_read_32(CAD_QSPI_OFFSET + + CAD_QSPI_INDRD))) { + ERROR("Read in progress\n"); + return -1; + } + + /* + * 
bank_count : Number of bank(s) affected, including partial banks. + * bank_addr : Aligned address of the first bank, + * including partial bank. + * bank_ofst : The offset of the bank to read. + * Only used when reading the first bank. + */ + bank_count = CAD_QSPI_BANK_ADDR(offset + size - 1) - + CAD_QSPI_BANK_ADDR(offset) + 1; + bank_addr = offset & CAD_QSPI_BANK_ADDR_MSK; + bank_offset = offset & (CAD_QSPI_BANK_SIZE - 1); + + read_data = (uint8_t *)buffer; + + copy_len = LESS(size, CAD_QSPI_BANK_SIZE - bank_offset); + + for (i = 0; i < bank_count; ++i) { + status = cad_qspi_device_bank_select(CAD_QSPI_BANK_ADDR( + bank_addr)); + if (status != 0) + break; + status = cad_qspi_read_bank(read_data, bank_offset, copy_len); + if (status != 0) + break; + + bank_addr += CAD_QSPI_BANK_SIZE; + read_data += copy_len; + size -= copy_len; + bank_offset = 0; + copy_len = LESS(size, CAD_QSPI_BANK_SIZE); + } + + return status; +} + +int cad_qspi_erase(uint32_t offset, uint32_t size) +{ + int status = 0; + uint32_t subsector_offset = offset & (CAD_QSPI_SUBSECTOR_SIZE - 1); + uint32_t erase_size = LESS(size, + CAD_QSPI_SUBSECTOR_SIZE - subsector_offset); + + while (size) { + status = cad_qspi_erase_subsector(offset); + if (status != 0) + break; + + offset += erase_size; + size -= erase_size; + erase_size = LESS(size, CAD_QSPI_SUBSECTOR_SIZE); + } + return status; +} + +int cad_qspi_write(void *buffer, uint32_t offset, uint32_t size) +{ + int status, i; + uint32_t bank_count, bank_addr, bank_offset, copy_len; + uint8_t *write_data; + + status = 0; + + if ((offset >= qspi_device_size) || + (offset + size - 1 >= qspi_device_size) || + (size == 0)) { + return -2; + } + + if (CAD_QSPI_INDWR_RDSTAT(mmio_read_32(CAD_QSPI_OFFSET + + CAD_QSPI_INDWR))) { + ERROR("QSPI Error: Write in progress\n"); + return -1; + } + + bank_count = CAD_QSPI_BANK_ADDR(offset + size - 1) - + CAD_QSPI_BANK_ADDR(offset) + 1; + bank_addr = offset & CAD_QSPI_BANK_ADDR_MSK; + bank_offset = offset & (CAD_QSPI_BANK_SIZE - 1); + + write_data = buffer; + + copy_len = LESS(size, CAD_QSPI_BANK_SIZE - bank_offset); + + for (i = 0; i < bank_count; ++i) { + status = cad_qspi_device_bank_select( + CAD_QSPI_BANK_ADDR(bank_addr)); + if (status != 0) + break; + + status = cad_qspi_write_bank(bank_offset, write_data, + copy_len); + if (status != 0) + break; + + bank_addr += CAD_QSPI_BANK_SIZE; + write_data += copy_len; + size -= copy_len; + bank_offset = 0; + + copy_len = LESS(size, CAD_QSPI_BANK_SIZE); + } + return status; +} + +int cad_qspi_update(void *Buffer, uint32_t offset, uint32_t size) +{ + int status = 0; + + status = cad_qspi_erase(offset, size); + if (status != 0) + return status; + + return cad_qspi_write(Buffer, offset, size); +} + +void cad_qspi_reset(void) +{ + cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_RESET_EN, 0); + cad_qspi_stig_cmd(CAD_QSPI_STIG_OPCODE_RESET_MEM, 0); +} + diff --git a/plat/intel/soc/common/drivers/qspi/cadence_qspi.h b/plat/intel/soc/common/drivers/qspi/cadence_qspi.h new file mode 100644 index 0000000..104ab5f --- /dev/null +++ b/plat/intel/soc/common/drivers/qspi/cadence_qspi.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef CAD_QSPI_H +#define CAD_QSPI_H + +#define CAD_QSPI_MICRON_N25Q_SUPPORT 1 + +#define CAD_INVALID -1 +#define CAD_QSPI_ERROR -2 + +#define CAD_QSPI_ADDR_FASTREAD 0 +#define CAD_QSPI_ADDR_FASTREAD_DUAL_IO 1 +#define CAD_QSPI_ADDR_FASTREAD_QUAD_IO 2 +#define CAT_QSPI_ADDR_SINGLE_IO 0 +#define CAT_QSPI_ADDR_DUAL_IO 1 +#define CAT_QSPI_ADDR_QUAD_IO 2 + +#define CAD_QSPI_BANK_ADDR(x) ((x) >> 24) +#define CAD_QSPI_BANK_ADDR_MSK 0xff000000 + +#define CAD_QSPI_COMMAND_TIMEOUT 0x10000000 + +#define CAD_QSPI_CFG 0x0 +#define CAD_QSPI_CFG_BAUDDIV_MSK 0xff87ffff +#define CAD_QSPI_CFG_BAUDDIV(x) (((x) << 19) & 0x780000) +#define CAD_QSPI_CFG_CS_MSK ~0x3c00 +#define CAD_QSPI_CFG_CS(x) (((x) << 11)) +#define CAD_QSPI_CFG_ENABLE (1 << 0) +#define CAD_QSPI_CFG_ENDMA_CLR_MSK 0xffff7fff +#define CAD_QSPI_CFG_IDLE (1U << 31) +#define CAD_QSPI_CFG_SELCLKPHASE_CLR_MSK 0xfffffffb +#define CAD_QSPI_CFG_SELCLKPOL_CLR_MSK 0xfffffffd + +#define CAD_QSPI_DELAY 0xc +#define CAD_QSPI_DELAY_CSSOT(x) (((x) & 0xff) << 0) +#define CAD_QSPI_DELAY_CSEOT(x) (((x) & 0xff) << 8) +#define CAD_QSPI_DELAY_CSDADS(x) (((x) & 0xff) << 16) +#define CAD_QSPI_DELAY_CSDA(x) (((x) & 0xff) << 24) + +#define CAD_QSPI_DEVSZ 0x14 +#define CAD_QSPI_DEVSZ_ADDR_BYTES(x) ((x) << 0) +#define CAD_QSPI_DEVSZ_BYTES_PER_PAGE(x) ((x) << 4) +#define CAD_QSPI_DEVSZ_BYTES_PER_BLOCK(x) ((x) << 16) + +#define CAD_QSPI_DEVWR 0x8 +#define CAD_QSPI_DEVRD 0x4 +#define CAD_QSPI_DEV_OPCODE(x) (((x) & 0xff) << 0) +#define CAD_QSPI_DEV_INST_TYPE(x) (((x) & 0x03) << 8) +#define CAD_QSPI_DEV_ADDR_TYPE(x) (((x) & 0x03) << 12) +#define CAD_QSPI_DEV_DATA_TYPE(x) (((x) & 0x03) << 16) +#define CAD_QSPI_DEV_MODE_BIT(x) (((x) & 0x01) << 20) +#define CAD_QSPI_DEV_DUMMY_CLK_CYCLE(x) (((x) & 0x0f) << 24) + +#define CAD_QSPI_FLASHCMD 0x90 +#define CAD_QSPI_FLASHCMD_ADDR 0x94 +#define CAD_QSPI_FLASHCMD_EXECUTE 0x1 +#define CAD_QSPI_FLASHCMD_EXECUTE_STAT 0x2 +#define CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES_MAX 5 +#define CAD_QSPI_FLASHCMD_NUM_DUMMYBYTES(x) (((x) << 7) & 0x000f80) +#define CAD_QSPI_FLASHCMD_OPCODE(x) (((x) & 0xff) << 24) +#define CAD_QSPI_FLASHCMD_ENRDDATA(x) (((x) & 1) << 23) +#define CAD_QSPI_FLASHCMD_NUMRDDATABYTES(x) (((x) & 0xf) << 20) +#define CAD_QSPI_FLASHCMD_ENCMDADDR(x) (((x) & 1) << 19) +#define CAD_QSPI_FLASHCMD_ENMODEBIT(x) (((x) & 1) << 18) +#define CAD_QSPI_FLASHCMD_NUMADDRBYTES(x) (((x) & 0x3) << 16) +#define CAD_QSPI_FLASHCMD_ENWRDATA(x) (((x) & 1) << 15) +#define CAD_QSPI_FLASHCMD_NUMWRDATABYTES(x) (((x) & 0x7) << 12) +#define CAD_QSPI_FLASHCMD_NUMDUMMYBYTES(x) (((x) & 0x1f) << 7) +#define CAD_QSPI_FLASHCMD_RDDATA0 0xa0 +#define CAD_QSPI_FLASHCMD_RDDATA1 0xa4 +#define CAD_QSPI_FLASHCMD_WRDATA0 0xa8 +#define CAD_QSPI_FLASHCMD_WRDATA1 0xac + +#define CAD_QSPI_RDDATACAP 0x10 +#define CAD_QSPI_RDDATACAP_BYP(x) (((x) & 1) << 0) +#define CAD_QSPI_RDDATACAP_DELAY(x) (((x) & 0xf) << 1) + +#define CAD_QSPI_REMAPADDR 0x24 +#define CAD_QSPI_REMAPADDR_VALUE_SET(x) (((x) & 0xffffffff) << 0) + +#define CAD_QSPI_SRAMPART 0x18 +#define CAD_QSPI_SRAMFILL 0x2c +#define CAD_QSPI_SRAMPART_ADDR(x) (((x) >> 0) & 0x3ff) +#define CAD_QSPI_SRAM_FIFO_ENTRY_COUNT (512 / sizeof(uint32_t)) +#define CAD_QSPI_SRAMFILL_INDWRPART(x) (((x) >> 16) & 0x00ffff) +#define CAD_QSPI_SRAMFILL_INDRDPART(x) (((x) >> 0) & 0x00ffff) + +#define CAD_QSPI_SELCLKPHASE(x) (((x) & 1) << 2) +#define CAD_QSPI_SELCLKPOL(x) (((x) & 1) << 1) + +#define CAD_QSPI_STIG_FLAGSR_PROGRAMREADY(x) (((x) >> 7) & 1) +#define CAD_QSPI_STIG_FLAGSR_ERASEREADY(x) 
(((x) >> 7) & 1) +#define CAD_QSPI_STIG_FLAGSR_ERASEERROR(x) (((x) >> 5) & 1) +#define CAD_QSPI_STIG_FLAGSR_PROGRAMERROR(x) (((x) >> 4) & 1) +#define CAD_QSPI_STIG_OPCODE_CLFSR 0x50 +#define CAD_QSPI_STIG_OPCODE_RDID 0x9f +#define CAD_QSPI_STIG_OPCODE_WRDIS 0x4 +#define CAD_QSPI_STIG_OPCODE_WREN 0x6 +#define CAD_QSPI_STIG_OPCODE_SUBSEC_ERASE 0x20 +#define CAD_QSPI_STIG_OPCODE_SEC_ERASE 0xd8 +#define CAD_QSPI_STIG_OPCODE_WREN_EXT_REG 0xc5 +#define CAD_QSPI_STIG_OPCODE_DIE_ERASE 0xc4 +#define CAD_QSPI_STIG_OPCODE_BULK_ERASE 0xc7 +#define CAD_QSPI_STIG_OPCODE_RDSR 0x5 +#define CAD_QSPI_STIG_OPCODE_RDFLGSR 0x70 +#define CAD_QSPI_STIG_OPCODE_RESET_EN 0x66 +#define CAD_QSPI_STIG_OPCODE_RESET_MEM 0x99 +#define CAD_QSPI_STIG_RDID_CAPACITYID(x) (((x) >> 16) & 0xff) +#define CAD_QSPI_STIG_SR_BUSY(x) (((x) >> 0) & 1) + + +#define CAD_QSPI_INST_SINGLE 0 +#define CAD_QSPI_INST_DUAL 1 +#define CAD_QSPI_INST_QUAD 2 + +#define CAD_QSPI_INDRDSTADDR 0x68 +#define CAD_QSPI_INDRDCNT 0x6c +#define CAD_QSPI_INDRD 0x60 +#define CAD_QSPI_INDRD_RD_STAT(x) (((x) >> 2) & 1) +#define CAD_QSPI_INDRD_START 1 +#define CAD_QSPI_INDRD_IND_OPS_DONE 0x20 + +#define CAD_QSPI_INDWR 0x70 +#define CAD_QSPI_INDWR_RDSTAT(x) (((x) >> 2) & 1) +#define CAD_QSPI_INDWRSTADDR 0x78 +#define CAD_QSPI_INDWRCNT 0x7c +#define CAD_QSPI_INDWR 0x70 +#define CAD_QSPI_INDWR_START 0x1 +#define CAD_QSPI_INDWR_INDDONE 0x20 + +#define CAD_QSPI_INT_STATUS_ALL 0x0000ffff + +#define CAD_QSPI_N25Q_DIE_SIZE 0x02000000 +#define CAD_QSPI_BANK_SIZE 0x01000000 +#define CAD_QSPI_PAGE_SIZE 0x00000100 + +#define CAD_QSPI_IRQMSK 0x44 + +#define CAD_QSPI_SUBSECTOR_SIZE 0x1000 + +#define INTEL_QSPI_ADDR_BYTES 2 +#define INTEL_QSPI_BYTES_PER_DEV 256 +#define INTEL_BYTES_PER_BLOCK 16 + +#define QSPI_FAST_READ 0xb + +#define QSPI_WRITE 0x2 + +// QSPI CONFIGURATIONS + +#define QSPI_CONFIG_CPOL 1 +#define QSPI_CONFIG_CPHA 1 + +#define QSPI_CONFIG_CSSOT 0x14 +#define QSPI_CONFIG_CSEOT 0x14 +#define QSPI_CONFIG_CSDADS 0xff +#define QSPI_CONFIG_CSDA 0xc8 + +int cad_qspi_init(uint32_t desired_clk_freq, uint32_t clk_phase, + uint32_t clk_pol, uint32_t csda, uint32_t csdads, + uint32_t cseot, uint32_t cssot, uint32_t rddatacap); +void cad_qspi_set_chip_select(int cs); +int cad_qspi_erase(uint32_t offset, uint32_t size); +int cad_qspi_write(void *buffer, uint32_t offset, uint32_t size); +int cad_qspi_read(void *buffer, uint32_t offset, uint32_t size); +int cad_qspi_update(void *buffer, uint32_t offset, uint32_t size); + +#endif + diff --git a/plat/intel/soc/common/drivers/sdmmc/sdmmc.c b/plat/intel/soc/common/drivers/sdmmc/sdmmc.c new file mode 100644 index 0000000..8666f54 --- /dev/null +++ b/plat/intel/soc/common/drivers/sdmmc/sdmmc.c @@ -0,0 +1,769 @@ +/* + * Copyright (c) 2022-2023, Intel Corporation. All rights reserved. 
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/cadence/cdns_combo_phy.h>
+#include <drivers/cadence/cdns_sdmmc.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+
+#include "agilex5_pinmux.h"
+#include "sdmmc.h"
+
+static const struct mmc_ops *ops;
+static unsigned int mmc_ocr_value;
+static struct mmc_csd_emmc mmc_csd;
+static struct sd_switch_status sd_switch_func_status;
+static unsigned char mmc_ext_csd[512] __aligned(16);
+static unsigned int mmc_flags;
+static struct mmc_device_info *mmc_dev_info;
+static unsigned int rca;
+static unsigned int scr[2]__aligned(16) = { 0 };
+
+extern const struct mmc_ops cdns_sdmmc_ops;
+extern struct cdns_sdmmc_params cdns_params;
+extern struct cdns_sdmmc_combo_phy sdmmc_combo_phy_reg;
+extern struct cdns_sdmmc_sdhc sdmmc_sdhc_reg;
+
+static bool is_cmd23_enabled(void)
+{
+	return ((mmc_flags & MMC_FLAG_CMD23) != 0U);
+}
+
+static bool is_sd_cmd6_enabled(void)
+{
+	return ((mmc_flags & MMC_FLAG_SD_CMD6) != 0U);
+}
+
+/* TODO: Will remove once the generic TF-A driver is developed */
+void sdmmc_pin_config(void)
+{
+	/* Temporary: write the pin select registers as base + offset; switch to the common pinmux method later. */
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x00, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x04, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x08, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x0C, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x10, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x14, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x18, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x1C, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x20, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x24, 0x0);
+	mmio_write_32(AGX5_PINMUX_PIN0SEL+0x28, 0x0);
+}
+
+static int sdmmc_send_cmd(unsigned int idx, unsigned int arg,
+			unsigned int r_type, unsigned int *r_data)
+{
+	struct mmc_cmd cmd;
+	int ret;
+
+	zeromem(&cmd, sizeof(struct mmc_cmd));
+
+	cmd.cmd_idx = idx;
+	cmd.cmd_arg = arg;
+	cmd.resp_type = r_type;
+
+	ret = ops->send_cmd(&cmd);
+
+	if ((ret == 0) && (r_data != NULL)) {
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			*r_data = cmd.resp_data[i];
+			r_data++;
+		}
+	}
+
+	if (ret != 0) {
+		VERBOSE("Send command %u error: %d\n", idx, ret);
+	}
+
+	return ret;
+}
+
+static int sdmmc_device_state(void)
+{
+	int retries = DEFAULT_SDMMC_MAX_RETRIES;
+	unsigned int resp_data[4];
+
+	do {
+		int ret;
+
+		if (retries == 0) {
+			ERROR("CMD13 failed after %d retries\n",
+				DEFAULT_SDMMC_MAX_RETRIES);
+			return -EIO;
+		}
+
+		ret = sdmmc_send_cmd(MMC_CMD(13), rca << RCA_SHIFT_OFFSET,
+				MMC_RESPONSE_R1, &resp_data[0]);
+		if (ret != 0) {
+			retries--;
+			continue;
+		}
+
+		if ((resp_data[0] & STATUS_SWITCH_ERROR) != 0U) {
+			return -EIO;
+		}
+
+		retries--;
+	} while ((resp_data[0] & STATUS_READY_FOR_DATA) == 0U);
+
+	return MMC_GET_STATE(resp_data[0]);
+}
+
+static int sdmmc_set_ext_csd(unsigned int ext_cmd, unsigned int value)
+{
+	int ret;
+
+	ret = sdmmc_send_cmd(MMC_CMD(6),
+			EXTCSD_WRITE_BYTES | EXTCSD_CMD(ext_cmd) |
+			EXTCSD_VALUE(value) | EXTCSD_CMD_SET_NORMAL,
+			MMC_RESPONSE_R1B, NULL);
+	if (ret != 0) {
+		return ret;
+	}
+
+	do {
+		ret = sdmmc_device_state();
+		if (ret < 0) {
+			return ret;
+		}
+	} while (ret == MMC_STATE_PRG);
+
+	return 0;
+}
+
+static int sdmmc_mmc_sd_switch(unsigned int bus_width)
+{
+	int ret;
+	int retries = DEFAULT_SDMMC_MAX_RETRIES;
+	unsigned int bus_width_arg = 0;
+
+	/* CMD55: Application Specific Command */
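+	/* CMD55 (APP_CMD) with the card's RCA marks the following ACMD51 (SEND_SCR) as application-specific. */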
+ ret = sdmmc_send_cmd(MMC_CMD(55), rca << RCA_SHIFT_OFFSET, + MMC_RESPONSE_R5, NULL); + if (ret != 0) { + return ret; + } + + ret = ops->prepare(0, (uintptr_t)&scr, sizeof(scr)); + if (ret != 0) { + return ret; + } + + /* ACMD51: SEND_SCR */ + do { + ret = sdmmc_send_cmd(MMC_ACMD(51), 0, MMC_RESPONSE_R1, NULL); + if ((ret != 0) && (retries == 0)) { + ERROR("ACMD51 failed after %d retries (ret=%d)\n", + DEFAULT_SDMMC_MAX_RETRIES, ret); + return ret; + } + + retries--; + } while (ret != 0); + + ret = ops->read(0, (uintptr_t)&scr, sizeof(scr)); + if (ret != 0) { + return ret; + } + + if (((scr[0] & SD_SCR_BUS_WIDTH_4) != 0U) && + (bus_width == MMC_BUS_WIDTH_4)) { + bus_width_arg = 2; + } + + /* CMD55: Application Specific Command */ + ret = sdmmc_send_cmd(MMC_CMD(55), rca << RCA_SHIFT_OFFSET, + MMC_RESPONSE_R5, NULL); + if (ret != 0) { + return ret; + } + + /* ACMD6: SET_BUS_WIDTH */ + ret = sdmmc_send_cmd(MMC_ACMD(6), bus_width_arg, MMC_RESPONSE_R1, NULL); + if (ret != 0) { + return ret; + } + + do { + ret = sdmmc_device_state(); + if (ret < 0) { + return ret; + } + } while (ret == MMC_STATE_PRG); + + return 0; +} + +static int sdmmc_set_ios(unsigned int clk, unsigned int bus_width) +{ + int ret; + unsigned int width = bus_width; + + if (mmc_dev_info->mmc_dev_type != MMC_IS_EMMC) { + if (width == MMC_BUS_WIDTH_8) { + WARN("Wrong bus config for SD-card, force to 4\n"); + width = MMC_BUS_WIDTH_4; + } + ret = sdmmc_mmc_sd_switch(width); + if (ret != 0) { + return ret; + } + } else if (mmc_csd.spec_vers == 4U) { + ret = sdmmc_set_ext_csd(CMD_EXTCSD_BUS_WIDTH, + (unsigned int)width); + if (ret != 0) { + return ret; + } + } else { + VERBOSE("Wrong MMC type or spec version\n"); + } + + return ops->set_ios(clk, width); +} + +static int sdmmc_fill_device_info(void) +{ + unsigned long long c_size; + unsigned int speed_idx; + unsigned int nb_blocks; + unsigned int freq_unit; + int ret = 0; + struct mmc_csd_sd_v2 *csd_sd_v2; + + switch (mmc_dev_info->mmc_dev_type) { + case MMC_IS_EMMC: + mmc_dev_info->block_size = MMC_BLOCK_SIZE; + + ret = ops->prepare(0, (uintptr_t)&mmc_ext_csd, + sizeof(mmc_ext_csd)); + if (ret != 0) { + return ret; + } + + /* MMC CMD8: SEND_EXT_CSD */ + ret = sdmmc_send_cmd(MMC_CMD(8), 0, MMC_RESPONSE_R1, NULL); + if (ret != 0) { + return ret; + } + + ret = ops->read(0, (uintptr_t)&mmc_ext_csd, + sizeof(mmc_ext_csd)); + if (ret != 0) { + return ret; + } + + do { + ret = sdmmc_device_state(); + if (ret < 0) { + return ret; + } + } while (ret != MMC_STATE_TRAN); + + nb_blocks = (mmc_ext_csd[CMD_EXTCSD_SEC_CNT] << 0) | + (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 1] << 8) | + (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 2] << 16) | + (mmc_ext_csd[CMD_EXTCSD_SEC_CNT + 3] << 24); + + mmc_dev_info->device_size = (unsigned long long)nb_blocks * + mmc_dev_info->block_size; + + break; + + case MMC_IS_SD: + /* + * Use the same mmc_csd struct, as required fields here + * (READ_BL_LEN, C_SIZE, CSIZE_MULT) are common with eMMC. 
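+	 * The capacity then follows the CSD v1.0 formula used below:
+	 *   device_size = (C_SIZE + 1) * 2^(C_SIZE_MULT + 2) * 2^READ_BL_LEN bytes.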
+ */ + mmc_dev_info->block_size = BIT_32(mmc_csd.read_bl_len); + + c_size = ((unsigned long long)mmc_csd.c_size_high << 2U) | + (unsigned long long)mmc_csd.c_size_low; + assert(c_size != 0xFFFU); + + mmc_dev_info->device_size = (c_size + 1U) * + BIT_64(mmc_csd.c_size_mult + 2U) * + mmc_dev_info->block_size; + + break; + + case MMC_IS_SD_HC: + assert(mmc_csd.csd_structure == 1U); + + mmc_dev_info->block_size = MMC_BLOCK_SIZE; + + /* Need to use mmc_csd_sd_v2 struct */ + csd_sd_v2 = (struct mmc_csd_sd_v2 *)&mmc_csd; + c_size = ((unsigned long long)csd_sd_v2->c_size_high << 16) | + (unsigned long long)csd_sd_v2->c_size_low; + + mmc_dev_info->device_size = (c_size + 1U) << SDMMC_MULT_BY_512K_SHIFT; + + break; + + default: + ret = -EINVAL; + break; + } + + if (ret < 0) { + return ret; + } + + speed_idx = (mmc_csd.tran_speed & CSD_TRAN_SPEED_MULT_MASK) >> + CSD_TRAN_SPEED_MULT_SHIFT; + + assert(speed_idx > 0U); + + if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) { + mmc_dev_info->max_bus_freq = tran_speed_base[speed_idx]; + } else { + mmc_dev_info->max_bus_freq = sd_tran_speed_base[speed_idx]; + } + + freq_unit = mmc_csd.tran_speed & CSD_TRAN_SPEED_UNIT_MASK; + while (freq_unit != 0U) { + mmc_dev_info->max_bus_freq *= 10U; + --freq_unit; + } + + mmc_dev_info->max_bus_freq *= 10000U; + + return 0; +} + +static int sdmmc_sd_switch(unsigned int mode, unsigned char group, + unsigned char func) +{ + unsigned int group_shift = (group - 1U) * 4U; + unsigned int group_mask = GENMASK(group_shift + 3U, group_shift); + unsigned int arg; + int ret; + + ret = ops->prepare(0, (uintptr_t)&sd_switch_func_status, + sizeof(sd_switch_func_status)); + if (ret != 0) { + return ret; + } + + /* MMC CMD6: SWITCH_FUNC */ + arg = mode | SD_SWITCH_ALL_GROUPS_MASK; + arg &= ~group_mask; + arg |= func << group_shift; + ret = sdmmc_send_cmd(MMC_CMD(6), arg, MMC_RESPONSE_R1, NULL); + if (ret != 0) { + return ret; + } + + return ops->read(0, (uintptr_t)&sd_switch_func_status, + sizeof(sd_switch_func_status)); +} + +static int sdmmc_sd_send_op_cond(void) +{ + int n; + unsigned int resp_data[4]; + + for (n = 0; n < SEND_SDMMC_OP_COND_MAX_RETRIES; n++) { + int ret; + + /* CMD55: Application Specific Command */ + ret = sdmmc_send_cmd(MMC_CMD(55), 0, MMC_RESPONSE_R1, NULL); + if (ret != 0) { + return ret; + } + + /* ACMD41: SD_SEND_OP_COND */ + ret = sdmmc_send_cmd(MMC_ACMD(41), OCR_HCS | + mmc_dev_info->ocr_voltage, MMC_RESPONSE_R3, + &resp_data[0]); + if (ret != 0) { + return ret; + } + + if ((resp_data[0] & OCR_POWERUP) != 0U) { + mmc_ocr_value = resp_data[0]; + + if ((mmc_ocr_value & OCR_HCS) != 0U) { + mmc_dev_info->mmc_dev_type = MMC_IS_SD_HC; + } else { + mmc_dev_info->mmc_dev_type = MMC_IS_SD; + } + + return 0; + } + + mdelay(10); + } + + ERROR("ACMD41 failed after %d retries\n", SEND_SDMMC_OP_COND_MAX_RETRIES); + + return -EIO; +} + +static int sdmmc_reset_to_idle(void) +{ + int ret; + + /* CMD0: reset to IDLE */ + ret = sdmmc_send_cmd(MMC_CMD(0), 0, 0, NULL); + if (ret != 0) { + return ret; + } + + mdelay(2); + + return 0; +} + +static int sdmmc_send_op_cond(void) +{ + int ret, n; + unsigned int resp_data[4]; + + ret = sdmmc_reset_to_idle(); + if (ret != 0) { + return ret; + } + + for (n = 0; n < SEND_SDMMC_OP_COND_MAX_RETRIES; n++) { + ret = sdmmc_send_cmd(MMC_CMD(1), OCR_SECTOR_MODE | + OCR_VDD_MIN_2V7 | OCR_VDD_MIN_1V7, + MMC_RESPONSE_R3, &resp_data[0]); + if (ret != 0) { + return ret; + } + + if ((resp_data[0] & OCR_POWERUP) != 0U) { + mmc_ocr_value = resp_data[0]; + return 0; + } + + mdelay(10); + } + + ERROR("CMD1 
failed after %d retries\n", SEND_SDMMC_OP_COND_MAX_RETRIES); + + return -EIO; +} + +static int sdmmc_enumerate(unsigned int clk, unsigned int bus_width) +{ + int ret; + unsigned int resp_data[4]; + + ops->init(); + + ret = sdmmc_reset_to_idle(); + if (ret != 0) { + return ret; + } + + if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) { + ret = sdmmc_send_op_cond(); + } else { + /* CMD8: Send Interface Condition Command */ + ret = sdmmc_send_cmd(MMC_CMD(8), VHS_2_7_3_6_V | CMD8_CHECK_PATTERN, + MMC_RESPONSE_R5, &resp_data[0]); + + if ((ret == 0) && ((resp_data[0] & 0xffU) == CMD8_CHECK_PATTERN)) { + ret = sdmmc_sd_send_op_cond(); + } + } + if (ret != 0) { + return ret; + } + + /* CMD2: Card Identification */ + ret = sdmmc_send_cmd(MMC_CMD(2), 0, MMC_RESPONSE_R2, NULL); + if (ret != 0) { + return ret; + } + + /* CMD3: Set Relative Address */ + if (mmc_dev_info->mmc_dev_type == MMC_IS_EMMC) { + rca = MMC_FIX_RCA; + ret = sdmmc_send_cmd(MMC_CMD(3), rca << RCA_SHIFT_OFFSET, + MMC_RESPONSE_R1, NULL); + if (ret != 0) { + return ret; + } + } else { + ret = sdmmc_send_cmd(MMC_CMD(3), 0, + MMC_RESPONSE_R6, &resp_data[0]); + if (ret != 0) { + return ret; + } + + rca = (resp_data[0] & 0xFFFF0000U) >> 16; + } + + /* CMD9: CSD Register */ + ret = sdmmc_send_cmd(MMC_CMD(9), rca << RCA_SHIFT_OFFSET, + MMC_RESPONSE_R2, &resp_data[0]); + if (ret != 0) { + return ret; + } + + memcpy(&mmc_csd, &resp_data, sizeof(resp_data)); + + /* CMD7: Select Card */ + ret = sdmmc_send_cmd(MMC_CMD(7), rca << RCA_SHIFT_OFFSET, + MMC_RESPONSE_R1, NULL); + if (ret != 0) { + return ret; + } + + do { + ret = sdmmc_device_state(); + if (ret < 0) { + return ret; + } + } while (ret != MMC_STATE_TRAN); + + ret = sdmmc_set_ios(clk, bus_width); + if (ret != 0) { + return ret; + } + + ret = sdmmc_fill_device_info(); + if (ret != 0) { + return ret; + } + + if (is_sd_cmd6_enabled() && + (mmc_dev_info->mmc_dev_type == MMC_IS_SD_HC)) { + /* Try to switch to High Speed Mode */ + ret = sdmmc_sd_switch(SD_SWITCH_FUNC_CHECK, 1U, 1U); + if (ret != 0) { + return ret; + } + + if ((sd_switch_func_status.support_g1 & BIT(9)) == 0U) { + /* High speed not supported, keep default speed */ + return 0; + } + + ret = sdmmc_sd_switch(SD_SWITCH_FUNC_SWITCH, 1U, 1U); + if (ret != 0) { + return ret; + } + + if ((sd_switch_func_status.sel_g2_g1 & 0x1U) == 0U) { + /* Cannot switch to high speed, keep default speed */ + return 0; + } + + mmc_dev_info->max_bus_freq = 50000000U; + ret = ops->set_ios(clk, bus_width); + } + + return ret; +} + +size_t sdmmc_read_blocks(int lba, uintptr_t buf, size_t size) +{ + int ret; + unsigned int cmd_idx, cmd_arg; + + assert((ops != NULL) && + (ops->read != NULL) && + (size != 0U) && + ((size & MMC_BLOCK_MASK) == 0U)); + + ret = ops->prepare(lba, buf, size); + if (ret != 0) { + return 0; + } + + if (is_cmd23_enabled()) { + /* Set block count */ + ret = sdmmc_send_cmd(MMC_CMD(23), size / MMC_BLOCK_SIZE, + MMC_RESPONSE_R1, NULL); + if (ret != 0) { + return 0; + } + + cmd_idx = MMC_CMD(18); + } else { + if (size > MMC_BLOCK_SIZE) { + cmd_idx = MMC_CMD(18); + } else { + cmd_idx = MMC_CMD(17); + } + } + + if (((mmc_ocr_value & OCR_ACCESS_MODE_MASK) == OCR_BYTE_MODE) && + (mmc_dev_info->mmc_dev_type != MMC_IS_SD_HC)) { + cmd_arg = lba * MMC_BLOCK_SIZE; + } else { + cmd_arg = lba; + } + + ret = sdmmc_send_cmd(cmd_idx, cmd_arg, MMC_RESPONSE_R1, NULL); + if (ret != 0) { + return 0; + } + + ret = ops->read(lba, buf, size); + if (ret != 0) { + return 0; + } + + /* Wait buffer empty */ + do { + ret = sdmmc_device_state(); + if (ret < 0) { + 
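+			/* Card state polling failed; report zero bytes read back to the caller. */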
return 0; + } + } while ((ret != MMC_STATE_TRAN) && (ret != MMC_STATE_DATA)); + + if (!is_cmd23_enabled() && (size > MMC_BLOCK_SIZE)) { + ret = sdmmc_send_cmd(MMC_CMD(12), 0, MMC_RESPONSE_R1B, NULL); + if (ret != 0) { + return 0; + } + } + + return size; +} + +size_t sdmmc_write_blocks(int lba, const uintptr_t buf, size_t size) +{ + int ret; + unsigned int cmd_idx, cmd_arg; + + assert((ops != NULL) && + (ops->write != NULL) && + (size != 0U) && + ((buf & MMC_BLOCK_MASK) == 0U) && + ((size & MMC_BLOCK_MASK) == 0U)); + + ret = ops->prepare(lba, buf, size); + if (ret != 0) { + return 0; + } + + if (is_cmd23_enabled()) { + /* Set block count */ + ret = sdmmc_send_cmd(MMC_CMD(23), size / MMC_BLOCK_SIZE, + MMC_RESPONSE_R1, NULL); + if (ret != 0) { + return 0; + } + + cmd_idx = MMC_CMD(25); + } else { + if (size > MMC_BLOCK_SIZE) { + cmd_idx = MMC_CMD(25); + } else { + cmd_idx = MMC_CMD(24); + } + } + + if ((mmc_ocr_value & OCR_ACCESS_MODE_MASK) == OCR_BYTE_MODE) { + cmd_arg = lba * MMC_BLOCK_SIZE; + } else { + cmd_arg = lba; + } + + ret = sdmmc_send_cmd(cmd_idx, cmd_arg, MMC_RESPONSE_R1, NULL); + if (ret != 0) { + return 0; + } + + ret = ops->write(lba, buf, size); + if (ret != 0) { + return 0; + } + + /* Wait buffer empty */ + do { + ret = sdmmc_device_state(); + if (ret < 0) { + return 0; + } + } while ((ret != MMC_STATE_TRAN) && (ret != MMC_STATE_RCV)); + + if (!is_cmd23_enabled() && (size > MMC_BLOCK_SIZE)) { + ret = sdmmc_send_cmd(MMC_CMD(12), 0, MMC_RESPONSE_R1B, NULL); + if (ret != 0) { + return 0; + } + } + + return size; +} + +int sd_or_mmc_init(const struct mmc_ops *ops_ptr, unsigned int clk, + unsigned int width, unsigned int flags, + struct mmc_device_info *device_info) +{ + assert((ops_ptr != NULL) && + (ops_ptr->init != NULL) && + (ops_ptr->send_cmd != NULL) && + (ops_ptr->set_ios != NULL) && + (ops_ptr->prepare != NULL) && + (ops_ptr->read != NULL) && + (ops_ptr->write != NULL) && + (device_info != NULL) && + (clk != 0) && + ((width == MMC_BUS_WIDTH_1) || + (width == MMC_BUS_WIDTH_4) || + (width == MMC_BUS_WIDTH_8) || + (width == MMC_BUS_WIDTH_DDR_4) || + (width == MMC_BUS_WIDTH_DDR_8))); + + ops = ops_ptr; + mmc_flags = flags; + mmc_dev_info = device_info; + + return sdmmc_enumerate(clk, width); +} + +int sdmmc_init(handoff *hoff_ptr, struct cdns_sdmmc_params *params, struct mmc_device_info *info) +{ + int result = 0; + + /* SDMMC pin mux configuration */ + sdmmc_pin_config(); + cdns_set_sdmmc_var(&sdmmc_combo_phy_reg, &sdmmc_sdhc_reg); + result = cdns_sd_host_init(&sdmmc_combo_phy_reg, &sdmmc_sdhc_reg); + if (result < 0) { + return result; + } + + assert((params != NULL) && + ((params->reg_base & MMC_BLOCK_MASK) == 0) && + ((params->desc_base & MMC_BLOCK_MASK) == 0) && + ((params->desc_size & MMC_BLOCK_MASK) == 0) && + ((params->reg_pinmux & MMC_BLOCK_MASK) == 0) && + ((params->reg_phy & MMC_BLOCK_MASK) == 0) && + (params->desc_size > 0) && + (params->clk_rate > 0) && + ((params->bus_width == MMC_BUS_WIDTH_1) || + (params->bus_width == MMC_BUS_WIDTH_4) || + (params->bus_width == MMC_BUS_WIDTH_8))); + + memcpy(&cdns_params, params, sizeof(struct cdns_sdmmc_params)); + cdns_params.cdn_sdmmc_dev_type = info->mmc_dev_type; + cdns_params.cdn_sdmmc_dev_mode = SD_DS; + + result = sd_or_mmc_init(&cdns_sdmmc_ops, params->clk_rate, params->bus_width, + params->flags, info); + + return result; +} diff --git a/plat/intel/soc/common/drivers/sdmmc/sdmmc.h b/plat/intel/soc/common/drivers/sdmmc/sdmmc.h new file mode 100644 index 0000000..16c6b04 --- /dev/null +++ 
b/plat/intel/soc/common/drivers/sdmmc/sdmmc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SDMMC_H
+#define SDMMC_H
+
+#include <lib/mmio.h>
+#include "socfpga_handoff.h"
+
+#define PERIPHERAL_SDMMC_MASK		0x60
+#define PERIPHERAL_SDMMC_OFFSET		6
+
+#define DEFAULT_SDMMC_MAX_RETRIES	5
+#define SEND_SDMMC_OP_COND_MAX_RETRIES	100
+#define SDMMC_MULT_BY_512K_SHIFT	19
+
+static const unsigned char tran_speed_base[16] = {
+	0, 10, 12, 13, 15, 20, 26, 30, 35, 40, 45, 52, 55, 60, 70, 80
+};
+
+static const unsigned char sd_tran_speed_base[16] = {
+	0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80
+};
+
+
+/* FUNCTION DECLARATIONS */
+/*
+ * @brief SDMMC controller initialization function
+ *
+ * @hoff_ptr: Pointer to the hand-off data
+ * Return: 0 on success, a negative errno on failure
+ */
+int sdmmc_init(handoff *hoff_ptr, struct cdns_sdmmc_params *params,
+	       struct mmc_device_info *info);
+int sd_or_mmc_init(const struct mmc_ops *ops_ptr, unsigned int clk,
+		   unsigned int width, unsigned int flags,
+		   struct mmc_device_info *device_info);
+void sdmmc_pin_config(void);
+#endif
diff --git a/plat/intel/soc/common/drivers/wdt/watchdog.c b/plat/intel/soc/common/drivers/wdt/watchdog.c
new file mode 100644
index 0000000..651189b
--- /dev/null
+++ b/plat/intel/soc/common/drivers/wdt/watchdog.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include "watchdog.h"
+
+
+/* Reset watchdog timer */
+void watchdog_sw_rst(void)
+{
+	mmio_write_32(WDT_CRR, WDT_SW_RST);
+}
+
+/* Print component information */
+void watchdog_info(void)
+{
+	INFO("Component Type : %x\r\n", mmio_read_32(WDT_COMP_TYPE));
+	INFO("Component Version : %x\r\n", mmio_read_32(WDT_COMP_VERSION));
+}
+
+/* Check watchdog current status */
+void watchdog_status(void)
+{
+	if (mmio_read_32(WDT_CR) & 1) {
+		INFO("Watchdog Timer is currently enabled\n");
+		INFO("Current Counter : 0x%x\r\n", mmio_read_32(WDT_CCVR));
+	} else {
+		INFO("Watchdog Timer is currently disabled\n");
+	}
+}
+
+/* Initialize & enable watchdog */
+void watchdog_init(int watchdog_clk)
+{
+	uint8_t cycles_i = 0;
+	uint32_t wdt_cycles = WDT_MIN_CYCLES;
+	uint32_t top_init_cycles = WDT_PERIOD * watchdog_clk;
+
+	while ((cycles_i < 15) && (wdt_cycles < top_init_cycles)) {
+		wdt_cycles = (wdt_cycles << 1);
+		cycles_i++;
+	}
+
+	mmio_write_32(WDT_TORR, (cycles_i << 4) | cycles_i);
+
+	mmio_write_32(WDT_CR, WDT_CR_RMOD | WDT_CR_EN);
+}
diff --git a/plat/intel/soc/common/drivers/wdt/watchdog.h b/plat/intel/soc/common/drivers/wdt/watchdog.h
new file mode 100644
index 0000000..4ee4cff
--- /dev/null
+++ b/plat/intel/soc/common/drivers/wdt/watchdog.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef CAD_WATCHDOG_H +#define CAD_WATCHDOG_H + +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 +#define WDT_BASE (0x10D00200) +#else +#define WDT_BASE (0xFFD00200) +#endif +#define WDT_REG_SIZE_OFFSET (0x4) +#define WDT_MIN_CYCLES (65536) +#define WDT_PERIOD (20) + +#define WDT_CR (WDT_BASE + 0x0) +#define WDT_TORR (WDT_BASE + 0x4) + +#define WDT_CRR (WDT_BASE + 0xC) + +#define WDT_CCVR (WDT_BASE + 0x8) +#define WDT_STAT (WDT_BASE + 0x10) +#define WDT_EOI (WDT_BASE + 0x14) + +#define WDT_COMP_PARAM_1 (WDT_BASE + 0xF4) +#define WDT_COMP_VERSION (WDT_BASE + 0xF8) +#define WDT_COMP_TYPE (WDT_BASE + 0XFC) + +#define WDT_CR_RMOD (0x0) +#define WDT_CR_EN (0x1) + +#define WDT_SW_RST (0x76) + + +void watchdog_init(int watchdog_clk); +void watchdog_info(void); +void watchdog_status(void); +void watchdog_sw_rst(void); + +#endif diff --git a/plat/intel/soc/common/include/plat_macros.S b/plat/intel/soc/common/include/plat_macros.S new file mode 100644 index 0000000..43db9a2 --- /dev/null +++ b/plat/intel/soc/common/include/plat_macros.S @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2019, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_MACROS_S +#define PLAT_MACROS_S + +#include <platform_def.h> + + /* --------------------------------------------- + * The below required platform porting macro + * prints out relevant platform registers + * whenever an unhandled exception is taken in + * BL31. + * --------------------------------------------- + */ + .macro plat_crash_print_regs + .endm + +#endif /* PLAT_MACROS_S */ diff --git a/plat/intel/soc/common/include/platform_def.h b/plat/intel/soc/common/include/platform_def.h new file mode 100644 index 0000000..49fc567 --- /dev/null +++ b/plat/intel/soc/common/include/platform_def.h @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLATFORM_DEF_H +#define PLATFORM_DEF_H + +#include <arch.h> +#include <common/interrupt_props.h> +#include <common/tbbr/tbbr_img_def.h> +#include <plat/common/common_def.h> +#include "socfpga_plat_def.h" + +/* Platform Type */ +#define PLAT_SOCFPGA_STRATIX10 1 +#define PLAT_SOCFPGA_AGILEX 2 +#define PLAT_SOCFPGA_N5X 3 +#define PLAT_SOCFPGA_AGILEX5 4 +#define SIMICS_RUN 1 +#define MAX_IO_MTD_DEVICES U(1) + +/* sysmgr.boot_scratch_cold4 & 5 used for CPU release address for SPL */ +#define PLAT_CPU_RELEASE_ADDR 0xffd12210 + +/* Magic word to indicate L2 reset is completed */ +#define L2_RESET_DONE_STATUS 0x1228E5E7 + +/* Define next boot image name and offset */ +/* Get non-secure image entrypoint for BL33. 
Zephyr and Linux */ +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + +#ifndef PRELOADED_BL33_BASE +#define PLAT_NS_IMAGE_OFFSET 0x80200000 +#else +#define PLAT_NS_IMAGE_OFFSET PRELOADED_BL33_BASE +#endif +#define PLAT_HANDOFF_OFFSET 0x0003F000 + +#else +#define PLAT_NS_IMAGE_OFFSET 0x10000000 +#define PLAT_HANDOFF_OFFSET 0xFFE3F000 +#endif + +/******************************************************************************* + * Platform binary types for linking + ******************************************************************************/ +#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" +#define PLATFORM_LINKER_ARCH aarch64 + +/* SoCFPGA supports up to 124GB RAM */ +#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 39) +#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 39) + + +/******************************************************************************* + * Generic platform constants + ******************************************************************************/ +#define PLAT_SECONDARY_ENTRY_BASE 0x01f78bf0 + +/* Size of cacheable stacks */ +#define PLATFORM_STACK_SIZE 0x2000 + +/* PSCI related constant */ +#define PLAT_NUM_POWER_DOMAINS 5 +#define PLAT_MAX_PWR_LVL 1 +#define PLAT_MAX_RET_STATE 1 +#define PLAT_MAX_OFF_STATE 2 +#define PLATFORM_SYSTEM_COUNT U(1) +#define PLATFORM_CLUSTER_COUNT U(1) +#define PLATFORM_CLUSTER0_CORE_COUNT U(4) +#define PLATFORM_CLUSTER1_CORE_COUNT U(0) +#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \ + PLATFORM_CLUSTER0_CORE_COUNT) +#define PLATFORM_MAX_CPUS_PER_CLUSTER U(4) + +/* Interrupt related constant */ + +#define INTEL_SOCFPGA_IRQ_SEC_PHY_TIMER 29 + +#define INTEL_SOCFPGA_IRQ_SEC_SGI_0 8 +#define INTEL_SOCFPGA_IRQ_SEC_SGI_1 9 +#define INTEL_SOCFPGA_IRQ_SEC_SGI_2 10 +#define INTEL_SOCFPGA_IRQ_SEC_SGI_3 11 +#define INTEL_SOCFPGA_IRQ_SEC_SGI_4 12 +#define INTEL_SOCFPGA_IRQ_SEC_SGI_5 13 +#define INTEL_SOCFPGA_IRQ_SEC_SGI_6 14 +#define INTEL_SOCFPGA_IRQ_SEC_SGI_7 15 + +#define TSP_IRQ_SEC_PHY_TIMER INTEL_SOCFPGA_IRQ_SEC_PHY_TIMER +#define TSP_SEC_MEM_BASE BL32_BASE +#define TSP_SEC_MEM_SIZE (BL32_LIMIT - BL32_BASE + 1) + + +/******************************************************************************* + * BL31 specific defines. + ******************************************************************************/ +/* + * Put BL3-1 at the top of the Trusted SRAM (just below the shared memory, if + * present). BL31_BASE is calculated using the current BL3-1 debug size plus a + * little space for growth. + */ + +#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n" + +#define BL1_RO_BASE (0xffe00000) +#define BL1_RO_LIMIT (0xffe0f000) +#define BL1_RW_BASE (0xffe10000) +#define BL1_RW_LIMIT (0xffe1ffff) +#define BL1_RW_SIZE (0x14000) + +#define BL_DATA_LIMIT PLAT_HANDOFF_OFFSET + +#define PLAT_CPUID_RELEASE (BL_DATA_LIMIT - 16) +#define PLAT_SEC_ENTRY (BL_DATA_LIMIT - 8) + +#define CMP_ENTRY 0xFFE3EFF8 + +#define PLAT_SEC_WARM_ENTRY 0 + +/******************************************************************************* + * Platform specific page table and MMU setup constants + ******************************************************************************/ +#define MAX_XLAT_TABLES 8 +#define MAX_MMAP_REGIONS 16 + +/******************************************************************************* + * Declarations and constants to access the mailboxes safely. Each mailbox is + * aligned on the biggest cache line size in the platform. This is known only + * to the platform as it might have a combination of integrated and external + * caches. 
Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different CPUs/clusters and
+ * get written while being protected by different locks, causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT		6
+#define CACHE_WRITEBACK_GRANULE		(1 << CACHE_WRITEBACK_SHIFT)
+
+/*******************************************************************************
+ * UART related constants
+ ******************************************************************************/
+#define CRASH_CONSOLE_BASE	PLAT_UART0_BASE
+#define PLAT_INTEL_UART_BASE	PLAT_UART0_BASE
+
+#define PLAT_BAUDRATE		(115200)
+#define PLAT_UART_CLOCK		(100000000)
+
+/*******************************************************************************
+ * PHY related constants
+ ******************************************************************************/
+
+#define EMAC0_PHY_MODE		PHY_INTERFACE_MODE_RGMII
+#define EMAC1_PHY_MODE		PHY_INTERFACE_MODE_RGMII
+#define EMAC2_PHY_MODE		PHY_INTERFACE_MODE_RGMII
+
+/*******************************************************************************
+ * GIC related constants
+ ******************************************************************************/
+#define PLAT_INTEL_SOCFPGA_GICD_BASE	PLAT_GICD_BASE
+#define PLAT_INTEL_SOCFPGA_GICC_BASE	PLAT_GICC_BASE
+
+/*******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_PHY_TIMER, \
+		GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_0, \
+		GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_1, \
+		GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_2, \
+		GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_3, \
+		GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_4, \
+		GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_5, \
+		GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_6, \
+		GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(INTEL_SOCFPGA_IRQ_SEC_SGI_7, \
+		GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_EDGE)
+
+#define PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(grp)
+
+#define MAX_IO_HANDLES			4
+#define MAX_IO_DEVICES			4
+#define MAX_IO_BLOCK_DEVICES		2
+
+#ifndef __ASSEMBLER__
+struct socfpga_bl31_params {
+	param_header_t h;
+	image_info_t *bl31_image_info;
+	entry_point_info_t *bl32_ep_info;
+	image_info_t *bl32_image_info;
+	entry_point_info_t *bl33_ep_info;
+	image_info_t *bl33_image_info;
+};
+#endif
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/intel/soc/common/include/socfpga_emac.h b/plat/intel/soc/common/include/socfpga_emac.h
new file mode 100644
index 0000000..5b98006
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_emac.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOCFPGA_EMAC_H +#define SOCFPGA_EMAC_H + +/* EMAC PHY Mode */ + +#define PHY_INTERFACE_MODE_GMII_MII 0 +#define PHY_INTERFACE_MODE_RGMII 1 +#define PHY_INTERFACE_MODE_RMII 2 +#define PHY_INTERFACE_MODE_RESET 3 + +/* Mask Definitions */ + +#define PHY_INTF_SEL_MSK 0x3 +#define FPGAINTF_EN_3_EMAC_MSK(x) (1 << (x * 8)) + +void socfpga_emac_init(void); + +#endif /* SOCFPGA_EMAC_H */ diff --git a/plat/intel/soc/common/include/socfpga_f2sdram_manager.h b/plat/intel/soc/common/include/socfpga_f2sdram_manager.h new file mode 100644 index 0000000..1bebfc9 --- /dev/null +++ b/plat/intel/soc/common/include/socfpga_f2sdram_manager.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOCFPGA_F2SDRAMMANAGER_H +#define SOCFPGA_F2SDRAMMANAGER_H + +#include "socfpga_plat_def.h" + +/* FPGA2SDRAM Register Map */ +#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGINSTATUS0 0x14 +#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGOUTCLR0 0x54 +#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGOUTSET0 0x50 + +#define FLAGOUTCLR0_F2SDRAM0_ENABLE (BIT(8)) +#define FLAGOUTSETCLR_F2SDRAM0_ENABLE (BIT(1)) +#define FLAGOUTSETCLR_F2SDRAM1_ENABLE (BIT(4)) +#define FLAGOUTSETCLR_F2SDRAM2_ENABLE (BIT(7)) + +#define FLAGOUTSETCLR_F2SDRAM0_IDLEREQ (BIT(0)) +#define FLAGOUTSETCLR_F2SDRAM1_IDLEREQ (BIT(3)) +#define FLAGOUTSETCLR_F2SDRAM2_IDLEREQ (BIT(6)) +#define FLAGINSTATUS_F2SDRAM0_IDLEACK (BIT(1)) +#define FLAGINSTATUS_F2SDRAM1_IDLEACK (BIT(5)) +#define FLAGINSTATUS_F2SDRAM2_IDLEACK (BIT(9)) +#define FLAGINSTATUS_F2SDRAM0_CMDIDLE (BIT(2)) +#define FLAGINSTATUS_F2SDRAM1_CMDIDLE (BIT(6)) +#define FLAGINSTATUS_F2SDRAM2_CMDIDLE (BIT(10)) +#define FLAGINSTATUS_F2SDRAM0_NOCIDLE (BIT(0)) +#define FLAGINSTATUS_F2SDRAM1_NOCIDLE (BIT(4)) +#define FLAGINSTATUS_F2SDRAM2_NOCIDLE (BIT(8)) + +#define FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN (BIT(2)) +#define FLAGOUTSETCLR_F2SDRAM1_FORCE_DRAIN (BIT(5)) +#define FLAGOUTSETCLR_F2SDRAM2_FORCE_DRAIN (BIT(8)) + +#define FLAGINSTATUS_F2SOC_RESPEMPTY (BIT(3)) +#define FLAGINSTATUS_F2SDRAM0_RESPEMPTY (BIT(3)) +#define FLAGINSTATUS_F2SDRAM1_RESPEMPTY (BIT(7)) +#define FLAGINSTATUS_F2SDRAM2_RESPEMPTY (BIT(11)) +#define FLAGINSTATUS_F2S_FM_TRACKERIDLE (BIT(4)) + +#define SOCFPGA_F2SDRAMMGR(_reg) (SOCFPGA_F2SDRAMMGR_REG_BASE \ + + (SOCFPGA_F2SDRAMMGR_##_reg)) + +#endif /* SOCFPGA_F2SDRAMMGR_H */ diff --git a/plat/intel/soc/common/include/socfpga_fcs.h b/plat/intel/soc/common/include/socfpga_fcs.h new file mode 100644 index 0000000..6bb70e0 --- /dev/null +++ b/plat/intel/soc/common/include/socfpga_fcs.h @@ -0,0 +1,340 @@ +/* + * Copyright (c) 2020-2022, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOCFPGA_FCS_H +#define SOCFPGA_FCS_H + +/* FCS Definitions */ + +#define FCS_RANDOM_WORD_SIZE 8U +#define FCS_PROV_DATA_WORD_SIZE 44U +#define FCS_SHA384_WORD_SIZE 12U + +#define FCS_RANDOM_BYTE_SIZE (FCS_RANDOM_WORD_SIZE * 4U) +#define FCS_RANDOM_EXT_MAX_WORD_SIZE 1020U +#define FCS_PROV_DATA_BYTE_SIZE (FCS_PROV_DATA_WORD_SIZE * 4U) +#define FCS_SHA384_BYTE_SIZE (FCS_SHA384_WORD_SIZE * 4U) + +#define FCS_RANDOM_EXT_OFFSET 3 + +#define FCS_MODE_DECRYPT 0x0 +#define FCS_MODE_ENCRYPT 0x1 +#define FCS_ENCRYPTION_DATA_0 0x10100 +#define FCS_DECRYPTION_DATA_0 0x10102 +#define FCS_OWNER_ID_OFFSET 0xC +#define FCS_CRYPTION_CRYPTO_HEADER 0x07000000 +#define FCS_CRYPTION_RESP_WORD_SIZE 4U +#define FCS_CRYPTION_RESP_SIZE_OFFSET 3U + +#define PSGSIGMA_TEARDOWN_MAGIC 0xB852E2A4 +#define PSGSIGMA_SESSION_ID_ONE 0x1 +#define PSGSIGMA_UNKNOWN_SESSION 0xFFFFFFFF + +#define RESERVED_AS_ZERO 0x0 +/* FCS Single cert */ + +#define FCS_BIG_CNTR_SEL 0x1 + +#define FCS_SVN_CNTR_0_SEL 0x2 +#define FCS_SVN_CNTR_1_SEL 0x3 +#define FCS_SVN_CNTR_2_SEL 0x4 +#define FCS_SVN_CNTR_3_SEL 0x5 + +#define FCS_BIG_CNTR_VAL_MAX 495U +#define FCS_SVN_CNTR_VAL_MAX 64U + +/* FCS Attestation Cert Request Parameter */ + +#define FCS_ATTEST_FIRMWARE_CERT 0x01 +#define FCS_ATTEST_DEV_ID_SELF_SIGN_CERT 0x02 +#define FCS_ATTEST_DEV_ID_ENROLL_CERT 0x04 +#define FCS_ATTEST_ENROLL_SELF_SIGN_CERT 0x08 +#define FCS_ATTEST_ALIAS_CERT 0x10 +#define FCS_ATTEST_CERT_MAX_REQ_PARAM 0xFF + +/* FCS Crypto Service */ + +#define FCS_CS_KEY_OBJ_MAX_WORD_SIZE 88U +#define FCS_CS_KEY_INFO_MAX_WORD_SIZE 36U +#define FCS_CS_KEY_RESP_STATUS_MASK 0xFF +#define FCS_CS_KEY_RESP_STATUS_OFFSET 16U + +#define FCS_CS_FIELD_SIZE_MASK 0xFFFF +#define FCS_CS_FIELD_FLAG_OFFSET 24 +#define FCS_CS_FIELD_FLAG_INIT BIT(0) +#define FCS_CS_FIELD_FLAG_UPDATE BIT(1) +#define FCS_CS_FIELD_FLAG_FINALIZE BIT(2) + +#define FCS_AES_MAX_DATA_SIZE 0x10000000 /* 256 MB */ +#define FCS_AES_MIN_DATA_SIZE 0x20 /* 32 Byte */ +#define FCS_AES_CMD_MAX_WORD_SIZE 15U + +#define FCS_MAX_DATA_SIZE 0x20000000 /* 512 MB */ +#define FCS_MIN_DATA_SIZE 0x8 /* 8 Bytes */ + +#define FCS_GET_DIGEST_CMD_MAX_WORD_SIZE 7U +#define FCS_GET_DIGEST_RESP_MAX_WORD_SIZE 19U +#define FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE 23U +#define FCS_MAC_VERIFY_RESP_MAX_WORD_SIZE 4U +#define FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET 8U + +#define FCS_ECDSA_GET_PUBKEY_MAX_WORD_SIZE 5U +#define FCS_ECDSA_SHA2_DATA_SIGN_CMD_MAX_WORD_SIZE 7U +#define FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE 43U +#define FCS_ECDSA_HASH_SIGN_CMD_MAX_WORD_SIZE 17U +#define FCS_ECDSA_HASH_SIG_VERIFY_CMD_MAX_WORD_SIZE 52U +#define FCS_ECDH_REQUEST_CMD_MAX_WORD_SIZE 29U + +#define FCS_CRYPTO_ECB_BUFFER_SIZE 12U +#define FCS_CRYPTO_CBC_CTR_BUFFER_SIZE 28U +#define FCS_CRYPTO_BLOCK_MODE_MASK 0x07 +#define FCS_CRYPTO_ECB_MODE 0x00 +#define FCS_CRYPTO_CBC_MODE 0x01 +#define FCS_CRYPTO_CTR_MODE 0x02 + +/* FCS Payload Structure */ +typedef struct fcs_rng_payload_t { + uint32_t session_id; + uint32_t context_id; + uint32_t crypto_header; + uint32_t size; +} fcs_rng_payload; + +typedef struct fcs_encrypt_payload_t { + uint32_t first_word; + uint32_t src_addr; + uint32_t src_size; + uint32_t dst_addr; + uint32_t dst_size; +} fcs_encrypt_payload; + +typedef struct fcs_decrypt_payload_t { + uint32_t first_word; + uint32_t owner_id[2]; + uint32_t src_addr; + uint32_t src_size; + uint32_t dst_addr; + uint32_t dst_size; +} fcs_decrypt_payload; + +typedef struct fcs_encrypt_ext_payload_t { + uint32_t 
session_id; + uint32_t context_id; + uint32_t crypto_header; + uint32_t src_addr; + uint32_t src_size; + uint32_t dst_addr; + uint32_t dst_size; +} fcs_encrypt_ext_payload; + +typedef struct fcs_decrypt_ext_payload_t { + uint32_t session_id; + uint32_t context_id; + uint32_t crypto_header; + uint32_t owner_id[2]; + uint32_t src_addr; + uint32_t src_size; + uint32_t dst_addr; + uint32_t dst_size; +} fcs_decrypt_ext_payload; + +typedef struct psgsigma_teardown_msg_t { + uint32_t reserved_word; + uint32_t magic_word; + uint32_t session_id; +} psgsigma_teardown_msg; + +typedef struct fcs_cntr_set_preauth_payload_t { + uint32_t first_word; + uint32_t counter_value; +} fcs_cntr_set_preauth_payload; + +typedef struct fcs_cs_key_payload_t { + uint32_t session_id; + uint32_t reserved0; + uint32_t reserved1; + uint32_t key_id; +} fcs_cs_key_payload; + +typedef struct fcs_crypto_service_data_t { + uint32_t session_id; + uint32_t context_id; + uint32_t key_id; + uint32_t crypto_param_size; + uint64_t crypto_param; + uint8_t is_updated; +} fcs_crypto_service_data; + +typedef struct fcs_crypto_service_aes_data_t { + uint32_t session_id; + uint32_t context_id; + uint32_t param_size; + uint32_t key_id; + uint32_t crypto_param[7]; + uint8_t is_updated; +} fcs_crypto_service_aes_data; + +/* Functions Definitions */ + +uint32_t intel_fcs_random_number_gen(uint64_t addr, uint64_t *ret_size, + uint32_t *mbox_error); +int intel_fcs_random_number_gen_ext(uint32_t session_id, uint32_t context_id, + uint32_t size, uint32_t *send_id); +uint32_t intel_fcs_send_cert(uint64_t addr, uint64_t size, + uint32_t *send_id); +uint32_t intel_fcs_get_provision_data(uint32_t *send_id); +uint32_t intel_fcs_cntr_set_preauth(uint8_t counter_type, + int32_t counter_value, + uint32_t test_bit, + uint32_t *mbox_error); +uint32_t intel_fcs_encryption(uint32_t src_addr, uint32_t src_size, + uint32_t dst_addr, uint32_t dst_size, + uint32_t *send_id); + +uint32_t intel_fcs_decryption(uint32_t src_addr, uint32_t src_size, + uint32_t dst_addr, uint32_t dst_size, + uint32_t *send_id); + +int intel_fcs_encryption_ext(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint32_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error); +int intel_fcs_decryption_ext(uint32_t sesion_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint32_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error); + +int intel_fcs_sigma_teardown(uint32_t session_id, uint32_t *mbox_error); +int intel_fcs_chip_id(uint32_t *id_low, uint32_t *id_high, uint32_t *mbox_error); +int intel_fcs_attestation_subkey(uint64_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error); +int intel_fcs_get_measurement(uint64_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error); +uint32_t intel_fcs_get_rom_patch_sha384(uint64_t addr, uint64_t *ret_size, + uint32_t *mbox_error); + +int intel_fcs_create_cert_on_reload(uint32_t cert_request, + uint32_t *mbox_error); +int intel_fcs_get_attestation_cert(uint32_t cert_request, uint64_t dst_addr, + uint32_t *dst_size, uint32_t *mbox_error); + +int intel_fcs_open_crypto_service_session(uint32_t *session_id, + uint32_t *mbox_error); +int intel_fcs_close_crypto_service_session(uint32_t session_id, + uint32_t *mbox_error); + +int intel_fcs_import_crypto_service_key(uint64_t src_addr, uint32_t src_size, + uint32_t *mbox_error); +int intel_fcs_export_crypto_service_key(uint32_t session_id, uint32_t key_id, + uint64_t 
dst_addr, uint32_t *dst_size, + uint32_t *mbox_error); +int intel_fcs_remove_crypto_service_key(uint32_t session_id, uint32_t key_id, + uint32_t *mbox_error); +int intel_fcs_get_crypto_service_key_info(uint32_t session_id, uint32_t key_id, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error); + +int intel_fcs_get_digest_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error); +int intel_fcs_get_digest_update_finalize(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint8_t is_finalised, uint32_t *mbox_error); +int intel_fcs_get_digest_smmu_update_finalize(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint8_t is_finalised, uint32_t *mbox_error, + uint32_t *send_id); + +int intel_fcs_mac_verify_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error); +int intel_fcs_mac_verify_update_finalize(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t data_size, uint8_t is_finalised, + uint32_t *mbox_error); +int intel_fcs_mac_verify_smmu_update_finalize(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t data_size, uint8_t is_finalised, + uint32_t *mbox_error, uint32_t *send_id); + +int intel_fcs_ecdsa_hash_sign_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error); +int intel_fcs_ecdsa_hash_sign_finalize(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error); + +int intel_fcs_ecdsa_hash_sig_verify_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error); +int intel_fcs_ecdsa_hash_sig_verify_finalize(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error); + +int intel_fcs_ecdsa_sha2_data_sign_init(uint32_t session_id, + uint32_t context_id, uint32_t key_id, + uint32_t param_size, uint64_t param_data, + uint32_t *mbox_error); +int intel_fcs_ecdsa_sha2_data_sign_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint8_t is_finalised, + uint32_t *mbox_error); +int intel_fcs_ecdsa_sha2_data_sign_smmu_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint8_t is_finalised, + uint32_t *mbox_error, uint32_t *send_id); + +int intel_fcs_ecdsa_sha2_data_sig_verify_init(uint32_t session_id, + uint32_t context_id, uint32_t key_id, + uint32_t param_size, uint64_t param_data, + uint32_t *mbox_error); +int intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint32_t data_size, + uint8_t is_finalised, uint32_t *mbox_error); +int intel_fcs_ecdsa_sha2_data_sig_verify_smmu_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, 
uint32_t data_size, + uint8_t is_finalised, uint32_t *mbox_error, + uint32_t *send_id); + +int intel_fcs_ecdsa_get_pubkey_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error); +int intel_fcs_ecdsa_get_pubkey_finalize(uint32_t session_id, uint32_t context_id, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error); + +int intel_fcs_ecdh_request_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error); +int intel_fcs_ecdh_request_finalize(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error); + +int intel_fcs_aes_crypt_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint64_t param_addr, + uint32_t param_size, uint32_t *mbox_error); +int intel_fcs_aes_crypt_update_finalize(uint32_t session_id, + uint32_t context_id, uint64_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t dst_size, uint8_t is_finalised, + uint32_t *send_id); + +#endif /* SOCFPGA_FCS_H */ diff --git a/plat/intel/soc/common/include/socfpga_handoff.h b/plat/intel/soc/common/include/socfpga_handoff.h new file mode 100644 index 0000000..b2913c7 --- /dev/null +++ b/plat/intel/soc/common/include/socfpga_handoff.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef HANDOFF_H +#define HANDOFF_H + +#define HANDOFF_MAGIC_HEADER 0x424f4f54 /* BOOT */ +#define HANDOFF_MAGIC_PINMUX_SEL 0x504d5558 /* PMUX */ +#define HANDOFF_MAGIC_IOCTLR 0x494f4354 /* IOCT */ +#define HANDOFF_MAGIC_FPGA 0x46504741 /* FPGA */ +#define HANDOFF_MAGIC_IODELAY 0x444c4159 /* DLAY */ +#define HANDOFF_MAGIC_CLOCK 0x434c4b53 /* CLKS */ +#define HANDOFF_MAGIC_MISC 0x4d495343 /* MISC */ +#define HANDOFF_MAGIC_PERIPHERAL 0x50455249 /* PERIPHERAL */ +#define HANDOFF_MAGIC_DDR 0x5344524d /* DDR */ + +#include <socfpga_plat_def.h> + +typedef struct handoff_t { + /* header */ + uint32_t header_magic; + uint32_t header_device; + uint32_t _pad_0x08_0x10[2]; + + /* pinmux configuration - select */ + uint32_t pinmux_sel_magic; + uint32_t pinmux_sel_length; + uint32_t _pad_0x18_0x20[2]; + uint32_t pinmux_sel_array[96]; /* offset, value */ + + /* pinmux configuration - io control */ + uint32_t pinmux_io_magic; + uint32_t pinmux_io_length; + uint32_t _pad_0x1a8_0x1b0[2]; + uint32_t pinmux_io_array[96]; /* offset, value */ + + /* pinmux configuration - use fpga switch */ + uint32_t pinmux_fpga_magic; + uint32_t pinmux_fpga_length; + uint32_t _pad_0x338_0x340[2]; + uint32_t pinmux_fpga_array[44]; /* offset, value */ + /* TODO: Temp remove due to add in extra handoff data */ + // uint32_t _pad_0x3e8_0x3f0[2]; + + /* pinmux configuration - io delay */ + uint32_t pinmux_delay_magic; + uint32_t pinmux_delay_length; + uint32_t _pad_0x3f8_0x400[2]; + uint32_t pinmux_iodelay_array[96]; /* offset, value */ + + /* clock configuration */ +#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10 + uint32_t clock_magic; + uint32_t clock_length; + uint32_t _pad_0x588_0x590[2]; + uint32_t main_pll_mpuclk; + uint32_t main_pll_nocclk; + uint32_t main_pll_cntr2clk; + uint32_t main_pll_cntr3clk; + uint32_t main_pll_cntr4clk; + uint32_t main_pll_cntr5clk; + uint32_t main_pll_cntr6clk; + uint32_t main_pll_cntr7clk; + uint32_t main_pll_cntr8clk; + uint32_t main_pll_cntr9clk; + uint32_t main_pll_nocdiv; + uint32_t main_pll_pllglob; + 
uint32_t main_pll_fdbck; + uint32_t main_pll_pllc0; + uint32_t main_pll_pllc1; + uint32_t _pad_0x5cc_0x5d0[1]; + uint32_t per_pll_cntr2clk; + uint32_t per_pll_cntr3clk; + uint32_t per_pll_cntr4clk; + uint32_t per_pll_cntr5clk; + uint32_t per_pll_cntr6clk; + uint32_t per_pll_cntr7clk; + uint32_t per_pll_cntr8clk; + uint32_t per_pll_cntr9clk; + uint32_t per_pll_emacctl; + uint32_t per_pll_gpiodiv; + uint32_t per_pll_pllglob; + uint32_t per_pll_fdbck; + uint32_t per_pll_pllc0; + uint32_t per_pll_pllc1; + uint32_t hps_osc_clk_h; + uint32_t fpga_clk_hz; +#elif PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX + uint32_t clock_magic; + uint32_t clock_length; + uint32_t _pad_0x588_0x590[2]; + uint32_t main_pll_mpuclk; + uint32_t main_pll_nocclk; + uint32_t main_pll_nocdiv; + uint32_t main_pll_pllglob; + uint32_t main_pll_fdbck; + uint32_t main_pll_pllc0; + uint32_t main_pll_pllc1; + uint32_t main_pll_pllc2; + uint32_t main_pll_pllc3; + uint32_t main_pll_pllm; + uint32_t per_pll_emacctl; + uint32_t per_pll_gpiodiv; + uint32_t per_pll_pllglob; + uint32_t per_pll_fdbck; + uint32_t per_pll_pllc0; + uint32_t per_pll_pllc1; + uint32_t per_pll_pllc2; + uint32_t per_pll_pllc3; + uint32_t per_pll_pllm; + uint32_t alt_emacactr; + uint32_t alt_emacbctr; + uint32_t alt_emacptpctr; + uint32_t alt_gpiodbctr; + uint32_t alt_sdmmcctr; + uint32_t alt_s2fuser0ctr; + uint32_t alt_s2fuser1ctr; + uint32_t alt_psirefctr; + uint32_t hps_osc_clk_h; + uint32_t fpga_clk_hz; + uint32_t _pad_0x604_0x610[3]; +#elif PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + uint32_t clock_magic; + uint32_t clock_length; + uint32_t _pad_0x588_0x590[2]; + uint32_t main_pll_nocclk; + uint32_t main_pll_nocdiv; + uint32_t main_pll_pllglob; + uint32_t main_pll_fdbck; + uint32_t main_pll_pllc0; + uint32_t main_pll_pllc1; + uint32_t main_pll_pllc2; + uint32_t main_pll_pllc3; + uint32_t main_pll_pllm; + uint32_t per_pll_emacctl; + uint32_t per_pll_gpiodiv; + uint32_t per_pll_pllglob; + uint32_t per_pll_fdbck; + uint32_t per_pll_pllc0; + uint32_t per_pll_pllc1; + uint32_t per_pll_pllc2; + uint32_t per_pll_pllc3; + uint32_t per_pll_pllm; + uint32_t alt_emacactr; + uint32_t alt_emacbctr; + uint32_t alt_emacptpctr; + uint32_t alt_gpiodbctr; + uint32_t alt_sdmmcctr; + uint32_t alt_s2fuser0ctr; + uint32_t alt_s2fuser1ctr; + uint32_t alt_psirefctr; + /* TODO: Temp added for clk manager. */ + uint32_t qspi_clk_khz; + uint32_t hps_osc_clk_hz; + uint32_t fpga_clk_hz; + /* TODO: Temp added for clk manager. */ + uint32_t ddr_reset_type; + /* TODO: Temp added for clk manager. 
*/ + uint32_t hps_status_coldreset; + /* TODO: Temp remove due to add in extra handoff data */ + //uint32_t _pad_0x604_0x610[3]; +#endif + /* misc configuration */ + uint32_t misc_magic; + uint32_t misc_length; + uint32_t _pad_0x618_0x620[2]; + +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + /* peripheral configuration - select */ + uint32_t peripheral_pwr_gate_magic; + uint32_t peripheral_pwr_gate_length; + uint32_t _pad_0x08_0x0C[2]; + uint32_t peripheral_pwr_gate_array; /* offset, value */ + + /* ddr configuration - select */ + uint32_t ddr_magic; + uint32_t ddr_length; + uint32_t _pad_0x1C_0x20[2]; + uint32_t ddr_array[4]; /* offset, value */ +#endif +} handoff; + +int verify_handoff_image(handoff *hoff_ptr, handoff *reverse_hoff_ptr); +int socfpga_get_handoff(handoff *hoff_ptr); + +#endif diff --git a/plat/intel/soc/common/include/socfpga_mailbox.h b/plat/intel/soc/common/include/socfpga_mailbox.h new file mode 100644 index 0000000..77d3af9 --- /dev/null +++ b/plat/intel/soc/common/include/socfpga_mailbox.h @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOCFPGA_MBOX_H +#define SOCFPGA_MBOX_H + +#include <lib/utils_def.h> + +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 +#define MBOX_OFFSET 0x10a30000 +#else +#define MBOX_OFFSET 0xffa30000 +#endif + +#define MBOX_ATF_CLIENT_ID 0x1U +#define MBOX_MAX_JOB_ID 0xFU +#define MBOX_MAX_IND_JOB_ID (MBOX_MAX_JOB_ID - 1U) +#define MBOX_JOB_ID MBOX_MAX_JOB_ID +#define MBOX_TEST_BIT BIT(31) + +/* Mailbox Shared Memory Register Map */ +#define MBOX_CIN 0x00 +#define MBOX_ROUT 0x04 +#define MBOX_URG 0x08 +#define MBOX_INT 0x0C +#define MBOX_COUT 0x20 +#define MBOX_RIN 0x24 +#define MBOX_STATUS 0x2C +#define MBOX_CMD_BUFFER 0x40 +#define MBOX_RESP_BUFFER 0xC0 + +/* Mailbox SDM doorbell */ +#define MBOX_DOORBELL_TO_SDM 0x400 +#define MBOX_DOORBELL_FROM_SDM 0x480 + + +/* Mailbox commands */ + +#define MBOX_CMD_NOOP 0x00 +#define MBOX_CMD_SYNC 0x01 +#define MBOX_CMD_RESTART 0x02 +#define MBOX_CMD_CANCEL 0x03 +#define MBOX_CMD_VAB_SRC_CERT 0x0B +#define MBOX_CMD_GET_IDCODE 0x10 +#define MBOX_CMD_GET_USERCODE 0x13 +#define MBOX_CMD_GET_CHIPID 0x12 +#define MBOX_CMD_REBOOT_HPS 0x47 + +/* Reconfiguration Commands */ +#define MBOX_CONFIG_STATUS 0x04 +#define MBOX_RECONFIG 0x06 +#define MBOX_RECONFIG_DATA 0x08 +#define MBOX_RECONFIG_STATUS 0x09 + +/* HWMON Commands */ +#define MBOX_HWMON_READVOLT 0x18 +#define MBOX_HWMON_READTEMP 0x19 + + +/* QSPI Commands */ +#define MBOX_CMD_QSPI_OPEN 0x32 +#define MBOX_CMD_QSPI_CLOSE 0x33 +#define MBOX_CMD_QSPI_SET_CS 0x34 +#define MBOX_CMD_QSPI_DIRECT 0x3B + +/* SEU Commands */ +#define MBOX_CMD_SEU_ERR_READ 0x3C + +/* RSU Commands */ +#define MBOX_GET_SUBPARTITION_TABLE 0x5A +#define MBOX_RSU_STATUS 0x5B +#define MBOX_RSU_UPDATE 0x5C +#define MBOX_HPS_STAGE_NOTIFY 0x5D + +/* FCS Command */ +#define MBOX_FCS_GET_PROVISION 0x7B +#define MBOX_FCS_CNTR_SET_PREAUTH 0x7C +#define MBOX_FCS_ENCRYPT_REQ 0x7E +#define MBOX_FCS_DECRYPT_REQ 0x7F +#define MBOX_FCS_RANDOM_GEN 0x80 +#define MBOX_FCS_AES_CRYPT_REQ 0x81 +#define MBOX_FCS_GET_DIGEST_REQ 0x82 +#define MBOX_FCS_MAC_VERIFY_REQ 0x83 +#define MBOX_FCS_ECDSA_HASH_SIGN_REQ 0x84 +#define MBOX_FCS_ECDSA_SHA2_DATA_SIGN_REQ 0x85 +#define MBOX_FCS_ECDSA_HASH_SIG_VERIFY 0x86 +#define MBOX_FCS_ECDSA_SHA2_DATA_SIGN_VERIFY 0x87 +#define MBOX_FCS_ECDSA_GET_PUBKEY 0x88 +#define MBOX_FCS_ECDH_REQUEST 0x89 +#define MBOX_FCS_OPEN_CS_SESSION 0xA0 +#define MBOX_FCS_CLOSE_CS_SESSION 0xA1 +#define 
MBOX_FCS_IMPORT_CS_KEY 0xA5 +#define MBOX_FCS_EXPORT_CS_KEY 0xA6 +#define MBOX_FCS_REMOVE_CS_KEY 0xA7 +#define MBOX_FCS_GET_CS_KEY_INFO 0xA8 + +/* PSG SIGMA Commands */ +#define MBOX_PSG_SIGMA_TEARDOWN 0xD5 + +/* Attestation Commands */ +#define MBOX_CREATE_CERT_ON_RELOAD 0x180 +#define MBOX_GET_ATTESTATION_CERT 0x181 +#define MBOX_ATTESTATION_SUBKEY 0x182 +#define MBOX_GET_MEASUREMENT 0x183 + +/* Miscellaneous commands */ +#define MBOX_GET_ROM_PATCH_SHA384 0x1B0 + +/* Mailbox Definitions */ + +#define CMD_DIRECT 0 +#define CMD_INDIRECT 1 +#define CMD_CASUAL 0 +#define CMD_URGENT 1 + +#define MBOX_WORD_BYTE 4U +#define MBOX_RESP_BUFFER_SIZE 16 +#define MBOX_CMD_BUFFER_SIZE 32 +#define MBOX_INC_HEADER_MAX_WORD_SIZE 1024U + +/* Execution states for HPS_STAGE_NOTIFY */ +#define HPS_EXECUTION_STATE_FSBL 0 +#define HPS_EXECUTION_STATE_SSBL 1 +#define HPS_EXECUTION_STATE_OS 2 + +/* Status Response */ +#define MBOX_RET_OK 0 +#define MBOX_RET_ERROR -1 +#define MBOX_NO_RESPONSE -2 +#define MBOX_WRONG_ID -3 +#define MBOX_BUFFER_FULL -4 +#define MBOX_BUSY -5 +#define MBOX_TIMEOUT -2047 + +/* Key Status */ +#define MBOX_RET_SDOS_DECRYPTION_ERROR_102 -258 +#define MBOX_RET_SDOS_DECRYPTION_ERROR_103 -259 + +/* Reconfig Status Response */ +#define RECONFIG_STATUS_STATE 0 +#define RECONFIG_STATUS_PIN_STATUS 2 +#define RECONFIG_STATUS_SOFTFUNC_STATUS 3 +#define PIN_STATUS_NSTATUS (U(1) << 31) +#define SOFTFUNC_STATUS_SEU_ERROR (1 << 3) +#define SOFTFUNC_STATUS_INIT_DONE (1 << 1) +#define SOFTFUNC_STATUS_CONF_DONE (1 << 0) +#define MBOX_CFGSTAT_STATE_IDLE 0x00000000 +#define MBOX_CFGSTAT_STATE_CONFIG 0x10000000 +#define MBOX_CFGSTAT_VAB_BS_PREAUTH 0x20000000 +#define MBOX_CFGSTAT_STATE_FAILACK 0x08000000 +#define MBOX_CFGSTAT_STATE_ERROR_INVALID 0xf0000001 +#define MBOX_CFGSTAT_STATE_ERROR_CORRUPT 0xf0000002 +#define MBOX_CFGSTAT_STATE_ERROR_AUTH 0xf0000003 +#define MBOX_CFGSTAT_STATE_ERROR_CORE_IO 0xf0000004 +#define MBOX_CFGSTAT_STATE_ERROR_HARDWARE 0xf0000005 +#define MBOX_CFGSTAT_STATE_ERROR_FAKE 0xf0000006 +#define MBOX_CFGSTAT_STATE_ERROR_BOOT_INFO 0xf0000007 +#define MBOX_CFGSTAT_STATE_ERROR_QSPI_ERROR 0xf0000008 + + +/* Mailbox Macros */ + +#define MBOX_ENTRY_TO_ADDR(_buf, ptr) (MBOX_OFFSET + (MBOX_##_buf##_BUFFER) \ + + MBOX_WORD_BYTE * (ptr)) + +/* Mailbox interrupt flags and masks */ +#define MBOX_INT_FLAG_COE 0x1 +#define MBOX_INT_FLAG_RIE 0x2 +#define MBOX_INT_FLAG_UAE 0x100 +#define MBOX_COE_BIT(INTERRUPT) ((INTERRUPT) & 0x3) +#define MBOX_UAE_BIT(INTERRUPT) (((INTERRUPT) & (1<<8))) + +/* Mailbox response and status */ +#define MBOX_RESP_ERR(BUFFER) ((BUFFER) & 0x000007ff) +#define MBOX_RESP_LEN(BUFFER) (((BUFFER) & 0x007ff000) >> 12) +#define MBOX_RESP_CLIENT_ID(BUFFER) (((BUFFER) & 0xf0000000) >> 28) +#define MBOX_RESP_JOB_ID(BUFFER) (((BUFFER) & 0x0f000000) >> 24) +#define MBOX_STATUS_UA_MASK (1<<8) + +/* Mailbox command and response */ +#define MBOX_CLIENT_ID_CMD(CLIENT_ID) ((CLIENT_ID) << 28) +#define MBOX_JOB_ID_CMD(JOB_ID) (JOB_ID<<24) +#define MBOX_CMD_LEN_CMD(CMD_LEN) ((CMD_LEN) << 12) +#define MBOX_INDIRECT(val) ((val) << 11) +#define MBOX_CMD_MASK(header) ((header) & 0x7ff) + +/* Mailbox payload */ +#define MBOX_DATA_MAX_LEN 0x3ff +#define MBOX_PAYLOAD_FLAG_BUSY BIT(0) + +/* RSU Macros */ +#define RSU_VERSION_ACMF BIT(8) +#define RSU_VERSION_ACMF_MASK 0xff00 + +/* Config Status Macros */ +#define CONFIG_STATUS_WORD_SIZE 16U +#define CONFIG_STATUS_FW_VER_OFFSET 1 +#define CONFIG_STATUS_FW_VER_MASK 0x00FFFFFF + +/* Data structure */ + +typedef struct mailbox_payload { + uint32_t 
header; + uint32_t data[MBOX_DATA_MAX_LEN]; +} mailbox_payload_t; + +typedef struct mailbox_container { + uint32_t flag; + uint32_t index; + mailbox_payload_t *payload; +} mailbox_container_t; + +/* Mailbox Function Definitions */ + +void mailbox_set_int(uint32_t interrupt_input); +int mailbox_init(void); +void mailbox_set_qspi_close(void); +void mailbox_hps_qspi_enable(void); + +int mailbox_send_cmd(uint32_t job_id, uint32_t cmd, uint32_t *args, + unsigned int len, uint32_t urgent, uint32_t *response, + unsigned int *resp_len); +int mailbox_send_cmd_async(uint32_t *job_id, uint32_t cmd, uint32_t *args, + unsigned int len, unsigned int indirect); +int mailbox_send_cmd_async_ext(uint32_t header_cmd, uint32_t *args, + unsigned int len); +int mailbox_read_response(uint32_t *job_id, uint32_t *response, + unsigned int *resp_len); +int mailbox_read_response_async(uint32_t *job_id, uint32_t *header, + uint32_t *response, unsigned int *resp_len, + uint8_t ignore_client_id); +int iterate_resp(uint32_t mbox_resp_len, uint32_t *resp_buf, + unsigned int *resp_len); + +void mailbox_reset_cold(void); +void mailbox_reset_warm(uint32_t reset_type); +void mailbox_clear_response(void); + +int intel_mailbox_get_config_status(uint32_t cmd, bool init_done); +int intel_mailbox_is_fpga_not_ready(void); + +int mailbox_rsu_get_spt_offset(uint32_t *resp_buf, uint32_t resp_buf_len); +int mailbox_rsu_status(uint32_t *resp_buf, uint32_t resp_buf_len); +int mailbox_rsu_update(uint32_t *flash_offset); +int mailbox_hps_stage_notify(uint32_t execution_stage); +int mailbox_hwmon_readtemp(uint32_t chan, uint32_t *resp_buf); +int mailbox_hwmon_readvolt(uint32_t chan, uint32_t *resp_buf); +int mailbox_seu_err_status(uint32_t *resp_buf, uint32_t resp_buf_len); + +#endif /* SOCFPGA_MBOX_H */ diff --git a/plat/intel/soc/common/include/socfpga_noc.h b/plat/intel/soc/common/include/socfpga_noc.h new file mode 100644 index 0000000..3fc3f81 --- /dev/null +++ b/plat/intel/soc/common/include/socfpga_noc.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2020-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOCFPGA_NOC_H +#define SOCFPGA_NOC_H + +/* Macros */ +#define SCR_AXI_AP_MASK BIT(24) +#define SCR_FPGA2SOC_MASK BIT(16) +#define SCR_MPU_MASK BIT(0) +#define DISABLE_L4_FIREWALL (SCR_AXI_AP_MASK | SCR_FPGA2SOC_MASK \ + | SCR_MPU_MASK) +#define DISABLE_BRIDGE_FIREWALL 0x0ffe0101 + +#define SOCFPGA_CCU_NOC(_ctrl, _dev) (SOCFPGA_CCU_NOC_REG_BASE \ + + (SOCFPGA_CCU_NOC_##_ctrl##_##_dev)) + +#define SOCFPGA_L4_PER_SCR(_reg) (SOCFPGA_L4_PER_SCR_REG_BASE \ + + (SOCFPGA_NOC_FW_L4_PER_SCR_##_reg)) + +#define SOCFPGA_L4_SYS_SCR(_reg) (SOCFPGA_L4_SYS_SCR_REG_BASE \ + + (SOCFPGA_NOC_FW_L4_SYS_SCR_##_reg)) + +/* L3 Interconnect Register Map */ +#define SOCFPGA_NOC_FW_L4_PER_SCR_NAND_REGISTER 0x0000 +#define SOCFPGA_NOC_FW_L4_PER_SCR_NAND_DATA 0x0004 +#define SOCFPGA_NOC_FW_L4_PER_SCR_USB0_REGISTER 0x000c +#define SOCFPGA_NOC_FW_L4_PER_SCR_USB1_REGISTER 0x0010 +#define SOCFPGA_NOC_FW_L4_PER_SCR_SPI_MASTER0 0x001c +#define SOCFPGA_NOC_FW_L4_PER_SCR_SPI_MASTER1 0x0020 +#define SOCFPGA_NOC_FW_L4_PER_SCR_SPI_SLAVE0 0x0024 +#define SOCFPGA_NOC_FW_L4_PER_SCR_SPI_SLAVE1 0x0028 +#define SOCFPGA_NOC_FW_L4_PER_SCR_EMAC0 0x002c +#define SOCFPGA_NOC_FW_L4_PER_SCR_EMAC1 0x0030 +#define SOCFPGA_NOC_FW_L4_PER_SCR_EMAC2 0x0034 +#define SOCFPGA_NOC_FW_L4_PER_SCR_SDMMC 0x0040 +#define SOCFPGA_NOC_FW_L4_PER_SCR_GPIO0 0x0044 +#define SOCFPGA_NOC_FW_L4_PER_SCR_GPIO1 0x0048 +#define SOCFPGA_NOC_FW_L4_PER_SCR_I2C0 0x0050 +#define SOCFPGA_NOC_FW_L4_PER_SCR_I2C1 0x0054 +#define SOCFPGA_NOC_FW_L4_PER_SCR_I2C2 0x0058 +#define SOCFPGA_NOC_FW_L4_PER_SCR_I2C3 0x005c +#define SOCFPGA_NOC_FW_L4_PER_SCR_I2C4 0x0060 +#define SOCFPGA_NOC_FW_L4_PER_SCR_SP_TIMER0 0x0064 +#define SOCFPGA_NOC_FW_L4_PER_SCR_SP_TIMER1 0x0068 +#define SOCFPGA_NOC_FW_L4_PER_SCR_UART0 0x006c +#define SOCFPGA_NOC_FW_L4_PER_SCR_UART1 0x0070 + +#define SOCFPGA_NOC_FW_L4_SYS_SCR_DMA_ECC 0x0008 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC0RX_ECC 0x000c +#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC0TX_ECC 0x0010 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC1RX_ECC 0x0014 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC1TX_ECC 0x0018 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC2RX_ECC 0x001c +#define SOCFPGA_NOC_FW_L4_SYS_SCR_EMAC2TX_ECC 0x0020 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_NAND_ECC 0x002c +#define SOCFPGA_NOC_FW_L4_SYS_SCR_NAND_READ_ECC 0x0030 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_NAND_WRITE_ECC 0x0034 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_OCRAM_ECC 0x0038 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_SDMMC_ECC 0x0040 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_USB0_ECC 0x0044 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_USB1_ECC 0x0048 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_CLK_MGR 0x004c +#define SOCFPGA_NOC_FW_L4_SYS_SCR_IO_MGR 0x0054 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_RST_MGR 0x0058 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_SYS_MGR 0x005c +#define SOCFPGA_NOC_FW_L4_SYS_SCR_OSC0_TIMER 0x0060 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_OSC1_TIMER 0x0064 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG0 0x0068 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG1 0x006c +#define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG2 0x0070 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG3 0x0074 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_DAP 0x0078 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_WATCHDOG4 0x007c +#define SOCFPGA_NOC_FW_L4_SYS_SCR_PWRMGR 0x0080 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_USB1_RXECC 0x0084 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_USB1_TXECC 0x0088 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_L4_NOC_PROBES 0x0090 +#define SOCFPGA_NOC_FW_L4_SYS_SCR_L4_NOC_QOS 0x0094 + +/* CCU NOC Register Map */ + +#define SOCFPGA_CCU_NOC_CPU0_RAM0 0x04688 
+#define SOCFPGA_CCU_NOC_IOM_RAM0 0x18628 + +#define SOCFPGA_CCU_NOC_ADMASK_P_MASK BIT(0) +#define SOCFPGA_CCU_NOC_ADMASK_NS_MASK BIT(1) + +/* Function Definitions */ + +void enable_ns_peripheral_access(void); +void enable_ns_bridge_access(void); +void enable_ns_ocram_access(void); +void enable_ocram_firewall(void); + +#endif diff --git a/plat/intel/soc/common/include/socfpga_private.h b/plat/intel/soc/common/include/socfpga_private.h new file mode 100644 index 0000000..9d389e3 --- /dev/null +++ b/plat/intel/soc/common/include/socfpga_private.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOCFPGA_PRIVATE_H +#define SOCFPGA_PRIVATE_H + +#include "socfpga_plat_def.h" + +#define EMMC_DESC_SIZE (1<<20) + +#define EMMC_INIT_PARAMS(base, clk) \ + { .bus_width = MMC_BUS_WIDTH_4, \ + .clk_rate = (clk), \ + .desc_base = (base), \ + .desc_size = EMMC_DESC_SIZE, \ + .flags = 0, \ + .reg_base = SOCFPGA_MMC_REG_BASE \ + } + +typedef enum { + BOOT_SOURCE_FPGA = 0, + BOOT_SOURCE_SDMMC, + BOOT_SOURCE_NAND, + BOOT_SOURCE_RSVD, + BOOT_SOURCE_QSPI +} boot_source_type; + +/******************************************************************************* + * Function and variable prototypes + ******************************************************************************/ + +void enable_nonsecure_access(void); + +void socfpga_io_setup(int boot_source); + +void socfgpa_configure_mmu_el3(unsigned long total_base, + unsigned long total_size, + unsigned long ro_start, + unsigned long ro_limit, + unsigned long coh_start, + unsigned long coh_limit); + + +void socfpga_configure_mmu_el1(unsigned long total_base, + unsigned long total_size, + unsigned long ro_start, + unsigned long ro_limit, + unsigned long coh_start, + unsigned long coh_limit); + +void socfpga_delay_timer_init(void); + +void socfpga_gic_driver_init(void); + +void socfpga_delay_timer_init_args(void); + +uint32_t socfpga_get_spsr_for_bl32_entry(void); + +uint32_t socfpga_get_spsr_for_bl33_entry(void); + +unsigned long socfpga_get_ns_image_entrypoint(void); + +void plat_secondary_cpus_bl31_entry(void); + +#endif /* SOCFPGA_PRIVATE_H */ diff --git a/plat/intel/soc/common/include/socfpga_reset_manager.h b/plat/intel/soc/common/include/socfpga_reset_manager.h new file mode 100644 index 0000000..9d06a3d --- /dev/null +++ b/plat/intel/soc/common/include/socfpga_reset_manager.h @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOCFPGA_RESETMANAGER_H +#define SOCFPGA_RESETMANAGER_H + +#include "socfpga_plat_def.h" + +/* Status Response */ +#define RSTMGR_RET_OK 0 +#define RSTMGR_RET_ERROR -1 + +#define SOCFPGA_BRIDGE_ENABLE BIT(0) +#define SOCFPGA_BRIDGE_HAS_MASK BIT(1) + +#define SOC2FPGA_MASK (1<<0) +#define LWHPS2FPGA_MASK (1<<1) +#define FPGA2SOC_MASK (1<<2) +#define F2SDRAM0_MASK (1<<3) +#define F2SDRAM1_MASK (1<<4) +#define F2SDRAM2_MASK (1<<5) + +/* Register Mapping */ + +#define SOCFPGA_RSTMGR_STAT 0x000 +#define SOCFPGA_RSTMGR_MISCSTAT 0x008 +#define SOCFPGA_RSTMGR_HDSKEN 0x010 +#define SOCFPGA_RSTMGR_HDSKREQ 0x014 +#define SOCFPGA_RSTMGR_HDSKACK 0x018 +#define SOCFPGA_RSTMGR_HDSKSTALL 0x01C +#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5 +#define SOCFPGA_RSTMGR_MPUMODRST 0x020 +#endif +#define SOCFPGA_RSTMGR_PER0MODRST 0x024 +#define SOCFPGA_RSTMGR_PER1MODRST 0x028 +#define SOCFPGA_RSTMGR_BRGMODRST 0x02C +#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5 +#define SOCFPGA_RSTMGR_COLDMODRST 0x034 +#endif +#define SOCFPGA_RSTMGR_DBGMODRST 0x03C +#define SOCFPGA_RSTMGR_BRGWARMMASK 0x04C +#define SOCFPGA_RSTMGR_TSTSTA 0x05C +#define SOCFPGA_RSTMGR_HDSKTIMEOUT 0x064 +#define SOCFPGA_RSTMGR_DBGHDSKTIMEOUT 0x06C +#define SOCFPGA_RSTMGR_DBGRSTCMPLT 0x070 +#define SOCFPGA_RSTMGR_HPSRSTCMPLT 0x080 +#define SOCFPGA_RSTMGR_CPUINREST 0x090 +#define SOCFPGA_RSTMGR_CPURSTRELEASE 0x094 +#define SOCFPGA_RSTMGR_CPUBASELOW_0 0x098 +#define SOCFPGA_RSTMGR_CPUBASEHIGH_0 0x09C +#define SOCFPGA_RSTMGR_CPUBASELOW_1 0x0A0 +#define SOCFPGA_RSTMGR_CPUBASEHIGH_1 0x0A4 +#define SOCFPGA_RSTMGR_CPUBASELOW_2 0x0A8 +#define SOCFPGA_RSTMGR_CPUBASEHIGH_2 0x0AC +#define SOCFPGA_RSTMGR_CPUBASELOW_3 0x0B0 +#define SOCFPGA_RSTMGR_CPUBASEHIGH_3 0x0B4 + +/* Field Mapping */ +/* PER0MODRST */ +#define RSTMGR_PER0MODRST_EMAC0 0x00000001 //TSN0 +#define RSTMGR_PER0MODRST_EMAC1 0x00000002 //TSN1 +#define RSTMGR_PER0MODRST_EMAC2 0x00000004 //TSN2 +#define RSTMGR_PER0MODRST_USB0 0x00000008 +#define RSTMGR_PER0MODRST_USB1 0x00000010 +#define RSTMGR_PER0MODRST_NAND 0x00000020 +#define RSTMGR_PER0MODRST_SOFTPHY 0x00000040 +#define RSTMGR_PER0MODRST_SDMMC 0x00000080 +#define RSTMGR_PER0MODRST_EMAC0OCP 0x00000100 //TSN0ECC +#define RSTMGR_PER0MODRST_EMAC1OCP 0x00000200 //TSN1ECC +#define RSTMGR_PER0MODRST_EMAC2OCP 0x00000400 //TSN2ECC +#define RSTMGR_PER0MODRST_USB0OCP 0x00000800 +#define RSTMGR_PER0MODRST_USB1OCP 0x00001000 +#define RSTMGR_PER0MODRST_NANDOCP 0x00002000 +#define RSTMGR_PER0MODRST_SDMMCOCP 0x00008000 +#define RSTMGR_PER0MODRST_DMA 0x00010000 +#define RSTMGR_PER0MODRST_SPIM0 0x00020000 +#define RSTMGR_PER0MODRST_SPIM1 0x00040000 +#define RSTMGR_PER0MODRST_SPIS0 0x00080000 +#define RSTMGR_PER0MODRST_SPIS1 0x00100000 +#define RSTMGR_PER0MODRST_DMAOCP 0x00200000 +#define RSTMGR_PER0MODRST_EMACPTP 0x00400000 +#define RSTMGR_PER0MODRST_DMAIF0 0x01000000 +#define RSTMGR_PER0MODRST_DMAIF1 0x02000000 +#define RSTMGR_PER0MODRST_DMAIF2 0x04000000 +#define RSTMGR_PER0MODRST_DMAIF3 0x08000000 +#define RSTMGR_PER0MODRST_DMAIF4 0x10000000 +#define RSTMGR_PER0MODRST_DMAIF5 0x20000000 +#define RSTMGR_PER0MODRST_DMAIF6 0x40000000 +#define RSTMGR_PER0MODRST_DMAIF7 0x80000000 + +/* PER1MODRST */ +#define RSTMGR_PER1MODRST_WATCHDOG0 0x00000001 +#define RSTMGR_PER1MODRST_WATCHDOG1 0x00000002 +#define RSTMGR_PER1MODRST_WATCHDOG2 0x00000004 +#define RSTMGR_PER1MODRST_WATCHDOG3 0x00000008 +#define RSTMGR_PER1MODRST_L4SYSTIMER0 0x00000010 +#define RSTMGR_PER1MODRST_L4SYSTIMER1 0x00000020 +#define 
RSTMGR_PER1MODRST_SPTIMER0 0x00000040 +#define RSTMGR_PER1MODRST_SPTIMER1 0x00000080 +#define RSTMGR_PER1MODRST_I2C0 0x00000100 +#define RSTMGR_PER1MODRST_I2C1 0x00000200 +#define RSTMGR_PER1MODRST_I2C2 0x00000400 +#define RSTMGR_PER1MODRST_I2C3 0x00000800 +#define RSTMGR_PER1MODRST_I2C4 0x00001000 +#define RSTMGR_PER1MODRST_I3C0 0x00002000 +#define RSTMGR_PER1MODRST_I3C1 0x00004000 +#define RSTMGR_PER1MODRST_UART0 0x00010000 +#define RSTMGR_PER1MODRST_UART1 0x00020000 +#define RSTMGR_PER1MODRST_GPIO0 0x01000000 +#define RSTMGR_PER1MODRST_GPIO1 0x02000000 +#define RSTMGR_PER1MODRST_WATCHDOG4 0x04000000 + +/* HDSKEN */ +#define RSTMGR_HDSKEN_EMIF_FLUSH 0x00000001 +#define RSTMGR_HDSKEN_FPGAHSEN 0x00000004 +#define RSTMGR_HDSKEN_ETRSTALLEN 0x00000008 +#define RSTMGR_HDSKEN_LWS2F_FLUSH 0x00000200 +#define RSTMGR_HDSKEN_S2F_FLUSH 0x00000400 +#define RSTMGR_HDSKEN_F2SDRAM_FLUSH 0x00000800 +#define RSTMGR_HDSKEN_F2S_FLUSH 0x00001000 +#define RSTMGR_HDSKEN_L3NOC_DBG 0x00010000 +#define RSTMGR_HDSKEN_DEBUG_L3NOC 0x00020000 + +/* HDSKREQ */ +#define RSTMGR_HDSKREQ_EMIFFLUSHREQ 0x00000001 +#define RSTMGR_HDSKREQ_ETRSTALLREQ 0x00000008 +#define RSTMGR_HDSKREQ_LWS2F_FLUSH 0x00000200 +#define RSTMGR_HDSKREQ_S2F_FLUSH 0x00000400 +#define RSTMGR_HDSKREQ_F2SDRAM_FLUSH 0x00000800 +#define RSTMGR_HDSKREQ_F2S_FLUSH 0x00001000 +#define RSTMGR_HDSKREQ_L3NOC_DBG 0x00010000 +#define RSTMGR_HDSKREQ_DEBUG_L3NOC 0x00020000 +#define RSTMGR_HDSKREQ_FPGAHSREQ 0x00000004 +#define RSTMGR_HDSKREQ_LWSOC2FPGAREQ 0x00000200 +#define RSTMGR_HDSKREQ_SOC2FPGAREQ 0x00000400 +#define RSTMGR_HDSKREQ_F2SDRAM0REQ 0x00000800 +#define RSTMGR_HDSKREQ_FPGA2SOCREQ 0x00001000 + +/* HDSKACK */ +#define RSTMGR_HDSKACK_EMIFFLUSHREQ 0x00000001 +#define RSTMGR_HDSKACK_FPGAHSREQ 0x00000004 +#define RSTMGR_HDSKACK_ETRSTALLREQ 0x00000008 +#define RSTMGR_HDSKACK_LWS2F_FLUSH 0x00000200 +#define RSTMGR_HDSKACK_S2F_FLUSH 0x00000400 +#define RSTMGR_HDSKACK_F2SDRAM_FLUSH 0x00000800 +#define RSTMGR_HDSKACK_F2S_FLUSH 0x00001000 +#define RSTMGR_HDSKACK_L3NOC_DBG 0x00010000 +#define RSTMGR_HDSKACK_DEBUG_L3NOC 0x00020000 +#define RSTMGR_HDSKACK_FPGAHSACK 0x00000004 +#define RSTMGR_HDSKACK_LWSOC2FPGAACK 0x00000200 +#define RSTMGR_HDSKACK_SOC2FPGAACK 0x00000400 +#define RSTMGR_HDSKACK_F2SDRAM0ACK 0x00000800 +#define RSTMGR_HDSKACK_FPGA2SOCACK 0x00001000 +#define RSTMGR_HDSKACK_FPGAHSACK_DASRT 0x00000000 +#define RSTMGR_HDSKACK_F2SDRAM0ACK_DASRT 0x00000000 +#define RSTMGR_HDSKACK_FPGA2SOCACK_DASRT 0x00000000 + +/* HDSKSTALL */ +#define RSTMGR_HDSKACK_ETRSTALLWARMRST 0x00000001 + +/* BRGMODRST */ +#define RSTMGR_BRGMODRST_SOC2FPGA 0x00000001 +#define RSTMGR_BRGMODRST_LWHPS2FPGA 0x00000002 +#define RSTMGR_BRGMODRST_FPGA2SOC 0x00000004 +#define RSTMGR_BRGMODRST_F2SSDRAM0 0x00000008 +#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10 +#define RSTMGR_BRGMODRST_F2SSDRAM1 0x10 +#define RSTMGR_BRGMODRST_F2SSDRAM2 0x20 +#define RSTMGR_BRGMODRST_DDRSCH 0x40 +#elif PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 +#define RSTMGR_BRGMODRST_F2SSDRAM1 0x10 +#define RSTMGR_BRGMODRST_F2SSDRAM2 0x20 +#endif + +#define RSTMGR_BRGMODRST_MPFE 0x40 + +/* DBGMODRST */ +#define RSTMGR_DBGMODRST_DBG_RST 0x00000001 + +/* BRGMODRSTMASK */ +#define RSTMGR_BRGMODRSTMASK_SOC2FPGA 0x00000001 +#define RSTMGR_BRGMODRSTMASK_LWHPS2FPGA 0x00000002 +#define RSTMGR_BRGMODRSTMASK_FPGA2SOC 0x00000004 +#define RSTMGR_BRGMODRSTMASK_F2SDRAM0 0x00000008 +#define RSTMGR_BRGMODRSTMASK_MPFE 0x00000040 + +/* TSTSTA */ +#define RSTMGR_TSTSTA_RSTST 0x0000001F + +/* HDSKTIMEOUT */ +#define RSTMGR_HDSKTIMEOUT_VAL 0xFFFFFFFF 
+ +/* DBGHDSKTIMEOUT */ +#define RSTMGR_DBGHDSKTIMEOUT_VAL 0xFFFFFFFF + +/* DBGRSTCMPLT */ +#define RSTMGR_DBGRSTCMPLT_VAL 0xFFFFFFFF + +/* HPSRSTCMPLT */ +#define RSTMGR_DBGRSTCMPLT_VAL 0xFFFFFFFF + +/* CPUINRESET */ +#define RSTMGR_CPUINRESET_CPU0 0x00000001 +#define RSTMGR_CPUINRESET_CPU1 0x00000002 +#define RSTMGR_CPUINRESET_CPU2 0x00000004 +#define RSTMGR_CPUINRESET_CPU3 0x00000008 + +/* CPUSTRELEASE */ +#define RSTMGR_CPUSTRELEASE_CPUx 0x10D11094 + +/* CPUxRESETBASE */ +#define RSTMGR_CPUxRESETBASELOW_CPU0 0x10D11098 +#define RSTMGR_CPUxRESETBASEHIGH_CPU0 0x10D1109C +#define RSTMGR_CPUxRESETBASELOW_CPU1 0x10D110A0 +#define RSTMGR_CPUxRESETBASEHIGH_CPU1 0x10D110A4 +#define RSTMGR_CPUxRESETBASELOW_CPU2 0x10D110A8 +#define RSTMGR_CPUxRESETBASEHIGH_CPU2 0x10D110AC +#define RSTMGR_CPUxRESETBASELOW_CPU3 0x10D110B0 +#define RSTMGR_CPUxRESETBASEHIGH_CPU3 0x10D110B4 + +/* Definitions */ + +#define RSTMGR_L2_MODRST 0x0100 +#define RSTMGR_HDSKEN_SET 0x010D + +/* Macros */ +#define SOCFPGA_RSTMGR(_reg) (SOCFPGA_RSTMGR_REG_BASE + (SOCFPGA_RSTMGR_##_reg)) +#define RSTMGR_FIELD(_reg, _field) (RSTMGR_##_reg##MODRST_##_field) + +/* Reset type to SDM from PSCI */ +// Temp add macro here for reset type +#define SOCFPGA_RESET_TYPE_COLD 0 +#define SOCFPGA_RESET_TYPE_WARM 1 + +/* Function Declarations */ + +void deassert_peripheral_reset(void); +void config_hps_hs_before_warm_reset(void); + +int socfpga_bridges_reset(uint32_t mask); +int socfpga_bridges_enable(uint32_t mask); +int socfpga_bridges_disable(uint32_t mask); + +int socfpga_cpurstrelease(unsigned int cpu_id); +int socfpga_cpu_reset_base(unsigned int cpu_id); + +/* SMP: Func proto */ +void bl31_plat_set_secondary_cpu_entrypoint(unsigned int cpu_id); +void bl31_plat_set_secondary_cpu_off(void); + +#endif /* SOCFPGA_RESETMANAGER_H */ diff --git a/plat/intel/soc/common/include/socfpga_sip_svc.h b/plat/intel/soc/common/include/socfpga_sip_svc.h new file mode 100644 index 0000000..0668301 --- /dev/null +++ b/plat/intel/soc/common/include/socfpga_sip_svc.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOCFPGA_SIP_SVC_H +#define SOCFPGA_SIP_SVC_H + + +/* SiP status response */ +#define INTEL_SIP_SMC_STATUS_OK 0 +#define INTEL_SIP_SMC_STATUS_BUSY 0x1 +#define INTEL_SIP_SMC_STATUS_REJECTED 0x2 +#define INTEL_SIP_SMC_STATUS_NO_RESPONSE 0x3 +#define INTEL_SIP_SMC_STATUS_ERROR 0x4 +#define INTEL_SIP_SMC_RSU_ERROR 0x7 +#define INTEL_SIP_SMC_SEU_ERR_READ_ERROR 0x8 + +/* SiP mailbox error code */ +#define GENERIC_RESPONSE_ERROR 0x3FF + +/* SiP V2 command code range */ +#define INTEL_SIP_SMC_CMD_MASK 0xFFFF +#define INTEL_SIP_SMC_CMD_V2_RANGE_BEGIN 0x400 +#define INTEL_SIP_SMC_CMD_V2_RANGE_END 0x4FF + +/* SiP V2 protocol header */ +#define INTEL_SIP_SMC_HEADER_JOB_ID_MASK 0xF +#define INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET 0U +#define INTEL_SIP_SMC_HEADER_CID_MASK 0xF +#define INTEL_SIP_SMC_HEADER_CID_OFFSET 4U +#define INTEL_SIP_SMC_HEADER_VERSION_MASK 0xF +#define INTEL_SIP_SMC_HEADER_VERSION_OFFSET 60U + +/* SMC SiP service function identifier for version 1 */ + +/* FPGA Reconfig */ +#define INTEL_SIP_SMC_FPGA_CONFIG_START 0xC2000001 +#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE 0x42000002 +#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE 0xC2000003 +#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE 0xC2000004 +#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM 0xC2000005 + +/* FPGA Bitstream Flag */ +#define FLAG_PARTIAL_CONFIG BIT(0) +#define FLAG_AUTHENTICATION BIT(1) +#define CONFIG_TEST_FLAG(_flag, _type) (((flag) & FLAG_##_type) \ + == FLAG_##_type) + +/* Secure Register Access */ +#define INTEL_SIP_SMC_REG_READ 0xC2000007 +#define INTEL_SIP_SMC_REG_WRITE 0xC2000008 +#define INTEL_SIP_SMC_REG_UPDATE 0xC2000009 + +/* Remote System Update */ +#define INTEL_SIP_SMC_RSU_STATUS 0xC200000B +#define INTEL_SIP_SMC_RSU_UPDATE 0xC200000C +#define INTEL_SIP_SMC_RSU_NOTIFY 0xC200000E +#define INTEL_SIP_SMC_RSU_RETRY_COUNTER 0xC200000F +#define INTEL_SIP_SMC_RSU_DCMF_VERSION 0xC2000010 +#define INTEL_SIP_SMC_RSU_COPY_DCMF_VERSION 0xC2000011 +#define INTEL_SIP_SMC_RSU_MAX_RETRY 0xC2000012 +#define INTEL_SIP_SMC_RSU_COPY_MAX_RETRY 0xC2000013 +#define INTEL_SIP_SMC_RSU_DCMF_STATUS 0xC2000014 +#define INTEL_SIP_SMC_RSU_COPY_DCMF_STATUS 0xC2000015 + +/* Hardware monitor */ +#define INTEL_SIP_SMC_HWMON_READTEMP 0xC2000020 +#define INTEL_SIP_SMC_HWMON_READVOLT 0xC2000021 +#define TEMP_CHANNEL_MAX (1 << 15) +#define VOLT_CHANNEL_MAX (1 << 15) + +/* ECC */ +#define INTEL_SIP_SMC_ECC_DBE 0xC200000D + +/* Generic Command */ +#define INTEL_SIP_SMC_SERVICE_COMPLETED 0xC200001E +#define INTEL_SIP_SMC_FIRMWARE_VERSION 0xC200001F +#define INTEL_SIP_SMC_HPS_SET_BRIDGES 0xC2000032 +#define INTEL_SIP_SMC_GET_ROM_PATCH_SHA384 0xC2000040 + +#define SERVICE_COMPLETED_MODE_ASYNC 0x00004F4E + +/* Mailbox Command */ +#define INTEL_SIP_SMC_MBOX_SEND_CMD 0xC200003C +#define INTEL_SIP_SMC_GET_USERCODE 0xC200003D + +/* FPGA Crypto Services */ +#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER 0xC200005A +#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT 0x4200008F +#define INTEL_SIP_SMC_FCS_CRYPTION 0x4200005B +#define INTEL_SIP_SMC_FCS_CRYPTION_EXT 0xC2000090 +#define INTEL_SIP_SMC_FCS_SERVICE_REQUEST 0x4200005C +#define INTEL_SIP_SMC_FCS_SEND_CERTIFICATE 0x4200005D +#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA 0x4200005E +#define INTEL_SIP_SMC_FCS_CNTR_SET_PREAUTH 0xC200005F +#define INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN 0xC2000064 +#define INTEL_SIP_SMC_FCS_CHIP_ID 0xC2000065 +#define INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY 0xC2000066 +#define INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS 0xC2000067 +#define 
INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERT 0xC2000068 +#define INTEL_SIP_SMC_FCS_CREATE_CERT_ON_RELOAD 0xC2000069 +#define INTEL_SIP_SMC_FCS_OPEN_CS_SESSION 0xC200006E +#define INTEL_SIP_SMC_FCS_CLOSE_CS_SESSION 0xC200006F +#define INTEL_SIP_SMC_FCS_IMPORT_CS_KEY 0x42000070 +#define INTEL_SIP_SMC_FCS_EXPORT_CS_KEY 0xC2000071 +#define INTEL_SIP_SMC_FCS_REMOVE_CS_KEY 0xC2000072 +#define INTEL_SIP_SMC_FCS_GET_CS_KEY_INFO 0xC2000073 +#define INTEL_SIP_SMC_FCS_AES_CRYPT_INIT 0xC2000074 +#define INTEL_SIP_SMC_FCS_AES_CRYPT_UPDATE 0x42000075 +#define INTEL_SIP_SMC_FCS_AES_CRYPT_FINALIZE 0x42000076 +#define INTEL_SIP_SMC_FCS_GET_DIGEST_INIT 0xC2000077 +#define INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE 0xC2000078 +#define INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE 0xC2000079 +#define INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_UPDATE 0x42000091 +#define INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_FINALIZE 0x42000092 +#define INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT 0xC200007A +#define INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE 0xC200007B +#define INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE 0xC200007C +#define INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_UPDATE 0x42000093 +#define INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_FINALIZE 0x42000094 +#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_INIT 0xC200007D +#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_FINALIZE 0xC200007F +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_INIT 0xC2000080 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE 0xC2000081 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE 0xC2000082 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_SMMU_UPDATE 0x42000095 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_SMMU_FINALIZE 0x42000096 +#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_INIT 0xC2000083 +#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE 0xC2000085 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT 0xC2000086 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE 0xC2000087 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE 0xC2000088 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_SMMU_UPDATE 0x42000097 +#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_SMMU_FINALIZE 0x42000098 +#define INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_INIT 0xC2000089 +#define INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_FINALIZE 0xC200008B +#define INTEL_SIP_SMC_FCS_ECDH_REQUEST_INIT 0xC200008C +#define INTEL_SIP_SMC_FCS_ECDH_REQUEST_FINALIZE 0xC200008E + +/* SEU ERR */ +#define INTEL_SIP_SMC_SEU_ERR_STATUS 0xC2000099 + +#define INTEL_SIP_SMC_FCS_SHA_MODE_MASK 0xF +#define INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK 0xF +#define INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET 4U +#define INTEL_SIP_SMC_FCS_ECC_ALGO_MASK 0xF + +/* ECC DBE */ +#define WARM_RESET_WFI_FLAG BIT(31) +#define SYSMGR_ECC_DBE_COLD_RST_MASK (SYSMGR_ECC_OCRAM_MASK |\ + SYSMGR_ECC_DDR0_MASK |\ + SYSMGR_ECC_DDR1_MASK) + +/* Non-mailbox SMC Call */ +#define INTEL_SIP_SMC_SVC_VERSION 0xC2000200 + +/** + * SMC SiP service function identifier for version 2 + * Command code from 0x400 ~ 0x4FF + */ + +/* V2: Non-mailbox function identifier */ +#define INTEL_SIP_SMC_V2_GET_SVC_VERSION 0xC2000400 +#define INTEL_SIP_SMC_V2_REG_READ 0xC2000401 +#define INTEL_SIP_SMC_V2_REG_WRITE 0xC2000402 +#define INTEL_SIP_SMC_V2_REG_UPDATE 0xC2000403 +#define INTEL_SIP_SMC_V2_HPS_SET_BRIDGES 0xC2000404 +#define INTEL_SIP_SMC_V2_RSU_UPDATE_ADDR 0xC2000405 + +/* V2: Mailbox function identifier */ +#define INTEL_SIP_SMC_V2_MAILBOX_SEND_COMMAND 0xC2000420 +#define INTEL_SIP_SMC_V2_MAILBOX_POLL_RESPONSE 0xC2000421 + +/* SMC function IDs for SiP Service queries */ +#define 
SIP_SVC_CALL_COUNT 0x8200ff00 +#define SIP_SVC_UID 0x8200ff01 +#define SIP_SVC_VERSION 0x8200ff03 + +/* SiP Service Calls version numbers */ +/* + * Increase if there is any backward compatibility impact + */ +#define SIP_SVC_VERSION_MAJOR 2 +/* + * Increase if there is new SMC function ID being added + */ +#define SIP_SVC_VERSION_MINOR 2 + + +/* Structure Definitions */ +struct fpga_config_info { + uint32_t addr; + int size; + int size_written; + uint32_t write_requested; + int subblocks_sent; + int block_number; +}; + +typedef enum { + NO_REQUEST = 0, + RECONFIGURATION, + BITSTREAM_AUTH +} config_type; + +/* Function Definitions */ +bool is_size_4_bytes_aligned(uint32_t size); +bool is_address_in_ddr_range(uint64_t addr, uint64_t size); + +/* ECC DBE */ +bool cold_reset_for_ecc_dbe(void); +uint32_t intel_ecc_dbe_notification(uint64_t dbe_value); + +/* Secure register access */ +uint32_t intel_secure_reg_read(uint64_t reg_addr, uint32_t *retval); +uint32_t intel_secure_reg_write(uint64_t reg_addr, uint32_t val, + uint32_t *retval); +uint32_t intel_secure_reg_update(uint64_t reg_addr, uint32_t mask, + uint32_t val, uint32_t *retval); + +/* Set RSU update address*/ +uint32_t intel_rsu_update(uint64_t update_address); + +/* Miscellaneous HPS services */ +uint32_t intel_hps_set_bridges(uint64_t enable, uint64_t mask); + +/* SiP Service handler for version 2 */ +uintptr_t sip_smc_handler_v2(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags); + +#endif /* SOCFPGA_SIP_SVC_H */ diff --git a/plat/intel/soc/common/include/socfpga_system_manager.h b/plat/intel/soc/common/include/socfpga_system_manager.h new file mode 100644 index 0000000..f860f57 --- /dev/null +++ b/plat/intel/soc/common/include/socfpga_system_manager.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOCFPGA_SYSTEMMANAGER_H +#define SOCFPGA_SYSTEMMANAGER_H + +#include "socfpga_plat_def.h" + +/* System Manager Register Map */ + +#define SOCFPGA_SYSMGR_SDMMC 0x28 + +/* Field Masking */ + +#define SYSMGR_SDMMC_DRVSEL(x) (((x) & 0x7) << 0) +#define SYSMGR_SDMMC_SMPLSEL(x) (((x) & 0x7) << 4) + +#define IDLE_DATA_LWSOC2FPGA BIT(4) +#define IDLE_DATA_SOC2FPGA BIT(0) +#define IDLE_DATA_MASK (IDLE_DATA_LWSOC2FPGA | IDLE_DATA_SOC2FPGA) + +#define SYSMGR_QSPI_REFCLK_MASK GENMASK(27, 0) + +#define SYSMGR_ECC_OCRAM_MASK BIT(1) +#define SYSMGR_ECC_DDR0_MASK BIT(16) +#define SYSMGR_ECC_DDR1_MASK BIT(17) + +/* Macros */ + +#define SOCFPGA_SYSMGR(_reg) (SOCFPGA_SYSMGR_REG_BASE \ + + (SOCFPGA_SYSMGR_##_reg)) + +#endif /* SOCFPGA_SYSTEMMANAGER_H */ diff --git a/plat/intel/soc/common/include/socfpga_vab.h b/plat/intel/soc/common/include/socfpga_vab.h new file mode 100644 index 0000000..f6081df --- /dev/null +++ b/plat/intel/soc/common/include/socfpga_vab.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2020-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOCFPGA_VAB_H +#define SOCFPGA_VAB_H + + +#include <stdlib.h> +#include "socfpga_fcs.h" + +struct fcs_hps_vab_certificate_data { + uint32_t vab_cert_magic_num; /* offset 0x10 */ + uint32_t flags; + uint8_t rsvd0_1[8]; + uint8_t fcs_sha384[FCS_SHA384_WORD_SIZE]; /* offset 0x20 */ +}; + +struct fcs_hps_vab_certificate_header { + uint32_t cert_magic_num; /* offset 0 */ + uint32_t cert_data_sz; + uint32_t cert_ver; + uint32_t cert_type; + struct fcs_hps_vab_certificate_data d; /* offset 0x10 */ + /* keychain starts at offset 0x50 */ +}; + +/* Macros */ +#define IS_BYTE_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) +#define BYTE_ALIGN(x, a) __ALIGN_MASK((x), (typeof(x))(a)-1) +#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) +#define VAB_CERT_HEADER_SIZE sizeof(struct fcs_hps_vab_certificate_header) +#define VAB_CERT_MAGIC_OFFSET offsetof(struct fcs_hps_vab_certificate_header, d) +#define VAB_CERT_FIT_SHA384_OFFSET offsetof(struct fcs_hps_vab_certificate_data, fcs_sha384[0]) +#define SDM_CERT_MAGIC_NUM 0x25D04E7F +#define CHUNKSZ_PER_WD_RESET (256 * 1024) + +/* SHA related return Macro */ +#define ENOVABIMG 1 /* VAB certificate not available */ +#define EIMGERR 2 /* Image format/size not valid */ +#define ETIMEOUT 3 /* Execution timeout */ +#define EPROCESS 4 /* Process error */ +#define EKEYREJECTED 5/* Key was rejected by service */ + +/* Function Definitions */ +static size_t get_img_size(uint8_t *img_buf, size_t img_buf_sz); +int socfpga_vendor_authentication(void **p_image, size_t *p_size); +static uint32_t get_unaligned_le32(const void *p); +void sha384_csum_wd(const unsigned char *input, unsigned int ilen, +unsigned char *output, unsigned int chunk_sz); + +#endif diff --git a/plat/intel/soc/common/sip/socfpga_sip_ecc.c b/plat/intel/soc/common/sip/socfpga_sip_ecc.c new file mode 100644 index 0000000..c444d48 --- /dev/null +++ b/plat/intel/soc/common/sip/socfpga_sip_ecc.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2020-2023, ARM Limited and Contributors. All rights reserved. + */ + +#include <assert.h> +#include <common/debug.h> +#include <common/runtime_svc.h> +#include <lib/mmio.h> +#include <tools_share/uuid.h> + +#include "socfpga_fcs.h" +#include "socfpga_mailbox.h" +#include "socfpga_plat_def.h" +#include "socfpga_reset_manager.h" +#include "socfpga_sip_svc.h" +#include "socfpga_system_manager.h" + + +uint32_t intel_ecc_dbe_notification(uint64_t dbe_value) +{ + dbe_value &= WARM_RESET_WFI_FLAG; + + /* Trap CPUs in WFI if warm reset flag is set */ + if (dbe_value > 0) { + while (1) { + wfi(); + } + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +bool cold_reset_for_ecc_dbe(void) +{ + uint32_t dbe_int_status; + + dbe_int_status = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_8)); + + /* Trigger cold reset only for error in critical memory (DDR/OCRAM) */ + dbe_int_status &= SYSMGR_ECC_DBE_COLD_RST_MASK; + + if (dbe_int_status > 0) { + return true; + } + + return false; +} diff --git a/plat/intel/soc/common/sip/socfpga_sip_fcs.c b/plat/intel/soc/common/sip/socfpga_sip_fcs.c new file mode 100644 index 0000000..beaa720 --- /dev/null +++ b/plat/intel/soc/common/sip/socfpga_sip_fcs.c @@ -0,0 +1,2313 @@ +/* + * Copyright (c) 2020-2022, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch_helpers.h> +#include <lib/mmio.h> + +#include "socfpga_fcs.h" +#include "socfpga_mailbox.h" +#include "socfpga_sip_svc.h" + +/* FCS static variables */ +static fcs_crypto_service_aes_data fcs_aes_init_payload; +static fcs_crypto_service_data fcs_sha_get_digest_param; +static fcs_crypto_service_data fcs_sha_mac_verify_param; +static fcs_crypto_service_data fcs_ecdsa_hash_sign_param; +static fcs_crypto_service_data fcs_ecdsa_hash_sig_verify_param; +static fcs_crypto_service_data fcs_sha2_data_sign_param; +static fcs_crypto_service_data fcs_sha2_data_sig_verify_param; +static fcs_crypto_service_data fcs_ecdsa_get_pubkey_param; +static fcs_crypto_service_data fcs_ecdh_request_param; + +bool is_size_4_bytes_aligned(uint32_t size) +{ + if ((size % MBOX_WORD_BYTE) != 0U) { + return false; + } else { + return true; + } +} + +static bool is_8_bytes_aligned(uint32_t data) +{ + if ((data % (MBOX_WORD_BYTE * 2U)) != 0U) { + return false; + } else { + return true; + } +} + +static bool is_32_bytes_aligned(uint32_t data) +{ + if ((data % (8U * MBOX_WORD_BYTE)) != 0U) { + return false; + } else { + return true; + } +} + +static int intel_fcs_crypto_service_init(uint32_t session_id, + uint32_t context_id, uint32_t key_id, + uint32_t param_size, uint64_t param_data, + fcs_crypto_service_data *data_addr, + uint32_t *mbox_error) +{ + if (mbox_error == NULL) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (param_size != 4) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + memset(data_addr, 0, sizeof(fcs_crypto_service_data)); + + data_addr->session_id = session_id; + data_addr->context_id = context_id; + data_addr->key_id = key_id; + data_addr->crypto_param_size = param_size; + data_addr->crypto_param = param_data; + + data_addr->is_updated = 0; + + *mbox_error = 0; + + return INTEL_SIP_SMC_STATUS_OK; +} + +uint32_t intel_fcs_random_number_gen(uint64_t addr, uint64_t *ret_size, + uint32_t *mbox_error) +{ + int status; + unsigned int i; + unsigned int resp_len = FCS_RANDOM_WORD_SIZE; + uint32_t random_data[FCS_RANDOM_WORD_SIZE] = {0U}; + + if (!is_address_in_ddr_range(addr, FCS_RANDOM_BYTE_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_RANDOM_GEN, NULL, 0U, + CMD_CASUAL, random_data, &resp_len); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + if (resp_len != FCS_RANDOM_WORD_SIZE) { + *mbox_error = GENERIC_RESPONSE_ERROR; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *ret_size = FCS_RANDOM_BYTE_SIZE; + + for (i = 0U; i < FCS_RANDOM_WORD_SIZE; i++) { + mmio_write_32(addr, random_data[i]); + addr += MBOX_WORD_BYTE; + } + + flush_dcache_range(addr - *ret_size, *ret_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_random_number_gen_ext(uint32_t session_id, uint32_t context_id, + uint32_t size, uint32_t *send_id) +{ + int status; + uint32_t payload_size; + uint32_t crypto_header; + + if (size > (FCS_RANDOM_EXT_MAX_WORD_SIZE * + MBOX_WORD_BYTE) || size == 0U) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_size_4_bytes_aligned(size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + crypto_header = (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_FINALIZE) << + FCS_CS_FIELD_FLAG_OFFSET; + + fcs_rng_payload payload = { + session_id, + context_id, + crypto_header, + size + }; + + payload_size = sizeof(payload) / MBOX_WORD_BYTE; + + status = mailbox_send_cmd_async(send_id, MBOX_FCS_RANDOM_GEN, + (uint32_t *) &payload, 
payload_size, + CMD_INDIRECT); + + if (status < 0) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +uint32_t intel_fcs_send_cert(uint64_t addr, uint64_t size, + uint32_t *send_id) +{ + int status; + + if (!is_address_in_ddr_range(addr, size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_size_4_bytes_aligned(size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd_async(send_id, MBOX_CMD_VAB_SRC_CERT, + (uint32_t *)addr, size / MBOX_WORD_BYTE, + CMD_DIRECT); + + flush_dcache_range(addr, size); + + if (status < 0) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +uint32_t intel_fcs_get_provision_data(uint32_t *send_id) +{ + int status; + + status = mailbox_send_cmd_async(send_id, MBOX_FCS_GET_PROVISION, + NULL, 0U, CMD_DIRECT); + + if (status < 0) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +uint32_t intel_fcs_cntr_set_preauth(uint8_t counter_type, int32_t counter_value, + uint32_t test_bit, uint32_t *mbox_error) +{ + int status; + uint32_t first_word; + uint32_t payload_size; + + if ((test_bit != MBOX_TEST_BIT) && + (test_bit != 0)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if ((counter_type < FCS_BIG_CNTR_SEL) || + (counter_type > FCS_SVN_CNTR_3_SEL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if ((counter_type == FCS_BIG_CNTR_SEL) && + (counter_value > FCS_BIG_CNTR_VAL_MAX)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if ((counter_type >= FCS_SVN_CNTR_0_SEL) && + (counter_type <= FCS_SVN_CNTR_3_SEL) && + (counter_value > FCS_SVN_CNTR_VAL_MAX)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + first_word = test_bit | counter_type; + fcs_cntr_set_preauth_payload payload = { + first_word, + counter_value + }; + + payload_size = sizeof(payload) / MBOX_WORD_BYTE; + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_CNTR_SET_PREAUTH, + (uint32_t *) &payload, payload_size, + CMD_CASUAL, NULL, NULL); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +uint32_t intel_fcs_encryption(uint32_t src_addr, uint32_t src_size, + uint32_t dst_addr, uint32_t dst_size, uint32_t *send_id) +{ + int status; + uint32_t load_size; + + fcs_encrypt_payload payload = { + FCS_ENCRYPTION_DATA_0, + src_addr, + src_size, + dst_addr, + dst_size }; + load_size = sizeof(payload) / MBOX_WORD_BYTE; + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_size_4_bytes_aligned(src_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd_async(send_id, MBOX_FCS_ENCRYPT_REQ, + (uint32_t *) &payload, load_size, + CMD_INDIRECT); + inv_dcache_range(dst_addr, dst_size); + + if (status < 0) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +uint32_t intel_fcs_decryption(uint32_t src_addr, uint32_t src_size, + uint32_t dst_addr, uint32_t dst_size, uint32_t *send_id) +{ + int status; + uint32_t load_size; + uintptr_t id_offset; + + inv_dcache_range(src_addr, src_size); /* flush cache before mmio read to avoid reading old values */ + id_offset = src_addr + FCS_OWNER_ID_OFFSET; + fcs_decrypt_payload payload = { + FCS_DECRYPTION_DATA_0, + {mmio_read_32(id_offset), + mmio_read_32(id_offset + MBOX_WORD_BYTE)}, + src_addr, + src_size, + dst_addr, + dst_size }; + load_size = sizeof(payload) / MBOX_WORD_BYTE; + + if 
(!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_size_4_bytes_aligned(src_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd_async(send_id, MBOX_FCS_DECRYPT_REQ, + (uint32_t *) &payload, load_size, + CMD_INDIRECT); + inv_dcache_range(dst_addr, dst_size); + + if (status < 0) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_encryption_ext(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint32_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error) +{ + int status; + uint32_t payload_size; + uint32_t resp_len = FCS_CRYPTION_RESP_WORD_SIZE; + uint32_t resp_data[FCS_CRYPTION_RESP_WORD_SIZE] = {0U}; + + if ((dst_size == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_size_4_bytes_aligned(src_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + fcs_encrypt_ext_payload payload = { + session_id, + context_id, + FCS_CRYPTION_CRYPTO_HEADER, + src_addr, + src_size, + dst_addr, + *dst_size + }; + + payload_size = sizeof(payload) / MBOX_WORD_BYTE; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ENCRYPT_REQ, + (uint32_t *) &payload, payload_size, + CMD_CASUAL, resp_data, &resp_len); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + if (resp_len != FCS_CRYPTION_RESP_WORD_SIZE) { + *mbox_error = MBOX_RET_ERROR; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_data[FCS_CRYPTION_RESP_SIZE_OFFSET]; + inv_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_decryption_ext(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint32_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error) +{ + int status; + uintptr_t id_offset; + uint32_t payload_size; + uint32_t resp_len = FCS_CRYPTION_RESP_WORD_SIZE; + uint32_t resp_data[FCS_CRYPTION_RESP_WORD_SIZE] = {0U}; + + if ((dst_size == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_size_4_bytes_aligned(src_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + inv_dcache_range(src_addr, src_size); /* flush cache before mmio read to avoid reading old values */ + id_offset = src_addr + FCS_OWNER_ID_OFFSET; + fcs_decrypt_ext_payload payload = { + session_id, + context_id, + FCS_CRYPTION_CRYPTO_HEADER, + {mmio_read_32(id_offset), + mmio_read_32(id_offset + MBOX_WORD_BYTE)}, + src_addr, + src_size, + dst_addr, + *dst_size + }; + + payload_size = sizeof(payload) / MBOX_WORD_BYTE; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_DECRYPT_REQ, + (uint32_t *) &payload, payload_size, + CMD_CASUAL, resp_data, &resp_len); + + if (status == MBOX_RET_SDOS_DECRYPTION_ERROR_102 || + status == MBOX_RET_SDOS_DECRYPTION_ERROR_103) { + *mbox_error = -status; + } else if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + if (resp_len != FCS_CRYPTION_RESP_WORD_SIZE) { + *mbox_error = MBOX_RET_ERROR; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_data[FCS_CRYPTION_RESP_SIZE_OFFSET]; + inv_dcache_range(dst_addr, 
*dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_sigma_teardown(uint32_t session_id, uint32_t *mbox_error) +{ + int status; + + if ((session_id != PSGSIGMA_SESSION_ID_ONE) && + (session_id != PSGSIGMA_UNKNOWN_SESSION)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + psgsigma_teardown_msg message = { + RESERVED_AS_ZERO, + PSGSIGMA_TEARDOWN_MAGIC, + session_id + }; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_PSG_SIGMA_TEARDOWN, + (uint32_t *) &message, sizeof(message) / MBOX_WORD_BYTE, + CMD_CASUAL, NULL, NULL); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_chip_id(uint32_t *id_low, uint32_t *id_high, uint32_t *mbox_error) +{ + int status; + uint32_t load_size; + uint32_t chip_id[2]; + + load_size = sizeof(chip_id) / MBOX_WORD_BYTE; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_GET_CHIPID, NULL, + 0U, CMD_CASUAL, (uint32_t *) chip_id, &load_size); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *id_low = chip_id[0]; + *id_high = chip_id[1]; + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_attestation_subkey(uint64_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error) +{ + int status; + uint32_t send_size = src_size / MBOX_WORD_BYTE; + uint32_t ret_size = *dst_size / MBOX_WORD_BYTE; + + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_ATTESTATION_SUBKEY, + (uint32_t *) src_addr, send_size, CMD_CASUAL, + (uint32_t *) dst_addr, &ret_size); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = ret_size * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_get_measurement(uint64_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error) +{ + int status; + uint32_t send_size = src_size / MBOX_WORD_BYTE; + uint32_t ret_size = *dst_size / MBOX_WORD_BYTE; + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_MEASUREMENT, + (uint32_t *) src_addr, send_size, CMD_CASUAL, + (uint32_t *) dst_addr, &ret_size); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = ret_size * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +uint32_t intel_fcs_get_rom_patch_sha384(uint64_t addr, uint64_t *ret_size, + uint32_t *mbox_error) +{ + int status; + unsigned int resp_len = FCS_SHA384_WORD_SIZE; + + if (!is_address_in_ddr_range(addr, FCS_SHA384_BYTE_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_ROM_PATCH_SHA384, NULL, 0U, + CMD_CASUAL, (uint32_t *) addr, &resp_len); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + if (resp_len != FCS_SHA384_WORD_SIZE) { + *mbox_error = GENERIC_RESPONSE_ERROR; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *ret_size = FCS_SHA384_BYTE_SIZE; + + flush_dcache_range(addr, *ret_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_get_attestation_cert(uint32_t cert_request, uint64_t dst_addr, + uint32_t *dst_size, uint32_t 
*mbox_error) +{ + int status; + uint32_t ret_size = *dst_size / MBOX_WORD_BYTE; + + if (mbox_error == NULL) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (cert_request < FCS_ATTEST_FIRMWARE_CERT || + cert_request > FCS_ATTEST_CERT_MAX_REQ_PARAM) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_ATTESTATION_CERT, + (uint32_t *) &cert_request, 1U, CMD_CASUAL, + (uint32_t *) dst_addr, &ret_size); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = ret_size * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_create_cert_on_reload(uint32_t cert_request, + uint32_t *mbox_error) +{ + int status; + + if (mbox_error == NULL) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (cert_request < FCS_ATTEST_FIRMWARE_CERT || + cert_request > FCS_ATTEST_CERT_MAX_REQ_PARAM) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CREATE_CERT_ON_RELOAD, + (uint32_t *) &cert_request, 1U, CMD_CASUAL, + NULL, NULL); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_open_crypto_service_session(uint32_t *session_id, + uint32_t *mbox_error) +{ + int status; + uint32_t resp_len = 1U; + + if ((session_id == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_OPEN_CS_SESSION, + NULL, 0U, CMD_CASUAL, session_id, &resp_len); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_close_crypto_service_session(uint32_t session_id, + uint32_t *mbox_error) +{ + int status; + + if (mbox_error == NULL) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_CLOSE_CS_SESSION, + &session_id, 1U, CMD_CASUAL, NULL, NULL); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_import_crypto_service_key(uint64_t src_addr, uint32_t src_size, + uint32_t *send_id) +{ + int status; + + if (src_size > (FCS_CS_KEY_OBJ_MAX_WORD_SIZE * + MBOX_WORD_BYTE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + status = mailbox_send_cmd_async(send_id, MBOX_FCS_IMPORT_CS_KEY, + (uint32_t *)src_addr, src_size / MBOX_WORD_BYTE, + CMD_INDIRECT); + + if (status < 0) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_export_crypto_service_key(uint32_t session_id, uint32_t key_id, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error) +{ + int status; + uint32_t i; + uint32_t payload_size; + uint32_t resp_len = FCS_CS_KEY_OBJ_MAX_WORD_SIZE; + uint32_t resp_data[FCS_CS_KEY_OBJ_MAX_WORD_SIZE] = {0U}; + uint32_t op_status = 0U; + + if ((dst_size == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + fcs_cs_key_payload payload = { + session_id, + RESERVED_AS_ZERO, + RESERVED_AS_ZERO, + key_id + }; + + payload_size = sizeof(payload) / MBOX_WORD_BYTE; + + status = 
mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_EXPORT_CS_KEY, + (uint32_t *) &payload, payload_size, + CMD_CASUAL, resp_data, &resp_len); + + if (resp_len > 0) { + op_status = resp_data[0] & FCS_CS_KEY_RESP_STATUS_MASK; + } + + if (status < 0) { + *mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET); + return INTEL_SIP_SMC_STATUS_ERROR; + } + + if (resp_len > 1) { + + /* Export key object is start at second response data */ + *dst_size = (resp_len - 1) * MBOX_WORD_BYTE; + + for (i = 1U; i < resp_len; i++) { + mmio_write_32(dst_addr, resp_data[i]); + dst_addr += MBOX_WORD_BYTE; + } + + flush_dcache_range(dst_addr - *dst_size, *dst_size); + + } else { + + /* Unexpected response, missing key object in response */ + *mbox_error = MBOX_RET_ERROR; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_remove_crypto_service_key(uint32_t session_id, uint32_t key_id, + uint32_t *mbox_error) +{ + int status; + uint32_t payload_size; + uint32_t resp_len = 1U; + uint32_t resp_data = 0U; + uint32_t op_status = 0U; + + if (mbox_error == NULL) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + fcs_cs_key_payload payload = { + session_id, + RESERVED_AS_ZERO, + RESERVED_AS_ZERO, + key_id + }; + + payload_size = sizeof(payload) / MBOX_WORD_BYTE; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_REMOVE_CS_KEY, + (uint32_t *) &payload, payload_size, + CMD_CASUAL, &resp_data, &resp_len); + + if (resp_len > 0) { + op_status = resp_data & FCS_CS_KEY_RESP_STATUS_MASK; + } + + if (status < 0) { + *mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET); + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_get_crypto_service_key_info(uint32_t session_id, uint32_t key_id, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error) +{ + int status; + uint32_t payload_size; + uint32_t resp_len = FCS_CS_KEY_INFO_MAX_WORD_SIZE; + uint32_t op_status = 0U; + + if ((dst_size == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + fcs_cs_key_payload payload = { + session_id, + RESERVED_AS_ZERO, + RESERVED_AS_ZERO, + key_id + }; + + payload_size = sizeof(payload) / MBOX_WORD_BYTE; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_GET_CS_KEY_INFO, + (uint32_t *) &payload, payload_size, + CMD_CASUAL, (uint32_t *) dst_addr, &resp_len); + + if (resp_len > 0) { + inv_dcache_range(dst_addr, (resp_len * MBOX_WORD_BYTE)); /* flush cache before mmio read to avoid reading old values */ + op_status = mmio_read_32(dst_addr) & + FCS_CS_KEY_RESP_STATUS_MASK; + } + + if (status < 0) { + *mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET); + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_get_digest_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error) +{ + return intel_fcs_crypto_service_init(session_id, context_id, + key_id, param_size, param_data, + (void *) &fcs_sha_get_digest_param, + mbox_error); +} + +int intel_fcs_get_digest_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint8_t is_finalised, + uint32_t *mbox_error) +{ + int status; + uint32_t i; + uint32_t flag; + 
uint32_t crypto_header; + uint32_t resp_len; + uint32_t payload[FCS_GET_DIGEST_CMD_MAX_WORD_SIZE] = {0U}; + + if (dst_size == NULL || mbox_error == NULL) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_sha_get_digest_param.session_id != session_id || + fcs_sha_get_digest_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + /* Source data must be 8 bytes aligned */ + if (!is_8_bytes_aligned(src_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare crypto header */ + flag = 0; + + if (fcs_sha_get_digest_param.is_updated) { + fcs_sha_get_digest_param.crypto_param_size = 0; + } else { + flag |= FCS_CS_FIELD_FLAG_INIT; + } + + if (is_finalised != 0U) { + flag |= FCS_CS_FIELD_FLAG_FINALIZE; + } else { + flag |= FCS_CS_FIELD_FLAG_UPDATE; + fcs_sha_get_digest_param.is_updated = 1; + } + + crypto_header = ((flag << FCS_CS_FIELD_FLAG_OFFSET) | + (fcs_sha_get_digest_param.crypto_param_size & + FCS_CS_FIELD_SIZE_MASK)); + + /* Prepare command payload */ + i = 0; + payload[i] = fcs_sha_get_digest_param.session_id; + i++; + payload[i] = fcs_sha_get_digest_param.context_id; + i++; + payload[i] = crypto_header; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_INIT) { + payload[i] = fcs_sha_get_digest_param.key_id; + i++; + /* Crypto parameters */ + payload[i] = fcs_sha_get_digest_param.crypto_param + & INTEL_SIP_SMC_FCS_SHA_MODE_MASK; + payload[i] |= ((fcs_sha_get_digest_param.crypto_param + >> INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET) + & INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK) + << FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET; + i++; + } + /* Data source address and size */ + payload[i] = src_addr; + i++; + payload[i] = src_size; + i++; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_GET_DIGEST_REQ, + payload, i, CMD_CASUAL, + (uint32_t *) dst_addr, &resp_len); + + if (is_finalised != 0U) { + memset((void *)&fcs_sha_get_digest_param, 0, + sizeof(fcs_crypto_service_data)); + } + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_get_digest_smmu_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint8_t is_finalised, + uint32_t *mbox_error, uint32_t *send_id) +{ + int status; + uint32_t i; + uint32_t flag; + uint32_t crypto_header; + uint32_t resp_len; + uint32_t payload[FCS_GET_DIGEST_CMD_MAX_WORD_SIZE] = {0U}; + + /* Source data must be 8 bytes aligned */ + if (dst_size == NULL || mbox_error == NULL || + !is_8_bytes_aligned(src_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_sha_get_digest_param.session_id != session_id || + fcs_sha_get_digest_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare crypto header */ + flag = 0; + + if (fcs_sha_get_digest_param.is_updated) { + fcs_sha_get_digest_param.crypto_param_size = 0; + } else { + flag |= FCS_CS_FIELD_FLAG_INIT; + } + + if (is_finalised != 0U) { + flag |= 
FCS_CS_FIELD_FLAG_FINALIZE; + } else { + flag |= FCS_CS_FIELD_FLAG_UPDATE; + fcs_sha_get_digest_param.is_updated = 1; + } + + crypto_header = ((flag << FCS_CS_FIELD_FLAG_OFFSET) | + (fcs_sha_get_digest_param.crypto_param_size & + FCS_CS_FIELD_SIZE_MASK)); + + /* Prepare command payload */ + i = 0; + payload[i] = fcs_sha_get_digest_param.session_id; + i++; + payload[i] = fcs_sha_get_digest_param.context_id; + i++; + payload[i] = crypto_header; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_INIT) { + payload[i] = fcs_sha_get_digest_param.key_id; + i++; + /* Crypto parameters */ + payload[i] = fcs_sha_get_digest_param.crypto_param + & INTEL_SIP_SMC_FCS_SHA_MODE_MASK; + payload[i] |= ((fcs_sha_get_digest_param.crypto_param + >> INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET) + & INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK) + << FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET; + i++; + } + /* Data source address and size */ + payload[i] = src_addr; + i++; + payload[i] = src_size; + i++; + + status = mailbox_send_cmd_async(send_id, MBOX_FCS_GET_DIGEST_REQ, + payload, i, CMD_INDIRECT); + + if (is_finalised != 0U) { + memset((void *)&fcs_sha_get_digest_param, 0, + sizeof(fcs_crypto_service_data)); + } + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_mac_verify_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error) +{ + return intel_fcs_crypto_service_init(session_id, context_id, + key_id, param_size, param_data, + (void *) &fcs_sha_mac_verify_param, + mbox_error); +} + +int intel_fcs_mac_verify_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint32_t data_size, + uint8_t is_finalised, uint32_t *mbox_error) +{ + int status; + uint32_t i; + uint32_t flag; + uint32_t crypto_header; + uint32_t resp_len; + uint32_t payload[FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE] = {0U}; + uintptr_t mac_offset; + uint32_t dst_size_check = 0; + + if (dst_size == NULL || mbox_error == NULL) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_sha_mac_verify_param.session_id != session_id || + fcs_sha_mac_verify_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (data_size > src_size) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_size_4_bytes_aligned(src_size) || + !is_8_bytes_aligned(data_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + dst_size_check = *dst_size; + if ((dst_size_check > FCS_MAX_DATA_SIZE || + dst_size_check < FCS_MIN_DATA_SIZE) || + (src_size > FCS_MAX_DATA_SIZE || + src_size < FCS_MIN_DATA_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare crypto header */ + flag = 0; + + if (fcs_sha_mac_verify_param.is_updated) { + fcs_sha_mac_verify_param.crypto_param_size = 0; + } else { + flag |= FCS_CS_FIELD_FLAG_INIT; + } + + if (is_finalised) { + flag |= FCS_CS_FIELD_FLAG_FINALIZE; + } else { + flag |= FCS_CS_FIELD_FLAG_UPDATE; + fcs_sha_mac_verify_param.is_updated = 1; + } + + crypto_header = ((flag << FCS_CS_FIELD_FLAG_OFFSET) | + (fcs_sha_mac_verify_param.crypto_param_size & + FCS_CS_FIELD_SIZE_MASK)); 
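+
+ /*
+ * crypto_header packs the INIT/UPDATE/FINALIZE stage flags into the
+ * flag field and the crypto parameter size into the size field; it
+ * is sent as the third word of the MAC verify mailbox command
+ * assembled below.
+ */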
+ + /* Prepare command payload */ + i = 0; + payload[i] = fcs_sha_mac_verify_param.session_id; + i++; + payload[i] = fcs_sha_mac_verify_param.context_id; + i++; + payload[i] = crypto_header; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_INIT) { + payload[i] = fcs_sha_mac_verify_param.key_id; + i++; + /* Crypto parameters */ + payload[i] = ((fcs_sha_mac_verify_param.crypto_param + >> INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET) + & INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK) + << FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET; + i++; + } + /* Data source address and size */ + payload[i] = src_addr; + i++; + payload[i] = data_size; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_FINALIZE) { + /* Copy mac data to command */ + mac_offset = src_addr + data_size; + + if ((i + ((src_size - data_size) / MBOX_WORD_BYTE)) > + FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + memcpy((uint8_t *) &payload[i], (uint8_t *) mac_offset, + src_size - data_size); + + i += (src_size - data_size) / MBOX_WORD_BYTE; + } + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_MAC_VERIFY_REQ, + payload, i, CMD_CASUAL, + (uint32_t *) dst_addr, &resp_len); + + if (is_finalised) { + memset((void *)&fcs_sha_mac_verify_param, 0, + sizeof(fcs_crypto_service_data)); + } + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_mac_verify_smmu_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint32_t data_size, + uint8_t is_finalised, uint32_t *mbox_error, + uint32_t *send_id) +{ + int status; + uint32_t i; + uint32_t flag; + uint32_t crypto_header; + uint32_t resp_len; + uint32_t payload[FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE] = {0U}; + uintptr_t mac_offset; + uint32_t dst_size_check = 0; + /* + * Source data must be 4 bytes aligned + * User data must be 8 bytes aligned + */ + if (dst_size == NULL || mbox_error == NULL || + !is_size_4_bytes_aligned(src_size) || + !is_8_bytes_aligned(data_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (data_size > src_size) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_sha_mac_verify_param.session_id != session_id || + fcs_sha_mac_verify_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + dst_size_check = *dst_size; + if ((dst_size_check > FCS_MAX_DATA_SIZE || + dst_size_check < FCS_MIN_DATA_SIZE) || + (src_size > FCS_MAX_DATA_SIZE || + src_size < FCS_MIN_DATA_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare crypto header */ + flag = 0; + + if (fcs_sha_mac_verify_param.is_updated) { + fcs_sha_mac_verify_param.crypto_param_size = 0; + } else { + flag |= FCS_CS_FIELD_FLAG_INIT; + } + + if (is_finalised) { + flag |= FCS_CS_FIELD_FLAG_FINALIZE; + } else { + flag |= FCS_CS_FIELD_FLAG_UPDATE; + fcs_sha_mac_verify_param.is_updated = 1; + } + + crypto_header = ((flag << FCS_CS_FIELD_FLAG_OFFSET) | + (fcs_sha_mac_verify_param.crypto_param_size & + FCS_CS_FIELD_SIZE_MASK)); + + /* Prepare command payload */ + i = 0; + payload[i] = fcs_sha_mac_verify_param.session_id; + i++; + payload[i] = 
fcs_sha_mac_verify_param.context_id; + i++; + payload[i] = crypto_header; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_INIT) { + payload[i] = fcs_sha_mac_verify_param.key_id; + i++; + /* Crypto parameters */ + payload[i] = ((fcs_sha_mac_verify_param.crypto_param + >> INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET) + & INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK) + << FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET; + i++; + } + /* Data source address and size */ + payload[i] = src_addr; + i++; + payload[i] = data_size; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_FINALIZE) { + /* Copy mac data to command + * Using dst_addr (physical address) to store mac_offset + * mac_offset = MAC data + */ + mac_offset = dst_addr; + + if ((i + ((src_size - data_size) / MBOX_WORD_BYTE)) > + FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + memcpy((uint8_t *) &payload[i], (uint8_t *) mac_offset, + src_size - data_size); + + memset((void *) dst_addr, 0, *dst_size); + + i += (src_size - data_size) / MBOX_WORD_BYTE; + } + + status = mailbox_send_cmd_async(send_id, MBOX_FCS_MAC_VERIFY_REQ, + payload, i, CMD_INDIRECT); + + if (is_finalised) { + memset((void *)&fcs_sha_mac_verify_param, 0, + sizeof(fcs_crypto_service_data)); + } + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_ecdsa_hash_sign_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error) +{ + return intel_fcs_crypto_service_init(session_id, context_id, + key_id, param_size, param_data, + (void *) &fcs_ecdsa_hash_sign_param, + mbox_error); +} + +int intel_fcs_ecdsa_hash_sign_finalize(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error) +{ + int status; + uint32_t i; + uint32_t payload[FCS_ECDSA_HASH_SIGN_CMD_MAX_WORD_SIZE] = {0U}; + uint32_t resp_len; + uintptr_t hash_data_addr; + uint32_t dst_size_check = 0; + + if ((dst_size == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_ecdsa_hash_sign_param.session_id != session_id || + fcs_ecdsa_hash_sign_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + dst_size_check = *dst_size; + if ((dst_size_check > FCS_MAX_DATA_SIZE || + dst_size_check < FCS_MIN_DATA_SIZE) || + (src_size > FCS_MAX_DATA_SIZE || + src_size < FCS_MIN_DATA_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare command payload */ + /* Crypto header */ + i = 0; + payload[i] = fcs_ecdsa_hash_sign_param.session_id; + i++; + payload[i] = fcs_ecdsa_hash_sign_param.context_id; + + i++; + payload[i] = fcs_ecdsa_hash_sign_param.crypto_param_size + & FCS_CS_FIELD_SIZE_MASK; + payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE + | FCS_CS_FIELD_FLAG_FINALIZE) + << FCS_CS_FIELD_FLAG_OFFSET; + i++; + payload[i] = fcs_ecdsa_hash_sign_param.key_id; + + /* Crypto parameters */ + i++; + payload[i] = fcs_ecdsa_hash_sign_param.crypto_param + & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK; + + /* Hash Data */ + i++; + hash_data_addr = src_addr; + + if ((i + 
((src_size) / MBOX_WORD_BYTE)) > + FCS_ECDSA_HASH_SIGN_CMD_MAX_WORD_SIZE) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + memcpy((uint8_t *) &payload[i], (uint8_t *) hash_data_addr, + src_size); + + i += src_size / MBOX_WORD_BYTE; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_HASH_SIGN_REQ, + payload, i, CMD_CASUAL, (uint32_t *) dst_addr, + &resp_len); + + memset((void *) &fcs_ecdsa_hash_sign_param, + 0, sizeof(fcs_crypto_service_data)); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_ecdsa_hash_sig_verify_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error) +{ + return intel_fcs_crypto_service_init(session_id, context_id, + key_id, param_size, param_data, + (void *) &fcs_ecdsa_hash_sig_verify_param, + mbox_error); +} + +int intel_fcs_ecdsa_hash_sig_verify_finalize(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error) +{ + int status; + uint32_t i = 0; + uint32_t payload[FCS_ECDSA_HASH_SIG_VERIFY_CMD_MAX_WORD_SIZE] = {0U}; + uint32_t resp_len; + uintptr_t hash_sig_pubkey_addr; + uint32_t dst_size_check = 0; + + if ((dst_size == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_ecdsa_hash_sig_verify_param.session_id != session_id || + fcs_ecdsa_hash_sig_verify_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + dst_size_check = *dst_size; + if ((dst_size_check > FCS_MAX_DATA_SIZE || + dst_size_check < FCS_MIN_DATA_SIZE) || + (src_size > FCS_MAX_DATA_SIZE || + src_size < FCS_MIN_DATA_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare command payload */ + /* Crypto header */ + i = 0; + payload[i] = fcs_ecdsa_hash_sig_verify_param.session_id; + + i++; + payload[i] = fcs_ecdsa_hash_sig_verify_param.context_id; + + i++; + payload[i] = fcs_ecdsa_hash_sig_verify_param.crypto_param_size + & FCS_CS_FIELD_SIZE_MASK; + payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE + | FCS_CS_FIELD_FLAG_FINALIZE) + << FCS_CS_FIELD_FLAG_OFFSET; + + i++; + payload[i] = fcs_ecdsa_hash_sig_verify_param.key_id; + + /* Crypto parameters */ + i++; + payload[i] = fcs_ecdsa_hash_sig_verify_param.crypto_param + & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK; + + /* Hash Data Word, Signature Data Word and Public Key Data word */ + i++; + hash_sig_pubkey_addr = src_addr; + + if ((i + ((src_size) / MBOX_WORD_BYTE)) > + FCS_ECDSA_HASH_SIG_VERIFY_CMD_MAX_WORD_SIZE) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + memcpy((uint8_t *) &payload[i], + (uint8_t *) hash_sig_pubkey_addr, src_size); + + i += (src_size / MBOX_WORD_BYTE); + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_HASH_SIG_VERIFY, + payload, i, CMD_CASUAL, (uint32_t *) dst_addr, + &resp_len); + + memset((void *)&fcs_ecdsa_hash_sig_verify_param, + 0, sizeof(fcs_crypto_service_data)); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int 
intel_fcs_ecdsa_sha2_data_sign_init(uint32_t session_id, + uint32_t context_id, uint32_t key_id, + uint32_t param_size, uint64_t param_data, + uint32_t *mbox_error) +{ + return intel_fcs_crypto_service_init(session_id, context_id, + key_id, param_size, param_data, + (void *) &fcs_sha2_data_sign_param, + mbox_error); +} + +int intel_fcs_ecdsa_sha2_data_sign_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint8_t is_finalised, + uint32_t *mbox_error) +{ + int status; + int i; + uint32_t flag; + uint32_t crypto_header; + uint32_t payload[FCS_ECDSA_SHA2_DATA_SIGN_CMD_MAX_WORD_SIZE] = {0U}; + uint32_t resp_len; + + if ((dst_size == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_sha2_data_sign_param.session_id != session_id || + fcs_sha2_data_sign_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + /* Source data must be 8 bytes aligned */ + if (!is_8_bytes_aligned(src_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare crypto header */ + flag = 0; + if (fcs_sha2_data_sign_param.is_updated) { + fcs_sha2_data_sign_param.crypto_param_size = 0; + } else { + flag |= FCS_CS_FIELD_FLAG_INIT; + } + + if (is_finalised != 0U) { + flag |= FCS_CS_FIELD_FLAG_FINALIZE; + } else { + flag |= FCS_CS_FIELD_FLAG_UPDATE; + fcs_sha2_data_sign_param.is_updated = 1; + } + crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) | + fcs_sha2_data_sign_param.crypto_param_size; + + /* Prepare command payload */ + i = 0; + payload[i] = fcs_sha2_data_sign_param.session_id; + i++; + payload[i] = fcs_sha2_data_sign_param.context_id; + i++; + payload[i] = crypto_header; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_INIT) { + payload[i] = fcs_sha2_data_sign_param.key_id; + /* Crypto parameters */ + i++; + payload[i] = fcs_sha2_data_sign_param.crypto_param + & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK; + i++; + } + + /* Data source address and size */ + payload[i] = src_addr; + i++; + payload[i] = src_size; + i++; + status = mailbox_send_cmd(MBOX_JOB_ID, + MBOX_FCS_ECDSA_SHA2_DATA_SIGN_REQ, payload, + i, CMD_CASUAL, (uint32_t *) dst_addr, + &resp_len); + + if (is_finalised != 0U) { + memset((void *)&fcs_sha2_data_sign_param, 0, + sizeof(fcs_crypto_service_data)); + } + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_ecdsa_sha2_data_sign_smmu_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint8_t is_finalised, + uint32_t *mbox_error, uint32_t *send_id) +{ + int status; + int i; + uint32_t flag; + uint32_t crypto_header; + uint32_t payload[FCS_ECDSA_SHA2_DATA_SIGN_CMD_MAX_WORD_SIZE] = {0U}; + uint32_t resp_len; + + /* Source data must be 8 bytes aligned */ + if ((dst_size == NULL) || (mbox_error == NULL || + !is_8_bytes_aligned(src_size))) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_sha2_data_sign_param.session_id != session_id || + fcs_sha2_data_sign_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if 
(!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare crypto header */ + flag = 0; + if (fcs_sha2_data_sign_param.is_updated) { + fcs_sha2_data_sign_param.crypto_param_size = 0; + } else { + flag |= FCS_CS_FIELD_FLAG_INIT; + } + + if (is_finalised != 0U) { + flag |= FCS_CS_FIELD_FLAG_FINALIZE; + } else { + flag |= FCS_CS_FIELD_FLAG_UPDATE; + fcs_sha2_data_sign_param.is_updated = 1; + } + crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) | + fcs_sha2_data_sign_param.crypto_param_size; + + /* Prepare command payload */ + i = 0; + payload[i] = fcs_sha2_data_sign_param.session_id; + i++; + payload[i] = fcs_sha2_data_sign_param.context_id; + i++; + payload[i] = crypto_header; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_INIT) { + payload[i] = fcs_sha2_data_sign_param.key_id; + /* Crypto parameters */ + i++; + payload[i] = fcs_sha2_data_sign_param.crypto_param + & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK; + i++; + } + + /* Data source address and size */ + payload[i] = src_addr; + i++; + payload[i] = src_size; + i++; + + status = mailbox_send_cmd_async(send_id, + MBOX_FCS_ECDSA_SHA2_DATA_SIGN_REQ, + payload, i, CMD_INDIRECT); + + if (is_finalised != 0U) { + memset((void *)&fcs_sha2_data_sign_param, 0, + sizeof(fcs_crypto_service_data)); + } + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_ecdsa_sha2_data_sig_verify_init(uint32_t session_id, + uint32_t context_id, uint32_t key_id, + uint32_t param_size, uint64_t param_data, + uint32_t *mbox_error) +{ + return intel_fcs_crypto_service_init(session_id, context_id, + key_id, param_size, param_data, + (void *) &fcs_sha2_data_sig_verify_param, + mbox_error); +} + +int intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint32_t data_size, + uint8_t is_finalised, uint32_t *mbox_error) +{ + int status; + uint32_t i; + uint32_t flag; + uint32_t crypto_header; + uint32_t payload[FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE] = {0U}; + uint32_t resp_len; + uintptr_t sig_pubkey_offset; + uint32_t dst_size_check = 0; + + if ((dst_size == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_sha2_data_sig_verify_param.session_id != session_id || + fcs_sha2_data_sig_verify_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (data_size > src_size) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_size_4_bytes_aligned(src_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_8_bytes_aligned(data_size) || + !is_8_bytes_aligned(src_addr)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + dst_size_check = *dst_size; + if ((dst_size_check > FCS_MAX_DATA_SIZE || + dst_size_check < FCS_MIN_DATA_SIZE) || + (src_size > FCS_MAX_DATA_SIZE || + src_size < FCS_MIN_DATA_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare crypto header */ + flag = 0; + if (fcs_sha2_data_sig_verify_param.is_updated) + 
fcs_sha2_data_sig_verify_param.crypto_param_size = 0; + else + flag |= FCS_CS_FIELD_FLAG_INIT; + + if (is_finalised != 0U) + flag |= FCS_CS_FIELD_FLAG_FINALIZE; + else { + flag |= FCS_CS_FIELD_FLAG_UPDATE; + fcs_sha2_data_sig_verify_param.is_updated = 1; + } + crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) | + fcs_sha2_data_sig_verify_param.crypto_param_size; + + /* Prepare command payload */ + i = 0; + payload[i] = fcs_sha2_data_sig_verify_param.session_id; + i++; + payload[i] = fcs_sha2_data_sig_verify_param.context_id; + i++; + payload[i] = crypto_header; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_INIT) { + payload[i] = fcs_sha2_data_sig_verify_param.key_id; + i++; + /* Crypto parameters */ + payload[i] = fcs_sha2_data_sig_verify_param.crypto_param + & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK; + i++; + } + + /* Data source address and size */ + payload[i] = src_addr; + i++; + payload[i] = data_size; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_FINALIZE) { + /* Signature + Public Key Data */ + sig_pubkey_offset = src_addr + data_size; + + if ((i + ((src_size - data_size) / MBOX_WORD_BYTE)) > + FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + memcpy((uint8_t *) &payload[i], (uint8_t *) sig_pubkey_offset, + src_size - data_size); + + i += (src_size - data_size) / MBOX_WORD_BYTE; + } + + status = mailbox_send_cmd(MBOX_JOB_ID, + MBOX_FCS_ECDSA_SHA2_DATA_SIGN_VERIFY, payload, i, + CMD_CASUAL, (uint32_t *) dst_addr, &resp_len); + + if (is_finalised != 0U) { + memset((void *) &fcs_sha2_data_sig_verify_param, 0, + sizeof(fcs_crypto_service_data)); + } + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_ecdsa_sha2_data_sig_verify_smmu_update_finalize(uint32_t session_id, + uint32_t context_id, uint32_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t *dst_size, uint32_t data_size, + uint8_t is_finalised, uint32_t *mbox_error, + uint32_t *send_id) +{ + int status; + uint32_t i; + uint32_t flag; + uint32_t crypto_header; + uint32_t payload[FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE] = {0U}; + uint32_t resp_len; + uintptr_t sig_pubkey_offset; + uint32_t dst_size_check = 0; + + /* + * Source data must be 4 bytes aligned + * Source address must be 8 bytes aligned + * User data must be 8 bytes aligned + */ + if ((dst_size == NULL) || (mbox_error == NULL) || + !is_size_4_bytes_aligned(src_size) || + !is_8_bytes_aligned(src_addr) || + !is_8_bytes_aligned(data_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_sha2_data_sig_verify_param.session_id != session_id || + fcs_sha2_data_sig_verify_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (data_size > src_size) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + dst_size_check = *dst_size; + if ((dst_size_check > FCS_MAX_DATA_SIZE || + dst_size_check < FCS_MIN_DATA_SIZE) || + (src_size > FCS_MAX_DATA_SIZE || + src_size < FCS_MIN_DATA_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare crypto header */ + flag = 0; + if (fcs_sha2_data_sig_verify_param.is_updated) + 
fcs_sha2_data_sig_verify_param.crypto_param_size = 0; + else + flag |= FCS_CS_FIELD_FLAG_INIT; + + if (is_finalised != 0U) + flag |= FCS_CS_FIELD_FLAG_FINALIZE; + else { + flag |= FCS_CS_FIELD_FLAG_UPDATE; + fcs_sha2_data_sig_verify_param.is_updated = 1; + } + crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) | + fcs_sha2_data_sig_verify_param.crypto_param_size; + + /* Prepare command payload */ + i = 0; + payload[i] = fcs_sha2_data_sig_verify_param.session_id; + i++; + payload[i] = fcs_sha2_data_sig_verify_param.context_id; + i++; + payload[i] = crypto_header; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_INIT) { + payload[i] = fcs_sha2_data_sig_verify_param.key_id; + i++; + /* Crypto parameters */ + payload[i] = fcs_sha2_data_sig_verify_param.crypto_param + & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK; + i++; + } + + /* Data source address and size */ + payload[i] = src_addr; + i++; + payload[i] = data_size; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_FINALIZE) { + /* Copy mac data to command + * Using dst_addr (physical address) to store sig_pubkey_offset + * sig_pubkey_offset is Signature + Public Key Data + */ + sig_pubkey_offset = dst_addr; + + if ((i + ((src_size - data_size) / MBOX_WORD_BYTE)) > + FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + memcpy((uint8_t *) &payload[i], (uint8_t *) sig_pubkey_offset, + src_size - data_size); + + memset((void *) dst_addr, 0, *dst_size); + + i += (src_size - data_size) / MBOX_WORD_BYTE; + } + + status = mailbox_send_cmd_async(send_id, + MBOX_FCS_ECDSA_SHA2_DATA_SIGN_VERIFY, + payload, i, CMD_INDIRECT); + + if (is_finalised != 0U) { + memset((void *) &fcs_sha2_data_sig_verify_param, 0, + sizeof(fcs_crypto_service_data)); + } + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_ecdsa_get_pubkey_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error) +{ + return intel_fcs_crypto_service_init(session_id, context_id, + key_id, param_size, param_data, + (void *) &fcs_ecdsa_get_pubkey_param, + mbox_error); +} + +int intel_fcs_ecdsa_get_pubkey_finalize(uint32_t session_id, uint32_t context_id, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error) +{ + int status; + int i; + uint32_t crypto_header; + uint32_t ret_size; + uint32_t payload[FCS_ECDSA_GET_PUBKEY_MAX_WORD_SIZE] = {0U}; + + if ((dst_size == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (fcs_ecdsa_get_pubkey_param.session_id != session_id || + fcs_ecdsa_get_pubkey_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + ret_size = *dst_size / MBOX_WORD_BYTE; + + crypto_header = ((FCS_CS_FIELD_FLAG_INIT | + FCS_CS_FIELD_FLAG_UPDATE | + FCS_CS_FIELD_FLAG_FINALIZE) << + FCS_CS_FIELD_FLAG_OFFSET) | + fcs_ecdsa_get_pubkey_param.crypto_param_size; + i = 0; + /* Prepare command payload */ + payload[i] = session_id; + i++; + payload[i] = context_id; + i++; + payload[i] = crypto_header; + i++; + payload[i] = fcs_ecdsa_get_pubkey_param.key_id; + i++; + payload[i] = (uint32_t) fcs_ecdsa_get_pubkey_param.crypto_param & + INTEL_SIP_SMC_FCS_ECC_ALGO_MASK; + i++; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_GET_PUBKEY, + payload, i, CMD_CASUAL, + (uint32_t *) 
dst_addr, &ret_size); + + memset((void *) &fcs_ecdsa_get_pubkey_param, 0, + sizeof(fcs_crypto_service_data)); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = ret_size * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_ecdh_request_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint32_t param_size, + uint64_t param_data, uint32_t *mbox_error) +{ + return intel_fcs_crypto_service_init(session_id, context_id, + key_id, param_size, param_data, + (void *) &fcs_ecdh_request_param, + mbox_error); +} + +int intel_fcs_ecdh_request_finalize(uint32_t session_id, uint32_t context_id, + uint32_t src_addr, uint32_t src_size, + uint64_t dst_addr, uint32_t *dst_size, + uint32_t *mbox_error) +{ + int status; + uint32_t i; + uint32_t payload[FCS_ECDH_REQUEST_CMD_MAX_WORD_SIZE] = {0U}; + uint32_t resp_len; + uintptr_t pubkey; + uint32_t dst_size_check = 0; + + if ((dst_size == NULL) || (mbox_error == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + + if (fcs_ecdh_request_param.session_id != session_id || + fcs_ecdh_request_param.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(src_addr, src_size) || + !is_address_in_ddr_range(dst_addr, *dst_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + dst_size_check = *dst_size; + if ((dst_size_check > FCS_MAX_DATA_SIZE || + dst_size_check < FCS_MIN_DATA_SIZE) || + (src_size > FCS_MAX_DATA_SIZE || + src_size < FCS_MIN_DATA_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = *dst_size / MBOX_WORD_BYTE; + + /* Prepare command payload */ + i = 0; + /* Crypto header */ + payload[i] = fcs_ecdh_request_param.session_id; + i++; + payload[i] = fcs_ecdh_request_param.context_id; + i++; + payload[i] = fcs_ecdh_request_param.crypto_param_size + & FCS_CS_FIELD_SIZE_MASK; + payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE + | FCS_CS_FIELD_FLAG_FINALIZE) + << FCS_CS_FIELD_FLAG_OFFSET; + i++; + payload[i] = fcs_ecdh_request_param.key_id; + i++; + /* Crypto parameters */ + payload[i] = fcs_ecdh_request_param.crypto_param + & INTEL_SIP_SMC_FCS_ECC_ALGO_MASK; + i++; + /* Public key data */ + pubkey = src_addr; + + if ((i + ((src_size) / MBOX_WORD_BYTE)) > + FCS_ECDH_REQUEST_CMD_MAX_WORD_SIZE) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + memcpy((uint8_t *) &payload[i], (uint8_t *) pubkey, src_size); + i += src_size / MBOX_WORD_BYTE; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDH_REQUEST, + payload, i, CMD_CASUAL, (uint32_t *) dst_addr, + &resp_len); + + memset((void *)&fcs_ecdh_request_param, 0, + sizeof(fcs_crypto_service_data)); + + if (status < 0) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *dst_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(dst_addr, *dst_size); + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_aes_crypt_init(uint32_t session_id, uint32_t context_id, + uint32_t key_id, uint64_t param_addr, + uint32_t param_size, uint32_t *mbox_error) +{ + /* ptr to get param_addr value */ + uint64_t *param_addr_ptr; + + param_addr_ptr = (uint64_t *) param_addr; + + /* + * Since crypto param size vary between mode. + * Check ECB here and limit to size 12 bytes + */ + if (((*param_addr_ptr & FCS_CRYPTO_BLOCK_MODE_MASK) == FCS_CRYPTO_ECB_MODE) && + (param_size > FCS_CRYPTO_ECB_BUFFER_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + /* + * Since crypto param size vary between mode. 
+ * Check CBC/CTR here and limit to size 28 bytes + */ + if ((((*param_addr_ptr & FCS_CRYPTO_BLOCK_MODE_MASK) == FCS_CRYPTO_CBC_MODE) || + ((*param_addr_ptr & FCS_CRYPTO_BLOCK_MODE_MASK) == FCS_CRYPTO_CTR_MODE)) && + (param_size > FCS_CRYPTO_CBC_CTR_BUFFER_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (mbox_error == NULL) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + memset((void *)&fcs_aes_init_payload, 0U, sizeof(fcs_aes_init_payload)); + + fcs_aes_init_payload.session_id = session_id; + fcs_aes_init_payload.context_id = context_id; + fcs_aes_init_payload.param_size = param_size; + fcs_aes_init_payload.key_id = key_id; + + memcpy((uint8_t *) fcs_aes_init_payload.crypto_param, + (uint8_t *) param_addr, param_size); + + fcs_aes_init_payload.is_updated = 0; + + *mbox_error = 0; + + return INTEL_SIP_SMC_STATUS_OK; +} + +int intel_fcs_aes_crypt_update_finalize(uint32_t session_id, + uint32_t context_id, uint64_t src_addr, + uint32_t src_size, uint64_t dst_addr, + uint32_t dst_size, uint8_t is_finalised, + uint32_t *send_id) +{ + int status; + int i; + uint32_t flag; + uint32_t crypto_header; + uint32_t fcs_aes_crypt_payload[FCS_AES_CMD_MAX_WORD_SIZE]; + + if (fcs_aes_init_payload.session_id != session_id || + fcs_aes_init_payload.context_id != context_id) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if ((!is_8_bytes_aligned(src_addr)) || + (!is_32_bytes_aligned(src_size)) || + (!is_address_in_ddr_range(src_addr, src_size))) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if ((!is_8_bytes_aligned(dst_addr)) || + (!is_32_bytes_aligned(dst_size))) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if ((dst_size > FCS_AES_MAX_DATA_SIZE || + dst_size < FCS_AES_MIN_DATA_SIZE) || + (src_size > FCS_AES_MAX_DATA_SIZE || + src_size < FCS_AES_MIN_DATA_SIZE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + /* Prepare crypto header*/ + flag = 0; + if (fcs_aes_init_payload.is_updated) { + fcs_aes_init_payload.param_size = 0; + } else { + flag |= FCS_CS_FIELD_FLAG_INIT; + } + + if (is_finalised != 0U) { + flag |= FCS_CS_FIELD_FLAG_FINALIZE; + } else { + flag |= FCS_CS_FIELD_FLAG_UPDATE; + fcs_aes_init_payload.is_updated = 1; + } + crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) | + fcs_aes_init_payload.param_size; + + i = 0U; + fcs_aes_crypt_payload[i] = session_id; + i++; + fcs_aes_crypt_payload[i] = context_id; + i++; + fcs_aes_crypt_payload[i] = crypto_header; + i++; + + if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) & + FCS_CS_FIELD_FLAG_INIT) { + fcs_aes_crypt_payload[i] = fcs_aes_init_payload.key_id; + i++; + + if ((i + ((fcs_aes_init_payload.param_size) / MBOX_WORD_BYTE)) > + FCS_AES_CMD_MAX_WORD_SIZE) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + memcpy((uint8_t *) &fcs_aes_crypt_payload[i], + (uint8_t *) fcs_aes_init_payload.crypto_param, + fcs_aes_init_payload.param_size); + + i += fcs_aes_init_payload.param_size / MBOX_WORD_BYTE; + } + + fcs_aes_crypt_payload[i] = (uint32_t) src_addr; + i++; + fcs_aes_crypt_payload[i] = src_size; + i++; + fcs_aes_crypt_payload[i] = (uint32_t) dst_addr; + i++; + fcs_aes_crypt_payload[i] = dst_size; + i++; + + status = mailbox_send_cmd_async(send_id, MBOX_FCS_AES_CRYPT_REQ, + fcs_aes_crypt_payload, i, + CMD_INDIRECT); + + if (is_finalised != 0U) { + memset((void *)&fcs_aes_init_payload, 0, + sizeof(fcs_aes_init_payload)); + } + + if (status < 0U) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} diff --git a/plat/intel/soc/common/soc/socfpga_emac.c b/plat/intel/soc/common/soc/socfpga_emac.c new file 
mode 100644 index 0000000..49c92c3 --- /dev/null +++ b/plat/intel/soc/common/soc/socfpga_emac.c @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2020-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> +#include <platform_def.h> + +#include "socfpga_emac.h" +#include "socfpga_plat_def.h" +#include "socfpga_reset_manager.h" +#include "socfpga_system_manager.h" + +void socfpga_emac_init(void) +{ + mmio_setbits_32(SOCFPGA_RSTMGR(PER0MODRST), + RSTMGR_PER0MODRST_EMAC0 | + RSTMGR_PER0MODRST_EMAC1 | + RSTMGR_PER0MODRST_EMAC2); + + mmio_clrsetbits_32(SOCFPGA_SYSMGR(EMAC_0), + PHY_INTF_SEL_MSK, EMAC0_PHY_MODE); + mmio_clrsetbits_32(SOCFPGA_SYSMGR(EMAC_1), + PHY_INTF_SEL_MSK, EMAC1_PHY_MODE); + mmio_clrsetbits_32(SOCFPGA_SYSMGR(EMAC_2), + PHY_INTF_SEL_MSK, EMAC2_PHY_MODE); + + mmio_clrbits_32(SOCFPGA_SYSMGR(FPGAINTF_EN_3), + FPGAINTF_EN_3_EMAC_MSK(0) | + FPGAINTF_EN_3_EMAC_MSK(1) | + FPGAINTF_EN_3_EMAC_MSK(2)); + + mmio_clrbits_32(SOCFPGA_RSTMGR(PER0MODRST), + RSTMGR_PER0MODRST_EMAC0 | + RSTMGR_PER0MODRST_EMAC1 | + RSTMGR_PER0MODRST_EMAC2); +} + diff --git a/plat/intel/soc/common/soc/socfpga_firewall.c b/plat/intel/soc/common/soc/socfpga_firewall.c new file mode 100644 index 0000000..6247df3 --- /dev/null +++ b/plat/intel/soc/common/soc/socfpga_firewall.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> +#include <lib/utils_def.h> + +#include "socfpga_noc.h" +#include "socfpga_plat_def.h" +#include "socfpga_system_manager.h" + +void enable_nonsecure_access(void) +{ + enable_ns_peripheral_access(); + enable_ns_bridge_access(); +} + +void enable_ns_peripheral_access(void) +{ + mmio_write_32(SOCFPGA_L4_PER_SCR(NAND_REGISTER), DISABLE_L4_FIREWALL); +#if ((PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10) || \ + (PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX) || \ + (PLATFORM_MODEL == PLAT_SOCFPGA_N5X)) + mmio_write_32(SOCFPGA_L4_PER_SCR(NAND_DATA), DISABLE_L4_FIREWALL); +#endif + + mmio_write_32(SOCFPGA_L4_SYS_SCR(NAND_ECC), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(NAND_READ_ECC), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(NAND_WRITE_ECC), + DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_PER_SCR(USB0_REGISTER), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(USB1_REGISTER), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(USB0_ECC), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(USB1_ECC), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_PER_SCR(SPI_MASTER0), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(SPI_MASTER1), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(SPI_SLAVE0), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(SPI_SLAVE1), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_PER_SCR(EMAC0), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(EMAC1), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(EMAC2), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC0RX_ECC), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC0TX_ECC), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC1RX_ECC), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC1TX_ECC), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC2RX_ECC), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(EMAC2TX_ECC), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_PER_SCR(SDMMC), DISABLE_L4_FIREWALL); + 
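	/*
	 * Editor's note (descriptive, not part of the original patch): every
	 * DISABLE_L4_FIREWALL write above and below clears the secure-only
	 * restriction in the corresponding L4 security control register, so
	 * non-secure masters can reach that peripheral; the matching *_ECC
	 * windows are opened with the same value so their error-correction
	 * registers remain accessible as well.
	 */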
mmio_write_32(SOCFPGA_L4_SYS_SCR(SDMMC_ECC), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_PER_SCR(GPIO0), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(GPIO1), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_PER_SCR(I2C0), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(I2C1), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(I2C2), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(I2C3), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(I2C4), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_PER_SCR(SP_TIMER0), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(SP_TIMER1), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_PER_SCR(UART0), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_PER_SCR(UART1), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_SYS_SCR(DMA_ECC), DISABLE_L4_FIREWALL); + + + mmio_write_32(SOCFPGA_L4_SYS_SCR(OCRAM_ECC), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_SYS_SCR(CLK_MGR), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_SYS_SCR(IO_MGR), DISABLE_L4_FIREWALL); + + + mmio_write_32(SOCFPGA_L4_SYS_SCR(RST_MGR), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_SYS_SCR(SYS_MGR), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_SYS_SCR(OSC0_TIMER), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(OSC1_TIMER), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG0), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG1), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG2), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG3), DISABLE_L4_FIREWALL); +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + mmio_write_32(SOCFPGA_L4_SYS_SCR(WATCHDOG4), DISABLE_L4_FIREWALL); +#endif + + mmio_write_32(SOCFPGA_L4_SYS_SCR(DAP), DISABLE_L4_FIREWALL); + +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + mmio_write_32(SOCFPGA_L4_SYS_SCR(PWRMGR), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_SYS_SCR(USB1_RXECC), DISABLE_L4_FIREWALL); + mmio_write_32(SOCFPGA_L4_SYS_SCR(USB1_TXECC), DISABLE_L4_FIREWALL); +#endif + + mmio_write_32(SOCFPGA_L4_SYS_SCR(L4_NOC_PROBES), DISABLE_L4_FIREWALL); + + mmio_write_32(SOCFPGA_L4_SYS_SCR(L4_NOC_QOS), DISABLE_L4_FIREWALL); + +#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10 + enable_ns_ocram_access(); + mmio_write_32(SOCFPGA_SYSMGR(SDMMC), SYSMGR_SDMMC_DRVSEL(3)); +#endif + +} + +void enable_ns_ocram_access(void) +{ + mmio_clrbits_32(SOCFPGA_CCU_NOC(CPU0, RAM0), + SOCFPGA_CCU_NOC_ADMASK_P_MASK | SOCFPGA_CCU_NOC_ADMASK_NS_MASK); + mmio_clrbits_32(SOCFPGA_CCU_NOC(IOM, RAM0), + SOCFPGA_CCU_NOC_ADMASK_P_MASK | SOCFPGA_CCU_NOC_ADMASK_NS_MASK); +} + +void enable_ns_bridge_access(void) +{ + mmio_write_32(SOCFPGA_SOC2FPGA_SCR_REG_BASE, DISABLE_BRIDGE_FIREWALL); + mmio_write_32(SOCFPGA_LWSOC2FPGA_SCR_REG_BASE, DISABLE_BRIDGE_FIREWALL); +} + +void enable_ocram_firewall(void) +{ + mmio_setbits_32(SOCFPGA_CCU_NOC(CPU0, RAM0), + SOCFPGA_CCU_NOC_ADMASK_P_MASK | SOCFPGA_CCU_NOC_ADMASK_NS_MASK); + mmio_setbits_32(SOCFPGA_CCU_NOC(IOM, RAM0), + SOCFPGA_CCU_NOC_ADMASK_P_MASK | SOCFPGA_CCU_NOC_ADMASK_NS_MASK); +} diff --git a/plat/intel/soc/common/soc/socfpga_handoff.c b/plat/intel/soc/common/soc/socfpga_handoff.c new file mode 100644 index 0000000..526c6e1 --- /dev/null +++ b/plat/intel/soc/common/soc/socfpga_handoff.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <errno.h> +#include <string.h> + +#include "socfpga_handoff.h" + +#define SWAP_UINT32(x) (((x) >> 24) | (((x) & 0x00FF0000) >> 8) | \ + (((x) & 0x0000FF00) << 8) | ((x) << 24)) + +int socfpga_get_handoff(handoff *reverse_hoff_ptr) +{ + int i; + uint32_t *buffer; + handoff *handoff_ptr = (handoff *) PLAT_HANDOFF_OFFSET; + + if (sizeof(*handoff_ptr) > sizeof(handoff)) { + return -EOVERFLOW; + } + + memcpy(reverse_hoff_ptr, handoff_ptr, sizeof(handoff)); + buffer = (uint32_t *)reverse_hoff_ptr; + + /* convert big endian to little endian */ + for (i = 0; i < sizeof(handoff) / 4; i++) + buffer[i] = SWAP_UINT32(buffer[i]); + + if (reverse_hoff_ptr->header_magic != HANDOFF_MAGIC_HEADER) { + return -1; + } + if (reverse_hoff_ptr->pinmux_sel_magic != HANDOFF_MAGIC_PINMUX_SEL) { + return -1; + } + if (reverse_hoff_ptr->pinmux_io_magic != HANDOFF_MAGIC_IOCTLR) { + return -1; + } + if (reverse_hoff_ptr->pinmux_fpga_magic != HANDOFF_MAGIC_FPGA) { + return -1; + } + if (reverse_hoff_ptr->pinmux_delay_magic != HANDOFF_MAGIC_IODELAY) { + return -1; + } + if (reverse_hoff_ptr->clock_magic != HANDOFF_MAGIC_CLOCK) { + return -1; + } + +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + if (reverse_hoff_ptr->peripheral_pwr_gate_magic != HANDOFF_MAGIC_PERIPHERAL) { + return -1; + } + + if (reverse_hoff_ptr->ddr_magic != HANDOFF_MAGIC_DDR) { + return -1; + } +#endif + + return 0; +} diff --git a/plat/intel/soc/common/soc/socfpga_mailbox.c b/plat/intel/soc/common/soc/socfpga_mailbox.c new file mode 100644 index 0000000..d93fc8a --- /dev/null +++ b/plat/intel/soc/common/soc/socfpga_mailbox.c @@ -0,0 +1,698 @@ +/* + * Copyright (c) 2020-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <platform_def.h> + +#include "socfpga_mailbox.h" +#include "socfpga_plat_def.h" +#include "socfpga_sip_svc.h" +#include "socfpga_system_manager.h" + +static mailbox_payload_t mailbox_resp_payload; +static mailbox_container_t mailbox_resp_ctr = {0, 0, &mailbox_resp_payload}; + +static bool is_mailbox_cmdbuf_full(uint32_t cin) +{ + uint32_t cout = mmio_read_32(MBOX_OFFSET + MBOX_COUT); + + return (((cin + 1U) % MBOX_CMD_BUFFER_SIZE) == cout); +} + +static bool is_mailbox_cmdbuf_empty(uint32_t cin) +{ + uint32_t cout = mmio_read_32(MBOX_OFFSET + MBOX_COUT); + + return (((cout + 1U) % MBOX_CMD_BUFFER_SIZE) == cin); +} + +static int wait_for_mailbox_cmdbuf_empty(uint32_t cin) +{ + unsigned int timeout = 200U; + + do { + if (is_mailbox_cmdbuf_empty(cin)) { + break; + } + mdelay(10U); + } while (--timeout != 0U); + + if (timeout == 0U) { + return MBOX_TIMEOUT; + } + + return MBOX_RET_OK; +} + +static int write_mailbox_cmd_buffer(uint32_t *cin, uint32_t cout, + uint32_t data, + bool *is_doorbell_triggered) +{ + unsigned int timeout = 100U; + + do { + if (is_mailbox_cmdbuf_full(*cin)) { + if (!(*is_doorbell_triggered)) { + mmio_write_32(MBOX_OFFSET + + MBOX_DOORBELL_TO_SDM, 1U); + *is_doorbell_triggered = true; + } + mdelay(10U); + } else { + mmio_write_32(MBOX_ENTRY_TO_ADDR(CMD, (*cin)++), data); + *cin %= MBOX_CMD_BUFFER_SIZE; + mmio_write_32(MBOX_OFFSET + MBOX_CIN, *cin); + break; + } + } while (--timeout != 0U); + + if (timeout == 0U) { + return MBOX_TIMEOUT; + } + + if (*is_doorbell_triggered) { + int ret = wait_for_mailbox_cmdbuf_empty(*cin); + return ret; + } + + return MBOX_RET_OK; +} + +static int fill_mailbox_circular_buffer(uint32_t 
header_cmd, uint32_t *args, + unsigned int len) +{ + uint32_t sdm_read_offset, cmd_free_offset; + unsigned int i; + int ret; + bool is_doorbell_triggered = false; + + cmd_free_offset = mmio_read_32(MBOX_OFFSET + MBOX_CIN); + sdm_read_offset = mmio_read_32(MBOX_OFFSET + MBOX_COUT); + + ret = write_mailbox_cmd_buffer(&cmd_free_offset, sdm_read_offset, + header_cmd, &is_doorbell_triggered); + if (ret != 0) { + goto restart_mailbox; + } + + for (i = 0U; i < len; i++) { + is_doorbell_triggered = false; + ret = write_mailbox_cmd_buffer(&cmd_free_offset, + sdm_read_offset, args[i], + &is_doorbell_triggered); + if (ret != 0) { + goto restart_mailbox; + } + } + + mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_TO_SDM, 1U); + + return MBOX_RET_OK; + +restart_mailbox: + /* + * Attempt to restart mailbox if the driver not able to write + * into mailbox command buffer + */ + if (MBOX_CMD_MASK(header_cmd) != MBOX_CMD_RESTART) { + INFO("Mailbox timed out: Attempting mailbox reset\n"); + ret = mailbox_init(); + + if (ret == MBOX_TIMEOUT) { + INFO("Error: Mailbox fail to restart\n"); + } + } + + return MBOX_TIMEOUT; +} + +int mailbox_read_response(unsigned int *job_id, uint32_t *response, + unsigned int *resp_len) +{ + uint32_t rin; + uint32_t rout; + uint32_t resp_data; + unsigned int ret_resp_len; + + if (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) == 1U) { + mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0U); + } + + rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN); + rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT); + + if (rout != rin) { + resp_data = mmio_read_32(MBOX_ENTRY_TO_ADDR(RESP, (rout)++)); + + rout %= MBOX_RESP_BUFFER_SIZE; + mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout); + + + if (MBOX_RESP_CLIENT_ID(resp_data) != MBOX_ATF_CLIENT_ID) { + return MBOX_WRONG_ID; + } + + *job_id = MBOX_RESP_JOB_ID(resp_data); + + ret_resp_len = MBOX_RESP_LEN(resp_data); + + if (iterate_resp(ret_resp_len, response, resp_len) + != MBOX_RET_OK) { + return MBOX_TIMEOUT; + } + + if (MBOX_RESP_ERR(resp_data) > 0U) { + INFO("Error in response: %x\n", resp_data); + return -MBOX_RESP_ERR(resp_data); + } + + return MBOX_RET_OK; + } + return MBOX_NO_RESPONSE; +} + +int mailbox_read_response_async(unsigned int *job_id, uint32_t *header, + uint32_t *response, unsigned int *resp_len, + uint8_t ignore_client_id) +{ + uint32_t rin; + uint32_t rout; + uint32_t resp_data; + uint32_t ret_resp_len = 0; + uint8_t is_done = 0; + uint32_t resp_len_check = 0; + + if ((mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) != 0) { + ret_resp_len = MBOX_RESP_LEN( + mailbox_resp_ctr.payload->header) - + mailbox_resp_ctr.index; + } + + if (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) == 1U) { + mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0U); + } + + rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN); + rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT); + + while (rout != rin && !is_done) { + + resp_data = mmio_read_32(MBOX_ENTRY_TO_ADDR(RESP, (rout)++)); + + rout %= MBOX_RESP_BUFFER_SIZE; + mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout); + rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN); + + if ((mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) != 0) { + mailbox_resp_ctr.payload->data[mailbox_resp_ctr.index] = resp_data; + mailbox_resp_ctr.index++; + ret_resp_len--; + } else { + if (!ignore_client_id) { + if (MBOX_RESP_CLIENT_ID(resp_data) != MBOX_ATF_CLIENT_ID) { + *resp_len = 0; + return MBOX_WRONG_ID; + } + } + + *job_id = MBOX_RESP_JOB_ID(resp_data); + ret_resp_len = MBOX_RESP_LEN(resp_data); + mailbox_resp_ctr.payload->header = resp_data; + 
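			/*
			 * Editor's note (descriptive, not part of the original
			 * patch): the first word of a response is its header;
			 * MBOX_RESP_LEN() of that header gives the number of
			 * data words still to come, and the
			 * MBOX_PAYLOAD_FLAG_BUSY flag set just below lets a
			 * later call resume collecting data words for the same
			 * response.
			 */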
mailbox_resp_ctr.flag |= MBOX_PAYLOAD_FLAG_BUSY; + } + + if (ret_resp_len == 0) { + is_done = 1; + } + } + + if (is_done != 0) { + + /* copy header data to input address if applicable */ + if (header != 0) { + *header = mailbox_resp_ctr.payload->header; + } + + /* copy response data to input buffer if applicable */ + ret_resp_len = MBOX_RESP_LEN(mailbox_resp_ctr.payload->header); + if ((ret_resp_len > 0) && (response != NULL) && (resp_len != NULL)) { + if (*resp_len > ret_resp_len) { + *resp_len = ret_resp_len; + } + + resp_len_check = (uint32_t) *resp_len; + + if (resp_len_check > MBOX_DATA_MAX_LEN) { + return MBOX_RET_ERROR; + } + + memcpy((uint8_t *) response, + (uint8_t *) mailbox_resp_ctr.payload->data, + *resp_len * MBOX_WORD_BYTE); + } + + /* reset async response param */ + mailbox_resp_ctr.index = 0; + mailbox_resp_ctr.flag = 0; + + if (MBOX_RESP_ERR(mailbox_resp_ctr.payload->header) > 0U) { + INFO("Error in async response: %x\n", + mailbox_resp_ctr.payload->header); + return -MBOX_RESP_ERR(mailbox_resp_ctr.payload->header); + } + + return MBOX_RET_OK; + } + + *resp_len = 0; + return (mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) ? MBOX_BUSY : MBOX_NO_RESPONSE; +} + +int mailbox_poll_response(uint32_t job_id, uint32_t urgent, uint32_t *response, + unsigned int *resp_len) +{ + unsigned int timeout = 40U; + unsigned int sdm_loop = 255U; + unsigned int ret_resp_len; + uint32_t rin; + uint32_t rout; + uint32_t resp_data; + + while (sdm_loop != 0U) { + + do { + if (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) + == 1U) { + break; + } + mdelay(10U); + } while (--timeout != 0U); + + if (timeout == 0U) { + break; + } + + mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0U); + + if ((urgent & 1U) != 0U) { + mdelay(5U); + if ((mmio_read_32(MBOX_OFFSET + MBOX_STATUS) & + MBOX_STATUS_UA_MASK) ^ + (urgent & MBOX_STATUS_UA_MASK)) { + mmio_write_32(MBOX_OFFSET + MBOX_URG, 0U); + return MBOX_RET_OK; + } + + mmio_write_32(MBOX_OFFSET + MBOX_URG, 0U); + INFO("Error: Mailbox did not get UA"); + return MBOX_RET_ERROR; + } + + rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN); + rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT); + + while (rout != rin) { + resp_data = mmio_read_32(MBOX_ENTRY_TO_ADDR(RESP, + (rout)++)); + + rout %= MBOX_RESP_BUFFER_SIZE; + mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout); + + if (MBOX_RESP_CLIENT_ID(resp_data) != MBOX_ATF_CLIENT_ID + || MBOX_RESP_JOB_ID(resp_data) != job_id) { + continue; + } + + ret_resp_len = MBOX_RESP_LEN(resp_data); + + if (iterate_resp(ret_resp_len, response, resp_len) + != MBOX_RET_OK) { + return MBOX_TIMEOUT; + } + + if (MBOX_RESP_ERR(resp_data) > 0U) { + INFO("Error in response: %x\n", resp_data); + return -MBOX_RESP_ERR(resp_data); + } + + return MBOX_RET_OK; + } + + sdm_loop--; + } + + INFO("Timed out waiting for SDM\n"); + return MBOX_TIMEOUT; +} + +int iterate_resp(uint32_t mbox_resp_len, uint32_t *resp_buf, + unsigned int *resp_len) +{ + unsigned int timeout, total_resp_len = 0U; + uint32_t resp_data; + uint32_t rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN); + uint32_t rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT); + + while (mbox_resp_len > 0U) { + timeout = 100U; + mbox_resp_len--; + resp_data = mmio_read_32(MBOX_ENTRY_TO_ADDR(RESP, (rout)++)); + + if ((resp_buf != NULL) && (resp_len != NULL) + && (*resp_len != 0U)) { + *(resp_buf + total_resp_len) + = resp_data; + *resp_len = *resp_len - 1; + total_resp_len++; + } + rout %= MBOX_RESP_BUFFER_SIZE; + mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout); + + do { + rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN); 
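			/*
			 * Editor's note (descriptive, not part of the original
			 * patch): RIN is the producer index advanced by SDM and
			 * ROUT the consumer index owned by this driver; when
			 * they are equal the response ring is momentarily
			 * empty, so the check below waits 10 ms before RIN is
			 * re-read, up to the remaining retry budget.
			 */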
+ if (rout == rin) { + mdelay(10U); + } else { + break; + } + timeout--; + } while ((mbox_resp_len > 0U) && (timeout != 0U)); + + if (timeout == 0U) { + INFO("Timed out waiting for SDM\n"); + return MBOX_TIMEOUT; + } + } + + if (resp_len) + *resp_len = total_resp_len; + + return MBOX_RET_OK; +} + +int mailbox_send_cmd_async_ext(uint32_t header_cmd, uint32_t *args, + unsigned int len) +{ + return fill_mailbox_circular_buffer(header_cmd, args, len); +} + +int mailbox_send_cmd_async(uint32_t *job_id, uint32_t cmd, uint32_t *args, + unsigned int len, unsigned int indirect) +{ + int status; + + status = fill_mailbox_circular_buffer( + MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) | + MBOX_JOB_ID_CMD(*job_id) | + MBOX_CMD_LEN_CMD(len) | + MBOX_INDIRECT(indirect) | + cmd, args, len); + if (status < 0) { + return status; + } + + *job_id = (*job_id + 1U) % MBOX_MAX_IND_JOB_ID; + + return MBOX_RET_OK; +} + +int mailbox_send_cmd(uint32_t job_id, uint32_t cmd, uint32_t *args, + unsigned int len, uint32_t urgent, uint32_t *response, + unsigned int *resp_len) +{ + int status = 0; + + if (urgent != 0U) { + urgent |= mmio_read_32(MBOX_OFFSET + MBOX_STATUS) & + MBOX_STATUS_UA_MASK; + mmio_write_32(MBOX_OFFSET + MBOX_URG, cmd); + mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_TO_SDM, 1U); + } + + else { + status = fill_mailbox_circular_buffer( + MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) | + MBOX_JOB_ID_CMD(job_id) | + MBOX_CMD_LEN_CMD(len) | + cmd, args, len); + } + + if (status != 0) { + return status; + } + + status = mailbox_poll_response(job_id, urgent, response, resp_len); + + return status; +} + +void mailbox_clear_response(void) +{ + mmio_write_32(MBOX_OFFSET + MBOX_ROUT, + mmio_read_32(MBOX_OFFSET + MBOX_RIN)); +} + +void mailbox_set_int(uint32_t interrupt) +{ + + mmio_write_32(MBOX_OFFSET+MBOX_INT, MBOX_COE_BIT(interrupt) | + MBOX_UAE_BIT(interrupt)); +} + + +void mailbox_set_qspi_open(void) +{ + mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE); + mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_OPEN, NULL, 0U, + CMD_CASUAL, NULL, NULL); +} + +void mailbox_set_qspi_direct(void) +{ + uint32_t response[1], qspi_clk, reg; + unsigned int resp_len = ARRAY_SIZE(response); + + mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_DIRECT, NULL, 0U, + CMD_CASUAL, response, &resp_len); + + qspi_clk = response[0]; + INFO("QSPI ref clock: %u\n", qspi_clk); + + /* + * Store QSPI ref clock frequency in BOOT_SCRATCH_COLD_0 register for + * later boot loader (i.e. u-boot) use. + * The frequency is stored in kHz and occupies BOOT_SCRATCH_COLD_0 + * register bits[27:0]. 
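 * Editor's note (illustrative example, hypothetical value): a 400 MHz QSPI
 * reference clock would be stored as 400000 kHz, i.e. 0x61A80, which fits
 * within the bits[27:0] field selected by SYSMGR_QSPI_REFCLK_MASK.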
+ */ + qspi_clk /= 1000; + reg = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_0)); + reg &= ~SYSMGR_QSPI_REFCLK_MASK; + reg |= qspi_clk & SYSMGR_QSPI_REFCLK_MASK; + mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_0), reg); +} + +void mailbox_set_qspi_close(void) +{ + mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE); + mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_CLOSE, NULL, 0U, + CMD_CASUAL, NULL, NULL); +} + +void mailbox_qspi_set_cs(uint32_t device_select) +{ + uint32_t cs_setting; + + /* QSPI device select settings at 31:28 */ + cs_setting = (device_select << 28); + mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE); + mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_SET_CS, &cs_setting, + 1U, CMD_CASUAL, NULL, NULL); +} + +void mailbox_hps_qspi_enable(void) +{ + mailbox_set_qspi_open(); + mailbox_set_qspi_direct(); +} + +void mailbox_reset_cold(void) +{ + mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE); + + mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_REBOOT_HPS, 0U, 0U, + CMD_CASUAL, NULL, NULL); +} + +void mailbox_reset_warm(uint32_t reset_type) +{ + mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE); + + reset_type = 0x01; // Warm reset header data must be 1 + mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_REBOOT_HPS, &reset_type, 1U, + CMD_CASUAL, NULL, NULL); +} + +int mailbox_rsu_get_spt_offset(uint32_t *resp_buf, unsigned int resp_buf_len) +{ + return mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_SUBPARTITION_TABLE, + NULL, 0U, CMD_CASUAL, resp_buf, + &resp_buf_len); +} + +struct rsu_status_info { + uint64_t current_image; + uint64_t fail_image; + uint32_t state; + uint32_t version; + uint32_t error_location; + uint32_t error_details; + uint32_t retry_counter; +}; + +int mailbox_rsu_status(uint32_t *resp_buf, unsigned int resp_buf_len) +{ + int ret; + struct rsu_status_info *info = (struct rsu_status_info *)resp_buf; + + info->retry_counter = ~0U; + + ret = mailbox_send_cmd(MBOX_JOB_ID, MBOX_RSU_STATUS, NULL, 0U, + CMD_CASUAL, resp_buf, + &resp_buf_len); + + if (ret < 0) { + return ret; + } + + if (info->retry_counter != ~0U) { + if ((info->version & RSU_VERSION_ACMF_MASK) == 0U) { + info->version |= RSU_VERSION_ACMF; + } + } + + return ret; +} + +int mailbox_rsu_update(uint32_t *flash_offset) +{ + return mailbox_send_cmd(MBOX_JOB_ID, MBOX_RSU_UPDATE, + flash_offset, 2U, + CMD_CASUAL, NULL, NULL); +} + +int mailbox_hps_stage_notify(uint32_t execution_stage) +{ + return mailbox_send_cmd(MBOX_JOB_ID, MBOX_HPS_STAGE_NOTIFY, + &execution_stage, 1U, CMD_CASUAL, + NULL, NULL); +} + +int mailbox_init(void) +{ + int status; + + mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE | + MBOX_INT_FLAG_UAE); + mmio_write_32(MBOX_OFFSET + MBOX_URG, 0U); + mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0U); + + status = mailbox_send_cmd(0U, MBOX_CMD_RESTART, NULL, 0U, + CMD_URGENT, NULL, NULL); + + if (status != 0) { + return status; + } + + mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE | + MBOX_INT_FLAG_UAE); + + return MBOX_RET_OK; +} + +int intel_mailbox_get_config_status(uint32_t cmd, bool init_done) +{ + int status; + uint32_t res, response[6]; + unsigned int resp_len = ARRAY_SIZE(response); + + status = mailbox_send_cmd(MBOX_JOB_ID, cmd, NULL, 0U, CMD_CASUAL, + response, &resp_len); + + if (status < 0) { + return status; + } + + res = response[RECONFIG_STATUS_STATE]; + + if (res == MBOX_CFGSTAT_VAB_BS_PREAUTH) { + return MBOX_CFGSTAT_STATE_CONFIG; + } + + if ((res != 0U) && (res != MBOX_CFGSTAT_STATE_CONFIG)) { + return res; + } + + res = response[RECONFIG_STATUS_PIN_STATUS]; + 
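	/*
	 * Editor's note (descriptive, not part of the original patch): the
	 * status response is decoded word by word: RECONFIG_STATUS_STATE gave
	 * the configuration state above, the word read here carries the
	 * nSTATUS pin, and RECONFIG_STATUS_SOFTFUNC_STATUS below holds the
	 * SEU error, CONF_DONE and INIT_DONE flags that determine whether the
	 * FPGA is ready.
	 */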
if ((res & PIN_STATUS_NSTATUS) == 0U) { + return MBOX_CFGSTAT_STATE_ERROR_HARDWARE; + } + + res = response[RECONFIG_STATUS_SOFTFUNC_STATUS]; + if ((res & SOFTFUNC_STATUS_SEU_ERROR) != 0U) { + ERROR("SoftFunction Status SEU ERROR\n"); + } + + if ((res & SOFTFUNC_STATUS_CONF_DONE) == 0U) { + return MBOX_CFGSTAT_STATE_CONFIG; + } + + if (init_done && (res & SOFTFUNC_STATUS_INIT_DONE) == 0U) { + return MBOX_CFGSTAT_STATE_CONFIG; + } + + return MBOX_RET_OK; +} + +int intel_mailbox_is_fpga_not_ready(void) +{ + int ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS, true); + + if ((ret != MBOX_RET_OK) && (ret != MBOX_CFGSTAT_STATE_CONFIG)) { + ret = intel_mailbox_get_config_status(MBOX_CONFIG_STATUS, + false); + } + + return ret; +} + +int mailbox_hwmon_readtemp(uint32_t chan, uint32_t *resp_buf) +{ + unsigned int resp_len = sizeof(resp_buf); + + return mailbox_send_cmd(MBOX_JOB_ID, MBOX_HWMON_READTEMP, &chan, 1U, + CMD_CASUAL, resp_buf, + &resp_len); + +} + +int mailbox_hwmon_readvolt(uint32_t chan, uint32_t *resp_buf) +{ + unsigned int resp_len = sizeof(resp_buf); + + return mailbox_send_cmd(MBOX_JOB_ID, MBOX_HWMON_READVOLT, &chan, 1U, + CMD_CASUAL, resp_buf, + &resp_len); +} + +int mailbox_seu_err_status(uint32_t *resp_buf, unsigned int resp_buf_len) +{ + + return mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_SEU_ERR_READ, NULL, 0U, + CMD_CASUAL, resp_buf, + &resp_buf_len); +} diff --git a/plat/intel/soc/common/soc/socfpga_reset_manager.c b/plat/intel/soc/common/soc/socfpga_reset_manager.c new file mode 100644 index 0000000..7db86c7 --- /dev/null +++ b/plat/intel/soc/common/soc/socfpga_reset_manager.c @@ -0,0 +1,913 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <platform_def.h> + +#include "socfpga_f2sdram_manager.h" +#include "socfpga_mailbox.h" +#include "socfpga_plat_def.h" +#include "socfpga_reset_manager.h" +#include "socfpga_system_manager.h" + +void deassert_peripheral_reset(void) +{ + mmio_clrbits_32(SOCFPGA_RSTMGR(PER1MODRST), + RSTMGR_FIELD(PER1, WATCHDOG0) | + RSTMGR_FIELD(PER1, WATCHDOG1) | + RSTMGR_FIELD(PER1, WATCHDOG2) | + RSTMGR_FIELD(PER1, WATCHDOG3) | + RSTMGR_FIELD(PER1, WATCHDOG4) | + RSTMGR_FIELD(PER1, L4SYSTIMER0) | + RSTMGR_FIELD(PER1, L4SYSTIMER1) | + RSTMGR_FIELD(PER1, SPTIMER0) | + RSTMGR_FIELD(PER1, SPTIMER1) | + RSTMGR_FIELD(PER1, I2C0) | + RSTMGR_FIELD(PER1, I2C1) | + RSTMGR_FIELD(PER1, I2C2) | + RSTMGR_FIELD(PER1, I2C3) | + RSTMGR_FIELD(PER1, I2C4) | + RSTMGR_FIELD(PER1, I3C0) | + RSTMGR_FIELD(PER1, I3C1) | + RSTMGR_FIELD(PER1, UART0) | + RSTMGR_FIELD(PER1, UART1) | + RSTMGR_FIELD(PER1, GPIO0) | + RSTMGR_FIELD(PER1, GPIO1)); + + mmio_clrbits_32(SOCFPGA_RSTMGR(PER0MODRST), + RSTMGR_FIELD(PER0, SOFTPHY) | + RSTMGR_FIELD(PER0, EMAC0OCP) | + RSTMGR_FIELD(PER0, EMAC1OCP) | + RSTMGR_FIELD(PER0, EMAC2OCP) | + RSTMGR_FIELD(PER0, USB0OCP) | + RSTMGR_FIELD(PER0, USB1OCP) | + RSTMGR_FIELD(PER0, NANDOCP) | + RSTMGR_FIELD(PER0, SDMMCOCP) | + RSTMGR_FIELD(PER0, DMAOCP)); + + mmio_clrbits_32(SOCFPGA_RSTMGR(PER0MODRST), + RSTMGR_FIELD(PER0, EMAC0) | + RSTMGR_FIELD(PER0, EMAC1) | + RSTMGR_FIELD(PER0, EMAC2) | + RSTMGR_FIELD(PER0, USB0) | + RSTMGR_FIELD(PER0, USB1) | + RSTMGR_FIELD(PER0, NAND) | + RSTMGR_FIELD(PER0, SDMMC) | + RSTMGR_FIELD(PER0, DMA) | + RSTMGR_FIELD(PER0, SPIM0) | + RSTMGR_FIELD(PER0, SPIM1) | + RSTMGR_FIELD(PER0, SPIS0) | + RSTMGR_FIELD(PER0, 
SPIS1) | + RSTMGR_FIELD(PER0, EMACPTP) | + RSTMGR_FIELD(PER0, DMAIF0) | + RSTMGR_FIELD(PER0, DMAIF1) | + RSTMGR_FIELD(PER0, DMAIF2) | + RSTMGR_FIELD(PER0, DMAIF3) | + RSTMGR_FIELD(PER0, DMAIF4) | + RSTMGR_FIELD(PER0, DMAIF5) | + RSTMGR_FIELD(PER0, DMAIF6) | + RSTMGR_FIELD(PER0, DMAIF7)); + +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX + mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), + RSTMGR_FIELD(BRG, MPFE)); +#endif +} + +void config_hps_hs_before_warm_reset(void) +{ + uint32_t or_mask = 0; + + or_mask |= RSTMGR_HDSKEN_EMIF_FLUSH; + or_mask |= RSTMGR_HDSKEN_FPGAHSEN; + or_mask |= RSTMGR_HDSKEN_ETRSTALLEN; + or_mask |= RSTMGR_HDSKEN_LWS2F_FLUSH; + or_mask |= RSTMGR_HDSKEN_L3NOC_DBG; + or_mask |= RSTMGR_HDSKEN_DEBUG_L3NOC; + + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), or_mask); +} + +static int poll_idle_status(uint32_t addr, uint32_t mask, uint32_t match, uint32_t delay_ms) +{ + int time_out = delay_ms; + + while (time_out-- > 0) { + + if ((mmio_read_32(addr) & mask) == match) { + return 0; + } + udelay(1000); + } + return -ETIMEDOUT; +} + +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 +static int poll_idle_status_by_counter(uint32_t addr, uint32_t mask, + uint32_t match, uint32_t delay_ms) +{ + int time_out = delay_ms; + + while (time_out-- > 0) { + + if ((mmio_read_32(addr) & mask) == match) { + return 0; + } + + /* ToDo: Shall use udelay for product release */ + for (int i = 0; i < 2000; i++) { + /* dummy delay */ + } + } + return -ETIMEDOUT; +} +#endif + +static int poll_idle_status_by_clkcycles(uint32_t addr, uint32_t mask, + uint32_t match, uint32_t delay_clk_cycles) +{ + int time_out = delay_clk_cycles; + + while (time_out-- > 0) { + + if ((mmio_read_32(addr) & mask) == match) { + return 0; + } + udelay(1); + } + return -ETIMEDOUT; +} + +static void socfpga_s2f_bridge_mask(uint32_t mask, + uint32_t *brg_mask, + uint32_t *noc_mask) +{ + *brg_mask = 0; + *noc_mask = 0; + + if ((mask & SOC2FPGA_MASK) != 0U) { + *brg_mask |= RSTMGR_FIELD(BRG, SOC2FPGA); + *noc_mask |= IDLE_DATA_SOC2FPGA; + } + + if ((mask & LWHPS2FPGA_MASK) != 0U) { + *brg_mask |= RSTMGR_FIELD(BRG, LWHPS2FPGA); + *noc_mask |= IDLE_DATA_LWSOC2FPGA; + } +} + +static void socfpga_f2s_bridge_mask(uint32_t mask, + uint32_t *brg_mask, + uint32_t *f2s_idlereq, + uint32_t *f2s_force_drain, + uint32_t *f2s_en, + uint32_t *f2s_idleack, + uint32_t *f2s_respempty, + uint32_t *f2s_cmdidle) +{ + *brg_mask = 0; + *f2s_idlereq = 0; + *f2s_force_drain = 0; + *f2s_en = 0; + *f2s_idleack = 0; + *f2s_respempty = 0; + *f2s_cmdidle = 0; + +#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10 + if ((mask & FPGA2SOC_MASK) != 0U) { + *brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC); + } + if ((mask & F2SDRAM0_MASK) != 0U) { + *brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM0); + *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ; + *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN; + *f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE; + *f2s_idleack |= FLAGINSTATUS_F2SDRAM0_IDLEACK; + *f2s_respempty |= FLAGINSTATUS_F2SDRAM0_RESPEMPTY; + *f2s_cmdidle |= FLAGINSTATUS_F2SDRAM0_CMDIDLE; + } + if ((mask & F2SDRAM1_MASK) != 0U) { + *brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM1); + *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM1_IDLEREQ; + *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM1_FORCE_DRAIN; + *f2s_en |= FLAGOUTSETCLR_F2SDRAM1_ENABLE; + *f2s_idleack |= FLAGINSTATUS_F2SDRAM1_IDLEACK; + *f2s_respempty |= FLAGINSTATUS_F2SDRAM1_RESPEMPTY; + *f2s_cmdidle |= FLAGINSTATUS_F2SDRAM1_CMDIDLE; + } + if ((mask & F2SDRAM2_MASK) != 0U) { + *brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM2); + *f2s_idlereq |= 
FLAGOUTSETCLR_F2SDRAM2_IDLEREQ; + *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM2_FORCE_DRAIN; + *f2s_en |= FLAGOUTSETCLR_F2SDRAM2_ENABLE; + *f2s_idleack |= FLAGINSTATUS_F2SDRAM2_IDLEACK; + *f2s_respempty |= FLAGINSTATUS_F2SDRAM2_RESPEMPTY; + *f2s_cmdidle |= FLAGINSTATUS_F2SDRAM2_CMDIDLE; + } +#elif PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + if (mask & FPGA2SOC_MASK) { + *brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC); + *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ; + *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN; + *f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE; + *f2s_idleack |= FLAGINSTATUS_F2SDRAM0_IDLEACK; + *f2s_respempty |= FLAGINSTATUS_F2SDRAM0_RESPEMPTY; + } + if (mask & F2SDRAM0_MASK) { + *brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM0); + *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ; + *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN; + *f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE; + *f2s_idleack |= FLAGINSTATUS_F2SDRAM0_IDLEACK; + *f2s_respempty |= FLAGINSTATUS_F2SDRAM0_RESPEMPTY; + } + if (mask & F2SDRAM1_MASK) { + *brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM1); + *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM1_IDLEREQ; + *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM1_FORCE_DRAIN; + *f2s_en |= FLAGOUTSETCLR_F2SDRAM1_ENABLE; + *f2s_idleack |= FLAGINSTATUS_F2SDRAM1_IDLEACK; + *f2s_respempty |= FLAGINSTATUS_F2SDRAM1_RESPEMPTY; + } + if (mask & F2SDRAM2_MASK) { + *brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM2); + *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM2_IDLEREQ; + *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM2_FORCE_DRAIN; + *f2s_en |= FLAGOUTSETCLR_F2SDRAM2_ENABLE; + *f2s_idleack |= FLAGINSTATUS_F2SDRAM2_IDLEACK; + *f2s_respempty |= FLAGINSTATUS_F2SDRAM2_RESPEMPTY; + } +#else + if ((mask & FPGA2SOC_MASK) != 0U) { + *brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC); + *f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ; + *f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN; + *f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE; + *f2s_idleack |= FLAGINSTATUS_F2SDRAM0_IDLEACK; + *f2s_respempty |= FLAGINSTATUS_F2SDRAM0_RESPEMPTY; + *f2s_cmdidle |= FLAGINSTATUS_F2SDRAM0_CMDIDLE; + } +#endif +} + +int socfpga_bridges_reset(uint32_t mask) +{ + int ret = 0; + int timeout = 300; + uint32_t brg_mask = 0; + uint32_t noc_mask = 0; + uint32_t f2s_idlereq = 0; + uint32_t f2s_force_drain = 0; + uint32_t f2s_en = 0; + uint32_t f2s_idleack = 0; + uint32_t f2s_respempty = 0; + uint32_t f2s_cmdidle = 0; + + /* Reset s2f bridge */ + socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask); + if (brg_mask) { + if (mask & SOC2FPGA_MASK) { + /* Request handshake with SOC2FPGA bridge to clear traffic */ + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_S2F_FLUSH); + + /* Wait for bridge to idle status */ + ret = poll_idle_status(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_S2F_FLUSH, + RSTMGR_HDSKACK_S2F_FLUSH, 300); + } + + if (mask & LWHPS2FPGA_MASK) { + /* Request handshake with LWSOC2FPGA bridge to clear traffic */ + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_LWS2F_FLUSH); + + /* Wait for bridge to idle status */ + ret = poll_idle_status(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_LWS2F_FLUSH, + RSTMGR_HDSKACK_LWS2F_FLUSH, 300); + } + + if (ret < 0) { + ERROR("S2F Bridge reset: Timeout waiting for idle ack\n"); + assert(false); + } + + /* Assert reset to bridge */ + mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), + brg_mask); + + /* Clear idle requests to bridge */ + if (mask & SOC2FPGA_MASK) { + mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_S2F_FLUSH); + } + + if (mask & LWHPS2FPGA_MASK) { + mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), + 
RSTMGR_HDSKREQ_LWS2F_FLUSH); + } + + /* When FPGA reconfig is complete */ + mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask); + } + + /* Reset f2s bridge */ + socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq, + &f2s_force_drain, &f2s_en, + &f2s_idleack, &f2s_respempty, + &f2s_cmdidle); + + if (brg_mask) { + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), + RSTMGR_HDSKEN_FPGAHSEN); + + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_FPGAHSREQ); + + ret = poll_idle_status(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_FPGAHSREQ, + RSTMGR_HDSKACK_FPGAHSREQ, 300); + + if (ret < 0) { + ERROR("F2S Bridge disable: Timeout waiting for idle req\n"); + assert(false); + } + + /* Disable f2s bridge */ + mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0), + f2s_en); + udelay(5); + + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0), + f2s_force_drain); + udelay(5); + + do { + /* Read response queue status to ensure it is empty */ + uint32_t idle_status; + + idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR( + SIDEBANDMGR_FLAGINSTATUS0)); + if (idle_status & f2s_respempty) { + idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR( + SIDEBANDMGR_FLAGINSTATUS0)); + if (idle_status & f2s_respempty) { + break; + } + } + udelay(1000); + } while (timeout-- > 0); + + /* Assert reset to f2s bridge */ + mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), + brg_mask); + + /* Clear idle request to FPGA */ + mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_FPGAHSREQ); + + /* Clear idle request to MPFE */ + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0), + f2s_idlereq); + + /* When FPGA reconfig is complete */ + mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask); + + /* Enable f2s bridge */ + mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0), + f2s_idlereq); + + ret = poll_idle_status(SOCFPGA_F2SDRAMMGR( + SIDEBANDMGR_FLAGINSTATUS0), f2s_idleack, 0, 300); + if (ret < 0) { + ERROR("F2S bridge enable: Timeout waiting for idle ack"); + assert(false); + } + + mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0), + f2s_force_drain); + udelay(5); + + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0), + f2s_en); + udelay(5); + } + + return ret; +} + +int socfpga_bridges_enable(uint32_t mask) +{ + int ret = 0; + uint32_t brg_mask = 0; + uint32_t noc_mask = 0; + uint32_t f2s_idlereq = 0; + uint32_t f2s_force_drain = 0; + uint32_t f2s_en = 0; + uint32_t f2s_idleack = 0; + uint32_t f2s_respempty = 0; + uint32_t f2s_cmdidle = 0; +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + uint32_t delay = 0; +#endif + + /* Enable s2f bridge */ + socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask); +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + /* Enable SOC2FPGA bridge */ + if (brg_mask & RSTMGR_BRGMODRSTMASK_SOC2FPGA) { + /* Write Reset Manager hdskreq[soc2fpga_flush_req] = 1 */ + NOTICE("Set S2F hdskreq ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_SOC2FPGAREQ); + + /* Read Reset Manager hdskack[soc2fpga] = 1 */ + ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_SOC2FPGAACK, RSTMGR_HDSKACK_SOC2FPGAACK, + 300); + + if (ret < 0) { + ERROR("S2F bridge enable: Timeout hdskack\n"); + } + + /* Write Reset Manager hdskreq[soc2fpga_flush_req] = 0 */ + NOTICE("Clear S2F hdskreq ...\n"); + mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_SOC2FPGAREQ); + + /* Write Reset Manager brgmodrst[soc2fpga] = 1 */ + NOTICE("Assert S2F ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), + RSTMGR_BRGMODRST_SOC2FPGA); + + /* ToDo: Shall use udelay 
for product release */ + for (delay = 0; delay < 1000; delay++) { + /* dummy delay */ + } + + /* Write Reset Manager brgmodrst[soc2fpga] = 0 */ + NOTICE("Deassert S2F ...\n"); + mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), + RSTMGR_BRGMODRST_SOC2FPGA); + } + + /* Enable LWSOC2FPGA bridge */ + if (brg_mask & RSTMGR_BRGMODRSTMASK_LWHPS2FPGA) { + /* Write Reset Manager hdskreq[lwsoc2fpga_flush_req] = 1 */ + NOTICE("Set LWS2F hdskreq ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_LWSOC2FPGAREQ); + + /* Read Reset Manager hdskack[lwsoc2fpga] = 1 */ + ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_LWSOC2FPGAACK, RSTMGR_HDSKACK_LWSOC2FPGAACK, + 300); + + if (ret < 0) { + ERROR("LWS2F bridge enable: Timeout hdskack\n"); + } + + /* Write Reset Manager hdskreq[lwsoc2fpga_flush_req] = 0 */ + NOTICE("Clear LWS2F hdskreq ...\n"); + mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_LWSOC2FPGAREQ); + + /* Write Reset Manager brgmodrst[lwsoc2fpga] = 1 */ + NOTICE("Assert LWS2F ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), + RSTMGR_BRGMODRST_LWHPS2FPGA); + + /* ToDo: Shall use udelay for product release */ + for (delay = 0; delay < 1000; delay++) { + /* dummy delay */ + } + + /* Write Reset Manager brgmodrst[lwsoc2fpga] = 0 */ + NOTICE("Deassert LWS2F ...\n"); + mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), + RSTMGR_BRGMODRST_LWHPS2FPGA); + } +#else + if (brg_mask != 0U) { + /* Clear idle request */ + mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_CLR), + noc_mask); + + /* De-assert all bridges */ + mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask); + + /* Wait until idle ack becomes 0 */ + ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK), + noc_mask, 0, 300); + if (ret < 0) { + ERROR("S2F bridge enable: Timeout idle ack\n"); + } + } +#endif + + /* Enable f2s bridge */ + socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq, + &f2s_force_drain, &f2s_en, + &f2s_idleack, &f2s_respempty, &f2s_cmdidle); +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + /* Enable FPGA2SOC bridge */ + if (brg_mask & RSTMGR_BRGMODRSTMASK_FPGA2SOC) { + /* Write Reset Manager hdsken[fpgahsen] = 1 */ + NOTICE("Set FPGA hdsken(fpgahsen) ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), RSTMGR_HDSKEN_FPGAHSEN); + + /* Write Reset Manager hdskreq[fpgahsreq] = 1 */ + NOTICE("Set FPGA hdskreq(fpgahsreq) ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), RSTMGR_HDSKREQ_FPGAHSREQ); + + /* Read Reset Manager hdskack[fpgahsack] = 1 */ + NOTICE("Get FPGA hdskack(fpgahsack) ...\n"); + ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_FPGAHSACK, RSTMGR_HDSKACK_FPGAHSACK, + 300); + + if (ret < 0) { + ERROR("FPGA bridge fpga handshake fpgahsreq: Timeout\n"); + } + + /* Write Reset Manager hdskreq[f2s_flush_req] = 1 */ + NOTICE("Set F2S hdskreq(f2s_flush_req) ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_FPGA2SOCREQ); + + /* Read Reset Manager hdskack[f2s_flush_ack] = 1 */ + NOTICE("Get F2S hdskack(f2s_flush_ack) ...\n"); + ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_FPGA2SOCACK, RSTMGR_HDSKACK_FPGA2SOCACK, + 300); + + if (ret < 0) { + ERROR("F2S bridge fpga handshake f2sdram_flush_req: Timeout\n"); + } + + /* Write Reset Manager hdskreq[fpgahsreq] = 1 */ + NOTICE("Clear FPGA hdskreq(fpgahsreq) ...\n"); + mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), RSTMGR_HDSKREQ_FPGAHSREQ); + + /* Write Reset Manager hdskreq[f2s_flush_req] = 1 */ + NOTICE("Clear F2S hdskreq(f2s_flush_req) ...\n"); + 
mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_FPGA2SOCREQ); + + /* Read Reset Manager hdskack[f2s_flush_ack] = 0 */ + NOTICE("Get F2SDRAM hdskack(f2s_flush_ack) ...\n"); + ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_FPGA2SOCACK, RSTMGR_HDSKACK_FPGA2SOCACK_DASRT, + 300); + + if (ret < 0) { + ERROR("F2S bridge fpga handshake f2s_flush_ack: Timeout\n"); + } + + /* Read Reset Manager hdskack[fpgahsack] = 0 */ + NOTICE("Get FPGA hdskack(fpgahsack) ...\n"); + ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_FPGAHSACK, RSTMGR_HDSKACK_FPGAHSACK_DASRT, + 300); + + if (ret < 0) { + ERROR("F2S bridge fpga handshake fpgahsack: Timeout\n"); + } + + /* Write Reset Manager brgmodrst[fpga2soc] = 1 */ + NOTICE("Assert F2S ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), RSTMGR_BRGMODRST_FPGA2SOC); + + /* ToDo: Shall use udelay for product release */ + for (delay = 0; delay < 1000; delay++) { + /* dummy delay */ + } + + /* Write Reset Manager brgmodrst[fpga2soc] = 0 */ + NOTICE("Deassert F2S ...\n"); + mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), RSTMGR_BRGMODRST_FPGA2SOC); + + /* Write System Manager f2s bridge control register[f2soc_enable] = 1 */ + NOTICE("Deassert F2S f2soc_enable ...\n"); + mmio_setbits_32(SOCFPGA_SYSMGR(F2S_BRIDGE_CTRL), + SYSMGR_F2S_BRIDGE_CTRL_EN); + } + + /* Enable FPGA2SDRAM bridge */ + if (brg_mask & RSTMGR_BRGMODRSTMASK_F2SDRAM0) { + /* Write Reset Manager hdsken[fpgahsen] = 1 */ + NOTICE("Set F2SDRAM hdsken(fpgahsen) ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), RSTMGR_HDSKEN_FPGAHSEN); + + /* Write Reset Manager hdskreq[fpgahsreq] = 1 */ + NOTICE("Set F2SDRAM hdskreq(fpgahsreq) ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), RSTMGR_HDSKREQ_FPGAHSREQ); + + /* Read Reset Manager hdskack[fpgahsack] = 1 */ + NOTICE("Get F2SDRAM hdskack(fpgahsack) ...\n"); + ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_FPGAHSACK, RSTMGR_HDSKACK_FPGAHSACK, + 300); + + if (ret < 0) { + ERROR("F2SDRAM bridge fpga handshake fpgahsreq: Timeout\n"); + } + + /* Write Reset Manager hdskreq[f2sdram_flush_req] = 1 */ + NOTICE("Set F2SDRAM hdskreq(f2sdram_flush_req) ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_F2SDRAM0REQ); + + /* Read Reset Manager hdskack[f2sdram_flush_ack] = 1 */ + NOTICE("Get F2SDRAM hdskack(f2sdram_flush_ack) ...\n"); + ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_F2SDRAM0ACK, RSTMGR_HDSKACK_F2SDRAM0ACK, + 300); + + if (ret < 0) { + ERROR("F2SDRAM bridge fpga handshake f2sdram_flush_req: Timeout\n"); + } + + /* Write Reset Manager hdskreq[fpgahsreq] = 1 */ + NOTICE("Clear F2SDRAM hdskreq(fpgahsreq) ...\n"); + mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), RSTMGR_HDSKREQ_FPGAHSREQ); + + /* Write Reset Manager hdskreq[f2sdram_flush_req] = 1 */ + NOTICE("Clear F2SDRAM hdskreq(f2sdram_flush_req) ...\n"); + mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), RSTMGR_HDSKREQ_F2SDRAM0REQ); + + /* Read Reset Manager hdskack[f2sdram_flush_ack] = 0 */ + NOTICE("Get F2SDRAM hdskack(f2sdram_flush_ack) ...\n"); + ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_F2SDRAM0ACK, RSTMGR_HDSKACK_F2SDRAM0ACK_DASRT, + 300); + + if (ret < 0) { + ERROR("F2SDRAM bridge fpga handshake f2sdram_flush_ack: Timeout\n"); + } + + /* Read Reset Manager hdskack[fpgahsack] = 0 */ + NOTICE("Get F2SDRAM hdskack(fpgahsack) ...\n"); + ret = poll_idle_status_by_counter(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_FPGAHSACK, RSTMGR_HDSKACK_FPGAHSACK_DASRT, 
+ 300); + + if (ret < 0) { + ERROR("F2SDRAM bridge fpga handshake fpgahsack: Timeout\n"); + } + + /* Write Reset Manager brgmodrst[fpga2sdram] = 1 */ + NOTICE("Assert F2SDRAM ...\n"); + mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), + RSTMGR_BRGMODRST_F2SSDRAM0); + + /* ToDo: Shall use udelay for product release */ + for (delay = 0; delay < 1000; delay++) { + /* dummy delay */ + } + + /* Write Reset Manager brgmodrst[fpga2sdram] = 0 */ + NOTICE("Deassert F2SDRAM ...\n"); + mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), + RSTMGR_BRGMODRST_F2SSDRAM0); + + /* + * Clear fpga2sdram_manager_main_SidebandManager_FlagOutClr0 + * f2s_ready_latency_enable + */ + NOTICE("Clear F2SDRAM f2s_ready_latency_enable ...\n"); + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0), + FLAGOUTCLR0_F2SDRAM0_ENABLE); + } +#else + if (brg_mask != 0U) { + mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask); + + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0), + f2s_idlereq); + + ret = poll_idle_status(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGINSTATUS0), + f2s_idleack, 0, 300); + + if (ret < 0) { + ERROR("F2S bridge enable: Timeout idle ack"); + } + + /* Clear the force drain */ + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0), + f2s_force_drain); + udelay(5); + + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0), + f2s_en); + udelay(5); + } +#endif + return ret; +} + +int socfpga_bridge_nongraceful_disable(uint32_t mask) +{ + int ret = 0; + int timeout = 1000; + uint32_t brg_mask = 0; + uint32_t f2s_idlereq = 0; + uint32_t f2s_force_drain = 0; + uint32_t f2s_en = 0; + uint32_t f2s_idleack = 0; + uint32_t f2s_respempty = 0; + uint32_t f2s_cmdidle = 0; + + socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq, + &f2s_force_drain, &f2s_en, + &f2s_idleack, &f2s_respempty, &f2s_cmdidle); + + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0), + f2s_idlereq); + + /* Time out Error - Bus is still active */ + /* Performing a non-graceful shutdown with Force drain */ + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0), + f2s_force_drain); + + ret = -ETIMEDOUT; + do { + /* Read response queue status to ensure it is empty */ + uint32_t idle_status; + + idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGINSTATUS0)); + if ((idle_status & f2s_respempty) != 0U) { + idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGINSTATUS0)); + if ((idle_status & f2s_respempty) != 0U) { + /* No time-out we are good! 
*/ + ret = 0; + break; + } + } + + asm("nop"); + + } while (timeout-- > 0); + + return ret; +} + +int socfpga_bridges_disable(uint32_t mask) +{ + int ret = 0; + uint32_t brg_mask = 0; + uint32_t noc_mask = 0; + uint32_t f2s_idlereq = 0; + uint32_t f2s_force_drain = 0; + uint32_t f2s_en = 0; + uint32_t f2s_idleack = 0; + uint32_t f2s_respempty = 0; + uint32_t f2s_cmdidle = 0; + + /* Disable s2f bridge */ + socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask); + if (brg_mask != 0U) { + mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_SET), + noc_mask); + + mmio_write_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 1); + + ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK), + noc_mask, noc_mask, 300); + if (ret < 0) { + ERROR("S2F Bridge disable: Timeout idle ack\n"); + } + + ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLESTATUS), + noc_mask, noc_mask, 300); + if (ret < 0) { + ERROR("S2F Bridge disable: Timeout idle status\n"); + } + + mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask); + + mmio_write_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 0); + } + + /* Disable f2s bridge */ + socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq, + &f2s_force_drain, &f2s_en, + &f2s_idleack, &f2s_respempty, &f2s_cmdidle); + if (brg_mask != 0U) { + + if (mmio_read_32(SOCFPGA_RSTMGR(BRGMODRST)) & brg_mask) { + /* Bridge cannot be reset twice */ + return 0; + } + + /* Starts the fence and drain traffic from F2SDRAM to MPFE */ + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), + RSTMGR_HDSKEN_FPGAHSEN); + udelay(5); + /* Ignoring FPGA ACK as it will time-out */ + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_FPGAHSREQ); + + ret = poll_idle_status_by_clkcycles(SOCFPGA_RSTMGR(HDSKACK), + RSTMGR_HDSKACK_FPGAHSREQ, + RSTMGR_HDSKACK_FPGAHSREQ, 1000); + + /* DISABLE F2S Bridge */ + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0), + f2s_en); + udelay(5); + + ret = socfpga_bridge_nongraceful_disable(mask); + + /* Bridge reset */ +#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10 + /* Software must never write a 0x1 to FPGA2SOC_MASK bit */ + mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), + brg_mask & ~RSTMGR_FIELD(BRG, FPGA2SOC)); +#else + mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), + brg_mask); +#endif + /* Re-enable traffic to SDRAM*/ + mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ), + RSTMGR_HDSKREQ_FPGAHSREQ); + + mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0), + f2s_idlereq); + } + + return ret; +} + +/* CPUxRESETBASELOW */ +int socfpga_cpu_reset_base(unsigned int cpu_id) +{ + int ret = 0; + uint32_t entrypoint = 0; + + ret = socfpga_cpurstrelease(cpu_id); + + if (ret < 0) { + return RSTMGR_RET_ERROR; + } + + if (ret == RSTMGR_RET_OK) { + + switch (cpu_id) { + case 0: + entrypoint = mmio_read_32(SOCFPGA_RSTMGR_CPUBASELOW_0); + entrypoint |= mmio_read_32(SOCFPGA_RSTMGR_CPUBASEHIGH_0) << 24; + break; + + case 1: + entrypoint = mmio_read_32(SOCFPGA_RSTMGR_CPUBASELOW_1); + entrypoint |= mmio_read_32(SOCFPGA_RSTMGR_CPUBASEHIGH_1) << 24; + break; + + case 2: + entrypoint = mmio_read_32(SOCFPGA_RSTMGR_CPUBASELOW_2); + entrypoint |= mmio_read_32(SOCFPGA_RSTMGR_CPUBASEHIGH_2) << 24; + break; + + case 3: + entrypoint = mmio_read_32(SOCFPGA_RSTMGR_CPUBASELOW_3); + entrypoint |= mmio_read_32(SOCFPGA_RSTMGR_CPUBASEHIGH_3) << 24; + break; + + default: + break; + } + + mmio_write_64(PLAT_SEC_ENTRY, entrypoint); + } + + return RSTMGR_RET_OK; +} + +/* CPURSTRELEASE */ +int socfpga_cpurstrelease(unsigned int cpu_id) +{ + unsigned int timeout = 0; + + do { + /* Read response queue status to ensure it is empty */ + uint32_t cpurstrelease_status; + + 
cpurstrelease_status = mmio_read_32(SOCFPGA_RSTMGR(CPURSTRELEASE)); + + if ((cpurstrelease_status & RSTMGR_CPUSTRELEASE_CPUx) == cpu_id) { + return RSTMGR_RET_OK; + } + udelay(1000); + } while (timeout-- > 0); + + return RSTMGR_RET_ERROR; +} diff --git a/plat/intel/soc/common/socfpga_delay_timer.c b/plat/intel/soc/common/socfpga_delay_timer.c new file mode 100644 index 0000000..8fce5cf --- /dev/null +++ b/plat/intel/soc/common/socfpga_delay_timer.c @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <arch_helpers.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include "socfpga_plat_def.h" + + +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX +#include "agilex_clock_manager.h" +#elif PLATFORM_MODEL == PLAT_SOCFPGA_N5X +#include "n5x_clock_manager.h" +#elif PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10 +#include "s10_clock_manager.h" +#endif + +#define SOCFPGA_GLOBAL_TIMER 0xffd01000 +#define SOCFPGA_GLOBAL_TIMER_EN 0x3 + +static timer_ops_t plat_timer_ops; +/******************************************************************** + * The timer delay function + ********************************************************************/ +static uint32_t socfpga_get_timer_value(void) +{ + /* + * Generic delay timer implementation expects the timer to be a down + * counter. We apply bitwise NOT operator to the tick values returned + * by read_cntpct_el0() to simulate the down counter. The value is + * clipped from 64 to 32 bits. + */ + return (uint32_t)(~read_cntpct_el0()); +} + +void socfpga_delay_timer_init_args(void) +{ + plat_timer_ops.get_timer_value = socfpga_get_timer_value; + plat_timer_ops.clk_mult = 1; + plat_timer_ops.clk_div = PLAT_SYS_COUNTER_FREQ_IN_MHZ; + + timer_init(&plat_timer_ops); + +} + +void socfpga_delay_timer_init(void) +{ + socfpga_delay_timer_init_args(); + mmio_write_32(SOCFPGA_GLOBAL_TIMER, SOCFPGA_GLOBAL_TIMER_EN); + + NOTICE("BL31 CLK freq = %d MHz\n", PLAT_SYS_COUNTER_FREQ_IN_MHZ); + + asm volatile("msr cntp_ctl_el0, %0" : : "r" (SOCFPGA_GLOBAL_TIMER_EN)); + asm volatile("msr cntp_tval_el0, %0" : : "r" (~0)); + +} diff --git a/plat/intel/soc/common/socfpga_image_load.c b/plat/intel/soc/common/socfpga_image_load.c new file mode 100644 index 0000000..a5c3279 --- /dev/null +++ b/plat/intel/soc/common/socfpga_image_load.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/desc_image_load.h> + +/******************************************************************************* + * This function flushes the data structures so that they are visible + * in memory for the next BL image. + ******************************************************************************/ +void plat_flush_next_bl_params(void) +{ + flush_bl_params_desc(); +} + +/******************************************************************************* + * This function returns the list of loadable images. + ******************************************************************************/ +bl_load_info_t *plat_get_bl_image_load_info(void) +{ + return get_bl_load_info_from_mem_params_desc(); +} + +/******************************************************************************* + * This function returns the list of executable images. 
+ ******************************************************************************/ +bl_params_t *plat_get_next_bl_params(void) +{ + unsigned int count; + unsigned int img_id = 0U; + unsigned int link_index = 0U; + bl_params_node_t *bl_exec_node = NULL; + bl_mem_params_node_t *desc_ptr; + + /* If there is no image to start with, return NULL */ + if (bl_mem_params_desc_num == 0U) + return NULL; + + /* Clean next_params_info in BL image node */ + for (count = 0U; count < bl_mem_params_desc_num; count++) { + + desc_ptr = &bl_mem_params_desc_ptr[link_index]; + bl_exec_node = &desc_ptr->params_node_mem; + bl_exec_node->next_params_info = NULL; + + /* If no next hand-off image then break out */ + img_id = desc_ptr->next_handoff_image_id; + if (img_id == INVALID_IMAGE_ID) + break; + + /* Get the index for the next hand-off image */ + link_index = get_bl_params_node_index(img_id); + } + + return get_next_bl_params_from_mem_params_desc(); +} diff --git a/plat/intel/soc/common/socfpga_psci.c b/plat/intel/soc/common/socfpga_psci.c new file mode 100644 index 0000000..c93e13f --- /dev/null +++ b/plat/intel/soc/common/socfpga_psci.c @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2019-2023, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch_helpers.h> +#include <common/debug.h> + +#ifndef GICV3_SUPPORT_GIC600 +#include <drivers/arm/gicv2.h> +#else +#include <drivers/arm/gicv3.h> +#endif +#include <lib/mmio.h> +#include <lib/psci/psci.h> +#include <plat/common/platform.h> +#include "socfpga_mailbox.h" +#include "socfpga_plat_def.h" +#include "socfpga_reset_manager.h" +#include "socfpga_sip_svc.h" +#include "socfpga_system_manager.h" + +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 +void socfpga_wakeup_secondary_cpu(unsigned int cpu_id); +extern void plat_secondary_cold_boot_setup(void); +#endif + +/******************************************************************************* + * plat handler called when a CPU is about to enter standby. + ******************************************************************************/ +void socfpga_cpu_standby(plat_local_state_t cpu_state) +{ + /* + * Enter standby state + * dsb is good practice before using wfi to enter low power states + */ + VERBOSE("%s: cpu_state: 0x%x\n", __func__, cpu_state); + dsb(); + wfi(); +} + +/******************************************************************************* + * plat handler called when a power domain is about to be turned on. The + * mpidr determines the CPU to be turned on. 
+ ******************************************************************************/ +int socfpga_pwr_domain_on(u_register_t mpidr) +{ + unsigned int cpu_id = plat_core_pos_by_mpidr(mpidr); +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + /* TODO: Add in CPU FUSE from SDM */ +#else + uint32_t psci_boot = 0x00; + + VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr); +#endif + + if (cpu_id == -1) + return PSCI_E_INTERN_FAIL; + +#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5 + if (cpu_id == 0x00) { + psci_boot = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_8)); + psci_boot |= 0x80000; /* bit 19 */ + mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_8), psci_boot); + } + + mmio_write_64(PLAT_CPUID_RELEASE, cpu_id); +#endif + + /* release core reset */ +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + bl31_plat_set_secondary_cpu_entrypoint(cpu_id); +#else + mmio_setbits_32(SOCFPGA_RSTMGR(MPUMODRST), 1 << cpu_id); + mmio_write_64(PLAT_CPUID_RELEASE, cpu_id); +#endif + + return PSCI_E_SUCCESS; +} + +/******************************************************************************* + * plat handler called when a power domain is about to be turned off. The + * target_state encodes the power state that each level should transition to. + ******************************************************************************/ +void socfpga_pwr_domain_off(const psci_power_state_t *target_state) +{ + for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++) + VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n", + __func__, i, target_state->pwr_domain_state[i]); + + /* Prevent interrupts from spuriously waking up this cpu */ +#ifdef GICV3_SUPPORT_GIC600 + gicv3_cpuif_disable(plat_my_core_pos()); +#else + gicv2_cpuif_disable(); +#endif + +} + +/******************************************************************************* + * plat handler called when a power domain is about to be suspended. The + * target_state encodes the power state that each level should transition to. + ******************************************************************************/ +void socfpga_pwr_domain_suspend(const psci_power_state_t *target_state) +{ +#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5 + unsigned int cpu_id = plat_my_core_pos(); +#endif + + for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++) + VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n", + __func__, i, target_state->pwr_domain_state[i]); + +#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5 + /* assert core reset */ + mmio_setbits_32(SOCFPGA_RSTMGR(MPUMODRST), 1 << cpu_id); +#endif +} + +/******************************************************************************* + * plat handler called when a power domain has just been powered on after + * being turned off earlier. The target_state encodes the low power state that + * each level has woken up from. 
+ ******************************************************************************/ +void socfpga_pwr_domain_on_finish(const psci_power_state_t *target_state) +{ + for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++) + VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n", + __func__, i, target_state->pwr_domain_state[i]); + + /* Enable the gic cpu interface */ +#ifdef GICV3_SUPPORT_GIC600 + gicv3_rdistif_init(plat_my_core_pos()); + gicv3_cpuif_enable(plat_my_core_pos()); +#else + /* Program the gic per-cpu distributor or re-distributor interface */ + gicv2_pcpu_distif_init(); + gicv2_set_pe_target_mask(plat_my_core_pos()); + + /* Enable the gic cpu interface */ + gicv2_cpuif_enable(); +#endif +} + +/******************************************************************************* + * plat handler called when a power domain has just been powered on after + * having been suspended earlier. The target_state encodes the low power state + * that each level has woken up from. + * TODO: At the moment we reuse the on finisher and reinitialize the secure + * context. Need to implement a separate suspend finisher. + ******************************************************************************/ +void socfpga_pwr_domain_suspend_finish(const psci_power_state_t *target_state) +{ +#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5 + unsigned int cpu_id = plat_my_core_pos(); +#endif + + for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++) + VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n", + __func__, i, target_state->pwr_domain_state[i]); + +#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5 + /* release core reset */ + mmio_clrbits_32(SOCFPGA_RSTMGR(MPUMODRST), 1 << cpu_id); +#endif +} + +/******************************************************************************* + * plat handlers to shutdown/reboot the system + ******************************************************************************/ +static void __dead2 socfpga_system_off(void) +{ + wfi(); + ERROR("System Off: operation not handled.\n"); + panic(); +} + +extern uint64_t intel_rsu_update_address; + +static void __dead2 socfpga_system_reset(void) +{ + uint32_t addr_buf[2]; + + memcpy(addr_buf, &intel_rsu_update_address, + sizeof(intel_rsu_update_address)); + if (intel_rsu_update_address) { + mailbox_rsu_update(addr_buf); + } else { + mailbox_reset_cold(); + } + + while (1) + wfi(); +} + +static int socfpga_system_reset2(int is_vendor, int reset_type, + u_register_t cookie) +{ +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + mailbox_reset_warm(reset_type); +#else + if (cold_reset_for_ecc_dbe()) { + mailbox_reset_cold(); + } +#endif + + /* disable cpuif */ +#ifdef GICV3_SUPPORT_GIC600 + gicv3_cpuif_disable(plat_my_core_pos()); +#else + gicv2_cpuif_disable(); +#endif + + /* Store magic number */ + mmio_write_32(L2_RESET_DONE_REG, L2_RESET_DONE_STATUS); + + /* Increase timeout */ + mmio_write_32(SOCFPGA_RSTMGR(HDSKTIMEOUT), 0xffffff); + + /* Enable handshakes */ + mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN), RSTMGR_HDSKEN_SET); + +#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5 + /* Reset L2 module */ + mmio_setbits_32(SOCFPGA_RSTMGR(COLDMODRST), 0x100); +#endif + + while (1) + wfi(); + + /* Should not reach here */ + return 0; +} + +int socfpga_validate_power_state(unsigned int power_state, + psci_power_state_t *req_state) +{ + VERBOSE("%s: power_state: 0x%x\n", __func__, power_state); + + return PSCI_E_SUCCESS; +} + +int socfpga_validate_ns_entrypoint(unsigned long ns_entrypoint) +{ + VERBOSE("%s: ns_entrypoint: 0x%lx\n", __func__, ns_entrypoint); + return PSCI_E_SUCCESS; +} + 
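+/*******************************************************************************
+ * plat handler to build the power state for a system suspend request. Every
+ * affinity level is asked to enter the platform's deepest off state.
+ ******************************************************************************/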
+void socfpga_get_sys_suspend_power_state(psci_power_state_t *req_state) +{ + req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE; + req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE; +} + +/******************************************************************************* + * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard + * platform layer will take care of registering the handlers with PSCI. + ******************************************************************************/ +const plat_psci_ops_t socfpga_psci_pm_ops = { + .cpu_standby = socfpga_cpu_standby, + .pwr_domain_on = socfpga_pwr_domain_on, + .pwr_domain_off = socfpga_pwr_domain_off, + .pwr_domain_suspend = socfpga_pwr_domain_suspend, + .pwr_domain_on_finish = socfpga_pwr_domain_on_finish, + .pwr_domain_suspend_finish = socfpga_pwr_domain_suspend_finish, + .system_off = socfpga_system_off, + .system_reset = socfpga_system_reset, + .system_reset2 = socfpga_system_reset2, + .validate_power_state = socfpga_validate_power_state, + .validate_ns_entrypoint = socfpga_validate_ns_entrypoint, + .get_sys_suspend_power_state = socfpga_get_sys_suspend_power_state +}; + +/******************************************************************************* + * Export the platform specific power ops. + ******************************************************************************/ +int plat_setup_psci_ops(uintptr_t sec_entrypoint, + const struct plat_psci_ops **psci_ops) +{ + /* Save warm boot entrypoint.*/ + mmio_write_64(PLAT_SEC_ENTRY, sec_entrypoint); + *psci_ops = &socfpga_psci_pm_ops; + + return 0; +} diff --git a/plat/intel/soc/common/socfpga_sip_svc.c b/plat/intel/soc/common/socfpga_sip_svc.c new file mode 100644 index 0000000..c6530cf --- /dev/null +++ b/plat/intel/soc/common/socfpga_sip_svc.c @@ -0,0 +1,1275 @@ +/* + * Copyright (c) 2019-2023, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <common/debug.h> +#include <common/runtime_svc.h> +#include <lib/mmio.h> +#include <tools_share/uuid.h> + +#include "socfpga_fcs.h" +#include "socfpga_mailbox.h" +#include "socfpga_plat_def.h" +#include "socfpga_reset_manager.h" +#include "socfpga_sip_svc.h" +#include "socfpga_system_manager.h" + +/* Total buffer the driver can hold */ +#define FPGA_CONFIG_BUFFER_SIZE 4 + +static config_type request_type = NO_REQUEST; +static int current_block, current_buffer; +static int read_block, max_blocks; +static uint32_t send_id, rcv_id; +static uint32_t bytes_per_block, blocks_submitted; +static bool bridge_disable; + +/* RSU static variables */ +static uint32_t rsu_dcmf_ver[4] = {0}; +static uint16_t rsu_dcmf_stat[4] = {0}; +static uint32_t rsu_max_retry; + +/* SiP Service UUID */ +DEFINE_SVC_UUID2(intl_svc_uid, + 0xa85273b0, 0xe85a, 0x4862, 0xa6, 0x2a, + 0xfa, 0x88, 0x88, 0x17, 0x68, 0x81); + +static uint64_t socfpga_sip_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags) +{ + ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid); + SMC_RET1(handle, SMC_UNK); +} + +struct fpga_config_info fpga_config_buffers[FPGA_CONFIG_BUFFER_SIZE]; + +static int intel_fpga_sdm_write_buffer(struct fpga_config_info *buffer) +{ + uint32_t args[3]; + + while (max_blocks > 0 && buffer->size > buffer->size_written) { + args[0] = (1<<8); + args[1] = buffer->addr + buffer->size_written; + if (buffer->size - buffer->size_written <= bytes_per_block) { + args[2] = buffer->size - buffer->size_written; + current_buffer++; + current_buffer %= FPGA_CONFIG_BUFFER_SIZE; + } else { + args[2] = bytes_per_block; + } + + buffer->size_written += args[2]; + mailbox_send_cmd_async(&send_id, MBOX_RECONFIG_DATA, args, + 3U, CMD_INDIRECT); + + buffer->subblocks_sent++; + max_blocks--; + } + + return !max_blocks; +} + +static int intel_fpga_sdm_write_all(void) +{ + for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) { + if (intel_fpga_sdm_write_buffer( + &fpga_config_buffers[current_buffer])) { + break; + } + } + return 0; +} + +static uint32_t intel_mailbox_fpga_config_isdone(void) +{ + uint32_t ret; + + switch (request_type) { + case RECONFIGURATION: + ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS, + true); + break; + case BITSTREAM_AUTH: + ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS, + false); + break; + default: + ret = intel_mailbox_get_config_status(MBOX_CONFIG_STATUS, + false); + break; + } + + if (ret != 0U) { + if (ret == MBOX_CFGSTAT_STATE_CONFIG) { + return INTEL_SIP_SMC_STATUS_BUSY; + } else { + request_type = NO_REQUEST; + return INTEL_SIP_SMC_STATUS_ERROR; + } + } + + if (bridge_disable != 0U) { + socfpga_bridges_enable(~0); /* Enable bridge */ + bridge_disable = false; + } + request_type = NO_REQUEST; + + return INTEL_SIP_SMC_STATUS_OK; +} + +static int mark_last_buffer_xfer_completed(uint32_t *buffer_addr_completed) +{ + int i; + + for (i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) { + if (fpga_config_buffers[i].block_number == current_block) { + fpga_config_buffers[i].subblocks_sent--; + if (fpga_config_buffers[i].subblocks_sent == 0 + && fpga_config_buffers[i].size <= + fpga_config_buffers[i].size_written) { + fpga_config_buffers[i].write_requested = 0; + current_block++; + *buffer_addr_completed = + fpga_config_buffers[i].addr; + return 0; + } + } + } + + return -1; +} + +static int intel_fpga_config_completed_write(uint32_t 
*completed_addr, + uint32_t *count, uint32_t *job_id) +{ + uint32_t resp[5]; + unsigned int resp_len = ARRAY_SIZE(resp); + int status = INTEL_SIP_SMC_STATUS_OK; + int all_completed = 1; + *count = 0; + + while (*count < 3) { + + status = mailbox_read_response(job_id, + resp, &resp_len); + + if (status < 0) { + break; + } + + max_blocks++; + + if (mark_last_buffer_xfer_completed( + &completed_addr[*count]) == 0) { + *count = *count + 1; + } else { + break; + } + } + + if (*count <= 0) { + if (status != MBOX_NO_RESPONSE && + status != MBOX_TIMEOUT && resp_len != 0) { + mailbox_clear_response(); + request_type = NO_REQUEST; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *count = 0; + } + + intel_fpga_sdm_write_all(); + + if (*count > 0) { + status = INTEL_SIP_SMC_STATUS_OK; + } else if (*count == 0) { + status = INTEL_SIP_SMC_STATUS_BUSY; + } + + for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) { + if (fpga_config_buffers[i].write_requested != 0) { + all_completed = 0; + break; + } + } + + if (all_completed == 1) { + return INTEL_SIP_SMC_STATUS_OK; + } + + return status; +} + +static int intel_fpga_config_start(uint32_t flag) +{ + uint32_t argument = 0x1; + uint32_t response[3]; + int status = 0; + unsigned int size = 0; + unsigned int resp_len = ARRAY_SIZE(response); + + request_type = RECONFIGURATION; + + if (!CONFIG_TEST_FLAG(flag, PARTIAL_CONFIG)) { + bridge_disable = true; + } + + if (CONFIG_TEST_FLAG(flag, AUTHENTICATION)) { + size = 1; + bridge_disable = false; + request_type = BITSTREAM_AUTH; + } + + mailbox_clear_response(); + + mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_CANCEL, NULL, 0U, + CMD_CASUAL, NULL, NULL); + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_RECONFIG, &argument, size, + CMD_CASUAL, response, &resp_len); + + if (status < 0) { + bridge_disable = false; + request_type = NO_REQUEST; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + max_blocks = response[0]; + bytes_per_block = response[1]; + + for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) { + fpga_config_buffers[i].size = 0; + fpga_config_buffers[i].size_written = 0; + fpga_config_buffers[i].addr = 0; + fpga_config_buffers[i].write_requested = 0; + fpga_config_buffers[i].block_number = 0; + fpga_config_buffers[i].subblocks_sent = 0; + } + + blocks_submitted = 0; + current_block = 0; + read_block = 0; + current_buffer = 0; + + /* Disable bridge on full reconfiguration */ + if (bridge_disable) { + socfpga_bridges_disable(~0); + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +static bool is_fpga_config_buffer_full(void) +{ + for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) { + if (!fpga_config_buffers[i].write_requested) { + return false; + } + } + return true; +} + +bool is_address_in_ddr_range(uint64_t addr, uint64_t size) +{ + if (!addr && !size) { + return true; + } + if (size > (UINT64_MAX - addr)) { + return false; + } + if (addr < BL31_LIMIT) { + return false; + } + if (addr + size > DRAM_BASE + DRAM_SIZE) { + return false; + } + + return true; +} + +static uint32_t intel_fpga_config_write(uint64_t mem, uint64_t size) +{ + int i; + + intel_fpga_sdm_write_all(); + + if (!is_address_in_ddr_range(mem, size) || + is_fpga_config_buffer_full()) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + for (i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) { + int j = (i + current_buffer) % FPGA_CONFIG_BUFFER_SIZE; + + if (!fpga_config_buffers[j].write_requested) { + fpga_config_buffers[j].addr = mem; + fpga_config_buffers[j].size = size; + fpga_config_buffers[j].size_written = 0; + fpga_config_buffers[j].write_requested = 1; + 
fpga_config_buffers[j].block_number = + blocks_submitted++; + fpga_config_buffers[j].subblocks_sent = 0; + break; + } + } + + if (is_fpga_config_buffer_full()) { + return INTEL_SIP_SMC_STATUS_BUSY; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +static int is_out_of_sec_range(uint64_t reg_addr) +{ +#if DEBUG + return 0; +#endif + +#if PLATFORM_MODEL != PLAT_SOCFPGA_AGILEX5 + switch (reg_addr) { + case(0xF8011100): /* ECCCTRL1 */ + case(0xF8011104): /* ECCCTRL2 */ + case(0xF8011110): /* ERRINTEN */ + case(0xF8011114): /* ERRINTENS */ + case(0xF8011118): /* ERRINTENR */ + case(0xF801111C): /* INTMODE */ + case(0xF8011120): /* INTSTAT */ + case(0xF8011124): /* DIAGINTTEST */ + case(0xF801112C): /* DERRADDRA */ + case(0xFA000000): /* SMMU SCR0 */ + case(0xFA000004): /* SMMU SCR1 */ + case(0xFA000400): /* SMMU NSCR0 */ + case(0xFA004000): /* SMMU SSD0_REG */ + case(0xFA000820): /* SMMU SMR8 */ + case(0xFA000c20): /* SMMU SCR8 */ + case(0xFA028000): /* SMMU CB8_SCTRL */ + case(0xFA001020): /* SMMU CBAR8 */ + case(0xFA028030): /* SMMU TCR_LPAE */ + case(0xFA028020): /* SMMU CB8_TTBR0_LOW */ + case(0xFA028024): /* SMMU CB8_PRRR_HIGH */ + case(0xFA028038): /* SMMU CB8_PRRR_MIR0 */ + case(0xFA02803C): /* SMMU CB8_PRRR_MIR1 */ + case(0xFA028010): /* SMMU_CB8)TCR2 */ + case(0xFFD080A4): /* SDM SMMU STREAM ID REG */ + case(0xFA001820): /* SMMU_CBA2R8 */ + case(0xFA000074): /* SMMU_STLBGSTATUS */ + case(0xFA0287F4): /* SMMU_CB8_TLBSTATUS */ + case(0xFA000060): /* SMMU_STLBIALL */ + case(0xFA000070): /* SMMU_STLBGSYNC */ + case(0xFA028618): /* CB8_TLBALL */ + case(0xFA0287F0): /* CB8_TLBSYNC */ + case(0xFFD12028): /* SDMMCGRP_CTRL */ + case(0xFFD12044): /* EMAC0 */ + case(0xFFD12048): /* EMAC1 */ + case(0xFFD1204C): /* EMAC2 */ + case(0xFFD12090): /* ECC_INT_MASK_VALUE */ + case(0xFFD12094): /* ECC_INT_MASK_SET */ + case(0xFFD12098): /* ECC_INT_MASK_CLEAR */ + case(0xFFD1209C): /* ECC_INTSTATUS_SERR */ + case(0xFFD120A0): /* ECC_INTSTATUS_DERR */ + case(0xFFD120C0): /* NOC_TIMEOUT */ + case(0xFFD120C4): /* NOC_IDLEREQ_SET */ + case(0xFFD120C8): /* NOC_IDLEREQ_CLR */ + case(0xFFD120D0): /* NOC_IDLEACK */ + case(0xFFD120D4): /* NOC_IDLESTATUS */ + case(0xFFD12200): /* BOOT_SCRATCH_COLD0 */ + case(0xFFD12204): /* BOOT_SCRATCH_COLD1 */ + case(0xFFD12220): /* BOOT_SCRATCH_COLD8 */ + case(0xFFD12224): /* BOOT_SCRATCH_COLD9 */ + return 0; +#else + switch (reg_addr) { + + case(0xF8011104): /* ECCCTRL2 */ + case(0xFFD12028): /* SDMMCGRP_CTRL */ + case(0xFFD120C4): /* NOC_IDLEREQ_SET */ + case(0xFFD120C8): /* NOC_IDLEREQ_CLR */ + case(0xFFD120D0): /* NOC_IDLEACK */ + + + case(SOCFPGA_MEMCTRL(ECCCTRL1)): /* ECCCTRL1 */ + case(SOCFPGA_MEMCTRL(ERRINTEN)): /* ERRINTEN */ + case(SOCFPGA_MEMCTRL(ERRINTENS)): /* ERRINTENS */ + case(SOCFPGA_MEMCTRL(ERRINTENR)): /* ERRINTENR */ + case(SOCFPGA_MEMCTRL(INTMODE)): /* INTMODE */ + case(SOCFPGA_MEMCTRL(INTSTAT)): /* INTSTAT */ + case(SOCFPGA_MEMCTRL(DIAGINTTEST)): /* DIAGINTTEST */ + case(SOCFPGA_MEMCTRL(DERRADDRA)): /* DERRADDRA */ + + case(SOCFPGA_SYSMGR(EMAC_0)): /* EMAC0 */ + case(SOCFPGA_SYSMGR(EMAC_1)): /* EMAC1 */ + case(SOCFPGA_SYSMGR(EMAC_2)): /* EMAC2 */ + case(SOCFPGA_SYSMGR(ECC_INTMASK_VALUE)): /* ECC_INT_MASK_VALUE */ + case(SOCFPGA_SYSMGR(ECC_INTMASK_SET)): /* ECC_INT_MASK_SET */ + case(SOCFPGA_SYSMGR(ECC_INTMASK_CLR)): /* ECC_INT_MASK_CLEAR */ + case(SOCFPGA_SYSMGR(ECC_INTMASK_SERR)): /* ECC_INTSTATUS_SERR */ + case(SOCFPGA_SYSMGR(ECC_INTMASK_DERR)): /* ECC_INTSTATUS_DERR */ + case(SOCFPGA_SYSMGR(NOC_TIMEOUT)): /* NOC_TIMEOUT */ + 
case(SOCFPGA_SYSMGR(NOC_IDLESTATUS)): /* NOC_IDLESTATUS */ + case(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_0)): /* BOOT_SCRATCH_COLD0 */ + case(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1)): /* BOOT_SCRATCH_COLD1 */ + case(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_8)): /* BOOT_SCRATCH_COLD8 */ + case(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_9)): /* BOOT_SCRATCH_COLD9 */ + return 0; +#endif + default: + break; + } + + return -1; +} + +/* Secure register access */ +uint32_t intel_secure_reg_read(uint64_t reg_addr, uint32_t *retval) +{ + if (is_out_of_sec_range(reg_addr)) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *retval = mmio_read_32(reg_addr); + + return INTEL_SIP_SMC_STATUS_OK; +} + +uint32_t intel_secure_reg_write(uint64_t reg_addr, uint32_t val, + uint32_t *retval) +{ + if (is_out_of_sec_range(reg_addr)) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + mmio_write_32(reg_addr, val); + + return intel_secure_reg_read(reg_addr, retval); +} + +uint32_t intel_secure_reg_update(uint64_t reg_addr, uint32_t mask, + uint32_t val, uint32_t *retval) +{ + if (!intel_secure_reg_read(reg_addr, retval)) { + *retval &= ~mask; + *retval |= val & mask; + return intel_secure_reg_write(reg_addr, *retval, retval); + } + + return INTEL_SIP_SMC_STATUS_ERROR; +} + +/* Intel Remote System Update (RSU) services */ +uint64_t intel_rsu_update_address; + +static uint32_t intel_rsu_status(uint64_t *respbuf, unsigned int respbuf_sz) +{ + if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0) { + return INTEL_SIP_SMC_RSU_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +uint32_t intel_rsu_update(uint64_t update_address) +{ + if (update_address > SIZE_MAX) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + intel_rsu_update_address = update_address; + return INTEL_SIP_SMC_STATUS_OK; +} + +static uint32_t intel_rsu_notify(uint32_t execution_stage) +{ + if (mailbox_hps_stage_notify(execution_stage) < 0) { + return INTEL_SIP_SMC_RSU_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +static uint32_t intel_rsu_retry_counter(uint32_t *respbuf, uint32_t respbuf_sz, + uint32_t *ret_stat) +{ + if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0) { + return INTEL_SIP_SMC_RSU_ERROR; + } + + *ret_stat = respbuf[8]; + return INTEL_SIP_SMC_STATUS_OK; +} + +static uint32_t intel_rsu_copy_dcmf_version(uint64_t dcmf_ver_1_0, + uint64_t dcmf_ver_3_2) +{ + rsu_dcmf_ver[0] = dcmf_ver_1_0; + rsu_dcmf_ver[1] = dcmf_ver_1_0 >> 32; + rsu_dcmf_ver[2] = dcmf_ver_3_2; + rsu_dcmf_ver[3] = dcmf_ver_3_2 >> 32; + + return INTEL_SIP_SMC_STATUS_OK; +} + +static uint32_t intel_rsu_copy_dcmf_status(uint64_t dcmf_stat) +{ + rsu_dcmf_stat[0] = 0xFFFF & (dcmf_stat >> (0 * 16)); + rsu_dcmf_stat[1] = 0xFFFF & (dcmf_stat >> (1 * 16)); + rsu_dcmf_stat[2] = 0xFFFF & (dcmf_stat >> (2 * 16)); + rsu_dcmf_stat[3] = 0xFFFF & (dcmf_stat >> (3 * 16)); + + return INTEL_SIP_SMC_STATUS_OK; +} + +/* Intel HWMON services */ +static uint32_t intel_hwmon_readtemp(uint32_t chan, uint32_t *retval) +{ + if (mailbox_hwmon_readtemp(chan, retval) < 0) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +static uint32_t intel_hwmon_readvolt(uint32_t chan, uint32_t *retval) +{ + if (mailbox_hwmon_readvolt(chan, retval) < 0) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +/* Mailbox services */ +static uint32_t intel_smc_fw_version(uint32_t *fw_version) +{ + int status; + unsigned int resp_len = CONFIG_STATUS_WORD_SIZE; + uint32_t resp_data[CONFIG_STATUS_WORD_SIZE] = {0U}; + + status = mailbox_send_cmd(MBOX_JOB_ID, 
MBOX_CONFIG_STATUS, NULL, 0U, + CMD_CASUAL, resp_data, &resp_len); + + if (status < 0) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + if (resp_len <= CONFIG_STATUS_FW_VER_OFFSET) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *fw_version = resp_data[CONFIG_STATUS_FW_VER_OFFSET] & CONFIG_STATUS_FW_VER_MASK; + + return INTEL_SIP_SMC_STATUS_OK; +} + +static uint32_t intel_mbox_send_cmd(uint32_t cmd, uint32_t *args, + unsigned int len, uint32_t urgent, uint64_t response, + unsigned int resp_len, int *mbox_status, + unsigned int *len_in_resp) +{ + *len_in_resp = 0; + *mbox_status = GENERIC_RESPONSE_ERROR; + + if (!is_address_in_ddr_range((uint64_t)args, sizeof(uint32_t) * len)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + int status = mailbox_send_cmd(MBOX_JOB_ID, cmd, args, len, urgent, + (uint32_t *) response, &resp_len); + + if (status < 0) { + *mbox_status = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + *mbox_status = 0; + *len_in_resp = resp_len; + + flush_dcache_range(response, resp_len * MBOX_WORD_BYTE); + + return INTEL_SIP_SMC_STATUS_OK; +} + +static int intel_smc_get_usercode(uint32_t *user_code) +{ + int status; + unsigned int resp_len = sizeof(user_code) / MBOX_WORD_BYTE; + + status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_GET_USERCODE, NULL, + 0U, CMD_CASUAL, user_code, &resp_len); + + if (status < 0) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +uint32_t intel_smc_service_completed(uint64_t addr, uint32_t size, + uint32_t mode, uint32_t *job_id, + uint32_t *ret_size, uint32_t *mbox_error) +{ + int status = 0; + uint32_t resp_len = size / MBOX_WORD_BYTE; + + if (resp_len > MBOX_DATA_MAX_LEN) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_address_in_ddr_range(addr, size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (mode == SERVICE_COMPLETED_MODE_ASYNC) { + status = mailbox_read_response_async(job_id, + NULL, (uint32_t *) addr, &resp_len, 0); + } else { + status = mailbox_read_response(job_id, + (uint32_t *) addr, &resp_len); + + if (status == MBOX_NO_RESPONSE) { + status = MBOX_BUSY; + } + } + + if (status == MBOX_NO_RESPONSE) { + return INTEL_SIP_SMC_STATUS_NO_RESPONSE; + } + + if (status == MBOX_BUSY) { + return INTEL_SIP_SMC_STATUS_BUSY; + } + + *ret_size = resp_len * MBOX_WORD_BYTE; + flush_dcache_range(addr, *ret_size); + + if (status == MBOX_RET_SDOS_DECRYPTION_ERROR_102 || + status == MBOX_RET_SDOS_DECRYPTION_ERROR_103) { + *mbox_error = -status; + } else if (status != MBOX_RET_OK) { + *mbox_error = -status; + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +/* Miscellaneous HPS services */ +uint32_t intel_hps_set_bridges(uint64_t enable, uint64_t mask) +{ + int status = 0; + + if ((enable & SOCFPGA_BRIDGE_ENABLE) != 0U) { + if ((enable & SOCFPGA_BRIDGE_HAS_MASK) != 0U) { + status = socfpga_bridges_enable((uint32_t)mask); + } else { + status = socfpga_bridges_enable(~0); + } + } else { + if ((enable & SOCFPGA_BRIDGE_HAS_MASK) != 0U) { + status = socfpga_bridges_disable((uint32_t)mask); + } else { + status = socfpga_bridges_disable(~0); + } + } + + if (status < 0) { + return INTEL_SIP_SMC_STATUS_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +/* SDM SEU Error services */ +static uint32_t intel_sdm_seu_err_read(uint64_t *respbuf, unsigned int respbuf_sz) +{ + if (mailbox_seu_err_status((uint32_t *)respbuf, respbuf_sz) < 0) { + return INTEL_SIP_SMC_SEU_ERR_READ_ERROR; + } + + return INTEL_SIP_SMC_STATUS_OK; +} + +/* + * This function is responsible for handling all SiP 
calls from the NS world + */ + +uintptr_t sip_smc_handler_v1(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + uint32_t retval = 0, completed_addr[3]; + uint32_t retval2 = 0; + uint32_t mbox_error = 0; + uint64_t retval64, rsu_respbuf[9], seu_respbuf[3]; + int status = INTEL_SIP_SMC_STATUS_OK; + int mbox_status; + unsigned int len_in_resp; + u_register_t x5, x6, x7; + + switch (smc_fid) { + case SIP_SVC_UID: + /* Return UID to the caller */ + SMC_UUID_RET(handle, intl_svc_uid); + + case INTEL_SIP_SMC_FPGA_CONFIG_ISDONE: + status = intel_mailbox_fpga_config_isdone(); + SMC_RET4(handle, status, 0, 0, 0); + + case INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM: + SMC_RET3(handle, INTEL_SIP_SMC_STATUS_OK, + INTEL_SIP_SMC_FPGA_CONFIG_ADDR, + INTEL_SIP_SMC_FPGA_CONFIG_SIZE - + INTEL_SIP_SMC_FPGA_CONFIG_ADDR); + + case INTEL_SIP_SMC_FPGA_CONFIG_START: + status = intel_fpga_config_start(x1); + SMC_RET4(handle, status, 0, 0, 0); + + case INTEL_SIP_SMC_FPGA_CONFIG_WRITE: + status = intel_fpga_config_write(x1, x2); + SMC_RET4(handle, status, 0, 0, 0); + + case INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE: + status = intel_fpga_config_completed_write(completed_addr, + &retval, &rcv_id); + switch (retval) { + case 1: + SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK, + completed_addr[0], 0, 0); + + case 2: + SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK, + completed_addr[0], + completed_addr[1], 0); + + case 3: + SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK, + completed_addr[0], + completed_addr[1], + completed_addr[2]); + + case 0: + SMC_RET4(handle, status, 0, 0, 0); + + default: + mailbox_clear_response(); + SMC_RET1(handle, INTEL_SIP_SMC_STATUS_ERROR); + } + + case INTEL_SIP_SMC_REG_READ: + status = intel_secure_reg_read(x1, &retval); + SMC_RET3(handle, status, retval, x1); + + case INTEL_SIP_SMC_REG_WRITE: + status = intel_secure_reg_write(x1, (uint32_t)x2, &retval); + SMC_RET3(handle, status, retval, x1); + + case INTEL_SIP_SMC_REG_UPDATE: + status = intel_secure_reg_update(x1, (uint32_t)x2, + (uint32_t)x3, &retval); + SMC_RET3(handle, status, retval, x1); + + case INTEL_SIP_SMC_RSU_STATUS: + status = intel_rsu_status(rsu_respbuf, + ARRAY_SIZE(rsu_respbuf)); + if (status) { + SMC_RET1(handle, status); + } else { + SMC_RET4(handle, rsu_respbuf[0], rsu_respbuf[1], + rsu_respbuf[2], rsu_respbuf[3]); + } + + case INTEL_SIP_SMC_RSU_UPDATE: + status = intel_rsu_update(x1); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_RSU_NOTIFY: + status = intel_rsu_notify(x1); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_RSU_RETRY_COUNTER: + status = intel_rsu_retry_counter((uint32_t *)rsu_respbuf, + ARRAY_SIZE(rsu_respbuf), &retval); + if (status) { + SMC_RET1(handle, status); + } else { + SMC_RET2(handle, status, retval); + } + + case INTEL_SIP_SMC_RSU_DCMF_VERSION: + SMC_RET3(handle, INTEL_SIP_SMC_STATUS_OK, + ((uint64_t)rsu_dcmf_ver[1] << 32) | rsu_dcmf_ver[0], + ((uint64_t)rsu_dcmf_ver[3] << 32) | rsu_dcmf_ver[2]); + + case INTEL_SIP_SMC_RSU_COPY_DCMF_VERSION: + status = intel_rsu_copy_dcmf_version(x1, x2); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_RSU_DCMF_STATUS: + SMC_RET2(handle, INTEL_SIP_SMC_STATUS_OK, + ((uint64_t)rsu_dcmf_stat[3] << 48) | + ((uint64_t)rsu_dcmf_stat[2] << 32) | + ((uint64_t)rsu_dcmf_stat[1] << 16) | + rsu_dcmf_stat[0]); + + case INTEL_SIP_SMC_RSU_COPY_DCMF_STATUS: + status = intel_rsu_copy_dcmf_status(x1); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_RSU_MAX_RETRY: + SMC_RET2(handle, 
INTEL_SIP_SMC_STATUS_OK, rsu_max_retry); + + case INTEL_SIP_SMC_RSU_COPY_MAX_RETRY: + rsu_max_retry = x1; + SMC_RET1(handle, INTEL_SIP_SMC_STATUS_OK); + + case INTEL_SIP_SMC_ECC_DBE: + status = intel_ecc_dbe_notification(x1); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_SERVICE_COMPLETED: + status = intel_smc_service_completed(x1, x2, x3, &rcv_id, + &len_in_resp, &mbox_error); + SMC_RET4(handle, status, mbox_error, x1, len_in_resp); + + case INTEL_SIP_SMC_FIRMWARE_VERSION: + status = intel_smc_fw_version(&retval); + SMC_RET2(handle, status, retval); + + case INTEL_SIP_SMC_MBOX_SEND_CMD: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_mbox_send_cmd(x1, (uint32_t *)x2, x3, x4, x5, x6, + &mbox_status, &len_in_resp); + SMC_RET3(handle, status, mbox_status, len_in_resp); + + case INTEL_SIP_SMC_GET_USERCODE: + status = intel_smc_get_usercode(&retval); + SMC_RET2(handle, status, retval); + + case INTEL_SIP_SMC_FCS_CRYPTION: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + + if (x1 == FCS_MODE_DECRYPT) { + status = intel_fcs_decryption(x2, x3, x4, x5, &send_id); + } else if (x1 == FCS_MODE_ENCRYPT) { + status = intel_fcs_encryption(x2, x3, x4, x5, &send_id); + } else { + status = INTEL_SIP_SMC_STATUS_REJECTED; + } + + SMC_RET3(handle, status, x4, x5); + + case INTEL_SIP_SMC_FCS_CRYPTION_EXT: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + x7 = SMC_GET_GP(handle, CTX_GPREG_X7); + + if (x3 == FCS_MODE_DECRYPT) { + status = intel_fcs_decryption_ext(x1, x2, x4, x5, x6, + (uint32_t *) &x7, &mbox_error); + } else if (x3 == FCS_MODE_ENCRYPT) { + status = intel_fcs_encryption_ext(x1, x2, x4, x5, x6, + (uint32_t *) &x7, &mbox_error); + } else { + status = INTEL_SIP_SMC_STATUS_REJECTED; + } + + SMC_RET4(handle, status, mbox_error, x6, x7); + + case INTEL_SIP_SMC_FCS_RANDOM_NUMBER: + status = intel_fcs_random_number_gen(x1, &retval64, + &mbox_error); + SMC_RET4(handle, status, mbox_error, x1, retval64); + + case INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT: + status = intel_fcs_random_number_gen_ext(x1, x2, x3, + &send_id); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_FCS_SEND_CERTIFICATE: + status = intel_fcs_send_cert(x1, x2, &send_id); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_FCS_GET_PROVISION_DATA: + status = intel_fcs_get_provision_data(&send_id); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_FCS_CNTR_SET_PREAUTH: + status = intel_fcs_cntr_set_preauth(x1, x2, x3, + &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_HPS_SET_BRIDGES: + status = intel_hps_set_bridges(x1, x2); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_HWMON_READTEMP: + status = intel_hwmon_readtemp(x1, &retval); + SMC_RET2(handle, status, retval); + + case INTEL_SIP_SMC_HWMON_READVOLT: + status = intel_hwmon_readvolt(x1, &retval); + SMC_RET2(handle, status, retval); + + case INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN: + status = intel_fcs_sigma_teardown(x1, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_CHIP_ID: + status = intel_fcs_chip_id(&retval, &retval2, &mbox_error); + SMC_RET4(handle, status, mbox_error, retval, retval2); + + case INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY: + status = intel_fcs_attestation_subkey(x1, x2, x3, + (uint32_t *) &x4, &mbox_error); + SMC_RET4(handle, status, mbox_error, x3, x4); + + case INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS: + status = intel_fcs_get_measurement(x1, x2, x3, + (uint32_t *) &x4, &mbox_error); + SMC_RET4(handle, status, mbox_error, x3, 
x4); + + case INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERT: + status = intel_fcs_get_attestation_cert(x1, x2, + (uint32_t *) &x3, &mbox_error); + SMC_RET4(handle, status, mbox_error, x2, x3); + + case INTEL_SIP_SMC_FCS_CREATE_CERT_ON_RELOAD: + status = intel_fcs_create_cert_on_reload(x1, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_OPEN_CS_SESSION: + status = intel_fcs_open_crypto_service_session(&retval, &mbox_error); + SMC_RET3(handle, status, mbox_error, retval); + + case INTEL_SIP_SMC_FCS_CLOSE_CS_SESSION: + status = intel_fcs_close_crypto_service_session(x1, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_IMPORT_CS_KEY: + status = intel_fcs_import_crypto_service_key(x1, x2, &send_id); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_FCS_EXPORT_CS_KEY: + status = intel_fcs_export_crypto_service_key(x1, x2, x3, + (uint32_t *) &x4, &mbox_error); + SMC_RET4(handle, status, mbox_error, x3, x4); + + case INTEL_SIP_SMC_FCS_REMOVE_CS_KEY: + status = intel_fcs_remove_crypto_service_key(x1, x2, + &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_GET_CS_KEY_INFO: + status = intel_fcs_get_crypto_service_key_info(x1, x2, x3, + (uint32_t *) &x4, &mbox_error); + SMC_RET4(handle, status, mbox_error, x3, x4); + + case INTEL_SIP_SMC_FCS_GET_DIGEST_INIT: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + status = intel_fcs_get_digest_init(x1, x2, x3, + x4, x5, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_get_digest_update_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, false, + &mbox_error); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_get_digest_update_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, true, + &mbox_error); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_UPDATE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_get_digest_smmu_update_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, false, + &mbox_error, &send_id); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_GET_DIGEST_SMMU_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_get_digest_smmu_update_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, true, + &mbox_error, &send_id); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + status = intel_fcs_mac_verify_init(x1, x2, x3, + x4, x5, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + x7 = SMC_GET_GP(handle, CTX_GPREG_X7); + status = intel_fcs_mac_verify_update_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, x7, + false, &mbox_error); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + x7 = SMC_GET_GP(handle, CTX_GPREG_X7); + status = intel_fcs_mac_verify_update_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, x7, + true, &mbox_error); + SMC_RET4(handle, status, mbox_error, 
x5, x6); + + case INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_UPDATE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + x7 = SMC_GET_GP(handle, CTX_GPREG_X7); + status = intel_fcs_mac_verify_smmu_update_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, x7, + false, &mbox_error, &send_id); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_MAC_VERIFY_SMMU_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + x7 = SMC_GET_GP(handle, CTX_GPREG_X7); + status = intel_fcs_mac_verify_smmu_update_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, x7, + true, &mbox_error, &send_id); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_INIT: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + status = intel_fcs_ecdsa_sha2_data_sign_init(x1, x2, x3, + x4, x5, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_ecdsa_sha2_data_sign_update_finalize(x1, x2, + x3, x4, x5, (uint32_t *) &x6, false, + &mbox_error); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_ecdsa_sha2_data_sign_update_finalize(x1, x2, + x3, x4, x5, (uint32_t *) &x6, true, + &mbox_error); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_SMMU_UPDATE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_ecdsa_sha2_data_sign_smmu_update_finalize(x1, + x2, x3, x4, x5, (uint32_t *) &x6, false, + &mbox_error, &send_id); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_SMMU_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_ecdsa_sha2_data_sign_smmu_update_finalize(x1, + x2, x3, x4, x5, (uint32_t *) &x6, true, + &mbox_error, &send_id); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_INIT: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + status = intel_fcs_ecdsa_hash_sign_init(x1, x2, x3, + x4, x5, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_ecdsa_hash_sign_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, &mbox_error); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_INIT: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + status = intel_fcs_ecdsa_hash_sig_verify_init(x1, x2, x3, + x4, x5, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_ecdsa_hash_sig_verify_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, &mbox_error); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + status = intel_fcs_ecdsa_sha2_data_sig_verify_init(x1, x2, x3, + x4, x5, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = 
SMC_GET_GP(handle, CTX_GPREG_X6); + x7 = SMC_GET_GP(handle, CTX_GPREG_X7); + status = intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize( + x1, x2, x3, x4, x5, (uint32_t *) &x6, + x7, false, &mbox_error); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_SMMU_UPDATE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + x7 = SMC_GET_GP(handle, CTX_GPREG_X7); + status = intel_fcs_ecdsa_sha2_data_sig_verify_smmu_update_finalize( + x1, x2, x3, x4, x5, (uint32_t *) &x6, + x7, false, &mbox_error, &send_id); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_SMMU_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + x7 = SMC_GET_GP(handle, CTX_GPREG_X7); + status = intel_fcs_ecdsa_sha2_data_sig_verify_smmu_update_finalize( + x1, x2, x3, x4, x5, (uint32_t *) &x6, + x7, true, &mbox_error, &send_id); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + x7 = SMC_GET_GP(handle, CTX_GPREG_X7); + status = intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize( + x1, x2, x3, x4, x5, (uint32_t *) &x6, + x7, true, &mbox_error); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_INIT: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + status = intel_fcs_ecdsa_get_pubkey_init(x1, x2, x3, + x4, x5, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_FINALIZE: + status = intel_fcs_ecdsa_get_pubkey_finalize(x1, x2, x3, + (uint32_t *) &x4, &mbox_error); + SMC_RET4(handle, status, mbox_error, x3, x4); + + case INTEL_SIP_SMC_FCS_ECDH_REQUEST_INIT: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + status = intel_fcs_ecdh_request_init(x1, x2, x3, + x4, x5, &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_ECDH_REQUEST_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_ecdh_request_finalize(x1, x2, x3, + x4, x5, (uint32_t *) &x6, &mbox_error); + SMC_RET4(handle, status, mbox_error, x5, x6); + + case INTEL_SIP_SMC_FCS_AES_CRYPT_INIT: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + status = intel_fcs_aes_crypt_init(x1, x2, x3, x4, x5, + &mbox_error); + SMC_RET2(handle, status, mbox_error); + + case INTEL_SIP_SMC_FCS_AES_CRYPT_UPDATE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_aes_crypt_update_finalize(x1, x2, x3, x4, + x5, x6, false, &send_id); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_FCS_AES_CRYPT_FINALIZE: + x5 = SMC_GET_GP(handle, CTX_GPREG_X5); + x6 = SMC_GET_GP(handle, CTX_GPREG_X6); + status = intel_fcs_aes_crypt_update_finalize(x1, x2, x3, x4, + x5, x6, true, &send_id); + SMC_RET1(handle, status); + + case INTEL_SIP_SMC_GET_ROM_PATCH_SHA384: + status = intel_fcs_get_rom_patch_sha384(x1, &retval64, + &mbox_error); + SMC_RET4(handle, status, mbox_error, x1, retval64); + + case INTEL_SIP_SMC_SVC_VERSION: + SMC_RET3(handle, INTEL_SIP_SMC_STATUS_OK, + SIP_SVC_VERSION_MAJOR, + SIP_SVC_VERSION_MINOR); + + case INTEL_SIP_SMC_SEU_ERR_STATUS: + status = intel_sdm_seu_err_read(seu_respbuf, + ARRAY_SIZE(seu_respbuf)); + if (status) { + SMC_RET1(handle, status); + } else { + SMC_RET3(handle, seu_respbuf[0], seu_respbuf[1], seu_respbuf[2]); + } + + default: + return 
socfpga_sip_handler(smc_fid, x1, x2, x3, x4, + cookie, handle, flags); + } +} + +uintptr_t sip_smc_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + uint32_t cmd = smc_fid & INTEL_SIP_SMC_CMD_MASK; + + if (cmd >= INTEL_SIP_SMC_CMD_V2_RANGE_BEGIN && + cmd <= INTEL_SIP_SMC_CMD_V2_RANGE_END) { + return sip_smc_handler_v2(smc_fid, x1, x2, x3, x4, + cookie, handle, flags); + } else { + return sip_smc_handler_v1(smc_fid, x1, x2, x3, x4, + cookie, handle, flags); + } +} + +DECLARE_RT_SVC( + socfpga_sip_svc, + OEN_SIP_START, + OEN_SIP_END, + SMC_TYPE_FAST, + NULL, + sip_smc_handler +); + +DECLARE_RT_SVC( + socfpga_sip_svc_std, + OEN_SIP_START, + OEN_SIP_END, + SMC_TYPE_YIELD, + NULL, + sip_smc_handler +); diff --git a/plat/intel/soc/common/socfpga_sip_svc_v2.c b/plat/intel/soc/common/socfpga_sip_svc_v2.c new file mode 100644 index 0000000..e9996d3 --- /dev/null +++ b/plat/intel/soc/common/socfpga_sip_svc_v2.c @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <common/debug.h> +#include <common/runtime_svc.h> +#include <lib/mmio.h> + +#include "socfpga_mailbox.h" +#include "socfpga_sip_svc.h" + +static uint32_t intel_v2_mbox_send_cmd(uint32_t req_header, + uint32_t *data, uint32_t data_size) +{ + uint32_t value; + uint32_t len; + + if ((data == NULL) || (data_size == 0)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (data_size > (MBOX_INC_HEADER_MAX_WORD_SIZE * MBOX_WORD_BYTE)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_size_4_bytes_aligned(data_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + /* Make sure client id align in SMC SiP V2 header and mailbox header */ + value = (req_header >> INTEL_SIP_SMC_HEADER_CID_OFFSET) & + INTEL_SIP_SMC_HEADER_CID_MASK; + + if (value != MBOX_RESP_CLIENT_ID(data[0])) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + /* Make sure job id align in SMC SiP V2 header and mailbox header */ + value = (req_header >> INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET) & + INTEL_SIP_SMC_HEADER_JOB_ID_MASK; + + if (value != MBOX_RESP_JOB_ID(data[0])) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + /* + * Make sure data length align in SMC SiP V2 header and + * mailbox header + */ + len = (data_size / MBOX_WORD_BYTE) - 1; + + if (len != MBOX_RESP_LEN(data[0])) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + return mailbox_send_cmd_async_ext(data[0], &data[1], len); +} + +static uint32_t intel_v2_mbox_poll_resp(uint64_t req_header, + uint32_t *data, uint32_t *data_size, + uint64_t *resp_header) +{ + int status = 0; + uint32_t resp_len; + uint32_t job_id = 0; + uint32_t client_id = 0; + uint32_t version; + + if ((data == NULL) || (data_size == NULL) || (resp_header == NULL)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + if (!is_size_4_bytes_aligned(*data_size)) { + return INTEL_SIP_SMC_STATUS_REJECTED; + } + + resp_len = (*data_size / MBOX_WORD_BYTE) - 1; + status = mailbox_read_response_async(&job_id, &data[0], &data[1], + &resp_len, 1); + + if (status == MBOX_BUSY) { + status = INTEL_SIP_SMC_STATUS_BUSY; + } else if (status == MBOX_NO_RESPONSE) { + status = INTEL_SIP_SMC_STATUS_NO_RESPONSE; + } else { + *data_size = 0; + + if (resp_len > 0) { + /* + * Fill in the final response length, + * the length include both mailbox header and payload + */ + *data_size = (resp_len + 1) * MBOX_WORD_BYTE; + + /* Extract the client id from 
mailbox header */ + client_id = MBOX_RESP_CLIENT_ID(data[0]); + } + + /* + * Extract SMC SiP V2 protocol version from + * SMC request header + */ + version = (req_header >> INTEL_SIP_SMC_HEADER_VERSION_OFFSET) & + INTEL_SIP_SMC_HEADER_VERSION_MASK; + + /* Fill in SMC SiP V2 protocol response header */ + *resp_header = 0; + *resp_header |= (((uint64_t)job_id) & + INTEL_SIP_SMC_HEADER_JOB_ID_MASK) << + INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET; + *resp_header |= (((uint64_t)client_id) & + INTEL_SIP_SMC_HEADER_CID_MASK) << + INTEL_SIP_SMC_HEADER_CID_OFFSET; + *resp_header |= (((uint64_t)version) & + INTEL_SIP_SMC_HEADER_VERSION_MASK) << + INTEL_SIP_SMC_HEADER_VERSION_OFFSET; + } + + return status; +} + +uintptr_t sip_smc_handler_v2(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + uint32_t retval = 0; + uint64_t retval64 = 0; + int status = INTEL_SIP_SMC_STATUS_OK; + + switch (smc_fid) { + case INTEL_SIP_SMC_V2_GET_SVC_VERSION: + SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK, x1, + SIP_SVC_VERSION_MAJOR, + SIP_SVC_VERSION_MINOR); + + case INTEL_SIP_SMC_V2_REG_READ: + status = intel_secure_reg_read(x2, &retval); + SMC_RET4(handle, status, x1, retval, x2); + + case INTEL_SIP_SMC_V2_REG_WRITE: + status = intel_secure_reg_write(x2, (uint32_t)x3, &retval); + SMC_RET4(handle, status, x1, retval, x2); + + case INTEL_SIP_SMC_V2_REG_UPDATE: + status = intel_secure_reg_update(x2, (uint32_t)x3, + (uint32_t)x4, &retval); + SMC_RET4(handle, status, x1, retval, x2); + + case INTEL_SIP_SMC_V2_HPS_SET_BRIDGES: + status = intel_hps_set_bridges(x2, x3); + SMC_RET2(handle, status, x1); + + case INTEL_SIP_SMC_V2_RSU_UPDATE_ADDR: + status = intel_rsu_update(x2); + SMC_RET2(handle, status, x1); + + case INTEL_SIP_SMC_V2_MAILBOX_SEND_COMMAND: + status = intel_v2_mbox_send_cmd(x1, (uint32_t *)x2, x3); + SMC_RET2(handle, status, x1); + + case INTEL_SIP_SMC_V2_MAILBOX_POLL_RESPONSE: + status = intel_v2_mbox_poll_resp(x1, (uint32_t *)x2, + (uint32_t *) &x3, &retval64); + SMC_RET4(handle, status, retval64, x2, x3); + + default: + ERROR("%s: unhandled SMC V2 (0x%x)\n", __func__, smc_fid); + SMC_RET1(handle, SMC_UNK); + } +} diff --git a/plat/intel/soc/common/socfpga_storage.c b/plat/intel/soc/common/socfpga_storage.c new file mode 100644 index 0000000..e80f074 --- /dev/null +++ b/plat/intel/soc/common/socfpga_storage.c @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch_helpers.h> +#include <assert.h> +#include <common/debug.h> +#include <common/tbbr/tbbr_img_def.h> +#include <drivers/cadence/cdns_nand.h> +#include <drivers/cadence/cdns_sdmmc.h> +#include <drivers/io/io_block.h> +#include <drivers/io/io_driver.h> +#include <drivers/io/io_fip.h> +#include <drivers/io/io_memmap.h> +#include <drivers/io/io_mtd.h> +#include <drivers/io/io_storage.h> +#include <drivers/mmc.h> +#include <drivers/partition/partition.h> +#include <lib/mmio.h> +#include <tools_share/firmware_image_package.h> + +#include "drivers/sdmmc/sdmmc.h" +#include "socfpga_private.h" + + +#define PLAT_FIP_BASE (0) +#define PLAT_FIP_MAX_SIZE (0x1000000) +#define PLAT_MMC_DATA_BASE (0xffe3c000) +#define PLAT_MMC_DATA_SIZE (0x2000) +#define PLAT_QSPI_DATA_BASE (0x3C00000) +#define PLAT_QSPI_DATA_SIZE (0x1000000) +#define PLAT_NAND_DATA_BASE (0x0200000) +#define PLAT_NAND_DATA_SIZE (0x1000000) + +static const io_dev_connector_t *fip_dev_con; +static const io_dev_connector_t *boot_dev_con; + +static io_mtd_dev_spec_t nand_dev_spec; + +static uintptr_t fip_dev_handle; +static uintptr_t boot_dev_handle; + +static const io_uuid_spec_t bl2_uuid_spec = { + .uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2, +}; + +static const io_uuid_spec_t bl31_uuid_spec = { + .uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31, +}; + +static const io_uuid_spec_t bl33_uuid_spec = { + .uuid = UUID_NON_TRUSTED_FIRMWARE_BL33, +}; + +uintptr_t a2_lba_offset; +const char a2[] = {0xa2, 0x0}; + +static const io_block_spec_t gpt_block_spec = { + .offset = 0, + .length = MMC_BLOCK_SIZE +}; + +static int check_fip(const uintptr_t spec); +static int check_dev(const uintptr_t spec); + +static io_block_dev_spec_t boot_dev_spec; +static int (*register_io_dev)(const io_dev_connector_t **); + +static io_block_spec_t fip_spec = { + .offset = PLAT_FIP_BASE, + .length = PLAT_FIP_MAX_SIZE, +}; + +struct plat_io_policy { + uintptr_t *dev_handle; + uintptr_t image_spec; + int (*check)(const uintptr_t spec); +}; + +static const struct plat_io_policy policies[] = { + [FIP_IMAGE_ID] = { + &boot_dev_handle, + (uintptr_t)&fip_spec, + check_dev + }, + [BL2_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&bl2_uuid_spec, + check_fip + }, + [BL31_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&bl31_uuid_spec, + check_fip + }, + [BL33_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t) &bl33_uuid_spec, + check_fip + }, + [GPT_IMAGE_ID] = { + &boot_dev_handle, + (uintptr_t) &gpt_block_spec, + check_dev + }, +}; + +static int check_dev(const uintptr_t spec) +{ + int result; + uintptr_t local_handle; + + result = io_dev_init(boot_dev_handle, (uintptr_t)NULL); + if (result == 0) { + result = io_open(boot_dev_handle, spec, &local_handle); + if (result == 0) + io_close(local_handle); + } + return result; +} + +static int check_fip(const uintptr_t spec) +{ + int result; + uintptr_t local_image_handle; + + result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID); + if (result == 0) { + result = io_open(fip_dev_handle, spec, &local_image_handle); + if (result == 0) + io_close(local_image_handle); + } + return result; +} + +void socfpga_io_setup(int boot_source) +{ + int result; + + switch (boot_source) { + case BOOT_SOURCE_SDMMC: + register_io_dev = ®ister_io_dev_block; + boot_dev_spec.buffer.offset = PLAT_MMC_DATA_BASE; + boot_dev_spec.buffer.length = SOCFPGA_MMC_BLOCK_SIZE; + boot_dev_spec.ops.read = SDMMC_READ_BLOCKS; + boot_dev_spec.ops.write = SDMMC_WRITE_BLOCKS; + boot_dev_spec.block_size = MMC_BLOCK_SIZE; + 
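+ /* Boot from SD/MMC: reads go through the io_block driver; the region at PLAT_MMC_DATA_BASE is assumed to serve as its intermediate block-transfer buffer. */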
break; + + case BOOT_SOURCE_QSPI: + register_io_dev = ®ister_io_dev_memmap; + fip_spec.offset = PLAT_QSPI_DATA_BASE; + break; + +#if PLATFORM_MODEL == PLAT_SOCFPGA_AGILEX5 + case BOOT_SOURCE_NAND: + register_io_dev = ®ister_io_dev_mtd; + nand_dev_spec.ops.init = cdns_nand_init_mtd; + nand_dev_spec.ops.read = cdns_nand_read; + nand_dev_spec.ops.write = NULL; + fip_spec.offset = PLAT_NAND_DATA_BASE; + break; +#endif + + default: + ERROR("Unsupported boot source\n"); + panic(); + break; + } + + result = (*register_io_dev)(&boot_dev_con); + assert(result == 0); + + result = register_io_dev_fip(&fip_dev_con); + assert(result == 0); + + if (boot_source == BOOT_SOURCE_NAND) { + result = io_dev_open(boot_dev_con, (uintptr_t)&nand_dev_spec, + &boot_dev_handle); + } else { + result = io_dev_open(boot_dev_con, (uintptr_t)&boot_dev_spec, + &boot_dev_handle); + } + assert(result == 0); + + result = io_dev_open(fip_dev_con, (uintptr_t)NULL, &fip_dev_handle); + assert(result == 0); + + if (boot_source == BOOT_SOURCE_SDMMC) { + partition_init(GPT_IMAGE_ID); + fip_spec.offset = get_partition_entry(a2)->start; + } + + (void)result; +} + +int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle, + uintptr_t *image_spec) +{ + int result; + const struct plat_io_policy *policy; + + assert(image_id < ARRAY_SIZE(policies)); + + policy = &policies[image_id]; + result = policy->check(policy->image_spec); + assert(result == 0); + + *image_spec = policy->image_spec; + *dev_handle = *(policy->dev_handle); + + return result; +} diff --git a/plat/intel/soc/common/socfpga_topology.c b/plat/intel/soc/common/socfpga_topology.c new file mode 100644 index 0000000..28c9557 --- /dev/null +++ b/plat/intel/soc/common/socfpga_topology.c @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <platform_def.h> +#include <lib/psci/psci.h> + +static const unsigned char plat_power_domain_tree_desc[] = {1, 4}; + +/******************************************************************************* + * This function returns the default topology tree information. + ******************************************************************************/ +const unsigned char *plat_get_power_domain_tree_desc(void) +{ + return plat_power_domain_tree_desc; +} + +/******************************************************************************* + * This function implements a part of the critical interface between the psci + * generic layer and the platform that allows the former to query the platform + * to convert an MPIDR to a unique linear index. An error code (-1) is returned + * in case the MPIDR is invalid. + ******************************************************************************/ +int plat_core_pos_by_mpidr(u_register_t mpidr) +{ + unsigned int cluster_id, cpu_id; + + mpidr &= MPIDR_AFFINITY_MASK; + + if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)) + return -1; + + cluster_id = (mpidr >> PLAT_CLUSTER_ID_MPIDR_AFF_SHIFT) & MPIDR_AFFLVL_MASK; + cpu_id = (mpidr >> PLAT_CPU_ID_MPIDR_AFF_SHIFT) & MPIDR_AFFLVL_MASK; + + if (cluster_id >= PLATFORM_CLUSTER_COUNT) + return -1; + + /* + * Validate cpu_id by checking whether it represents a CPU in + * one of the two clusters present on the platform. 
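+ * For illustration, with 4 CPUs per cluster (as assumed by the return expression below), cluster_id 0 and cpu_id 3 map to linear index (0 * 4) + 3 = 3.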
+ */ + if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER) + return -1; + + return (cpu_id + (cluster_id * 4)); +} + diff --git a/plat/intel/soc/common/socfpga_vab.c b/plat/intel/soc/common/socfpga_vab.c new file mode 100644 index 0000000..e16610c --- /dev/null +++ b/plat/intel/soc/common/socfpga_vab.c @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <common/tbbr/tbbr_img_def.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/utils.h> +#include <tools_share/firmware_image_package.h> + +#include "socfpga_mailbox.h" +#include "socfpga_vab.h" + +static size_t get_img_size(uint8_t *img_buf, size_t img_buf_sz) +{ + uint8_t *img_buf_end = img_buf + img_buf_sz; + uint32_t cert_sz = get_unaligned_le32(img_buf_end - sizeof(uint32_t)); + uint8_t *p = img_buf_end - cert_sz - sizeof(uint32_t); + + /* Ensure p is pointing within the img_buf */ + if (p < img_buf || p > (img_buf_end - VAB_CERT_HEADER_SIZE)) + return 0; + + if (get_unaligned_le32(p) == SDM_CERT_MAGIC_NUM) + return (size_t)(p - img_buf); + + return 0; +} + + + +int socfpga_vendor_authentication(void **p_image, size_t *p_size) +{ + int retry_count = 20; + uint8_t hash384[FCS_SHA384_WORD_SIZE]; + uint64_t img_addr, mbox_data_addr; + uint32_t img_sz, mbox_data_sz; + uint8_t *cert_hash_ptr, *mbox_relocate_data_addr; + uint32_t resp = 0, resp_len = 1; + int ret = 0; + + img_addr = (uintptr_t)*p_image; + img_sz = get_img_size((uint8_t *)img_addr, *p_size); + + if (!img_sz) { + NOTICE("VAB certificate not found in image!\n"); + return -ENOVABIMG; + } + + if (!IS_BYTE_ALIGNED(img_sz, sizeof(uint32_t))) { + NOTICE("Image size (%d bytes) not aliged to 4 bytes!\n", img_sz); + return -EIMGERR; + } + + /* Generate HASH384 from the image */ + /* TODO: This part need to cross check !!!!!! */ + sha384_csum_wd((uint8_t *)img_addr, img_sz, hash384, CHUNKSZ_PER_WD_RESET); + cert_hash_ptr = (uint8_t *)(img_addr + img_sz + + VAB_CERT_MAGIC_OFFSET + VAB_CERT_FIT_SHA384_OFFSET); + + /* + * Compare the SHA384 found in certificate against the SHA384 + * calculated from image + */ + if (memcmp(hash384, cert_hash_ptr, FCS_SHA384_WORD_SIZE)) { + NOTICE("SHA384 does not match!\n"); + return -EKEYREJECTED; + } + + + mbox_data_addr = img_addr + img_sz - sizeof(uint32_t); + /* Size in word (32bits) */ + mbox_data_sz = (BYTE_ALIGN(*p_size - img_sz, sizeof(uint32_t))) >> 2; + + NOTICE("mbox_data_addr = %lx mbox_data_sz = %d\n", mbox_data_addr, mbox_data_sz); + + /* TODO: This part need to cross check !!!!!! 
*/ + // mbox_relocate_data_addr = (uint8_t *)malloc(mbox_data_sz * sizeof(uint32_t)); + // if (!mbox_relocate_data_addr) { + // NOTICE("Cannot allocate memory for VAB certificate relocation!\n"); + // return -ENOMEM; + // } + + memcpy(mbox_relocate_data_addr, (uint8_t *)mbox_data_addr, mbox_data_sz * sizeof(uint32_t)); + *(uint32_t *)mbox_relocate_data_addr = 0; + + do { + /* Invoke SMC call to ATF to send the VAB certificate to SDM */ + ret = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_VAB_SRC_CERT, +(uint32_t *)mbox_relocate_data_addr, mbox_data_sz, 0, &resp, &resp_len); + + /* If SDM is not available, just delay 50ms and retry again */ + /* 0x1FF = The device is busy */ + if (ret == MBOX_RESP_ERR(0x1FF)) { + mdelay(50); + } else { + break; + } + } while (--retry_count); + + /* Free the relocate certificate memory space */ + zeromem((void *)&mbox_relocate_data_addr, sizeof(uint32_t)); + + + /* Exclude the size of the VAB certificate from image size */ + *p_size = img_sz; + + if (ret) { + /* + * Unsupported mailbox command or device not in the + * owned/secure state + */ + /* 0x85 = Not allowed under current security setting */ + if (ret == MBOX_RESP_ERR(0x85)) { + /* SDM bypass authentication */ + NOTICE("Image Authentication bypassed at address\n"); + return 0; + } + NOTICE("VAB certificate authentication failed in SDM\n"); + /* 0x1FF = The device is busy */ + if (ret == MBOX_RESP_ERR(0x1FF)) { + NOTICE("Operation timed out\n"); + return -ETIMEOUT; + } else if (ret == MBOX_WRONG_ID) { + NOTICE("No such process\n"); + return -EPROCESS; + } + } else { + /* If Certificate Process Status has error */ + if (resp) { + NOTICE("VAB certificate execution format error\n"); + return -EIMGERR; + } + } + + NOTICE("Image Authentication bypassed at address\n"); + return ret; + +} + +static uint32_t get_unaligned_le32(const void *p) +{ + /* TODO: Temp for testing */ + //return le32_to_cpup((__le32 *)p); + return 0; +} + +void sha384_csum_wd(const unsigned char *input, unsigned int ilen, + unsigned char *output, unsigned int chunk_sz) +{ + /* TODO: Update sha384 start, update and finish */ +} diff --git a/plat/intel/soc/n5x/bl31_plat_setup.c b/plat/intel/soc/n5x/bl31_plat_setup.c new file mode 100644 index 0000000..a5337ce --- /dev/null +++ b/plat/intel/soc/n5x/bl31_plat_setup.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2020-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <arch.h> +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <drivers/arm/gicv2.h> +#include <drivers/ti/uart/uart_16550.h> +#include <lib/mmio.h> +#include <lib/xlat_tables/xlat_tables.h> + +#include "ccu/ncore_ccu.h" +#include "socfpga_mailbox.h" +#include "socfpga_private.h" + +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; + +entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type) +{ + entry_point_info_t *next_image_info; + + next_image_info = (type == NON_SECURE) ? 
+ &bl33_image_ep_info : &bl32_image_ep_info; + + /* None of the images on this platform can have 0x0 as the entrypoint */ + if (next_image_info->pc) { + return next_image_info; + } else { + return NULL; + } +} + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + static console_t console; + + mmio_write_64(PLAT_SEC_ENTRY, 0); + + console_16550_register(PLAT_INTEL_UART_BASE, PLAT_UART_CLOCK, + PLAT_BAUDRATE, &console); + /* + * Check params passed from BL31 should not be NULL, + */ + void *from_bl2 = (void *) arg0; + + bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2; + + assert(params_from_bl2 != NULL); + + /* + * Copy BL32 (if populated by BL31) and BL33 entry point information. + * They are stored in Secure RAM, in BL31's address space. + */ + + if (params_from_bl2->h.type == PARAM_BL_PARAMS && + params_from_bl2->h.version >= VERSION_2) { + + bl_params_node_t *bl_params = params_from_bl2->head; + + while (bl_params != NULL) { + if (bl_params->image_id == BL33_IMAGE_ID) + bl33_image_ep_info = *bl_params->ep_info; + + bl_params = bl_params->next_params_info; + } + } else { + struct socfpga_bl31_params *arg_from_bl2 = + (struct socfpga_bl31_params *) from_bl2; + + assert(arg_from_bl2->h.type == PARAM_BL31); + assert(arg_from_bl2->h.version >= VERSION_1); + + bl32_image_ep_info = *arg_from_bl2->bl32_ep_info; + bl33_image_ep_info = *arg_from_bl2->bl33_ep_info; + } + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); +} + +static const interrupt_prop_t s10_interrupt_props[] = { + PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(GICV2_INTR_GROUP0), + PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(GICV2_INTR_GROUP0) +}; + +static unsigned int target_mask_array[PLATFORM_CORE_COUNT]; + +static const gicv2_driver_data_t plat_gicv2_gic_data = { + .gicd_base = PLAT_INTEL_SOCFPGA_GICD_BASE, + .gicc_base = PLAT_INTEL_SOCFPGA_GICC_BASE, + .interrupt_props = s10_interrupt_props, + .interrupt_props_num = ARRAY_SIZE(s10_interrupt_props), + .target_masks = target_mask_array, + .target_masks_num = ARRAY_SIZE(target_mask_array), +}; + +/******************************************************************************* + * Perform any BL3-1 platform setup code + ******************************************************************************/ +void bl31_platform_setup(void) +{ + socfpga_delay_timer_init(); + + /* Initialize the gic cpu and distributor interfaces */ + gicv2_driver_init(&plat_gicv2_gic_data); + gicv2_distif_init(); + gicv2_pcpu_distif_init(); + gicv2_cpuif_enable(); + + /* Signal secondary CPUs to jump to BL31 (BL2 = U-boot SPL) */ + mmio_write_64(PLAT_CPU_RELEASE_ADDR, + (uint64_t)plat_secondary_cpus_bl31_entry); + + mailbox_hps_stage_notify(HPS_EXECUTION_STATE_SSBL); + + ncore_enable_ocram_firewall(); +} + +const mmap_region_t plat_dm_mmap[] = { + MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, + MT_MEMORY | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, + MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE, + MT_NON_CACHEABLE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE, + MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE, + MT_DEVICE | MT_RW | MT_NS), + {0} +}; + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. 
At the + * moment this is only initializes the mmu in a quick and dirty way. + ******************************************************************************/ +void bl31_plat_arch_setup(void) +{ + const mmap_region_t bl_regions[] = { + MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE, + MT_MEMORY | MT_RW | MT_SECURE), + MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, + MT_CODE | MT_SECURE), + MAP_REGION_FLAT(BL_RO_DATA_BASE, + BL_RO_DATA_END - BL_RO_DATA_BASE, + MT_RO_DATA | MT_SECURE), +#if USE_COHERENT_MEM + MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, + BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, + MT_DEVICE | MT_RW | MT_SECURE), +#endif + {0} + }; + + setup_page_tables(bl_regions, plat_dm_mmap); + enable_mmu_el3(0); +} diff --git a/plat/intel/soc/n5x/include/n5x_clock_manager.h b/plat/intel/soc/n5x/include/n5x_clock_manager.h new file mode 100644 index 0000000..14a5717 --- /dev/null +++ b/plat/intel/soc/n5x/include/n5x_clock_manager.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef CLOCKMANAGER_H +#define CLOCKMANAGER_H + +#include "socfpga_handoff.h" + +/* MACRO DEFINITION */ +#define SOCFPGA_GLOBAL_TIMER 0xffd01000 +#define SOCFPGA_GLOBAL_TIMER_EN 0x3 + +#define CLKMGR_PLLGLOB_VCO_PSRC_MASK GENMASK(17, 16) +#define CLKMGR_PLLGLOB_VCO_PSRC_OFFSET 16 +#define CLKMGR_PLLDIV_FDIV_MASK GENMASK(16, 8) +#define CLKMGR_PLLDIV_FDIV_OFFSET 8 +#define CLKMGR_PLLDIV_REFCLKDIV_MASK GENMASK(5, 0) +#define CLKMGR_PLLDIV_REFCLKDIV_OFFSET 0 +#define CLKMGR_PLLDIV_OUTDIV_QDIV_MASK GENMASK(26, 24) +#define CLKMGR_PLLDIV_OUTDIV_QDIV_OFFSET 24 + +#define CLKMGR_PLLOUTDIV_C0CNT_MASK GENMASK(4, 0) +#define CLKMGR_PLLOUTDIV_C0CNT_OFFSET 0 +#define CLKMGR_PLLOUTDIV_C1CNT_MASK GENMASK(12, 8) +#define CLKMGR_PLLOUTDIV_C1CNT_OFFSET 8 +#define CLKMGR_PLLDIV_OUTDIV_QDIV_MASK GENMASK(26, 24) +#define CLKMGR_PLLDIV_OUTDIV_QDIV_OFFSET 24 +#define CLKMGR_CLKSRC_MASK GENMASK(18, 16) +#define CLKMGR_CLKSRC_OFFSET 16 +#define CLKMGR_NOCDIV_DIVIDER_MASK GENMASK(1, 0) +#define CLKMGR_NOCDIV_L4MAIN_OFFSET 0 + +#define CLKMGR_INTOSC_HZ 400000000 +#define CLKMGR_VCO_PSRC_EOSC1 0 +#define CLKMGR_VCO_PSRC_INTOSC 1 +#define CLKMGR_VCO_PSRC_F2S 2 +#define CLKMGR_CLKSRC_MAIN 0 +#define CLKMGR_CLKSRC_PER 1 + +#define CLKMGR_N5X_BASE 0xffd10000 +#define CLKMGR_MAINPLL_NOCCLK 0x40 +#define CLKMGR_MAINPLL_NOCDIV 0x44 +#define CLKMGR_MAINPLL_PLLGLOB 0x48 +#define CLKMGR_MAINPLL_PLLOUTDIV 0x54 +#define CLKMGR_MAINPLL_PLLDIV 0x50 +#define CLKMGR_PERPLL_PLLGLOB 0x9c +#define CLKMGR_PERPLL_PLLDIV 0xa4 +#define CLKMGR_PERPLL_PLLOUTDIV 0xa8 + +/* FUNCTION DEFINITION */ +uint64_t clk_get_pll_output_hz(void); +uint64_t get_l4_clk(void); +uint32_t get_clk_freq(uint32_t psrc_reg); +uint32_t get_mpu_clk(void); +uint32_t get_cpu_clk(void); + +#endif diff --git a/plat/intel/soc/n5x/include/n5x_system_manager.h b/plat/intel/soc/n5x/include/n5x_system_manager.h new file mode 100644 index 0000000..b628219 --- /dev/null +++ b/plat/intel/soc/n5x/include/n5x_system_manager.h @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef N5X_SOCFPGA_SYSTEMMANAGER_H +#define N5X_SOCFPGA_SYSTEMMANAGER_H + +#include "socfpga_plat_def.h" + +/* System Manager Register Map */ +#define SOCFPGA_SYSMGR_SILICONID_1 0x00 +#define SOCFPGA_SYSMGR_SILICONID_2 0x04 +#define SOCFPGA_SYSMGR_WDDBG 0x08 +#define SOCFPGA_SYSMGR_MPU_STATUS 0x10 +#define SOCFPGA_SYSMGR_SDMMC_L3_MASTER 0x2C +#define SOCFPGA_SYSMGR_NAND_L3_MASTER 0x34 +#define SOCFPGA_SYSMGR_USB0_L3_MASTER 0x38 +#define SOCFPGA_SYSMGR_USB1_L3_MASTER 0x3C +#define SOCFPGA_SYSMGR_TSN_GLOBAL 0x40 +#define SOCFPGA_SYSMGR_EMAC_0 0x44 /* TSN_0 */ +#define SOCFPGA_SYSMGR_EMAC_1 0x48 /* TSN_1 */ +#define SOCFPGA_SYSMGR_EMAC_2 0x4C /* TSN_2 */ +#define SOCFPGA_SYSMGR_TSN_0_ACE 0x50 +#define SOCFPGA_SYSMGR_TSN_1_ACE 0x54 +#define SOCFPGA_SYSMGR_TSN_2_ACE 0x58 +#define SOCFPGA_SYSMGR_FPGAINTF_EN_1 0x68 +#define SOCFPGA_SYSMGR_FPGAINTF_EN_2 0x6C +#define SOCFPGA_SYSMGR_FPGAINTF_EN_3 0x70 +#define SOCFPGA_SYSMGR_DMAC0_L3_MASTER 0x74 +#define SOCFPGA_SYSMGR_ETR_L3_MASTER 0x78 +#define SOCFPGA_SYSMGR_DMAC1_L3_MASTER 0x7C +#define SOCFPGA_SYSMGR_SEC_CTRL_SLT 0x80 +#define SOCFPGA_SYSMGR_OSC_TRIM 0x84 +#define SOCFPGA_SYSMGR_DMAC0_CTRL_STATUS_REG 0x88 +#define SOCFPGA_SYSMGR_DMAC1_CTRL_STATUS_REG 0x8C +#define SOCFPGA_SYSMGR_ECC_INTMASK_VALUE 0x90 +#define SOCFPGA_SYSMGR_ECC_INTMASK_SET 0x94 +#define SOCFPGA_SYSMGR_ECC_INTMASK_CLR 0x98 +#define SOCFPGA_SYSMGR_ECC_INTMASK_SERR 0x9C +#define SOCFPGA_SYSMGR_ECC_INTMASK_DERR 0xA0 +/* NOC configuration value for Agilex5 */ +#define SOCFPGA_SYSMGR_NOC_TIMEOUT 0xC0 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_SET 0xC4 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_CLR 0xC8 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_VAL 0xCC +#define SOCFPGA_SYSMGR_NOC_IDLEACK 0xD0 +#define SOCFPGA_SYSMGR_NOC_IDLESTATUS 0xD4 +#define SOCFPGA_SYSMGR_FPGA2SOC_CTRL 0xD8 +#define SOCFPGA_SYSMGR_FPGA_CFG 0xDC +#define SOCFPGA_SYSMGR_GPO 0xE4 +#define SOCFPGA_SYSMGR_GPI 0xE8 +#define SOCFPGA_SYSMGR_MPU 0xF0 +#define SOCFPGA_SYSMGR_SDM_HPS_SPARE 0xF4 +#define SOCFPGA_SYSMGR_HPS_SDM_SPARE 0xF8 +#define SOCFPGA_SYSMGR_DFI_INTF 0xFC +#define SOCFPGA_SYSMGR_NAND_DD_CTRL 0x100 +#define SOCFPGA_SYSMGR_NAND_PHY_CTRL_REG 0x104 +#define SOCFPGA_SYSMGR_NAND_PHY_TSEL_REG 0x108 +#define SOCFPGA_SYSMGR_NAND_DQ_TIMING_REG 0x10C +#define SOCFPGA_SYSMGR_PHY_DQS_TIMING_REG 0x110 +#define SOCFPGA_SYSMGR_NAND_PHY_GATE_LPBK_CTRL_REG 0x114 +#define SOCFPGA_SYSMGR_NAND_PHY_DLL_MASTER_CTRL_REG 0x118 +#define SOCFPGA_SYSMGR_NAND_PHY_DLL_SLAVE_CTRL_REG 0x11C +#define SOCFPGA_SYSMGR_NAND_DD_DEFAULT_SETTING_REG0 0x120 +#define SOCFPGA_SYSMGR_NAND_DD_DEFAULT_SETTING_REG1 0x124 +#define SOCFPGA_SYSMGR_NAND_DD_STATUS_REG 0x128 +#define SOCFPGA_SYSMGR_NAND_DD_ID_LOW_REG 0x12C +#define SOCFPGA_SYSMGR_NAND_DD_ID_HIGH_REG 0x130 +#define SOCFPGA_SYSMGR_NAND_WRITE_PROT_EN_REG 0x134 +#define SOCFPGA_SYSMGR_SDMMC_CMD_QUEUE_SETTING_REG 0x138 +#define SOCFPGA_SYSMGR_I3C_SLV_PID_LOW 0x13C +#define SOCFPGA_SYSMGR_I3C_SLV_PID_HIGH 0x140 +#define SOCFPGA_SYSMGR_I3C_SLV_CTRL_0 0x144 +#define SOCFPGA_SYSMGR_I3C_SLV_CTRL_1 0x148 +#define SOCFPGA_SYSMGR_F2S_BRIDGE_CTRL 0x14C +#define SOCFPGA_SYSMGR_DMA_TBU_STASH_CTRL_REG_0_DMA0 0x150 +#define SOCFPGA_SYSMGR_DMA_TBU_STASH_CTRL_REG_0_DMA1 0x154 +#define SOCFPGA_SYSMGR_SDM_TBU_STASH_CTRL_REG_1_SDM 0x158 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_USB2 0x15C +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_USB3 0x160 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_SDMMC 0x164 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_NAND 0x168 +#define 
SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_ETR 0x16C +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN0 0x170 +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN1 0x174 +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN2 0x178 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_CTRL_REG_0_DMA0 0x17C +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_CTRL_REG_0_DMA1 0x180 +#define SOCFPGA_SYSMGR_SDM_TBU_STREAM_CTRL_REG_1_SDM 0x184 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_USB2 0x188 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_USB3 0x18C +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_SDMMC 0x190 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_NAND 0x194 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_ETR 0x198 +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN0 0x19C +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN1 0x1A0 +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN2 0x1A4 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_ID_AX_REG_0_DMA0 0x1A8 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_ID_AX_REG_0_DMA1 0x1AC +#define SOCFPGA_SYSMGR_SDM_TBU_STREAM_ID_AX_REG_1_SDM 0x1B0 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_USB2 0x1B4 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_USB3 0x1B8 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_SDMMC 0x1BC +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_NAND 0x1C0 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_ETR 0x1C4 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN0 0x1C8 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN1 0x1CC +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN2 0x1D0 +#define SOCFPGA_SYSMGR_USB3_MISC_CTRL_REG0 0x1F0 +#define SOCFPGA_SYSMGR_USB3_MISC_CTRL_REG1 0x1F4 + +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_0 0x200 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_1 0x204 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_2 0x208 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_3 0x20C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_4 0x210 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_5 0x214 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_6 0x218 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_7 0x21C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_8 0x220 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_9 0x224 +#define SOCFPGA_SYSMGR_MPFE_CONFIG 0x228 +#define SOCFPGA_SYSMGR_MPFE_status 0x22C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_0 0x230 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_1 0x234 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_2 0x238 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_3 0x23C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_4 0x240 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_5 0x244 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_6 0x248 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_7 0x24C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_8 0x250 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_9 0x254 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_0 0x258 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_1 0x25C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_2 0x260 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_3 0x264 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_4 0x268 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_5 0x26C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_6 0x270 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_7 0x274 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_8 0x278 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_9 0x27C + +#define DMA0_STREAM_CTRL_REG 0x10D1217C +#define DMA1_STREAM_CTRL_REG 0x10D12180 +#define SDM_STREAM_CTRL_REG 0x10D12184 +#define USB2_STREAM_CTRL_REG 0x10D12188 +#define USB3_STREAM_CTRL_REG 0x10D1218C +#define SDMMC_STREAM_CTRL_REG 0x10D12190 +#define NAND_STREAM_CTRL_REG 0x10D12194 +#define 
ETR_STREAM_CTRL_REG 0x10D12198 +#define TSN0_STREAM_CTRL_REG 0x10D1219C +#define TSN1_STREAM_CTRL_REG 0x10D121A0 +#define TSN2_STREAM_CTRL_REG 0x10D121A4 + +/* Stream ID configuration value for Agilex5 */ +#define TSN0 0x00010001 +#define TSN1 0x00020002 +#define TSN2 0x00030003 +#define NAND 0x00040004 +#define SDMMC 0x00050005 +#define USB0 0x00060006 +#define USB1 0x00070007 +#define DMA0 0x00080008 +#define DMA1 0x00090009 +#define SDM 0x000A000A +#define CORE_SIGHT_DEBUG 0x000B000B + + + + +/* Field Masking */ +#define SYSMGR_SDMMC_DRVSEL(x) (((x) & 0x7) << 0) +#define SYSMGR_SDMMC_SMPLSEL(x) (((x) & 0x7) << 4) +#define IDLE_DATA_LWSOC2FPGA BIT(4) +#define IDLE_DATA_SOC2FPGA BIT(0) +#define IDLE_DATA_MASK (IDLE_DATA_LWSOC2FPGA | IDLE_DATA_SOC2FPGA) +#define SYSMGR_ECC_OCRAM_MASK BIT(1) +#define SYSMGR_ECC_DDR0_MASK BIT(16) +#define SYSMGR_ECC_DDR1_MASK BIT(17) +#define WSTREAMIDEN_REG_CTRL BIT(0) +#define RSTREAMIDEN_REG_CTRL BIT(1) +#define WMMUSECSID_REG_VAL BIT(4) +#define RMMUSECSID_REG_VAL BIT(5) + +/* Macros */ +#define SOCFPGA_SYSMGR(_reg) (SOCFPGA_SYSMGR_REG_BASE \ + + (SOCFPGA_SYSMGR_##_reg)) +#define ENABLE_STREAMID WSTREAMIDEN_REG_CTRL | \ + RSTREAMIDEN_REG_CTRL +#define ENABLE_STREAMID_SECURE_TX WSTREAMIDEN_REG_CTRL | \ + RSTREAMIDEN_REG_CTRL | \ + WMMUSECSID_REG_VAL | RMMUSECSID_REG_VAL + +#endif /* N5X_SOCFPGA_SYSTEMMANAGER_H */ diff --git a/plat/intel/soc/n5x/include/socfpga_plat_def.h b/plat/intel/soc/n5x/include/socfpga_plat_def.h new file mode 100644 index 0000000..a06bbc4 --- /dev/null +++ b/plat/intel/soc/n5x/include/socfpga_plat_def.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2020-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_SOCFPGA_DEF_H +#define PLAT_SOCFPGA_DEF_H + +#include "n5x_system_manager.h" +#include <platform_def.h> + +/* Platform Setting */ +#define PLATFORM_MODEL PLAT_SOCFPGA_N5X +#define BOOT_SOURCE BOOT_SOURCE_SDMMC +#define PLAT_PRIMARY_CPU 0 +#define PLAT_CLUSTER_ID_MPIDR_AFF_SHIFT MPIDR_AFF1_SHIFT +#define PLAT_CPU_ID_MPIDR_AFF_SHIFT MPIDR_AFF0_SHIFT + +/* FPGA config helpers */ +#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR 0x400000 +#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE 0x2000000 + +/* QSPI Setting */ +#define CAD_QSPIDATA_OFST 0xff900000 +#define CAD_QSPI_OFFSET 0xff8d2000 + +/* Register Mapping */ +#define SOCFPGA_CCU_NOC_REG_BASE U(0xf7000000) +#define SOCFPGA_F2SDRAMMGR_REG_BASE U(0xf8024000) + +#define SOCFPGA_MMC_REG_BASE U(0xff808000) + +#define SOCFPGA_RSTMGR_REG_BASE U(0xffd11000) +#define SOCFPGA_SYSMGR_REG_BASE U(0xffd12000) + +#define SOCFPGA_L4_PER_SCR_REG_BASE U(0xffd21000) +#define SOCFPGA_L4_SYS_SCR_REG_BASE U(0xffd21100) +#define SOCFPGA_SOC2FPGA_SCR_REG_BASE U(0xffd21200) +#define SOCFPGA_LWSOC2FPGA_SCR_REG_BASE U(0xffd21300) + + +/******************************************************************************* + * Platform memory map related constants + ******************************************************************************/ +#define DRAM_BASE (0x0) +#define DRAM_SIZE (0x80000000) + +#define OCRAM_BASE (0xFFE00000) +#define OCRAM_SIZE (0x00040000) + +#define MEM64_BASE (0x0100000000) +#define MEM64_SIZE (0x1F00000000) + +#define DEVICE1_BASE (0x80000000) +#define DEVICE1_SIZE (0x60000000) + +#define DEVICE2_BASE (0xF7000000) +#define DEVICE2_SIZE (0x08E00000) + +#define DEVICE3_BASE (0xFFFC0000) +#define DEVICE3_SIZE (0x00008000) + +#define DEVICE4_BASE (0x2000000000) +#define 
DEVICE4_SIZE (0x0100000000) + +#define BL2_BASE (0xffe00000) +#define BL2_LIMIT (0xffe1b000) + +#define BL31_BASE (0x1000) +#define BL31_LIMIT (0x81000) + +/******************************************************************************* + * UART related constants + ******************************************************************************/ +#define PLAT_UART0_BASE (0xFFC02000) +#define PLAT_UART1_BASE (0xFFC02100) + +/******************************************************************************* + * GIC related constants + ******************************************************************************/ +#define PLAT_GIC_BASE (0xFFFC0000) +#define PLAT_GICC_BASE (PLAT_GIC_BASE + 0x2000) +#define PLAT_GICD_BASE (PLAT_GIC_BASE + 0x1000) +#define PLAT_GICR_BASE 0 + +#define PLAT_SYS_COUNTER_FREQ_IN_TICKS (400000000) +#define PLAT_HZ_CONVERT_TO_MHZ (1000000) + +/******************************************************************************* + * SDMMC related pointer function + ******************************************************************************/ +#define SDMMC_READ_BLOCKS mmc_read_blocks +#define SDMMC_WRITE_BLOCKS mmc_write_blocks + +/******************************************************************************* + * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate L2 reset + * is done and HPS should trigger warm reset via RMR_EL3. + ******************************************************************************/ +#define L2_RESET_DONE_REG 0xFFD12218 + +/* Platform specific system counter */ +#define PLAT_SYS_COUNTER_FREQ_IN_MHZ get_cpu_clk() + +#endif /* PLAT_SOCFPGA_DEF_H */ diff --git a/plat/intel/soc/n5x/platform.mk b/plat/intel/soc/n5x/platform.mk new file mode 100644 index 0000000..95f076f --- /dev/null +++ b/plat/intel/soc/n5x/platform.mk @@ -0,0 +1,52 @@ +# +# Copyright (c) 2020-2022, Intel Corporation. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +PLAT_INCLUDES := \ + -Iplat/intel/soc/n5x/include/ \ + -Iplat/intel/soc/common/drivers/ \ + -Iplat/intel/soc/common/include/ + +# Include GICv2 driver files +include drivers/arm/gic/v2/gicv2.mk +DM_GICv2_SOURCES := \ + ${GICV2_SOURCES} \ + plat/common/plat_gicv2.c + + +PLAT_BL_COMMON_SOURCES := \ + ${DM_GICv2_SOURCES} \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + drivers/ti/uart/aarch64/16550_console.S \ + lib/xlat_tables/aarch64/xlat_tables.c \ + lib/xlat_tables/xlat_tables_common.c \ + plat/intel/soc/common/aarch64/platform_common.c \ + plat/intel/soc/common/aarch64/plat_helpers.S \ + plat/intel/soc/common/socfpga_delay_timer.c \ + plat/intel/soc/common/drivers/ccu/ncore_ccu.c + +BL2_SOURCES += + +BL31_SOURCES += \ + drivers/arm/cci/cci.c \ + lib/cpus/aarch64/aem_generic.S \ + lib/cpus/aarch64/cortex_a53.S \ + plat/common/plat_psci_common.c \ + plat/intel/soc/n5x/bl31_plat_setup.c \ + plat/intel/soc/n5x/soc/n5x_clock_manager.c \ + plat/intel/soc/common/socfpga_psci.c \ + plat/intel/soc/common/socfpga_sip_svc.c \ + plat/intel/soc/common/socfpga_sip_svc_v2.c \ + plat/intel/soc/common/socfpga_topology.c \ + plat/intel/soc/common/sip/socfpga_sip_ecc.c \ + plat/intel/soc/common/sip/socfpga_sip_fcs.c \ + plat/intel/soc/common/soc/socfpga_mailbox.c \ + plat/intel/soc/common/soc/socfpga_reset_manager.c + +PROGRAMMABLE_RESET_ADDRESS := 0 +RESET_TO_BL2 := 1 +BL2_INV_DCACHE := 0 +USE_COHERENT_MEM := 1 diff --git a/plat/intel/soc/n5x/soc/n5x_clock_manager.c b/plat/intel/soc/n5x/soc/n5x_clock_manager.c new file mode 100644 index 0000000..f32e0f8 --- /dev/null +++ b/plat/intel/soc/n5x/soc/n5x_clock_manager.c @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include "n5x_clock_manager.h" +#include "n5x_system_manager.h" + + + +uint64_t clk_get_pll_output_hz(void) +{ + uint32_t clksrc; + uint32_t scr_reg; + uint32_t divf; + uint32_t divr; + uint32_t divq; + uint32_t power = 1; + uint64_t clock = 0; + + clksrc = ((get_clk_freq(CLKMGR_PERPLL_PLLGLOB)) & + CLKMGR_PLLGLOB_VCO_PSRC_MASK) >> CLKMGR_PLLGLOB_VCO_PSRC_OFFSET; + + switch (clksrc) { + case CLKMGR_VCO_PSRC_EOSC1: + scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1); + clock = mmio_read_32(scr_reg); + break; + + case CLKMGR_VCO_PSRC_INTOSC: + clock = CLKMGR_INTOSC_HZ; + break; + + case CLKMGR_VCO_PSRC_F2S: + scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2); + clock = mmio_read_32(scr_reg); + break; + } + + divf = ((get_clk_freq(CLKMGR_PERPLL_PLLDIV)) & + CLKMGR_PLLDIV_FDIV_MASK) >> CLKMGR_PLLDIV_FDIV_OFFSET; + divr = ((get_clk_freq(CLKMGR_PERPLL_PLLDIV)) & + CLKMGR_PLLDIV_REFCLKDIV_MASK) >> CLKMGR_PLLDIV_REFCLKDIV_OFFSET; + divq = ((get_clk_freq(CLKMGR_PERPLL_PLLDIV)) & + CLKMGR_PLLDIV_OUTDIV_QDIV_MASK) >> CLKMGR_PLLDIV_OUTDIV_QDIV_OFFSET; + + while (divq) { + power *= 2; + divq--; + } + + return ((clock * 2 * (divf + 1)) / ((divr + 1) * power)); +} + +uint64_t get_l4_clk(void) +{ + uint32_t clock = 0; + uint32_t mainpll_c1cnt; + uint32_t perpll_c1cnt; + uint32_t clksrc; + + mainpll_c1cnt = ((get_clk_freq(CLKMGR_MAINPLL_PLLOUTDIV)) & + CLKMGR_PLLOUTDIV_C1CNT_MASK) >> CLKMGR_PLLOUTDIV_C1CNT_OFFSET; + + perpll_c1cnt = ((get_clk_freq(CLKMGR_PERPLL_PLLOUTDIV)) & + CLKMGR_PLLOUTDIV_C1CNT_MASK) >> CLKMGR_PLLOUTDIV_C1CNT_OFFSET; + + clksrc = ((get_clk_freq(CLKMGR_MAINPLL_NOCCLK)) & CLKMGR_CLKSRC_MASK) >> + CLKMGR_CLKSRC_OFFSET; + + switch (clksrc) { + case CLKMGR_CLKSRC_MAIN: + clock = clk_get_pll_output_hz(); + clock /= 1 + mainpll_c1cnt; + break; + + case CLKMGR_CLKSRC_PER: + clock = clk_get_pll_output_hz(); + clock /= 1 + perpll_c1cnt; + break; + + default: + return 0; + break; + } + + clock /= BIT(((get_clk_freq(CLKMGR_MAINPLL_NOCDIV)) >> + CLKMGR_NOCDIV_L4MAIN_OFFSET) & CLKMGR_NOCDIV_DIVIDER_MASK); + + return clock; +} + +/* Return MPU clock */ +uint32_t get_mpu_clk(void) +{ + uint32_t clock = 0; + uint32_t mainpll_c0cnt; + uint32_t perpll_c0cnt; + uint32_t clksrc; + + mainpll_c0cnt = ((get_clk_freq(CLKMGR_MAINPLL_PLLOUTDIV)) & + CLKMGR_PLLOUTDIV_C0CNT_MASK) >> CLKMGR_PLLOUTDIV_C0CNT_OFFSET; + + perpll_c0cnt = ((get_clk_freq(CLKMGR_PERPLL_PLLOUTDIV)) & + CLKMGR_PLLOUTDIV_C0CNT_MASK) >> CLKMGR_PLLOUTDIV_C0CNT_OFFSET; + + clksrc = ((get_clk_freq(CLKMGR_MAINPLL_NOCCLK)) & CLKMGR_CLKSRC_MASK) >> + CLKMGR_CLKSRC_OFFSET; + + switch (clksrc) { + case CLKMGR_CLKSRC_MAIN: + clock = clk_get_pll_output_hz(); + clock /= 1 + mainpll_c0cnt; + break; + + case CLKMGR_CLKSRC_PER: + clock = clk_get_pll_output_hz(); + clock /= 1 + perpll_c0cnt; + break; + + default: + return 0; + break; + } + + clock /= BIT(((get_clk_freq(CLKMGR_MAINPLL_NOCDIV)) >> + CLKMGR_NOCDIV_L4MAIN_OFFSET) & CLKMGR_NOCDIV_DIVIDER_MASK); + + return clock; +} + +/* Calculate clock frequency based on parameter */ +uint32_t get_clk_freq(uint32_t psrc_reg) +{ + uint32_t clk_psrc; + + clk_psrc = mmio_read_32(CLKMGR_N5X_BASE + psrc_reg); + + return clk_psrc; +} + +/* Get cpu freq clock */ +uint32_t get_cpu_clk(void) +{ + uint32_t cpu_clk = 0; + + cpu_clk = get_mpu_clk()/PLAT_HZ_CONVERT_TO_MHZ; + + return cpu_clk; +} diff --git a/plat/intel/soc/stratix10/bl2_plat_setup.c 
b/plat/intel/soc/stratix10/bl2_plat_setup.c new file mode 100644 index 0000000..73e3216 --- /dev/null +++ b/plat/intel/soc/stratix10/bl2_plat_setup.c @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <arch_helpers.h> +#include <assert.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <common/desc_image_load.h> +#include <drivers/generic_delay_timer.h> +#include <drivers/synopsys/dw_mmc.h> +#include <drivers/ti/uart/uart_16550.h> +#include <lib/xlat_tables/xlat_tables.h> + +#include "qspi/cadence_qspi.h" +#include "socfpga_emac.h" +#include "socfpga_f2sdram_manager.h" +#include "socfpga_handoff.h" +#include "socfpga_mailbox.h" +#include "socfpga_private.h" +#include "socfpga_reset_manager.h" +#include "socfpga_system_manager.h" +#include "s10_clock_manager.h" +#include "s10_memory_controller.h" +#include "s10_mmc.h" +#include "s10_pinmux.h" +#include "wdt/watchdog.h" + +static struct mmc_device_info mmc_info; + +const mmap_region_t plat_stratix10_mmap[] = { + MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, + MT_MEMORY | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, + MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE, + MT_NON_CACHEABLE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE, + MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE, + MT_DEVICE | MT_RW | MT_NS), + {0}, +}; + +boot_source_type boot_source = BOOT_SOURCE; + +void bl2_el3_early_platform_setup(u_register_t x0, u_register_t x1, + u_register_t x2, u_register_t x4) +{ + static console_t console; + handoff reverse_handoff_ptr; + + generic_delay_timer_init(); + + if (socfpga_get_handoff(&reverse_handoff_ptr)) + return; + config_pinmux(&reverse_handoff_ptr); + + config_clkmgr_handoff(&reverse_handoff_ptr); + enable_nonsecure_access(); + deassert_peripheral_reset(); + config_hps_hs_before_warm_reset(); + + watchdog_init(get_wdt_clk()); + + console_16550_register(PLAT_INTEL_UART_BASE, get_uart_clk(), + PLAT_BAUDRATE, &console); + + socfpga_emac_init(); + socfpga_delay_timer_init(); + init_hard_memory_controller(); + mailbox_init(); + s10_mmc_init(); + + if (!intel_mailbox_is_fpga_not_ready()) { + socfpga_bridges_enable(SOC2FPGA_MASK | LWHPS2FPGA_MASK | + FPGA2SOC_MASK | F2SDRAM0_MASK | F2SDRAM1_MASK | + F2SDRAM2_MASK); + } +} + + +void bl2_el3_plat_arch_setup(void) +{ + + const mmap_region_t bl_regions[] = { + MAP_REGION_FLAT(BL2_BASE, BL2_END - BL2_BASE, + MT_MEMORY | MT_RW | MT_SECURE), + MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, + MT_CODE | MT_SECURE), + MAP_REGION_FLAT(BL_RO_DATA_BASE, + BL_RO_DATA_END - BL_RO_DATA_BASE, + MT_RO_DATA | MT_SECURE), +#if USE_COHERENT_MEM_BAR + MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, + BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, + MT_DEVICE | MT_RW | MT_SECURE), +#endif + {0}, + }; + + setup_page_tables(bl_regions, plat_stratix10_mmap); + + enable_mmu_el3(0); + + dw_mmc_params_t params = EMMC_INIT_PARAMS(0x100000, get_mmc_clk()); + + mmc_info.mmc_dev_type = MMC_IS_SD; + mmc_info.ocr_voltage = OCR_3_3_3_4 | OCR_3_2_3_3; + + /* Request ownership and direct access to QSPI */ + mailbox_hps_qspi_enable(); + + switch (boot_source) { + case 
BOOT_SOURCE_SDMMC: + dw_mmc_init(¶ms, &mmc_info); + socfpga_io_setup(boot_source); + break; + + case BOOT_SOURCE_QSPI: + cad_qspi_init(0, QSPI_CONFIG_CPHA, QSPI_CONFIG_CPOL, + QSPI_CONFIG_CSDA, QSPI_CONFIG_CSDADS, + QSPI_CONFIG_CSEOT, QSPI_CONFIG_CSSOT, 0); + socfpga_io_setup(boot_source); + break; + + default: + ERROR("Unsupported boot source\n"); + panic(); + break; + } +} + +uint32_t get_spsr_for_bl33_entry(void) +{ + unsigned long el_status; + unsigned int mode; + uint32_t spsr; + + /* Figure out what mode we enter the non-secure world in */ + el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; + el_status &= ID_AA64PFR0_ELX_MASK; + + mode = (el_status) ? MODE_EL2 : MODE_EL1; + + /* + * TODO: Consider the possibility of specifying the SPSR in + * the FIP ToC and allowing the platform to have a say as + * well. + */ + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + + +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); + + assert(bl_mem_params); + + switch (image_id) { + case BL33_IMAGE_ID: + bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr(); + bl_mem_params->ep_info.spsr = get_spsr_for_bl33_entry(); + break; + default: + break; + } + + return 0; +} + +/******************************************************************************* + * Perform any BL3-1 platform setup code + ******************************************************************************/ +void bl2_platform_setup(void) +{ +} + diff --git a/plat/intel/soc/stratix10/bl31_plat_setup.c b/plat/intel/soc/stratix10/bl31_plat_setup.c new file mode 100644 index 0000000..ba00e82 --- /dev/null +++ b/plat/intel/soc/stratix10/bl31_plat_setup.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <arch_helpers.h> +#include <assert.h> +#include <common/bl_common.h> +#include <drivers/arm/gicv2.h> +#include <drivers/ti/uart/uart_16550.h> +#include <lib/xlat_tables/xlat_tables.h> +#include <lib/mmio.h> +#include <plat/common/platform.h> +#include <platform_def.h> + +#include "socfpga_mailbox.h" +#include "socfpga_noc.h" +#include "socfpga_private.h" +#include "socfpga_reset_manager.h" +#include "socfpga_system_manager.h" +#include "s10_memory_controller.h" +#include "s10_pinmux.h" +#include "s10_clock_manager.h" + + +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; + +entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type) +{ + entry_point_info_t *next_image_info; + + next_image_info = (type == NON_SECURE) ? + &bl33_image_ep_info : &bl32_image_ep_info; + + /* None of the images on this platform can have 0x0 as the entrypoint */ + if (next_image_info->pc) + return next_image_info; + else + return NULL; +} + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + static console_t console; + + mmio_write_64(PLAT_SEC_ENTRY, PLAT_SEC_WARM_ENTRY); + + console_16550_register(PLAT_INTEL_UART_BASE, PLAT_UART_CLOCK, + PLAT_BAUDRATE, &console); + /* + * Check params passed from BL31 should not be NULL, + */ + void *from_bl2 = (void *) arg0; + + bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2; + assert(params_from_bl2 != NULL); + + /* + * Copy BL32 (if populated by BL31) and BL33 entry point information. 
+ * They are stored in Secure RAM, in BL31's address space. + */ + + if (params_from_bl2->h.type == PARAM_BL_PARAMS && + params_from_bl2->h.version >= VERSION_2) { + + bl_params_node_t *bl_params = params_from_bl2->head; + + while (bl_params) { + if (bl_params->image_id == BL33_IMAGE_ID) + bl33_image_ep_info = *bl_params->ep_info; + + bl_params = bl_params->next_params_info; + } + } else { + struct socfpga_bl31_params *arg_from_bl2 = + (struct socfpga_bl31_params *) from_bl2; + + assert(arg_from_bl2->h.type == PARAM_BL31); + assert(arg_from_bl2->h.version >= VERSION_1); + + bl32_image_ep_info = *arg_from_bl2->bl32_ep_info; + bl33_image_ep_info = *arg_from_bl2->bl33_ep_info; + } + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); +} + +static const interrupt_prop_t s10_interrupt_props[] = { + PLAT_INTEL_SOCFPGA_G1S_IRQ_PROPS(GICV2_INTR_GROUP0), + PLAT_INTEL_SOCFPGA_G0_IRQ_PROPS(GICV2_INTR_GROUP0) +}; + +static unsigned int target_mask_array[PLATFORM_CORE_COUNT]; + +static const gicv2_driver_data_t plat_gicv2_gic_data = { + .gicd_base = PLAT_INTEL_SOCFPGA_GICD_BASE, + .gicc_base = PLAT_INTEL_SOCFPGA_GICC_BASE, + .interrupt_props = s10_interrupt_props, + .interrupt_props_num = ARRAY_SIZE(s10_interrupt_props), + .target_masks = target_mask_array, + .target_masks_num = ARRAY_SIZE(target_mask_array), +}; + +/******************************************************************************* + * Perform any BL3-1 platform setup code + ******************************************************************************/ +void bl31_platform_setup(void) +{ + socfpga_delay_timer_init(); + + /* Initialize the gic cpu and distributor interfaces */ + gicv2_driver_init(&plat_gicv2_gic_data); + gicv2_distif_init(); + gicv2_pcpu_distif_init(); + gicv2_cpuif_enable(); + + /* Signal secondary CPUs to jump to BL31 (BL2 = U-boot SPL) */ + mmio_write_64(PLAT_CPU_RELEASE_ADDR, + (uint64_t)plat_secondary_cpus_bl31_entry); + + mailbox_hps_stage_notify(HPS_EXECUTION_STATE_SSBL); + + enable_ocram_firewall(); +} + +const mmap_region_t plat_stratix10_mmap[] = { + MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, + MT_MEMORY | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, + MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE, + MT_NON_CACHEABLE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE, + MT_DEVICE | MT_RW | MT_NS), + MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE, + MT_DEVICE | MT_RW | MT_NS), + {0} +}; + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. At the + * moment this is only initializes the mmu in a quick and dirty way. 
+ ******************************************************************************/ +void bl31_plat_arch_setup(void) +{ + const mmap_region_t bl_regions[] = { + MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE, + MT_MEMORY | MT_RW | MT_SECURE), + MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, + MT_CODE | MT_SECURE), + MAP_REGION_FLAT(BL_RO_DATA_BASE, + BL_RO_DATA_END - BL_RO_DATA_BASE, + MT_RO_DATA | MT_SECURE), +#if USE_COHERENT_MEM + MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, + BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, + MT_DEVICE | MT_RW | MT_SECURE), +#endif + {0} + }; + + setup_page_tables(bl_regions, plat_stratix10_mmap); + enable_mmu_el3(0); +} + diff --git a/plat/intel/soc/stratix10/include/s10_clock_manager.h b/plat/intel/soc/stratix10/include/s10_clock_manager.h new file mode 100644 index 0000000..5f76375 --- /dev/null +++ b/plat/intel/soc/stratix10/include/s10_clock_manager.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __CLOCKMANAGER_H__ +#define __CLOCKMANAGER_H__ + +#include "s10_system_manager.h" +#include "socfpga_handoff.h" + +#define ALT_CLKMGR 0xffd10000 + +#define ALT_CLKMGR_CTRL 0x0 +#define ALT_CLKMGR_STAT 0x4 +#define ALT_CLKMGR_INTRCLR 0x14 +#define ALT_CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK 0x00000004 +#define ALT_CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK 0x00000008 + +#define ALT_CLKMGR_CTRL_BOOTMODE_SET_MSK 0x00000001 +#define ALT_CLKMGR_STAT_BUSY_E_BUSY 0x1 +#define ALT_CLKMGR_STAT_BUSY(x) (((x) & 0x00000001) >> 0) +#define ALT_CLKMGR_STAT_MAINPLLLOCKED(x) (((x) & 0x00000100) >> 8) +#define ALT_CLKMGR_STAT_PERPLLLOCKED(x) (((x) & 0x00000200) >> 9) + +#define ALT_CLKMGR_MAINPLL 0xffd10030 +#define ALT_CLKMGR_MAINPLL_EN 0x0 +#define ALT_CLKMGR_MAINPLL_BYPASS 0xc +#define ALT_CLKMGR_MAINPLL_MPUCLK 0x18 +#define ALT_CLKMGR_MAINPLL_NOCCLK 0x1c +#define ALT_CLKMGR_MAINPLL_CNTR2CLK 0x20 +#define ALT_CLKMGR_MAINPLL_CNTR3CLK 0x24 +#define ALT_CLKMGR_MAINPLL_CNTR4CLK 0x28 +#define ALT_CLKMGR_MAINPLL_CNTR5CLK 0x2c +#define ALT_CLKMGR_MAINPLL_CNTR6CLK 0x30 +#define ALT_CLKMGR_MAINPLL_CNTR7CLK 0x34 +#define ALT_CLKMGR_MAINPLL_CNTR8CLK 0x38 +#define ALT_CLKMGR_MAINPLL_CNTR9CLK 0x3c +#define ALT_CLKMGR_MAINPLL_NOCDIV 0x40 +#define ALT_CLKMGR_MAINPLL_PLLGLOB 0x44 +#define ALT_CLKMGR_MAINPLL_FDBCK 0x48 +#define ALT_CLKMGR_MAINPLL_PLLC0 0x54 +#define ALT_CLKMGR_MAINPLL_PLLC1 0x58 +#define ALT_CLKMGR_MAINPLL_VCOCALIB 0x5c +#define ALT_CLKMGR_MAINPLL_EN_RESET 0x000000ff +#define ALT_CLKMGR_MAINPLL_FDBCK_MDIV(x) (((x) & 0xff000000) >> 24) +#define ALT_CLKMGR_MAINPLL_PLLGLOB_PD_SET_MSK 0x00000001 +#define ALT_CLKMGR_MAINPLL_PLLGLOB_REFCLKDIV(x) (((x) & 0x00003f00) >> 8) +#define ALT_CLKMGR_MAINPLL_PLLGLOB_RST_SET_MSK 0x00000002 +#define ALT_CLKMGR_MAINPLL_VCOCALIB_HSCNT_SET(x) (((x) << 0) & 0x000000ff) +#define ALT_CLKMGR_MAINPLL_VCOCALIB_MSCNT_SET(x) (((x) << 9) & 0x0001fe00) + +#define ALT_CLKMGR_PSRC(x) (((x) & 0x00030000) >> 16) +#define ALT_CLKMGR_SRC_MAIN 0 +#define ALT_CLKMGR_SRC_PER 1 + +#define ALT_CLKMGR_PLLGLOB_PSRC_EOSC1 0x0 +#define ALT_CLKMGR_PLLGLOB_PSRC_INTOSC 0x1 +#define ALT_CLKMGR_PLLGLOB_PSRC_F2S 0x2 + +#define ALT_CLKMGR_PERPLL 0xffd100a4 +#define ALT_CLKMGR_PERPLL_EN 0x0 +#define ALT_CLKMGR_PERPLL_EN_SDMMCCLK BIT(5) +#define ALT_CLKMGR_PERPLL_BYPASS 0xc +#define ALT_CLKMGR_PERPLL_CNTR2CLK 0x18 +#define ALT_CLKMGR_PERPLL_CNTR3CLK 0x1c +#define ALT_CLKMGR_PERPLL_CNTR4CLK 0x20 +#define ALT_CLKMGR_PERPLL_CNTR5CLK 0x24 +#define ALT_CLKMGR_PERPLL_CNTR6CLK 0x28 
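Editorial aside: the PLLGLOB/FDBCK field accessors defined above are the building blocks that the Stratix 10 clock code later in this patch uses to reconstruct PLL rates. A minimal sketch of that pattern, assuming <lib/mmio.h> is available; example_main_pll_hz and eosc1_hz are hypothetical names used only for illustration, and the relation mirrors get_ref_clk() in s10_clock_manager.c further down in this patch:

static uint32_t example_main_pll_hz(uint32_t eosc1_hz)
{
	/* Read the main PLL global config and feedback registers. */
	uint32_t pllglob = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLGLOB);
	uint32_t fdbck = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_FDBCK);
	uint32_t refclkdiv = ALT_CLKMGR_MAINPLL_PLLGLOB_REFCLKDIV(pllglob);
	uint32_t mdiv = ALT_CLKMGR_MAINPLL_FDBCK_MDIV(fdbck);

	/* Same scaling get_ref_clk() applies: (ref / refclkdiv) * (6 + mdiv). */
	return (eosc1_hz / refclkdiv) * (6 + mdiv);
}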
+#define ALT_CLKMGR_PERPLL_CNTR7CLK 0x2c +#define ALT_CLKMGR_PERPLL_CNTR8CLK 0x30 +#define ALT_CLKMGR_PERPLL_CNTR9CLK 0x34 +#define ALT_CLKMGR_PERPLL_GPIODIV 0x3c +#define ALT_CLKMGR_PERPLL_EMACCTL 0x38 +#define ALT_CLKMGR_PERPLL_PLLGLOB 0x40 +#define ALT_CLKMGR_PERPLL_FDBCK 0x44 +#define ALT_CLKMGR_PERPLL_PLLC0 0x50 +#define ALT_CLKMGR_PERPLL_PLLC1 0x54 +#define ALT_CLKMGR_PERPLL_EN_RESET 0x00000fff +#define ALT_CLKMGR_PERPLL_FDBCK_MDIV(x) (((x) & 0xff000000) >> 24) +#define ALT_CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET(x) (((x) << 0) & 0x0000ffff) +#define ALT_CLKMGR_PERPLL_PLLGLOB_PD_SET_MSK 0x00000001 +#define ALT_CLKMGR_PERPLL_PLLGLOB_REFCLKDIV(x) (((x) & 0x00003f00) >> 8) +#define ALT_CLKMGR_PERPLL_PLLGLOB_REFCLKDIV_SET(x) (((x) << 8) & 0x00003f00) +#define ALT_CLKMGR_PERPLL_PLLGLOB_RST_SET_MSK 0x00000002 +#define ALT_CLKMGR_PERPLL_VCOCALIB_HSCNT_SET(x) (((x) << 0) & 0x000000ff) +#define ALT_CLKMGR_PERPLL_VCOCALIB_MSCNT_SET(x) (((x) << 9) & 0x0001fe00) +#define ALT_CLKMGR_PERPLL_VCOCALIB 0x58 + +#define ALT_CLKMGR_INTOSC_HZ 460000000 + +void config_clkmgr_handoff(handoff *hoff_ptr); +uint32_t get_wdt_clk(void); +uint32_t get_uart_clk(void); +uint32_t get_mmc_clk(void); +uint32_t get_l3_clk(uint32_t ref_clk); +uint32_t get_ref_clk(uint32_t pllglob); +uint32_t get_cpu_clk(void); + +#endif diff --git a/plat/intel/soc/stratix10/include/s10_memory_controller.h b/plat/intel/soc/stratix10/include/s10_memory_controller.h new file mode 100644 index 0000000..155b279 --- /dev/null +++ b/plat/intel/soc/stratix10/include/s10_memory_controller.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2019, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __S10_MEMORYCONTROLLER_H__ +#define __S10_MEMORYCONTROLLER_H__ + +#define S10_MPFE_IOHMC_REG_DRAMADDRW 0xf80100a8 +#define S10_MPFE_IOHMC_CTRLCFG0 0xf8010028 +#define S10_MPFE_IOHMC_CTRLCFG1 0xf801002c +#define S10_MPFE_IOHMC_DRAMADDRW 0xf80100a8 +#define S10_MPFE_IOHMC_DRAMTIMING0 0xf8010050 +#define S10_MPFE_IOHMC_CALTIMING0 0xf801007c +#define S10_MPFE_IOHMC_CALTIMING1 0xf8010080 +#define S10_MPFE_IOHMC_CALTIMING2 0xf8010084 +#define S10_MPFE_IOHMC_CALTIMING3 0xf8010088 +#define S10_MPFE_IOHMC_CALTIMING4 0xf801008c +#define S10_MPFE_IOHMC_CALTIMING9 0xf80100a0 +#define S10_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(x) (((x) & 0x000000ff) >> 0) +#define S10_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(value) \ + (((value) & 0x00000060) >> 5) + + +#define S10_MPFE_HMC_ADP_ECCCTRL1 0xf8011100 +#define S10_MPFE_HMC_ADP_ECCCTRL2 0xf8011104 +#define S10_MPFE_HMC_ADP_RSTHANDSHAKESTAT 0xf8011218 +#define S10_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE 0x000000ff +#define S10_MPFE_HMC_ADP_RSTHANDSHAKECTRL 0xf8011214 + + +#define S10_MPFE_IOHMC_REG_CTRLCFG1 0xf801002c + +#define S10_MPFE_IOHMC_REG_NIOSRESERVE0_OFST 0xf8010110 + +#define IOHMC_DRAMADDRW_COL_ADDR_WIDTH(x) (((x) & 0x0000001f) >> 0) +#define IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(x) (((x) & 0x000003e0) >> 5) +#define IOHMC_DRAMADDRW_CS_ADDR_WIDTH(x) (((x) & 0x00070000) >> 16) +#define IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(x) (((x) & 0x0000c000) >> 14) +#define IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(x) (((x) & 0x00003c00) >> 10) + +#define S10_MPFE_DDR(x) (0xf8000000 + x) +#define S10_MPFE_HMC_ADP_DDRCALSTAT 0xf801100c +#define S10_MPFE_DDR_MAIN_SCHED 0xf8000400 +#define S10_MPFE_DDR_MAIN_SCHED_DDRCONF 0xf8000408 +#define S10_MPFE_DDR_MAIN_SCHED_DDRTIMING 0xf800040c +#define S10_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK 0x0000001f +#define S10_MPFE_DDR_MAIN_SCHED_DDRMODE 0xf8000410 +#define 
S10_MPFE_DDR_MAIN_SCHED_DEVTODEV 0xf800043c +#define S10_MPFE_DDR_MAIN_SCHED_READLATENCY 0xf8000414 +#define S10_MPFE_DDR_MAIN_SCHED_ACTIVATE 0xf8000438 +#define S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST 10 +#define S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST 4 +#define S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST 0 +#define S10_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(x) (((x) << 0) & 0x0000001f) +#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST 0 +#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK (BIT(0) | BIT(1)) +#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST 2 +#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK (BIT(2) | BIT(3)) +#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST 4 +#define S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK (BIT(4) | BIT(5)) + +#define S10_MPFE_HMC_ADP(x) (0xf8011000 + (x)) +#define S10_MPFE_HMC_ADP_HPSINTFCSEL 0xf8011210 +#define S10_MPFE_HMC_ADP_DDRIOCTRL 0xf8011008 +#define HMC_ADP_DDRIOCTRL 0x8 +#define HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00000003) >> 0) +#define HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(x) (((x) & 0x00003e00) >> 9) +#define ADP_DRAMADDRWIDTH 0xe0 + +#define ACT_TO_ACT_DIFF_BANK(value) (((value) & 0x00fc0000) >> 18) +#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12) +#define ACT_TO_RDWR(value) (((value) & 0x0000003f) >> 0) +#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12) + +/* timing 2 */ +#define RD_TO_RD_DIFF_CHIP(value) (((value) & 0x00000fc0) >> 6) +#define RD_TO_WR_DIFF_CHIP(value) (((value) & 0x3f000000) >> 24) +#define RD_TO_WR(value) (((value) & 0x00fc0000) >> 18) +#define RD_TO_PCH(value) (((value) & 0x00000fc0) >> 6) + +/* timing 3 */ +#define CALTIMING3_WR_TO_RD_DIFF_CHIP(value) (((value) & 0x0003f000) >> 12) +#define CALTIMING3_WR_TO_RD(value) (((value) & 0x00000fc0) >> 6) + +/* timing 4 */ +#define PCH_TO_VALID(value) (((value) & 0x00000fc0) >> 6) + +#define DDRTIMING_BWRATIO_OFST 31 +#define DDRTIMING_WRTORD_OFST 26 +#define DDRTIMING_RDTOWR_OFST 21 +#define DDRTIMING_BURSTLEN_OFST 18 +#define DDRTIMING_WRTOMISS_OFST 12 +#define DDRTIMING_RDTOMISS_OFST 6 +#define DDRTIMING_ACTTOACT_OFST 0 + +#define ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00000003) >> 0) + +#define DDRMODE_AUTOPRECHARGE_OFST 1 +#define DDRMODE_BWRATIOEXTENDED_OFST 0 + + +#define S10_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(x) (((x) & 0x0000007f) >> 0) +#define S10_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(x) (((x) & 0x0000000f) >> 0) + +#define S10_CCU_CPU0_MPRT_DDR 0xf7004400 +#define S10_CCU_CPU0_MPRT_MEM0 0xf70045c0 +#define S10_CCU_CPU0_MPRT_MEM1A 0xf70045e0 +#define S10_CCU_CPU0_MPRT_MEM1B 0xf7004600 +#define S10_CCU_CPU0_MPRT_MEM1C 0xf7004620 +#define S10_CCU_CPU0_MPRT_MEM1D 0xf7004640 +#define S10_CCU_CPU0_MPRT_MEM1E 0xf7004660 +#define S10_CCU_IOM_MPRT_MEM0 0xf7018560 +#define S10_CCU_IOM_MPRT_MEM1A 0xf7018580 +#define S10_CCU_IOM_MPRT_MEM1B 0xf70185a0 +#define S10_CCU_IOM_MPRT_MEM1C 0xf70185c0 +#define S10_CCU_IOM_MPRT_MEM1D 0xf70185e0 +#define S10_CCU_IOM_MPRT_MEM1E 0xf7018600 + +#define S10_NOC_FW_DDR_SCR 0xf8020100 +#define S10_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT 0xf802011c +#define S10_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT 0xf8020118 +#define S10_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT 0xf802019c +#define S10_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT 0xf8020198 + +#define S10_SOC_NOC_FW_DDR_SCR_ENABLE 0xf8020100 +#define S10_CCU_NOC_DI_SET_MSK 0x10 + +#define S10_SYSMGR_CORE_HMC_CLK 0xffd120b4 +#define S10_SYSMGR_CORE_HMC_CLK_STATUS 0x00000001 + +#define S10_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(x) (((x) & 0x0000ffff) >> 0) 
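Editorial aside: the CALTIMING*/DDRTIMING macros above are consumed in a read-extract-repack pattern by configure_ddr_sched_ctrl_regs() later in this patch. A minimal sketch of one field, assuming <lib/mmio.h>; the helper name is hypothetical and only the single field is shown:

static uint32_t example_acttoact_field(void)
{
	/* ACT_TO_ACT() pulls bits [17:12] out of CALTIMING0. */
	uint32_t cal0 = mmio_read_32(S10_MPFE_IOHMC_CALTIMING0);
	uint32_t act_to_act = ACT_TO_ACT(cal0);

	/* The scheduler expects this field at DDRTIMING bits [5:0]. */
	return act_to_act << DDRTIMING_ACTTOACT_OFST;
}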
+#define S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK 0x00000003 +#define S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST 0 +#define S10_MPFE_HMC_ADP_HPSINTFCSEL_ENABLE 0x001f1f1f +#define S10_IOHMC_CTRLCFG1_ENABLE_ECC_OFST 7 + +#define S10_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK 0x00010000 +#define S10_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK 0x00000100 +#define S10_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK 0x00000001 + +#define S10_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK 0x00000001 +#define S10_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK 0x00010000 +#define S10_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK 0x00000100 +#define S10_MPFE_HMC_ADP_DDRCALSTAT_CAL(value) (((value) & 0x00000001) >> 0) + + +#define S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00000003) >> 0) +#define IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(x) (((x) & 0x00003c00) >> 10) +#define IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(x) (((x) & 0x0000c000) >> 14) +#define IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(x) (((x) & 0x0000001f) >> 0) +#define IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(x) (((x) & 0x00070000) >> 16) +#define IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(x) (((x) & 0x000003e0) >> 5) + +#define S10_SDRAM_0_LB_ADDR 0x0 + +int init_hard_memory_controller(void); + +#endif diff --git a/plat/intel/soc/stratix10/include/s10_mmc.h b/plat/intel/soc/stratix10/include/s10_mmc.h new file mode 100644 index 0000000..99f86f5 --- /dev/null +++ b/plat/intel/soc/stratix10/include/s10_mmc.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2022, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __S10_MMC_H__ +#define __S10_MMC_H__ + +void s10_mmc_init(void); + +#endif /* S10_MMC_H */ diff --git a/plat/intel/soc/stratix10/include/s10_pinmux.h b/plat/intel/soc/stratix10/include/s10_pinmux.h new file mode 100644 index 0000000..82367d7 --- /dev/null +++ b/plat/intel/soc/stratix10/include/s10_pinmux.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2019, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __S10_PINMUX_H__ +#define __S10_PINMUX_H__ + +#define S10_PINMUX_PIN0SEL 0xffd13000 +#define S10_PINMUX_IO0CTRL 0xffd13130 +#define S10_PINMUX_PINMUX_EMAC0_USEFPGA 0xffd13300 +#define S10_PINMUX_IO0_DELAY 0xffd13400 + +#include "socfpga_handoff.h" + +void config_pinmux(handoff *handoff); + +#endif + diff --git a/plat/intel/soc/stratix10/include/s10_system_manager.h b/plat/intel/soc/stratix10/include/s10_system_manager.h new file mode 100644 index 0000000..88c0b46 --- /dev/null +++ b/plat/intel/soc/stratix10/include/s10_system_manager.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2019-2023, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef S10_SOCFPGA_SYSTEMMANAGER_H +#define S10_SOCFPGA_SYSTEMMANAGER_H + +#include "socfpga_plat_def.h" + +/* System Manager Register Map */ +#define SOCFPGA_SYSMGR_SILICONID_1 0x00 +#define SOCFPGA_SYSMGR_SILICONID_2 0x04 +#define SOCFPGA_SYSMGR_WDDBG 0x08 +#define SOCFPGA_SYSMGR_MPU_STATUS 0x10 +#define SOCFPGA_SYSMGR_SDMMC_L3_MASTER 0x2C +#define SOCFPGA_SYSMGR_NAND_L3_MASTER 0x34 +#define SOCFPGA_SYSMGR_USB0_L3_MASTER 0x38 +#define SOCFPGA_SYSMGR_USB1_L3_MASTER 0x3C +#define SOCFPGA_SYSMGR_TSN_GLOBAL 0x40 +#define SOCFPGA_SYSMGR_EMAC_0 0x44 /* TSN_0 */ +#define SOCFPGA_SYSMGR_EMAC_1 0x48 /* TSN_1 */ +#define SOCFPGA_SYSMGR_EMAC_2 0x4C /* TSN_2 */ +#define SOCFPGA_SYSMGR_TSN_0_ACE 0x50 +#define SOCFPGA_SYSMGR_TSN_1_ACE 0x54 +#define SOCFPGA_SYSMGR_TSN_2_ACE 0x58 +#define SOCFPGA_SYSMGR_FPGAINTF_EN_1 0x68 +#define SOCFPGA_SYSMGR_FPGAINTF_EN_2 0x6C +#define SOCFPGA_SYSMGR_FPGAINTF_EN_3 0x70 +#define SOCFPGA_SYSMGR_DMAC0_L3_MASTER 0x74 +#define SOCFPGA_SYSMGR_ETR_L3_MASTER 0x78 +#define SOCFPGA_SYSMGR_DMAC1_L3_MASTER 0x7C +#define SOCFPGA_SYSMGR_SEC_CTRL_SLT 0x80 +#define SOCFPGA_SYSMGR_OSC_TRIM 0x84 +#define SOCFPGA_SYSMGR_DMAC0_CTRL_STATUS_REG 0x88 +#define SOCFPGA_SYSMGR_DMAC1_CTRL_STATUS_REG 0x8C +#define SOCFPGA_SYSMGR_ECC_INTMASK_VALUE 0x90 +#define SOCFPGA_SYSMGR_ECC_INTMASK_SET 0x94 +#define SOCFPGA_SYSMGR_ECC_INTMASK_CLR 0x98 +#define SOCFPGA_SYSMGR_ECC_INTMASK_SERR 0x9C +#define SOCFPGA_SYSMGR_ECC_INTMASK_DERR 0xA0 +/* NOC configuration value for Agilex5 */ +#define SOCFPGA_SYSMGR_NOC_TIMEOUT 0xC0 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_SET 0xC4 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_CLR 0xC8 +#define SOCFPGA_SYSMGR_NOC_IDLEREQ_VAL 0xCC +#define SOCFPGA_SYSMGR_NOC_IDLEACK 0xD0 +#define SOCFPGA_SYSMGR_NOC_IDLESTATUS 0xD4 +#define SOCFPGA_SYSMGR_FPGA2SOC_CTRL 0xD8 +#define SOCFPGA_SYSMGR_FPGA_CFG 0xDC +#define SOCFPGA_SYSMGR_GPO 0xE4 +#define SOCFPGA_SYSMGR_GPI 0xE8 +#define SOCFPGA_SYSMGR_MPU 0xF0 +#define SOCFPGA_SYSMGR_SDM_HPS_SPARE 0xF4 +#define SOCFPGA_SYSMGR_HPS_SDM_SPARE 0xF8 +#define SOCFPGA_SYSMGR_DFI_INTF 0xFC +#define SOCFPGA_SYSMGR_NAND_DD_CTRL 0x100 +#define SOCFPGA_SYSMGR_NAND_PHY_CTRL_REG 0x104 +#define SOCFPGA_SYSMGR_NAND_PHY_TSEL_REG 0x108 +#define SOCFPGA_SYSMGR_NAND_DQ_TIMING_REG 0x10C +#define SOCFPGA_SYSMGR_PHY_DQS_TIMING_REG 0x110 +#define SOCFPGA_SYSMGR_NAND_PHY_GATE_LPBK_CTRL_REG 0x114 +#define SOCFPGA_SYSMGR_NAND_PHY_DLL_MASTER_CTRL_REG 0x118 +#define SOCFPGA_SYSMGR_NAND_PHY_DLL_SLAVE_CTRL_REG 0x11C +#define SOCFPGA_SYSMGR_NAND_DD_DEFAULT_SETTING_REG0 0x120 +#define SOCFPGA_SYSMGR_NAND_DD_DEFAULT_SETTING_REG1 0x124 +#define SOCFPGA_SYSMGR_NAND_DD_STATUS_REG 0x128 +#define SOCFPGA_SYSMGR_NAND_DD_ID_LOW_REG 0x12C +#define SOCFPGA_SYSMGR_NAND_DD_ID_HIGH_REG 0x130 +#define SOCFPGA_SYSMGR_NAND_WRITE_PROT_EN_REG 0x134 +#define SOCFPGA_SYSMGR_SDMMC_CMD_QUEUE_SETTING_REG 0x138 +#define SOCFPGA_SYSMGR_I3C_SLV_PID_LOW 0x13C +#define SOCFPGA_SYSMGR_I3C_SLV_PID_HIGH 0x140 +#define SOCFPGA_SYSMGR_I3C_SLV_CTRL_0 0x144 +#define SOCFPGA_SYSMGR_I3C_SLV_CTRL_1 0x148 +#define SOCFPGA_SYSMGR_F2S_BRIDGE_CTRL 0x14C +#define SOCFPGA_SYSMGR_DMA_TBU_STASH_CTRL_REG_0_DMA0 0x150 +#define SOCFPGA_SYSMGR_DMA_TBU_STASH_CTRL_REG_0_DMA1 0x154 +#define SOCFPGA_SYSMGR_SDM_TBU_STASH_CTRL_REG_1_SDM 0x158 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_USB2 0x15C +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_USB3 0x160 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_SDMMC 0x164 +#define SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_NAND 0x168 +#define 
SOCFPGA_SYSMGR_IO_TBU_STASH_CTRL_REG_2_ETR 0x16C +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN0 0x170 +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN1 0x174 +#define SOCFPGA_SYSMGR_TSN_TBU_STASH_CTRL_REG_3_TSN2 0x178 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_CTRL_REG_0_DMA0 0x17C +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_CTRL_REG_0_DMA1 0x180 +#define SOCFPGA_SYSMGR_SDM_TBU_STREAM_CTRL_REG_1_SDM 0x184 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_USB2 0x188 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_USB3 0x18C +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_SDMMC 0x190 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_NAND 0x194 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_CTRL_REG_2_ETR 0x198 +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN0 0x19C +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN1 0x1A0 +#define SOCFPGA_SYSMGR_TSN_TBU_STREAM_CTRL_REG_3_TSN2 0x1A4 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_ID_AX_REG_0_DMA0 0x1A8 +#define SOCFPGA_SYSMGR_DMA_TBU_STREAM_ID_AX_REG_0_DMA1 0x1AC +#define SOCFPGA_SYSMGR_SDM_TBU_STREAM_ID_AX_REG_1_SDM 0x1B0 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_USB2 0x1B4 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_USB3 0x1B8 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_SDMMC 0x1BC +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_NAND 0x1C0 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_ETR 0x1C4 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN0 0x1C8 +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN1 0x1CC +#define SOCFPGA_SYSMGR_IO_TBU_STREAM_ID_AX_REG_2_TSN2 0x1D0 +#define SOCFPGA_SYSMGR_USB3_MISC_CTRL_REG0 0x1F0 +#define SOCFPGA_SYSMGR_USB3_MISC_CTRL_REG1 0x1F4 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_0 0x200 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_1 0x204 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_2 0x208 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_3 0x20C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_4 0x210 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_5 0x214 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_6 0x218 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_7 0x21C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_8 0x220 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_COLD_9 0x224 +#define SOCFPGA_SYSMGR_MPFE_CONFIG 0x228 +#define SOCFPGA_SYSMGR_MPFE_status 0x22C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_0 0x230 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_1 0x234 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_2 0x238 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_3 0x23C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_4 0x240 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_5 0x244 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_6 0x248 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_7 0x24C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_8 0x250 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_WARM_9 0x254 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_0 0x258 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_1 0x25C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_2 0x260 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_3 0x264 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_4 0x268 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_5 0x26C +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_6 0x270 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_7 0x274 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_8 0x278 +#define SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_9 0x27C + +#define DMA0_STREAM_CTRL_REG 0x10D1217C +#define DMA1_STREAM_CTRL_REG 0x10D12180 +#define SDM_STREAM_CTRL_REG 0x10D12184 +#define USB2_STREAM_CTRL_REG 0x10D12188 +#define USB3_STREAM_CTRL_REG 0x10D1218C +#define SDMMC_STREAM_CTRL_REG 0x10D12190 +#define NAND_STREAM_CTRL_REG 0x10D12194 +#define 
ETR_STREAM_CTRL_REG 0x10D12198 +#define TSN0_STREAM_CTRL_REG 0x10D1219C +#define TSN1_STREAM_CTRL_REG 0x10D121A0 +#define TSN2_STREAM_CTRL_REG 0x10D121A4 + +/* Stream ID configuration value for Agilex5 */ +#define TSN0 0x00010001 +#define TSN1 0x00020002 +#define TSN2 0x00030003 +#define NAND 0x00040004 +#define SDMMC 0x00050005 +#define USB0 0x00060006 +#define USB1 0x00070007 +#define DMA0 0x00080008 +#define DMA1 0x00090009 +#define SDM 0x000A000A +#define CORE_SIGHT_DEBUG 0x000B000B + +/* Field Masking */ +#define SYSMGR_SDMMC_DRVSEL(x) (((x) & 0x7) << 0) +#define SYSMGR_SDMMC_SMPLSEL(x) (((x) & 0x7) << 4) +#define IDLE_DATA_LWSOC2FPGA BIT(4) +#define IDLE_DATA_SOC2FPGA BIT(0) +#define IDLE_DATA_MASK (IDLE_DATA_LWSOC2FPGA | IDLE_DATA_SOC2FPGA) +#define SYSMGR_ECC_OCRAM_MASK BIT(1) +#define SYSMGR_ECC_DDR0_MASK BIT(16) +#define SYSMGR_ECC_DDR1_MASK BIT(17) +#define WSTREAMIDEN_REG_CTRL BIT(0) +#define RSTREAMIDEN_REG_CTRL BIT(1) +#define WMMUSECSID_REG_VAL BIT(4) +#define RMMUSECSID_REG_VAL BIT(5) + +/* Macros */ + +#define SOCFPGA_SYSMGR(_reg) (SOCFPGA_SYSMGR_REG_BASE \ + + (SOCFPGA_SYSMGR_##_reg)) +#define ENABLE_STREAMID WSTREAMIDEN_REG_CTRL | \ + RSTREAMIDEN_REG_CTRL +#define ENABLE_STREAMID_SECURE_TX WSTREAMIDEN_REG_CTRL | \ + RSTREAMIDEN_REG_CTRL | \ + WMMUSECSID_REG_VAL | RMMUSECSID_REG_VAL + +#endif /* S10_SOCFPGA_SYSTEMMANAGER_H */ diff --git a/plat/intel/soc/stratix10/include/socfpga_plat_def.h b/plat/intel/soc/stratix10/include/socfpga_plat_def.h new file mode 100644 index 0000000..7c9f15a --- /dev/null +++ b/plat/intel/soc/stratix10/include/socfpga_plat_def.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2019-2023, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_SOCFPGA_DEF_H +#define PLAT_SOCFPGA_DEF_H + +#include <platform_def.h> +#include "s10_system_manager.h" + +/* Platform Setting */ +#define PLATFORM_MODEL PLAT_SOCFPGA_STRATIX10 +#define BOOT_SOURCE BOOT_SOURCE_SDMMC +#define PLAT_PRIMARY_CPU 0 +#define PLAT_CLUSTER_ID_MPIDR_AFF_SHIFT MPIDR_AFF1_SHIFT +#define PLAT_CPU_ID_MPIDR_AFF_SHIFT MPIDR_AFF0_SHIFT + +/* FPGA config helpers */ +#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR 0x400000 +#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE 0x1000000 + +/* QSPI Setting */ +#define CAD_QSPIDATA_OFST 0xff900000 +#define CAD_QSPI_OFFSET 0xff8d2000 + +/* Register Mapping */ +#define SOCFPGA_CCU_NOC_REG_BASE 0xf7000000 +#define SOCFPGA_F2SDRAMMGR_REG_BASE U(0xf8024000) + +#define SOCFPGA_MMC_REG_BASE 0xff808000 + +#define SOCFPGA_RSTMGR_REG_BASE 0xffd11000 +#define SOCFPGA_SYSMGR_REG_BASE 0xffd12000 + +#define SOCFPGA_L4_PER_SCR_REG_BASE 0xffd21000 +#define SOCFPGA_L4_SYS_SCR_REG_BASE 0xffd21100 +#define SOCFPGA_SOC2FPGA_SCR_REG_BASE 0xffd21200 +#define SOCFPGA_LWSOC2FPGA_SCR_REG_BASE 0xffd21300 + +/******************************************************************************* + * Platform memory map related constants + ******************************************************************************/ +#define DRAM_BASE (0x0) +#define DRAM_SIZE (0x80000000) + +#define OCRAM_BASE (0xFFE00000) +#define OCRAM_SIZE (0x00040000) + +#define MEM64_BASE (0x0100000000) +#define MEM64_SIZE (0x1F00000000) + +#define DEVICE1_BASE (0x80000000) +#define DEVICE1_SIZE (0x60000000) + +#define DEVICE2_BASE (0xF7000000) +#define DEVICE2_SIZE (0x08E00000) + +#define DEVICE3_BASE (0xFFFC0000) +#define DEVICE3_SIZE (0x00008000) + +#define DEVICE4_BASE (0x2000000000) +#define DEVICE4_SIZE (0x0100000000) + +#define BL2_BASE (0xffe00000) +#define BL2_LIMIT 
(0xffe1b000) + +#define BL31_BASE (0x1000) +#define BL31_LIMIT (0x81000) + +/******************************************************************************* + * UART related constants + ******************************************************************************/ +#define PLAT_UART0_BASE (0xFFC02000) +#define PLAT_UART1_BASE (0xFFC02100) + +/******************************************************************************* + * GIC related constants + ******************************************************************************/ +#define PLAT_GIC_BASE (0xFFFC0000) +#define PLAT_GICC_BASE (PLAT_GIC_BASE + 0x2000) +#define PLAT_GICD_BASE (PLAT_GIC_BASE + 0x1000) +#define PLAT_GICR_BASE 0 + +#define PLAT_SYS_COUNTER_FREQ_IN_TICKS (400000000) +#define PLAT_HZ_CONVERT_TO_MHZ (1000000) + +/******************************************************************************* + * SDMMC related pointer function + ******************************************************************************/ +#define SDMMC_READ_BLOCKS mmc_read_blocks +#define SDMMC_WRITE_BLOCKS mmc_write_blocks + +/******************************************************************************* + * sysmgr.boot_scratch_cold6 & 7 (64bit) are used to indicate L2 reset + * is done and HPS should trigger warm reset via RMR_EL3. + ******************************************************************************/ +#define L2_RESET_DONE_REG 0xFFD12218 + +/* Platform specific system counter */ +#define PLAT_SYS_COUNTER_FREQ_IN_MHZ get_cpu_clk() + +#endif /* PLATSOCFPGA_DEF_H */ + diff --git a/plat/intel/soc/stratix10/platform.mk b/plat/intel/soc/stratix10/platform.mk new file mode 100644 index 0000000..6bc96fb --- /dev/null +++ b/plat/intel/soc/stratix10/platform.mk @@ -0,0 +1,80 @@ +# +# Copyright (c) 2019-2023, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2019-2022, Intel Corporation. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +PLAT_INCLUDES := \ + -Iplat/intel/soc/stratix10/include/ \ + -Iplat/intel/soc/common/drivers/ \ + -Iplat/intel/soc/common/include/ + +# Include GICv2 driver files +include drivers/arm/gic/v2/gicv2.mk +AGX_GICv2_SOURCES := \ + ${GICV2_SOURCES} \ + plat/common/plat_gicv2.c + + +PLAT_BL_COMMON_SOURCES := \ + ${AGX_GICv2_SOURCES} \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + drivers/ti/uart/aarch64/16550_console.S \ + lib/xlat_tables/aarch64/xlat_tables.c \ + lib/xlat_tables/xlat_tables_common.c \ + plat/intel/soc/common/aarch64/platform_common.c \ + plat/intel/soc/common/aarch64/plat_helpers.S \ + plat/intel/soc/common/socfpga_delay_timer.c \ + plat/intel/soc/common/soc/socfpga_firewall.c + +BL2_SOURCES += \ + common/desc_image_load.c \ + drivers/mmc/mmc.c \ + drivers/intel/soc/stratix10/io/s10_memmap_qspi.c \ + drivers/io/io_storage.c \ + drivers/io/io_block.c \ + drivers/io/io_fip.c \ + drivers/partition/partition.c \ + drivers/partition/gpt.c \ + drivers/synopsys/emmc/dw_mmc.c \ + lib/cpus/aarch64/cortex_a53.S \ + plat/intel/soc/stratix10/bl2_plat_setup.c \ + plat/intel/soc/stratix10/soc/s10_clock_manager.c \ + plat/intel/soc/stratix10/soc/s10_memory_controller.c \ + plat/intel/soc/stratix10/soc/s10_mmc.c \ + plat/intel/soc/stratix10/soc/s10_pinmux.c \ + plat/intel/soc/common/bl2_plat_mem_params_desc.c \ + plat/intel/soc/common/socfpga_image_load.c \ + plat/intel/soc/common/socfpga_storage.c \ + plat/intel/soc/common/soc/socfpga_emac.c \ + plat/intel/soc/common/soc/socfpga_handoff.c \ + plat/intel/soc/common/soc/socfpga_mailbox.c \ + plat/intel/soc/common/soc/socfpga_reset_manager.c \ + plat/intel/soc/common/drivers/qspi/cadence_qspi.c \ + plat/intel/soc/common/drivers/wdt/watchdog.c + +include lib/zlib/zlib.mk +PLAT_INCLUDES += -Ilib/zlib +BL2_SOURCES += $(ZLIB_SOURCES) + +BL31_SOURCES += \ + drivers/arm/cci/cci.c \ + lib/cpus/aarch64/aem_generic.S \ + lib/cpus/aarch64/cortex_a53.S \ + plat/common/plat_psci_common.c \ + plat/intel/soc/stratix10/soc/s10_clock_manager.c \ + plat/intel/soc/stratix10/bl31_plat_setup.c \ + plat/intel/soc/common/socfpga_psci.c \ + plat/intel/soc/common/socfpga_sip_svc.c \ + plat/intel/soc/common/socfpga_sip_svc_v2.c \ + plat/intel/soc/common/socfpga_topology.c \ + plat/intel/soc/common/sip/socfpga_sip_ecc.c \ + plat/intel/soc/common/sip/socfpga_sip_fcs.c \ + plat/intel/soc/common/soc/socfpga_mailbox.c \ + plat/intel/soc/common/soc/socfpga_reset_manager.c + +PROGRAMMABLE_RESET_ADDRESS := 0 +RESET_TO_BL2 := 1 +USE_COHERENT_MEM := 1 diff --git a/plat/intel/soc/stratix10/soc/s10_clock_manager.c b/plat/intel/soc/stratix10/soc/s10_clock_manager.c new file mode 100644 index 0000000..416d359 --- /dev/null +++ b/plat/intel/soc/stratix10/soc/s10_clock_manager.c @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2019-2022, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <arch_helpers.h> +#include <assert.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <platform_def.h> + +#include "s10_clock_manager.h" +#include "socfpga_handoff.h" +#include "socfpga_system_manager.h" + + +void wait_pll_lock(void) +{ + uint32_t data; + + do { + data = mmio_read_32(ALT_CLKMGR + ALT_CLKMGR_STAT); + } while ((ALT_CLKMGR_STAT_MAINPLLLOCKED(data) == 0) || + (ALT_CLKMGR_STAT_PERPLLLOCKED(data) == 0)); +} + +void wait_fsm(void) +{ + uint32_t data; + + do { + data = mmio_read_32(ALT_CLKMGR + ALT_CLKMGR_STAT); + } while (ALT_CLKMGR_STAT_BUSY(data) == ALT_CLKMGR_STAT_BUSY_E_BUSY); +} + +void config_clkmgr_handoff(handoff *hoff_ptr) +{ + uint32_t m_div, refclk_div, mscnt, hscnt; + + /* Bypass all mainpllgrp's clocks */ + mmio_write_32(ALT_CLKMGR_MAINPLL + + ALT_CLKMGR_MAINPLL_BYPASS, + 0x7); + wait_fsm(); + /* Bypass all perpllgrp's clocks */ + mmio_write_32(ALT_CLKMGR_PERPLL + + ALT_CLKMGR_PERPLL_BYPASS, + 0x7f); + wait_fsm(); + + /* Setup main PLL dividers */ + m_div = ALT_CLKMGR_MAINPLL_FDBCK_MDIV(hoff_ptr->main_pll_fdbck); + refclk_div = ALT_CLKMGR_MAINPLL_PLLGLOB_REFCLKDIV( + hoff_ptr->main_pll_pllglob); + mscnt = 200 / ((6 + m_div) / refclk_div); + hscnt = (m_div + 6) * mscnt / refclk_div - 9; + + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLGLOB, + hoff_ptr->main_pll_pllglob); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_FDBCK, + hoff_ptr->main_pll_fdbck); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_VCOCALIB, + ALT_CLKMGR_MAINPLL_VCOCALIB_HSCNT_SET(hscnt) | + ALT_CLKMGR_MAINPLL_VCOCALIB_MSCNT_SET(mscnt)); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLC0, + hoff_ptr->main_pll_pllc0); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLC1, + hoff_ptr->main_pll_pllc1); + + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_NOCDIV, + hoff_ptr->main_pll_nocdiv); + + /* Setup peripheral PLL dividers */ + m_div = ALT_CLKMGR_PERPLL_FDBCK_MDIV(hoff_ptr->per_pll_fdbck); + refclk_div = ALT_CLKMGR_PERPLL_PLLGLOB_REFCLKDIV( + hoff_ptr->per_pll_pllglob); + mscnt = 200 / ((6 + m_div) / refclk_div); + hscnt = (m_div + 6) * mscnt / refclk_div - 9; + + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLGLOB, + hoff_ptr->per_pll_pllglob); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_FDBCK, + hoff_ptr->per_pll_fdbck); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_VCOCALIB, + ALT_CLKMGR_PERPLL_VCOCALIB_HSCNT_SET(hscnt) | + ALT_CLKMGR_PERPLL_VCOCALIB_MSCNT_SET(mscnt)); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLC0, + hoff_ptr->per_pll_pllc0); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLC1, + hoff_ptr->per_pll_pllc1); + + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_GPIODIV, + ALT_CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET( + hoff_ptr->per_pll_gpiodiv)); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_EMACCTL, + hoff_ptr->per_pll_emacctl); + + + /* Take both PLL out of reset and power up */ + mmio_setbits_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLGLOB, + ALT_CLKMGR_MAINPLL_PLLGLOB_PD_SET_MSK | + ALT_CLKMGR_MAINPLL_PLLGLOB_RST_SET_MSK); + mmio_setbits_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLGLOB, + ALT_CLKMGR_PERPLL_PLLGLOB_PD_SET_MSK | + ALT_CLKMGR_PERPLL_PLLGLOB_RST_SET_MSK); + + wait_pll_lock(); + + /* Dividers for C2 to C9 only init after PLLs are lock. 
*/ + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_MPUCLK, 0xff); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_NOCCLK, 0xff); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR2CLK, 0xff); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR3CLK, 0xff); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR4CLK, 0xff); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR5CLK, 0xff); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR6CLK, 0xff); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR7CLK, 0xff); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR8CLK, 0xff); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR9CLK, 0xff); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR2CLK, 0xff); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR3CLK, 0xff); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR4CLK, 0xff); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR5CLK, 0xff); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR6CLK, 0xff); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR7CLK, 0xff); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR8CLK, 0xff); + + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_MPUCLK, + hoff_ptr->main_pll_mpuclk); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_NOCCLK, + hoff_ptr->main_pll_nocclk); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR2CLK, + hoff_ptr->main_pll_cntr2clk); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR3CLK, + hoff_ptr->main_pll_cntr3clk); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR4CLK, + hoff_ptr->main_pll_cntr4clk); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR5CLK, + hoff_ptr->main_pll_cntr5clk); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR6CLK, + hoff_ptr->main_pll_cntr6clk); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR7CLK, + hoff_ptr->main_pll_cntr7clk); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR8CLK, + hoff_ptr->main_pll_cntr8clk); + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR9CLK, + hoff_ptr->main_pll_cntr9clk); + + /* Peripheral PLL Clock Source and Counters/Divider */ + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR2CLK, + hoff_ptr->per_pll_cntr2clk); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR3CLK, + hoff_ptr->per_pll_cntr3clk); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR4CLK, + hoff_ptr->per_pll_cntr4clk); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR5CLK, + hoff_ptr->per_pll_cntr5clk); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR6CLK, + hoff_ptr->per_pll_cntr6clk); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR7CLK, + hoff_ptr->per_pll_cntr7clk); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR8CLK, + hoff_ptr->per_pll_cntr8clk); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_CNTR9CLK, + hoff_ptr->per_pll_cntr9clk); + + /* Take all PLLs out of bypass */ + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_BYPASS, 0); + wait_fsm(); + mmio_write_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_BYPASS, 0); + wait_fsm(); + + /* Set safe mode/ out of boot mode */ + mmio_clrbits_32(ALT_CLKMGR + ALT_CLKMGR_CTRL, + ALT_CLKMGR_CTRL_BOOTMODE_SET_MSK); + wait_fsm(); + + /* 10 Enable mainpllgrp's software-managed clock */ + mmio_write_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_EN, + ALT_CLKMGR_MAINPLL_EN_RESET); + mmio_write_32(ALT_CLKMGR_PERPLL + 
ALT_CLKMGR_PERPLL_EN, + ALT_CLKMGR_PERPLL_EN_RESET); + + /* Clear loss lock interrupt status register that */ + /* might be set during configuration */ + mmio_write_32(ALT_CLKMGR + ALT_CLKMGR_INTRCLR, + ALT_CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK | + ALT_CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK); + + /* Pass clock source frequency into scratch register */ + mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1), + hoff_ptr->hps_osc_clk_h); + mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2), + hoff_ptr->fpga_clk_hz); + +} + +/* Extract reference clock from platform clock source */ +uint32_t get_ref_clk(uint32_t pllglob) +{ + uint32_t data32, mdiv, refclkdiv, ref_clk; + uint32_t scr_reg; + + switch (ALT_CLKMGR_PSRC(pllglob)) { + case ALT_CLKMGR_PLLGLOB_PSRC_EOSC1: + scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_1); + ref_clk = mmio_read_32(scr_reg); + break; + case ALT_CLKMGR_PLLGLOB_PSRC_INTOSC: + ref_clk = ALT_CLKMGR_INTOSC_HZ; + break; + case ALT_CLKMGR_PLLGLOB_PSRC_F2S: + scr_reg = SOCFPGA_SYSMGR(BOOT_SCRATCH_COLD_2); + ref_clk = mmio_read_32(scr_reg); + break; + default: + ref_clk = 0; + assert(0); + break; + } + + refclkdiv = ALT_CLKMGR_MAINPLL_PLLGLOB_REFCLKDIV(pllglob); + data32 = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_FDBCK); + mdiv = ALT_CLKMGR_MAINPLL_FDBCK_MDIV(data32); + + ref_clk = (ref_clk / refclkdiv) * (6 + mdiv); + + return ref_clk; +} + +/* Calculate L3 interconnect main clock */ +uint32_t get_l3_clk(uint32_t ref_clk) +{ + uint32_t noc_base_clk, l3_clk, noc_clk, data32; + uint32_t pllc1_reg; + + noc_clk = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_NOCCLK); + + switch (ALT_CLKMGR_PSRC(noc_clk)) { + case ALT_CLKMGR_SRC_MAIN: + pllc1_reg = ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLC1; + break; + case ALT_CLKMGR_SRC_PER: + pllc1_reg = ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLC1; + break; + default: + pllc1_reg = 0; + assert(0); + break; + } + + data32 = mmio_read_32(pllc1_reg); + noc_base_clk = ref_clk / (data32 & 0xff); + l3_clk = noc_base_clk / (noc_clk + 1); + + return l3_clk; +} + +/* Calculate clock frequency to be used for watchdog timer */ +uint32_t get_wdt_clk(void) +{ + uint32_t data32, ref_clk, l3_clk, l4_sys_clk; + + data32 = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLGLOB); + ref_clk = get_ref_clk(data32); + + l3_clk = get_l3_clk(ref_clk); + + l4_sys_clk = l3_clk / 4; + + return l4_sys_clk; +} + +/* Calculate clock frequency to be used for UART driver */ +uint32_t get_uart_clk(void) +{ + uint32_t data32, ref_clk, l3_clk, l4_sp_clk; + + data32 = mmio_read_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLGLOB); + ref_clk = get_ref_clk(data32); + + l3_clk = get_l3_clk(ref_clk); + + data32 = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_NOCDIV); + data32 = (data32 >> 16) & 0x3; + data32 = 1 << data32; + + l4_sp_clk = (l3_clk / data32); + + return l4_sp_clk; +} + +/* Calculate clock frequency to be used for SDMMC driver */ +uint32_t get_mmc_clk(void) +{ + uint32_t data32, ref_clk, l3_clk, mmc_clk; + + data32 = mmio_read_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_PLLGLOB); + ref_clk = get_ref_clk(data32); + + l3_clk = get_l3_clk(ref_clk); + + data32 = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_CNTR6CLK); + mmc_clk = (l3_clk / (data32 + 1)) / 4; + + return mmc_clk; +} + +/* Get cpu freq clock */ +uint32_t get_cpu_clk(void) +{ + uint32_t data32, ref_clk, cpu_clk; + + data32 = mmio_read_32(ALT_CLKMGR_MAINPLL + ALT_CLKMGR_MAINPLL_PLLGLOB); + ref_clk = get_ref_clk(data32); + + cpu_clk = get_l3_clk(ref_clk)/PLAT_HZ_CONVERT_TO_MHZ; + + return 
cpu_clk; +} diff --git a/plat/intel/soc/stratix10/soc/s10_memory_controller.c b/plat/intel/soc/stratix10/soc/s10_memory_controller.c new file mode 100644 index 0000000..ac756ab --- /dev/null +++ b/plat/intel/soc/stratix10/soc/s10_memory_controller.c @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2019, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <arch_helpers.h> +#include <errno.h> +#include <lib/mmio.h> +#include <lib/utils.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <platform_def.h> +#include <string.h> + +#include "s10_memory_controller.h" +#include "socfpga_reset_manager.h" + +#define ALT_CCU_NOC_DI_SET_MSK 0x10 + +#define DDR_READ_LATENCY_DELAY 40 +#define MAX_MEM_CAL_RETRY 3 +#define PRE_CALIBRATION_DELAY 1 +#define POST_CALIBRATION_DELAY 1 +#define TIMEOUT_EMIF_CALIBRATION 1000 +#define CLEAR_EMIF_DELAY 1000 +#define CLEAR_EMIF_TIMEOUT 1000 + +#define DDR_CONFIG(A, B, C, R) (((A) << 24) | ((B) << 16) | ((C) << 8) | (R)) +#define DDR_CONFIG_ELEMENTS (sizeof(ddr_config)/sizeof(uint32_t)) + +/* tWR = Min. 15ns constant, see JEDEC standard eg. DDR4 is JESD79-4.pdf */ +#define tWR_IN_NS 15 + +void configure_hmc_adaptor_regs(void); +void configure_ddr_sched_ctrl_regs(void); + +/* The followring are the supported configurations */ +uint32_t ddr_config[] = { + /* DDR_CONFIG(Address order,Bank,Column,Row) */ + /* List for DDR3 or LPDDR3 (pinout order > chip, row, bank, column) */ + DDR_CONFIG(0, 3, 10, 12), + DDR_CONFIG(0, 3, 9, 13), + DDR_CONFIG(0, 3, 10, 13), + DDR_CONFIG(0, 3, 9, 14), + DDR_CONFIG(0, 3, 10, 14), + DDR_CONFIG(0, 3, 10, 15), + DDR_CONFIG(0, 3, 11, 14), + DDR_CONFIG(0, 3, 11, 15), + DDR_CONFIG(0, 3, 10, 16), + DDR_CONFIG(0, 3, 11, 16), + DDR_CONFIG(0, 3, 12, 15), /* 0xa */ + /* List for DDR4 only (pinout order > chip, bank, row, column) */ + DDR_CONFIG(1, 3, 10, 14), + DDR_CONFIG(1, 4, 10, 14), + DDR_CONFIG(1, 3, 10, 15), + DDR_CONFIG(1, 4, 10, 15), + DDR_CONFIG(1, 3, 10, 16), + DDR_CONFIG(1, 4, 10, 16), + DDR_CONFIG(1, 3, 10, 17), + DDR_CONFIG(1, 4, 10, 17), +}; + +static int match_ddr_conf(uint32_t ddr_conf) +{ + int i; + + for (i = 0; i < DDR_CONFIG_ELEMENTS; i++) { + if (ddr_conf == ddr_config[i]) + return i; + } + return 0; +} + +static int check_hmc_clk(void) +{ + unsigned long timeout = 0; + uint32_t hmc_clk; + + do { + hmc_clk = mmio_read_32(S10_SYSMGR_CORE_HMC_CLK); + if (hmc_clk & S10_SYSMGR_CORE_HMC_CLK_STATUS) + break; + udelay(1); + } while (++timeout < 1000); + if (timeout >= 1000) + return -ETIMEDOUT; + + return 0; +} + +static int clear_emif(void) +{ + uint32_t data; + unsigned long timeout; + + mmio_write_32(S10_MPFE_HMC_ADP_RSTHANDSHAKECTRL, 0); + + timeout = 0; + do { + data = mmio_read_32(S10_MPFE_HMC_ADP_RSTHANDSHAKESTAT); + if ((data & S10_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE) == 0) + break; + udelay(CLEAR_EMIF_DELAY); + } while (++timeout < CLEAR_EMIF_TIMEOUT); + if (timeout >= CLEAR_EMIF_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + +static int mem_calibration(void) +{ + int status = 0; + uint32_t data; + unsigned long timeout; + unsigned long retry = 0; + + udelay(PRE_CALIBRATION_DELAY); + + do { + if (retry != 0) + INFO("DDR: Retrying DRAM calibration\n"); + + timeout = 0; + do { + data = mmio_read_32(S10_MPFE_HMC_ADP_DDRCALSTAT); + if (S10_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 1) + break; + udelay(500); + } while (++timeout < TIMEOUT_EMIF_CALIBRATION); + + if (S10_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) { + status = clear_emif(); + if (status) + 
ERROR("Failed to clear Emif\n"); + } else { + break; + } + } while (++retry < MAX_MEM_CAL_RETRY); + + if (S10_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) { + ERROR("DDR: DRAM calibration failed.\n"); + status = -EIO; + } else { + INFO("DDR: DRAM calibration success.\n"); + status = 0; + } + + udelay(POST_CALIBRATION_DELAY); + + return status; +} + +int init_hard_memory_controller(void) +{ + int status; + + mmio_clrbits_32(S10_CCU_CPU0_MPRT_DDR, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM0, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM1A, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM1B, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM1C, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM1D, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_CPU0_MPRT_MEM1E, S10_CCU_NOC_DI_SET_MSK); + + mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM0, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM1A, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM1B, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM1C, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM1D, S10_CCU_NOC_DI_SET_MSK); + mmio_clrbits_32(S10_CCU_IOM_MPRT_MEM1E, S10_CCU_NOC_DI_SET_MSK); + + mmio_write_32(S10_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT, 0xFFFF0000); + mmio_write_32(S10_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT, 0x1F); + + mmio_write_32(S10_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT, 0xFFFF0000); + mmio_write_32(S10_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT, 0x1F); + mmio_write_32(S10_SOC_NOC_FW_DDR_SCR_ENABLE, BIT(0) | BIT(8)); + + status = check_hmc_clk(); + if (status) { + ERROR("DDR: Error, HMC clock not running\n"); + return status; + } + + mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), RSTMGR_FIELD(BRG, DDRSCH)); + + status = mem_calibration(); + if (status) { + ERROR("DDR: Memory Calibration Failed\n"); + return status; + } + + configure_hmc_adaptor_regs(); + configure_ddr_sched_ctrl_regs(); + + return 0; +} + +void configure_ddr_sched_ctrl_regs(void) +{ + uint32_t data, dram_addr_order, ddr_conf, bank, row, col, + rd_to_miss, wr_to_miss, burst_len, burst_len_ddr_clk, + burst_len_sched_clk, act_to_act, rd_to_wr, wr_to_rd, bw_ratio, + t_rtp, t_rp, t_rcd, rd_latency, tw_rin_clk_cycles, + bw_ratio_extended, auto_precharge = 0, act_to_act_bank, faw, + faw_bank, bus_rd_to_rd, bus_rd_to_wr, bus_wr_to_rd; + + INFO("Init HPS NOC's DDR Scheduler.\n"); + + data = mmio_read_32(S10_MPFE_IOHMC_CTRLCFG1); + dram_addr_order = S10_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(data); + + data = mmio_read_32(S10_MPFE_IOHMC_DRAMADDRW); + + col = IOHMC_DRAMADDRW_COL_ADDR_WIDTH(data); + row = IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(data); + bank = IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(data); + + ddr_conf = match_ddr_conf(DDR_CONFIG(dram_addr_order, bank, col, row)); + + if (ddr_conf) { + mmio_clrsetbits_32( + S10_MPFE_DDR_MAIN_SCHED_DDRCONF, + S10_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK, + S10_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(ddr_conf)); + } else { + ERROR("DDR: Cannot find predefined ddrConf configuration.\n"); + } + + mmio_write_32(S10_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data); + + data = mmio_read_32(S10_MPFE_IOHMC_DRAMTIMING0); + rd_latency = S10_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(data); + + data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING0); + act_to_act = ACT_TO_ACT(data); + t_rcd = ACT_TO_RDWR(data); + act_to_act_bank = ACT_TO_ACT_DIFF_BANK(data); + + data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING1); + rd_to_wr = 
RD_TO_WR(data); + bus_rd_to_rd = RD_TO_RD_DIFF_CHIP(data); + bus_rd_to_wr = RD_TO_WR_DIFF_CHIP(data); + + data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING2); + t_rtp = RD_TO_PCH(data); + + data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING3); + wr_to_rd = CALTIMING3_WR_TO_RD(data); + bus_wr_to_rd = CALTIMING3_WR_TO_RD_DIFF_CHIP(data); + + data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING4); + t_rp = PCH_TO_VALID(data); + + data = mmio_read_32(S10_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL)); + bw_ratio = ((HMC_ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 0 : 1); + + data = mmio_read_32(S10_MPFE_IOHMC_CTRLCFG0); + burst_len = HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(data); + burst_len_ddr_clk = burst_len / 2; + burst_len_sched_clk = ((burst_len/2) / 2); + + data = mmio_read_32(S10_MPFE_IOHMC_CTRLCFG0); + switch (S10_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(data)) { + case 1: + /* DDR4 - 1333MHz */ + /* 20 (19.995) clock cycles = 15ns */ + /* Calculate with rounding */ + tw_rin_clk_cycles = (((tWR_IN_NS * 1333) % 1000) >= 500) ? + ((tWR_IN_NS * 1333) / 1000) + 1 : + ((tWR_IN_NS * 1333) / 1000); + break; + default: + /* Others - 1066MHz or slower */ + /* 16 (15.990) clock cycles = 15ns */ + /* Calculate with rounding */ + tw_rin_clk_cycles = (((tWR_IN_NS * 1066) % 1000) >= 500) ? + ((tWR_IN_NS * 1066) / 1000) + 1 : + ((tWR_IN_NS * 1066) / 1000); + break; + } + + rd_to_miss = t_rtp + t_rp + t_rcd - burst_len_sched_clk; + wr_to_miss = ((rd_latency + burst_len_ddr_clk + 2 + tw_rin_clk_cycles) + / 2) - rd_to_wr + t_rp + t_rcd; + + mmio_write_32(S10_MPFE_DDR_MAIN_SCHED_DDRTIMING, + bw_ratio << DDRTIMING_BWRATIO_OFST | + wr_to_rd << DDRTIMING_WRTORD_OFST| + rd_to_wr << DDRTIMING_RDTOWR_OFST | + burst_len_sched_clk << DDRTIMING_BURSTLEN_OFST | + wr_to_miss << DDRTIMING_WRTOMISS_OFST | + rd_to_miss << DDRTIMING_RDTOMISS_OFST | + act_to_act << DDRTIMING_ACTTOACT_OFST); + + data = mmio_read_32(S10_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL)); + bw_ratio_extended = ((ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 1 : 0); + + mmio_write_32(S10_MPFE_DDR_MAIN_SCHED_DDRMODE, + bw_ratio_extended << DDRMODE_BWRATIOEXTENDED_OFST | + auto_precharge << DDRMODE_AUTOPRECHARGE_OFST); + + mmio_write_32(S10_MPFE_DDR_MAIN_SCHED_READLATENCY, + (rd_latency / 2) + DDR_READ_LATENCY_DELAY); + + data = mmio_read_32(S10_MPFE_IOHMC_CALTIMING9); + faw = S10_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(data); + + faw_bank = 1; // always 1 because we always have 4 bank DDR. 
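	/*
	 * Editorial worked example (not part of the patch): with tWR_IN_NS = 15
	 * and a DDR4 interface at 1333 MHz, 15 * 1333 = 19995 and the remainder
	 * 995 >= 500, so the rounding above yields tw_rin_clk_cycles = 19 + 1 =
	 * 20 clock cycles, matching the "20 (19.995) clock cycles = 15ns" note.
	 * Those cycles then feed wr_to_miss = ((rd_latency + burst_len_ddr_clk +
	 * 2 + 20) / 2) - rd_to_wr + t_rp + t_rcd, while rd_to_miss = t_rtp +
	 * t_rp + t_rcd - burst_len_sched_clk.
	 */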
+ + mmio_write_32(S10_MPFE_DDR_MAIN_SCHED_ACTIVATE, + faw_bank << S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST | + faw << S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST | + act_to_act_bank << S10_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST); + + mmio_write_32(S10_MPFE_DDR_MAIN_SCHED_DEVTODEV, + ((bus_rd_to_rd + << S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST) + & S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK) | + ((bus_rd_to_wr + << S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST) + & S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK) | + ((bus_wr_to_rd + << S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST) + & S10_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK)); + +} + +unsigned long get_physical_dram_size(void) +{ + uint32_t data; + unsigned long ram_addr_width, ram_ext_if_io_width; + + data = mmio_read_32(S10_MPFE_HMC_ADP_DDRIOCTRL); + switch (S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(data)) { + case 0: + ram_ext_if_io_width = 16; + break; + case 1: + ram_ext_if_io_width = 32; + break; + case 2: + ram_ext_if_io_width = 64; + break; + default: + ram_ext_if_io_width = 0; + break; + } + + data = mmio_read_32(S10_MPFE_IOHMC_REG_DRAMADDRW); + ram_addr_width = IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(data) + + IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(data); + + return (1 << ram_addr_width) * (ram_ext_if_io_width / 8); +} + + + +void configure_hmc_adaptor_regs(void) +{ + uint32_t data; + uint32_t dram_io_width; + + dram_io_width = S10_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0( + mmio_read_32(S10_MPFE_IOHMC_REG_NIOSRESERVE0_OFST)); + + dram_io_width = (dram_io_width & 0xFF) >> 5; + + mmio_clrsetbits_32(S10_MPFE_HMC_ADP_DDRIOCTRL, + S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK, + dram_io_width << S10_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST); + + mmio_write_32(S10_MPFE_HMC_ADP_HPSINTFCSEL, + S10_MPFE_HMC_ADP_HPSINTFCSEL_ENABLE); + + data = mmio_read_32(S10_MPFE_IOHMC_REG_CTRLCFG1); + if (data & (1 << S10_IOHMC_CTRLCFG1_ENABLE_ECC_OFST)) { + mmio_clrsetbits_32(S10_MPFE_HMC_ADP_ECCCTRL1, + S10_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK | + S10_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK | + S10_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK, + S10_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK | + S10_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK); + + mmio_clrsetbits_32(S10_MPFE_HMC_ADP_ECCCTRL2, + S10_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK | + S10_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK | + S10_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK, + S10_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK | + S10_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK); + + mmio_clrsetbits_32(S10_MPFE_HMC_ADP_ECCCTRL1, + S10_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK | + S10_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK | + S10_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK, + S10_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK); + INFO("Scrubbing ECC\n"); + + /* ECC Scrubbing */ + zeromem(DRAM_BASE, DRAM_SIZE); + } else { + INFO("ECC is disabled.\n"); + } +} + diff --git a/plat/intel/soc/stratix10/soc/s10_mmc.c b/plat/intel/soc/stratix10/soc/s10_mmc.c new file mode 100644 index 0000000..333bdd6 --- /dev/null +++ b/plat/intel/soc/stratix10/soc/s10_mmc.c @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2022, Intel Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include <lib/mmio.h> + +#include "s10_clock_manager.h" +#include "socfpga_system_manager.h" + +void s10_mmc_init(void) +{ + mmio_clrbits_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_EN, + ALT_CLKMGR_PERPLL_EN_SDMMCCLK); + mmio_write_32(SOCFPGA_SYSMGR(SDMMC), + SYSMGR_SDMMC_SMPLSEL(2) | SYSMGR_SDMMC_DRVSEL(3)); + mmio_setbits_32(ALT_CLKMGR_PERPLL + ALT_CLKMGR_PERPLL_EN, + ALT_CLKMGR_PERPLL_EN_SDMMCCLK); +} diff --git a/plat/intel/soc/stratix10/soc/s10_pinmux.c b/plat/intel/soc/stratix10/soc/s10_pinmux.c new file mode 100644 index 0000000..7fb4711 --- /dev/null +++ b/plat/intel/soc/stratix10/soc/s10_pinmux.c @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2019, Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> + +#include "s10_pinmux.h" + +const uint32_t sysmgr_pinmux_array_sel[] = { + 0x00000000, 0x00000001, /* usb */ + 0x00000004, 0x00000001, + 0x00000008, 0x00000001, + 0x0000000c, 0x00000001, + 0x00000010, 0x00000001, + 0x00000014, 0x00000001, + 0x00000018, 0x00000001, + 0x0000001c, 0x00000001, + 0x00000020, 0x00000001, + 0x00000024, 0x00000001, + 0x00000028, 0x00000001, + 0x0000002c, 0x00000001, + 0x00000030, 0x00000000, /* emac0 */ + 0x00000034, 0x00000000, + 0x00000038, 0x00000000, + 0x0000003c, 0x00000000, + 0x00000040, 0x00000000, + 0x00000044, 0x00000000, + 0x00000048, 0x00000000, + 0x0000004c, 0x00000000, + 0x00000050, 0x00000000, + 0x00000054, 0x00000000, + 0x00000058, 0x00000000, + 0x0000005c, 0x00000000, + 0x00000060, 0x00000008, /* gpio1 */ + 0x00000064, 0x00000008, + 0x00000068, 0x00000005, /* uart0 tx */ + 0x0000006c, 0x00000005, /* uart 0 rx */ + 0x00000070, 0x00000008, /* gpio */ + 0x00000074, 0x00000008, + 0x00000078, 0x00000004, /* i2c1 */ + 0x0000007c, 0x00000004, + 0x00000080, 0x00000007, /* jtag */ + 0x00000084, 0x00000007, + 0x00000088, 0x00000007, + 0x0000008c, 0x00000007, + 0x00000090, 0x00000001, /* sdmmc data0 */ + 0x00000094, 0x00000001, + 0x00000098, 0x00000001, + 0x0000009c, 0x00000001, + 0x00000100, 0x00000001, + 0x00000104, 0x00000001, /* sdmmc.data3 */ + 0x00000108, 0x00000008, /* loan */ + 0x0000010c, 0x00000008, /* gpio */ + 0x00000110, 0x00000008, + 0x00000114, 0x00000008, /* gpio1.io21 */ + 0x00000118, 0x00000005, /* mdio0.mdio */ + 0x0000011c, 0x00000005 /* mdio0.mdc */ +}; + +const uint32_t sysmgr_pinmux_array_ctrl[] = { + 0x00000000, 0x00502c38, /* Q1_1 */ + 0x00000004, 0x00102c38, + 0x00000008, 0x00502c38, + 0x0000000c, 0x00502c38, + 0x00000010, 0x00502c38, + 0x00000014, 0x00502c38, + 0x00000018, 0x00502c38, + 0x0000001c, 0x00502c38, + 0x00000020, 0x00502c38, + 0x00000024, 0x00502c38, + 0x00000028, 0x00502c38, + 0x0000002c, 0x00502c38, + 0x00000030, 0x00102c38, /* Q2_1 */ + 0x00000034, 0x00102c38, + 0x00000038, 0x00502c38, + 0x0000003c, 0x00502c38, + 0x00000040, 0x00102c38, + 0x00000044, 0x00102c38, + 0x00000048, 0x00502c38, + 0x0000004c, 0x00502c38, + 0x00000050, 0x00102c38, + 0x00000054, 0x00102c38, + 0x00000058, 0x00502c38, + 0x0000005c, 0x00502c38, + 0x00000060, 0x00502c38, /* Q3_1 */ + 0x00000064, 0x00502c38, + 0x00000068, 0x00102c38, + 0x0000006c, 0x00502c38, + 0x000000d0, 0x00502c38, + 0x000000d4, 0x00502c38, + 0x000000d8, 0x00542c38, + 0x000000dc, 0x00542c38, + 0x000000e0, 0x00502c38, + 0x000000e4, 0x00502c38, + 0x000000e8, 0x00102c38, + 0x000000ec, 0x00502c38, + 0x000000f0, 0x00502c38, /* Q4_1 */ + 0x000000f4, 0x00502c38, + 0x000000f8, 0x00102c38, + 0x000000fc, 0x00502c38, + 0x00000100, 0x00502c38, + 0x00000104, 0x00502c38, + 
0x00000108, 0x00102c38, + 0x0000010c, 0x00502c38, + 0x00000110, 0x00502c38, + 0x00000114, 0x00502c38, + 0x00000118, 0x00542c38, + 0x0000011c, 0x00102c38 +}; + +const uint32_t sysmgr_pinmux_array_fpga[] = { + 0x00000000, 0x00000000, + 0x00000004, 0x00000000, + 0x00000008, 0x00000000, + 0x0000000c, 0x00000000, + 0x00000010, 0x00000000, + 0x00000014, 0x00000000, + 0x00000018, 0x00000000, + 0x0000001c, 0x00000000, + 0x00000020, 0x00000000, + 0x00000028, 0x00000000, + 0x0000002c, 0x00000000, + 0x00000030, 0x00000000, + 0x00000034, 0x00000000, + 0x00000038, 0x00000000, + 0x0000003c, 0x00000000, + 0x00000040, 0x00000000, + 0x00000044, 0x00000000, + 0x00000048, 0x00000000, + 0x00000050, 0x00000000, + 0x00000054, 0x00000000, + 0x00000058, 0x0000002a +}; + +const uint32_t sysmgr_pinmux_array_iodelay[] = { + 0x00000000, 0x00000000, + 0x00000004, 0x00000000, + 0x00000008, 0x00000000, + 0x0000000c, 0x00000000, + 0x00000010, 0x00000000, + 0x00000014, 0x00000000, + 0x00000018, 0x00000000, + 0x0000001c, 0x00000000, + 0x00000020, 0x00000000, + 0x00000024, 0x00000000, + 0x00000028, 0x00000000, + 0x0000002c, 0x00000000, + 0x00000030, 0x00000000, + 0x00000034, 0x00000000, + 0x00000038, 0x00000000, + 0x0000003c, 0x00000000, + 0x00000040, 0x00000000, + 0x00000044, 0x00000000, + 0x00000048, 0x00000000, + 0x0000004c, 0x00000000, + 0x00000050, 0x00000000, + 0x00000054, 0x00000000, + 0x00000058, 0x00000000, + 0x0000005c, 0x00000000, + 0x00000060, 0x00000000, + 0x00000064, 0x00000000, + 0x00000068, 0x00000000, + 0x0000006c, 0x00000000, + 0x00000070, 0x00000000, + 0x00000074, 0x00000000, + 0x00000078, 0x00000000, + 0x0000007c, 0x00000000, + 0x00000080, 0x00000000, + 0x00000084, 0x00000000, + 0x00000088, 0x00000000, + 0x0000008c, 0x00000000, + 0x00000090, 0x00000000, + 0x00000094, 0x00000000, + 0x00000098, 0x00000000, + 0x0000009c, 0x00000000, + 0x00000100, 0x00000000, + 0x00000104, 0x00000000, + 0x00000108, 0x00000000, + 0x0000010c, 0x00000000, + 0x00000110, 0x00000000, + 0x00000114, 0x00000000, + 0x00000118, 0x00000000, + 0x0000011c, 0x00000000 +}; + +void config_pinmux(handoff *hoff_ptr) +{ + unsigned int i; + + for (i = 0; i < 96; i += 2) { + mmio_write_32(S10_PINMUX_PIN0SEL + + hoff_ptr->pinmux_sel_array[i], + hoff_ptr->pinmux_sel_array[i+1]); + } + + for (i = 0; i < 96; i += 2) { + mmio_write_32(S10_PINMUX_IO0CTRL + + hoff_ptr->pinmux_io_array[i], + hoff_ptr->pinmux_io_array[i+1]); + } + + for (i = 0; i < 42; i += 2) { + mmio_write_32(S10_PINMUX_PINMUX_EMAC0_USEFPGA + + hoff_ptr->pinmux_fpga_array[i], + hoff_ptr->pinmux_fpga_array[i+1]); + } + + for (i = 0; i < 96; i += 2) { + mmio_write_32(S10_PINMUX_IO0_DELAY + + hoff_ptr->pinmux_iodelay_array[i], + hoff_ptr->pinmux_iodelay_array[i+1]); + } + +} + |
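
A note on the tWR conversion in configure_ddr_sched_ctrl_regs() above: the ternary expression rounds a nanosecond write-recovery time to the nearest cycle of the memory clock (1333 MHz for DDR4, 1066 MHz otherwise). The sketch below restates that arithmetic as a standalone helper; the helper name ns_to_cycles() and the 15 ns value are assumptions taken from the in-line comments, not something added by this patch.

	/* Sketch only: round-to-nearest conversion of a nanosecond delay into
	 * clock cycles at freq_mhz. ns_to_cycles() is a hypothetical helper,
	 * not a function introduced by the patch above. */
	static unsigned int ns_to_cycles(unsigned int t_ns, unsigned int freq_mhz)
	{
		unsigned int scaled = t_ns * freq_mhz;	/* e.g. 15 * 1333 = 19995 */

		return ((scaled % 1000U) >= 500U) ? (scaled / 1000U) + 1U :
						    (scaled / 1000U);
	}

	/* ns_to_cycles(15, 1333) == 20 and ns_to_cycles(15, 1066) == 16, matching
	 * the "20 (19.995)" and "16 (15.990)" figures in the comments above. */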
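get_physical_dram_size() above derives the DRAM size as (1 << total_address_width) * (external_interface_width / 8). A worked example under assumed register field values (nothing here is read from hardware):

	/* Illustrative only: the widths below are hypothetical; at runtime they
	 * are decoded from S10_MPFE_IOHMC_REG_DRAMADDRW and the DDRIOCTRL
	 * IO_SIZE field. */
	static unsigned long example_dram_size(void)
	{
		unsigned long col = 10, row = 15, bank = 2, bank_group = 1, cs = 1;
		unsigned long addr_width = col + row + bank + bank_group + cs;	/* 29 */
		unsigned long io_width_bits = 32;	/* IO_SIZE field == 1 */

		return (1UL << addr_width) * (io_width_bits / 8U);
	}

With these numbers the result is (1 << 29) * 4 = 2147483648 bytes, i.e. 2 GiB.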
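The pinmux tables in s10_pinmux.c are flat arrays of (register offset, value) pairs, and config_pinmux() walks the corresponding handoff arrays two entries at a time, writing each value to base + offset. A minimal sketch of that pattern, with write_pairs() as a hypothetical helper used purely for illustration:

	#include <stdint.h>

	#include <lib/mmio.h>

	/* Hypothetical helper, not part of the patch: program a block of
	 * registers from an array of (offset, value) pairs. */
	static void write_pairs(uintptr_t base, const uint32_t *table,
				unsigned int entries)
	{
		unsigned int i;

		for (i = 0; i < entries; i += 2)
			mmio_write_32(base + table[i], table[i + 1]);
	}

	/* The first loop of config_pinmux() is then equivalent to:
	 * write_pairs(S10_PINMUX_PIN0SEL, hoff_ptr->pinmux_sel_array, 96);
	 * i.e. 48 (offset, value) pairs taken from the handoff data. */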