Diffstat
66 files changed, 9312 insertions(+), 0 deletions(-)
diff --git a/plat/imx/imx8m/ddr/clock.c b/plat/imx/imx8m/ddr/clock.c new file mode 100644 index 0000000..31f2f56 --- /dev/null +++ b/plat/imx/imx8m/ddr/clock.c @@ -0,0 +1,146 @@ +/* + * Copyright 2018-2023 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <lib/mmio.h> +#include <platform_def.h> + +#define IMX_CCM_IP_BASE (IMX_CCM_BASE + 0xa000) +#define DRAM_SEL_CFG (IMX_CCM_BASE + 0x9800) +#define CCM_IP_CLK_ROOT_GEN_TAGET(i) (IMX_CCM_IP_BASE + 0x80 * (i) + 0x00) +#define CCM_IP_CLK_ROOT_GEN_TAGET_SET(i) (IMX_CCM_IP_BASE + 0x80 * (i) + 0x04) +#define CCM_IP_CLK_ROOT_GEN_TAGET_CLR(i) (IMX_CCM_IP_BASE + 0x80 * (i) + 0x08) +#define PLL_FREQ_800M U(0x00ece580) +#define PLL_FREQ_400M U(0x00ec6984) +#define PLL_FREQ_167M U(0x00f5a406) + +void ddr_pll_bypass_100mts(void) +{ + /* change the clock source of dram_alt_clk_root to source 2 --100MHz */ + mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_CLR(0), (0x7 << 24) | (0x7 << 16)); + mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_SET(0), (0x2 << 24)); + + /* change the clock source of dram_apb_clk_root to source 2 --40MHz/2 */ + mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_CLR(1), (0x7 << 24) | (0x7 << 16)); + mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_SET(1), (0x2 << 24) | (0x1 << 16)); + + /* configure pll bypass mode */ + mmio_write_32(DRAM_SEL_CFG + 0x4, BIT(24)); +} + +void ddr_pll_bypass_400mts(void) +{ + /* change the clock source of dram_alt_clk_root to source 1 --400MHz */ + mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_CLR(0), (0x7 << 24) | (0x7 << 16)); + mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_SET(0), (0x1 << 24) | (0x1 << 16)); + + /* change the clock source of dram_apb_clk_root to source 3 --160MHz/2 */ + mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_CLR(1), (0x7 << 24) | (0x7 << 16)); + mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_SET(1), (0x3 << 24) | (0x1 << 16)); + + /* configure pll bypass mode */ + mmio_write_32(DRAM_SEL_CFG + 0x4, BIT(24)); +} + +void ddr_pll_unbypass(void) +{ + mmio_write_32(DRAM_SEL_CFG + 0x8, BIT(24)); + mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_CLR(1), (0x7 << 24) | (0x7 << 16)); + /* to source 4 --800MHz/5 */ + mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_SET(1), (0x4 << 24) | (0x4 << 16)); +} + +#if defined(PLAT_imx8mq) +void dram_pll_init(unsigned int drate) +{ + /* bypass the PLL */ + mmio_setbits_32(HW_DRAM_PLL_CFG0, 0x30); + + switch (drate) { + case 3200: + mmio_write_32(HW_DRAM_PLL_CFG2, PLL_FREQ_800M); + break; + case 1600: + mmio_write_32(HW_DRAM_PLL_CFG2, PLL_FREQ_400M); + break; + case 667: + mmio_write_32(HW_DRAM_PLL_CFG2, PLL_FREQ_167M); + break; + default: + break; + } + + /* unbypass the PLL */ + mmio_clrbits_32(HW_DRAM_PLL_CFG0, 0x30); + while (!(mmio_read_32(HW_DRAM_PLL_CFG0) & BIT(31))) { + ; + } +} +#else +void dram_pll_init(unsigned int drate) +{ + /* bypass the PLL */ + mmio_setbits_32(DRAM_PLL_CTRL, (1 << 16)); + mmio_clrbits_32(DRAM_PLL_CTRL, (1 << 9)); + + switch (drate) { + case 4000: + mmio_write_32(DRAM_PLL_CTRL + 0x4, (250 << 12) | (3 << 4) | 1); + break; + case 3733: + case 3732: + mmio_write_32(DRAM_PLL_CTRL + 0x4, (311 << 12) | (4 << 4) | 1); + break; + case 3200: + mmio_write_32(DRAM_PLL_CTRL + 0x4, (200 << 12) | (3 << 4) | 1); + break; + case 2400: + mmio_write_32(DRAM_PLL_CTRL + 0x4, (300 << 12) | (3 << 4) | 2); + break; + case 1600: + mmio_write_32(DRAM_PLL_CTRL + 0x4, (400 << 12) | (3 << 4) | 3); + break; + case 1066: + mmio_write_32(DRAM_PLL_CTRL + 0x4, (266 << 12) | (3 << 4) | 3); + break; + case 667: + mmio_write_32(DRAM_PLL_CTRL + 0x4, (334 << 12) | (3 << 4) | 4); + 
break; + default: + break; + } + + mmio_setbits_32(DRAM_PLL_CTRL, BIT(9)); + /* wait for PLL locked */ + while (!(mmio_read_32(DRAM_PLL_CTRL) & BIT(31))) { + ; + } + + /* unbypass the PLL */ + mmio_clrbits_32(DRAM_PLL_CTRL, BIT(16)); +} +#endif + +/* change the dram clock frequency */ +void dram_clock_switch(unsigned int target_drate, bool bypass_mode) +{ + if (bypass_mode) { + switch (target_drate) { + case 400: + ddr_pll_bypass_400mts(); + break; + case 100: + ddr_pll_bypass_100mts(); + break; + default: + ddr_pll_unbypass(); + break; + } + } else { + dram_pll_init(target_drate); + } +} diff --git a/plat/imx/imx8m/ddr/ddr4_dvfs.c b/plat/imx/imx8m/ddr/ddr4_dvfs.c new file mode 100644 index 0000000..94bfaba --- /dev/null +++ b/plat/imx/imx8m/ddr/ddr4_dvfs.c @@ -0,0 +1,263 @@ +/* + * Copyright 2018-2023 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include <dram.h> + +void ddr4_mr_write(uint32_t mr, uint32_t data, uint32_t mr_type, + uint32_t rank, uint32_t dram_type) +{ + uint32_t val, mr_mirror, data_mirror; + + /* + * 1. Poll MRSTAT.mr_wr_busy until it is 0 to make sure + * that there is no outstanding MR transAction. + */ + + /* + * ERR050712: + * When performing a software driven MR access, the following sequence + * must be done automatically before performing other APB register accesses. + * 1. Set MRCTRL0.mr_wr=1 + * 2. Check for MRSTAT.mr_wr_busy=0. If not, go to step (2) + * 3. Check for MRSTAT.mr_wr_busy=0 again (for the second time). If not, go to step (2) + */ + mmio_setbits_32(DDRC_MRCTRL0(0), BIT(31)); + + do { + while (mmio_read_32(DDRC_MRSTAT(0)) & 0x1) { + ; + } + + } while (mmio_read_32(DDRC_MRSTAT(0)) & 0x1); + + /* + * 2. Write the MRCTRL0.mr_type, MRCTRL0.mr_addr, MRCTRL0.mr_rank + * and (for MRWs) MRCTRL1.mr_data to define the MR transaction. + */ + val = mmio_read_32(DDRC_DIMMCTL(0)); + if ((val & 0x2) && (rank == 0x2)) { + mr_mirror = (mr & 0x4) | ((mr & 0x1) << 1) | ((mr & 0x2) >> 1); /* BA0, BA1 swap */ + if (dram_type == DDRC_DDR4) { + data_mirror = (data & 0x1607) | ((data & 0x8) << 1) | ((data & 0x10) >> 1) | + ((data & 0x20) << 1) | ((data & 0x40) >> 1) | ((data & 0x80) << 1) | + ((data & 0x100) >> 1) | ((data & 0x800) << 2) | ((data & 0x2000) >> 2) ; + } else { + data_mirror = (data & 0xfe07) | ((data & 0x8) << 1) | ((data & 0x10) >> 1) | + ((data & 0x20) << 1) | ((data & 0x40) >> 1) | ((data & 0x80) << 1) | + ((data & 0x100) >> 1); + } + } else { + mr_mirror = mr; + data_mirror = data; + } + + mmio_write_32(DDRC_MRCTRL0(0), mr_type | (mr_mirror << 12) | (rank << 4)); + mmio_write_32(DDRC_MRCTRL1(0), data_mirror); + + /* + * 3. In a separate APB transaction, write the MRCTRL0.mr_wr to 1. + * This bit is self-clearing, and triggers the MR transaction. + * The uMCTL2 then asserts the MRSTAT.mr_wr_busy while it performs + * the MR transaction to SDRAM, and no further accesses can be + * initiated until it is deasserted. + */ + mmio_setbits_32(DDRC_MRCTRL0(0), BIT(31)); + + while (mmio_read_32(DDRC_MRSTAT(0))) { + ; + } +} + +void dram_cfg_all_mr(struct dram_info *info, uint32_t pstate) +{ + uint32_t num_rank = info->num_rank; + uint32_t dram_type = info->dram_type; + /* + * 15. Perform MRS commands as required to re-program + * timing registers in the SDRAM for the new frequency + * (in particular, CL, CWL and WR may need to be changed). 
+ */ + + for (int i = 1; i <= num_rank; i++) { + for (int j = 0; j < 6; j++) { + ddr4_mr_write(j, info->mr_table[pstate][j], 0, i, dram_type); + } + ddr4_mr_write(6, info->mr_table[pstate][7], 0, i, dram_type); + } +} + +void sw_pstate(uint32_t pstate, uint32_t drate) +{ + uint32_t val; + + mmio_write_32(DDRC_SWCTL(0), 0x0); + + /* + * Update any registers which may be required to + * change for the new frequency. + */ + mmio_write_32(DDRC_MSTR2(0), pstate); + mmio_setbits_32(DDRC_MSTR(0), (0x1 << 29)); + + /* + * Toggle RFSHCTL3.refresh_update_level to allow the + * new refresh-related register values to propagate + * to the refresh logic. + */ + val = mmio_read_32(DDRC_RFSHCTL3(0)); + if (val & 0x2) { + mmio_write_32(DDRC_RFSHCTL3(0), val & 0xFFFFFFFD); + } else { + mmio_write_32(DDRC_RFSHCTL3(0), val | 0x2); + } + + /* + * 19. If required, trigger the initialization in the PHY. + * If using the gen2 multiPHY, PLL initialization should + * be triggered at this point. See the PHY databook for + * details about the frequency change procedure. + */ + mmio_write_32(DDRC_DFIMISC(0), 0x00000000 | (pstate << 8)); + mmio_write_32(DDRC_DFIMISC(0), 0x00000020 | (pstate << 8)); + + /* wait DFISTAT.dfi_init_complete to 0 */ + while (mmio_read_32(DDRC_DFISTAT(0)) & 0x1) { + ; + } + + /* change the clock to the target frequency */ + dram_clock_switch(drate, false); + + mmio_write_32(DDRC_DFIMISC(0), 0x00000000 | (pstate << 8)); + + /* wait DFISTAT.dfi_init_complete to 1 */ + while (!(mmio_read_32(DDRC_DFISTAT(0)) & 0x1)) { + ; + } + + /* + * When changing frequencies the controller may violate the JEDEC + * requirement that no more than 16 refreshes should be issued within + * 2*tREFI. These extra refreshes are not expected to cause a problem + * in the SDRAM. This issue can be avoided by waiting for at least 2*tREFI + * before exiting self-refresh in step 19. + */ + udelay(14); + + /* 14. Exit the self-refresh state by setting PWRCTL.selfref_sw = 0. */ + mmio_clrbits_32(DDRC_PWRCTL(0), (1 << 5)); + + while ((mmio_read_32(DDRC_STAT(0)) & 0x3f) == 0x23) { + ; + } +} + +void ddr4_swffc(struct dram_info *info, unsigned int pstate) +{ + uint32_t drate = info->timing_info->fsp_table[pstate]; + + /* + * 1. set SWCTL.sw_done to disable quasi-dynamic register + * programming outside reset. + */ + mmio_write_32(DDRC_SWCTL(0), 0x0); + + /* + * 2. Write 0 to PCTRL_n.port_en. This blocks AXI port(s) + * from taking any transaction (blocks traffic on AXI ports). + */ + mmio_write_32(DDRC_PCTRL_0(0), 0x0); + + /* + * 3. Poll PSTAT.rd_port_busy_n=0 and PSTAT.wr_port_busy_n=0. + * Wait until all AXI ports are idle (the uMCTL2 core has to + * be idle). + */ + while (mmio_read_32(DDRC_PSTAT(0)) & 0x10001) { + ; + } + + /* + * 4. Write 0 to SBRCTL.scrub_en. Disable SBR, required only if + * SBR instantiated. + * 5. Poll SBRSTAT.scrub_busy=0. + * 6. Set DERATEEN.derate_enable = 0, if DERATEEN.derate_eanble = 1 + * and the read latency (RL) value needs to change after the frequency + * change (LPDDR2/3/4 only). + * 7. Set DBG1.dis_hif=1 so that no new commands will be accepted by the uMCTL2. + */ + mmio_setbits_32(DDRC_DBG1(0), (0x1 << 1)); + + /* + * 8. Poll DBGCAM.dbg_wr_q_empty and DBGCAM.dbg_rd_q_empty to ensure + * that write and read data buffers are empty. + */ + while ((mmio_read_32(DDRC_DBGCAM(0)) & 0x06000000) != 0x06000000) { + ; + } + + /* + * 9. For DDR4, update MR6 with the new tDLLK value via the Mode + * Register Write signals + * 10. 
Set DFILPCFG0.dfi_lp_en_sr = 0, if DFILPCFG0.dfi_lp_en_sr = 1, + * and wait until DFISTAT.dfi_lp_ack + * 11. If DFI PHY Master interface is active in uMCTL2, then disable it + * 12. Wait until STAT.operating_mode[1:0]!=11 indicating that the + * controller is not in self-refresh mode. + */ + if ((mmio_read_32(DDRC_STAT(0)) & 0x3) == 0x3) { + VERBOSE("DRAM is in Self Refresh\n"); + } + + /* + * 13. Assert PWRCTL.selfref_sw for the DWC_ddr_umctl2 core to enter + * the self-refresh mode. + */ + mmio_setbits_32(DDRC_PWRCTL(0), (1 << 5)); + + /* + * 14. Wait until STAT.operating_mode[1:0]==11 indicating that the + * controller core is in self-refresh mode. + */ + while ((mmio_read_32(DDRC_STAT(0)) & 0x3f) != 0x23) { + ; + } + + sw_pstate(pstate, drate); + dram_cfg_all_mr(info, pstate); + + /* 23. Enable HIF commands by setting DBG1.dis_hif=0. */ + mmio_clrbits_32(DDRC_DBG1(0), (0x1 << 1)); + + /* + * 24. Reset DERATEEN.derate_enable = 1 if DERATEEN.derate_enable + * has been set to 0 in step 6. + * 25. If DFI PHY Master interface was active before step 11 then + * enable it back by programming DFIPHYMSTR.phymstr_en = 1'b1. + * 26. Write 1 to PCTRL_n.port_en. AXI port(s) are no longer blocked + * from taking transactions (Re-enable traffic on AXI ports) + */ + mmio_write_32(DDRC_PCTRL_0(0), 0x1); + + /* + * 27. Write 1 to SBRCTL.scrub_en. Enable SBR if desired, only + * required if SBR instantiated. + */ + + /* + * set SWCTL.sw_done to enable quasi-dynamic register programming + * outside reset. + */ + mmio_write_32(DDRC_SWCTL(0), 0x1); + + /* wait SWSTAT.sw_done_ack to 1 */ + while (!(mmio_read_32(DDRC_SWSTAT(0)) & 0x1)) { + ; + } +} diff --git a/plat/imx/imx8m/ddr/dram.c b/plat/imx/imx8m/ddr/dram.c new file mode 100644 index 0000000..b5f6973 --- /dev/null +++ b/plat/imx/imx8m/ddr/dram.c @@ -0,0 +1,388 @@ +/* + * Copyright 2019-2023 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <bl31/interrupt_mgmt.h> +#include <common/runtime_svc.h> +#include <lib/mmio.h> +#include <lib/spinlock.h> +#include <plat/common/platform.h> + +#include <dram.h> +#include <gpc.h> + +#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10 +#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11 + +struct dram_info dram_info; + +/* lock used for DDR DVFS */ +spinlock_t dfs_lock; + +#if defined(PLAT_imx8mq) +/* ocram used to dram timing */ +static uint8_t dram_timing_saved[13 * 1024] __aligned(8); +#endif + +static volatile uint32_t wfe_done; +static volatile bool wait_ddrc_hwffc_done = true; +static unsigned int dev_fsp = 0x1; + +static uint32_t fsp_init_reg[3][4] = { + { DDRC_INIT3(0), DDRC_INIT4(0), DDRC_INIT6(0), DDRC_INIT7(0) }, + { DDRC_FREQ1_INIT3(0), DDRC_FREQ1_INIT4(0), DDRC_FREQ1_INIT6(0), DDRC_FREQ1_INIT7(0) }, + { DDRC_FREQ2_INIT3(0), DDRC_FREQ2_INIT4(0), DDRC_FREQ2_INIT6(0), DDRC_FREQ2_INIT7(0) }, +}; + +#if defined(PLAT_imx8mq) +static inline struct dram_cfg_param *get_cfg_ptr(void *ptr, + void *old_base, void *new_base) +{ + uintptr_t offset = (uintptr_t)ptr & ~((uintptr_t)old_base); + + return (struct dram_cfg_param *)(offset + new_base); +} + +/* copy the dram timing info from DRAM to OCRAM */ +void imx8mq_dram_timing_copy(struct dram_timing_info *from) +{ + struct dram_timing_info *info = (struct dram_timing_info *)dram_timing_saved; + + /* copy the whole 13KB content used for dram timing info */ + memcpy(dram_timing_saved, from, sizeof(dram_timing_saved)); + + /* correct the header after copied into ocram */ + info->ddrc_cfg = get_cfg_ptr(info->ddrc_cfg, from, dram_timing_saved); + info->ddrphy_cfg = 
get_cfg_ptr(info->ddrphy_cfg, from, dram_timing_saved); + info->ddrphy_trained_csr = get_cfg_ptr(info->ddrphy_trained_csr, from, dram_timing_saved); + info->ddrphy_pie = get_cfg_ptr(info->ddrphy_pie, from, dram_timing_saved); +} +#endif + +#if defined(PLAT_imx8mp) +static uint32_t lpddr4_mr_read(unsigned int mr_rank, unsigned int mr_addr) +{ + unsigned int tmp, drate_byte; + + tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0)); + mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), tmp | 0x1); + do { + tmp = mmio_read_32(DDRC_MRSTAT(0)); + } while (tmp & 0x1); + + mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | 0x1); + mmio_write_32(DDRC_MRCTRL1(0), (mr_addr << 8)); + mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | BIT(31) | 0x1); + + /* Workaround for SNPS STAR 9001549457 */ + do { + tmp = mmio_read_32(DDRC_MRSTAT(0)); + } while (tmp & 0x1); + + do { + tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0)); + } while (!(tmp & 0x8)); + tmp = mmio_read_32(DRC_PERF_MON_MRR1_DAT(0)); + + drate_byte = (mmio_read_32(DDRC_DERATEEN(0)) >> 4) & 0xff; + tmp = (tmp >> (drate_byte * 8)) & 0xff; + mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), 0x4); + + return tmp; +} +#endif + +static void get_mr_values(uint32_t (*mr_value)[8]) +{ + uint32_t init_val; + unsigned int i, fsp_index; + + for (fsp_index = 0U; fsp_index < 3U; fsp_index++) { + for (i = 0U; i < 4U; i++) { + init_val = mmio_read_32(fsp_init_reg[fsp_index][i]); + mr_value[fsp_index][2*i] = init_val >> 16; + mr_value[fsp_index][2*i + 1] = init_val & 0xFFFF; + } + +#if defined(PLAT_imx8mp) + if (dram_info.dram_type == DDRC_LPDDR4) { + mr_value[fsp_index][5] = lpddr4_mr_read(1, MR12); /* read MR12 from DRAM */ + mr_value[fsp_index][7] = lpddr4_mr_read(1, MR14); /* read MR14 from DRAM */ + } +#endif + } +} + +static void save_rank_setting(void) +{ + uint32_t i, offset; + uint32_t pstate_num = dram_info.num_fsp; + + /* only support maximum 3 setpoints */ + pstate_num = (pstate_num > MAX_FSP_NUM) ? MAX_FSP_NUM : pstate_num; + + for (i = 0U; i < pstate_num; i++) { + offset = i ? 
(i + 1) * 0x1000 : 0U; + dram_info.rank_setting[i][0] = mmio_read_32(DDRC_DRAMTMG2(0) + offset); + if (dram_info.dram_type != DDRC_LPDDR4) { + dram_info.rank_setting[i][1] = mmio_read_32(DDRC_DRAMTMG9(0) + offset); + } +#if !defined(PLAT_imx8mq) + dram_info.rank_setting[i][2] = mmio_read_32(DDRC_RANKCTL(0) + offset); +#endif + } +#if defined(PLAT_imx8mq) + dram_info.rank_setting[0][2] = mmio_read_32(DDRC_RANKCTL(0)); +#endif +} +/* Restore the ddrc configs */ +void dram_umctl2_init(struct dram_timing_info *timing) +{ + struct dram_cfg_param *ddrc_cfg = timing->ddrc_cfg; + unsigned int i; + + for (i = 0U; i < timing->ddrc_cfg_num; i++) { + mmio_write_32(ddrc_cfg->reg, ddrc_cfg->val); + ddrc_cfg++; + } + + /* set the default fsp to P0 */ + mmio_write_32(DDRC_MSTR2(0), 0x0); +} + +/* Restore the dram PHY config */ +void dram_phy_init(struct dram_timing_info *timing) +{ + struct dram_cfg_param *cfg = timing->ddrphy_cfg; + unsigned int i; + + /* Restore the PHY init config */ + cfg = timing->ddrphy_cfg; + for (i = 0U; i < timing->ddrphy_cfg_num; i++) { + dwc_ddrphy_apb_wr(cfg->reg, cfg->val); + cfg++; + } + + /* Restore the DDR PHY CSRs */ + cfg = timing->ddrphy_trained_csr; + for (i = 0U; i < timing->ddrphy_trained_csr_num; i++) { + dwc_ddrphy_apb_wr(cfg->reg, cfg->val); + cfg++; + } + + /* Load the PIE image */ + cfg = timing->ddrphy_pie; + for (i = 0U; i < timing->ddrphy_pie_num; i++) { + dwc_ddrphy_apb_wr(cfg->reg, cfg->val); + cfg++; + } +} + +/* EL3 SGI-8 IPI handler for DDR Dynamic frequency scaling */ +static uint64_t waiting_dvfs(uint32_t id, uint32_t flags, + void *handle, void *cookie) +{ + uint64_t mpidr = read_mpidr_el1(); + unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr); + uint32_t irq; + + irq = plat_ic_acknowledge_interrupt(); + if (irq < 1022U) { + plat_ic_end_of_interrupt(irq); + } + + /* set the WFE done status */ + spin_lock(&dfs_lock); + wfe_done |= (1 << cpu_id * 8); + dsb(); + spin_unlock(&dfs_lock); + + while (1) { + /* ddr frequency change done */ + if (!wait_ddrc_hwffc_done) + break; + + wfe(); + } + + return 0; +} + +void dram_info_init(unsigned long dram_timing_base) +{ + uint32_t ddrc_mstr, current_fsp; + unsigned int idx = 0; + uint32_t flags = 0; + uint32_t rc; + unsigned int i; + + /* Get the dram type & rank */ + ddrc_mstr = mmio_read_32(DDRC_MSTR(0)); + + dram_info.dram_type = ddrc_mstr & DDR_TYPE_MASK; + dram_info.num_rank = ((ddrc_mstr >> 24) & ACTIVE_RANK_MASK) == 0x3 ? + DDRC_ACTIVE_TWO_RANK : DDRC_ACTIVE_ONE_RANK; + + /* Get current fsp info */ + current_fsp = mmio_read_32(DDRC_DFIMISC(0)); + current_fsp = (current_fsp >> 8) & 0xf; + dram_info.boot_fsp = current_fsp; + dram_info.current_fsp = current_fsp; + +#if defined(PLAT_imx8mq) + imx8mq_dram_timing_copy((struct dram_timing_info *)dram_timing_base); + dram_timing_base = (unsigned long) dram_timing_saved; +#endif + get_mr_values(dram_info.mr_table); + + dram_info.timing_info = (struct dram_timing_info *)dram_timing_base; + + /* get the num of supported fsp */ + for (i = 0U; i < 4U; ++i) { + if (!dram_info.timing_info->fsp_table[i]) { + break; + } + idx = i; + } + + /* only support maximum 3 setpoints */ + dram_info.num_fsp = (i > MAX_FSP_NUM) ? 
MAX_FSP_NUM : i; + + /* no valid fsp table, return directly */ + if (i == 0U) { + return; + } + + /* save the DRAMTMG2/9 for rank to rank workaround */ + save_rank_setting(); + + /* check if has bypass mode support */ + if (dram_info.timing_info->fsp_table[idx] < 666) { + dram_info.bypass_mode = true; + } else { + dram_info.bypass_mode = false; + } + + /* Register the EL3 handler for DDR DVFS */ + set_interrupt_rm_flag(flags, NON_SECURE); + rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags); + if (rc != 0) { + panic(); + } + + if (dram_info.dram_type == DDRC_LPDDR4 && current_fsp != 0x0) { + /* flush the L1/L2 cache */ + dcsw_op_all(DCCSW); + lpddr4_swffc(&dram_info, dev_fsp, 0x0); + dev_fsp = (~dev_fsp) & 0x1; + } else if (current_fsp != 0x0) { + /* flush the L1/L2 cache */ + dcsw_op_all(DCCSW); + ddr4_swffc(&dram_info, 0x0); + } +} + +/* + * For each freq return the following info: + * + * r1: data rate + * r2: 1 + dram_core parent + * r3: 1 + dram_alt parent index + * r4: 1 + dram_apb parent index + * + * The parent indices can be used by an OS who manages source clocks to enabled + * them ahead of the switch. + * + * A parent value of "0" means "don't care". + * + * Current implementation of freq switch is hardcoded in + * plat/imx/common/imx8m/clock.c but in theory this can be enhanced to support + * a wide variety of rates. + */ +int dram_dvfs_get_freq_info(void *handle, u_register_t index) +{ + switch (index) { + case 0: + SMC_RET4(handle, dram_info.timing_info->fsp_table[0], + 1, 0, 5); + case 1: + if (!dram_info.bypass_mode) { + SMC_RET4(handle, dram_info.timing_info->fsp_table[1], + 1, 0, 0); + } + SMC_RET4(handle, dram_info.timing_info->fsp_table[1], + 2, 2, 4); + case 2: + if (!dram_info.bypass_mode) { + SMC_RET4(handle, dram_info.timing_info->fsp_table[2], + 1, 0, 0); + } + SMC_RET4(handle, dram_info.timing_info->fsp_table[2], + 2, 3, 3); + case 3: + SMC_RET4(handle, dram_info.timing_info->fsp_table[3], + 1, 0, 0); + default: + SMC_RET1(handle, -3); + } +} + +int dram_dvfs_handler(uint32_t smc_fid, void *handle, + u_register_t x1, u_register_t x2, u_register_t x3) +{ + uint64_t mpidr = read_mpidr_el1(); + unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr); + unsigned int fsp_index = x1; + uint32_t online_cores = x2; + + if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_COUNT) { + SMC_RET1(handle, dram_info.num_fsp); + } else if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_INFO) { + return dram_dvfs_get_freq_info(handle, x2); + } else if (x1 < 3U) { + wait_ddrc_hwffc_done = true; + dsb(); + + /* trigger the SGI IPI to info other cores */ + for (int i = 0; i < PLATFORM_CORE_COUNT; i++) { + if (cpu_id != i && (online_cores & (0x1 << (i * 8)))) { + plat_ic_raise_el3_sgi(0x8, i); + } + } +#if defined(PLAT_imx8mq) + for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) { + if (i != cpu_id && online_cores & (1 << (i * 8))) { + imx_gpc_core_wake(1 << i); + } + } +#endif + /* make sure all the core in WFE */ + online_cores &= ~(0x1 << (cpu_id * 8)); + while (1) { + if (online_cores == wfe_done) { + break; + } + } + + /* flush the L1/L2 cache */ + dcsw_op_all(DCCSW); + + if (dram_info.dram_type == DDRC_LPDDR4) { + lpddr4_swffc(&dram_info, dev_fsp, fsp_index); + dev_fsp = (~dev_fsp) & 0x1; + } else { + ddr4_swffc(&dram_info, fsp_index); + } + + dram_info.current_fsp = fsp_index; + wait_ddrc_hwffc_done = false; + wfe_done = 0; + dsb(); + sev(); + isb(); + } + + SMC_RET1(handle, 0); +} diff --git a/plat/imx/imx8m/ddr/dram_retention.c b/plat/imx/imx8m/ddr/dram_retention.c new file mode 100644 
index 0000000..d98a37e --- /dev/null +++ b/plat/imx/imx8m/ddr/dram_retention.c @@ -0,0 +1,224 @@ +/* + * Copyright 2018-2023 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> +#include <lib/mmio.h> + +#include <dram.h> +#include <gpc_reg.h> +#include <platform_def.h> + +#define SRC_DDR1_RCR (IMX_SRC_BASE + 0x1000) +#define SRC_DDR2_RCR (IMX_SRC_BASE + 0x1004) + +#define CCM_SRC_CTRL_OFFSET (IMX_CCM_BASE + 0x800) +#define CCM_CCGR_OFFSET (IMX_CCM_BASE + 0x4000) +#define CCM_TARGET_ROOT_OFFSET (IMX_CCM_BASE + 0x8000) +#define CCM_SRC_CTRL(n) (CCM_SRC_CTRL_OFFSET + 0x10 * (n)) +#define CCM_CCGR(n) (CCM_CCGR_OFFSET + 0x10 * (n)) +#define CCM_TARGET_ROOT(n) (CCM_TARGET_ROOT_OFFSET + 0x80 * (n)) + +#define DBGCAM_EMPTY 0x36000000 + +static void rank_setting_update(void) +{ + uint32_t i, offset; + uint32_t pstate_num = dram_info.num_fsp; + + /* only support maximum 3 setpoints */ + pstate_num = (pstate_num > MAX_FSP_NUM) ? MAX_FSP_NUM : pstate_num; + + for (i = 0U; i < pstate_num; i++) { + offset = i ? (i + 1) * 0x1000 : 0U; + mmio_write_32(DDRC_DRAMTMG2(0) + offset, dram_info.rank_setting[i][0]); + if (dram_info.dram_type != DDRC_LPDDR4) { + mmio_write_32(DDRC_DRAMTMG9(0) + offset, dram_info.rank_setting[i][1]); + } + +#if !defined(PLAT_imx8mq) + mmio_write_32(DDRC_RANKCTL(0) + offset, + dram_info.rank_setting[i][2]); +#endif + } +#if defined(PLAT_imx8mq) + mmio_write_32(DDRC_RANKCTL(0), dram_info.rank_setting[0][2]); +#endif +} + +void dram_enter_retention(void) +{ + /* Wait DBGCAM to be empty */ + while (mmio_read_32(DDRC_DBGCAM(0)) != DBGCAM_EMPTY) { + ; + } + + /* Block AXI ports from taking anymore transactions */ + mmio_write_32(DDRC_PCTRL_0(0), 0x0); + /* Wait until all AXI ports are idle */ + while (mmio_read_32(DDRC_PSTAT(0)) & 0x10001) { + ; + } + + /* Enter self refresh */ + mmio_write_32(DDRC_PWRCTL(0), 0xaa); + + /* LPDDR4 & DDR4/DDR3L need to check different status */ + if (dram_info.dram_type == DDRC_LPDDR4) { + while (0x223 != (mmio_read_32(DDRC_STAT(0)) & 0x33f)) { + ; + } + } else { + while (0x23 != (mmio_read_32(DDRC_STAT(0)) & 0x3f)) { + ; + } + } + + mmio_write_32(DDRC_DFIMISC(0), 0x0); + mmio_write_32(DDRC_SWCTL(0), 0x0); + mmio_write_32(DDRC_DFIMISC(0), 0x1f00); + mmio_write_32(DDRC_DFIMISC(0), 0x1f20); + + while (mmio_read_32(DDRC_DFISTAT(0)) & 0x1) { + ; + } + + mmio_write_32(DDRC_DFIMISC(0), 0x1f00); + /* wait DFISTAT.dfi_init_complete to 1 */ + while (!(mmio_read_32(DDRC_DFISTAT(0)) & 0x1)) { + ; + } + + mmio_write_32(DDRC_SWCTL(0), 0x1); + + /* should check PhyInLP3 pub reg */ + dwc_ddrphy_apb_wr(0xd0000, 0x0); + if (!(dwc_ddrphy_apb_rd(0x90028) & 0x1)) { + INFO("PhyInLP3 = 1\n"); + } + dwc_ddrphy_apb_wr(0xd0000, 0x1); + + /* pwrdnreqn_async adbm/adbs of ddr */ + mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, DDRMIX_ADB400_SYNC); + while (mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & DDRMIX_ADB400_ACK) + ; + mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, DDRMIX_ADB400_SYNC); + + /* remove PowerOk */ + mmio_write_32(SRC_DDR1_RCR, 0x8F000008); + + mmio_write_32(CCM_CCGR(5), 0); + mmio_write_32(CCM_SRC_CTRL(15), 2); + + /* enable the phy iso */ + mmio_setbits_32(IMX_GPC_BASE + DDRMIX_PGC, 1); + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, DDRMIX_PWR_REQ); + + VERBOSE("dram enter retention\n"); +} + +void dram_exit_retention(void) +{ + VERBOSE("dram exit retention\n"); + /* assert all reset */ +#if defined(PLAT_imx8mq) + mmio_write_32(SRC_DDR2_RCR, 0x8F000003); + mmio_write_32(SRC_DDR1_RCR, 0x8F00000F); + mmio_write_32(SRC_DDR2_RCR, 0x8F000000); 
+#else + mmio_write_32(SRC_DDR1_RCR, 0x8F00001F); + mmio_write_32(SRC_DDR1_RCR, 0x8F00000F); +#endif + mmio_write_32(CCM_CCGR(5), 2); + mmio_write_32(CCM_SRC_CTRL(15), 2); + + /* change the clock source of dram_apb_clk_root */ + mmio_write_32(CCM_TARGET_ROOT(65) + 0x8, (0x7 << 24) | (0x7 << 16)); + mmio_write_32(CCM_TARGET_ROOT(65) + 0x4, (0x4 << 24) | (0x3 << 16)); + + /* disable iso */ + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, DDRMIX_PWR_REQ); + mmio_write_32(SRC_DDR1_RCR, 0x8F000006); + + /* wait dram pll locked */ + while (!(mmio_read_32(DRAM_PLL_CTRL) & BIT(31))) { + ; + } + + /* ddrc re-init */ + dram_umctl2_init(dram_info.timing_info); + + /* + * Skips the DRAM init routine and starts up in selfrefresh mode + * Program INIT0.skip_dram_init = 2'b11 + */ + mmio_setbits_32(DDRC_INIT0(0), 0xc0000000); + /* Keeps the controller in self-refresh mode */ + mmio_write_32(DDRC_PWRCTL(0), 0xaa); + mmio_write_32(DDRC_DBG1(0), 0x0); + mmio_write_32(SRC_DDR1_RCR, 0x8F000004); + mmio_write_32(SRC_DDR1_RCR, 0x8F000000); + + /* before write Dynamic reg, sw_done should be 0 */ + mmio_write_32(DDRC_SWCTL(0), 0x0); + +#if !PLAT_imx8mn + if (dram_info.dram_type == DDRC_LPDDR4) { + mmio_write_32(DDRC_DDR_SS_GPR0, 0x01); /*LPDDR4 mode */ + } +#endif /* !PLAT_imx8mn */ + + mmio_write_32(DDRC_DFIMISC(0), 0x0); + + /* dram phy re-init */ + dram_phy_init(dram_info.timing_info); + + /* workaround for rank-to-rank issue */ + rank_setting_update(); + + /* DWC_DDRPHYA_APBONLY0_MicroContMuxSel */ + dwc_ddrphy_apb_wr(0xd0000, 0x0); + while (dwc_ddrphy_apb_rd(0x20097)) { + ; + } + dwc_ddrphy_apb_wr(0xd0000, 0x1); + + /* before write Dynamic reg, sw_done should be 0 */ + mmio_write_32(DDRC_SWCTL(0), 0x0); + mmio_write_32(DDRC_DFIMISC(0), 0x20); + /* wait DFISTAT.dfi_init_complete to 1 */ + while (!(mmio_read_32(DDRC_DFISTAT(0)) & 0x1)) { + ; + } + + /* clear DFIMISC.dfi_init_start */ + mmio_write_32(DDRC_DFIMISC(0), 0x0); + /* set DFIMISC.dfi_init_complete_en */ + mmio_write_32(DDRC_DFIMISC(0), 0x1); + + /* set SWCTL.sw_done to enable quasi-dynamic register programming */ + mmio_write_32(DDRC_SWCTL(0), 0x1); + /* wait SWSTAT.sw_done_ack to 1 */ + while (!(mmio_read_32(DDRC_SWSTAT(0)) & 0x1)) { + ; + } + + mmio_write_32(DDRC_PWRCTL(0), 0x88); + /* wait STAT to normal state */ + while (0x1 != (mmio_read_32(DDRC_STAT(0)) & 0x7)) { + ; + } + + mmio_write_32(DDRC_PCTRL_0(0), 0x1); + /* dis_auto-refresh is set to 0 */ + mmio_write_32(DDRC_RFSHCTL3(0), 0x0); + + /* should check PhyInLP3 pub reg */ + dwc_ddrphy_apb_wr(0xd0000, 0x0); + if (!(dwc_ddrphy_apb_rd(0x90028) & 0x1)) { + VERBOSE("PHYInLP3 = 0\n"); + } + dwc_ddrphy_apb_wr(0xd0000, 0x1); +} diff --git a/plat/imx/imx8m/ddr/lpddr4_dvfs.c b/plat/imx/imx8m/ddr/lpddr4_dvfs.c new file mode 100644 index 0000000..2f5f7b5 --- /dev/null +++ b/plat/imx/imx8m/ddr/lpddr4_dvfs.c @@ -0,0 +1,295 @@ +/* + * Copyright 2018-2023 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> + +#include <dram.h> + +static void lpddr4_mr_write(uint32_t mr_rank, uint32_t mr_addr, uint32_t mr_data) +{ + /* + * 1. Poll MRSTAT.mr_wr_busy until it is 0. This checks that there + * is no outstanding MR transaction. No + * writes should be performed to MRCTRL0 and MRCTRL1 if MRSTAT.mr_wr_busy = 1. + */ + while (mmio_read_32(DDRC_MRSTAT(0)) & 0x1) + ; + + /* + * 2. Write the MRCTRL0.mr_type, MRCTRL0.mr_addr, + * MRCTRL0.mr_rank and (for MRWs) + * MRCTRL1.mr_data to define the MR transaction. 
+ */ + mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4)); + mmio_write_32(DDRC_MRCTRL1(0), (mr_addr << 8) | mr_data); + mmio_setbits_32(DDRC_MRCTRL0(0), BIT(31)); +} + +void lpddr4_swffc(struct dram_info *info, unsigned int init_fsp, + unsigned int fsp_index) + +{ + uint32_t mr, emr, emr2, emr3; + uint32_t mr11, mr12, mr22, mr14; + uint32_t val; + uint32_t derate_backup[3]; + uint32_t (*mr_data)[8]; + uint32_t phy_master; + + /* 1. program targetd UMCTL2_REGS_FREQ1/2/3,already done, skip it. */ + + /* 2. MR13.FSP-WR=1, MRW to update MR registers */ + mr_data = info->mr_table; + mr = mr_data[fsp_index][0]; + emr = mr_data[fsp_index][1]; + emr2 = mr_data[fsp_index][2]; + emr3 = mr_data[fsp_index][3]; + mr11 = mr_data[fsp_index][4]; + mr12 = mr_data[fsp_index][5]; + mr22 = mr_data[fsp_index][6]; + mr14 = mr_data[fsp_index][7]; + + val = (init_fsp == 1) ? 0x2 << 6 : 0x1 << 6; + emr3 = (emr3 & 0x003f) | val | 0x0d00; + + /* 12. set PWRCTL.selfref_en=0 */ + mmio_clrbits_32(DDRC_PWRCTL(0), 0xf); + + phy_master = mmio_read_32(DDRC_DFIPHYMSTR(0)); + + /* It is more safe to config it here */ + mmio_clrbits_32(DDRC_DFIPHYMSTR(0), 0x1); + + lpddr4_mr_write(3, 13, emr3); + lpddr4_mr_write(3, 1, mr); + lpddr4_mr_write(3, 2, emr); + lpddr4_mr_write(3, 3, emr2); + lpddr4_mr_write(3, 11, mr11); + lpddr4_mr_write(3, 12, mr12); + lpddr4_mr_write(3, 14, mr14); + lpddr4_mr_write(3, 22, mr22); + + do { + val = mmio_read_32(DDRC_MRSTAT(0)); + } while (val & 0x1); + + /* 3. disable AXI ports */ + mmio_write_32(DDRC_PCTRL_0(0), 0x0); + + /* 4.Poll PSTAT.rd_port_busy_n=0 and PSTAT.wr_port_busy_n=0. */ + do { + val = mmio_read_32(DDRC_PSTAT(0)); + } while (val != 0); + + /* 6.disable SBRCTL.scrub_en, skip if never enable it */ + /* 7.poll SBRSTAT.scrub_busy Q2: should skip phy master if never enable it */ + /* Disable phy master */ +#ifdef DFILP_SPT + /* 8. disable DFI LP */ + /* DFILPCFG0.dfi_lp_en_sr */ + val = mmio_read_32(DDRC_DFILPCFG0(0)); + if (val & 0x100) { + mmio_write_32(DDRC_DFILPCFG0(0), 0x0); + do { + val = mmio_read_32(DDRC_DFISTAT(0)); // dfi_lp_ack + val2 = mmio_read_32(DDRC_STAT(0)); // operating_mode + } while (((val & 0x2) == 0x2) && ((val2 & 0x7) == 3)); + } +#endif + /* 9. wait until in normal or power down states */ + do { + /* operating_mode */ + val = mmio_read_32(DDRC_STAT(0)); + } while (((val & 0x7) != 1) && ((val & 0x7) != 2)); + + /* 10. Disable automatic derating: derate_enable */ + val = mmio_read_32(DDRC_DERATEEN(0)); + derate_backup[0] = val; + mmio_clrbits_32(DDRC_DERATEEN(0), 0x1); + + val = mmio_read_32(DDRC_FREQ1_DERATEEN(0)); + derate_backup[1] = val; + mmio_clrbits_32(DDRC_FREQ1_DERATEEN(0), 0x1); + + val = mmio_read_32(DDRC_FREQ2_DERATEEN(0)); + derate_backup[2] = val; + mmio_clrbits_32(DDRC_FREQ2_DERATEEN(0), 0x1); + + /* 11. disable automatic ZQ calibration */ + mmio_setbits_32(DDRC_ZQCTL0(0), BIT(31)); + mmio_setbits_32(DDRC_FREQ1_ZQCTL0(0), BIT(31)); + mmio_setbits_32(DDRC_FREQ2_ZQCTL0(0), BIT(31)); + + /* 12. set PWRCTL.selfref_en=0 */ + mmio_clrbits_32(DDRC_PWRCTL(0), 0x1); + + /* 13.Poll STAT.operating_mode is in "Normal" (001) or "Power-down" (010) */ + do { + val = mmio_read_32(DDRC_STAT(0)); + } while (((val & 0x7) != 1) && ((val & 0x7) != 2)); + + /* 14-15. trigger SW SR */ + /* bit 5: selfref_sw, bit 6: stay_in_selfref */ + mmio_setbits_32(DDRC_PWRCTL(0), 0x60); + + /* 16. Poll STAT.selfref_state in "Self Refresh 1" */ + do { + val = mmio_read_32(DDRC_STAT(0)); + } while ((val & 0x300) != 0x100); + + /* 17. 
disable dq */ + mmio_setbits_32(DDRC_DBG1(0), 0x1); + + /* 18. Poll DBGCAM.wr_data_pipeline_empty and DBGCAM.rd_data_pipeline_empty */ + do { + val = mmio_read_32(DDRC_DBGCAM(0)); + val &= 0x30000000; + } while (val != 0x30000000); + + /* 19. change MR13.FSP-OP to new FSP and MR13.VRCG to high current */ + emr3 = (((~init_fsp) & 0x1) << 7) | (0x1 << 3) | (emr3 & 0x0077) | 0x0d00; + lpddr4_mr_write(3, 13, emr3); + + /* 20. enter SR Power Down */ + mmio_clrsetbits_32(DDRC_PWRCTL(0), 0x60, 0x20); + + /* 21. Poll STAT.selfref_state is in "SR Power down" */ + do { + val = mmio_read_32(DDRC_STAT(0)); + } while ((val & 0x300) != 0x200); + + /* 22. set dfi_init_complete_en = 0 */ + + /* 23. switch clock */ + /* set SWCTL.dw_done to 0 */ + mmio_write_32(DDRC_SWCTL(0), 0x0000); + + /* 24. program frequency mode=1(bit 29), target_frequency=target_freq (bit 29) */ + mmio_write_32(DDRC_MSTR2(0), fsp_index); + + /* 25. DBICTL for FSP-OP[1], skip it if never enable it */ + + /* 26.trigger initialization in the PHY */ + + /* Q3: if refresh level is updated, then should program */ + /* as updating refresh, need to toggle refresh_update_level signal */ + val = mmio_read_32(DDRC_RFSHCTL3(0)); + val = val ^ 0x2; + mmio_write_32(DDRC_RFSHCTL3(0), val); + + /* Q4: only for legacy PHY, so here can skipped */ + + /* dfi_frequency -> 0x1x */ + val = mmio_read_32(DDRC_DFIMISC(0)); + val &= 0xFE; + val |= (fsp_index << 8); + mmio_write_32(DDRC_DFIMISC(0), val); + /* dfi_init_start */ + val |= 0x20; + mmio_write_32(DDRC_DFIMISC(0), val); + + /* polling dfi_init_complete de-assert */ + do { + val = mmio_read_32(DDRC_DFISTAT(0)); + } while ((val & 0x1) == 0x1); + + /* change the clock frequency */ + dram_clock_switch(info->timing_info->fsp_table[fsp_index], info->bypass_mode); + + /* dfi_init_start de-assert */ + mmio_clrbits_32(DDRC_DFIMISC(0), 0x20); + + /* polling dfi_init_complete re-assert */ + do { + val = mmio_read_32(DDRC_DFISTAT(0)); + } while ((val & 0x1) == 0x0); + + /* 27. set ZQCTL0.dis_srx_zqcl = 1 */ + if (fsp_index == 0) { + mmio_setbits_32(DDRC_ZQCTL0(0), BIT(30)); + } else if (fsp_index == 1) { + mmio_setbits_32(DDRC_FREQ1_ZQCTL0(0), BIT(30)); + } else { + mmio_setbits_32(DDRC_FREQ2_ZQCTL0(0), BIT(30)); + } + + /* 28,29. exit "self refresh power down" to stay "self refresh 2" */ + /* exit SR power down */ + mmio_clrsetbits_32(DDRC_PWRCTL(0), 0x60, 0x40); + /* 30. Poll STAT.selfref_state in "Self refresh 2" */ + do { + val = mmio_read_32(DDRC_STAT(0)); + } while ((val & 0x300) != 0x300); + + /* 31. change MR13.VRCG to normal */ + emr3 = (emr3 & 0x00f7) | 0x0d00; + lpddr4_mr_write(3, 13, emr3); + + /* restore the PHY master */ + mmio_write_32(DDRC_DFIPHYMSTR(0), phy_master); + + /* 32. issue ZQ if required: zq_calib_short, bit 4 */ + /* polling zq_calib_short_busy */ + mmio_setbits_32(DDRC_DBGCMD(0), 0x10); + + do { + val = mmio_read_32(DDRC_DBGSTAT(0)); + } while ((val & 0x10) != 0x0); + + /* 33. Reset ZQCTL0.dis_srx_zqcl=0 */ + if (fsp_index == 1) + mmio_clrbits_32(DDRC_FREQ1_ZQCTL0(0), BIT(30)); + else if (fsp_index == 2) + mmio_clrbits_32(DDRC_FREQ2_ZQCTL0(0), BIT(30)); + else + mmio_clrbits_32(DDRC_ZQCTL0(0), BIT(30)); + + /* set SWCTL.dw_done to 1 and poll SWSTAT.sw_done_ack=1 */ + mmio_write_32(DDRC_SWCTL(0), 0x1); + + /* wait SWSTAT.sw_done_ack to 1 */ + do { + val = mmio_read_32(DDRC_SWSTAT(0)); + } while ((val & 0x1) == 0x0); + + /* 34. set PWRCTL.stay_in_selfreh=0, exit SR */ + mmio_clrbits_32(DDRC_PWRCTL(0), 0x40); + /* wait tXSR */ + + /* 35. 
Poll STAT.selfref_state in "Idle" */ + do { + val = mmio_read_32(DDRC_STAT(0)); + } while ((val & 0x300) != 0x0); + +#ifdef DFILP_SPT + /* 36. restore dfi_lp.dfi_lp_en_sr */ + mmio_setbits_32(DDRC_DFILPCFG0(0), BIT(8)); +#endif + + /* 37. re-enable CAM: dis_dq */ + mmio_clrbits_32(DDRC_DBG1(0), 0x1); + + /* 38. re-enable automatic SR: selfref_en */ + mmio_setbits_32(DDRC_PWRCTL(0), 0x1); + + /* 39. re-enable automatic ZQ: dis_auto_zq=0 */ + /* disable automatic ZQ calibration */ + if (fsp_index == 1) + mmio_clrbits_32(DDRC_FREQ1_ZQCTL0(0), BIT(31)); + else if (fsp_index == 2) + mmio_clrbits_32(DDRC_FREQ2_ZQCTL0(0), BIT(31)); + else + mmio_clrbits_32(DDRC_ZQCTL0(0), BIT(31)); + /* 40. re-emable automatic derating: derate_enable */ + mmio_write_32(DDRC_DERATEEN(0), derate_backup[0]); + mmio_write_32(DDRC_FREQ1_DERATEEN(0), derate_backup[1]); + mmio_write_32(DDRC_FREQ2_DERATEEN(0), derate_backup[2]); + + /* 41. write 1 to PCTRL.port_en */ + mmio_write_32(DDRC_PCTRL_0(0), 0x1); + + /* 42. enable SBRCTL.scrub_en, skip if never enable it */ +} diff --git a/plat/imx/imx8m/gpc_common.c b/plat/imx/imx8m/gpc_common.c new file mode 100644 index 0000000..71e0af1 --- /dev/null +++ b/plat/imx/imx8m/gpc_common.c @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <common/runtime_svc.h> +#include <lib/mmio.h> +#include <lib/psci/psci.h> + +#include <gpc.h> +#include <imx8m_psci.h> +#include <plat_imx8.h> + +#define MAX_PLL_NUM U(10) + +static uint32_t gpc_imr_offset[] = { IMR1_CORE0_A53, IMR1_CORE1_A53, IMR1_CORE2_A53, IMR1_CORE3_A53, }; + +DEFINE_BAKERY_LOCK(gpc_lock); + +#define FSL_SIP_CONFIG_GPC_PM_DOMAIN 0x03 + +#pragma weak imx_set_cpu_pwr_off +#pragma weak imx_set_cpu_pwr_on +#pragma weak imx_set_cpu_lpm +#pragma weak imx_set_cluster_powerdown +#pragma weak imx_set_sys_wakeup +#pragma weak imx_noc_slot_config +#pragma weak imx_gpc_handler +#pragma weak imx_anamix_override + +void imx_set_cpu_secure_entry(unsigned int core_id, uintptr_t sec_entrypoint) +{ + uint64_t temp_base; + + temp_base = (uint64_t) sec_entrypoint; + temp_base >>= 2; + + mmio_write_32(IMX_SRC_BASE + SRC_GPR1_OFFSET + (core_id << 3), + ((uint32_t)(temp_base >> 22) & 0xffff)); + mmio_write_32(IMX_SRC_BASE + SRC_GPR1_OFFSET + (core_id << 3) + 4, + ((uint32_t)temp_base & 0x003fffff)); +} + +void imx_set_cpu_pwr_off(unsigned int core_id) +{ + + bakery_lock_get(&gpc_lock); + + /* enable the wfi power down of the core */ + mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id)); + + bakery_lock_release(&gpc_lock); + + /* assert the pcg pcr bit of the core */ + mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1); +} + +void imx_set_cpu_pwr_on(unsigned int core_id) +{ + bakery_lock_get(&gpc_lock); + + /* clear the wfi power down bit of the core */ + mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id)); + + bakery_lock_release(&gpc_lock); + + /* assert the ncpuporeset */ + mmio_clrbits_32(IMX_SRC_BASE + SRC_A53RCR1, (1 << core_id)); + /* assert the pcg pcr bit of the core */ + mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1); + /* sw power up the core */ + mmio_setbits_32(IMX_GPC_BASE + CPU_PGC_UP_TRG, (1 << core_id)); + + /* wait for the power up finished */ + while ((mmio_read_32(IMX_GPC_BASE + CPU_PGC_UP_TRG) & (1 << core_id)) != 0) + ; + + /* deassert the pcg pcr bit of the core */ + 
mmio_clrbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1); + /* deassert the ncpuporeset */ + mmio_setbits_32(IMX_SRC_BASE + SRC_A53RCR1, (1 << core_id)); +} + +void imx_set_cpu_lpm(unsigned int core_id, bool pdn) +{ + bakery_lock_get(&gpc_lock); + + if (pdn) { + /* enable the core WFI PDN & IRQ PUP */ + mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) | + COREx_IRQ_WUP(core_id)); + /* assert the pcg pcr bit of the core */ + mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1); + } else { + /* disable CORE WFI PDN & IRQ PUP */ + mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) | + COREx_IRQ_WUP(core_id)); + /* deassert the pcg pcr bit of the core */ + mmio_clrbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1); + } + + bakery_lock_release(&gpc_lock); +} + +/* + * the plat and noc can only be power up & down by slot method, + * slot0: plat power down; slot1: noc power down; slot2: noc power up; + * slot3: plat power up. plat's pup&pdn ack is used by default. if + * noc is config to power down, then noc's pdn ack should be used. + */ +static void imx_a53_plat_slot_config(bool pdn) +{ + if (pdn) { + mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(0), PLAT_PDN_SLT_CTRL); + mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(3), PLAT_PUP_SLT_CTRL); + mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, A53_PLAT_PDN_ACK | + A53_PLAT_PUP_ACK); + mmio_setbits_32(IMX_GPC_BASE + PLAT_PGC_PCR, 0x1); + } else { + mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(0), PLAT_PDN_SLT_CTRL); + mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(3), PLAT_PUP_SLT_CTRL); + mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, A53_DUMMY_PUP_ACK | + A53_DUMMY_PDN_ACK); + mmio_clrbits_32(IMX_GPC_BASE + PLAT_PGC_PCR, 0x1); + } +} + +void imx_set_cluster_standby(bool enter) +{ + /* + * Enable BIT 6 of A53 AD register to make sure system + * don't enter LPM mode. 
+ */ + if (enter) + mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, (1 << 6)); + else + mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, (1 << 6)); +} + +/* i.mx8mq need to override it */ +void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state) +{ + uint32_t val; + + if (!is_local_state_run(power_state)) { + /* config C0~1's LPM, enable a53 clock off in LPM */ + mmio_clrsetbits_32(IMX_GPC_BASE + LPCR_A53_BSC, A53_CLK_ON_LPM, + LPM_MODE(power_state)); + /* config C2-3's LPM */ + mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, LPM_MODE(power_state)); + + /* enable PLAT/SCU power down */ + val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD); + val &= ~EN_L2_WFI_PDN; + /* L2 cache memory is on in WAIT mode */ + if (is_local_state_off(power_state)) { + val |= (L2PGE | EN_PLAT_PDN); + imx_a53_plat_slot_config(true); + } + + mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val); + } else { + /* clear the slot and ack for cluster power down */ + imx_a53_plat_slot_config(false); + /* reverse the cluster level setting */ + mmio_clrsetbits_32(IMX_GPC_BASE + LPCR_A53_BSC, 0xf, A53_CLK_ON_LPM); + mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, 0xf); + + /* clear PLAT/SCU power down */ + mmio_clrsetbits_32(IMX_GPC_BASE + LPCR_A53_AD, (L2PGE | EN_PLAT_PDN), + EN_L2_WFI_PDN); + } +} + +static unsigned int gicd_read_isenabler(uintptr_t base, unsigned int id) +{ + unsigned int n = id >> ISENABLER_SHIFT; + + return mmio_read_32(base + GICD_ISENABLER + (n << 2)); +} + +/* + * gic's clock will be gated in system suspend, so gic has no ability to + * to wakeup the system, we need to config the imr based on the irq + * enable status in gic, then gpc will monitor the wakeup irq + */ +void imx_set_sys_wakeup(unsigned int last_core, bool pdn) +{ + uint32_t irq_mask; + uintptr_t gicd_base = PLAT_GICD_BASE; + + if (pdn) + mmio_clrsetbits_32(IMX_GPC_BASE + LPCR_A53_BSC, A53_CORE_WUP_SRC(last_core), + IRQ_SRC_A53_WUP); + else + mmio_clrsetbits_32(IMX_GPC_BASE + LPCR_A53_BSC, IRQ_SRC_A53_WUP, + A53_CORE_WUP_SRC(last_core)); + + /* clear last core's IMR based on GIC's mask setting */ + for (int i = 0; i < IRQ_IMR_NUM; i++) { + if (pdn) + /* set the wakeup irq base GIC */ + irq_mask = ~gicd_read_isenabler(gicd_base, 32 * (i + 1)); + else + irq_mask = IMR_MASK_ALL; + + mmio_write_32(IMX_GPC_BASE + gpc_imr_offset[last_core] + i * 4, + irq_mask); + } +} + +/* + * this function only need to be override by platform + * that support noc power down, for example: imx8mm. + * otherwize, keep it empty. 
+ */ +void imx_noc_slot_config(bool pdn) +{ + +} + +/* this is common for all imx8m soc */ +void imx_set_sys_lpm(unsigned int last_core, bool retention) +{ + uint32_t val; + + val = mmio_read_32(IMX_GPC_BASE + SLPCR); + val &= ~(SLPCR_EN_DSM | SLPCR_VSTBY | SLPCR_SBYOS | + SLPCR_BYPASS_PMIC_READY | SLPCR_A53_FASTWUP_STOP_MODE); + + if (retention) + val |= (SLPCR_EN_DSM | SLPCR_VSTBY | SLPCR_SBYOS | + SLPCR_BYPASS_PMIC_READY); + + mmio_write_32(IMX_GPC_BASE + SLPCR, val); + + /* config the noc power down */ + imx_noc_slot_config(retention); + + /* config wakeup irqs' mask in gpc */ + imx_set_sys_wakeup(last_core, retention); +} + +void imx_set_rbc_count(void) +{ + mmio_setbits_32(IMX_GPC_BASE + SLPCR, SLPCR_RBC_EN | + (0x8 << SLPCR_RBC_COUNT_SHIFT)); +} + +void imx_clear_rbc_count(void) +{ + mmio_clrbits_32(IMX_GPC_BASE + SLPCR, SLPCR_RBC_EN | + (0x3f << SLPCR_RBC_COUNT_SHIFT)); +} + +struct pll_override pll[MAX_PLL_NUM] = { + {.reg = 0x0, .override_mask = (1 << 12) | (1 << 8), }, + {.reg = 0x14, .override_mask = (1 << 12) | (1 << 8), }, + {.reg = 0x28, .override_mask = (1 << 12) | (1 << 8), }, + {.reg = 0x50, .override_mask = (1 << 12) | (1 << 8), }, + {.reg = 0x64, .override_mask = (1 << 10) | (1 << 8), }, + {.reg = 0x74, .override_mask = (1 << 10) | (1 << 8), }, + {.reg = 0x84, .override_mask = (1 << 10) | (1 << 8), }, + {.reg = 0x94, .override_mask = 0x5555500, }, + {.reg = 0x104, .override_mask = 0x5555500, }, + {.reg = 0x114, .override_mask = 0x500, }, +}; + +#define PLL_BYPASS BIT(4) +void imx_anamix_override(bool enter) +{ + unsigned int i; + + /* + * bypass all the plls & enable the override bit before + * entering DSM mode. + */ + for (i = 0U; i < MAX_PLL_NUM; i++) { + if (enter) { + mmio_setbits_32(IMX_ANAMIX_BASE + pll[i].reg, PLL_BYPASS); + mmio_setbits_32(IMX_ANAMIX_BASE + pll[i].reg, pll[i].override_mask); + } else { + mmio_clrbits_32(IMX_ANAMIX_BASE + pll[i].reg, PLL_BYPASS); + mmio_clrbits_32(IMX_ANAMIX_BASE + pll[i].reg, pll[i].override_mask); + } + } +} + +int imx_gpc_handler(uint32_t smc_fid, u_register_t x1, u_register_t x2, u_register_t x3) +{ + switch (x1) { + case FSL_SIP_CONFIG_GPC_PM_DOMAIN: + imx_gpc_pm_domain_enable(x2, x3); + break; + default: + return SMC_UNK; + } + + return 0; +} diff --git a/plat/imx/imx8m/imx8m_caam.c b/plat/imx/imx8m/imx8m_caam.c new file mode 100644 index 0000000..a491550 --- /dev/null +++ b/plat/imx/imx8m/imx8m_caam.c @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2019-2022 NXP. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/debug.h> +#include <lib/mmio.h> + +#include <imx8m_caam.h> + +#define HAB_JR0_DID U(0x8011) + +void imx8m_caam_init(void) +{ + uint32_t sm_cmd; + + /* Dealloc part 0 and 2 with current DID */ + sm_cmd = (0 << SMC_PART_SHIFT | SMC_CMD_DEALLOC_PART); + mmio_write_32(SM_CMD, sm_cmd); + + sm_cmd = (2 << SMC_PART_SHIFT | SMC_CMD_DEALLOC_PART); + mmio_write_32(SM_CMD, sm_cmd); + + /* config CAAM JRaMID set MID to Cortex A */ + if (mmio_read_32(CAAM_JR0MID) == HAB_JR0_DID) { + NOTICE("Do not release JR0 to NS as it can be used by HAB\n"); + } else { + mmio_write_32(CAAM_JR0MID, CAAM_NS_MID); + } + + mmio_write_32(CAAM_JR1MID, CAAM_NS_MID); + mmio_write_32(CAAM_JR2MID, CAAM_NS_MID); + + /* Alloc partition 0 writing SMPO and SMAGs */ + mmio_write_32(SM_P0_PERM, 0xff); + mmio_write_32(SM_P0_SMAG2, 0xffffffff); + mmio_write_32(SM_P0_SMAG1, 0xffffffff); + + /* Allocate page 0 and 1 to partition 0 with DID set */ + sm_cmd = (0 << SMC_PAGE_SHIFT | 0 << SMC_PART_SHIFT | + SMC_CMD_ALLOC_PAGE); + mmio_write_32(SM_CMD, sm_cmd); + + sm_cmd = (1 << SMC_PAGE_SHIFT | 0 << SMC_PART_SHIFT | + SMC_CMD_ALLOC_PAGE); + mmio_write_32(SM_CMD, sm_cmd); +} diff --git a/plat/imx/imx8m/imx8m_ccm.c b/plat/imx/imx8m/imx8m_ccm.c new file mode 100644 index 0000000..10a00c9 --- /dev/null +++ b/plat/imx/imx8m/imx8m_ccm.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2023, Pengutronix. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> +#include <platform_def.h> + +#define UCR1 0x80 +#define UCR1_UARTEN BIT(0) +#define DOMAIN0_RUNNING(d) (((d) & 0x3) != 0) + +static struct imx_uart { + unsigned int ccm_reg; + unsigned int uart_base; +} imx8m_uart_info[] = { + { /* UART 1 */ + .ccm_reg = 0x4490, + .uart_base = 0x30860000, + }, { /* UART 2 */ + .ccm_reg = 0x44a0, + .uart_base = 0x30890000, + }, { /* UART 3 */ + .ccm_reg = 0x44b0, + .uart_base = 0x30880000, + }, { /* UART 4 */ + .ccm_reg = 0x44c0, + .uart_base = 0x30a60000, + } +}; + +unsigned int imx8m_uart_get_base(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(imx8m_uart_info); i++) { + uint32_t val; + + /* + * At least check that the clock-gate is ungated before we + * access the UART register. + */ + val = mmio_read_32(IMX_CCM_BASE + imx8m_uart_info[i].ccm_reg); + if (DOMAIN0_RUNNING(val)) { + val = mmio_read_32(imx8m_uart_info[i].uart_base + UCR1); + if (val & UCR1_UARTEN) { + return imx8m_uart_info[i].uart_base; + } + } + } + + /* + * We should return an error and inform the user but we can't do it + * this early. 
+ */ + return 0; +} diff --git a/plat/imx/imx8m/imx8m_csu.c b/plat/imx/imx8m/imx8m_csu.c new file mode 100644 index 0000000..2b3a7d9 --- /dev/null +++ b/plat/imx/imx8m/imx8m_csu.c @@ -0,0 +1,56 @@ +/* + * Copyright 2020-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> + +#include <imx8m_csu.h> + +void imx_csu_init(const struct imx_csu_cfg *csu_cfg) +{ + const struct imx_csu_cfg *csu = csu_cfg; + uint32_t val; + + while (csu->type != CSU_INVALID) { + switch (csu->type) { + case CSU_CSL: + val = mmio_read_32(CSLx_REG(csu->idx)); + if (val & CSLx_LOCK(csu->idx)) { + break; + } + mmio_clrsetbits_32(CSLx_REG(csu->idx), CSLx_CFG(0xff, csu->idx), + CSLx_CFG(csu->csl_level | (csu->lock << 8), csu->idx)); + break; + case CSU_HP: + val = mmio_read_32(CSU_HP_REG(csu->idx)); + if (val & CSU_HP_LOCK(csu->idx)) { + break; + } + mmio_clrsetbits_32(CSU_HP_REG(csu->idx), CSU_HP_CFG(0x1, csu->idx), + CSU_HP_CFG(csu->hp | (csu->lock << 0x1), csu->idx)); + break; + case CSU_SA: + val = mmio_read_32(CSU_SA_REG(csu->idx)); + if (val & CSU_SA_LOCK(csu->idx)) { + break; + } + mmio_clrsetbits_32(CSU_SA_REG(csu->idx), CSU_SA_CFG(0x1, csu->idx), + CSU_SA_CFG(csu->sa | (csu->lock << 0x1), csu->idx)); + break; + case CSU_HPCONTROL: + val = mmio_read_32(CSU_HPCONTROL_REG(csu->idx)); + if (val & CSU_HPCONTROL_LOCK(csu->idx)) { + break; + } + mmio_clrsetbits_32(CSU_HPCONTROL_REG(csu->idx), CSU_HPCONTROL_CFG(0x1, csu->idx), + CSU_HPCONTROL_CFG(csu->hpctrl | (csu->lock << 0x1), csu->idx)); + break; + default: + break; + } + + csu++; + } +} diff --git a/plat/imx/imx8m/imx8m_dyn_cfg_helpers.c b/plat/imx/imx8m/imx8m_dyn_cfg_helpers.c new file mode 100644 index 0000000..5d65ef2 --- /dev/null +++ b/plat/imx/imx8m/imx8m_dyn_cfg_helpers.c @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2022, Linaro. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <arch_helpers.h> +#if MEASURED_BOOT +#include <common/desc_image_load.h> +#endif +#include <common/fdt_wrappers.h> +#include <libfdt.h> +#include <platform_def.h> + +#define DTB_PROP_HW_LOG_ADDR "tpm_event_log_addr" +#define DTB_PROP_HW_LOG_SIZE "tpm_event_log_size" + +#if MEASURED_BOOT + +static int imx8m_event_log_fdt_init_overlay(uintptr_t dt_base, int dt_size) +{ + int ret; + int offset; + void *dtb = (void *)dt_base; + + ret = fdt_create_empty_tree(dtb, dt_size); + if (ret < 0) { + ERROR("cannot create empty dtb tree: %s\n", + fdt_strerror(ret)); + return ret; + } + + offset = fdt_path_offset(dtb, "/"); + if (offset < 0) { + ERROR("cannot find root of the tree: %s\n", + fdt_strerror(offset)); + return offset; + } + + offset = fdt_add_subnode(dtb, offset, "fragment@0"); + if (offset < 0) { + ERROR("cannot add fragment node: %s\n", + fdt_strerror(offset)); + return offset; + } + + ret = fdt_setprop_string(dtb, offset, "target-path", "/"); + if (ret < 0) { + ERROR("cannot set target-path property: %s\n", + fdt_strerror(ret)); + return ret; + } + + offset = fdt_add_subnode(dtb, offset, "__overlay__"); + if (offset < 0) { + ERROR("cannot add __overlay__ node: %s\n", + fdt_strerror(offset)); + return ret; + } + + offset = fdt_add_subnode(dtb, offset, "tpm_event_log"); + if (offset < 0) { + ERROR("cannot add tpm_event_log node: %s\n", + fdt_strerror(offset)); + return offset; + } + + ret = fdt_setprop_string(dtb, offset, "compatible", + "arm,tpm_event_log"); + if (ret < 0) { + ERROR("cannot set compatible property: %s\n", + fdt_strerror(ret)); + return ret; + } + + ret = fdt_setprop_u64(dtb, offset, "tpm_event_log_addr", 0); + if (ret < 0) { + ERROR("cannot set tpm_event_log_addr property: %s\n", + fdt_strerror(ret)); + return ret; + } + + ret = fdt_setprop_u32(dtb, offset, "tpm_event_log_size", 0); + if (ret < 0) { + ERROR("cannot set tpm_event_log_size property: %s\n", + fdt_strerror(ret)); + return ret; + } + + return ret; +} + +/* + * Write the Event Log address and its size in the DTB. + * + * This function is supposed to be called only by BL2. + * + * Returns: + * 0 = success + * < 0 = error + */ +static int imx8m_set_event_log_info(uintptr_t config_base, + uintptr_t log_addr, size_t log_size) +{ + /* As libfdt uses void *, we can't avoid this cast */ + void *dtb = (void *)config_base; + const char *compatible_tpm = "arm,tpm_event_log"; + uint64_t base = cpu_to_fdt64(log_addr); + uint32_t sz = cpu_to_fdt32(log_size); + int err, node; + + err = fdt_open_into(dtb, dtb, PLAT_IMX8M_DTO_MAX_SIZE); + if (err < 0) { + ERROR("Invalid Device Tree at %p: error %d\n", dtb, err); + return err; + } + + /* + * Verify that the DTB is valid, before attempting to write to it, + * and get the DTB root node. + */ + + /* Check if the pointer to DT is correct */ + err = fdt_check_header(dtb); + if (err < 0) { + WARN("Invalid DTB file passed\n"); + return err; + } + + /* + * Find the TPM node in device tree. 
+ */ + node = fdt_node_offset_by_compatible(dtb, -1, compatible_tpm); + if (node < 0) { + ERROR("The compatible property '%s' not%s", compatible_tpm, + " found in the config\n"); + return node; + } + + err = fdt_setprop(dtb, node, DTB_PROP_HW_LOG_ADDR, &base, 8); + if (err < 0) { + ERROR("Failed to add log addr err %d\n", err); + return err; + } + + err = fdt_setprop(dtb, node, DTB_PROP_HW_LOG_SIZE, &sz, 4); + if (err < 0) { + ERROR("Failed to add log addr err %d\n", err); + return err; + } + + err = fdt_pack(dtb); + if (err < 0) { + ERROR("Failed to pack Device Tree at %p: error %d\n", dtb, err); + return err; + } + + /* + * Ensure that the info written to the DTB is visible + * to other images. + */ + flush_dcache_range(config_base, fdt_totalsize(dtb)); + + return err; +} + +/* + * This function writes the Event Log address and its size + * in the QEMU DTB. + * + * This function is supposed to be called only by BL2. + * + * Returns: + * 0 = success + * < 0 = error + */ +int imx8m_set_nt_fw_info(size_t log_size, uintptr_t *ns_log_addr) +{ + uintptr_t ns_addr; + int err; + + assert(ns_log_addr != NULL); + + ns_addr = PLAT_IMX8M_DTO_BASE + PLAT_IMX8M_DTO_MAX_SIZE; + + imx8m_event_log_fdt_init_overlay(PLAT_IMX8M_DTO_BASE, + PLAT_IMX8M_DTO_MAX_SIZE); + + /* Write the Event Log address and its size in the DTB */ + err = imx8m_set_event_log_info(PLAT_IMX8M_DTO_BASE, + ns_addr, log_size); + + /* Return Event Log address in Non-secure memory */ + *ns_log_addr = (err < 0) ? 0UL : ns_addr; + return err; +} + +#endif /* MEASURED_BOOT */ diff --git a/plat/imx/imx8m/imx8m_image_load.c b/plat/imx/imx8m/imx8m_image_load.c new file mode 100644 index 0000000..3a03069 --- /dev/null +++ b/plat/imx/imx8m/imx8m_image_load.c @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/bl_common.h> +#include <common/desc_image_load.h> + +#include <platform_def.h> +#include <plat/common/platform.h> + +void plat_flush_next_bl_params(void) +{ + flush_bl_params_desc(); +} + +bl_load_info_t *plat_get_bl_image_load_info(void) +{ + return get_bl_load_info_from_mem_params_desc(); +} + +bl_params_t *plat_get_next_bl_params(void) +{ + return get_next_bl_params_from_mem_params_desc(); +} diff --git a/plat/imx/imx8m/imx8m_measured_boot.c b/plat/imx/imx8m/imx8m_measured_boot.c new file mode 100644 index 0000000..bfcd6ce --- /dev/null +++ b/plat/imx/imx8m/imx8m_measured_boot.c @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2022-2023, Arm Limited. All rights reserved. + * Copyright (c) 2022, Linaro. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <string.h> + +#include "./include/imx8m_measured_boot.h" +#include <drivers/measured_boot/event_log/event_log.h> +#include <plat/arm/common/plat_arm.h> + +/* Event Log data */ +static uint8_t event_log[PLAT_IMX_EVENT_LOG_MAX_SIZE]; + +/* FVP table with platform specific image IDs, names and PCRs */ +static const event_log_metadata_t imx8m_event_log_metadata[] = { + { BL31_IMAGE_ID, EVLOG_BL31_STRING, PCR_0 }, + { BL32_IMAGE_ID, EVLOG_BL32_STRING, PCR_0 }, + { BL32_EXTRA1_IMAGE_ID, EVLOG_BL32_EXTRA1_STRING, PCR_0 }, + { BL32_EXTRA2_IMAGE_ID, EVLOG_BL32_EXTRA2_STRING, PCR_0 }, + { BL33_IMAGE_ID, EVLOG_BL33_STRING, PCR_0 }, + { EVLOG_INVALID_ID, NULL, (unsigned int)(-1) } /* Terminator */ +}; + +int plat_mboot_measure_image(unsigned int image_id, image_info_t *image_data) +{ + /* Calculate image hash and record data in Event Log */ + int err = event_log_measure_and_record(image_data->image_base, + image_data->image_size, + image_id, + imx8m_event_log_metadata); + if (err != 0) { + ERROR("%s%s image id %u (%i)\n", + "Failed to ", "record", image_id, err); + return err; + } + + return 0; +} + +void bl2_plat_mboot_init(void) +{ + event_log_init(event_log, event_log + sizeof(event_log)); + event_log_write_header(); +} + +void bl2_plat_mboot_finish(void) +{ + int rc = 0; + + /* Event Log address in Non-Secure memory */ + uintptr_t ns_log_addr; + + /* Event Log filled size */ + size_t event_log_cur_size; + + event_log_cur_size = event_log_get_cur_size(event_log); + + rc = imx8m_set_nt_fw_info(event_log_cur_size, &ns_log_addr); + if (rc != 0) { + ERROR("%s(): Unable to update %s_FW_CONFIG\n", + __func__, "NT"); + /* + * It is a fatal error because on i.MX U-boot assumes that + * a valid event log exists and will use it to record the + * measurements into the fTPM. + */ + panic(); + } + + /* Copy Event Log to Non-secure memory */ + (void)memcpy((void *)ns_log_addr, (const void *)event_log, + event_log_cur_size); + + /* Ensure that the Event Log is visible in Non-secure memory */ + flush_dcache_range(ns_log_addr, event_log_cur_size); + + dump_event_log((uint8_t *)event_log, event_log_cur_size); +} + +int plat_mboot_measure_key(const void *pk_oid, const void *pk_ptr, + size_t pk_len) +{ + return 0; +} diff --git a/plat/imx/imx8m/imx8m_psci_common.c b/plat/imx/imx8m/imx8m_psci_common.c new file mode 100644 index 0000000..48eb8a6 --- /dev/null +++ b/plat/imx/imx8m/imx8m_psci_common.c @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/psci/psci.h> + +#include <dram.h> +#include <gpc.h> +#include <imx8m_psci.h> +#include <plat_imx8.h> + +/* + * below callback functions need to be override by i.mx8mq, + * for other i.mx8m soc, if no special requirement, + * reuse below ones. 
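+ * The "#pragma weak" directives that follow mark these PSCI callbacks
+ * as weak symbols, so an SoC-specific file can provide a strong
+ * definition that replaces the common one at link time.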
+ */ +#pragma weak imx_validate_power_state +#pragma weak imx_pwr_domain_off +#pragma weak imx_domain_suspend +#pragma weak imx_domain_suspend_finish +#pragma weak imx_get_sys_suspend_power_state + +int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint) +{ + /* The non-secure entrypoint should be in RAM space */ + if (ns_entrypoint < PLAT_NS_IMAGE_OFFSET) + return PSCI_E_INVALID_PARAMS; + + return PSCI_E_SUCCESS; +} + +int imx_pwr_domain_on(u_register_t mpidr) +{ + unsigned int core_id; + uint64_t base_addr = BL31_START; + + core_id = MPIDR_AFFLVL0_VAL(mpidr); + + imx_set_cpu_secure_entry(core_id, base_addr); + imx_set_cpu_pwr_on(core_id); + + return PSCI_E_SUCCESS; +} + +void imx_pwr_domain_on_finish(const psci_power_state_t *target_state) +{ + plat_gic_pcpu_init(); + plat_gic_cpuif_enable(); +} + +void imx_pwr_domain_off(const psci_power_state_t *target_state) +{ + uint64_t mpidr = read_mpidr_el1(); + unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr); + + plat_gic_cpuif_disable(); + imx_set_cpu_pwr_off(core_id); +} + +int imx_validate_power_state(unsigned int power_state, + psci_power_state_t *req_state) +{ + int pwr_lvl = psci_get_pstate_pwrlvl(power_state); + int pwr_type = psci_get_pstate_type(power_state); + int state_id = psci_get_pstate_id(power_state); + + if (pwr_lvl > PLAT_MAX_PWR_LVL) + return PSCI_E_INVALID_PARAMS; + + if (pwr_type == PSTATE_TYPE_STANDBY) { + CORE_PWR_STATE(req_state) = PLAT_MAX_RET_STATE; + CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE; + } + + if (pwr_type == PSTATE_TYPE_POWERDOWN && state_id == 0x33) { + CORE_PWR_STATE(req_state) = PLAT_MAX_OFF_STATE; + CLUSTER_PWR_STATE(req_state) = PLAT_WAIT_RET_STATE; + } + + return PSCI_E_SUCCESS; +} + +void imx_cpu_standby(plat_local_state_t cpu_state) +{ + dsb(); + write_scr_el3(read_scr_el3() | SCR_FIQ_BIT); + isb(); + + wfi(); + + write_scr_el3(read_scr_el3() & (~SCR_FIQ_BIT)); + isb(); +} + +void imx_domain_suspend(const psci_power_state_t *target_state) +{ + uint64_t base_addr = BL31_START; + uint64_t mpidr = read_mpidr_el1(); + unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr); + + if (is_local_state_off(CORE_PWR_STATE(target_state))) { + plat_gic_cpuif_disable(); + imx_set_cpu_secure_entry(core_id, base_addr); + imx_set_cpu_lpm(core_id, true); + } else { + dsb(); + write_scr_el3(read_scr_el3() | SCR_FIQ_BIT); + isb(); + } + + if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) + imx_set_cluster_powerdown(core_id, CLUSTER_PWR_STATE(target_state)); + + if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) { + imx_set_sys_lpm(core_id, true); + dram_enter_retention(); + imx_anamix_override(true); + } +} + +void imx_domain_suspend_finish(const psci_power_state_t *target_state) +{ + uint64_t mpidr = read_mpidr_el1(); + unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr); + + if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) { + imx_anamix_override(false); + dram_exit_retention(); + imx_set_sys_lpm(core_id, false); + } + + if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) { + imx_clear_rbc_count(); + imx_set_cluster_powerdown(core_id, PSCI_LOCAL_STATE_RUN); + } + + if (is_local_state_off(CORE_PWR_STATE(target_state))) { + imx_set_cpu_lpm(core_id, false); + plat_gic_cpuif_enable(); + } else { + write_scr_el3(read_scr_el3() & (~SCR_FIQ_BIT)); + isb(); + } +} + +void imx_get_sys_suspend_power_state(psci_power_state_t *req_state) +{ + unsigned int i; + + for (i = IMX_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++) + req_state->pwr_domain_state[i] = PLAT_STOP_OFF_STATE; +} + +static void __dead2 
imx_wdog_restart(bool external_reset) +{ + uintptr_t wdog_base = IMX_WDOG_BASE; + unsigned int val; + + val = mmio_read_16(wdog_base); + /* + * Common watchdog init flags, for additional details check + * 6.6.4.1 Watchdog Control Register (WDOGx_WCR) + * + * Initial bit selection: + * WDOG_WCR_WDE - Enable the watchdog. + * + * 0x000E mask is used to keep previous values (that could be set + * in SPL) of WDBG and WDE/WDT (both are write-one once-only bits). + */ + val = (val & 0x000E) | WDOG_WCR_WDE; + if (external_reset) { + /* + * To assert WDOG_B (external reset) we have + * to set WDA bit 0 (already set in previous step). + * SRS bits are required to be set to 1 (no effect on the + * system). + */ + val |= WDOG_WCR_SRS; + } else { + /* + * To assert Software Reset Signal (internal reset) we have + * to set SRS bit to 0 (already set in previous step). + * SRE bit is required to be set to 1 when used in + * conjunction with the Software Reset Signal before + * SRS asserton, otherwise SRS bit will just automatically + * reset to 1. + * + * Also we set WDA to 1 (no effect on system). + */ + val |= WDOG_WCR_SRE | WDOG_WCR_WDA; + } + + mmio_write_16(wdog_base, val); + + mmio_write_16(wdog_base + WDOG_WSR, 0x5555); + mmio_write_16(wdog_base + WDOG_WSR, 0xaaaa); + while (1) + ; +} + +void __dead2 imx_system_reset(void) +{ +#ifdef IMX_WDOG_B_RESET + imx_wdog_restart(true); +#else + imx_wdog_restart(false); +#endif +} + +int imx_system_reset2(int is_vendor, int reset_type, u_register_t cookie) +{ + imx_wdog_restart(false); + + /* + * imx_wdog_restart cannot return (as it's a __dead function), + * however imx_system_reset2 has to return some value according + * to PSCI v1.1 spec. + */ + return 0; +} + +void __dead2 imx_system_off(void) +{ + uint32_t val; + + val = mmio_read_32(IMX_SNVS_BASE + SNVS_LPCR); + val |= SNVS_LPCR_SRTC_ENV | SNVS_LPCR_DP_EN | SNVS_LPCR_TOP; + mmio_write_32(IMX_SNVS_BASE + SNVS_LPCR, val); + + while (1) + ; +} + +void __dead2 imx_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state) +{ + /* + * before enter WAIT or STOP mode with PLAT(SCU) power down, + * rbc count need to be enabled to make sure PLAT is + * power down successfully even if the the wakeup IRQ is pending + * early before the power down sequence. the RBC counter is + * drived by the 32K OSC, so delay 30us to make sure the counter + * is really running. + */ + if (is_local_state_off(CLUSTER_PWR_STATE(target_state))) { + imx_set_rbc_count(); + udelay(30); + } + + while (1) + wfi(); +} diff --git a/plat/imx/imx8m/imx8m_snvs.c b/plat/imx/imx8m/imx8m_snvs.c new file mode 100644 index 0000000..7874a68 --- /dev/null +++ b/plat/imx/imx8m/imx8m_snvs.c @@ -0,0 +1,19 @@ +/* + * Copyright 2022-2023 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> +#include <platform_def.h> + +#define SNVS_HPCOMR U(0x04) +#define SNVS_NPSWA_EN BIT(31) + +void enable_snvs_privileged_access(void) +{ + unsigned int val; + + val = mmio_read_32(IMX_SNVS_BASE + SNVS_HPCOMR); + mmio_write_32(IMX_SNVS_BASE + SNVS_HPCOMR, val | SNVS_NPSWA_EN); +} diff --git a/plat/imx/imx8m/imx8mm/gpc.c b/plat/imx/imx8m/imx8mm/gpc.c new file mode 100644 index 0000000..f173a16 --- /dev/null +++ b/plat/imx/imx8m/imx8mm/gpc.c @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdlib.h> +#include <stdint.h> +#include <stdbool.h> + +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/psci/psci.h> +#include <lib/smccc.h> +#include <platform_def.h> +#include <services/std_svc.h> + +#include <gpc.h> +#include <imx_sip_svc.h> + +#define CCGR(x) (0x4000 + (x) * 16) + +enum pu_domain_id { + HSIOMIX, + PCIE, + OTG1, + OTG2, + GPUMIX, + VPUMIX, + VPU_G1, + VPU_G2, + VPU_H1, + DISPMIX, + MIPI, + /* below two domain only for ATF internal use */ + GPU2D, + GPU3D, + MAX_DOMAINS, +}; + +/* PU domain */ +static struct imx_pwr_domain pu_domains[] = { + IMX_MIX_DOMAIN(HSIOMIX, false), + IMX_PD_DOMAIN(PCIE, false), + IMX_PD_DOMAIN(OTG1, true), + IMX_PD_DOMAIN(OTG2, true), + IMX_MIX_DOMAIN(GPUMIX, false), + IMX_MIX_DOMAIN(VPUMIX, false), + IMX_PD_DOMAIN(VPU_G1, false), + IMX_PD_DOMAIN(VPU_G2, false), + IMX_PD_DOMAIN(VPU_H1, false), + IMX_MIX_DOMAIN(DISPMIX, false), + IMX_PD_DOMAIN(MIPI, false), + /* below two domain only for ATF internal use */ + IMX_MIX_DOMAIN(GPU2D, false), + IMX_MIX_DOMAIN(GPU3D, false), +}; + +static unsigned int pu_domain_status; + +#define GPU_RCR 0x40 +#define VPU_RCR 0x44 + +#define VPU_CTL_BASE 0x38330000 +#define BLK_SFT_RSTN_CSR 0x0 +#define H1_SFT_RSTN BIT(2) +#define G1_SFT_RSTN BIT(1) +#define G2_SFT_RSTN BIT(0) + +#define DISP_CTL_BASE 0x32e28000 + +void vpu_sft_reset_assert(uint32_t domain_id) +{ + uint32_t val; + + val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR); + + switch (domain_id) { + case VPU_G1: + val &= ~G1_SFT_RSTN; + mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val); + break; + case VPU_G2: + val &= ~G2_SFT_RSTN; + mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val); + break; + case VPU_H1: + val &= ~H1_SFT_RSTN; + mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val); + break; + default: + break; + } +} + +void vpu_sft_reset_deassert(uint32_t domain_id) +{ + uint32_t val; + + val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR); + + switch (domain_id) { + case VPU_G1: + val |= G1_SFT_RSTN; + mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val); + break; + case VPU_G2: + val |= G2_SFT_RSTN; + mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val); + break; + case VPU_H1: + val |= H1_SFT_RSTN; + mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val); + break; + default: + break; + } +} + +void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on) +{ + if (domain_id >= MAX_DOMAINS) { + return; + } + + struct imx_pwr_domain *pwr_domain = &pu_domains[domain_id]; + + if (on) { + pu_domain_status |= (1 << domain_id); + + if (domain_id == VPU_G1 || domain_id == VPU_G2 || + domain_id == VPU_H1) { + vpu_sft_reset_assert(domain_id); + } + + /* HSIOMIX has no PU bit, so skip for it */ + if (domain_id != HSIOMIX) { + /* clear the PGC bit */ + mmio_clrbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1); + + /* power up the domain */ + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, pwr_domain->pwr_req); + + /* wait for power request done */ + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & pwr_domain->pwr_req) { + ; + } + } + + if (domain_id == VPU_G1 || domain_id == VPU_G2 || + domain_id == VPU_H1) { + vpu_sft_reset_deassert(domain_id); + /* dealy for a while to make sure reset done */ + udelay(100); + } + + if (domain_id == GPUMIX) { + /* assert reset */ + mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x1); + + /* power up GPU2D */ + mmio_clrbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1); + + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU2D_PWR_REQ); + + 
/* wait for power request done */ + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU2D_PWR_REQ) { + ; + } + + udelay(1); + + /* power up GPU3D */ + mmio_clrbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1); + + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU3D_PWR_REQ); + + /* wait for power request done */ + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU3D_PWR_REQ) { + ; + } + + udelay(10); + /* release the gpumix reset */ + mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x0); + udelay(10); + } + + /* vpu sft clock enable */ + if (domain_id == VPUMIX) { + mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x1); + udelay(5); + mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x0); + udelay(5); + + /* enable all clock */ + mmio_write_32(VPU_CTL_BASE + 0x4, 0x7); + } + + if (domain_id == DISPMIX) { + /* special setting for DISPMIX */ + mmio_write_32(DISP_CTL_BASE + 0x4, 0x1fff); + mmio_write_32(DISP_CTL_BASE, 0x7f); + mmio_write_32(DISP_CTL_BASE + 0x8, 0x30000); + } + + /* handle the ADB400 sync */ + if (pwr_domain->need_sync) { + /* clear adb power down request */ + mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync); + + /* wait for adb power request ack */ + while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) { + ; + } + } + + if (domain_id == GPUMIX) { + /* power up GPU2D ADB */ + mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC); + + /* wait for adb power request ack */ + while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK)) { + ; + } + + /* power up GPU3D ADB */ + mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC); + + /* wait for adb power request ack */ + while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK)) { + ; + } + } + } else { + pu_domain_status &= ~(1 << domain_id); + + if (domain_id == OTG1 || domain_id == OTG2) { + return; + } + + /* GPU2D & GPU3D ADB power down */ + if (domain_id == GPUMIX) { + mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC); + + /* wait for adb power request ack */ + while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK)) { + ; + } + + mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC); + + /* wait for adb power request ack */ + while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK)) { + ; + } + } + + /* handle the ADB400 sync */ + if (pwr_domain->need_sync) { + /* set adb power down request */ + mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync); + + /* wait for adb power request ack */ + while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) { + ; + } + } + + if (domain_id == GPUMIX) { + /* power down GPU2D */ + mmio_setbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1); + + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU2D_PWR_REQ); + + /* wait for power request done */ + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU2D_PWR_REQ) { + ; + } + + /* power down GPU3D */ + mmio_setbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1); + + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU3D_PWR_REQ); + + /* wait for power request done */ + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU3D_PWR_REQ) { + ; + } + } + + /* HSIOMIX has no PU bit, so skip for it */ + if (domain_id != HSIOMIX) { + /* set the PGC bit */ + mmio_setbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1); + + /* power down the domain */ + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, pwr_domain->pwr_req); + + /* wait for power request done */ + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & 
pwr_domain->pwr_req) { + ; + } + } + } +} + +void imx_gpc_init(void) +{ + unsigned int val; + int i; + + /* mask all the wakeup irq by default */ + for (i = 0; i < 4; i++) { + mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0); + } + + val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC); + /* use GIC wake_request to wakeup C0~C3 from LPM */ + val |= 0x30c00000; + /* clear the MASTER0 LPM handshake */ + val &= ~(1 << 6); + mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val); + + /* clear MASTER1 & MASTER2 mapping in CPU0(A53) */ + mmio_clrbits_32(IMX_GPC_BASE + MST_CPU_MAPPING, (MASTER1_MAPPING | + MASTER2_MAPPING)); + + /* set all mix/PU in A53 domain */ + mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xffff); + + /* + * Set the CORE & SCU power up timing: + * SW = 0x1, SW2ISO = 0x1; + * the CPU CORE and SCU power up timing counter + * is drived by 32K OSC, each domain's power up + * latency is (SW + SW2ISO) / 32768 + */ + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(0) + 0x4, 0x81); + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(1) + 0x4, 0x81); + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(2) + 0x4, 0x81); + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(3) + 0x4, 0x81); + mmio_write_32(IMX_GPC_BASE + PLAT_PGC_PCR + 0x4, 0x81); + mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING, + (0x59 << 10) | 0x5B | (0x2 << 20)); + + /* set DUMMY PDN/PUP ACK by default for A53 domain */ + mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, + A53_DUMMY_PUP_ACK | A53_DUMMY_PDN_ACK); + + /* clear DSM by default */ + val = mmio_read_32(IMX_GPC_BASE + SLPCR); + val &= ~SLPCR_EN_DSM; + /* enable the fast wakeup wait mode */ + val |= SLPCR_A53_FASTWUP_WAIT_MODE; + /* clear the RBC */ + val &= ~(0x3f << SLPCR_RBC_COUNT_SHIFT); + /* set the STBY_COUNT to 0x5, (128 * 30)us */ + val &= ~(0x7 << SLPCR_STBY_COUNT_SHFT); + val |= (0x5 << SLPCR_STBY_COUNT_SHFT); + mmio_write_32(IMX_GPC_BASE + SLPCR, val); + + /* + * USB PHY power up needs to make sure RESET bit in SRC is clear, + * otherwise, the PU power up bit in GPC will NOT self-cleared. + * only need to do it once. 
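+	 * Bit 0 of SRC_OTG1PHY_SCR and SRC_OTG2PHY_SCR is therefore cleared
+	 * here, at the end of GPC init, instead of every time the OTG power
+	 * domains are enabled.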
+ */ + mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1); + mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1); +} diff --git a/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c b/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c new file mode 100644 index 0000000..c39dd93 --- /dev/null +++ b/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c @@ -0,0 +1,143 @@ +/* + * Copyright 2017-2021 NXP + * Copyright 2021 Arm + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <common/desc_image_load.h> +#include <context.h> +#include <drivers/console.h> +#include <drivers/generic_delay_timer.h> +#include <drivers/mmc.h> +#include <lib/mmio.h> +#include <lib/optee_utils.h> +#include <lib/utils.h> +#include <stdbool.h> +#include <tbbr_img_def.h> + +#include <imx_aipstz.h> +#include <imx_csu.h> +#include <imx_uart.h> +#include <imx_usdhc.h> +#include <plat/common/platform.h> + +#include "imx8mm_private.h" +#include "platform_def.h" + +static const struct aipstz_cfg aipstz[] = { + {IMX_AIPSTZ1, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ2, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ3, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ4, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {0}, +}; + +static void imx8mm_usdhc_setup(void) +{ + imx_usdhc_params_t params; + struct mmc_device_info info; + + params.reg_base = PLAT_IMX8MM_BOOT_MMC_BASE; + /* + The imx8mm SD Card Speed modes for USDHC2 + +--------------+--------------------+--------------+--------------+ + |Bus Speed Mode|Max. Clock Frequency|Max. Bus Speed|Signal Voltage| + +--------------+--------------------+--------------+--------------+ + |Default Speed | 25 MHz | 12.5 MB/s | 3.3V | + |High Speed | 50 MHz | 25 MB/s | 3.3V | + +--------------+--------------------+--------------+--------------+ + + We pick 50 Mhz here for High Speed access. 
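+	   The 50000000 assigned to params.clk_rate below corresponds to the
+	   High Speed row of this table; a board limited to Default Speed would
+	   presumably use 25000000 instead.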
+ */ + params.clk_rate = 50000000; + params.bus_width = MMC_BUS_WIDTH_1; + params.flags = 0; + info.mmc_dev_type = MMC_IS_SD; + info.ocr_voltage = OCR_3_3_3_4 | OCR_3_2_3_3; + imx_usdhc_init(¶ms, &info); +} + +void bl2_el3_early_platform_setup(u_register_t arg1, u_register_t arg2, + u_register_t arg3, u_register_t arg4) +{ + int i; + static console_t console; + + /* enable CSU NS access permission */ + for (i = 0; i < MAX_CSU_NUM; i++) { + mmio_write_32(IMX_CSU_BASE + i * 4, CSU_CSL_OPEN_ACCESS); + } + + /* config the aips access permission */ + imx_aipstz_init(aipstz); + + console_imx_uart_register(IMX_BOOT_UART_BASE, IMX_BOOT_UART_CLK_IN_HZ, + IMX_CONSOLE_BAUDRATE, &console); + + generic_delay_timer_init(); + + /* select the CKIL source to 32K OSC */ + mmio_write_32(0x30360124, 0x1); + + imx8mm_usdhc_setup(); + + /* Open handles to a FIP image */ + plat_imx_io_setup(); +} + +void bl2_el3_plat_arch_setup(void) +{ +} + +void bl2_platform_setup(void) +{ +} + +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + int err = 0; + bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); + bl_mem_params_node_t *pager_mem_params = NULL; + bl_mem_params_node_t *paged_mem_params = NULL; + + assert(bl_mem_params); + + switch (image_id) { + case BL32_IMAGE_ID: + pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID); + assert(pager_mem_params); + + paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID); + assert(paged_mem_params); + + err = parse_optee_header(&bl_mem_params->ep_info, + &pager_mem_params->image_info, + &paged_mem_params->image_info); + if (err != 0) { + WARN("OPTEE header parse error.\n"); + } + + break; + default: + /* Do nothing in default case */ + break; + } + + return err; +} + +unsigned int plat_get_syscnt_freq2(void) +{ + return COUNTER_FREQUENCY; +} + +void bl2_plat_runtime_setup(void) +{ + return; +} diff --git a/plat/imx/imx8m/imx8mm/imx8mm_bl2_mem_params_desc.c b/plat/imx/imx8m/imx8mm/imx8mm_bl2_mem_params_desc.c new file mode 100644 index 0000000..e44345d --- /dev/null +++ b/plat/imx/imx8m/imx8mm/imx8mm_bl2_mem_params_desc.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <common/desc_image_load.h> +#include <plat/common/platform.h> +#include <platform_def.h> + +static bl_mem_params_node_t bl2_mem_params_descs[] = { + { + .image_id = BL31_IMAGE_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2, + entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = BL31_BASE, + .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS), + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, image_info_t, + IMAGE_ATTRIB_PLAT_SETUP), + .image_info.image_base = BL31_BASE, + .image_info.image_max_size = BL31_LIMIT - BL31_BASE, + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + { + .image_id = BL32_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2, + entry_point_info_t, + SECURE | EXECUTABLE), + .ep_info.pc = BL32_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, + image_info_t, 0), + + .image_info.image_base = BL32_BASE, + .image_info.image_max_size = BL32_SIZE, + + .next_handoff_image_id = BL33_IMAGE_ID, + }, + { + .image_id = BL32_EXTRA1_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2, + entry_point_info_t, + SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, + image_info_t, IMAGE_ATTRIB_SKIP_LOADING), + .image_info.image_base = BL32_BASE, + .image_info.image_max_size = BL32_SIZE, + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + { + /* This is a zero sized image so we don't set base or size */ + .image_id = BL32_EXTRA2_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, + IMAGE_ATTRIB_SKIP_LOADING), + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + { + .image_id = BL33_IMAGE_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2, + entry_point_info_t, + NON_SECURE | EXECUTABLE), + # ifdef PRELOADED_BL33_BASE + .ep_info.pc = PLAT_NS_IMAGE_OFFSET, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, + IMAGE_ATTRIB_SKIP_LOADING), + # else + .ep_info.pc = PLAT_NS_IMAGE_OFFSET, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = PLAT_NS_IMAGE_OFFSET, + .image_info.image_max_size = PLAT_NS_IMAGE_SIZE, + # endif /* PRELOADED_BL33_BASE */ + + .next_handoff_image_id = INVALID_IMAGE_ID, + } +}; + +REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs); diff --git a/plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c b/plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c new file mode 100644 index 0000000..dc9dd59 --- /dev/null +++ b/plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2019-2022 ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stdbool.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <context.h> +#include <drivers/arm/tzc380.h> +#include <drivers/console.h> +#include <drivers/generic_delay_timer.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/mmio.h> +#include <lib/xlat_tables/xlat_tables_v2.h> +#include <plat/common/platform.h> + +#include <dram.h> +#include <gpc.h> +#include <imx_aipstz.h> +#include <imx_uart.h> +#include <imx_rdc.h> +#include <imx8m_caam.h> +#include <imx8m_ccm.h> +#include <imx8m_csu.h> +#include <imx8m_snvs.h> +#include <plat_imx8.h> + +#define TRUSTY_PARAMS_LEN_BYTES (4096*2) + +/* + * Note: DRAM region is mapped with entire size available and uses MT_RW + * attributes. + * See details in docs/plat/imx8m.rst "High Assurance Boot (HABv4)" section + * for explanation of this mapping scheme. + */ +static const mmap_region_t imx_mmap[] = { + MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE, MT_DEVICE | MT_RW), + MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE, MT_DEVICE | MT_RW), /* AIPS map */ + MAP_REGION_FLAT(OCRAM_S_BASE, OCRAM_S_SIZE, MT_DEVICE | MT_RW), /* OCRAM_S */ + MAP_REGION_FLAT(IMX_DDRPHY_BASE, IMX_DDR_IPS_SIZE, MT_DEVICE | MT_RW), /* DDRMIX */ + MAP_REGION_FLAT(IMX_VPUMIX_BASE, IMX_VPUMIX_SIZE, MT_DEVICE | MT_RW), /* VPUMIX */ + MAP_REGION_FLAT(IMX_CAAM_RAM_BASE, IMX_CAAM_RAM_SIZE, MT_MEMORY | MT_RW), /* CAMM RAM */ + MAP_REGION_FLAT(IMX_NS_OCRAM_BASE, IMX_NS_OCRAM_SIZE, MT_MEMORY | MT_RW), /* NS OCRAM */ + MAP_REGION_FLAT(IMX_ROM_BASE, IMX_ROM_SIZE, MT_MEMORY | MT_RO), /* ROM code */ + MAP_REGION_FLAT(IMX_DRAM_BASE, IMX_DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS), /* DRAM */ + {0}, +}; + +static const struct aipstz_cfg aipstz[] = { + {IMX_AIPSTZ1, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ2, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ3, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ4, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {0}, +}; + +static const struct imx_rdc_cfg rdc[] = { + /* Master domain assignment */ + RDC_MDAn(RDC_MDA_M4, DID1), + + /* peripherals domain permission */ + RDC_PDAPn(RDC_PDAP_UART4, D1R | D1W), + RDC_PDAPn(RDC_PDAP_UART2, D0R | D0W), + + /* memory region */ + + /* Sentinel */ + {0}, +}; + +static const struct imx_csu_cfg csu_cfg[] = { + /* peripherals csl setting */ + CSU_CSLx(0x1, CSU_SEC_LEVEL_0, UNLOCKED), + + /* master HP0~1 */ + + /* SA setting */ + + /* HP control setting */ + + /* Sentinel */ + {0} +}; + +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; + +/* get SPSR for BL33 entry */ +static uint32_t get_spsr_for_bl33_entry(void) +{ + unsigned long el_status; + unsigned long mode; + uint32_t spsr; + + /* figure out what mode we enter the non-secure world */ + el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; + el_status &= ID_AA64PFR0_ELX_MASK; + + mode = (el_status) ? MODE_EL2 : MODE_EL1; + + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + +void bl31_tzc380_setup(void) +{ + unsigned int val; + + val = mmio_read_32(IMX_IOMUX_GPR_BASE + 0x28); + if ((val & GPR_TZASC_EN) != GPR_TZASC_EN) + return; + + tzc380_init(IMX_TZASC_BASE); + + /* + * Need to substact offset 0x40000000 from CPU address when + * programming tzasc region for i.mx8mm. 
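+	 * Example: DRAM starts at CPU address 0x40000000 (IMX_DRAM_BASE), so
+	 * passing region base 0x00000000 with TZC_REGION_SIZE_4G below covers
+	 * the 1G-5G CPU address window mentioned in the next comment.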
+ */ + + /* Enable 1G-5G S/NS RW */ + tzc380_configure_region(0, 0x00000000, TZC_ATTR_REGION_SIZE(TZC_REGION_SIZE_4G) | + TZC_ATTR_REGION_EN_MASK | TZC_ATTR_SP_ALL); +} + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + unsigned int console_base = IMX_BOOT_UART_BASE; + static console_t console; + int i; + + /* Enable CSU NS access permission */ + for (i = 0; i < 64; i++) { + mmio_write_32(IMX_CSU_BASE + i * 4, 0x00ff00ff); + } + + imx_aipstz_init(aipstz); + + imx_rdc_init(rdc); + + imx_csu_init(csu_cfg); + + if (console_base == 0U) { + console_base = imx8m_uart_get_base(); + } + + console_imx_uart_register(console_base, IMX_BOOT_UART_CLK_IN_HZ, + IMX_CONSOLE_BAUDRATE, &console); + /* This console is only used for boot stage */ + console_set_scope(&console, CONSOLE_FLAG_BOOT); + + imx8m_caam_init(); + + /* + * tell BL3-1 where the non-secure software image is located + * and the entry state information. + */ + bl33_image_ep_info.pc = PLAT_NS_IMAGE_OFFSET; + bl33_image_ep_info.spsr = get_spsr_for_bl33_entry(); + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); + +#if defined(SPD_opteed) || defined(SPD_trusty) + /* Populate entry point information for BL32 */ + SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0); + SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE); + bl32_image_ep_info.pc = BL32_BASE; + bl32_image_ep_info.spsr = 0; + + /* Pass TEE base and size to bl33 */ + bl33_image_ep_info.args.arg1 = BL32_BASE; + bl33_image_ep_info.args.arg2 = BL32_SIZE; + +#ifdef SPD_trusty + bl32_image_ep_info.args.arg0 = BL32_SIZE; + bl32_image_ep_info.args.arg1 = BL32_BASE; +#else + /* Make sure memory is clean */ + mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0); + bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR; + bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR; +#endif +#endif + +#if !defined(SPD_opteed) && !defined(SPD_trusty) + enable_snvs_privileged_access(); +#endif + + bl31_tzc380_setup(); +} + +#define MAP_BL31_TOTAL \ + MAP_REGION_FLAT(BL31_START, BL31_SIZE, MT_MEMORY | MT_RW | MT_SECURE) +#define MAP_BL31_RO \ + MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, MT_MEMORY | MT_RO | MT_SECURE) +#define MAP_COHERENT_MEM \ + MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, \ + MT_DEVICE | MT_RW | MT_SECURE) +#define MAP_BL32_TOTAL \ + MAP_REGION_FLAT(BL32_BASE, BL32_SIZE, MT_MEMORY | MT_RW) + +void bl31_plat_arch_setup(void) +{ + const mmap_region_t bl_regions[] = { + MAP_BL31_TOTAL, + MAP_BL31_RO, +#if USE_COHERENT_MEM + MAP_COHERENT_MEM, +#endif +#if defined(SPD_opteed) || defined(SPD_trusty) + /* Map TEE memory */ + MAP_BL32_TOTAL, +#endif + {0} + }; + + setup_page_tables(bl_regions, imx_mmap); + enable_mmu_el3(0); +} + +void bl31_platform_setup(void) +{ + generic_delay_timer_init(); + + /* select the CKIL source to 32K OSC */ + mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1); + + /* Init the dram info */ + dram_info_init(SAVED_DRAM_TIMING_BASE); + + plat_gic_driver_init(); + plat_gic_init(); + + imx_gpc_init(); +} + +entry_point_info_t *bl31_plat_get_next_image_ep_info(unsigned int type) +{ + if (type == NON_SECURE) + return &bl33_image_ep_info; + if (type == SECURE) + return &bl32_image_ep_info; + + return NULL; +} + +unsigned int plat_get_syscnt_freq2(void) +{ + return COUNTER_FREQUENCY; +} + +#ifdef SPD_trusty +void plat_trusty_set_boot_args(aapcs64_params_t *args) +{ + args->arg0 = BL32_SIZE; + args->arg1 = BL32_BASE; + args->arg2 = 
TRUSTY_PARAMS_LEN_BYTES; +} +#endif diff --git a/plat/imx/imx8m/imx8mm/imx8mm_psci.c b/plat/imx/imx8m/imx8mm/imx8mm_psci.c new file mode 100644 index 0000000..815d3a2 --- /dev/null +++ b/plat/imx/imx8m/imx8mm/imx8mm_psci.c @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/mmio.h> +#include <lib/psci/psci.h> + +#include <gpc.h> +#include <imx8m_psci.h> +#include <plat_imx8.h> + +static const plat_psci_ops_t imx_plat_psci_ops = { + .pwr_domain_on = imx_pwr_domain_on, + .pwr_domain_on_finish = imx_pwr_domain_on_finish, + .pwr_domain_off = imx_pwr_domain_off, + .validate_ns_entrypoint = imx_validate_ns_entrypoint, + .validate_power_state = imx_validate_power_state, + .cpu_standby = imx_cpu_standby, + .pwr_domain_suspend = imx_domain_suspend, + .pwr_domain_suspend_finish = imx_domain_suspend_finish, + .pwr_domain_pwr_down_wfi = imx_pwr_domain_pwr_down_wfi, + .get_sys_suspend_power_state = imx_get_sys_suspend_power_state, + .system_reset = imx_system_reset, + .system_reset2 = imx_system_reset2, + .system_off = imx_system_off, +}; + +/* export the platform specific psci ops */ +int plat_setup_psci_ops(uintptr_t sec_entrypoint, + const plat_psci_ops_t **psci_ops) +{ + /* sec_entrypoint is used for warm reset */ + imx_mailbox_init(sec_entrypoint); + + *psci_ops = &imx_plat_psci_ops; + + return 0; +} diff --git a/plat/imx/imx8m/imx8mm/imx8mm_rotpk.S b/plat/imx/imx8m/imx8mm/imx8mm_rotpk.S new file mode 100644 index 0000000..544ee8a --- /dev/null +++ b/plat/imx/imx8m/imx8mm/imx8mm_rotpk.S @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + + .global imx8mm_rotpk_hash + .global imx8mm_rotpk_hash_end +imx8mm_rotpk_hash: + /* DER header */ + .byte 0x30, 0x31, 0x30, 0x0D, 0x06, 0x09, 0x60, 0x86, 0x48 + .byte 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20 + /* SHA256 */ + .incbin ROTPK_HASH +imx8mm_rotpk_hash_end: diff --git a/plat/imx/imx8m/imx8mm/imx8mm_trusted_boot.c b/plat/imx/imx8m/imx8mm/imx8mm_trusted_boot.c new file mode 100644 index 0000000..a4384d7 --- /dev/null +++ b/plat/imx/imx8m/imx8mm/imx8mm_trusted_boot.c @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <plat/common/platform.h> + +extern char imx8mm_rotpk_hash[], imx8mm_rotpk_hash_end[]; + +int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len, + unsigned int *flags) +{ + *key_ptr = imx8mm_rotpk_hash; + *key_len = imx8mm_rotpk_hash_end - imx8mm_rotpk_hash; + *flags = ROTPK_IS_HASH; + + return 0; +} + +int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr) +{ + *nv_ctr = 0; + + return 0; +} + +int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr) +{ + return 1; +} + +int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size) +{ + return get_mbedtls_heap_helper(heap_addr, heap_size); +} diff --git a/plat/imx/imx8m/imx8mm/include/gpc_reg.h b/plat/imx/imx8m/imx8mm/include/gpc_reg.h new file mode 100644 index 0000000..1a4eae5 --- /dev/null +++ b/plat/imx/imx8m/imx8mm/include/gpc_reg.h @@ -0,0 +1,129 @@ +/* + * Copyright 2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GPC_REG_H +#define GPC_REG_H + +#define LPCR_A53_BSC 0x0 +#define LPCR_A53_BSC2 0x108 +#define LPCR_A53_AD 0x4 +#define LPCR_M4 0x8 +#define SLPCR 0x14 +#define MST_CPU_MAPPING 0x18 +#define MLPCR 0x20 +#define PGC_ACK_SEL_A53 0x24 +#define IMR1_CORE0_A53 0x30 +#define IMR1_CORE1_A53 0x40 +#define IMR1_CORE2_A53 0x1C0 +#define IMR1_CORE3_A53 0x1D0 +#define IMR1_CORE0_M4 0x50 +#define SLT0_CFG 0xB0 +#define GPC_PU_PWRHSK 0x1FC +#define PGC_CPU_0_1_MAPPING 0xEC +#define CPU_PGC_UP_TRG 0xF0 +#define PU_PGC_UP_TRG 0xF8 +#define CPU_PGC_DN_TRG 0xFC +#define PU_PGC_DN_TRG 0x104 +#define LPS_CPU1 0x114 +#define A53_CORE0_PGC 0x800 +#define A53_PLAT_PGC 0x900 +#define PLAT_PGC_PCR 0x900 +#define NOC_PGC_PCR 0xa40 +#define PGC_SCU_TIMING 0x910 + +#define MASK_DSM_TRIGGER_A53 BIT(31) +#define IRQ_SRC_A53_WUP BIT(30) +#define IRQ_SRC_A53_WUP_SHIFT 30 +#define IRQ_SRC_C1 BIT(29) +#define IRQ_SRC_C0 BIT(28) +#define IRQ_SRC_C3 BIT(23) +#define IRQ_SRC_C2 BIT(22) +#define CPU_CLOCK_ON_LPM BIT(14) +#define A53_CLK_ON_LPM BIT(14) +#define MASTER0_LPM_HSK BIT(6) +#define MASTER1_LPM_HSK BIT(7) +#define MASTER2_LPM_HSK BIT(8) + +#define L2PGE BIT(31) +#define EN_L2_WFI_PDN BIT(5) +#define EN_PLAT_PDN BIT(4) + +#define SLPCR_EN_DSM BIT(31) +#define SLPCR_RBC_EN BIT(30) +#define SLPCR_A53_FASTWUP_STOP_MODE BIT(17) +#define SLPCR_A53_FASTWUP_WAIT_MODE BIT(16) +#define SLPCR_VSTBY BIT(2) +#define SLPCR_SBYOS BIT(1) +#define SLPCR_BYPASS_PMIC_READY BIT(0) +#define SLPCR_RBC_COUNT_SHIFT 24 +#define SLPCR_STBY_COUNT_SHFT 3 + +#define A53_DUMMY_PDN_ACK BIT(15) +#define A53_DUMMY_PUP_ACK BIT(31) +#define A53_PLAT_PDN_ACK BIT(2) +#define A53_PLAT_PUP_ACK BIT(18) +#define NOC_PDN_SLT_CTRL BIT(10) +#define NOC_PUP_SLT_CTRL BIT(11) +#define NOC_PGC_PDN_ACK BIT(3) +#define NOC_PGC_PUP_ACK BIT(19) + +#define PLAT_PUP_SLT_CTRL BIT(9) +#define PLAT_PDN_SLT_CTRL BIT(8) + +#define SLT_PLAT_PDN BIT(8) +#define SLT_PLAT_PUP BIT(9) + +#define MASTER1_MAPPING BIT(1) +#define MASTER2_MAPPING BIT(2) + +#define MIPI_PWR_REQ BIT(0) +#define PCIE_PWR_REQ BIT(1) +#define OTG1_PWR_REQ BIT(2) +#define OTG2_PWR_REQ BIT(3) +#define HSIOMIX_PWR_REQ BIT(4) +#define DDRMIX_PWR_REQ BIT(5) +#define GPU2D_PWR_REQ BIT(6) +#define GPUMIX_PWR_REQ BIT(7) +#define VPUMIX_PWR_REQ BIT(8) +#define GPU3D_PWR_REQ BIT(9) +#define DISPMIX_PWR_REQ BIT(10) +#define VPU_G1_PWR_REQ BIT(11) +#define VPU_G2_PWR_REQ BIT(12) +#define VPU_H1_PWR_REQ BIT(13) + +#define DDRMIX_ADB400_SYNC BIT(2) +#define HSIOMIX_ADB400_SYNC (0x3 << 5) +#define DISPMIX_ADB400_SYNC BIT(7) +#define VPUMIX_ADB400_SYNC 
BIT(8) +#define GPU3D_ADB400_SYNC BIT(9) +#define GPU2D_ADB400_SYNC BIT(10) +#define GPUMIX_ADB400_SYNC BIT(11) +#define DDRMIX_ADB400_ACK BIT(20) +#define HSIOMIX_ADB400_ACK (0x3 << 23) +#define DISPMIX_ADB400_ACK BIT(25) +#define VPUMIX_ADB400_ACK BIT(26) +#define GPU3D_ADB400_ACK BIT(27) +#define GPU2D_ADB400_ACK BIT(28) +#define GPUMIX_ADB400_ACK BIT(29) + +#define MIPI_PGC 0xc00 +#define PCIE_PGC 0xc40 +#define OTG1_PGC 0xc80 +#define OTG2_PGC 0xcc0 +#define HSIOMIX_PGC 0xd00 +#define DDRMIX_PGC 0xd40 +#define GPU2D_PGC 0xd80 +#define GPUMIX_PGC 0xdc0 +#define VPUMIX_PGC 0xe00 +#define GPU3D_PGC 0xe40 +#define DISPMIX_PGC 0xe80 +#define VPU_G1_PGC 0xec0 +#define VPU_G2_PGC 0xf00 +#define VPU_H1_PGC 0xf40 + +#define IRQ_IMR_NUM U(4) + +#endif /* GPC_REG_H */ diff --git a/plat/imx/imx8m/imx8mm/include/imx8mm_private.h b/plat/imx/imx8m/imx8mm/include/imx8mm_private.h new file mode 100644 index 0000000..5e0ef97 --- /dev/null +++ b/plat/imx/imx8m/imx8mm/include/imx8mm_private.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX8MM_PRIVATE_H +#define IMX8MM_PRIVATE_H + +/******************************************************************************* + * Function and variable prototypes + ******************************************************************************/ +void plat_imx_io_setup(void); + +#endif /* IMX8MM_PRIVATE_H */ diff --git a/plat/imx/imx8m/imx8mm/include/imx_sec_def.h b/plat/imx/imx8m/imx8mm/include/imx_sec_def.h new file mode 100644 index 0000000..6215983 --- /dev/null +++ b/plat/imx/imx8m/imx8mm/include/imx_sec_def.h @@ -0,0 +1,216 @@ +/* + * Copyright 2020-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX_SEC_DEF_H +#define IMX_SEC_DEF_H + +/* RDC MDA index */ +enum rdc_mda_idx { + RDC_MDA_A53 = 0, + RDC_MDA_M4 = 1, + RDC_MDA_PCIE_CTRL1 = 2, + RDC_MDA_SDMA3p = 3, + RDC_MDA_VPU_Decoders = 4, + RDC_MDA_LCDIF = 5, + RDC_MDA_CSI1 = 6, + RDC_MDA_SDMA3b = 7, + RDC_MDA_Coresight = 8, + RDC_MDA_DAP = 9, + RDC_MDA_CAAM = 10, + RDC_MDA_SDMA1p = 11, + RDC_MDA_SDMA1b = 12, + RDC_MDA_APBHDMA = 13, + RDC_MDA_NAND = 14, + RDC_MDA_uSDHC1 = 15, + RDC_MDA_uSDHC2 = 16, + RDC_MDA_uSDHC3 = 17, + RDC_MDA_GPU = 18, + RDC_MDA_USB1 = 19, + RDC_MDA_USB2 = 20, + RDC_MDA_TESTPORT = 21, + RDC_MDA_ENET1_TX = 22, + RDC_MDA_ENET1_RX = 23, + RDC_MDA_SDMA2p = 24, + RDC_MDA_SDMA2b = 24, + RDC_MDA_SDMA2_to_SPBA2 = 24, + RDC_MDA_SDMA3_to_SPBA2 = 25, + RDC_MDA_SDMA1_to_SPBA1 = 26, +}; + +/* RDC Peripherals index */ +enum rdc_pdap_idx { + RDC_PDAP_GPIO2 = 1, + RDC_PDAP_GPIO3 = 2, + RDC_PDAP_GPIO4 = 3, + RDC_PDAP_GPIO5 = 4, + RDC_PDAP_ANA_TSENSOR = 6, + RDC_PDAP_ANA_OSC = 7, + RDC_PDAP_WDOG1 = 8, + RDC_PDAP_WDOG2 = 9, + RDC_PDAP_WDOG3 = 10, + RDC_PDAP_SDMA3 = 11, + RDC_PDAP_SDMA2 = 12, + RDC_PDAP_GPT1 = 13, + RDC_PDAP_GPT2 = 14, + RDC_PDAP_GPT3 = 15, + RDC_PDAP_ROMCP = 17, + RDC_PDAP_IOMUXC = 19, + RDC_PDAP_IOMUXC_GPR = 20, + RDC_PDAP_OCOTP_CTRL = 21, + RDC_PDAP_ANA_PLL = 22, + RDC_PDAP_SNVS_HP = 23, + RDC_PDAP_CCM = 24, + RDC_PDAP_SRC = 25, + RDC_PDAP_GPC = 26, + RDC_PDAP_SEMAPHORE1 = 27, + RDC_PDAP_SEMAPHORE2 = 28, + RDC_PDAP_RDC = 29, + RDC_PDAP_CSU = 30, + RDC_PDAP_LCDIF = 32, + RDC_PDAP_MIPI_DSI = 33, + RDC_PDAP_CSI = 34, + RDC_PDAP_MIPI_CSI = 35, + RDC_PDAP_USB1 = 36, + RDC_PDAP_PWM1 = 38, + RDC_PDAP_PWM2 = 39, + RDC_PDAP_PWM3 = 40, + RDC_PDAP_PWM4 = 41, + RDC_PDAP_System_Counter_RD = 42, + RDC_PDAP_System_Counter_CMP = 43, + RDC_PDAP_System_Counter_CTRL = 44, + 
RDC_PDAP_GPT6 = 46, + RDC_PDAP_GPT5 = 47, + RDC_PDAP_GPT4 = 48, + RDC_PDAP_TZASC = 56, + RDC_PDAP_USB2 = 59, + RDC_PDAP_PERFMON1 = 60, + RDC_PDAP_PERFMON2 = 61, + RDC_PDAP_PLATFORM_CTRL = 62, + RDC_PDAP_QoSC = 63, + RDC_PDAP_I2C1 = 66, + RDC_PDAP_I2C2 = 67, + RDC_PDAP_I2C3 = 68, + RDC_PDAP_I2C4 = 69, + RDC_PDAP_UART4 = 70, + RDC_PDAP_MU_A = 74, + RDC_PDAP_MU_B = 75, + RDC_PDAP_SEMAPHORE_HS = 76, + RDC_PDAP_SAI1 = 78, + RDC_PDAP_SAI2 = 79, + RDC_PDAP_SAI3 = 80, + RDC_PDAP_SAI5 = 82, + RDC_PDAP_SAI6 = 83, + RDC_PDAP_uSDHC1 = 84, + RDC_PDAP_uSDHC2 = 85, + RDC_PDAP_uSDHC3 = 86, + RDC_PDAP_PCIE_PHY1 = 88, + RDC_PDAP_SPBA2 = 90, + RDC_PDAP_QSPI = 91, + RDC_PDAP_SDMA1 = 93, + RDC_PDAP_ENET1 = 94, + RDC_PDAP_SPDIF1 = 97, + RDC_PDAP_eCSPI1 = 98, + RDC_PDAP_eCSPI2 = 99, + RDC_PDAP_eCSPI3 = 100, + RDC_PDAP_MICFIL = 101, + RDC_PDAP_UART1 = 102, + RDC_PDAP_UART3 = 104, + RDC_PDAP_UART2 = 105, + RDC_PDAP_SPDIF2 = 106, + RDC_PDAP_SPBA1 = 111, + RDC_PDAP_CAAM = 114, +}; + +enum csu_csl_idx { + CSU_CSL_GPIO1 = 0, + CSU_CSL_GPIO2 = 1, + CSU_CSL_GPIO3 = 2, + CSU_CSL_GPIO4 = 3, + CSU_CSL_GPIO5 = 4, + CSU_CSL_ANA_TSENSOR = 6, + CSU_CSL_ANA_OSC = 7, + CSU_CSL_WDOG1 = 8, + CSU_CSL_WDOG2 = 9, + CSU_CSL_WDOG3 = 10, + CSU_CSL_SDMA2 = 12, + CSU_CSL_GPT1 = 13, + CSU_CSL_GPT2 = 14, + CSU_CSL_GPT3 = 15, + CSU_CSL_ROMCP = 17, + CSU_CSL_LCDIF = 18, + CSU_CSL_IOMUXC = 19, + CSU_CSL_IOMUXC_GPR = 20, + CSU_CSL_OCOTP_CTRL = 21, + CSU_CSL_ANA_PLL = 22, + CSU_CSL_SNVS_HP = 23, + CSU_CSL_CCM = 24, + CSU_CSL_SRC = 25, + CSU_CSL_GPC = 26, + CSU_CSL_SEMAPHORE1 = 27, + CSU_CSL_SEMAPHORE2 = 28, + CSU_CSL_RDC = 29, + CSU_CSL_CSU = 30, + CSU_CSL_DC_MST0 = 32, + CSU_CSL_DC_MST1 = 33, + CSU_CSL_DC_MST2 = 34, + CSU_CSL_DC_MST3 = 35, + CSU_CSL_PWM1 = 38, + CSU_CSL_PWM2 = 39, + CSU_CSL_PWM3 = 40, + CSU_CSL_PWM4 = 41, + CSU_CSL_System_Counter_RD = 42, + CSU_CSL_System_Counter_CMP = 43, + CSU_CSL_System_Counter_CTRL = 44, + CSU_CSL_GPT6 = 46, + CSU_CSL_GPT5 = 47, + CSU_CSL_GPT4 = 48, + CSU_CSL_TZASC = 56, + CSU_CSL_MTR = 59, + CSU_CSL_PERFMON1 = 60, + CSU_CSL_PERFMON2 = 61, + CSU_CSL_PLATFORM_CTRL = 62, + CSU_CSL_QoSC = 63, + CSU_CSL_MIPI_PHY = 64, + CSU_CSL_MIPI_DSI = 65, + CSU_CSL_I2C1 = 66, + CSU_CSL_I2C2 = 67, + CSU_CSL_I2C3 = 68, + CSU_CSL_I2C4 = 69, + CSU_CSL_UART4 = 70, + CSU_CSL_MIPI_CSI1 = 71, + CSU_CSL_MIPI_CSI_PHY1 = 72, + CSU_CSL_CSI1 = 73, + CSU_CSL_MU_A = 74, + CSU_CSL_MU_B = 75, + CSU_CSL_SEMAPHORE_HS = 76, + CSU_CSL_SAI1 = 78, + CSU_CSL_SAI6 = 80, + CSU_CSL_SAI5 = 81, + CSU_CSL_SAI4 = 82, + CSU_CSL_uSDHC1 = 84, + CSU_CSL_uSDHC2 = 85, + CSU_CSL_MIPI_CSI2 = 86, + CSU_CSL_MIPI_CSI_PHY2 = 87, + CSU_CSL_CSI2 = 88, + CSU_CSL_SPBA2 = 90, + CSU_CSL_QSPI = 91, + CSU_CSL_SDMA1 = 93, + CSU_CSL_ENET1 = 94, + CSU_CSL_SPDIF1 = 97, + CSU_CSL_eCSPI1 = 98, + CSU_CSL_eCSPI2 = 99, + CSU_CSL_eCSPI3 = 100, + CSU_CSL_UART1 = 102, + CSU_CSL_UART3 = 104, + CSU_CSL_UART2 = 105, + CSU_CSL_SPDIF2 = 106, + CSU_CSL_SAI2 = 107, + CSU_CSL_SAI3 = 108, + CSU_CSL_SPBA1 = 111, + CSU_CSL_CAAM = 114, +}; + +#endif /* IMX_SEC_DEF_H */ diff --git a/plat/imx/imx8m/imx8mm/include/platform_def.h b/plat/imx/imx8m/imx8mm/include/platform_def.h new file mode 100644 index 0000000..65749f3 --- /dev/null +++ b/plat/imx/imx8m/imx8mm/include/platform_def.h @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <common/tbbr/tbbr_img_def.h> +#include <lib/utils_def.h> +#include <plat/common/common_def.h> + +#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" +#define PLATFORM_LINKER_ARCH aarch64 + +#define PLATFORM_STACK_SIZE 0xB00 +#define CACHE_WRITEBACK_GRANULE 64 + +#define PLAT_PRIMARY_CPU U(0x0) +#define PLATFORM_MAX_CPU_PER_CLUSTER U(4) +#define PLATFORM_CLUSTER_COUNT U(1) +#define PLATFORM_CLUSTER0_CORE_COUNT U(4) +#define PLATFORM_CLUSTER1_CORE_COUNT U(0) +#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER0_CORE_COUNT) + +#define IMX_PWR_LVL0 MPIDR_AFFLVL0 +#define IMX_PWR_LVL1 MPIDR_AFFLVL1 +#define IMX_PWR_LVL2 MPIDR_AFFLVL2 + +#define PWR_DOMAIN_AT_MAX_LVL U(1) +#define PLAT_MAX_PWR_LVL U(2) +#define PLAT_MAX_OFF_STATE U(4) +#define PLAT_MAX_RET_STATE U(2) + +#define PLAT_WAIT_RET_STATE U(1) +#define PLAT_STOP_OFF_STATE U(3) + +#define PLAT_PRI_BITS U(3) +#define PLAT_SDEI_CRITICAL_PRI 0x10 +#define PLAT_SDEI_NORMAL_PRI 0x20 +#define PLAT_SDEI_SGI_PRIVATE U(9) + +#if defined(NEED_BL2) +#define BL2_BASE U(0x920000) +#define BL2_SIZE SZ_128K +#define BL2_LIMIT (BL2_BASE + BL2_SIZE) +#define BL31_BASE U(0x900000) +#define IMX_FIP_BASE U(0x40310000) +#define IMX_FIP_SIZE U(0x000300000) +#define IMX_FIP_LIMIT U(FIP_BASE + FIP_SIZE) + +/* Define FIP image location on eMMC */ +#define IMX_FIP_MMC_BASE U(0x100000) + +#define PLAT_IMX8MM_BOOT_MMC_BASE U(0x30B50000) /* SD */ +#else +#define BL31_BASE U(0x920000) +#endif + +#define BL31_SIZE SZ_128K +#define BL31_LIMIT (BL31_BASE + BL31_SIZE) + +/* non-secure uboot base */ +#define PLAT_NS_IMAGE_OFFSET U(0x40200000) +#define PLAT_NS_IMAGE_SIZE U(0x00200000) + +#define BL32_FDT_OVERLAY_ADDR (PLAT_NS_IMAGE_OFFSET + 0x3000000) + +/* GICv3 base address */ +#define PLAT_GICD_BASE U(0x38800000) +#define PLAT_GICR_BASE U(0x38880000) + +#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 32) +#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 32) + +#define MAX_XLAT_TABLES 8 +#define MAX_MMAP_REGIONS 16 + +#define HAB_RVT_BASE U(0x00000900) /* HAB_RVT for i.MX8MM */ + +#define IMX_BOOT_UART_CLK_IN_HZ 24000000 /* Select 24MHz oscillator */ + +#define PLAT_CRASH_UART_BASE IMX_BOOT_UART_BASE +#define PLAT_CRASH_UART_CLK_IN_HZ 24000000 +#define IMX_CONSOLE_BAUDRATE 115200 + +#define IMX_AIPSTZ1 U(0x301f0000) +#define IMX_AIPSTZ2 U(0x305f0000) +#define IMX_AIPSTZ3 U(0x309f0000) +#define IMX_AIPSTZ4 U(0x32df0000) + +#define IMX_AIPS_BASE U(0x30000000) +#define IMX_AIPS_SIZE U(0x3000000) +#define IMX_GPV_BASE U(0x32000000) +#define IMX_GPV_SIZE U(0x800000) +#define IMX_AIPS1_BASE U(0x30200000) +#define IMX_AIPS4_BASE U(0x32c00000) +#define IMX_ANAMIX_BASE U(0x30360000) +#define IMX_CCM_BASE U(0x30380000) +#define IMX_SRC_BASE U(0x30390000) +#define IMX_GPC_BASE U(0x303a0000) +#define IMX_RDC_BASE U(0x303d0000) +#define IMX_CSU_BASE U(0x303e0000) +#define IMX_WDOG_BASE U(0x30280000) +#define IMX_SNVS_BASE U(0x30370000) +#define IMX_NOC_BASE U(0x32700000) +#define IMX_TZASC_BASE U(0x32F80000) +#define IMX_IOMUX_GPR_BASE U(0x30340000) +#define IMX_CAAM_BASE U(0x30900000) +#define IMX_DDRC_BASE U(0x3d400000) +#define IMX_DDRPHY_BASE U(0x3c000000) +#define IMX_DDR_IPS_BASE U(0x3d000000) +#define IMX_DDR_IPS_SIZE U(0x1800000) +#define IMX_VPUMIX_BASE U(0x38330000) +#define IMX_VPUMIX_SIZE U(0x100000) +#define IMX_ROM_BASE U(0x0) +#define IMX_ROM_SIZE U(0x40000) +#define IMX_NS_OCRAM_BASE U(0x900000) +#define IMX_NS_OCRAM_SIZE U(0x20000) +#define IMX_CAAM_RAM_BASE U(0x100000) +#define IMX_CAAM_RAM_SIZE 
U(0x10000) +#define IMX_DRAM_BASE U(0x40000000) +#define IMX_DRAM_SIZE U(0xc0000000) + +#define GPV_BASE U(0x32000000) +#define GPV_SIZE U(0x800000) +#define IMX_GIC_BASE PLAT_GICD_BASE +#define IMX_GIC_SIZE U(0x200000) + +#define WDOG_WSR U(0x2) +#define WDOG_WCR_WDZST BIT(0) +#define WDOG_WCR_WDBG BIT(1) +#define WDOG_WCR_WDE BIT(2) +#define WDOG_WCR_WDT BIT(3) +#define WDOG_WCR_SRS BIT(4) +#define WDOG_WCR_WDA BIT(5) +#define WDOG_WCR_SRE BIT(6) +#define WDOG_WCR_WDW BIT(7) + +#define SRC_A53RCR0 U(0x4) +#define SRC_A53RCR1 U(0x8) +#define SRC_OTG1PHY_SCR U(0x20) +#define SRC_OTG2PHY_SCR U(0x24) +#define SRC_GPR1_OFFSET U(0x74) +#define SRC_GPR10_OFFSET U(0x98) +#define SRC_GPR10_PERSIST_SECONDARY_BOOT BIT(30) + +#define SNVS_LPCR U(0x38) +#define SNVS_LPCR_SRTC_ENV BIT(0) +#define SNVS_LPCR_DP_EN BIT(5) +#define SNVS_LPCR_TOP BIT(6) + +#define IOMUXC_GPR10 U(0x28) +#define GPR_TZASC_EN BIT(0) +#define GPR_TZASC_EN_LOCK BIT(16) + +#define ANAMIX_MISC_CTL U(0x124) +#define DRAM_PLL_CTRL (IMX_ANAMIX_BASE + 0x50) + +#define MAX_CSU_NUM U(64) + +#define OCRAM_S_BASE U(0x00180000) +#define OCRAM_S_SIZE U(0x8000) +#define OCRAM_S_LIMIT (OCRAM_S_BASE + OCRAM_S_SIZE) +#define SAVED_DRAM_TIMING_BASE OCRAM_S_BASE + +#define COUNTER_FREQUENCY 8000000 /* 8MHz */ + +#define IMX_WDOG_B_RESET + +#define MAX_IO_HANDLES 3U +#define MAX_IO_DEVICES 2U +#define MAX_IO_BLOCK_DEVICES 1U + +#define PLAT_IMX8M_DTO_BASE 0x53000000 +#define PLAT_IMX8M_DTO_MAX_SIZE 0x1000 +#define PLAT_IMX_EVENT_LOG_MAX_SIZE UL(0x400) diff --git a/plat/imx/imx8m/imx8mm/platform.mk b/plat/imx/imx8m/imx8mm/platform.mk new file mode 100644 index 0000000..97f4f24 --- /dev/null +++ b/plat/imx/imx8m/imx8mm/platform.mk @@ -0,0 +1,190 @@ +# +# Copyright (c) 2019-2023, ARM Limited and Contributors. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# +# +# Translation tables library +include lib/xlat_tables_v2/xlat_tables.mk + +PLAT_INCLUDES := -Iplat/imx/common/include \ + -Iplat/imx/imx8m/include \ + -Iplat/imx/imx8m/imx8mm/include \ + -Idrivers/imx/usdhc \ + -Iinclude/common/tbbr \ + -Iinclude/lib/libfdt + +# Include GICv3 driver files +include drivers/arm/gic/v3/gicv3.mk + +include lib/libfdt/libfdt.mk + +IMX_DRAM_SOURCES := plat/imx/imx8m/ddr/dram.c \ + plat/imx/imx8m/ddr/clock.c \ + plat/imx/imx8m/ddr/dram_retention.c \ + plat/imx/imx8m/ddr/ddr4_dvfs.c \ + plat/imx/imx8m/ddr/lpddr4_dvfs.c + +IMX_GIC_SOURCES := ${GICV3_SOURCES} \ + plat/common/plat_gicv3.c \ + plat/common/plat_psci_common.c \ + plat/imx/common/plat_imx8_gic.c + +BL31_SOURCES += plat/imx/common/imx8_helpers.S \ + plat/imx/imx8m/gpc_common.c \ + plat/imx/imx8m/imx_hab.c \ + plat/imx/imx8m/imx_aipstz.c \ + plat/imx/imx8m/imx_rdc.c \ + plat/imx/imx8m/imx8m_csu.c \ + plat/imx/imx8m/imx8m_caam.c \ + plat/imx/imx8m/imx8m_ccm.c \ + plat/imx/imx8m/imx8m_psci_common.c \ + plat/imx/imx8m/imx8m_snvs.c \ + plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c \ + plat/imx/imx8m/imx8mm/imx8mm_psci.c \ + plat/imx/imx8m/imx8mm/gpc.c \ + plat/imx/common/imx8_topology.c \ + plat/imx/common/imx_sip_handler.c \ + plat/imx/common/imx_sip_svc.c \ + plat/imx/common/imx_uart_console.S \ + lib/cpus/aarch64/cortex_a53.S \ + drivers/arm/tzc/tzc380.c \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + ${XLAT_TABLES_LIB_SRCS} \ + ${IMX_DRAM_SOURCES} \ + ${IMX_GIC_SOURCES} + +ifeq (${NEED_BL2},yes) +BL2_SOURCES += common/desc_image_load.c \ + common/fdt_wrappers.c \ + plat/imx/common/imx8_helpers.S \ + plat/imx/common/imx_uart_console.S \ + plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c \ + plat/imx/imx8m/imx8mm/gpc.c \ + plat/imx/imx8m/imx_aipstz.c \ + plat/common/plat_psci_common.c \ + lib/xlat_tables/aarch64/xlat_tables.c \ + lib/xlat_tables/xlat_tables_common.c \ + lib/cpus/aarch64/cortex_a53.S \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + ${PLAT_GIC_SOURCES} \ + ${PLAT_DRAM_SOURCES} \ + drivers/mmc/mmc.c \ + drivers/io/io_block.c \ + drivers/io/io_fip.c \ + drivers/io/io_memmap.c \ + drivers/io/io_storage.c \ + drivers/imx/usdhc/imx_usdhc.c \ + plat/imx/imx8m/imx8mm/imx8mm_bl2_mem_params_desc.c \ + plat/imx/common/imx_io_storage.c \ + plat/imx/imx8m/imx8m_image_load.c \ + lib/optee/optee_utils.c +endif + +# Add the build options to pack BLx images and kernel device tree +# in the FIP if the platform requires. 
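+# The conditionals below use TOOL_ADD_PAYLOAD/TOOL_ADD_IMG so that the
+# trusted-boot certificate, the optional BL32 extra images and the
+# HW_CONFIG device tree are only packed into the FIP when the
+# corresponding build variables are set.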
+ifneq ($(BL2),) +RESET_TO_BL31 := 0 +$(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/tb_fw.crt,--tb-fw-cert)) +endif +ifneq ($(BL32_EXTRA1),) +$(eval $(call TOOL_ADD_IMG,BL32_EXTRA1,--tos-fw-extra1)) +endif +ifneq ($(BL32_EXTRA2),) +$(eval $(call TOOL_ADD_IMG,BL32_EXTRA2,--tos-fw-extra2)) +endif +ifneq ($(HW_CONFIG),) +$(eval $(call TOOL_ADD_IMG,HW_CONFIG,--hw-config)) +endif + +ifeq (${NEED_BL2},yes) +$(eval $(call add_define,NEED_BL2)) +LOAD_IMAGE_V2 := 1 +# Non-TF Boot ROM +RESET_TO_BL2 := 1 +endif + +ifneq (${TRUSTED_BOARD_BOOT},0) + +include drivers/auth/mbedtls/mbedtls_crypto.mk +include drivers/auth/mbedtls/mbedtls_x509.mk + +AUTH_SOURCES := drivers/auth/auth_mod.c \ + drivers/auth/crypto_mod.c \ + drivers/auth/img_parser_mod.c \ + drivers/auth/tbbr/tbbr_cot_common.c \ + drivers/auth/tbbr/tbbr_cot_bl2.c + +BL2_SOURCES += ${AUTH_SOURCES} \ + plat/common/tbbr/plat_tbbr.c \ + plat/imx/imx8m/imx8mm/imx8mm_trusted_boot.c \ + plat/imx/imx8m/imx8mm/imx8mm_rotpk.S + +ROT_KEY = $(BUILD_PLAT)/rot_key.pem +ROTPK_HASH = $(BUILD_PLAT)/rotpk_sha256.bin + +$(eval $(call add_define_val,ROTPK_HASH,'"$(ROTPK_HASH)"')) +$(eval $(call MAKE_LIB_DIRS)) + +$(BUILD_PLAT)/bl2/imx8mm_rotpk.o: $(ROTPK_HASH) + +certificates: $(ROT_KEY) + +$(ROT_KEY): | $(BUILD_PLAT) + @echo " OPENSSL $@" + @if [ ! -f $(ROT_KEY) ]; then \ + ${OPENSSL_BIN_PATH}/openssl genrsa 2048 > $@ 2>/dev/null; \ + fi + +$(ROTPK_HASH): $(ROT_KEY) + @echo " OPENSSL $@" + $(Q)${OPENSSL_BIN_PATH}/openssl rsa -in $< -pubout -outform DER 2>/dev/null |\ + ${OPENSSL_BIN_PATH}/openssl dgst -sha256 -binary > $@ 2>/dev/null +endif + +ENABLE_PIE := 1 +USE_COHERENT_MEM := 1 +RESET_TO_BL31 := 1 +A53_DISABLE_NON_TEMPORAL_HINT := 0 + +ERRATA_A53_835769 := 1 +ERRATA_A53_843419 := 1 +ERRATA_A53_855873 := 1 + +BL32_BASE ?= 0xbe000000 +$(eval $(call add_define,BL32_BASE)) + +BL32_SIZE ?= 0x2000000 +$(eval $(call add_define,BL32_SIZE)) + +IMX_BOOT_UART_BASE ?= 0x30890000 +ifeq (${IMX_BOOT_UART_BASE},auto) + override IMX_BOOT_UART_BASE := 0 +endif +$(eval $(call add_define,IMX_BOOT_UART_BASE)) + +EL3_EXCEPTION_HANDLING := $(SDEI_SUPPORT) +ifeq (${SDEI_SUPPORT}, 1) +BL31_SOURCES += plat/imx/common/imx_ehf.c \ + plat/imx/common/imx_sdei.c +endif + +ifeq (${MEASURED_BOOT},1) + MEASURED_BOOT_MK := drivers/measured_boot/event_log/event_log.mk + $(info Including ${MEASURED_BOOT_MK}) + include ${MEASURED_BOOT_MK} + +ifneq (${MBOOT_EL_HASH_ALG}, sha256) + $(eval $(call add_define,TF_MBEDTLS_MBOOT_USE_SHA512)) +endif + +BL2_SOURCES += plat/imx/imx8m/imx8m_measured_boot.c \ + plat/imx/imx8m/imx8m_dyn_cfg_helpers.c \ + ${EVENT_LOG_SOURCES} +endif + +ifeq (${SPD},trusty) + BL31_CFLAGS += -DPLAT_XLAT_TABLES_DYNAMIC=1 +endif diff --git a/plat/imx/imx8m/imx8mn/gpc.c b/plat/imx/imx8m/imx8mn/gpc.c new file mode 100644 index 0000000..20c9a55 --- /dev/null +++ b/plat/imx/imx8m/imx8mn/gpc.c @@ -0,0 +1,207 @@ +/* + * Copyright 2019-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> +#include <stdint.h> +#include <stdlib.h> + +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/psci/psci.h> +#include <lib/smccc.h> +#include <services/std_svc.h> + +#include <gpc.h> +#include <imx_sip_svc.h> +#include <platform_def.h> + +#define CCGR(x) (0x4000 + (x) * 0x10) + +#define MIPI_PWR_REQ BIT(0) +#define OTG1_PWR_REQ BIT(2) +#define HSIOMIX_PWR_REQ BIT(4) +#define GPUMIX_PWR_REQ BIT(7) +#define DISPMIX_PWR_REQ BIT(10) + +#define HSIOMIX_ADB400_SYNC BIT(5) +#define DISPMIX_ADB400_SYNC BIT(7) +#define 
GPUMIX_ADB400_SYNC (0x5 << 9) +#define HSIOMIX_ADB400_ACK BIT(23) +#define DISPMIX_ADB400_ACK BIT(25) +#define GPUMIX_ADB400_ACK (0x5 << 27) + +#define MIPI_PGC 0xc00 +#define OTG1_PGC 0xc80 +#define HSIOMIX_PGC 0xd00 +#define GPUMIX_PGC 0xdc0 +#define DISPMIX_PGC 0xe80 + +enum pu_domain_id { + HSIOMIX, + OTG1 = 2, + GPUMIX = 4, + DISPMIX = 9, + MIPI, +}; + +/* PU domain, add some hole to minimize the uboot change */ +static struct imx_pwr_domain pu_domains[11] = { + [HSIOMIX] = IMX_MIX_DOMAIN(HSIOMIX, false), + [OTG1] = IMX_PD_DOMAIN(OTG1, true), + [GPUMIX] = IMX_MIX_DOMAIN(GPUMIX, false), + [DISPMIX] = IMX_MIX_DOMAIN(DISPMIX, false), + [MIPI] = IMX_PD_DOMAIN(MIPI, true), +}; + +static unsigned int pu_domain_status; + +void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on) +{ + if (domain_id > MIPI) { + return; + } + + struct imx_pwr_domain *pwr_domain = &pu_domains[domain_id]; + + if (on) { + if (pwr_domain->need_sync) { + pu_domain_status |= (1 << domain_id); + } + + /* HSIOMIX has no PU bit, so skip for it */ + if (domain_id != HSIOMIX) { + /* clear the PGC bit */ + mmio_clrbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1); + + /* power up the domain */ + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, pwr_domain->pwr_req); + + /* wait for power request done */ + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & pwr_domain->pwr_req) { + ; + } + } + + if (domain_id == DISPMIX) { + /* de-reset bus_blk clk and + * enable bus_blk clk + */ + mmio_write_32(0x32e28000, 0x100); + mmio_write_32(0x32e28004, 0x100); + } + + /* handle the ADB400 sync */ + if (pwr_domain->need_sync) { + /* clear adb power down request */ + mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync); + + /* wait for adb power request ack */ + while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) { + ; + } + } + } else { + pu_domain_status &= ~(1 << domain_id); + + if (domain_id == OTG1) { + return; + } + + /* handle the ADB400 sync */ + if (pwr_domain->need_sync) { + + /* set adb power down request */ + mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync); + + /* wait for adb power request ack */ + while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) { + ; + } + } + + /* HSIOMIX has no PU bit, so skip for it */ + if (domain_id != HSIOMIX) { + /* set the PGC bit */ + mmio_setbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1); + + /* power down the domain */ + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, pwr_domain->pwr_req); + + /* wait for power request done */ + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & pwr_domain->pwr_req) { + ; + } + } + } +} + +void imx_gpc_init(void) +{ + unsigned int val; + int i; + + /* mask all the wakeup irq by default */ + for (i = 0; i < 4; i++) { + mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0); + } + + val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC); + /* use GIC wake_request to wakeup C0~C3 from LPM */ + val |= CORE_WKUP_FROM_GIC; + /* clear the MASTER0 LPM handshake */ + val &= ~MASTER0_LPM_HSK; + mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val); + + /* clear MASTER1 & MASTER2 mapping in CPU0(A53) */ + mmio_clrbits_32(IMX_GPC_BASE + MST_CPU_MAPPING, (MASTER1_MAPPING | + MASTER2_MAPPING)); + + /* set all mix/PU in A53 domain */ + 
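/* each set bit in PGC_CPU_0_1_MAPPING assigns one PU/MIX PGC to the A53 (CPU0/1) partition; 0xffff covers the i.MX8MN domains (the i.MX8MP code below uses 0x3fffff for its larger set) */
+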
mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xffff); + + /* + * Set the CORE & SCU power up timing: + * SW = 0x1, SW2ISO = 0x1; + * the CPU CORE and SCU power up timing counter + * is drived by 32K OSC, each domain's power up + * latency is (SW + SW2ISO) / 32768 + */ + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(0) + 0x4, 0x401); + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(1) + 0x4, 0x401); + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(2) + 0x4, 0x401); + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(3) + 0x4, 0x401); + mmio_write_32(IMX_GPC_BASE + PLAT_PGC_PCR + 0x4, 0x401); + mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING, + (0x59 << TMC_TMR_SHIFT) | 0x5B | (0x2 << TRC1_TMC_SHIFT)); + + /* set DUMMY PDN/PUP ACK by default for A53 domain */ + mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, + A53_DUMMY_PUP_ACK | A53_DUMMY_PDN_ACK); + + /* clear DSM by default */ + val = mmio_read_32(IMX_GPC_BASE + SLPCR); + val &= ~SLPCR_EN_DSM; + /* enable the fast wakeup wait mode */ + val |= SLPCR_A53_FASTWUP_WAIT_MODE; + /* clear the RBC */ + val &= ~(0x3f << SLPCR_RBC_COUNT_SHIFT); + /* set the STBY_COUNT to 0x5, (128 * 30)us */ + val &= ~(0x7 << SLPCR_STBY_COUNT_SHFT); + val |= (0x5 << SLPCR_STBY_COUNT_SHFT); + mmio_write_32(IMX_GPC_BASE + SLPCR, val); + + /* + * USB PHY power up needs to make sure RESET bit in SRC is clear, + * otherwise, the PU power up bit in GPC will NOT self-cleared. + * only need to do it once. + */ + mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1); +} diff --git a/plat/imx/imx8m/imx8mn/imx8mn_bl31_setup.c b/plat/imx/imx8m/imx8mn/imx8mn_bl31_setup.c new file mode 100644 index 0000000..f9e430b --- /dev/null +++ b/plat/imx/imx8m/imx8mn/imx8mn_bl31_setup.c @@ -0,0 +1,269 @@ +/* + * Copyright 2019-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stdbool.h> + +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <context.h> +#include <drivers/arm/tzc380.h> +#include <drivers/console.h> +#include <drivers/generic_delay_timer.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/mmio.h> +#include <lib/xlat_tables/xlat_tables_v2.h> +#include <plat/common/platform.h> + +#include <dram.h> +#include <gpc.h> +#include <imx_aipstz.h> +#include <imx_uart.h> +#include <imx_rdc.h> +#include <imx8m_caam.h> +#include <imx8m_ccm.h> +#include <imx8m_csu.h> +#include <imx8m_snvs.h> +#include <platform_def.h> +#include <plat_imx8.h> + +#define TRUSTY_PARAMS_LEN_BYTES (4096*2) + +static const mmap_region_t imx_mmap[] = { + GIC_MAP, AIPS_MAP, OCRAM_S_MAP, DDRC_MAP, + CAAM_RAM_MAP, NS_OCRAM_MAP, ROM_MAP, DRAM_MAP, + {0}, +}; + +static const struct aipstz_cfg aipstz[] = { + {IMX_AIPSTZ1, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ2, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ3, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ4, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {0}, +}; + +static const struct imx_rdc_cfg rdc[] = { + /* Master domain assignment */ + RDC_MDAn(RDC_MDA_M7, DID1), + + /* peripherals domain permission */ + RDC_PDAPn(RDC_PDAP_UART4, D1R | D1W), + RDC_PDAPn(RDC_PDAP_UART2, D0R | D0W), + + /* memory region */ + RDC_MEM_REGIONn(16, 0x0, 0x0, 0xff), + RDC_MEM_REGIONn(17, 0x0, 0x0, 0xff), + RDC_MEM_REGIONn(18, 0x0, 0x0, 0xff), + + /* Sentinel */ + {0}, +}; + +static const struct imx_csu_cfg csu_cfg[] = { + /* peripherals csl setting */ + CSU_CSLx(CSU_CSL_OCRAM, CSU_SEC_LEVEL_2, 
UNLOCKED), + CSU_CSLx(CSU_CSL_OCRAM_S, CSU_SEC_LEVEL_2, UNLOCKED), + + /* master HP0~1 */ + + /* SA setting */ + + /* HP control setting */ + + /* Sentinel */ + {0} +}; + + +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; + +/* get SPSR for BL33 entry */ +static uint32_t get_spsr_for_bl33_entry(void) +{ + unsigned long el_status; + unsigned long mode; + uint32_t spsr; + + /* figure out what mode we enter the non-secure world */ + el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; + el_status &= ID_AA64PFR0_ELX_MASK; + + mode = (el_status) ? MODE_EL2 : MODE_EL1; + + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + +static void bl31_tzc380_setup(void) +{ + unsigned int val; + + val = mmio_read_32(IMX_IOMUX_GPR_BASE + 0x28); + if ((val & GPR_TZASC_EN) != GPR_TZASC_EN) + return; + + tzc380_init(IMX_TZASC_BASE); + + /* + * Need to substact offset 0x40000000 from CPU address when + * programming tzasc region for i.mx8mn. + */ + + /* Enable 1G-5G S/NS RW */ + tzc380_configure_region(0, 0x00000000, TZC_ATTR_REGION_SIZE(TZC_REGION_SIZE_4G) | + TZC_ATTR_REGION_EN_MASK | TZC_ATTR_SP_ALL); +} + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + unsigned int console_base = IMX_BOOT_UART_BASE; + static console_t console; + unsigned int val; + int i; + + /* Enable CSU NS access permission */ + for (i = 0; i < 64; i++) { + mmio_write_32(IMX_CSU_BASE + i * 4, 0x00ff00ff); + } + + imx_aipstz_init(aipstz); + + imx_rdc_init(rdc); + + imx_csu_init(csu_cfg); + + /* + * Configure the force_incr programmable bit in GPV_5 of PL301_display, which fixes + * partial write issue. The AXI2AHB bridge is used for masters that access the TCM + * through system bus. Please refer to errata ERR050362 for more information. + */ + mmio_setbits_32((GPV5_BASE_ADDR + FORCE_INCR_OFFSET), FORCE_INCR_BIT_MASK); + + /* config the ocram memory range for secure access */ + mmio_write_32(IMX_IOMUX_GPR_BASE + 0x2c, 0x4c1); + val = mmio_read_32(IMX_IOMUX_GPR_BASE + 0x2c); + mmio_write_32(IMX_IOMUX_GPR_BASE + 0x2c, val | 0x3DFF0000); + + if (console_base == 0U) { + console_base = imx8m_uart_get_base(); + } + + console_imx_uart_register(console_base, IMX_BOOT_UART_CLK_IN_HZ, + IMX_CONSOLE_BAUDRATE, &console); + /* This console is only used for boot stage */ + console_set_scope(&console, CONSOLE_FLAG_BOOT); + + imx8m_caam_init(); + + /* + * tell BL3-1 where the non-secure software image is located + * and the entry state information. 
+ */ + bl33_image_ep_info.pc = PLAT_NS_IMAGE_OFFSET; + bl33_image_ep_info.spsr = get_spsr_for_bl33_entry(); + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); + +#if defined(SPD_opteed) || defined(SPD_trusty) + /* Populate entry point information for BL32 */ + SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0); + SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE); + bl32_image_ep_info.pc = BL32_BASE; + bl32_image_ep_info.spsr = 0; + + /* Pass TEE base and size to bl33 */ + bl33_image_ep_info.args.arg1 = BL32_BASE; + bl33_image_ep_info.args.arg2 = BL32_SIZE; + +#ifdef SPD_trusty + bl32_image_ep_info.args.arg0 = BL32_SIZE; + bl32_image_ep_info.args.arg1 = BL32_BASE; +#else + /* Make sure memory is clean */ + mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0); + bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR; + bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR; +#endif +#endif + +#if !defined(SPD_opteed) && !defined(SPD_trusty) + enable_snvs_privileged_access(); +#endif + + bl31_tzc380_setup(); +} + +#define MAP_BL31_TOTAL \ + MAP_REGION_FLAT(BL31_START, BL31_SIZE, MT_MEMORY | MT_RW | MT_SECURE) +#define MAP_BL31_RO \ + MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, MT_MEMORY | MT_RO | MT_SECURE) +#define MAP_COHERENT_MEM \ + MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, \ + MT_DEVICE | MT_RW | MT_SECURE) +#define MAP_BL32_TOTAL \ + MAP_REGION_FLAT(BL32_BASE, BL32_SIZE, MT_MEMORY | MT_RW) + +void bl31_plat_arch_setup(void) +{ + const mmap_region_t bl_regions[] = { + MAP_BL31_TOTAL, + MAP_BL31_RO, +#if USE_COHERENT_MEM + MAP_COHERENT_MEM, +#endif +#if defined(SPD_opteed) || defined(SPD_trusty) + /* Map TEE memory */ + MAP_BL32_TOTAL, +#endif + {0} + }; + + setup_page_tables(bl_regions, imx_mmap); + enable_mmu_el3(0); +} + +void bl31_platform_setup(void) +{ + generic_delay_timer_init(); + + /* select the CKIL source to 32K OSC */ + mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1); + + /* Init the dram info */ + dram_info_init(SAVED_DRAM_TIMING_BASE); + + plat_gic_driver_init(); + plat_gic_init(); + + imx_gpc_init(); +} + +entry_point_info_t *bl31_plat_get_next_image_ep_info(unsigned int type) +{ + if (type == NON_SECURE) + return &bl33_image_ep_info; + if (type == SECURE) + return &bl32_image_ep_info; + + return NULL; +} + +unsigned int plat_get_syscnt_freq2(void) +{ + return COUNTER_FREQUENCY; +} + +#ifdef SPD_trusty +void plat_trusty_set_boot_args(aapcs64_params_t *args) +{ + args->arg0 = BL32_SIZE; + args->arg1 = BL32_BASE; + args->arg2 = TRUSTY_PARAMS_LEN_BYTES; +} +#endif diff --git a/plat/imx/imx8m/imx8mn/imx8mn_psci.c b/plat/imx/imx8m/imx8mn/imx8mn_psci.c new file mode 100644 index 0000000..f541fc1 --- /dev/null +++ b/plat/imx/imx8m/imx8mn/imx8mn_psci.c @@ -0,0 +1,44 @@ +/* + * Copyright 2019-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/mmio.h> +#include <lib/psci/psci.h> + +#include <gpc.h> +#include <imx8m_psci.h> +#include <plat_imx8.h> + +static const plat_psci_ops_t imx_plat_psci_ops = { + .pwr_domain_on = imx_pwr_domain_on, + .pwr_domain_on_finish = imx_pwr_domain_on_finish, + .pwr_domain_off = imx_pwr_domain_off, + .validate_ns_entrypoint = imx_validate_ns_entrypoint, + .validate_power_state = imx_validate_power_state, + .cpu_standby = imx_cpu_standby, + .pwr_domain_suspend = imx_domain_suspend, + .pwr_domain_suspend_finish = imx_domain_suspend_finish, + .pwr_domain_pwr_down_wfi = 
imx_pwr_domain_pwr_down_wfi, + .get_sys_suspend_power_state = imx_get_sys_suspend_power_state, + .system_reset = imx_system_reset, + .system_off = imx_system_off, +}; + +/* export the platform specific psci ops */ +int plat_setup_psci_ops(uintptr_t sec_entrypoint, + const plat_psci_ops_t **psci_ops) +{ + /* sec_entrypoint is used for warm reset */ + imx_mailbox_init(sec_entrypoint); + + *psci_ops = &imx_plat_psci_ops; + + return 0; +} diff --git a/plat/imx/imx8m/imx8mn/include/gpc_reg.h b/plat/imx/imx8m/imx8mn/include/gpc_reg.h new file mode 100644 index 0000000..8a81368 --- /dev/null +++ b/plat/imx/imx8m/imx8mn/include/gpc_reg.h @@ -0,0 +1,111 @@ +/* + * Copyright 2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GPC_REG_H +#define GPC_REG_H + +#define LPCR_A53_BSC 0x0 +#define LPCR_A53_BSC2 0x108 +#define LPCR_A53_AD 0x4 +#define LPCR_M4 0x8 +#define SLPCR 0x14 +#define MST_CPU_MAPPING 0x18 +#define MLPCR 0x20 +#define PGC_ACK_SEL_A53 0x24 +#define IMR1_CORE0_A53 0x30 +#define IMR1_CORE1_A53 0x40 +#define IMR1_CORE2_A53 0x1C0 +#define IMR1_CORE3_A53 0x1D0 +#define IMR1_CORE0_M4 0x50 +#define SLT0_CFG 0xB0 +#define GPC_PU_PWRHSK 0x1FC +#define PGC_CPU_0_1_MAPPING 0xEC +#define CPU_PGC_UP_TRG 0xF0 +#define PU_PGC_UP_TRG 0xF8 +#define CPU_PGC_DN_TRG 0xFC +#define PU_PGC_DN_TRG 0x104 +#define LPS_CPU1 0x114 +#define A53_CORE0_PGC 0x800 +#define A53_PLAT_PGC 0x900 +#define PLAT_PGC_PCR 0x900 +#define NOC_PGC_PCR 0xa40 +#define PGC_SCU_TIMING 0x910 + +#define MASK_DSM_TRIGGER_A53 BIT(31) +#define IRQ_SRC_A53_WUP BIT(30) +#define IRQ_SRC_A53_WUP_SHIFT 30 +#define IRQ_SRC_C1 BIT(29) +#define IRQ_SRC_C0 BIT(28) +#define IRQ_SRC_C3 BIT(23) +#define IRQ_SRC_C2 BIT(22) +#define CPU_CLOCK_ON_LPM BIT(14) +#define A53_CLK_ON_LPM BIT(14) +#define MASTER0_LPM_HSK BIT(6) +#define MASTER1_LPM_HSK BIT(7) +#define MASTER2_LPM_HSK BIT(8) + +#define L2PGE BIT(31) +#define EN_L2_WFI_PDN BIT(5) +#define EN_PLAT_PDN BIT(4) + +#define SLPCR_EN_DSM BIT(31) +#define SLPCR_RBC_EN BIT(30) +#define SLPCR_A53_FASTWUP_STOP_MODE BIT(17) +#define SLPCR_A53_FASTWUP_WAIT_MODE BIT(16) +#define SLPCR_VSTBY BIT(2) +#define SLPCR_SBYOS BIT(1) +#define SLPCR_BYPASS_PMIC_READY BIT(0) +#define SLPCR_RBC_COUNT_SHIFT 24 +#define SLPCR_STBY_COUNT_SHFT 3 + +#define A53_DUMMY_PDN_ACK BIT(15) +#define A53_DUMMY_PUP_ACK BIT(31) +#define A53_PLAT_PDN_ACK BIT(2) +#define A53_PLAT_PUP_ACK BIT(18) +#define NOC_PDN_SLT_CTRL BIT(10) +#define NOC_PUP_SLT_CTRL BIT(11) +#define NOC_PGC_PDN_ACK BIT(3) +#define NOC_PGC_PUP_ACK BIT(19) + +#define PLAT_PUP_SLT_CTRL BIT(9) +#define PLAT_PDN_SLT_CTRL BIT(8) + +#define SLT_PLAT_PDN BIT(8) +#define SLT_PLAT_PUP BIT(9) + +#define MASTER1_MAPPING BIT(1) +#define MASTER2_MAPPING BIT(2) + +#define TMR_TCD2_SHIFT 0 +#define TMC_TMR_SHIFT 10 +#define TRC1_TMC_SHIFT 20 + +#define MIPI_PWR_REQ BIT(0) +#define OTG1_PWR_REQ BIT(2) +#define HSIOMIX_PWR_REQ BIT(4) +#define DDRMIX_PWR_REQ BIT(5) +#define GPUMIX_PWR_REQ BIT(7) +#define DISPMIX_PWR_REQ BIT(10) + +#define DDRMIX_ADB400_SYNC BIT(2) +#define HSIOMIX_ADB400_SYNC BIT(5) +#define DISPMIX_ADB400_SYNC BIT(7) +#define GPUMIX_ADB400_SYNC (0x5 << 9) +#define DDRMIX_ADB400_ACK BIT(20) +#define HSIOMIX_ADB400_ACK BIT(23) +#define DISPMIX_ADB400_ACK BIT(25) +#define GPUMIX_ADB400_ACK (0x5 << 27) + +#define MIPI_PGC 0xc00 +#define OTG1_PGC 0xc80 +#define HSIOMIX_PGC 0xd00 +#define DDRMIX_PGC 0xd40 +#define GPUMIX_PGC 0xdc0 +#define DISPMIX_PGC 0xe80 + +#define IRQ_IMR_NUM U(4) + +#endif /* GPC_REG_H */ diff --git 
a/plat/imx/imx8m/imx8mn/include/imx_sec_def.h b/plat/imx/imx8m/imx8mn/include/imx_sec_def.h new file mode 100644 index 0000000..0ef14a9 --- /dev/null +++ b/plat/imx/imx8m/imx8mn/include/imx_sec_def.h @@ -0,0 +1,210 @@ +/* + * Copyright 2020-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX_SEC_DEF_H +#define IMX_SEC_DEF_H + +/* RDC MDA index */ +enum rdc_mda_idx { + RDC_MDA_A53 = 0, + RDC_MDA_M7 = 1, + RDC_MDA_SDMA3p = 3, + RDC_MDA_LCDIF = 5, + RDC_MDA_ISI = 6, + RDC_MDA_SDMA3b = 7, + RDC_MDA_Coresight = 8, + RDC_MDA_DAP = 9, + RDC_MDA_CAAM = 10, + RDC_MDA_SDMA1p = 11, + RDC_MDA_SDMA1b = 12, + RDC_MDA_APBHDMA = 13, + RDC_MDA_RAWNAND = 14, + RDC_MDA_uSDHC1 = 15, + RDC_MDA_uSDHC2 = 16, + RDC_MDA_uSDHC3 = 17, + RDC_MDA_GPU = 18, + RDC_MDA_USB1 = 19, + RDC_MDA_TESTPORT = 21, + RDC_MDA_ENET1_TX = 22, + RDC_MDA_ENET1_RX = 23, + RDC_MDA_SDMA2 = 24, +}; + +/* RDC Peripherals index */ +enum rdc_pdap_idx { + RDC_PDAP_GPIO1 = 0, + RDC_PDAP_GPIO2 = 1, + RDC_PDAP_GPIO3 = 2, + RDC_PDAP_GPIO4 = 3, + RDC_PDAP_GPIO5 = 4, + RDC_PDAP_ANA_TSENSOR = 6, + RDC_PDAP_ANA_OSC = 7, + RDC_PDAP_WDOG1 = 8, + RDC_PDAP_WDOG2 = 9, + RDC_PDAP_WDOG3 = 10, + RDC_PDAP_SDMA3 = 11, + RDC_PDAP_SDMA2 = 12, + RDC_PDAP_GPT1 = 13, + RDC_PDAP_GPT2 = 14, + RDC_PDAP_GPT3 = 15, + RDC_PDAP_ROMCP = 17, + RDC_PDAP_IOMUXC = 19, + RDC_PDAP_IOMUXC_GPR = 20, + RDC_PDAP_OCOTP_CTRL = 21, + RDC_PDAP_ANA_PLL = 22, + RDC_PDAP_SNVS_HP = 23, + RDC_PDAP_CCM = 24, + RDC_PDAP_SRC = 25, + RDC_PDAP_GPC = 26, + RDC_PDAP_SEMAPHORE1 = 27, + RDC_PDAP_SEMAPHORE2 = 28, + RDC_PDAP_RDC = 29, + RDC_PDAP_CSU = 30, + RDC_PDAP_LCDIF = 32, + RDC_PDAP_MIPI_DSI = 33, + RDC_PDAP_ISI = 34, + RDC_PDAP_MIPI_CSI = 35, + RDC_PDAP_USB1 = 36, + RDC_PDAP_PWM1 = 38, + RDC_PDAP_PWM2 = 39, + RDC_PDAP_PWM3 = 40, + RDC_PDAP_PWM4 = 41, + RDC_PDAP_System_Counter_RD = 42, + RDC_PDAP_System_Counter_CMP = 43, + RDC_PDAP_System_Counter_CTRL = 44, + RDC_PDAP_GPT6 = 46, + RDC_PDAP_GPT5 = 47, + RDC_PDAP_GPT4 = 48, + RDC_PDAP_TZASC = 56, + RDC_PDAP_PERFMON1 = 60, + RDC_PDAP_PERFMON2 = 61, + RDC_PDAP_PLATFORM_CTRL = 62, + RDC_PDAP_QoSC = 63, + RDC_PDAP_I2C1 = 66, + RDC_PDAP_I2C2 = 67, + RDC_PDAP_I2C3 = 68, + RDC_PDAP_I2C4 = 69, + RDC_PDAP_UART4 = 70, + RDC_PDAP_MU_A = 74, + RDC_PDAP_MU_B = 75, + RDC_PDAP_SEMAPHORE_HS = 76, + RDC_PDAP_SAI2 = 79, + RDC_PDAP_SAI3 = 80, + RDC_PDAP_SAI5 = 82, + RDC_PDAP_SAI6 = 83, + RDC_PDAP_uSDHC1 = 84, + RDC_PDAP_uSDHC2 = 85, + RDC_PDAP_uSDHC3 = 86, + RDC_PDAP_SAI7 = 87, + RDC_PDAP_SPBA2 = 90, + RDC_PDAP_QSPI = 91, + RDC_PDAP_SDMA1 = 93, + RDC_PDAP_ENET1 = 94, + RDC_PDAP_SPDIF1 = 97, + RDC_PDAP_eCSPI1 = 98, + RDC_PDAP_eCSPI2 = 99, + RDC_PDAP_eCSPI3 = 100, + RDC_PDAP_MICFIL = 101, + RDC_PDAP_UART1 = 102, + RDC_PDAP_UART3 = 104, + RDC_PDAP_UART2 = 105, + RDC_PDAP_ASRC = 107, + RDC_PDAP_SPBA1 = 111, + RDC_PDAP_CAAM = 114, +}; + +enum csu_csl_idx { + CSU_CSL_GPIO1 = 0, + CSU_CSL_GPIO2 = 1, + CSU_CSL_GPIO3 = 2, + CSU_CSL_GPIO4 = 3, + CSU_CSL_GPIO5 = 4, + CSU_CSL_ANA_TSENSOR = 6, + CSU_CSL_ANA_OSC = 7, + CSU_CSL_WDOG1 = 8, + CSU_CSL_WDOG2 = 9, + CSU_CSL_WDOG3 = 10, + CSU_CSL_SDMA2 = 12, + CSU_CSL_GPT1 = 13, + CSU_CSL_GPT2 = 14, + CSU_CSL_GPT3 = 15, + CSU_CSL_ROMCP = 17, + CSU_CSL_LCDIF = 18, + CSU_CSL_IOMUXC = 19, + CSU_CSL_IOMUXC_GPR = 20, + CSU_CSL_OCOTP_CTRL = 21, + CSU_CSL_ANA_PLL = 22, + CSU_CSL_SNVS_HP = 23, + CSU_CSL_CCM = 24, + CSU_CSL_SRC = 25, + CSU_CSL_GPC = 26, + CSU_CSL_SEMAPHORE1 = 27, + CSU_CSL_SEMAPHORE2 = 28, + CSU_CSL_RDC = 29, + CSU_CSL_CSU = 30, + CSU_CSL_DC_MST0 = 32, + CSU_CSL_DC_MST1 = 33, + CSU_CSL_DC_MST2 = 34, + 
CSU_CSL_DC_MST3 = 35, + CSU_CSL_PWM1 = 38, + CSU_CSL_PWM2 = 39, + CSU_CSL_PWM3 = 40, + CSU_CSL_PWM4 = 41, + CSU_CSL_System_Counter_RD = 42, + CSU_CSL_System_Counter_CMP = 43, + CSU_CSL_System_Counter_CTRL = 44, + CSU_CSL_GPT6 = 46, + CSU_CSL_GPT5 = 47, + CSU_CSL_GPT4 = 48, + CSU_CSL_TZASC = 56, + CSU_CSL_MTR = 59, + CSU_CSL_PERFMON1 = 60, + CSU_CSL_PERFMON2 = 61, + CSU_CSL_PLATFORM_CTRL = 62, + CSU_CSL_QoSC = 63, + CSU_CSL_MIPI_PHY = 64, + CSU_CSL_MIPI_DSI = 65, + CSU_CSL_I2C1 = 66, + CSU_CSL_I2C2 = 67, + CSU_CSL_I2C3 = 68, + CSU_CSL_I2C4 = 69, + CSU_CSL_UART4 = 70, + CSU_CSL_MIPI_CSI1 = 71, + CSU_CSL_MIPI_CSI_PHY1 = 72, + CSU_CSL_CSI1 = 73, + CSU_CSL_MU_A = 74, + CSU_CSL_MU_B = 75, + CSU_CSL_SEMAPHORE_HS = 76, + CSU_CSL_SAI1 = 78, + CSU_CSL_SAI6 = 80, + CSU_CSL_SAI5 = 81, + CSU_CSL_SAI4 = 82, + CSU_CSL_uSDHC1 = 84, + CSU_CSL_uSDHC2 = 85, + CSU_CSL_MIPI_CSI2 = 86, + CSU_CSL_MIPI_CSI_PHY2 = 87, + CSU_CSL_CSI2 = 88, + CSU_CSL_SPBA2 = 90, + CSU_CSL_QSPI = 91, + CSU_CSL_SDMA1 = 93, + CSU_CSL_ENET1 = 94, + CSU_CSL_SPDIF1 = 97, + CSU_CSL_eCSPI1 = 98, + CSU_CSL_eCSPI2 = 99, + CSU_CSL_eCSPI3 = 100, + CSU_CSL_UART1 = 102, + CSU_CSL_UART3 = 104, + CSU_CSL_UART2 = 105, + CSU_CSL_SPDIF2 = 106, + CSU_CSL_SAI2 = 107, + CSU_CSL_SAI3 = 108, + CSU_CSL_SPBA1 = 111, + CSU_CSL_CAAM = 114, + CSU_CSL_OCRAM = 118, + CSU_CSL_OCRAM_S = 119, +}; + +#endif /* IMX_SEC_DEF_H */ diff --git a/plat/imx/imx8m/imx8mn/include/platform_def.h b/plat/imx/imx8m/imx8mn/include/platform_def.h new file mode 100644 index 0000000..d5176dd --- /dev/null +++ b/plat/imx/imx8m/imx8mn/include/platform_def.h @@ -0,0 +1,166 @@ +/* + * Copyright 2020-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef PLATFORM_DEF_H +#define PLATFORM_DEF_H + +#include <lib/utils_def.h> +#include <lib/xlat_tables/xlat_tables_v2.h> +#include <plat/common/common_def.h> + +#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" +#define PLATFORM_LINKER_ARCH aarch64 + +#define PLATFORM_STACK_SIZE 0xB00 +#define CACHE_WRITEBACK_GRANULE 64 + +#define PLAT_PRIMARY_CPU U(0x0) +#define PLATFORM_MAX_CPU_PER_CLUSTER U(4) +#define PLATFORM_CLUSTER_COUNT U(1) +#define PLATFORM_CLUSTER0_CORE_COUNT U(4) +#define PLATFORM_CLUSTER1_CORE_COUNT U(0) +#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER0_CORE_COUNT) + +#define IMX_PWR_LVL0 MPIDR_AFFLVL0 +#define IMX_PWR_LVL1 MPIDR_AFFLVL1 +#define IMX_PWR_LVL2 MPIDR_AFFLVL2 + +#define PWR_DOMAIN_AT_MAX_LVL U(1) +#define PLAT_MAX_PWR_LVL U(2) +#define PLAT_MAX_OFF_STATE U(4) +#define PLAT_MAX_RET_STATE U(2) + +#define PLAT_WAIT_RET_STATE U(1) +#define PLAT_STOP_OFF_STATE U(3) + +#define PLAT_PRI_BITS U(3) +#define PLAT_SDEI_CRITICAL_PRI 0x10 +#define PLAT_SDEI_NORMAL_PRI 0x20 +#define PLAT_SDEI_SGI_PRIVATE U(9) + +#define BL31_BASE U(0x960000) +#define BL31_SIZE SZ_128K +#define BL31_LIMIT (BL31_BASE + BL31_SIZE) + +/* non-secure uboot base */ +#define PLAT_NS_IMAGE_OFFSET U(0x40200000) + +#define BL32_FDT_OVERLAY_ADDR (PLAT_NS_IMAGE_OFFSET + 0x3000000) + +/* GICv3 base address */ +#define PLAT_GICD_BASE U(0x38800000) +#define PLAT_GICR_BASE U(0x38880000) + +#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 32) +#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 32) + +#define MAX_XLAT_TABLES 8 +#define MAX_MMAP_REGIONS 16 + +#define HAB_RVT_BASE U(0x00000900) /* HAB_RVT for i.MX8MM */ + +#define IMX_BOOT_UART_CLK_IN_HZ 24000000 /* Select 24MHz oscillator */ +#define PLAT_CRASH_UART_BASE IMX_BOOT_UART_BASE +#define PLAT_CRASH_UART_CLK_IN_HZ 24000000 +#define IMX_CONSOLE_BAUDRATE 115200 + +#define IMX_AIPSTZ1 U(0x301f0000) +#define 
IMX_AIPSTZ2 U(0x305f0000) +#define IMX_AIPSTZ3 U(0x309f0000) +#define IMX_AIPSTZ4 U(0x32df0000) + +#define IMX_AIPS_BASE U(0x30000000) +#define IMX_AIPS_SIZE U(0x3000000) +#define IMX_GPV_BASE U(0x32000000) +#define IMX_GPV_SIZE U(0x800000) +#define IMX_AIPS1_BASE U(0x30200000) +#define IMX_AIPS4_BASE U(0x32c00000) +#define IMX_ANAMIX_BASE U(0x30360000) +#define IMX_CCM_BASE U(0x30380000) +#define IMX_SRC_BASE U(0x30390000) +#define IMX_GPC_BASE U(0x303a0000) +#define IMX_RDC_BASE U(0x303d0000) +#define IMX_CSU_BASE U(0x303e0000) +#define IMX_WDOG_BASE U(0x30280000) +#define IMX_SNVS_BASE U(0x30370000) +#define IMX_NOC_BASE U(0x32700000) +#define IMX_TZASC_BASE U(0x32F80000) +#define IMX_IOMUX_GPR_BASE U(0x30340000) +#define IMX_CAAM_BASE U(0x30900000) +#define IMX_DDRC_BASE U(0x3d400000) +#define IMX_DDRPHY_BASE U(0x3c000000) +#define IMX_DDR_IPS_BASE U(0x3d000000) +#define IMX_DDR_IPS_SIZE U(0x1800000) +#define IMX_ROM_BASE U(0x0) +#define IMX_ROM_SIZE U(0x40000) +#define IMX_NS_OCRAM_BASE U(0x900000) +#define IMX_NS_OCRAM_SIZE U(0x60000) +#define IMX_CAAM_RAM_BASE U(0x100000) +#define IMX_CAAM_RAM_SIZE U(0x10000) +#define IMX_DRAM_BASE U(0x40000000) +#define IMX_DRAM_SIZE U(0xc0000000) + +#define IMX_GIC_BASE PLAT_GICD_BASE +#define IMX_GIC_SIZE U(0x200000) + +#define WDOG_WSR U(0x2) +#define WDOG_WCR_WDZST BIT(0) +#define WDOG_WCR_WDBG BIT(1) +#define WDOG_WCR_WDE BIT(2) +#define WDOG_WCR_WDT BIT(3) +#define WDOG_WCR_SRS BIT(4) +#define WDOG_WCR_WDA BIT(5) +#define WDOG_WCR_SRE BIT(6) +#define WDOG_WCR_WDW BIT(7) + +#define SRC_A53RCR0 U(0x4) +#define SRC_A53RCR1 U(0x8) +#define SRC_OTG1PHY_SCR U(0x20) +#define SRC_GPR1_OFFSET U(0x74) + +#define SNVS_LPCR U(0x38) +#define SNVS_LPCR_SRTC_ENV BIT(0) +#define SNVS_LPCR_DP_EN BIT(5) +#define SNVS_LPCR_TOP BIT(6) + +#define IOMUXC_GPR10 U(0x28) +#define GPR_TZASC_EN BIT(0) +#define GPR_TZASC_EN_LOCK BIT(16) + +#define ANAMIX_MISC_CTL U(0x124) +#define DRAM_PLL_CTRL (IMX_ANAMIX_BASE + 0x50) + +#define MAX_CSU_NUM U(64) + +#define OCRAM_S_BASE U(0x00180000) +#define OCRAM_S_SIZE U(0x8000) +#define OCRAM_S_LIMIT (OCRAM_S_BASE + OCRAM_S_SIZE) +#define SAVED_DRAM_TIMING_BASE OCRAM_S_BASE + +#define COUNTER_FREQUENCY 8000000 /* 8MHz */ + +#define GPV5_BASE_ADDR U(0x32500000) +#define FORCE_INCR_OFFSET U(0x4044) +#define FORCE_INCR_BIT_MASK U(0x2) + +#define IMX_WDOG_B_RESET + +#define GIC_MAP MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE, MT_DEVICE | MT_RW) +#define AIPS_MAP MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE, MT_DEVICE | MT_RW) /* AIPS map */ +#define OCRAM_S_MAP MAP_REGION_FLAT(OCRAM_S_BASE, OCRAM_S_SIZE, MT_DEVICE | MT_RW) /* OCRAM_S */ +#define DDRC_MAP MAP_REGION_FLAT(IMX_DDRPHY_BASE, IMX_DDR_IPS_SIZE, MT_DEVICE | MT_RW) /* DDRMIX */ +#define CAAM_RAM_MAP MAP_REGION_FLAT(IMX_CAAM_RAM_BASE, IMX_CAAM_RAM_SIZE, MT_MEMORY | MT_RW) /* CAMM RAM */ +#define NS_OCRAM_MAP MAP_REGION_FLAT(IMX_NS_OCRAM_BASE, IMX_NS_OCRAM_SIZE, MT_MEMORY | MT_RW) /* NS OCRAM */ +#define ROM_MAP MAP_REGION_FLAT(IMX_ROM_BASE, IMX_ROM_SIZE, MT_MEMORY | MT_RO) /* ROM code */ + +/* + * Note: DRAM region is mapped with entire size available and uses MT_RW + * attributes. + * See details in docs/plat/imx8m.rst "High Assurance Boot (HABv4)" section + * for explanation of this mapping scheme. 
+ */ +#define DRAM_MAP MAP_REGION_FLAT(IMX_DRAM_BASE, IMX_DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS) /* DRAM */ + +#endif /* platform_def.h */ diff --git a/plat/imx/imx8m/imx8mn/platform.mk b/plat/imx/imx8m/imx8mn/platform.mk new file mode 100644 index 0000000..e0826e2 --- /dev/null +++ b/plat/imx/imx8m/imx8mn/platform.mk @@ -0,0 +1,82 @@ +# +# Copyright 2019-2022 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# + +PLAT_INCLUDES := -Iplat/imx/common/include \ + -Iplat/imx/imx8m/include \ + -Iplat/imx/imx8m/imx8mn/include +# Translation tables library +include lib/xlat_tables_v2/xlat_tables.mk + +# Include GICv3 driver files +include drivers/arm/gic/v3/gicv3.mk + +IMX_DRAM_SOURCES := plat/imx/imx8m/ddr/dram.c \ + plat/imx/imx8m/ddr/clock.c \ + plat/imx/imx8m/ddr/dram_retention.c \ + plat/imx/imx8m/ddr/ddr4_dvfs.c \ + plat/imx/imx8m/ddr/lpddr4_dvfs.c + + +IMX_GIC_SOURCES := ${GICV3_SOURCES} \ + plat/common/plat_gicv3.c \ + plat/common/plat_psci_common.c \ + plat/imx/common/plat_imx8_gic.c + +BL31_SOURCES += plat/imx/common/imx8_helpers.S \ + plat/imx/imx8m/gpc_common.c \ + plat/imx/imx8m/imx_hab.c \ + plat/imx/imx8m/imx_aipstz.c \ + plat/imx/imx8m/imx_rdc.c \ + plat/imx/imx8m/imx8m_caam.c \ + plat/imx/imx8m/imx8m_ccm.c \ + plat/imx/imx8m/imx8m_csu.c \ + plat/imx/imx8m/imx8m_psci_common.c \ + plat/imx/imx8m/imx8m_snvs.c \ + plat/imx/imx8m/imx8mn/imx8mn_bl31_setup.c \ + plat/imx/imx8m/imx8mn/imx8mn_psci.c \ + plat/imx/imx8m/imx8mn/gpc.c \ + plat/imx/common/imx8_topology.c \ + plat/imx/common/imx_sip_handler.c \ + plat/imx/common/imx_sip_svc.c \ + plat/imx/common/imx_uart_console.S \ + lib/cpus/aarch64/cortex_a53.S \ + drivers/arm/tzc/tzc380.c \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + ${IMX_DRAM_SOURCES} \ + ${IMX_GIC_SOURCES} \ + ${XLAT_TABLES_LIB_SRCS} + +ENABLE_PIE := 1 +USE_COHERENT_MEM := 1 +RESET_TO_BL31 := 1 +A53_DISABLE_NON_TEMPORAL_HINT := 0 + +ERRATA_A53_835769 := 1 +ERRATA_A53_843419 := 1 +ERRATA_A53_855873 := 1 + +BL32_BASE ?= 0xbe000000 +$(eval $(call add_define,BL32_BASE)) + +BL32_SIZE ?= 0x2000000 +$(eval $(call add_define,BL32_SIZE)) + +IMX_BOOT_UART_BASE ?= 0x30890000 +ifeq (${IMX_BOOT_UART_BASE},auto) + override IMX_BOOT_UART_BASE := 0 +endif +$(eval $(call add_define,IMX_BOOT_UART_BASE)) + +EL3_EXCEPTION_HANDLING := $(SDEI_SUPPORT) +ifeq (${SDEI_SUPPORT}, 1) +BL31_SOURCES += plat/imx/common/imx_ehf.c \ + plat/imx/common/imx_sdei.c +endif + +ifeq (${SPD},trusty) + BL31_CFLAGS += -DPLAT_XLAT_TABLES_DYNAMIC=1 +endif diff --git a/plat/imx/imx8m/imx8mp/gpc.c b/plat/imx/imx8m/imx8mp/gpc.c new file mode 100644 index 0000000..956b508 --- /dev/null +++ b/plat/imx/imx8m/imx8mp/gpc.c @@ -0,0 +1,385 @@ +/* + * Copyright 2019-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> +#include <stdint.h> +#include <stdlib.h> + +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/psci/psci.h> +#include <lib/smccc.h> +#include <services/std_svc.h> + +#include <gpc.h> +#include <imx_aipstz.h> +#include <imx_sip_svc.h> +#include <platform_def.h> + +#define CCGR(x) (0x4000 + (x) * 0x10) +#define IMR_NUM U(5) + +struct imx_noc_setting { + uint32_t domain_id; + uint32_t start; + uint32_t end; + uint32_t prioriy; + uint32_t mode; + uint32_t socket_qos_en; +}; + +enum clk_type { + CCM_ROOT_SLICE, + CCM_CCGR, +}; + +struct clk_setting { + uint32_t offset; + uint32_t val; + enum clk_type type; +}; + +enum pu_domain_id { + /* hsio ss */ + HSIOMIX, + PCIE_PHY, + USB1_PHY, + USB2_PHY, + 
MLMIX, + AUDIOMIX, + /* gpu ss */ + GPUMIX, + GPU2D, + GPU3D, + /* vpu ss */ + VPUMIX, + VPU_G1, + VPU_G2, + VPU_H1, + /* media ss */ + MEDIAMIX, + MEDIAMIX_ISPDWP, + MIPI_PHY1, + MIPI_PHY2, + /* HDMI ss */ + HDMIMIX, + HDMI_PHY, + DDRMIX, + MAX_DOMAINS, +}; + +/* PU domain, add some hole to minimize the uboot change */ +static struct imx_pwr_domain pu_domains[MAX_DOMAINS] = { + [MIPI_PHY1] = IMX_PD_DOMAIN(MIPI_PHY1, false), + [PCIE_PHY] = IMX_PD_DOMAIN(PCIE_PHY, false), + [USB1_PHY] = IMX_PD_DOMAIN(USB1_PHY, true), + [USB2_PHY] = IMX_PD_DOMAIN(USB2_PHY, true), + [MLMIX] = IMX_MIX_DOMAIN(MLMIX, false), + [AUDIOMIX] = IMX_MIX_DOMAIN(AUDIOMIX, false), + [GPU2D] = IMX_PD_DOMAIN(GPU2D, false), + [GPUMIX] = IMX_MIX_DOMAIN(GPUMIX, false), + [VPUMIX] = IMX_MIX_DOMAIN(VPUMIX, false), + [GPU3D] = IMX_PD_DOMAIN(GPU3D, false), + [MEDIAMIX] = IMX_MIX_DOMAIN(MEDIAMIX, false), + [VPU_G1] = IMX_PD_DOMAIN(VPU_G1, false), + [VPU_G2] = IMX_PD_DOMAIN(VPU_G2, false), + [VPU_H1] = IMX_PD_DOMAIN(VPU_H1, false), + [HDMIMIX] = IMX_MIX_DOMAIN(HDMIMIX, false), + [HDMI_PHY] = IMX_PD_DOMAIN(HDMI_PHY, false), + [MIPI_PHY2] = IMX_PD_DOMAIN(MIPI_PHY2, false), + [HSIOMIX] = IMX_MIX_DOMAIN(HSIOMIX, false), + [MEDIAMIX_ISPDWP] = IMX_PD_DOMAIN(MEDIAMIX_ISPDWP, false), +}; + +static struct imx_noc_setting noc_setting[] = { + {MLMIX, 0x180, 0x180, 0x80000303, 0x0, 0x0}, + {AUDIOMIX, 0x200, 0x200, 0x80000303, 0x0, 0x0}, + {AUDIOMIX, 0x280, 0x480, 0x80000404, 0x0, 0x0}, + {GPUMIX, 0x500, 0x580, 0x80000303, 0x0, 0x0}, + {HDMIMIX, 0x600, 0x680, 0x80000202, 0x0, 0x1}, + {HDMIMIX, 0x700, 0x700, 0x80000505, 0x0, 0x0}, + {HSIOMIX, 0x780, 0x900, 0x80000303, 0x0, 0x0}, + {MEDIAMIX, 0x980, 0xb80, 0x80000202, 0x0, 0x1}, + {MEDIAMIX_ISPDWP, 0xc00, 0xd00, 0x80000505, 0x0, 0x0}, + {VPU_G1, 0xd80, 0xd80, 0x80000303, 0x0, 0x0}, + {VPU_G2, 0xe00, 0xe00, 0x80000303, 0x0, 0x0}, + {VPU_H1, 0xe80, 0xe80, 0x80000303, 0x0, 0x0} +}; + +static struct clk_setting hsiomix_clk[] = { + { 0x8380, 0x0, CCM_ROOT_SLICE }, + { 0x44d0, 0x0, CCM_CCGR }, + { 0x45c0, 0x0, CCM_CCGR }, +}; + +static struct aipstz_cfg aipstz5[] = { + {IMX_AIPSTZ5, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {0}, +}; + +static unsigned int pu_domain_status; + +static void imx_noc_qos(unsigned int domain_id) +{ + unsigned int i; + uint32_t hurry; + + if (domain_id == HDMIMIX) { + mmio_write_32(IMX_HDMI_CTL_BASE + TX_CONTROL1, 0x22018); + mmio_write_32(IMX_HDMI_CTL_BASE + TX_CONTROL1, 0x22010); + + /* set GPR to make lcdif read hurry level 0x7 */ + hurry = mmio_read_32(IMX_HDMI_CTL_BASE + TX_CONTROL0); + hurry |= 0x00077000; + mmio_write_32(IMX_HDMI_CTL_BASE + TX_CONTROL0, hurry); + } + + if (domain_id == MEDIAMIX) { + /* handle mediamix special */ + mmio_write_32(IMX_MEDIAMIX_CTL_BASE + RSTn_CSR, 0x1FFFFFF); + mmio_write_32(IMX_MEDIAMIX_CTL_BASE + CLK_EN_CSR, 0x1FFFFFF); + mmio_write_32(IMX_MEDIAMIX_CTL_BASE + RST_DIV, 0x40030000); + + /* set GPR to make lcdif read hurry level 0x7 */ + hurry = mmio_read_32(IMX_MEDIAMIX_CTL_BASE + LCDIF_ARCACHE_CTRL); + hurry |= 0xfc00; + mmio_write_32(IMX_MEDIAMIX_CTL_BASE + LCDIF_ARCACHE_CTRL, hurry); + /* set GPR to make isi write hurry level 0x7 */ + hurry = mmio_read_32(IMX_MEDIAMIX_CTL_BASE + ISI_CACHE_CTRL); + hurry |= 0x1ff00000; + mmio_write_32(IMX_MEDIAMIX_CTL_BASE + ISI_CACHE_CTRL, hurry); + } + + /* set MIX NoC */ + for (i = 0; i < ARRAY_SIZE(noc_setting); i++) { + if (noc_setting[i].domain_id == domain_id) { + udelay(50); + uint32_t offset = noc_setting[i].start; + + while (offset <= noc_setting[i].end) { + 
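/* each NoC target port occupies a 0x80-byte window: +0x8 priority, +0xc mode, +0x18 socket QoS enable, as programmed below */
+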
mmio_write_32(IMX_NOC_BASE + offset + 0x8, noc_setting[i].prioriy); + mmio_write_32(IMX_NOC_BASE + offset + 0xc, noc_setting[i].mode); + mmio_write_32(IMX_NOC_BASE + offset + 0x18, noc_setting[i].socket_qos_en); + offset += 0x80; + } + } + } +} + +void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on) +{ + struct imx_pwr_domain *pwr_domain = &pu_domains[domain_id]; + unsigned int i; + + /* validate the domain id */ + if (domain_id >= MAX_DOMAINS) { + return; + } + + if (domain_id == HSIOMIX) { + for (i = 0; i < ARRAY_SIZE(hsiomix_clk); i++) { + hsiomix_clk[i].val = mmio_read_32(IMX_CCM_BASE + hsiomix_clk[i].offset); + mmio_setbits_32(IMX_CCM_BASE + hsiomix_clk[i].offset, + hsiomix_clk[i].type == CCM_ROOT_SLICE ? BIT(28) : 0x3); + } + } + + if (on) { + if (pwr_domain->need_sync) { + pu_domain_status |= (1 << domain_id); + } + + if (domain_id == HDMIMIX) { + /* assert the reset */ + mmio_write_32(IMX_HDMI_CTL_BASE + RTX_RESET_CTL0, 0x0); + /* enable all th function clock */ + mmio_write_32(IMX_HDMI_CTL_BASE + RTX_CLK_CTL0, 0xFFFFFFFF); + mmio_write_32(IMX_HDMI_CTL_BASE + RTX_CLK_CTL1, 0x7ffff87e); + } + + /* clear the PGC bit */ + mmio_clrbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1); + + /* power up the domain */ + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, pwr_domain->pwr_req); + + /* wait for power request done */ + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & pwr_domain->pwr_req) + ; + + if (domain_id == HDMIMIX) { + /* wait for memory repair done for HDMIMIX */ + while (!(mmio_read_32(IMX_SRC_BASE + 0x94) & BIT(8))) + ; + /* disable all the function clock */ + mmio_write_32(IMX_HDMI_CTL_BASE + RTX_CLK_CTL0, 0x0); + mmio_write_32(IMX_HDMI_CTL_BASE + RTX_CLK_CTL1, 0x0); + /* deassert the reset */ + mmio_write_32(IMX_HDMI_CTL_BASE + RTX_RESET_CTL0, 0xffffffff); + /* enable all the clock again */ + mmio_write_32(IMX_HDMI_CTL_BASE + RTX_CLK_CTL0, 0xFFFFFFFF); + mmio_write_32(IMX_HDMI_CTL_BASE + RTX_CLK_CTL1, 0x7ffff87e); + } + + if (domain_id == HSIOMIX) { + /* enable HSIOMIX clock */ + mmio_write_32(IMX_HSIOMIX_CTL_BASE, 0x2); + } + + /* handle the ADB400 sync */ + if (pwr_domain->need_sync) { + /* clear adb power down request */ + mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync); + + /* wait for adb power request ack */ + while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) + ; + } + + imx_noc_qos(domain_id); + + /* AIPS5 config is lost when audiomix is off, so need to re-init it */ + if (domain_id == AUDIOMIX) { + imx_aipstz_init(aipstz5); + } + } else { + if (pwr_domain->always_on) { + return; + } + + if (pwr_domain->need_sync) { + pu_domain_status &= ~(1 << domain_id); + } + + /* handle the ADB400 sync */ + if (pwr_domain->need_sync) { + /* set adb power down request */ + mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync); + + /* wait for adb power request ack */ + while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) + ; + } + + /* set the PGC bit */ + mmio_setbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1); + + /* + * leave the G1, G2, H1 power domain on until VPUMIX power off, + * otherwise system will hang due to VPUMIX ACK + */ + if (domain_id == VPU_H1 || domain_id == VPU_G1 || domain_id == VPU_G2) { + return; + } + + if (domain_id == VPUMIX) { + mmio_write_32(IMX_GPC_BASE + PU_PGC_DN_TRG, VPU_G1_PWR_REQ | + VPU_G2_PWR_REQ | VPU_H1_PWR_REQ); + + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & (VPU_G1_PWR_REQ | + VPU_G2_PWR_REQ | VPU_H1_PWR_REQ)) + ; + } + + /* 
power down the domain */ + mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, pwr_domain->pwr_req); + + /* wait for power request done */ + while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & pwr_domain->pwr_req) + ; + + if (domain_id == HDMIMIX) { + /* disable all the clocks of HDMIMIX */ + mmio_write_32(IMX_HDMI_CTL_BASE + 0x40, 0x0); + mmio_write_32(IMX_HDMI_CTL_BASE + 0x50, 0x0); + } + } + + if (domain_id == HSIOMIX) { + for (i = 0; i < ARRAY_SIZE(hsiomix_clk); i++) { + mmio_write_32(IMX_CCM_BASE + hsiomix_clk[i].offset, hsiomix_clk[i].val); + } + } +} + +void imx_gpc_init(void) +{ + uint32_t val; + unsigned int i; + + /* mask all the wakeup irq by default */ + for (i = 0; i < IMR_NUM; i++) { + mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53 + i * 4, ~0x0); + mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0); + } + + val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC); + /* use GIC wake_request to wakeup C0~C3 from LPM */ + val |= CORE_WKUP_FROM_GIC; + /* clear the MASTER0 LPM handshake */ + val &= ~MASTER0_LPM_HSK; + mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val); + + /* clear MASTER1 & MASTER2 mapping in CPU0(A53) */ + mmio_clrbits_32(IMX_GPC_BASE + MST_CPU_MAPPING, (MASTER1_MAPPING | + MASTER2_MAPPING)); + + /* set all mix/PU in A53 domain */ + mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0x3fffff); + + /* + * Set the CORE & SCU power up timing: + * SW = 0x1, SW2ISO = 0x1; + * the CPU CORE and SCU power up timing counter + * is drived by 32K OSC, each domain's power up + * latency is (SW + SW2ISO) / 32768 + */ + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(0) + 0x4, 0x401); + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(1) + 0x4, 0x401); + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(2) + 0x4, 0x401); + mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(3) + 0x4, 0x401); + mmio_write_32(IMX_GPC_BASE + PLAT_PGC_PCR + 0x4, 0x401); + mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING, + (0x59 << TMC_TMR_SHIFT) | 0x5B | (0x2 << TRC1_TMC_SHIFT)); + + /* set DUMMY PDN/PUP ACK by default for A53 domain */ + mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, + A53_DUMMY_PUP_ACK | A53_DUMMY_PDN_ACK); + + /* clear DSM by default */ + val = mmio_read_32(IMX_GPC_BASE + SLPCR); + val &= ~SLPCR_EN_DSM; + /* enable the fast wakeup wait/stop mode */ + val |= SLPCR_A53_FASTWUP_WAIT_MODE; + val |= SLPCR_A53_FASTWUP_STOP_MODE; + /* clear the RBC */ + val &= ~(0x3f << SLPCR_RBC_COUNT_SHIFT); + /* set the STBY_COUNT to 0x5, (128 * 30)us */ + val &= ~(0x7 << SLPCR_STBY_COUNT_SHFT); + val |= (0x5 << SLPCR_STBY_COUNT_SHFT); + mmio_write_32(IMX_GPC_BASE + SLPCR, val); + + /* + * USB PHY power up needs to make sure RESET bit in SRC is clear, + * otherwise, the PU power up bit in GPC will NOT self-cleared. + * only need to do it once. 
+ */ + mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1); + mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1); + + /* enable all the power domain by default */ + for (i = 0; i < 101; i++) { + mmio_write_32(IMX_CCM_BASE + CCGR(i), 0x3); + } + + for (i = 0; i < 20; i++) { + imx_gpc_pm_domain_enable(i, true); + } +} diff --git a/plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c b/plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c new file mode 100644 index 0000000..08cbeeb --- /dev/null +++ b/plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c @@ -0,0 +1,117 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stdbool.h> + +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <common/desc_image_load.h> +#include <common/tbbr/tbbr_img_def.h> +#include <context.h> +#include <drivers/arm/tzc380.h> +#include <drivers/console.h> +#include <drivers/generic_delay_timer.h> +#include <drivers/mmc.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/mmio.h> +#include <lib/optee_utils.h> +#include <lib/xlat_tables/xlat_tables_v2.h> + +#include <imx8m_caam.h> +#include "imx8mp_private.h" +#include <imx_aipstz.h> +#include <imx_rdc.h> +#include <imx_uart.h> +#include <plat/common/platform.h> +#include <plat_imx8.h> +#include <platform_def.h> + + +static const struct aipstz_cfg aipstz[] = { + {IMX_AIPSTZ1, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ2, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ3, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ4, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {0}, +}; + +void bl2_el3_early_platform_setup(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + static console_t console; + unsigned int i; + + /* Enable CSU NS access permission */ + for (i = 0U; i < 64; i++) { + mmio_write_32(IMX_CSU_BASE + i * 4, 0x00ff00ff); + } + + imx_aipstz_init(aipstz); + + console_imx_uart_register(IMX_BOOT_UART_BASE, IMX_BOOT_UART_CLK_IN_HZ, + IMX_CONSOLE_BAUDRATE, &console); + + generic_delay_timer_init(); + + /* select the CKIL source to 32K OSC */ + mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1); + + /* Open handles to a FIP image */ + plat_imx_io_setup(); +} + +void bl2_el3_plat_arch_setup(void) +{ +} + +void bl2_platform_setup(void) +{ +} + +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + int err = 0; + bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); + bl_mem_params_node_t *pager_mem_params = NULL; + bl_mem_params_node_t *paged_mem_params = NULL; + + assert(bl_mem_params); + + switch (image_id) { + case BL32_IMAGE_ID: + pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID); + assert(pager_mem_params); + + paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID); + assert(paged_mem_params); + + err = parse_optee_header(&bl_mem_params->ep_info, + &pager_mem_params->image_info, + &paged_mem_params->image_info); + if (err != 0) { + WARN("OPTEE header parse error.\n"); + } + + break; + default: + /* Do nothing in default case */ + break; + } + + return err; +} + +unsigned int plat_get_syscnt_freq2(void) +{ + return COUNTER_FREQUENCY; +} + +void bl2_plat_runtime_setup(void) +{ + return; +} diff --git a/plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c b/plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c new file mode 100644 index 0000000..f2f6808 --- /dev/null +++ 
b/plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <common/desc_image_load.h> +#include <plat/common/platform.h> +#include <platform_def.h> + +static bl_mem_params_node_t bl2_mem_params_descs[] = { + { + .image_id = BL31_IMAGE_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2, + entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = BL31_BASE, + .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS), + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, image_info_t, + IMAGE_ATTRIB_PLAT_SETUP), + .image_info.image_base = BL31_BASE, + .image_info.image_max_size = BL31_LIMIT - BL31_BASE, + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + { + .image_id = BL32_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2, + entry_point_info_t, + SECURE | EXECUTABLE), + .ep_info.pc = BL32_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, + image_info_t, 0), + + .image_info.image_base = BL32_BASE, + .image_info.image_max_size = BL32_SIZE, + + .next_handoff_image_id = BL33_IMAGE_ID, + }, + { + .image_id = BL32_EXTRA1_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2, + entry_point_info_t, + SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, + image_info_t, IMAGE_ATTRIB_SKIP_LOADING), + .image_info.image_base = BL32_BASE, + .image_info.image_max_size = BL32_SIZE, + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + { + /* This is a zero sized image so we don't set base or size */ + .image_id = BL32_EXTRA2_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, + IMAGE_ATTRIB_SKIP_LOADING), + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + { + .image_id = BL33_IMAGE_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2, + entry_point_info_t, + NON_SECURE | EXECUTABLE), + # ifdef PRELOADED_BL33_BASE + .ep_info.pc = PLAT_NS_IMAGE_OFFSET, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, + IMAGE_ATTRIB_SKIP_LOADING), + # else + .ep_info.pc = PLAT_NS_IMAGE_OFFSET, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = PLAT_NS_IMAGE_OFFSET, + .image_info.image_max_size = PLAT_NS_IMAGE_SIZE, + # endif /* PRELOADED_BL33_BASE */ + + .next_handoff_image_id = INVALID_IMAGE_ID, + } +}; + +REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs); diff --git a/plat/imx/imx8m/imx8mp/imx8mp_bl31_setup.c b/plat/imx/imx8m/imx8mp/imx8mp_bl31_setup.c new file mode 100644 index 0000000..43fa064 --- /dev/null +++ b/plat/imx/imx8m/imx8mp/imx8mp_bl31_setup.c @@ -0,0 +1,261 @@ +/* + * Copyright 2020-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stdbool.h> + +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <context.h> +#include <drivers/arm/tzc380.h> +#include <drivers/console.h> +#include <drivers/generic_delay_timer.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/mmio.h> +#include <lib/xlat_tables/xlat_tables_v2.h> +#include <plat/common/platform.h> + +#include <dram.h> +#include <gpc.h> +#include <imx_aipstz.h> +#include <imx_uart.h> +#include <imx_rdc.h> +#include <imx8m_caam.h> +#include <imx8m_ccm.h> +#include 
<imx8m_csu.h> +#include <imx8m_snvs.h> +#include <platform_def.h> +#include <plat_imx8.h> + +#define TRUSTY_PARAMS_LEN_BYTES (4096*2) + +static const mmap_region_t imx_mmap[] = { + GIC_MAP, AIPS_MAP, OCRAM_S_MAP, DDRC_MAP, + NOC_MAP, CAAM_RAM_MAP, NS_OCRAM_MAP, + ROM_MAP, DRAM_MAP, + {0}, +}; + +static const struct aipstz_cfg aipstz[] = { + {IMX_AIPSTZ1, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ2, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ3, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {IMX_AIPSTZ4, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {0}, +}; + +static const struct imx_rdc_cfg rdc[] = { + /* Master domain assignment */ + RDC_MDAn(RDC_MDA_M7, DID1), + + /* peripherals domain permission */ + RDC_PDAPn(RDC_PDAP_UART2, D0R | D0W), + + /* memory region */ + + /* Sentinel */ + {0}, +}; + +static const struct imx_csu_cfg csu_cfg[] = { + /* peripherals csl setting */ + CSU_CSLx(CSU_CSL_OCRAM, CSU_SEC_LEVEL_2, UNLOCKED), + CSU_CSLx(CSU_CSL_OCRAM_S, CSU_SEC_LEVEL_2, UNLOCKED), + + /* master HP0~1 */ + + /* SA setting */ + + /* HP control setting */ + + /* Sentinel */ + {0} +}; + +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; + +/* get SPSR for BL33 entry */ +static uint32_t get_spsr_for_bl33_entry(void) +{ + unsigned long el_status; + unsigned long mode; + uint32_t spsr; + + /* figure out what mode we enter the non-secure world */ + el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; + el_status &= ID_AA64PFR0_ELX_MASK; + + mode = (el_status) ? MODE_EL2 : MODE_EL1; + + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + +static void bl31_tzc380_setup(void) +{ + unsigned int val; + + val = mmio_read_32(IMX_IOMUX_GPR_BASE + 0x28); + if ((val & GPR_TZASC_EN) != GPR_TZASC_EN) + return; + + tzc380_init(IMX_TZASC_BASE); + + /* + * Need to substact offset 0x40000000 from CPU address when + * programming tzasc region for i.mx8mp. + */ + + /* Enable 1G-5G S/NS RW */ + tzc380_configure_region(0, 0x00000000, TZC_ATTR_REGION_SIZE(TZC_REGION_SIZE_4G) | + TZC_ATTR_REGION_EN_MASK | TZC_ATTR_SP_ALL); +} + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + unsigned int console_base = IMX_BOOT_UART_BASE; + static console_t console; + unsigned int val; + unsigned int i; + + /* Enable CSU NS access permission */ + for (i = 0; i < 64; i++) { + mmio_write_32(IMX_CSU_BASE + i * 4, 0x00ff00ff); + } + + imx_aipstz_init(aipstz); + + imx_rdc_init(rdc); + + imx_csu_init(csu_cfg); + + /* config the ocram memory range for secure access */ + mmio_write_32(IMX_IOMUX_GPR_BASE + 0x2c, 0x4E1); + val = mmio_read_32(IMX_IOMUX_GPR_BASE + 0x2c); + mmio_write_32(IMX_IOMUX_GPR_BASE + 0x2c, val | 0x3DFF0000); + + if (console_base == 0U) { + console_base = imx8m_uart_get_base(); + } + + console_imx_uart_register(console_base, IMX_BOOT_UART_CLK_IN_HZ, + IMX_CONSOLE_BAUDRATE, &console); + /* This console is only used for boot stage */ + console_set_scope(&console, CONSOLE_FLAG_BOOT); + + imx8m_caam_init(); + + /* + * tell BL3-1 where the non-secure software image is located + * and the entry state information. 
+ */ + bl33_image_ep_info.pc = PLAT_NS_IMAGE_OFFSET; + bl33_image_ep_info.spsr = get_spsr_for_bl33_entry(); + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); + +#if defined(SPD_opteed) || defined(SPD_trusty) + /* Populate entry point information for BL32 */ + SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0); + SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE); + bl32_image_ep_info.pc = BL32_BASE; + bl32_image_ep_info.spsr = 0; + + /* Pass TEE base and size to bl33 */ + bl33_image_ep_info.args.arg1 = BL32_BASE; + bl33_image_ep_info.args.arg2 = BL32_SIZE; + +#ifdef SPD_trusty + bl32_image_ep_info.args.arg0 = BL32_SIZE; + bl32_image_ep_info.args.arg1 = BL32_BASE; +#else + /* Make sure memory is clean */ + mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0); + bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR; + bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR; +#endif +#endif + +#if !defined(SPD_opteed) && !defined(SPD_trusty) + enable_snvs_privileged_access(); +#endif + + bl31_tzc380_setup(); +} + +#define MAP_BL31_TOTAL \ + MAP_REGION_FLAT(BL31_START, BL31_SIZE, MT_MEMORY | MT_RW | MT_SECURE) +#define MAP_BL31_RO \ + MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, MT_MEMORY | MT_RO | MT_SECURE) +#define MAP_COHERENT_MEM \ + MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, \ + MT_DEVICE | MT_RW | MT_SECURE) +#define MAP_BL32_TOTAL \ + MAP_REGION_FLAT(BL32_BASE, BL32_SIZE, MT_MEMORY | MT_RW) + +void bl31_plat_arch_setup(void) +{ + const mmap_region_t bl_regions[] = { + MAP_BL31_TOTAL, + MAP_BL31_RO, +#if USE_COHERENT_MEM + MAP_COHERENT_MEM, +#endif +#if defined(SPD_opteed) || defined(SPD_trusty) + /* Map TEE memory */ + MAP_BL32_TOTAL, +#endif + {0} + }; + + setup_page_tables(bl_regions, imx_mmap); + enable_mmu_el3(0); +} + +void bl31_platform_setup(void) +{ + generic_delay_timer_init(); + + /* select the CKIL source to 32K OSC */ + mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1); + + /* Init the dram info */ + dram_info_init(SAVED_DRAM_TIMING_BASE); + + plat_gic_driver_init(); + plat_gic_init(); + + imx_gpc_init(); +} + +entry_point_info_t *bl31_plat_get_next_image_ep_info(unsigned int type) +{ + if (type == NON_SECURE) { + return &bl33_image_ep_info; + } + + if (type == SECURE) { + return &bl32_image_ep_info; + } + + return NULL; +} + +unsigned int plat_get_syscnt_freq2(void) +{ + return COUNTER_FREQUENCY; +} + +#ifdef SPD_trusty +void plat_trusty_set_boot_args(aapcs64_params_t *args) +{ + args->arg0 = BL32_SIZE; + args->arg1 = BL32_BASE; + args->arg2 = TRUSTY_PARAMS_LEN_BYTES; +} +#endif diff --git a/plat/imx/imx8m/imx8mp/imx8mp_psci.c b/plat/imx/imx8m/imx8mp/imx8mp_psci.c new file mode 100644 index 0000000..bc7b246 --- /dev/null +++ b/plat/imx/imx8m/imx8mp/imx8mp_psci.c @@ -0,0 +1,44 @@ +/* + * Copyright 2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/mmio.h> +#include <lib/psci/psci.h> + +#include <gpc.h> +#include <imx8m_psci.h> +#include <plat_imx8.h> + +static const plat_psci_ops_t imx_plat_psci_ops = { + .pwr_domain_on = imx_pwr_domain_on, + .pwr_domain_on_finish = imx_pwr_domain_on_finish, + .pwr_domain_off = imx_pwr_domain_off, + .validate_ns_entrypoint = imx_validate_ns_entrypoint, + .validate_power_state = imx_validate_power_state, + .cpu_standby = imx_cpu_standby, + .pwr_domain_suspend = imx_domain_suspend, + .pwr_domain_suspend_finish = imx_domain_suspend_finish, + 
.pwr_domain_pwr_down_wfi = imx_pwr_domain_pwr_down_wfi, + .get_sys_suspend_power_state = imx_get_sys_suspend_power_state, + .system_reset = imx_system_reset, + .system_off = imx_system_off, +}; + +/* export the platform specific psci ops */ +int plat_setup_psci_ops(uintptr_t sec_entrypoint, + const plat_psci_ops_t **psci_ops) +{ + /* sec_entrypoint is used for warm reset */ + imx_mailbox_init(sec_entrypoint); + + *psci_ops = &imx_plat_psci_ops; + + return 0; +} diff --git a/plat/imx/imx8m/imx8mp/imx8mp_rotpk.S b/plat/imx/imx8m/imx8mp/imx8mp_rotpk.S new file mode 100644 index 0000000..a4c7ce1 --- /dev/null +++ b/plat/imx/imx8m/imx8mp/imx8mp_rotpk.S @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + + .global imx8mp_rotpk_hash + .global imx8mp_rotpk_hash_end +imx8mp_rotpk_hash: + /* DER header */ + .byte 0x30, 0x31, 0x30, 0x0D, 0x06, 0x09, 0x60, 0x86, 0x48 + .byte 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20 + /* SHA256 */ + .incbin ROTPK_HASH +imx8mp_rotpk_hash_end: diff --git a/plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c b/plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c new file mode 100644 index 0000000..5d1a6c2 --- /dev/null +++ b/plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <plat/common/platform.h> + +extern char imx8mp_rotpk_hash[], imx8mp_rotpk_hash_end[]; + +int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len, + unsigned int *flags) +{ + *key_ptr = imx8mp_rotpk_hash; + *key_len = imx8mp_rotpk_hash_end - imx8mp_rotpk_hash; + *flags = ROTPK_IS_HASH; + + return 0; +} + +int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr) +{ + *nv_ctr = 0; + + return 0; +} + +int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr) +{ + return 1; +} + +int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size) +{ + return get_mbedtls_heap_helper(heap_addr, heap_size); +} diff --git a/plat/imx/imx8m/imx8mp/include/gpc_reg.h b/plat/imx/imx8m/imx8mp/include/gpc_reg.h new file mode 100644 index 0000000..7909937 --- /dev/null +++ b/plat/imx/imx8m/imx8mp/include/gpc_reg.h @@ -0,0 +1,151 @@ +/* + * Copyright 2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GPC_REG_H +#define GPC_REG_H + +#define LPCR_A53_BSC 0x0 +#define LPCR_A53_BSC2 0x180 +#define LPCR_A53_AD 0x4 +#define LPCR_M4 0x8 +#define SLPCR 0x14 +#define MST_CPU_MAPPING 0x18 +#define MLPCR 0x20 +#define PGC_ACK_SEL_A53 0x24 +#define IMR1_CORE0_A53 0x30 +#define IMR1_CORE1_A53 0x44 +#define IMR1_CORE2_A53 0x194 +#define IMR1_CORE3_A53 0x1A8 +#define IMR1_CORE0_M4 0x58 + +#define SLT0_CFG 0x200 +#define GPC_PU_PWRHSK 0x190 +#define PGC_CPU_0_1_MAPPING 0x1CC +#define CPU_PGC_UP_TRG 0xD0 +#define PU_PGC_UP_TRG 0xD8 +#define CPU_PGC_DN_TRG 0xDC +#define PU_PGC_DN_TRG 0xE4 +#define LPS_CPU1 0xEC + +#define A53_CORE0_PGC 0x800 +#define A53_PLAT_PGC 0x900 +#define PLAT_PGC_PCR 0x900 +#define NOC_PGC_PCR 0xa40 +#define PGC_SCU_TIMING 0x910 + +#define MASK_DSM_TRIGGER_A53 BIT(31) +#define IRQ_SRC_A53_WUP BIT(30) +#define IRQ_SRC_A53_WUP_SHIFT 30 +#define IRQ_SRC_C1 BIT(29) +#define IRQ_SRC_C0 BIT(28) +#define IRQ_SRC_C3 BIT(23) +#define IRQ_SRC_C2 BIT(22) +#define CPU_CLOCK_ON_LPM BIT(14) +#define A53_CLK_ON_LPM BIT(14) +#define MASTER0_LPM_HSK BIT(6) +#define MASTER1_LPM_HSK BIT(7) +#define MASTER2_LPM_HSK BIT(8) + +#define L2PGE BIT(31) +#define 
EN_L2_WFI_PDN BIT(5) +#define EN_PLAT_PDN BIT(4) + +#define SLPCR_EN_DSM BIT(31) +#define SLPCR_RBC_EN BIT(30) +#define SLPCR_A53_FASTWUP_STOP_MODE BIT(17) +#define SLPCR_A53_FASTWUP_WAIT_MODE BIT(16) +#define SLPCR_VSTBY BIT(2) +#define SLPCR_SBYOS BIT(1) +#define SLPCR_BYPASS_PMIC_READY BIT(0) +#define SLPCR_RBC_COUNT_SHIFT 24 +#define SLPCR_STBY_COUNT_SHFT 3 + +#define A53_DUMMY_PDN_ACK BIT(30) +#define A53_DUMMY_PUP_ACK BIT(31) +#define A53_PLAT_PDN_ACK BIT(8) +#define A53_PLAT_PUP_ACK BIT(9) + +#define NOC_PDN_SLT_CTRL BIT(12) +#define NOC_PUP_SLT_CTRL BIT(13) +#define NOC_PGC_PDN_ACK BIT(12) +#define NOC_PGC_PUP_ACK BIT(13) + +#define PLAT_PUP_SLT_CTRL BIT(9) +#define PLAT_PDN_SLT_CTRL BIT(8) + +#define SLT_PLAT_PDN BIT(8) +#define SLT_PLAT_PUP BIT(9) + +#define MASTER1_MAPPING BIT(1) +#define MASTER2_MAPPING BIT(2) + +#define TMR_TCD2_SHIFT 0 +#define TMC_TMR_SHIFT 10 +#define TRC1_TMC_SHIFT 20 + +#define MIPI_PHY1_PWR_REQ BIT(0) +#define PCIE_PHY_PWR_REQ BIT(1) +#define USB1_PHY_PWR_REQ BIT(2) +#define USB2_PHY_PWR_REQ BIT(3) +#define MLMIX_PWR_REQ BIT(4) +#define AUDIOMIX_PWR_REQ BIT(5) +#define GPU2D_PWR_REQ BIT(6) +#define GPUMIX_PWR_REQ BIT(7) +#define VPUMIX_PWR_REQ BIT(8) +#define GPU3D_PWR_REQ BIT(9) +#define MEDIAMIX_PWR_REQ BIT(10) +#define VPU_G1_PWR_REQ BIT(11) +#define VPU_G2_PWR_REQ BIT(12) +#define VPU_H1_PWR_REQ BIT(13) +#define HDMIMIX_PWR_REQ BIT(14) +#define HDMI_PHY_PWR_REQ BIT(15) +#define MIPI_PHY2_PWR_REQ BIT(16) +#define HSIOMIX_PWR_REQ BIT(17) +#define MEDIAMIX_ISPDWP_PWR_REQ BIT(18) +#define DDRMIX_PWR_REQ BIT(19) + +#define AUDIOMIX_ADB400_SYNC (BIT(4) | BIT(15)) +#define MLMIX_ADB400_SYNC (BIT(7) | BIT(8)) +#define GPUMIX_ADB400_SYNC BIT(9) +#define VPUMIX_ADB400_SYNC BIT(10) +#define DDRMIX_ADB400_SYNC BIT(11) +#define HSIOMIX_ADB400_SYNC BIT(12) +#define HDMIMIX_ADB400_SYNC BIT(13) +#define MEDIAMIX_ADB400_SYNC BIT(14) + +#define AUDIOMIX_ADB400_ACK (BIT(20) | BIT(31)) +#define MLMIX_ADB400_ACK (BIT(23) | BIT(24)) +#define GPUMIX_ADB400_ACK BIT(25) +#define VPUMIX_ADB400_ACK BIT(26) +#define DDRMIX_ADB400_ACK BIT(27) +#define HSIOMIX_ADB400_ACK BIT(28) +#define HDMIMIX_ADB400_ACK BIT(29) +#define MEDIAMIX_ADB400_ACK BIT(30) + +#define MIPI_PHY1_PGC 0xb00 +#define PCIE_PHY_PGC 0xb40 +#define USB1_PHY_PGC 0xb80 +#define USB2_PHY_PGC 0xbc0 +#define MLMIX_PGC 0xc00 +#define AUDIOMIX_PGC 0xc40 +#define GPU2D_PGC 0xc80 +#define GPUMIX_PGC 0xcc0 +#define VPUMIX_PGC 0xd00 +#define GPU3D_PGC 0xd40 +#define MEDIAMIX_PGC 0xd80 +#define VPU_G1_PGC 0xdc0 +#define VPU_G2_PGC 0xe00 +#define VPU_H1_PGC 0xe40 +#define HDMIMIX_PGC 0xe80 +#define HDMI_PHY_PGC 0xec0 +#define MIPI_PHY2_PGC 0xf00 +#define HSIOMIX_PGC 0xf40 +#define MEDIAMIX_ISPDWP_PGC 0xf80 +#define DDRMIX_PGC 0xfc0 + +#define IRQ_IMR_NUM U(5) + +#endif /* GPC_REG_H */ diff --git a/plat/imx/imx8m/imx8mp/include/imx8mp_private.h b/plat/imx/imx8m/imx8mp/include/imx8mp_private.h new file mode 100644 index 0000000..0a02334 --- /dev/null +++ b/plat/imx/imx8m/imx8mp/include/imx8mp_private.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX8MP_PRIVATE_H +#define IMX8MP_PRIVATE_H + +/******************************************************************************* + * Function and variable prototypes + ******************************************************************************/ +void plat_imx_io_setup(void); + +#endif /* IMX8MP_PRIVATE_H */ diff --git a/plat/imx/imx8m/imx8mp/include/imx_sec_def.h b/plat/imx/imx8m/imx8mp/include/imx_sec_def.h new file mode 100644 index 0000000..ba248b5 --- /dev/null +++ b/plat/imx/imx8m/imx8mp/include/imx_sec_def.h @@ -0,0 +1,274 @@ +/* + * Copyright 2020-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX_SEC_DEF_H +#define IMX_SEC_DEF_H + +/* RDC MDA index */ +enum rdc_mda_idx { + RDC_MDA_A53 = 0, + RDC_MDA_M7 = 1, + RDC_MDA_PCIE_CTRL1 = 2, + RDC_MDA_SDMA3p = 3, + RDC_MDA_SDMA3b = 4, + RDC_MDA_LCDIF = 5, + RDC_MDA_ISI = 6, + RDC_MDA_NPU = 7, + RDC_MDA_Coresight = 8, + RDC_MDA_DAP = 9, + RDC_MDA_CAAM = 10, + RDC_MDA_SDMA1p = 11, + RDC_MDA_SDMA1b = 12, + RDC_MDA_APBHDMA = 13, + RDC_MDA_RAWNAND = 14, + RDC_MDA_uSDHC1 = 15, + RDC_MDA_uSDHC2 = 16, + RDC_MDA_uSDHC3 = 17, + RDC_MDA_AUDIO_PROCESSOR = 18, + RDC_MDA_USB1 = 19, + RDC_MDA_USB2 = 20, + RDC_MDA_TESTPORT = 21, + RDC_MDA_ENET1_TX = 22, + RDC_MDA_ENET1_RX = 23, + RDC_MDA_SDMA2 = 24, + RDC_MDA_SDMA3_to_SPBA2 = 25, + RDC_MDA_SDMA1_to_SPBA1 = 26, + RDC_MDA_LCDIF2 = 27, + RDC_MDA_HDMI_TX = 28, + RDC_MDA_ENET2 = 29, + RDC_MDA_GPU3D = 30, + RDC_MDA_GPU2D = 31, + RDC_MDA_VPU_G1 = 32, + RDC_MDA_VPU_G2 = 33, + RDC_MDA_VPU_VC8000E = 34, + RDC_MDA_AUDIO_EDMA = 35, + RDC_MDA_ISP1 = 36, + RDC_MDA_ISP2 = 37, + RDC_MDA_DEWARP = 38, + RDC_MDA_GIC500 = 39, +}; + +/* RDC Peripherals index */ +enum rdc_pdap_idx { + RDC_PDAP_GPIO1 = 0, + RDC_PDAP_GPIO2 = 1, + RDC_PDAP_GPIO3 = 2, + RDC_PDAP_GPIO4 = 3, + RDC_PDAP_GPIO5 = 4, + RDC_PDAP_MU_2_A = 5, + RDC_PDAP_ANA_TSENSOR = 6, + RDC_PDAP_ANA_OSC = 7, + RDC_PDAP_WDOG1 = 8, + RDC_PDAP_WDOG2 = 9, + RDC_PDAP_WDOG3 = 10, + RDC_PDAP_GPT1 = 13, + RDC_PDAP_GPT2 = 14, + RDC_PDAP_GPT3 = 15, + RDC_PDAP_MU_2_B = 16, + RDC_PDAP_ROMCP = 17, + RDC_PDAP_MU_3_A = 18, + RDC_PDAP_IOMUXC = 19, + RDC_PDAP_IOMUXC_GPR = 20, + RDC_PDAP_OCOTP_CTRL = 21, + RDC_PDAP_ANA_PLL = 22, + RDC_PDAP_SNVS_HP = 23, + RDC_PDAP_CCM = 24, + RDC_PDAP_SRC = 25, + RDC_PDAP_GPC = 26, + RDC_PDAP_SEMAPHORE1 = 27, + RDC_PDAP_SEMAPHORE2 = 28, + RDC_PDAP_RDC = 29, + RDC_PDAP_CSU = 30, + RDC_PDAP_MU_3_B = 31, + RDC_PDAP_ISI = 32, + RDC_PDAP_ISP0 = 33, + RDC_PDAP_ISP1 = 34, + RDC_PDAP_IPS_Dewarp = 35, + RDC_PDAP_MIPI_CSI0 = 36, + RDC_PDAP_HSIOMIX_BLK_CTL = 37, + RDC_PDAP_PWM1 = 38, + RDC_PDAP_PWM2 = 39, + RDC_PDAP_PWM3 = 40, + RDC_PDAP_PWM4 = 41, + RDC_PDAP_System_Counter_RD = 42, + RDC_PDAP_System_Counter_CMP = 43, + RDC_PDAP_System_Counter_CTRL = 44, + RDC_PDAP_I2C5 = 45, + RDC_PDAP_GPT6 = 46, + RDC_PDAP_GPT5 = 47, + RDC_PDAP_GPT4 = 48, + RDC_PDAP_MIPI_CSI1 = 49, + RDC_PDAP_MIPI_DSI0 = 50, + RDC_PDAP_MEDIAMIX_BLK_CTL = 51, + RDC_PDAP_LCDIF1 = 52, + RDC_PDAP_eDMA_Management_Page = 53, + RDC_PDAP_eDMA_Channels_15_0 = 54, + RDC_PDAP_eDMA_Channels_31_16 = 55, + RDC_PDAP_TZASC = 56, + RDC_PDAP_I2C6 = 57, + RDC_PDAP_CAAM = 58, + RDC_PDAP_LCDIF2 = 59, + RDC_PDAP_PERFMON1 = 60, + RDC_PDAP_PERFMON2 = 61, + RDC_PDAP_NOC_BLK_CTL = 62, + RDC_PDAP_QoSC = 63, + RDC_PDAP_LVDS0 = 64, + RDC_PDAP_LVDS1 = 65, + RDC_PDAP_I2C1 = 66, + RDC_PDAP_I2C2 = 67, + RDC_PDAP_I2C3 = 68, + RDC_PDAP_I2C4 = 69, + RDC_PDAP_UART4 = 70, + RDC_PDAP_HDMI_TX = 71, + RDC_PDAP_IRQ_STEER_Audio_Processor = 72, + 
RDC_PDAP_SDMA2 = 73, + RDC_PDAP_MU_1_A = 74, + RDC_PDAP_MU_1_B = 75, + RDC_PDAP_SEMAPHORE_HS = 76, + RDC_PDAP_SAI1 = 78, + RDC_PDAP_SAI2 = 79, + RDC_PDAP_SAI3 = 80, + RDC_PDAP_CAN_FD1 = 81, + RDC_PDAP_SAI5 = 82, + RDC_PDAP_SAI6 = 83, + RDC_PDAP_uSDHC1 = 84, + RDC_PDAP_uSDHC2 = 85, + RDC_PDAP_uSDHC3 = 86, + RDC_PDAP_PCIE_PHY1 = 87, + RDC_PDAP_HDMI_TX_AUDLNK_MSTR = 88, + RDC_PDAP_CAN_FD2 = 89, + RDC_PDAP_SPBA2 = 90, + RDC_PDAP_QSPI = 91, + RDC_PDAP_AUDIO_BLK_CTRL = 92, + RDC_PDAP_SDMA1 = 93, + RDC_PDAP_ENET1 = 94, + RDC_PDAP_ENET2_TSN = 95, + RDC_PDAP_ASRC = 97, + RDC_PDAP_eCSPI1 = 98, + RDC_PDAP_eCSPI2 = 99, + RDC_PDAP_eCSPI3 = 100, + RDC_PDAP_SAI7 = 101, + RDC_PDAP_UART1 = 102, + RDC_PDAP_UART3 = 104, + RDC_PDAP_UART2 = 105, + RDC_PDAP_PDM_MICFIL = 106, + RDC_PDAP_AUDIO_XCVR_RX_eARC = 107, + RDC_PDAP_SDMA3 = 109, + RDC_PDAP_SPBA1 = 111, +}; + +enum csu_csl_idx { + CSU_CSL_GPIO1 = 0, + CSU_CSL_GPIO2 = 1, + CSU_CSL_GPIO3 = 2, + CSU_CSL_GPIO4 = 3, + CSU_CSL_GPIO5 = 4, + CSU_CSL_MU_2_A = 5, + CSU_CSL_ANA_TSENSOR = 6, + CSU_CSL_ANA_OSC = 7, + CSU_CSL_WDOG1 = 8, + CSU_CSL_WDOG2 = 9, + CSU_CSL_WDOG3 = 10, + CSU_CSL_GPT1 = 13, + CSU_CSL_GPT2 = 14, + CSU_CSL_GPT3 = 15, + CSU_CSL_MU_2_B = 16, + CSU_CSL_ROMCP = 17, + CSU_CSL_MU_3_A = 18, + CSU_CSL_IOMUXC = 19, + CSU_CSL_IOMUXC_GPR = 20, + CSU_CSL_OCOTP_CTRL = 21, + CSU_CSL_ANA_PLL = 22, + CSU_CSL_SNVS_HP = 23, + CSU_CSL_CCM = 24, + CSU_CSL_SRC = 25, + CSU_CSL_GPC = 26, + CSU_CSL_SEMAPHORE1 = 27, + CSU_CSL_SEMAPHORE2 = 28, + CSU_CSL_RDC = 29, + CSU_CSL_CSU = 30, + CSU_CSL_MU_3_B = 31, + CSU_CSL_ISI = 32, + CSU_CSL_ISP0 = 33, + CSU_CSL_ISP1 = 34, + CSU_CSL_IPS_Dewarp = 35, + CSU_CSL_MIPI_CSI0 = 36, + CSU_CSL_HSIOMIX_BLK_CTL = 37, + CSU_CSL_PWM1 = 38, + CSU_CSL_PWM2 = 39, + CSU_CSL_PWM3 = 40, + CSU_CSL_PWM4 = 41, + CSU_CSL_System_Counter_RD = 42, + CSU_CSL_System_Counter_CMP = 43, + CSU_CSL_System_Counter_CTRL = 44, + CSU_CSL_I2C5 = 45, + CSU_CSL_GPT6 = 46, + CSU_CSL_GPT5 = 47, + CSU_CSL_GPT4 = 48, + CSU_CSL_MIPI_CSI1 = 49, + CSU_CSL_MIPI_DSI0 = 50, + CSU_CSL_MEDIAMIX_BLK_CTL = 51, + CSU_CSL_LCDIF1 = 52, + CSU_CSL_eDMA_Management_Page = 53, + CSU_CSL_eDMA_Channels_15_0 = 54, + CSU_CSL_eDMA_Channels_31_16 = 55, + CSU_CSL_TZASC = 56, + CSU_CSL_I2C6 = 57, + CSU_CSL_CAAM = 58, + CSU_CSL_LCDIF2 = 59, + CSU_CSL_PERFMON1 = 60, + CSU_CSL_PERFMON2 = 61, + CSU_CSL_NOC_BLK_CTL = 62, + CSU_CSL_QoSC = 63, + CSU_CSL_LVDS0 = 64, + CSU_CSL_LVDS1 = 65, + CSU_CSL_I2C1 = 66, + CSU_CSL_I2C2 = 67, + CSU_CSL_I2C3 = 68, + CSU_CSL_I2C4 = 69, + CSU_CSL_UART4 = 70, + CSU_CSL_HDMI_TX = 71, + CSU_CSL_IRQ_STEER_Audio_Processor = 72, + CSU_CSL_SDMA2 = 73, + CSU_CSL_MU_1_A = 74, + CSU_CSL_MU_1_B = 75, + CSU_CSL_SEMAPHORE_HS = 76, + CSU_CSL_SAI1 = 78, + CSU_CSL_SAI2 = 79, + CSU_CSL_SAI3 = 80, + CSU_CSL_CAN_FD1 = 81, + CSU_CSL_SAI5 = 82, + CSU_CSL_SAI6 = 83, + CSU_CSL_uSDHC1 = 84, + CSU_CSL_uSDHC2 = 85, + CSU_CSL_uSDHC3 = 86, + CSU_CSL_PCIE_PHY1 = 87, + CSU_CSL_HDMI_TX_AUDLNK_MSTR = 88, + CSU_CSL_CAN_FD2 = 89, + CSU_CSL_SPBA2 = 90, + CSU_CSL_QSPI = 91, + CSU_CSL_AUDIO_BLK_CTRL = 92, + CSU_CSL_SDMA1 = 93, + CSU_CSL_ENET1 = 94, + CSU_CSL_ENET2_TSN = 95, + CSU_CSL_ASRC = 97, + CSU_CSL_eCSPI1 = 98, + CSU_CSL_eCSPI2 = 99, + CSU_CSL_eCSPI3 = 100, + CSU_CSL_SAI7 = 101, + CSU_CSL_UART1 = 102, + CSU_CSL_UART3 = 104, + CSU_CSL_UART2 = 105, + CSU_CSL_PDM_MICFIL = 106, + CSU_CSL_AUDIO_XCVR_RX_eARC = 107, + CSU_CSL_SDMA3 = 109, + CSU_CSL_SPBA1 = 111, + CSU_CSL_OCRAM_A = 113, + CSU_CSL_OCRAM = 118, + CSU_CSL_OCRAM_S = 119, +}; + +#endif /* IMX_SEC_DEF_H */ diff --git 
a/plat/imx/imx8m/imx8mp/include/platform_def.h b/plat/imx/imx8m/imx8mp/include/platform_def.h new file mode 100644 index 0000000..1281270 --- /dev/null +++ b/plat/imx/imx8m/imx8mp/include/platform_def.h @@ -0,0 +1,203 @@ +/* + * Copyright 2020-2023 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef PLATFORM_DEF_H +#define PLATFORM_DEF_H + +#include <common/tbbr/tbbr_img_def.h> +#include <lib/utils_def.h> +#include <lib/xlat_tables/xlat_tables_v2.h> +#include <plat/common/common_def.h> + +#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" +#define PLATFORM_LINKER_ARCH aarch64 + +#define PLATFORM_STACK_SIZE 0xB00 +#define CACHE_WRITEBACK_GRANULE 64 + +#define PLAT_PRIMARY_CPU U(0x0) +#define PLATFORM_MAX_CPU_PER_CLUSTER U(4) +#define PLATFORM_CLUSTER_COUNT U(1) +#define PLATFORM_CLUSTER0_CORE_COUNT U(4) +#define PLATFORM_CLUSTER1_CORE_COUNT U(0) +#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER0_CORE_COUNT) + +#define IMX_PWR_LVL0 MPIDR_AFFLVL0 +#define IMX_PWR_LVL1 MPIDR_AFFLVL1 +#define IMX_PWR_LVL2 MPIDR_AFFLVL2 + +#define PWR_DOMAIN_AT_MAX_LVL U(1) +#define PLAT_MAX_PWR_LVL U(2) +#define PLAT_MAX_OFF_STATE U(4) +#define PLAT_MAX_RET_STATE U(2) + +#define PLAT_WAIT_RET_STATE U(1) +#define PLAT_STOP_OFF_STATE U(3) + +#if defined(NEED_BL2) +#define BL2_BASE U(0x970000) +#define BL2_SIZE SZ_128K +#define BL2_LIMIT (BL2_BASE + BL2_SIZE) +#define BL31_BASE U(0x950000) +#define IMX_FIP_BASE U(0x40310000) +#define IMX_FIP_SIZE U(0x000300000) +#define IMX_FIP_LIMIT U(FIP_BASE + FIP_SIZE) + +/* Define FIP image location on eMMC */ +#define IMX_FIP_MMC_BASE U(0x100000) + +#define PLAT_IMX8MP_BOOT_MMC_BASE U(0x30B50000) /* SD */ +#else +#define BL31_BASE U(0x970000) +#endif + +#define BL31_SIZE SZ_128K +#define BL31_LIMIT (BL31_BASE + BL31_SIZE) + +#define PLAT_PRI_BITS U(3) +#define PLAT_SDEI_CRITICAL_PRI 0x10 +#define PLAT_SDEI_NORMAL_PRI 0x20 +#define PLAT_SDEI_SGI_PRIVATE U(9) + +/* non-secure uboot base */ +#define PLAT_NS_IMAGE_OFFSET U(0x40200000) +#define PLAT_NS_IMAGE_SIZE U(0x00200000) + +#define BL32_FDT_OVERLAY_ADDR (PLAT_NS_IMAGE_OFFSET + 0x3000000) + +/* GICv3 base address */ +#define PLAT_GICD_BASE U(0x38800000) +#define PLAT_GICR_BASE U(0x38880000) + +#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 32) +#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 32) + +#define MAX_XLAT_TABLES 8 +#define MAX_MMAP_REGIONS 16 + +#define HAB_RVT_BASE U(0x00000900) /* HAB_RVT for i.MX8MM */ + +#define IMX_BOOT_UART_CLK_IN_HZ 24000000 /* Select 24MHz oscillator */ +#define PLAT_CRASH_UART_BASE IMX_BOOT_UART_BASE +#define PLAT_CRASH_UART_CLK_IN_HZ 24000000 +#define IMX_CONSOLE_BAUDRATE 115200 + +#define IMX_AIPSTZ1 U(0x301f0000) +#define IMX_AIPSTZ2 U(0x305f0000) +#define IMX_AIPSTZ3 U(0x309f0000) +#define IMX_AIPSTZ4 U(0x32df0000) +#define IMX_AIPSTZ5 U(0x30df0000) + +#define IMX_AIPS_BASE U(0x30000000) +#define IMX_AIPS_SIZE U(0x3000000) +#define IMX_GPV_BASE U(0x32000000) +#define IMX_GPV_SIZE U(0x800000) +#define IMX_AIPS1_BASE U(0x30200000) +#define IMX_AIPS4_BASE U(0x32c00000) +#define IMX_ANAMIX_BASE U(0x30360000) +#define IMX_CCM_BASE U(0x30380000) +#define IMX_SRC_BASE U(0x30390000) +#define IMX_GPC_BASE U(0x303a0000) +#define IMX_RDC_BASE U(0x303d0000) +#define IMX_CSU_BASE U(0x303e0000) +#define IMX_WDOG_BASE U(0x30280000) +#define IMX_SNVS_BASE U(0x30370000) +#define IMX_NOC_BASE U(0x32700000) +#define IMX_NOC_SIZE U(0x100000) +#define IMX_TZASC_BASE U(0x32F80000) +#define IMX_IOMUX_GPR_BASE U(0x30340000) +#define IMX_CAAM_BASE U(0x30900000) +#define IMX_DDRC_BASE U(0x3d400000) 
+#define IMX_DDRPHY_BASE U(0x3c000000) +#define IMX_DDR_IPS_BASE U(0x3d000000) +#define IMX_DDR_IPS_SIZE U(0x1900000) +#define IMX_ROM_BASE U(0x0) +#define IMX_ROM_SIZE U(0x40000) +#define IMX_NS_OCRAM_BASE U(0x900000) +#define IMX_NS_OCRAM_SIZE U(0x60000) +#define IMX_CAAM_RAM_BASE U(0x100000) +#define IMX_CAAM_RAM_SIZE U(0x10000) +#define IMX_DRAM_BASE U(0x40000000) +#define IMX_DRAM_SIZE U(0xc0000000) + +#define IMX_GIC_BASE PLAT_GICD_BASE +#define IMX_GIC_SIZE U(0x200000) + +#define IMX_HSIOMIX_CTL_BASE U(0x32f10000) +#define IMX_HDMI_CTL_BASE U(0x32fc0000) +#define RTX_RESET_CTL0 U(0x20) +#define RTX_CLK_CTL0 U(0x40) +#define RTX_CLK_CTL1 U(0x50) +#define TX_CONTROL0 U(0x200) +#define TX_CONTROL1 U(0x220) + +#define IMX_MEDIAMIX_CTL_BASE U(0x32ec0000) +#define RSTn_CSR U(0x0) +#define CLK_EN_CSR U(0x4) +#define RST_DIV U(0x8) +#define LCDIF_ARCACHE_CTRL U(0x4c) +#define ISI_CACHE_CTRL U(0x50) + +#define WDOG_WSR U(0x2) +#define WDOG_WCR_WDZST BIT(0) +#define WDOG_WCR_WDBG BIT(1) +#define WDOG_WCR_WDE BIT(2) +#define WDOG_WCR_WDT BIT(3) +#define WDOG_WCR_SRS BIT(4) +#define WDOG_WCR_WDA BIT(5) +#define WDOG_WCR_SRE BIT(6) +#define WDOG_WCR_WDW BIT(7) + +#define SRC_A53RCR0 U(0x4) +#define SRC_A53RCR1 U(0x8) +#define SRC_OTG1PHY_SCR U(0x20) +#define SRC_OTG2PHY_SCR U(0x24) +#define SRC_GPR1_OFFSET U(0x74) + +#define SNVS_LPCR U(0x38) +#define SNVS_LPCR_SRTC_ENV BIT(0) +#define SNVS_LPCR_DP_EN BIT(5) +#define SNVS_LPCR_TOP BIT(6) + +#define IOMUXC_GPR10 U(0x28) +#define GPR_TZASC_EN BIT(0) +#define GPR_TZASC_EN_LOCK BIT(16) + +#define ANAMIX_MISC_CTL U(0x124) +#define DRAM_PLL_CTRL (IMX_ANAMIX_BASE + 0x50) + +#define MAX_CSU_NUM U(64) + +#define OCRAM_S_BASE U(0x00180000) +#define OCRAM_S_SIZE U(0x8000) +#define OCRAM_S_LIMIT (OCRAM_S_BASE + OCRAM_S_SIZE) +#define SAVED_DRAM_TIMING_BASE OCRAM_S_BASE + +#define COUNTER_FREQUENCY 8000000 /* 8MHz */ + +#define IMX_WDOG_B_RESET + +#define MAX_IO_HANDLES 3U +#define MAX_IO_DEVICES 2U +#define MAX_IO_BLOCK_DEVICES 1U + +#define GIC_MAP MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE, MT_DEVICE | MT_RW) +#define AIPS_MAP MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE, MT_DEVICE | MT_RW) /* AIPS map */ +#define OCRAM_S_MAP MAP_REGION_FLAT(OCRAM_S_BASE, OCRAM_S_SIZE, MT_MEMORY | MT_RW) /* OCRAM_S */ +#define DDRC_MAP MAP_REGION_FLAT(IMX_DDRPHY_BASE, IMX_DDR_IPS_SIZE, MT_DEVICE | MT_RW) /* DDRMIX */ +#define NOC_MAP MAP_REGION_FLAT(IMX_NOC_BASE, IMX_NOC_SIZE, MT_DEVICE | MT_RW) /* NOC QoS */ +#define CAAM_RAM_MAP MAP_REGION_FLAT(IMX_CAAM_RAM_BASE, IMX_CAAM_RAM_SIZE, MT_MEMORY | MT_RW) /* CAMM RAM */ +#define NS_OCRAM_MAP MAP_REGION_FLAT(IMX_NS_OCRAM_BASE, IMX_NS_OCRAM_SIZE, MT_MEMORY | MT_RW) /* NS OCRAM */ +#define ROM_MAP MAP_REGION_FLAT(IMX_ROM_BASE, IMX_ROM_SIZE, MT_MEMORY | MT_RO) /* ROM code */ + +/* + * Note: DRAM region is mapped with entire size available and uses MT_RW + * attributes. + * See details in docs/plat/imx8m.rst "High Assurance Boot (HABv4)" section + * for explanation of this mapping scheme. 
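+ * The window deliberately covers the full IMX_DRAM_SIZE (3 GiB starting at
+ * IMX_DRAM_BASE) and is marked MT_NS.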
+ */ +#define DRAM_MAP MAP_REGION_FLAT(IMX_DRAM_BASE, IMX_DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS) /* DRAM */ + +#endif /* platform_def.h */ diff --git a/plat/imx/imx8m/imx8mp/platform.mk b/plat/imx/imx8m/imx8mp/platform.mk new file mode 100644 index 0000000..ce69071 --- /dev/null +++ b/plat/imx/imx8m/imx8mp/platform.mk @@ -0,0 +1,173 @@ +# +# Copyright 2019-2022 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# + +PLAT_INCLUDES := -Iplat/imx/common/include \ + -Iplat/imx/imx8m/include \ + -Iplat/imx/imx8m/imx8mp/include \ + -Idrivers/imx/usdhc \ + -Iinclude/common/tbbr +# Translation tables library +include lib/xlat_tables_v2/xlat_tables.mk + +# Include GICv3 driver files +include drivers/arm/gic/v3/gicv3.mk + +IMX_DRAM_SOURCES := plat/imx/imx8m/ddr/dram.c \ + plat/imx/imx8m/ddr/clock.c \ + plat/imx/imx8m/ddr/dram_retention.c \ + plat/imx/imx8m/ddr/ddr4_dvfs.c \ + plat/imx/imx8m/ddr/lpddr4_dvfs.c + +IMX_GIC_SOURCES := ${GICV3_SOURCES} \ + plat/common/plat_gicv3.c \ + plat/common/plat_psci_common.c \ + plat/imx/common/plat_imx8_gic.c + +BL31_SOURCES += plat/imx/common/imx8_helpers.S \ + plat/imx/imx8m/gpc_common.c \ + plat/imx/imx8m/imx_hab.c \ + plat/imx/imx8m/imx_aipstz.c \ + plat/imx/imx8m/imx_rdc.c \ + plat/imx/imx8m/imx8m_caam.c \ + plat/imx/imx8m/imx8m_ccm.c \ + plat/imx/imx8m/imx8m_csu.c \ + plat/imx/imx8m/imx8m_psci_common.c \ + plat/imx/imx8m/imx8m_snvs.c \ + plat/imx/imx8m/imx8mp/imx8mp_bl31_setup.c \ + plat/imx/imx8m/imx8mp/imx8mp_psci.c \ + plat/imx/imx8m/imx8mp/gpc.c \ + plat/imx/common/imx8_topology.c \ + plat/imx/common/imx_sip_handler.c \ + plat/imx/common/imx_sip_svc.c \ + plat/imx/common/imx_uart_console.S \ + lib/cpus/aarch64/cortex_a53.S \ + drivers/arm/tzc/tzc380.c \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + ${IMX_DRAM_SOURCES} \ + ${IMX_GIC_SOURCES} \ + ${XLAT_TABLES_LIB_SRCS} + +ifeq (${NEED_BL2},yes) +BL2_SOURCES += common/desc_image_load.c \ + plat/imx/common/imx8_helpers.S \ + plat/imx/common/imx_uart_console.S \ + plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c \ + plat/imx/imx8m/imx8mp/gpc.c \ + plat/imx/imx8m/imx_aipstz.c \ + plat/imx/imx8m/imx_rdc.c \ + plat/imx/imx8m/imx8m_caam.c \ + plat/common/plat_psci_common.c \ + lib/cpus/aarch64/cortex_a53.S \ + drivers/arm/tzc/tzc380.c \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + ${PLAT_GIC_SOURCES} \ + ${PLAT_DRAM_SOURCES} \ + ${XLAT_TABLES_LIB_SRCS} \ + drivers/mmc/mmc.c \ + drivers/io/io_block.c \ + drivers/io/io_fip.c \ + drivers/io/io_memmap.c \ + drivers/io/io_storage.c \ + drivers/imx/usdhc/imx_usdhc.c \ + plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c \ + plat/imx/common/imx_io_storage.c \ + plat/imx/imx8m/imx8m_image_load.c \ + lib/optee/optee_utils.c +endif + +# Add the build options to pack BLx images and kernel device tree +# in the FIP if the platform requires. 
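+# As an illustrative example (exact variables depend on the build setup), a
+# FIP-based build for this platform might be invoked as:
+#   make PLAT=imx8mp NEED_BL2=yes SPD=opteed BL32=tee.bin BL33=u-boot.bin all fip
+# The conditionals that follow only add the certificate and extra images when
+# the corresponding build variables are set.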
+ifneq ($(BL2),) +RESET_TO_BL31 := 0 +$(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/tb_fw.crt,--tb-fw-cert)) +endif +ifneq ($(BL32_EXTRA1),) +$(eval $(call TOOL_ADD_IMG,BL32_EXTRA1,--tos-fw-extra1)) +endif +ifneq ($(BL32_EXTRA2),) +$(eval $(call TOOL_ADD_IMG,BL32_EXTRA2,--tos-fw-extra2)) +endif +ifneq ($(HW_CONFIG),) +$(eval $(call TOOL_ADD_IMG,HW_CONFIG,--hw-config)) +endif + +ifeq (${NEED_BL2},yes) +$(eval $(call add_define,NEED_BL2)) +LOAD_IMAGE_V2 := 1 +# Non-TF Boot ROM +RESET_TO_BL2 := 1 +endif + +ifneq (${TRUSTED_BOARD_BOOT},0) + +include drivers/auth/mbedtls/mbedtls_crypto.mk +include drivers/auth/mbedtls/mbedtls_x509.mk + +AUTH_SOURCES := drivers/auth/auth_mod.c \ + drivers/auth/crypto_mod.c \ + drivers/auth/img_parser_mod.c \ + drivers/auth/tbbr/tbbr_cot_common.c \ + drivers/auth/tbbr/tbbr_cot_bl2.c + +BL2_SOURCES += ${AUTH_SOURCES} \ + plat/common/tbbr/plat_tbbr.c \ + plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c \ + plat/imx/imx8m/imx8mp/imx8mp_rotpk.S + +ROT_KEY = $(BUILD_PLAT)/rot_key.pem +ROTPK_HASH = $(BUILD_PLAT)/rotpk_sha256.bin + +$(eval $(call add_define_val,ROTPK_HASH,'"$(ROTPK_HASH)"')) +$(eval $(call MAKE_LIB_DIRS)) + +$(BUILD_PLAT)/bl2/imx8mp_rotpk.o: $(ROTPK_HASH) + +certificates: $(ROT_KEY) + +$(ROT_KEY): | $(BUILD_PLAT) + @echo " OPENSSL $@" + @if [ ! -f $(ROT_KEY) ]; then \ + ${OPENSSL_BIN_PATH}/openssl genrsa 2048 > $@ 2>/dev/null; \ + fi + +$(ROTPK_HASH): $(ROT_KEY) + @echo " OPENSSL $@" + $(Q)${OPENSSL_BIN_PATH}/openssl rsa -in $< -pubout -outform DER 2>/dev/null |\ + ${OPENSSL_BIN_PATH}/openssl dgst -sha256 -binary > $@ 2>/dev/null +endif + +ENABLE_PIE := 1 +USE_COHERENT_MEM := 1 +RESET_TO_BL31 := 1 +A53_DISABLE_NON_TEMPORAL_HINT := 0 + +ERRATA_A53_835769 := 1 +ERRATA_A53_843419 := 1 +ERRATA_A53_855873 := 1 + +BL32_BASE ?= 0x56000000 +$(eval $(call add_define,BL32_BASE)) + +BL32_SIZE ?= 0x2000000 +$(eval $(call add_define,BL32_SIZE)) + +IMX_BOOT_UART_BASE ?= 0x30890000 +ifeq (${IMX_BOOT_UART_BASE},auto) + override IMX_BOOT_UART_BASE := 0 +endif +$(eval $(call add_define,IMX_BOOT_UART_BASE)) + +EL3_EXCEPTION_HANDLING := $(SDEI_SUPPORT) +ifeq (${SDEI_SUPPORT}, 1) +BL31_SOURCES += plat/imx/common/imx_ehf.c \ + plat/imx/common/imx_sdei.c +endif + +ifeq (${SPD},trusty) + BL31_CFLAGS += -DPLAT_XLAT_TABLES_DYNAMIC=1 +endif diff --git a/plat/imx/imx8m/imx8mq/gpc.c b/plat/imx/imx8m/imx8mq/gpc.c new file mode 100644 index 0000000..ebf92f7 --- /dev/null +++ b/plat/imx/imx8m/imx8mq/gpc.c @@ -0,0 +1,448 @@ +/* + * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdlib.h> +#include <stdint.h> +#include <stdbool.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/psci/psci.h> +#include <lib/smccc.h> +#include <lib/spinlock.h> +#include <plat/common/platform.h> +#include <services/std_svc.h> + +#include <gpc.h> +#include <platform_def.h> + +#define FSL_SIP_CONFIG_GPC_MASK U(0x00) +#define FSL_SIP_CONFIG_GPC_UNMASK U(0x01) +#define FSL_SIP_CONFIG_GPC_SET_WAKE U(0x02) +#define FSL_SIP_CONFIG_GPC_PM_DOMAIN U(0x03) +#define FSL_SIP_CONFIG_GPC_SET_AFF U(0x04) +#define FSL_SIP_CONFIG_GPC_CORE_WAKE U(0x05) + +#define MAX_HW_IRQ_NUM U(128) +#define MAX_IMR_NUM U(4) + +static uint32_t gpc_saved_imrs[16]; +static uint32_t gpc_wake_irqs[4]; +static uint32_t gpc_imr_offset[] = { + IMX_GPC_BASE + IMR1_CORE0_A53, + IMX_GPC_BASE + IMR1_CORE1_A53, + IMX_GPC_BASE + IMR1_CORE2_A53, + IMX_GPC_BASE + IMR1_CORE3_A53, + IMX_GPC_BASE + IMR1_CORE0_M4, +}; + +spinlock_t gpc_imr_lock[4]; + +static void gpc_imr_core_spin_lock(unsigned int core_id) +{ + spin_lock(&gpc_imr_lock[core_id]); +} + +static void gpc_imr_core_spin_unlock(unsigned int core_id) +{ + spin_unlock(&gpc_imr_lock[core_id]); +} + +static void gpc_save_imr_lpm(unsigned int core_id, unsigned int imr_idx) +{ + uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4; + + gpc_imr_core_spin_lock(core_id); + + gpc_saved_imrs[core_id + imr_idx * 4] = mmio_read_32(reg); + mmio_write_32(reg, ~gpc_wake_irqs[imr_idx]); + + gpc_imr_core_spin_unlock(core_id); +} + +static void gpc_restore_imr_lpm(unsigned int core_id, unsigned int imr_idx) +{ + uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4; + uint32_t val = gpc_saved_imrs[core_id + imr_idx * 4]; + + gpc_imr_core_spin_lock(core_id); + + mmio_write_32(reg, val); + + gpc_imr_core_spin_unlock(core_id); +} + +/* + * On i.MX8MQ, only in system suspend mode, the A53 cluster can + * enter LPM mode and shutdown the A53 PLAT power domain. So LPM + * wakeup only used for system suspend. when system enter suspend, + * any A53 CORE can be the last core to suspend the system, But + * the LPM wakeup can only use the C0's IMR to wakeup A53 cluster + * from LPM, so save C0's IMRs before suspend, restore back after + * resume. 
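+ * In the function below, pdn == true saves every core's IMR words and then
+ * writes the inverse of gpc_wake_irqs, so that only the registered wake
+ * sources are left unmasked; pdn == false writes the saved values back on
+ * the resume path.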
+ */ +void imx_set_sys_wakeup(unsigned int last_core, bool pdn) +{ + unsigned int imr, core; + + if (pdn) { + for (imr = 0U; imr < MAX_IMR_NUM; imr++) { + for (core = 0U; core < PLATFORM_CORE_COUNT; core++) { + gpc_save_imr_lpm(core, imr); + } + } + } else { + for (imr = 0U; imr < MAX_IMR_NUM; imr++) { + for (core = 0U; core < PLATFORM_CORE_COUNT; core++) { + gpc_restore_imr_lpm(core, imr); + } + } + } +} + +static void imx_gpc_hwirq_mask(unsigned int hwirq) +{ + uintptr_t reg; + unsigned int val; + + if (hwirq >= MAX_HW_IRQ_NUM) { + return; + } + + gpc_imr_core_spin_lock(0); + reg = gpc_imr_offset[0] + (hwirq / 32) * 4; + val = mmio_read_32(reg); + val |= 1 << hwirq % 32; + mmio_write_32(reg, val); + gpc_imr_core_spin_unlock(0); +} + +static void imx_gpc_hwirq_unmask(unsigned int hwirq) +{ + uintptr_t reg; + unsigned int val; + + if (hwirq >= MAX_HW_IRQ_NUM) { + return; + } + + gpc_imr_core_spin_lock(0); + reg = gpc_imr_offset[0] + (hwirq / 32) * 4; + val = mmio_read_32(reg); + val &= ~(1 << hwirq % 32); + mmio_write_32(reg, val); + gpc_imr_core_spin_unlock(0); +} + +static void imx_gpc_set_wake(uint32_t hwirq, bool on) +{ + uint32_t mask, idx; + + if (hwirq >= MAX_HW_IRQ_NUM) { + return; + } + + mask = 1 << hwirq % 32; + idx = hwirq / 32; + gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask : + gpc_wake_irqs[idx] & ~mask; +} + +static void imx_gpc_mask_irq0(uint32_t core_id, uint32_t mask) +{ + gpc_imr_core_spin_lock(core_id); + if (mask) { + mmio_setbits_32(gpc_imr_offset[core_id], 1); + } else { + mmio_clrbits_32(gpc_imr_offset[core_id], 1); + } + + dsb(); + gpc_imr_core_spin_unlock(core_id); +} + +void imx_gpc_core_wake(uint32_t cpumask) +{ + for (int i = 0; i < PLATFORM_CORE_COUNT; i++) { + if (cpumask & (1 << i)) { + imx_gpc_mask_irq0(i, false); + } + } +} + +void imx_gpc_set_a53_core_awake(uint32_t core_id) +{ + imx_gpc_mask_irq0(core_id, true); +} + +static void imx_gpc_set_affinity(uint32_t hwirq, unsigned int cpu_idx) +{ + uintptr_t reg; + unsigned int val; + + if (hwirq >= MAX_HW_IRQ_NUM || cpu_idx >= 4) { + return; + } + + /* + * using the mask/unmask bit as affinity function.unmask the + * IMR bit to enable IRQ wakeup for this core. 
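+ * For example, hwirq 35 selects the second IMR word (35 / 32 == 1) and
+ * bit 3 (35 % 32): that bit is cleared in the IMR of the chosen core and
+ * set in the IMRs of all other cores.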
+ */ + gpc_imr_core_spin_lock(cpu_idx); + reg = gpc_imr_offset[cpu_idx] + (hwirq / 32) * 4; + val = mmio_read_32(reg); + val &= ~(1 << hwirq % 32); + mmio_write_32(reg, val); + gpc_imr_core_spin_unlock(cpu_idx); + + /* clear affinity of other core */ + for (int i = 0; i < PLATFORM_CORE_COUNT; i++) { + if (cpu_idx != i) { + gpc_imr_core_spin_lock(i); + reg = gpc_imr_offset[i] + (hwirq / 32) * 4; + val = mmio_read_32(reg); + val |= (1 << hwirq % 32); + mmio_write_32(reg, val); + gpc_imr_core_spin_unlock(i); + } + } +} + +/* use wfi power down the core */ +void imx_set_cpu_pwr_off(unsigned int core_id) +{ + bakery_lock_get(&gpc_lock); + + /* enable the wfi power down of the core */ + mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) | + (1 << (core_id + 20))); + + bakery_lock_release(&gpc_lock); + + /* assert the pcg pcr bit of the core */ + mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1); +}; + +/* if out of lpm, we need to do reverse steps */ +void imx_set_cpu_lpm(unsigned int core_id, bool pdn) +{ + bakery_lock_get(&gpc_lock); + + if (pdn) { + /* enable the core WFI PDN & IRQ PUP */ + mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) | + (1 << (core_id + 20)) | COREx_IRQ_WUP(core_id)); + /* assert the pcg pcr bit of the core */ + mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1); + } else { + /* disable CORE WFI PDN & IRQ PUP */ + mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) | + COREx_IRQ_WUP(core_id)); + /* deassert the pcg pcr bit of the core */ + mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1); + } + + bakery_lock_release(&gpc_lock); +} + +void imx_pup_pdn_slot_config(int last_core, bool pdn) +{ + if (pdn) { + /* SLOT0 for A53 PLAT power down */ + mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(0), SLT_PLAT_PDN); + /* SLOT1 for A53 PLAT power up */ + mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(1), SLT_PLAT_PUP); + /* SLOT2 for A53 primary core power up */ + mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(2), SLT_COREx_PUP(last_core)); + /* ACK setting: PLAT ACK for PDN, CORE ACK for PUP */ + mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF, + A53_PLAT_PDN_ACK | SLT_COREx_PUP_ACK(last_core)); + } else { + mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(0), 0xFFFFFFFF); + mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(1), 0xFFFFFFFF); + mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(2), 0xFFFFFFFF); + mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF, + A53_DUMMY_PDN_ACK | A53_DUMMY_PUP_ACK); + } +} + +void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state) +{ + uint32_t val; + + if (is_local_state_off(power_state)) { + val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC); + val |= A53_LPM_STOP; /* enable C0-C1's STOP mode */ + val &= ~CPU_CLOCK_ON_LPM; /* disable CPU clock in LPM mode */ + mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val); + + /* enable C2-3's STOP mode */ + mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, A53_LPM_STOP); + + /* enable PLAT/SCU power down */ + val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD); + val &= ~EN_L2_WFI_PDN; + val |= L2PGE | EN_PLAT_PDN; + val &= ~COREx_IRQ_WUP(last_core); /* disable IRQ PUP for last core */ + val |= COREx_LPM_PUP(last_core); /* enable LPM PUP for last core */ + mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val); + + imx_pup_pdn_slot_config(last_core, true); + + /* enable PLAT PGC */ + mmio_setbits_32(IMX_GPC_BASE + A53_PLAT_PGC, 0x1); + } else { + /* clear PLAT PGC */ + mmio_clrbits_32(IMX_GPC_BASE + A53_PLAT_PGC, 0x1); + + /* clear the slot and ack for 
cluster power down */ + imx_pup_pdn_slot_config(last_core, false); + + val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC); + val &= ~A53_LPM_MASK; /* clear the C0~1 LPM */ + val |= CPU_CLOCK_ON_LPM; /* disable cpu clock in LPM */ + mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val); + + /* set A53 LPM to RUN mode */ + mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, A53_LPM_MASK); + + /* clear PLAT/SCU power down */ + val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD); + val |= EN_L2_WFI_PDN; + val &= ~(L2PGE | EN_PLAT_PDN); + val &= ~COREx_LPM_PUP(last_core); /* disable C0's LPM PUP */ + mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val); + } +} + +#define MAX_PLL_NUM U(12) + +static const struct pll_override imx8mq_pll[MAX_PLL_NUM] = { + {.reg = 0x0, .override_mask = 0x140000, }, + {.reg = 0x8, .override_mask = 0x140000, }, + {.reg = 0x10, .override_mask = 0x140000, }, + {.reg = 0x18, .override_mask = 0x140000, }, + {.reg = 0x20, .override_mask = 0x140000, }, + {.reg = 0x28, .override_mask = 0x140000, }, + {.reg = 0x30, .override_mask = 0x1555540, }, + {.reg = 0x3c, .override_mask = 0x1555540, }, + {.reg = 0x48, .override_mask = 0x140, }, + {.reg = 0x54, .override_mask = 0x140, }, + {.reg = 0x60, .override_mask = 0x140, }, + {.reg = 0x70, .override_mask = 0xa, }, +}; + +void imx_anamix_override(bool enter) +{ + unsigned int i; + + /* enable the pll override bit before entering DSM mode */ + for (i = 0; i < MAX_PLL_NUM; i++) { + if (enter) { + mmio_setbits_32(IMX_ANAMIX_BASE + imx8mq_pll[i].reg, + imx8mq_pll[i].override_mask); + } else { + mmio_clrbits_32(IMX_ANAMIX_BASE + imx8mq_pll[i].reg, + imx8mq_pll[i].override_mask); + } + } +} + +int imx_gpc_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3) +{ + switch (x1) { + case FSL_SIP_CONFIG_GPC_CORE_WAKE: + imx_gpc_core_wake(x2); + break; + case FSL_SIP_CONFIG_GPC_SET_WAKE: + imx_gpc_set_wake(x2, x3); + break; + case FSL_SIP_CONFIG_GPC_MASK: + imx_gpc_hwirq_mask(x2); + break; + case FSL_SIP_CONFIG_GPC_UNMASK: + imx_gpc_hwirq_unmask(x2); + break; + case FSL_SIP_CONFIG_GPC_SET_AFF: + imx_gpc_set_affinity(x2, x3); + break; + default: + return SMC_UNK; + } + + return 0; +} + +void imx_gpc_init(void) +{ + uint32_t val; + unsigned int i, j; + + /* mask all the interrupt by default */ + for (i = 0U; i < PLATFORM_CORE_COUNT; i++) { + for (j = 0U; j < ARRAY_SIZE(gpc_imr_offset); j++) { + mmio_write_32(gpc_imr_offset[j] + i * 4, ~0x0); + } + } + + /* Due to the hardware design requirement, need to make + * sure GPR interrupt(#32) is unmasked during RUN mode to + * avoid entering DSM mode by mistake. 
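+ * Writing ~0x1 below therefore leaves only bit 0 of each core's first IMR
+ * word (interrupt #32) unmasked, while every other interrupt stays masked.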
+ */ + for (i = 0U; i < PLATFORM_CORE_COUNT; i++) { + mmio_write_32(gpc_imr_offset[i], ~0x1); + } + + /* leave the IOMUX_GPC bit 12 on for core wakeup */ + mmio_setbits_32(IMX_IOMUX_GPR_BASE + 0x4, 1 << 12); + + /* use external IRQs to wakeup C0~C3 from LPM */ + val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC); + val |= IRQ_SRC_A53_WUP; + /* clear the MASTER0 LPM handshake */ + val &= ~MASTER0_LPM_HSK; + mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val); + + /* mask M4 DSM trigger if M4 is NOT enabled */ + mmio_setbits_32(IMX_GPC_BASE + LPCR_M4, DSM_MODE_MASK); + + /* set all mix/PU in A53 domain */ + mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xfffd); + + /* set SCU timing */ + mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING, + (0x59 << 10) | 0x5B | (0x2 << 20)); + + /* set DUMMY PDN/PUP ACK by default for A53 domain */ + mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, A53_DUMMY_PUP_ACK | + A53_DUMMY_PDN_ACK); + + /* disable DSM mode by default */ + mmio_clrbits_32(IMX_GPC_BASE + SLPCR, DSM_MODE_MASK); + + /* + * USB PHY power up needs to make sure RESET bit in SRC is clear, + * otherwise, the PU power up bit in GPC will NOT self-cleared. + * only need to do it once. + */ + mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1); + mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1); + + /* + * for USB OTG, the limitation are: + * 1. before system clock config, the IPG clock run at 12.5MHz, delay time + * should be longer than 82us. + * 2. after system clock config, ipg clock run at 66.5MHz, delay time + * be longer that 15.3 us. + * Add 100us to make sure the USB OTG SRC is clear safely. + */ + udelay(100); +} diff --git a/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c b/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c new file mode 100644 index 0000000..7065a65 --- /dev/null +++ b/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stdbool.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <context.h> +#include <drivers/arm/tzc380.h> +#include <drivers/console.h> +#include <drivers/generic_delay_timer.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/mmio.h> +#include <lib/xlat_tables/xlat_tables_v2.h> +#include <plat/common/platform.h> + +#include <dram.h> +#include <gpc.h> +#include <imx_aipstz.h> +#include <imx_uart.h> +#include <imx8m_caam.h> +#include <plat_imx8.h> + +#define TRUSTY_PARAMS_LEN_BYTES (4096*2) + +/* + * Avoid the pointer dereference of the canonical mmio_read_8() implementation. + * This prevents the compiler from mis-interpreting the MMIO access as an + * illegal memory access to a very low address (the IMX ROM is mapped at 0). 
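+ * It is used below to read the ROM version bytes at IMX_ROM_BASE +
+ * ROM_SOC_INFO_A0/B0, which newer compilers can otherwise flag or
+ * mis-compile as a null-pointer dereference.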
+ */ +static uint8_t mmio_read_8_ldrb(uintptr_t address) +{ + uint8_t reg; + + __asm__ volatile ("ldrb %w0, [%1]" : "=r" (reg) : "r" (address)); + + return reg; +} + +static const mmap_region_t imx_mmap[] = { + MAP_REGION_FLAT(GPV_BASE, GPV_SIZE, MT_DEVICE | MT_RW), /* GPV map */ + MAP_REGION_FLAT(IMX_ROM_BASE, IMX_ROM_SIZE, MT_MEMORY | MT_RO), /* ROM map */ + MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE, MT_DEVICE | MT_RW), /* AIPS map */ + MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE, MT_DEVICE | MT_RW), /* GIC map */ + MAP_REGION_FLAT(IMX_DDRPHY_BASE, IMX_DDR_IPS_SIZE, MT_DEVICE | MT_RW), /* DDRMIX map */ + MAP_REGION_FLAT(IMX_DRAM_BASE, IMX_DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS), + {0}, +}; + +static const struct aipstz_cfg aipstz[] = { + {AIPSTZ1_BASE, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {AIPSTZ2_BASE, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {AIPSTZ3_BASE, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {AIPSTZ4_BASE, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, }, + {0}, +}; + +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; + +static uint32_t imx_soc_revision; + +int imx_soc_info_handler(uint32_t smc_fid, u_register_t x1, u_register_t x2, + u_register_t x3) +{ + return imx_soc_revision; +} + +#define ANAMIX_DIGPROG 0x6c +#define ROM_SOC_INFO_A0 0x800 +#define ROM_SOC_INFO_B0 0x83C +#define OCOTP_SOC_INFO_B1 0x40 + +static void imx8mq_soc_info_init(void) +{ + uint32_t rom_version; + uint32_t ocotp_val; + + imx_soc_revision = mmio_read_32(IMX_ANAMIX_BASE + ANAMIX_DIGPROG); + rom_version = mmio_read_8_ldrb(IMX_ROM_BASE + ROM_SOC_INFO_A0); + if (rom_version == 0x10) + return; + + rom_version = mmio_read_8_ldrb(IMX_ROM_BASE + ROM_SOC_INFO_B0); + if (rom_version == 0x20) { + imx_soc_revision &= ~0xff; + imx_soc_revision |= rom_version; + return; + } + + /* 0xff0055aa is magic number for B1 */ + ocotp_val = mmio_read_32(IMX_OCOTP_BASE + OCOTP_SOC_INFO_B1); + if (ocotp_val == 0xff0055aa) { + imx_soc_revision &= ~0xff; + if (rom_version == 0x22) { + imx_soc_revision |= 0x22; + } else { + imx_soc_revision |= 0x21; + } + return; + } +} + +/* get SPSR for BL33 entry */ +static uint32_t get_spsr_for_bl33_entry(void) +{ + unsigned long el_status; + unsigned long mode; + uint32_t spsr; + + /* figure out what mode we enter the non-secure world */ + el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; + el_status &= ID_AA64PFR0_ELX_MASK; + + mode = (el_status) ? MODE_EL2 : MODE_EL1; + + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + +static void bl31_tz380_setup(void) +{ + unsigned int val; + + val = mmio_read_32(IMX_IOMUX_GPR_BASE + IOMUXC_GPR10); + if ((val & GPR_TZASC_EN) != GPR_TZASC_EN) + return; + + tzc380_init(IMX_TZASC_BASE); + /* + * Need to substact offset 0x40000000 from CPU address when + * programming tzasc region for i.mx8mq. 
Enable 1G-5G S/NS RW + */ + tzc380_configure_region(0, 0x00000000, TZC_ATTR_REGION_SIZE(TZC_REGION_SIZE_4G) | + TZC_ATTR_REGION_EN_MASK | TZC_ATTR_SP_ALL); +} + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + static console_t console; + int i; + /* enable CSU NS access permission */ + for (i = 0; i < 64; i++) { + mmio_write_32(IMX_CSU_BASE + i * 4, 0xffffffff); + } + + imx_aipstz_init(aipstz); + + console_imx_uart_register(IMX_BOOT_UART_BASE, IMX_BOOT_UART_CLK_IN_HZ, + IMX_CONSOLE_BAUDRATE, &console); + /* This console is only used for boot stage */ + console_set_scope(&console, CONSOLE_FLAG_BOOT); + + imx8m_caam_init(); + + /* + * tell BL3-1 where the non-secure software image is located + * and the entry state information. + */ + bl33_image_ep_info.pc = PLAT_NS_IMAGE_OFFSET; + bl33_image_ep_info.spsr = get_spsr_for_bl33_entry(); + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); + +#if defined(SPD_opteed) || defined(SPD_trusty) + /* Populate entry point information for BL32 */ + SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0); + SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE); + bl32_image_ep_info.pc = BL32_BASE; + bl32_image_ep_info.spsr = 0; + + /* Pass TEE base and size to bl33 */ + bl33_image_ep_info.args.arg1 = BL32_BASE; + bl33_image_ep_info.args.arg2 = BL32_SIZE; + +#ifdef SPD_trusty + bl32_image_ep_info.args.arg0 = BL32_SIZE; + bl32_image_ep_info.args.arg1 = BL32_BASE; +#else + /* Make sure memory is clean */ + mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0); + bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR; + bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR; +#endif +#endif + + bl31_tz380_setup(); +} + +void bl31_plat_arch_setup(void) +{ + const mmap_region_t bl_regions[] = { + MAP_REGION_FLAT(BL31_START, BL31_SIZE, + MT_MEMORY | MT_RW | MT_SECURE), + MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, + MT_MEMORY | MT_RO | MT_SECURE), +#if USE_COHERENT_MEM + MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, + BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, + MT_DEVICE | MT_RW | MT_SECURE), +#endif + /* Map TEE memory */ + MAP_REGION_FLAT(BL32_BASE, BL32_SIZE, MT_MEMORY | MT_RW), + {0}, + }; + + setup_page_tables(bl_regions, imx_mmap); + /* enable the MMU */ + enable_mmu_el3(0); +} + +void bl31_platform_setup(void) +{ + generic_delay_timer_init(); + + /* init the GICv3 cpu and distributor interface */ + plat_gic_driver_init(); + plat_gic_init(); + + /* determine SOC revision for erratas */ + imx8mq_soc_info_init(); + + /* gpc init */ + imx_gpc_init(); + + dram_info_init(SAVED_DRAM_TIMING_BASE); +} + +entry_point_info_t *bl31_plat_get_next_image_ep_info(unsigned int type) +{ + if (type == NON_SECURE) + return &bl33_image_ep_info; + if (type == SECURE) + return &bl32_image_ep_info; + + return NULL; +} + +unsigned int plat_get_syscnt_freq2(void) +{ + return COUNTER_FREQUENCY; +} + +#ifdef SPD_trusty +void plat_trusty_set_boot_args(aapcs64_params_t *args) +{ + args->arg0 = BL32_SIZE; + args->arg1 = BL32_BASE; + args->arg2 = TRUSTY_PARAMS_LEN_BYTES; +} +#endif diff --git a/plat/imx/imx8m/imx8mq/imx8mq_psci.c b/plat/imx/imx8m/imx8mq/imx8mq_psci.c new file mode 100644 index 0000000..3375ce7 --- /dev/null +++ b/plat/imx/imx8m/imx8mq/imx8mq_psci.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdbool.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/psci/psci.h> + +#include <dram.h> +#include <gpc.h> +#include <imx8m_psci.h> +#include <plat_imx8.h> + +int imx_validate_power_state(unsigned int power_state, + psci_power_state_t *req_state) +{ + int pwr_lvl = psci_get_pstate_pwrlvl(power_state); + int pwr_type = psci_get_pstate_type(power_state); + int state_id = psci_get_pstate_id(power_state); + + if (pwr_lvl > PLAT_MAX_PWR_LVL) + return PSCI_E_INVALID_PARAMS; + + if (pwr_type == PSTATE_TYPE_STANDBY) { + CORE_PWR_STATE(req_state) = PLAT_MAX_RET_STATE; + CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE; + } + + if (pwr_type == PSTATE_TYPE_POWERDOWN && state_id == 0x33) { + CORE_PWR_STATE(req_state) = PLAT_MAX_OFF_STATE; + CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE; + } + + return PSCI_E_SUCCESS; +} + +void imx_pwr_domain_off(const psci_power_state_t *target_state) +{ + uint64_t mpidr = read_mpidr_el1(); + unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr); + + plat_gic_cpuif_disable(); + imx_set_cpu_pwr_off(core_id); + + /* + * TODO: Find out why this is still + * needed in order not to break suspend + */ + udelay(50); +} + +void imx_domain_suspend(const psci_power_state_t *target_state) +{ + uint64_t base_addr = BL31_START; + uint64_t mpidr = read_mpidr_el1(); + unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr); + + if (is_local_state_off(CORE_PWR_STATE(target_state))) { + /* disable the cpu interface */ + plat_gic_cpuif_disable(); + imx_set_cpu_secure_entry(core_id, base_addr); + imx_set_cpu_lpm(core_id, true); + } else { + dsb(); + write_scr_el3(read_scr_el3() | SCR_FIQ_BIT); + isb(); + } + + if (is_local_state_off(CLUSTER_PWR_STATE(target_state))) + imx_set_cluster_powerdown(core_id, CLUSTER_PWR_STATE(target_state)); + else + imx_set_cluster_standby(true); + + if (is_local_state_retn(SYSTEM_PWR_STATE(target_state))) { + imx_set_sys_lpm(core_id, true); + dram_enter_retention(); + imx_anamix_override(true); + } +} + +void imx_domain_suspend_finish(const psci_power_state_t *target_state) +{ + uint64_t mpidr = read_mpidr_el1(); + unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr); + + /* check the system level status */ + if (is_local_state_retn(SYSTEM_PWR_STATE(target_state))) { + imx_anamix_override(false); + dram_exit_retention(); + imx_set_sys_lpm(core_id, false); + imx_clear_rbc_count(); + } + + /* check the cluster level power status */ + if (is_local_state_off(CLUSTER_PWR_STATE(target_state))) + imx_set_cluster_powerdown(core_id, PSCI_LOCAL_STATE_RUN); + else + imx_set_cluster_standby(false); + + /* check the core level power status */ + if (is_local_state_off(CORE_PWR_STATE(target_state))) { + /* mark this core as awake by masking IRQ0 */ + imx_gpc_set_a53_core_awake(core_id); + /* clear the core lpm setting */ + imx_set_cpu_lpm(core_id, false); + /* enable the gic cpu interface */ + plat_gic_cpuif_enable(); + } else { + write_scr_el3(read_scr_el3() & (~0x4)); + isb(); + } +} + +void imx_get_sys_suspend_power_state(psci_power_state_t *req_state) +{ + unsigned int i; + + for (i = IMX_PWR_LVL0; i < PLAT_MAX_PWR_LVL; i++) + req_state->pwr_domain_state[i] = PLAT_STOP_OFF_STATE; + + req_state->pwr_domain_state[PLAT_MAX_PWR_LVL] = PLAT_MAX_RET_STATE; +} + +static const plat_psci_ops_t imx_plat_psci_ops = { + .pwr_domain_on = imx_pwr_domain_on, + .pwr_domain_on_finish = imx_pwr_domain_on_finish, + .pwr_domain_off = 
imx_pwr_domain_off, + .validate_ns_entrypoint = imx_validate_ns_entrypoint, + .validate_power_state = imx_validate_power_state, + .cpu_standby = imx_cpu_standby, + .pwr_domain_suspend = imx_domain_suspend, + .pwr_domain_suspend_finish = imx_domain_suspend_finish, + .pwr_domain_pwr_down_wfi = imx_pwr_domain_pwr_down_wfi, + .get_sys_suspend_power_state = imx_get_sys_suspend_power_state, + .system_reset = imx_system_reset, + .system_reset2 = imx_system_reset2, + .system_off = imx_system_off, +}; + +/* export the platform specific psci ops */ +int plat_setup_psci_ops(uintptr_t sec_entrypoint, + const plat_psci_ops_t **psci_ops) +{ + imx_mailbox_init(sec_entrypoint); + /* sec_entrypoint is used for warm reset */ + *psci_ops = &imx_plat_psci_ops; + + return 0; +} diff --git a/plat/imx/imx8m/imx8mq/include/gpc_reg.h b/plat/imx/imx8m/imx8mq/include/gpc_reg.h new file mode 100644 index 0000000..f171bd9 --- /dev/null +++ b/plat/imx/imx8m/imx8mq/include/gpc_reg.h @@ -0,0 +1,89 @@ +/* + * Copyright 2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GPC_REG_H +#define GPC_REG_H + +#define LPCR_A53_BSC 0x0 +#define LPCR_A53_BSC2 0x108 +#define LPCR_A53_AD 0x4 +#define LPCR_M4 0x8 +#define SLPCR 0x14 +#define MST_CPU_MAPPING 0x18 +#define MLPCR 0x20 +#define PGC_ACK_SEL_A53 0x24 +#define IMR1_CORE0_A53 0x30 +#define IMR1_CORE1_A53 0x40 +#define IMR1_CORE2_A53 0x1C0 +#define IMR1_CORE3_A53 0x1D0 +#define IMR1_CORE0_M4 0x50 +#define SLT0_CFG 0xB0 +#define GPC_PU_PWRHSK 0x1FC +#define PGC_CPU_0_1_MAPPING 0xEC +#define CPU_PGC_UP_TRG 0xF0 +#define PU_PGC_UP_TRG 0xF8 +#define CPU_PGC_DN_TRG 0xFC +#define PU_PGC_DN_TRG 0x104 +#define LPS_CPU1 0x114 +#define A53_CORE0_PGC 0x800 +#define A53_PLAT_PGC 0x900 +#define PLAT_PGC_PCR 0x900 +#define NOC_PGC_PCR 0xa40 +#define PGC_SCU_TIMING 0x910 + +#define MASK_DSM_TRIGGER_A53 BIT(31) +#define IRQ_SRC_A53_WUP BIT(30) +#define IRQ_SRC_A53_WUP_SHIFT 30 +#define IRQ_SRC_C1 BIT(29) +#define IRQ_SRC_C0 BIT(28) +#define IRQ_SRC_C3 BIT(23) +#define IRQ_SRC_C2 BIT(22) +#define CPU_CLOCK_ON_LPM BIT(14) +#define A53_CLK_ON_LPM BIT(14) +#define MASTER0_LPM_HSK BIT(6) +#define MASTER1_LPM_HSK BIT(7) +#define MASTER2_LPM_HSK BIT(8) + +#define L2PGE BIT(31) +#define EN_L2_WFI_PDN BIT(5) +#define EN_PLAT_PDN BIT(4) + +#define SLPCR_EN_DSM BIT(31) +#define SLPCR_RBC_EN BIT(30) +#define SLPCR_A53_FASTWUP_STOP_MODE BIT(17) +#define SLPCR_A53_FASTWUP_WAIT_MODE BIT(16) +#define SLPCR_VSTBY BIT(2) +#define SLPCR_SBYOS BIT(1) +#define SLPCR_BYPASS_PMIC_READY BIT(0) +#define SLPCR_RBC_COUNT_SHIFT 24 +#define SLPCR_STBY_COUNT_SHFT 3 + +#define A53_DUMMY_PDN_ACK BIT(15) +#define A53_DUMMY_PUP_ACK BIT(31) +#define A53_PLAT_PDN_ACK BIT(2) +#define A53_PLAT_PUP_ACK BIT(18) +#define NOC_PDN_SLT_CTRL BIT(10) +#define NOC_PUP_SLT_CTRL BIT(11) +#define NOC_PGC_PDN_ACK BIT(3) +#define NOC_PGC_PUP_ACK BIT(19) + +#define DDRMIX_PWR_REQ BIT(5) +#define DDRMIX_ADB400_SYNC BIT(1) +#define DDRMIX_ADB400_ACK BIT(18) +#define DDRMIX_PGC 0xd40 + +#define PLAT_PUP_SLT_CTRL BIT(9) +#define PLAT_PDN_SLT_CTRL BIT(8) + +#define SLT_PLAT_PDN BIT(8) +#define SLT_PLAT_PUP BIT(9) + +#define MASTER1_MAPPING BIT(1) +#define MASTER2_MAPPING BIT(2) + +#define IRQ_IMR_NUM U(4) + +#endif /* GPC_REG_H */ diff --git a/plat/imx/imx8m/imx8mq/include/imx_sec_def.h b/plat/imx/imx8m/imx8mq/include/imx_sec_def.h new file mode 100644 index 0000000..0f77141 --- /dev/null +++ b/plat/imx/imx8m/imx8mq/include/imx_sec_def.h @@ -0,0 +1,249 @@ +/* + * Copyright 2020-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause 
+ */ + +#ifndef IMX_SEC_DEF_H +#define IMX_SEC_DEF_H + +/* RDC MDA index */ +enum rdc_mda_idx { + RDC_MDA_A53 = 0, + RDC_MDA_M4 = 1, + RDC_MDA_PCIE_CTRL1 = 2, + RDC_MDA_PCIE_CTRL2 = 3, + RDC_MDA_VPU_DEC = 4, + RDC_MDA_LCDIF = 5, + RDC_MDA_CSI1 = 6, + RDC_MDA_CSI2 = 7, + RDC_MDA_Coresight = 8, + RDC_MDA_DAP = 9, + RDC_MDA_CAAM = 10, + RDC_MDA_SDMAp = 11, + RDC_MDA_SDMAb = 12, + RDC_MDA_APBHDMA = 13, + RDC_MDA_RAWNAND = 14, + RDC_MDA_uSDHC1 = 15, + RDC_MDA_uSDHC2 = 16, + RDC_MDA_DCSS = 17, + RDC_MDA_GPU = 18, + RDC_MDA_USB1 = 19, + RDC_MDA_USB2 = 20, + RDC_MDA_TESTPORT = 21, + RDC_MDA_ENET1_TX = 22, + RDC_MDA_ENET1_RX = 23, + RDC_MDA_SDMA2 = 24, + RDC_MDA_SDMA1 = 26, +}; + +/* RDC Peripherals index */ +enum rdc_pdap_idx { + RDC_PDAP_GPIO1 = 0, + RDC_PDAP_GPIO2 = 1, + RDC_PDAP_GPIO3 = 2, + RDC_PDAP_GPIO4 = 3, + RDC_PDAP_GPIO5 = 4, + RDC_PDAP_ANA_TSENSOR = 6, + RDC_PDAP_ANA_OSC = 7, + RDC_PDAP_WDOG1 = 8, + RDC_PDAP_WDOG2 = 9, + RDC_PDAP_WDOG3 = 10, + RDC_PDAP_SDMA2 = 12, + RDC_PDAP_GPT1 = 13, + RDC_PDAP_GPT2 = 14, + RDC_PDAP_GPT3 = 15, + RDC_PDAP_ROMCP = 17, + RDC_PDAP_LCDIF = 18, + RDC_PDAP_IOMUXC = 19, + RDC_PDAP_IOMUXC_GPR = 20, + RDC_PDAP_OCOTP_CTRL = 21, + RDC_PDAP_ANATOP_PLL = 22, + RDC_PDAP_SNVS_HP = 23, + RDC_PDAP_CCM = 24, + RDC_PDAP_SRC = 25, + RDC_PDAP_GPC = 26, + RDC_PDAP_SEMAPHORE1 = 27, + RDC_PDAP_SEMAPHORE2 = 28, + RDC_PDAP_RDC = 29, + RDC_PDAP_CSU = 30, + RDC_PDAP_MST0 = 32, + RDC_PDAP_MST1 = 33, + RDC_PDAP_MST2 = 34, + RDC_PDAP_MST3 = 35, + RDC_PDAP_HDMI_SEC = 36, + RDC_PDAP_PWM1 = 38, + RDC_PDAP_PWM2 = 39, + RDC_PDAP_PWM3 = 40, + RDC_PDAP_PWM4 = 41, + RDC_PDAP_SysCounter_RD = 42, + RDC_PDAP_SysCounter_CMP = 43, + RDC_PDAP_SysCounter_CTRL = 44, + RDC_PDAP_HDMI_CTRL = 45, + RDC_PDAP_GPT6 = 46, + RDC_PDAP_GPT5 = 47, + RDC_PDAP_GPT4 = 48, + RDC_PDAP_TZASC = 56, + RDC_PDAP_MTR = 59, + RDC_PDAP_PERFMON1 = 60, + RDC_PDAP_PERFMON2 = 61, + RDC_PDAP_PLATFORM_CTRL = 62, + RDC_PDAP_QoSC = 63, + RDC_PDAP_MIPI_PHY = 64, + RDC_PDAP_MIPI_DSI = 65, + RDC_PDAP_I2C1 = 66, + RDC_PDAP_I2C2 = 67, + RDC_PDAP_I2C3 = 68, + RDC_PDAP_I2C4 = 69, + RDC_PDAP_UART4 = 70, + RDC_PDAP_MIPI_CSI1 = 71, + RDC_PDAP_MIPI_CSI_PHY1 = 72, + RDC_PDAP_CSI1 = 73, + RDC_PDAP_MU_A = 74, + RDC_PDAP_MU_B = 75, + RDC_PDAP_SEMAPHORE_HS = 76, + RDC_PDAP_SAI1 = 78, + RDC_PDAP_SAI6 = 80, + RDC_PDAP_SAI5 = 81, + RDC_PDAP_SAI4 = 82, + RDC_PDAP_USDHC1 = 84, + RDC_PDAP_USDHC2 = 85, + RDC_PDAP_MIPI_CSI2 = 86, + RDC_PDAP_MIPI_CSI_PHY2 = 87, + RDC_PDAP_CSI2 = 88, + RDC_PDAP_QSPI = 91, + RDC_PDAP_SDMA1 = 93, + RDC_PDAP_ENET1 = 94, + RDC_PDAP_SPDIF1 = 97, + RDC_PDAP_ECSPI1 = 98, + RDC_PDAP_ECSPI2 = 99, + RDC_PDAP_ECSPI3 = 100, + RDC_PDAP_UART1 = 102, + RDC_PDAP_UART3 = 104, + RDC_PDAP_UART2 = 105, + RDC_PDAP_SPDIF2 = 106, + RDC_PDAP_SAI2 = 107, + RDC_PDAP_SAI3 = 108, + RDC_PDAP_SPBA1 = 111, + RDC_PDAP_CAAM = 114, + RDC_PDAP_DDRC_SEC = 115, + RDC_PDAP_GIC_EXSC = 116, + RDC_PDAP_USB_EXSC = 117, + RDC_PDAP_OCRAM_TZ = 118, + RDC_PDAP_OCRAM_S_TZ = 119, + RDC_PDAP_VPU_SEC = 120, + RDC_PDAP_DAP_EXSC = 121, + RDC_PDAP_ROMCP_SEC = 122, + RDC_PDAP_APBHDMA_SEC = 123, + RDC_PDAP_M4_SEC = 124, + RDC_PDAP_QSPI_SEC = 125, + RDC_PDAP_GPU_EXSC = 126, + RDC_PDAP_PCIE = 127, +}; + +enum csu_csl_idx { + CSU_CSL_GPIO1 = 0, + CSU_CSL_GPIO2 = 1, + CSU_CSL_GPIO3 = 2, + CSU_CSL_GPIO4 = 3, + CSU_CSL_GPIO5 = 4, + CSU_CSL_ANA_TSENSOR = 6, + CSU_CSL_ANA_OSC = 7, + CSU_CSL_WDOG1 = 8, + CSU_CSL_WDOG2 = 9, + CSU_CSL_WDOG3 = 10, + CSU_CSL_SDMA2 = 12, + CSU_CSL_GPT1 = 13, + CSU_CSL_GPT2 = 14, + CSU_CSL_GPT3 = 15, + CSU_CSL_ROMCP = 17, + CSU_CSL_LCDIF = 18, + 
CSU_CSL_IOMUXC = 19, + CSU_CSL_IOMUXC_GPR = 20, + CSU_CSL_OCOTP_CTRL = 21, + CSU_CSL_ANATOP_PLL = 22, + CSU_CSL_SNVS_HP = 23, + CSU_CSL_CCM = 24, + CSU_CSL_SRC = 25, + CSU_CSL_GPC = 26, + CSU_CSL_SEMAPHORE1 = 27, + CSU_CSL_SEMAPHORE2 = 28, + CSU_CSL_RDC = 29, + CSU_CSL_CSU = 30, + CSU_CSL_MST0 = 32, + CSU_CSL_MST1 = 33, + CSU_CSL_MST2 = 34, + CSU_CSL_MST3 = 35, + CSU_CSL_HDMI_SEC = 36, + CSU_CSL_PWM1 = 38, + CSU_CSL_PWM2 = 39, + CSU_CSL_PWM3 = 40, + CSU_CSL_PWM4 = 41, + CSU_CSL_SysCounter_RD = 42, + CSU_CSL_SysCounter_CMP = 43, + CSU_CSL_SysCounter_CTRL = 44, + CSU_CSL_HDMI_CTRL = 45, + CSU_CSL_GPT6 = 46, + CSU_CSL_GPT5 = 47, + CSU_CSL_GPT4 = 48, + CSU_CSL_TZASC = 56, + CSU_CSL_MTR = 59, + CSU_CSL_PERFMON1 = 60, + CSU_CSL_PERFMON2 = 61, + CSU_CSL_PLATFORM_CTRL = 62, + CSU_CSL_QoSC = 63, + CSU_CSL_MIPI_PHY = 64, + CSU_CSL_MIPI_DSI = 65, + CSU_CSL_I2C1 = 66, + CSU_CSL_I2C2 = 67, + CSU_CSL_I2C3 = 68, + CSU_CSL_I2C4 = 69, + CSU_CSL_UART4 = 70, + CSU_CSL_MIPI_CSI1 = 71, + CSU_CSL_MIPI_CSI_PHY1 = 72, + CSU_CSL_CSI1 = 73, + CSU_CSL_MU_A = 74, + CSU_CSL_MU_B = 75, + CSU_CSL_SEMAPHORE_HS = 76, + CSU_CSL_SAI1 = 78, + CSU_CSL_SAI6 = 80, + CSU_CSL_SAI5 = 81, + CSU_CSL_SAI4 = 82, + CSU_CSL_USDHC1 = 84, + CSU_CSL_USDHC2 = 85, + CSU_CSL_MIPI_CSI2 = 86, + CSU_CSL_MIPI_CSI_PHY2 = 87, + CSU_CSL_CSI2 = 88, + CSU_CSL_QSPI = 91, + CSU_CSL_SDMA1 = 93, + CSU_CSL_ENET1 = 94, + CSU_CSL_SPDIF1 = 97, + CSU_CSL_ECSPI1 = 98, + CSU_CSL_ECSPI2 = 99, + CSU_CSL_ECSPI3 = 100, + CSU_CSL_UART1 = 102, + CSU_CSL_UART3 = 104, + CSU_CSL_UART2 = 105, + CSU_CSL_SPDIF2 = 106, + CSU_CSL_SAI2 = 107, + CSU_CSL_SAI3 = 108, + CSU_CSL_SPBA1 = 111, + CSU_CSL_MOD_EN3 = 112, + CSU_CSL_MOD_EN0 = 113, + CSU_CSL_CAAM = 114, + CSU_CSL_DDRC_SEC = 115, + CSU_CSL_GIC_EXSC = 116, + CSU_CSL_USB_EXSC = 117, + CSU_CSL_OCRAM_TZ = 118, + CSU_CSL_OCRAM_S_TZ = 119, + CSU_CSL_VPU_SEC = 120, + CSU_CSL_DAP_EXSC = 121, + CSU_CSL_ROMCP_SEC = 122, + CSU_CSL_APBHDMA_SEC = 123, + CSU_CSL_M4_SEC = 124, + CSU_CSL_QSPI_SEC = 125, + CSU_CSL_GPU_EXSC = 126, + CSU_CSL_PCIE = 127, +}; + +#endif /* IMX_SEC_DEF_H */ diff --git a/plat/imx/imx8m/imx8mq/include/platform_def.h b/plat/imx/imx8m/imx8mq/include/platform_def.h new file mode 100644 index 0000000..2526a02 --- /dev/null +++ b/plat/imx/imx8m/imx8mq/include/platform_def.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/utils_def.h> +#include <plat/common/common_def.h> + +#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" +#define PLATFORM_LINKER_ARCH aarch64 + +#define PLATFORM_STACK_SIZE 0x800 +#define CACHE_WRITEBACK_GRANULE 64 + +#define PLAT_PRIMARY_CPU U(0x0) +#define PLATFORM_MAX_CPU_PER_CLUSTER U(4) +#define PLATFORM_CLUSTER_COUNT U(1) +#define PLATFORM_CLUSTER0_CORE_COUNT U(4) +#define PLATFORM_CLUSTER1_CORE_COUNT U(0) +#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER0_CORE_COUNT) + +#define IMX_PWR_LVL0 MPIDR_AFFLVL0 +#define IMX_PWR_LVL1 MPIDR_AFFLVL1 +#define IMX_PWR_LVL2 MPIDR_AFFLVL2 + +#define PWR_DOMAIN_AT_MAX_LVL U(1) +#define PLAT_MAX_PWR_LVL U(2) +#define PLAT_MAX_OFF_STATE U(4) +#define PLAT_MAX_RET_STATE U(1) + +#define PLAT_WAIT_RET_STATE PLAT_MAX_RET_STATE +#define PLAT_WAIT_OFF_STATE U(2) +#define PLAT_STOP_OFF_STATE U(3) + +#define BL31_BASE U(0x910000) +#define BL31_SIZE SZ_64K +#define BL31_LIMIT (BL31_BASE + BL31_SIZE) + +/* non-secure uboot base */ +#define PLAT_NS_IMAGE_OFFSET U(0x40200000) +#define BL32_FDT_OVERLAY_ADDR (PLAT_NS_IMAGE_OFFSET + 0x3000000) + +/* GICv3 base address */ +#define PLAT_GICD_BASE U(0x38800000) +#define PLAT_GICR_BASE U(0x38880000) + +#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 32) +#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 32) + +#ifdef SPD_trusty +#define MAX_XLAT_TABLES 5 +#define MAX_MMAP_REGIONS 15 +#else +#define MAX_XLAT_TABLES 4 +#define MAX_MMAP_REGIONS 14 +#endif + +#define HAB_RVT_BASE U(0x00000880) /* HAB_RVT for i.MX8MQ */ + +#define IMX_BOOT_UART_CLK_IN_HZ 25000000 /* Select 25Mhz oscillator */ +#define PLAT_CRASH_UART_BASE IMX_BOOT_UART_BASE +#define PLAT_CRASH_UART_CLK_IN_HZ 25000000 +#define IMX_CONSOLE_BAUDRATE 115200 + +#define IMX_AIPS_BASE U(0x30200000) +#define IMX_AIPS_SIZE U(0xC00000) +#define IMX_AIPS1_BASE U(0x30200000) +#define IMX_AIPS3_ARB_BASE U(0x30800000) +#define IMX_OCOTP_BASE U(0x30350000) +#define IMX_ANAMIX_BASE U(0x30360000) +#define IMX_CCM_BASE U(0x30380000) +#define IMX_SRC_BASE U(0x30390000) +#define IMX_GPC_BASE U(0x303a0000) +#define IMX_RDC_BASE U(0x303d0000) +#define IMX_CSU_BASE U(0x303e0000) +#define IMX_WDOG_BASE U(0x30280000) +#define IMX_SNVS_BASE U(0x30370000) +#define IMX_NOC_BASE U(0x32700000) +#define IMX_TZASC_BASE U(0x32F80000) +#define IMX_CAAM_BASE U(0x30900000) +#define IMX_IOMUX_GPR_BASE U(0x30340000) +#define IMX_DDRC_BASE U(0x3d400000) +#define IMX_DDRPHY_BASE U(0x3c000000) +#define IMX_DDR_IPS_BASE U(0x3d000000) +#define IMX_DDR_IPS_SIZE U(0x1800000) +#define IMX_DRAM_BASE U(0x40000000) +#define IMX_DRAM_SIZE U(0xc0000000) + +#define IMX_ROM_BASE U(0x00000000) +#define IMX_ROM_SIZE U(0x20000) + +#define AIPSTZ1_BASE U(0x301f0000) +#define AIPSTZ2_BASE U(0x305f0000) +#define AIPSTZ3_BASE U(0x309f0000) +#define AIPSTZ4_BASE U(0x32df0000) + +#define GPV_BASE U(0x32000000) +#define GPV_SIZE U(0x800000) +#define IMX_GIC_BASE PLAT_GICD_BASE +#define IMX_GIC_SIZE U(0x200000) + +#define WDOG_WSR U(0x2) +#define WDOG_WCR_WDZST BIT(0) +#define WDOG_WCR_WDBG BIT(1) +#define WDOG_WCR_WDE BIT(2) +#define WDOG_WCR_WDT BIT(3) +#define WDOG_WCR_SRS BIT(4) +#define WDOG_WCR_WDA BIT(5) +#define WDOG_WCR_SRE BIT(6) +#define WDOG_WCR_WDW BIT(7) + +#define SRC_A53RCR0 U(0x4) +#define SRC_A53RCR1 U(0x8) +#define SRC_OTG1PHY_SCR U(0x20) +#define SRC_OTG2PHY_SCR U(0x24) +#define SRC_GPR1_OFFSET U(0x74) +#define SRC_GPR10_OFFSET U(0x98) +#define SRC_GPR10_PERSIST_SECONDARY_BOOT BIT(30) + +#define SNVS_LPCR U(0x38) +#define SNVS_LPCR_SRTC_ENV 
BIT(0) +#define SNVS_LPCR_DP_EN BIT(5) +#define SNVS_LPCR_TOP BIT(6) + +#define SAVED_DRAM_TIMING_BASE U(0x40000000) + +#define HW_DRAM_PLL_CFG0 (IMX_ANAMIX_BASE + 0x60) +#define HW_DRAM_PLL_CFG1 (IMX_ANAMIX_BASE + 0x64) +#define HW_DRAM_PLL_CFG2 (IMX_ANAMIX_BASE + 0x68) +#define DRAM_PLL_CTRL HW_DRAM_PLL_CFG0 + +#define IOMUXC_GPR10 U(0x28) +#define GPR_TZASC_EN BIT(0) +#define GPR_TZASC_EN_LOCK BIT(16) + +#define OCRAM_S_BASE U(0x00180000) +#define OCRAM_S_SIZE U(0x8000) +#define OCRAM_S_LIMIT (OCRAM_S_BASE + OCRAM_S_SIZE) + +#define COUNTER_FREQUENCY 8333333 /* 25MHz / 3 */ + +#define IMX_WDOG_B_RESET diff --git a/plat/imx/imx8m/imx8mq/platform.mk b/plat/imx/imx8m/imx8mq/platform.mk new file mode 100644 index 0000000..b1c189f --- /dev/null +++ b/plat/imx/imx8m/imx8mq/platform.mk @@ -0,0 +1,69 @@ +# +# Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# Translation tables library +include lib/xlat_tables_v2/xlat_tables.mk + +PLAT_INCLUDES := -Iplat/imx/common/include \ + -Iplat/imx/imx8m/include \ + -Iplat/imx/imx8m/imx8mq/include + +# Include GICv3 driver files +include drivers/arm/gic/v3/gicv3.mk + +IMX_DRAM_SOURCES := plat/imx/imx8m/ddr/dram.c \ + plat/imx/imx8m/ddr/clock.c \ + plat/imx/imx8m/ddr/dram_retention.c \ + plat/imx/imx8m/ddr/ddr4_dvfs.c \ + plat/imx/imx8m/ddr/lpddr4_dvfs.c + +IMX_GIC_SOURCES := ${GICV3_SOURCES} \ + plat/common/plat_gicv3.c \ + plat/common/plat_psci_common.c \ + plat/imx/common/plat_imx8_gic.c + +BL31_SOURCES += plat/imx/common/imx8_helpers.S \ + plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c \ + plat/imx/imx8m/imx8mq/imx8mq_psci.c \ + plat/imx/imx8m/gpc_common.c \ + plat/imx/imx8m/imx_aipstz.c \ + plat/imx/imx8m/imx8m_caam.c \ + plat/imx/imx8m/imx8m_psci_common.c \ + plat/imx/imx8m/imx8mq/gpc.c \ + plat/imx/common/imx8_topology.c \ + plat/imx/common/imx_sip_handler.c \ + plat/imx/common/imx_sip_svc.c \ + plat/imx/common/imx_uart_console.S \ + lib/cpus/aarch64/cortex_a53.S \ + drivers/arm/tzc/tzc380.c \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + ${XLAT_TABLES_LIB_SRCS} \ + ${IMX_DRAM_SOURCES} \ + ${IMX_GIC_SOURCES} + +ENABLE_PIE := 1 +USE_COHERENT_MEM := 1 +RESET_TO_BL31 := 1 +A53_DISABLE_NON_TEMPORAL_HINT := 0 +WARMBOOT_ENABLE_DCACHE_EARLY := 1 + +ERRATA_A53_835769 := 1 +ERRATA_A53_843419 := 1 +ERRATA_A53_855873 := 1 + +BL32_BASE ?= 0xfe000000 +$(eval $(call add_define,BL32_BASE)) + +BL32_SIZE ?= 0x2000000 +$(eval $(call add_define,BL32_SIZE)) + +IMX_BOOT_UART_BASE ?= 0x30860000 +$(eval $(call add_define,IMX_BOOT_UART_BASE)) + +ifeq (${SPD},trusty) + BL31_CFLAGS += -DPLAT_XLAT_TABLES_DYNAMIC=1 +endif diff --git a/plat/imx/imx8m/imx_aipstz.c b/plat/imx/imx8m/imx_aipstz.c new file mode 100644 index 0000000..ecf8b1d --- /dev/null +++ b/plat/imx/imx8m/imx_aipstz.c @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2019, Arm Limited and Contributors. All rights reserved. 
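A note on the WDOG_WCR_* bits defined in platform_def.h above: they feed the platform reset path, which is not shown in this section. The fragment below is only a sketch of how a WDOG_B reset is commonly triggered with these bits; the 16-bit access width, WCR sitting at offset 0x0 of IMX_WDOG_BASE, the "cleared upper byte means shortest timeout" behaviour and the function name are assumptions for illustration, not something this diff states.

/*
 * Illustrative sketch only: assert a WDOG_B reset with the WDOG_WCR_* bits
 * defined above. Assumed layout: WCR is the 16-bit register at offset 0x0 of
 * IMX_WDOG_BASE and clearing its upper byte selects the shortest timeout.
 */
#include <arch_helpers.h>
#include <lib/mmio.h>

#include <platform_def.h>

static void imx_wdog_b_reset_sketch(void)
{
	uint32_t val = mmio_read_16(IMX_WDOG_BASE);

	/*
	 * Keep the low control byte, clear the timeout count (shortest
	 * timeout), enable the watchdog (WDE) and let the timeout assert
	 * WDOG_B (WDT). WDA stays set so WDOG_B is not forced immediately.
	 */
	val = (val & 0x00ffU) | WDOG_WCR_WDT | WDOG_WCR_WDE | WDOG_WCR_WDA;
	mmio_write_16(IMX_WDOG_BASE, val);

	/* spin until the watchdog fires */
	while (1) {
		wfi();
	}
}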
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> + +#include <imx_aipstz.h> + +void imx_aipstz_init(const struct aipstz_cfg *aipstz_cfg) +{ + const struct aipstz_cfg *aipstz = aipstz_cfg; + + while (aipstz->base != 0U) { + mmio_write_32(aipstz->base + AIPSTZ_MPR0, aipstz->mpr0); + mmio_write_32(aipstz->base + AIPSTZ_MPR1, aipstz->mpr1); + + for (int i = 0; i < AIPSTZ_OPACR_NUM; i++) + mmio_write_32(aipstz->base + OPACR_OFFSET(i), aipstz->opacr[i]); + + aipstz++; + } +} diff --git a/plat/imx/imx8m/imx_hab.c b/plat/imx/imx8m/imx_hab.c new file mode 100644 index 0000000..222046f --- /dev/null +++ b/plat/imx/imx8m/imx_hab.c @@ -0,0 +1,124 @@ +/* + * Copyright 2017-2020 NXP + * Copyright 2022 Leica Geosystems AG + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/runtime_svc.h> +#include <imx_sip_svc.h> + +#define HAB_CID_ATF U(2) /* TF-A Caller ID */ + +/* HAB Status definitions */ +enum hab_status { + HAB_STS_ANY = 0x00, /* Match any status in report_event() */ + HAB_FAILURE = 0x33, /* Operation failed */ + HAB_WARNING = 0x69, /* Operation completed with warning */ + HAB_SUCCESS = 0xf0 /* Operation completed successfully */ +}; + +/* HAB Configuration definitions */ +enum hab_config { + HAB_CFG_RETURN = 0x33, /* Field Return IC */ + HAB_CFG_OPEN = 0xf0, /* Non-secure IC */ + HAB_CFG_CLOSED = 0xcc /* Secure IC */ +}; + +/* HAB State definitions */ +enum hab_state { + HAB_STATE_INITIAL = 0x33, /* Initializing state (transitory) */ + HAB_STATE_CHECK = 0x55, /* Check state (non-secure) */ + HAB_STATE_NONSECURE = 0x66, /* Non-secure state */ + HAB_STATE_TRUSTED = 0x99, /* Trusted state */ + HAB_STATE_SECURE = 0xaa, /* Secure state */ + HAB_STATE_FAIL_SOFT = 0xcc, /* Soft fail state */ + HAB_STATE_FAIL_HARD = 0xff, /* Hard fail state (terminal) */ + HAB_STATE_NONE = 0xf0 /* No security state machine */ +}; + +/* HAB Verification Target definitions */ +enum hab_target { + HAB_TGT_MEMORY = 0x0f, /* Check memory allowed list */ + HAB_TGT_PERIPHERAL = 0xf0, /* Check peripheral allowed list */ + HAB_TGT_ANY = 0x55 /* Check memory & peripheral allowed list */ +}; + +/* Authenticate Image Loader Callback prototype */ +typedef enum hab_status hab_loader_callback_f_t(void **, size_t *, const void *); + +/* + * HAB Rom VectorTable (RVT) structure. + * This table provides function pointers into the HAB library in ROM for + * use by post-ROM boot sequence components. + * Functions are ordered in the structure below based on the offsets in ROM + * image, and shall not be changed! 
+ * Details on API allocation offsets and function description could be + * found in following documents from NXP: + * - High Assurance Boot Version 4 Application Programming Interface + * Reference Manual (available in CST package) + * - HABv4 RVT Guidelines and Recommendations (AN12263) + */ +struct hab_rvt_api { + uint64_t hdr; + enum hab_status (*entry)(void); + enum hab_status (*exit)(void); + enum hab_status (*check_target)(enum hab_target type, const void *start, size_t bytes); + void* (*authenticate_image)(uint8_t cid, long ivt_offset, void **start, + size_t *bytes, hab_loader_callback_f_t loader); + enum hab_status (*run_dcd)(const uint8_t *dcd); + enum hab_status (*run_csf)(const uint8_t *csf, uint8_t cid, uint32_t srkmask); + enum hab_status (*assert)(long type, const void *data, uint32_t count); + enum hab_status (*report_event)(enum hab_status status, uint32_t index, + uint8_t *event, size_t *bytes); + enum hab_status (*report_status)(enum hab_config *config, enum hab_state *state); + void (*failsafe)(void); + void* (*authenticate_image_no_dcd)(uint8_t cid, long ivt_offset, void **start, + size_t *bytes, hab_loader_callback_f_t loader); + uint32_t (*get_version)(void); + enum hab_status (*authenticate_container)(uint8_t cid, long ivt_offset, void **start, + size_t *bytes, hab_loader_callback_f_t loader, uint32_t srkmask, int skip_dcd); +}; + +struct hab_rvt_api *g_hab_rvt_api = (struct hab_rvt_api *)HAB_RVT_BASE; + +/******************************************************************************* + * Handler for servicing HAB SMC calls + ******************************************************************************/ +int imx_hab_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4) +{ + switch (x1) { + case IMX_SIP_HAB_ENTRY: + return g_hab_rvt_api->entry(); + case IMX_SIP_HAB_EXIT: + return g_hab_rvt_api->exit(); + case IMX_SIP_HAB_CHECK_TARGET: + return g_hab_rvt_api->check_target((enum hab_target)x2, + (const void *)x3, (size_t)x4); + case IMX_SIP_HAB_AUTH_IMG: + return (unsigned long)g_hab_rvt_api->authenticate_image(HAB_CID_ATF, + x2, (void **)x3, (size_t *)x4, NULL); + case IMX_SIP_HAB_REPORT_EVENT: + return g_hab_rvt_api->report_event(HAB_FAILURE, + (uint32_t)x2, (uint8_t *)x3, (size_t *)x4); + case IMX_SIP_HAB_REPORT_STATUS: + return g_hab_rvt_api->report_status((enum hab_config *)x2, + (enum hab_state *)x3); + case IMX_SIP_HAB_FAILSAFE: + g_hab_rvt_api->failsafe(); + break; + case IMX_SIP_HAB_AUTH_IMG_NO_DCD: + return (unsigned long)g_hab_rvt_api->authenticate_image_no_dcd( + HAB_CID_ATF, x2, (void **)x3, (size_t *)x4, NULL); + case IMX_SIP_HAB_GET_VERSION: + return g_hab_rvt_api->get_version(); + default: + return SMC_UNK; + }; + + return SMC_OK; +} diff --git a/plat/imx/imx8m/imx_rdc.c b/plat/imx/imx8m/imx_rdc.c new file mode 100644 index 0000000..85de191 --- /dev/null +++ b/plat/imx/imx8m/imx_rdc.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019, NXP. All rights reserved. 
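The hab_rvt_api structure in imx_hab.c above is only a typed view of fixed ROM offsets at HAB_RVT_BASE, so callers simply go through its function pointers. Below is a minimal sketch of querying the device's HAB configuration through that table; it assumes it lives next to the SMC handler in imx_hab.c (so g_hab_rvt_api and the enums are in scope), and treating "closed means HAB_CFG_CLOSED" is a simplification for the example, not NXP's full recommended check.

/*
 * Illustrative sketch only: ask the HAB ROM whether the device is closed
 * (i.e. enforcing signed boot) via the RVT declared above.
 */
#include <stdbool.h>

static bool imx_hab_is_closed_sketch(void)
{
	enum hab_config config = HAB_CFG_OPEN;
	enum hab_state state = HAB_STATE_NONE;

	if (g_hab_rvt_api->report_status(&config, &state) == HAB_FAILURE) {
		return false;
	}

	/* simplification: only the "closed" fuse configuration counts */
	return config == HAB_CFG_CLOSED;
}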
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <lib/mmio.h> + +#include <imx_rdc.h> + +void imx_rdc_init(const struct imx_rdc_cfg *rdc_cfg) +{ + const struct imx_rdc_cfg *rdc = rdc_cfg; + + while (rdc->type != RDC_INVALID) { + switch (rdc->type) { + case RDC_MDA: + /* MDA config */ + mmio_write_32(MDAn(rdc->index), rdc->setting.rdc_mda); + break; + case RDC_PDAP: + /* peripheral access permission config */ + mmio_write_32(PDAPn(rdc->index), rdc->setting.rdc_pdap); + break; + case RDC_MEM_REGION: + /* memory region access permission config */ + mmio_write_32(MRSAn(rdc->index), rdc->setting.rdc_mem_region[0]); + mmio_write_32(MREAn(rdc->index), rdc->setting.rdc_mem_region[1]); + mmio_write_32(MRCn(rdc->index), rdc->setting.rdc_mem_region[2]); + break; + default: + break; + } + + rdc++; + } +} diff --git a/plat/imx/imx8m/include/ddrc.h b/plat/imx/imx8m/include/ddrc.h new file mode 100644 index 0000000..55af3ff --- /dev/null +++ b/plat/imx/imx8m/include/ddrc.h @@ -0,0 +1,336 @@ +/* + * Copyright 2019-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX_DDRC_H +#define IMX_DDRC_H + +#define DDRC_IPS_BASE_ADDR(X) (0x3d400000 + ((X) * 0x2000000)) +#define DDRC_DDR_SS_GPR0 0x3d000000 + +/* DWC ddr umctl2 REGs offset*/ +/**********************/ +#define DDRC_MSTR(X) (DDRC_IPS_BASE_ADDR(X) + 0x00) +#define DDRC_STAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x04) +#define DDRC_MSTR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x08) +#define DDRC_MRCTRL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x10) +#define DDRC_MRCTRL1(X) (DDRC_IPS_BASE_ADDR(X) + 0x14) +#define DDRC_MRSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x18) +#define DDRC_MRCTRL2(X) (DDRC_IPS_BASE_ADDR(X) + 0x1c) +#define DDRC_DERATEEN(X) (DDRC_IPS_BASE_ADDR(X) + 0x20) +#define DDRC_DERATEINT(X) (DDRC_IPS_BASE_ADDR(X) + 0x24) +#define DDRC_MSTR2(X) (DDRC_IPS_BASE_ADDR(X) + 0x28) +#define DDRC_PWRCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x30) +#define DDRC_PWRTMG(X) (DDRC_IPS_BASE_ADDR(X) + 0x34) +#define DDRC_HWLPCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x38) +#define DDRC_HWFFCCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x3c) +#define DDRC_HWFFCSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x40) +#define DDRC_RFSHCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x50) +#define DDRC_RFSHCTL1(X) (DDRC_IPS_BASE_ADDR(X) + 0x54) +#define DDRC_RFSHCTL2(X) (DDRC_IPS_BASE_ADDR(X) + 0x58) +#define DDRC_RFSHCTL3(X) (DDRC_IPS_BASE_ADDR(X) + 0x60) +#define DDRC_RFSHTMG(X) (DDRC_IPS_BASE_ADDR(X) + 0x64) +#define DDRC_ECCCFG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x70) +#define DDRC_ECCCFG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x74) +#define DDRC_ECCSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x78) +#define DDRC_ECCCLR(X) (DDRC_IPS_BASE_ADDR(X) + 0x7c) +#define DDRC_ECCERRCNT(X) (DDRC_IPS_BASE_ADDR(X) + 0x80) +#define DDRC_ECCCADDR0(X) (DDRC_IPS_BASE_ADDR(X) + 0x84) +#define DDRC_ECCCADDR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x88) +#define DDRC_ECCCSYN0(X) (DDRC_IPS_BASE_ADDR(X) + 0x8c) +#define DDRC_ECCCSYN1(X) (DDRC_IPS_BASE_ADDR(X) + 0x90) +#define DDRC_ECCCSYN2(X) (DDRC_IPS_BASE_ADDR(X) + 0x94) +#define DDRC_ECCBITMASK0(X) (DDRC_IPS_BASE_ADDR(X) + 0x98) +#define DDRC_ECCBITMASK1(X) (DDRC_IPS_BASE_ADDR(X) + 0x9c) +#define DDRC_ECCBITMASK2(X) (DDRC_IPS_BASE_ADDR(X) + 0xa0) +#define DDRC_ECCUADDR0(X) (DDRC_IPS_BASE_ADDR(X) + 0xa4) +#define DDRC_ECCUADDR1(X) (DDRC_IPS_BASE_ADDR(X) + 0xa8) +#define DDRC_ECCUSYN0(X) (DDRC_IPS_BASE_ADDR(X) + 0xac) +#define DDRC_ECCUSYN1(X) (DDRC_IPS_BASE_ADDR(X) + 0xb0) +#define DDRC_ECCUSYN2(X) (DDRC_IPS_BASE_ADDR(X) + 0xb4) +#define DDRC_ECCPOISONADDR0(X) (DDRC_IPS_BASE_ADDR(X) + 0xb8) +#define DDRC_ECCPOISONADDR1(X) 
(DDRC_IPS_BASE_ADDR(X) + 0xbc) +#define DDRC_CRCPARCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0xc0) +#define DDRC_CRCPARCTL1(X) (DDRC_IPS_BASE_ADDR(X) + 0xc4) +#define DDRC_CRCPARCTL2(X) (DDRC_IPS_BASE_ADDR(X) + 0xc8) +#define DDRC_CRCPARSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0xcc) +#define DDRC_INIT0(X) (DDRC_IPS_BASE_ADDR(X) + 0xd0) +#define DDRC_INIT1(X) (DDRC_IPS_BASE_ADDR(X) + 0xd4) +#define DDRC_INIT2(X) (DDRC_IPS_BASE_ADDR(X) + 0xd8) +#define DDRC_INIT3(X) (DDRC_IPS_BASE_ADDR(X) + 0xdc) +#define DDRC_INIT4(X) (DDRC_IPS_BASE_ADDR(X) + 0xe0) +#define DDRC_INIT5(X) (DDRC_IPS_BASE_ADDR(X) + 0xe4) +#define DDRC_INIT6(X) (DDRC_IPS_BASE_ADDR(X) + 0xe8) +#define DDRC_INIT7(X) (DDRC_IPS_BASE_ADDR(X) + 0xec) +#define DDRC_DIMMCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0xf0) +#define DDRC_RANKCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0xf4) +#define DDRC_DRAMTMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x100) +#define DDRC_DRAMTMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x104) +#define DDRC_DRAMTMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x108) +#define DDRC_DRAMTMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x10c) +#define DDRC_DRAMTMG4(X) (DDRC_IPS_BASE_ADDR(X) + 0x110) +#define DDRC_DRAMTMG5(X) (DDRC_IPS_BASE_ADDR(X) + 0x114) +#define DDRC_DRAMTMG6(X) (DDRC_IPS_BASE_ADDR(X) + 0x118) +#define DDRC_DRAMTMG7(X) (DDRC_IPS_BASE_ADDR(X) + 0x11c) +#define DDRC_DRAMTMG8(X) (DDRC_IPS_BASE_ADDR(X) + 0x120) +#define DDRC_DRAMTMG9(X) (DDRC_IPS_BASE_ADDR(X) + 0x124) +#define DDRC_DRAMTMG10(X) (DDRC_IPS_BASE_ADDR(X) + 0x128) +#define DDRC_DRAMTMG11(X) (DDRC_IPS_BASE_ADDR(X) + 0x12c) +#define DDRC_DRAMTMG12(X) (DDRC_IPS_BASE_ADDR(X) + 0x130) +#define DDRC_DRAMTMG13(X) (DDRC_IPS_BASE_ADDR(X) + 0x134) +#define DDRC_DRAMTMG14(X) (DDRC_IPS_BASE_ADDR(X) + 0x138) +#define DDRC_DRAMTMG15(X) (DDRC_IPS_BASE_ADDR(X) + 0x13C) +#define DDRC_DRAMTMG16(X) (DDRC_IPS_BASE_ADDR(X) + 0x140) +#define DDRC_DRAMTMG17(X) (DDRC_IPS_BASE_ADDR(X) + 0x144) + +#define DDRC_ZQCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x180) +#define DDRC_ZQCTL1(X) (DDRC_IPS_BASE_ADDR(X) + 0x184) +#define DDRC_ZQCTL2(X) (DDRC_IPS_BASE_ADDR(X) + 0x188) +#define DDRC_ZQSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x18c) +#define DDRC_DFITMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x190) +#define DDRC_DFITMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x194) +#define DDRC_DFILPCFG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x198) +#define DDRC_DFILPCFG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x19c) +#define DDRC_DFIUPD0(X) (DDRC_IPS_BASE_ADDR(X) + 0x1a0) +#define DDRC_DFIUPD1(X) (DDRC_IPS_BASE_ADDR(X) + 0x1a4) +#define DDRC_DFIUPD2(X) (DDRC_IPS_BASE_ADDR(X) + 0x1a8) +#define DDRC_DFIMISC(X) (DDRC_IPS_BASE_ADDR(X) + 0x1b0) +#define DDRC_DFITMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x1b4) +#define DDRC_DFITMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x1b8) +#define DDRC_DFISTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x1bc) + +#define DDRC_DBICTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x1c0) +#define DDRC_DFIPHYMSTR(X) (DDRC_IPS_BASE_ADDR(X) + 0x1c4) +#define DDRC_TRAINCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x1d0) +#define DDRC_TRAINCTL1(X) (DDRC_IPS_BASE_ADDR(X) + 0x1d4) +#define DDRC_TRAINCTL2(X) (DDRC_IPS_BASE_ADDR(X) + 0x1d8) +#define DDRC_TRAINSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x1dc) +#define DDRC_ADDRMAP0(X) (DDRC_IPS_BASE_ADDR(X) + 0x200) +#define DDRC_ADDRMAP1(X) (DDRC_IPS_BASE_ADDR(X) + 0x204) +#define DDRC_ADDRMAP2(X) (DDRC_IPS_BASE_ADDR(X) + 0x208) +#define DDRC_ADDRMAP3(X) (DDRC_IPS_BASE_ADDR(X) + 0x20c) +#define DDRC_ADDRMAP4(X) (DDRC_IPS_BASE_ADDR(X) + 0x210) +#define DDRC_ADDRMAP5(X) (DDRC_IPS_BASE_ADDR(X) + 0x214) +#define DDRC_ADDRMAP6(X) (DDRC_IPS_BASE_ADDR(X) + 0x218) +#define DDRC_ADDRMAP7(X) (DDRC_IPS_BASE_ADDR(X) + 0x21c) +#define 
DDRC_ADDRMAP8(X) (DDRC_IPS_BASE_ADDR(X) + 0x220) +#define DDRC_ADDRMAP9(X) (DDRC_IPS_BASE_ADDR(X) + 0x224) +#define DDRC_ADDRMAP10(X) (DDRC_IPS_BASE_ADDR(X) + 0x228) +#define DDRC_ADDRMAP11(X) (DDRC_IPS_BASE_ADDR(X) + 0x22c) + +#define DDRC_ODTCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x240) +#define DDRC_ODTMAP(X) (DDRC_IPS_BASE_ADDR(X) + 0x244) +#define DDRC_SCHED(X) (DDRC_IPS_BASE_ADDR(X) + 0x250) +#define DDRC_SCHED1(X) (DDRC_IPS_BASE_ADDR(X) + 0x254) +#define DDRC_PERFHPR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x25c) +#define DDRC_PERFLPR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x264) +#define DDRC_PERFWR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x26c) +#define DDRC_PERFVPR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x274) + +#define DDRC_PERFVPW1(X) (DDRC_IPS_BASE_ADDR(X) + 0x278) + +#define DDRC_DQMAP0(X) (DDRC_IPS_BASE_ADDR(X) + 0x280) +#define DDRC_DQMAP1(X) (DDRC_IPS_BASE_ADDR(X) + 0x284) +#define DDRC_DQMAP2(X) (DDRC_IPS_BASE_ADDR(X) + 0x288) +#define DDRC_DQMAP3(X) (DDRC_IPS_BASE_ADDR(X) + 0x28c) +#define DDRC_DQMAP4(X) (DDRC_IPS_BASE_ADDR(X) + 0x290) +#define DDRC_DQMAP5(X) (DDRC_IPS_BASE_ADDR(X) + 0x294) +#define DDRC_DBG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x300) +#define DDRC_DBG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x304) +#define DDRC_DBGCAM(X) (DDRC_IPS_BASE_ADDR(X) + 0x308) +#define DDRC_DBGCMD(X) (DDRC_IPS_BASE_ADDR(X) + 0x30c) +#define DDRC_DBGSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x310) + +#define DDRC_SWCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x320) +#define DDRC_SWSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x324) +#define DDRC_OCPARCFG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x330) +#define DDRC_OCPARCFG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x334) +#define DDRC_OCPARCFG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x338) +#define DDRC_OCPARCFG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x33c) +#define DDRC_OCPARSTAT0(X) (DDRC_IPS_BASE_ADDR(X) + 0x340) +#define DDRC_OCPARSTAT1(X) (DDRC_IPS_BASE_ADDR(X) + 0x344) +#define DDRC_OCPARWLOG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x348) +#define DDRC_OCPARWLOG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x34c) +#define DDRC_OCPARWLOG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x350) +#define DDRC_OCPARAWLOG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x354) +#define DDRC_OCPARAWLOG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x358) +#define DDRC_OCPARRLOG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x35c) +#define DDRC_OCPARRLOG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x360) +#define DDRC_OCPARARLOG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x364) +#define DDRC_OCPARARLOG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x368) +#define DDRC_POISONCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x36C) +#define DDRC_POISONSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x370) +#define DDRC_ADVECCINDEX(X) (DDRC_IPS_BASE_ADDR(X) + 0x3) +#define DDRC_ADVECCSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x3) +#define DDRC_ECCPOISONPAT0(X) (DDRC_IPS_BASE_ADDR(X) + 0x3) +#define DDRC_ECCPOISONPAT1(X) (DDRC_IPS_BASE_ADDR(X) + 0x3) +#define DDRC_ECCPOISONPAT2(X) (DDRC_IPS_BASE_ADDR(X) + 0x3) +#define DDRC_HIFCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x3) + +#define DDRC_PSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x3fc) +#define DDRC_PCCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x400) +#define DDRC_PCFGR_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x404) +#define DDRC_PCFGR_1(X) (DDRC_IPS_BASE_ADDR(X) + 1 * 0xb0 + 0x404) +#define DDRC_PCFGR_2(X) (DDRC_IPS_BASE_ADDR(X) + 2 * 0xb0 + 0x404) +#define DDRC_PCFGR_3(X) (DDRC_IPS_BASE_ADDR(X) + 3 * 0xb0 + 0x404) +#define DDRC_PCFGW_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x408) +#define DDRC_PCFGW_1(X) (DDRC_IPS_BASE_ADDR(X) + 1 * 0xb0 + 0x408) +#define DDRC_PCFGW_2(X) (DDRC_IPS_BASE_ADDR(X) + 2 * 0xb0 + 0x408) +#define DDRC_PCFGW_3(X) (DDRC_IPS_BASE_ADDR(X) + 3 * 0xb0 + 0x408) +#define DDRC_PCFGC_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x40c) +#define 
DDRC_PCFGIDMASKCH(X) (DDRC_IPS_BASE_ADDR(X) + 0x410) +#define DDRC_PCFGIDVALUECH(X) (DDRC_IPS_BASE_ADDR(X) + 0x414) +#define DDRC_PCTRL_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x490) +#define DDRC_PCTRL_1(X) (DDRC_IPS_BASE_ADDR(X) + 0x490 + 1 * 0xb0) +#define DDRC_PCTRL_2(X) (DDRC_IPS_BASE_ADDR(X) + 0x490 + 2 * 0xb0) +#define DDRC_PCTRL_3(X) (DDRC_IPS_BASE_ADDR(X) + 0x490 + 3 * 0xb0) +#define DDRC_PCFGQOS0_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x494) +#define DDRC_PCFGQOS1_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x498) +#define DDRC_PCFGWQOS0_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x49c) +#define DDRC_PCFGWQOS1_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x4a0) +#define DDRC_SARBASE0(X) (DDRC_IPS_BASE_ADDR(X) + 0xf04) +#define DDRC_SARSIZE0(X) (DDRC_IPS_BASE_ADDR(X) + 0xf08) +#define DDRC_SBRCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0xf24) +#define DDRC_SBRSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0xf28) +#define DDRC_SBRWDATA0(X) (DDRC_IPS_BASE_ADDR(X) + 0xf2c) +#define DDRC_SBRWDATA1(X) (DDRC_IPS_BASE_ADDR(X) + 0xf30) +#define DDRC_PDCH(X) (DDRC_IPS_BASE_ADDR(X) + 0xf34) + +/* SHADOW registers */ +#define DDRC_FREQ1_DERATEEN(X) (DDRC_IPS_BASE_ADDR(X) + 0x2020) +#define DDRC_FREQ1_DERATEINT(X) (DDRC_IPS_BASE_ADDR(X) + 0x2024) +#define DDRC_FREQ1_RFSHCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x2050) +#define DDRC_FREQ1_RFSHTMG(X) (DDRC_IPS_BASE_ADDR(X) + 0x2064) +#define DDRC_FREQ1_INIT3(X) (DDRC_IPS_BASE_ADDR(X) + 0x20dc) +#define DDRC_FREQ1_INIT4(X) (DDRC_IPS_BASE_ADDR(X) + 0x20e0) +#define DDRC_FREQ1_INIT6(X) (DDRC_IPS_BASE_ADDR(X) + 0x20e8) +#define DDRC_FREQ1_INIT7(X) (DDRC_IPS_BASE_ADDR(X) + 0x20ec) +#define DDRC_FREQ1_DRAMTMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x2100) +#define DDRC_FREQ1_DRAMTMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x2104) +#define DDRC_FREQ1_DRAMTMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x2108) +#define DDRC_FREQ1_DRAMTMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x210c) +#define DDRC_FREQ1_DRAMTMG4(X) (DDRC_IPS_BASE_ADDR(X) + 0x2110) +#define DDRC_FREQ1_DRAMTMG5(X) (DDRC_IPS_BASE_ADDR(X) + 0x2114) +#define DDRC_FREQ1_DRAMTMG6(X) (DDRC_IPS_BASE_ADDR(X) + 0x2118) +#define DDRC_FREQ1_DRAMTMG7(X) (DDRC_IPS_BASE_ADDR(X) + 0x211c) +#define DDRC_FREQ1_DRAMTMG8(X) (DDRC_IPS_BASE_ADDR(X) + 0x2120) +#define DDRC_FREQ1_DRAMTMG9(X) (DDRC_IPS_BASE_ADDR(X) + 0x2124) +#define DDRC_FREQ1_DRAMTMG10(X) (DDRC_IPS_BASE_ADDR(X) + 0x2128) +#define DDRC_FREQ1_DRAMTMG11(X) (DDRC_IPS_BASE_ADDR(X) + 0x212c) +#define DDRC_FREQ1_DRAMTMG12(X) (DDRC_IPS_BASE_ADDR(X) + 0x2130) +#define DDRC_FREQ1_DRAMTMG13(X) (DDRC_IPS_BASE_ADDR(X) + 0x2134) +#define DDRC_FREQ1_DRAMTMG14(X) (DDRC_IPS_BASE_ADDR(X) + 0x2138) +#define DDRC_FREQ1_DRAMTMG15(X) (DDRC_IPS_BASE_ADDR(X) + 0x213C) +#define DDRC_FREQ1_DRAMTMG16(X) (DDRC_IPS_BASE_ADDR(X) + 0x2140) +#define DDRC_FREQ1_DRAMTMG17(X) (DDRC_IPS_BASE_ADDR(X) + 0x2144) +#define DDRC_FREQ1_ZQCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x2180) +#define DDRC_FREQ1_DFITMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x2190) +#define DDRC_FREQ1_DFITMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x2194) +#define DDRC_FREQ1_DFITMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x21b4) +#define DDRC_FREQ1_DFITMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x21b8) +#define DDRC_FREQ1_ODTCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x2240) + +#define DDRC_FREQ2_DERATEEN(X) (DDRC_IPS_BASE_ADDR(X) + 0x3020) +#define DDRC_FREQ2_DERATEINT(X) (DDRC_IPS_BASE_ADDR(X) + 0x3024) +#define DDRC_FREQ2_RFSHCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x3050) +#define DDRC_FREQ2_RFSHTMG(X) (DDRC_IPS_BASE_ADDR(X) + 0x3064) +#define DDRC_FREQ2_INIT3(X) (DDRC_IPS_BASE_ADDR(X) + 0x30dc) +#define DDRC_FREQ2_INIT4(X) (DDRC_IPS_BASE_ADDR(X) + 0x30e0) +#define DDRC_FREQ2_INIT6(X) (DDRC_IPS_BASE_ADDR(X) + 
0x30e8) +#define DDRC_FREQ2_INIT7(X) (DDRC_IPS_BASE_ADDR(X) + 0x30ec) +#define DDRC_FREQ2_DRAMTMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x3100) +#define DDRC_FREQ2_DRAMTMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x3104) +#define DDRC_FREQ2_DRAMTMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x3108) +#define DDRC_FREQ2_DRAMTMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x310c) +#define DDRC_FREQ2_DRAMTMG4(X) (DDRC_IPS_BASE_ADDR(X) + 0x3110) +#define DDRC_FREQ2_DRAMTMG5(X) (DDRC_IPS_BASE_ADDR(X) + 0x3114) +#define DDRC_FREQ2_DRAMTMG6(X) (DDRC_IPS_BASE_ADDR(X) + 0x3118) +#define DDRC_FREQ2_DRAMTMG7(X) (DDRC_IPS_BASE_ADDR(X) + 0x311c) +#define DDRC_FREQ2_DRAMTMG8(X) (DDRC_IPS_BASE_ADDR(X) + 0x3120) +#define DDRC_FREQ2_DRAMTMG9(X) (DDRC_IPS_BASE_ADDR(X) + 0x3124) +#define DDRC_FREQ2_DRAMTMG10(X) (DDRC_IPS_BASE_ADDR(X) + 0x3128) +#define DDRC_FREQ2_DRAMTMG11(X) (DDRC_IPS_BASE_ADDR(X) + 0x312c) +#define DDRC_FREQ2_DRAMTMG12(X) (DDRC_IPS_BASE_ADDR(X) + 0x3130) +#define DDRC_FREQ2_DRAMTMG13(X) (DDRC_IPS_BASE_ADDR(X) + 0x3134) +#define DDRC_FREQ2_DRAMTMG14(X) (DDRC_IPS_BASE_ADDR(X) + 0x3138) +#define DDRC_FREQ2_DRAMTMG15(X) (DDRC_IPS_BASE_ADDR(X) + 0x313C) +#define DDRC_FREQ2_DRAMTMG16(X) (DDRC_IPS_BASE_ADDR(X) + 0x3140) +#define DDRC_FREQ2_DRAMTMG17(X) (DDRC_IPS_BASE_ADDR(X) + 0x3144) +#define DDRC_FREQ2_ZQCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x3180) +#define DDRC_FREQ2_DFITMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x3190) +#define DDRC_FREQ2_DFITMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x3194) +#define DDRC_FREQ2_DFITMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x31b4) +#define DDRC_FREQ2_DFITMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x31b8) +#define DDRC_FREQ2_ODTCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x3240) + +#define DDRC_FREQ3_DERATEEN(X) (DDRC_IPS_BASE_ADDR(X) + 0x4020) +#define DDRC_FREQ3_DERATEINT(X) (DDRC_IPS_BASE_ADDR(X) + 0x4024) +#define DDRC_FREQ3_RFSHCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x4050) +#define DDRC_FREQ3_RFSHTMG(X) (DDRC_IPS_BASE_ADDR(X) + 0x4064) +#define DDRC_FREQ3_INIT3(X) (DDRC_IPS_BASE_ADDR(X) + 0x40dc) +#define DDRC_FREQ3_INIT4(X) (DDRC_IPS_BASE_ADDR(X) + 0x40e0) +#define DDRC_FREQ3_INIT6(X) (DDRC_IPS_BASE_ADDR(X) + 0x40e8) +#define DDRC_FREQ3_INIT7(X) (DDRC_IPS_BASE_ADDR(X) + 0x40ec) +#define DDRC_FREQ3_DRAMTMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x4100) +#define DDRC_FREQ3_DRAMTMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x4104) +#define DDRC_FREQ3_DRAMTMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x4108) +#define DDRC_FREQ3_DRAMTMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x410c) +#define DDRC_FREQ3_DRAMTMG4(X) (DDRC_IPS_BASE_ADDR(X) + 0x4110) +#define DDRC_FREQ3_DRAMTMG5(X) (DDRC_IPS_BASE_ADDR(X) + 0x4114) +#define DDRC_FREQ3_DRAMTMG6(X) (DDRC_IPS_BASE_ADDR(X) + 0x4118) +#define DDRC_FREQ3_DRAMTMG7(X) (DDRC_IPS_BASE_ADDR(X) + 0x411c) +#define DDRC_FREQ3_DRAMTMG8(X) (DDRC_IPS_BASE_ADDR(X) + 0x4120) +#define DDRC_FREQ3_DRAMTMG9(X) (DDRC_IPS_BASE_ADDR(X) + 0x4124) +#define DDRC_FREQ3_DRAMTMG10(X) (DDRC_IPS_BASE_ADDR(X) + 0x4128) +#define DDRC_FREQ3_DRAMTMG11(X) (DDRC_IPS_BASE_ADDR(X) + 0x412c) +#define DDRC_FREQ3_DRAMTMG12(X) (DDRC_IPS_BASE_ADDR(X) + 0x4130) +#define DDRC_FREQ3_DRAMTMG13(X) (DDRC_IPS_BASE_ADDR(X) + 0x4134) +#define DDRC_FREQ3_DRAMTMG14(X) (DDRC_IPS_BASE_ADDR(X) + 0x4138) +#define DDRC_FREQ3_DRAMTMG15(X) (DDRC_IPS_BASE_ADDR(X) + 0x413C) +#define DDRC_FREQ3_DRAMTMG16(X) (DDRC_IPS_BASE_ADDR(X) + 0x4140) + +#define DDRC_FREQ3_ZQCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x4180) +#define DDRC_FREQ3_DFITMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x4190) +#define DDRC_FREQ3_DFITMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x4194) +#define DDRC_FREQ3_DFITMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x41b4) +#define DDRC_FREQ3_DFITMG3(X) 
(DDRC_IPS_BASE_ADDR(X) + 0x41b8) +#define DDRC_FREQ3_ODTCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x4240) +#define DDRC_DFITMG0_SHADOW(X) (DDRC_IPS_BASE_ADDR(X) + 0x2190) +#define DDRC_DFITMG1_SHADOW(X) (DDRC_IPS_BASE_ADDR(X) + 0x2194) +#define DDRC_DFITMG2_SHADOW(X) (DDRC_IPS_BASE_ADDR(X) + 0x21b4) +#define DDRC_DFITMG3_SHADOW(X) (DDRC_IPS_BASE_ADDR(X) + 0x21b8) +#define DDRC_ODTCFG_SHADOW(X) (DDRC_IPS_BASE_ADDR(X) + 0x2240) + +#define DRC_PERF_MON_BASE_ADDR(X) (0x3d800000 + ((X) * 0x2000000)) +#define DRC_PERF_MON_CNT0_CTL(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x0) +#define DRC_PERF_MON_CNT1_CTL(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x4) +#define DRC_PERF_MON_CNT2_CTL(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x8) +#define DRC_PERF_MON_CNT3_CTL(X) (DRC_PERF_MON_BASE_ADDR(X) + 0xC) +#define DRC_PERF_MON_CNT0_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x20) +#define DRC_PERF_MON_CNT1_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x24) +#define DRC_PERF_MON_CNT2_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x28) +#define DRC_PERF_MON_CNT3_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x2C) +#define DRC_PERF_MON_DPCR_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x30) +#define DRC_PERF_MON_MRR0_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x40) +#define DRC_PERF_MON_MRR1_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x44) +#define DRC_PERF_MON_MRR2_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x48) +#define DRC_PERF_MON_MRR3_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x4C) +#define DRC_PERF_MON_MRR4_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x50) +#define DRC_PERF_MON_MRR5_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x54) +#define DRC_PERF_MON_MRR6_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x58) +#define DRC_PERF_MON_MRR7_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x5C) +#define DRC_PERF_MON_MRR8_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x60) +#define DRC_PERF_MON_MRR9_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x64) +#define DRC_PERF_MON_MRR10_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x68) +#define DRC_PERF_MON_MRR11_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x6C) +#define DRC_PERF_MON_MRR12_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x70) +#define DRC_PERF_MON_MRR13_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x74) +#define DRC_PERF_MON_MRR14_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x78) +#define DRC_PERF_MON_MRR15_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x7C) + +#define dwc_ddrphy_apb_rd(addr) mmio_read_32(IMX_DDRPHY_BASE + 4 * (addr)) +#define dwc_ddrphy_apb_wr(addr, val) mmio_write_32(IMX_DDRPHY_BASE + 4 * (addr), val) + +#endif /*IMX_DDRC_H */ diff --git a/plat/imx/imx8m/include/dram.h b/plat/imx/imx8m/include/dram.h new file mode 100644 index 0000000..719c390 --- /dev/null +++ b/plat/imx/imx8m/include/dram.h @@ -0,0 +1,87 @@ +/* + * Copyright 2019-2023 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef DRAM_H +#define DRAM_H + +#include <assert.h> + +#include <arch_helpers.h> +#include <lib/utils_def.h> + +#include <ddrc.h> +#include <platform_def.h> + +#define DDRC_LPDDR4 BIT(5) +#define DDRC_DDR4 BIT(4) +#define DDRC_DDR3L BIT(0) +#define DDR_TYPE_MASK U(0x3f) +#define ACTIVE_RANK_MASK U(0x3) +#define DDRC_ACTIVE_ONE_RANK U(0x1) +#define DDRC_ACTIVE_TWO_RANK U(0x2) + +#define MR12 U(12) +#define MR14 U(14) + +#define MAX_FSP_NUM U(3) + +/* reg & config param */ +struct dram_cfg_param { + unsigned int reg; + unsigned int val; +}; + +struct dram_timing_info { + /* umctl2 config */ + struct dram_cfg_param *ddrc_cfg; + unsigned int ddrc_cfg_num; + /* ddrphy config */ + struct dram_cfg_param *ddrphy_cfg; + unsigned int ddrphy_cfg_num; + /* ddr fsp train info */ + struct dram_fsp_msg *fsp_msg; + unsigned int fsp_msg_num; + /* ddr phy trained CSR */ + 
struct dram_cfg_param *ddrphy_trained_csr; + unsigned int ddrphy_trained_csr_num; + /* ddr phy PIE */ + struct dram_cfg_param *ddrphy_pie; + unsigned int ddrphy_pie_num; + /* initialized fsp table */ + unsigned int fsp_table[4]; +}; + +struct dram_info { + int dram_type; + unsigned int num_rank; + uint32_t num_fsp; + int current_fsp; + int boot_fsp; + bool bypass_mode; + struct dram_timing_info *timing_info; + /* mr, emr, emr2, emr3, mr11, mr12, mr22, mr14 */ + uint32_t mr_table[3][8]; + /* used for workaround for rank to rank issue */ + uint32_t rank_setting[3][3]; +}; + +extern struct dram_info dram_info; + +void dram_info_init(unsigned long dram_timing_base); +void dram_umctl2_init(struct dram_timing_info *timing); +void dram_phy_init(struct dram_timing_info *timing); + +/* dram retention */ +void dram_enter_retention(void); +void dram_exit_retention(void); + +void dram_clock_switch(unsigned int target_drate, bool bypass_mode); + +/* dram frequency change */ +void lpddr4_swffc(struct dram_info *info, unsigned int init_fsp, unsigned int fsp_index); +void ddr4_swffc(struct dram_info *dram_info, unsigned int pstate); + +#endif /* DRAM_H */ diff --git a/plat/imx/imx8m/include/gpc.h b/plat/imx/imx8m/include/gpc.h new file mode 100644 index 0000000..8eb3e06 --- /dev/null +++ b/plat/imx/imx8m/include/gpc.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX8M_GPC_H +#define IMX8M_GPC_H + +#include <gpc_reg.h> + +/* helper macro */ +#define A53_LPM_MASK U(0xF) +#define A53_LPM_WAIT U(0x5) +#define A53_LPM_STOP U(0xA) +#define LPM_MODE(local_state) ((local_state) == PLAT_WAIT_RET_STATE ? A53_LPM_WAIT : A53_LPM_STOP) + +#define DSM_MODE_MASK BIT(31) +#define CORE_WKUP_FROM_GIC (IRQ_SRC_C0 | IRQ_SRC_C1 | IRQ_SRC_C2 | IRQ_SRC_C3) +#define A53_CORE_WUP_SRC(core_id) (1 << ((core_id) < 2 ? 28 + (core_id) : 22 + (core_id) - 2)) +#define COREx_PGC_PCR(core_id) (0x800 + (core_id) * 0x40) +#define COREx_WFI_PDN(core_id) (1 << ((core_id) < 2 ? (core_id) * 2 : ((core_id) - 2) * 2 + 16)) +#define COREx_IRQ_WUP(core_id) ((core_id) < 2 ? (1 << ((core_id) * 2 + 8)) : (1 << ((core_id) * 2 + 20))) +#define COREx_LPM_PUP(core_id) ((core_id) < 2 ? (1 << ((core_id) * 2 + 9)) : (1 << ((core_id) * 2 + 21))) +#define SLTx_CFG(n) ((SLT0_CFG + ((n) * 4))) +#define SLT_COREx_PUP(core_id) (0x2 << ((core_id) * 2)) +#define SLT_COREx_PUP_ACK(core_id) ((core_id) < 2 ? 
(1 << ((core_id) + 16)) : (1 << ((core_id) + 27))) + +#define IMR_MASK_ALL 0xffffffff + +#define IMX_PD_DOMAIN(name, on) \ + { \ + .pwr_req = name##_PWR_REQ, \ + .pgc_offset = name##_PGC, \ + .need_sync = false, \ + .always_on = (on), \ + } + +#define IMX_MIX_DOMAIN(name, on) \ + { \ + .pwr_req = name##_PWR_REQ, \ + .pgc_offset = name##_PGC, \ + .adb400_sync = name##_ADB400_SYNC, \ + .adb400_ack = name##_ADB400_ACK, \ + .need_sync = true, \ + .always_on = (on), \ + } + +struct imx_pwr_domain { + uint32_t pwr_req; + uint32_t adb400_sync; + uint32_t adb400_ack; + uint32_t pgc_offset; + bool need_sync; + bool always_on; +}; + +struct pll_override { + uint32_t reg; + uint32_t override_mask; +}; + +DECLARE_BAKERY_LOCK(gpc_lock); + +/* function declare */ +void imx_gpc_init(void); +void imx_set_cpu_secure_entry(unsigned int core_index, uintptr_t sec_entrypoint); +void imx_set_cpu_pwr_off(unsigned int core_index); +void imx_set_cpu_pwr_on(unsigned int core_index); +void imx_set_cpu_lpm(unsigned int core_index, bool pdn); +void imx_set_cluster_standby(bool retention); +void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state); +void imx_noc_slot_config(bool pdn); +void imx_set_sys_wakeup(unsigned int last_core, bool pdn); +void imx_set_sys_lpm(unsigned last_core, bool retention); +void imx_set_rbc_count(void); +void imx_clear_rbc_count(void); +void imx_anamix_override(bool enter); +void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on); + +#if defined(PLAT_imx8mq) +void imx_gpc_set_a53_core_awake(uint32_t core_id); +void imx_gpc_core_wake(uint32_t cpumask); +#endif + +#endif /*IMX8M_GPC_H */ diff --git a/plat/imx/imx8m/include/imx8m_caam.h b/plat/imx/imx8m/include/imx8m_caam.h new file mode 100644 index 0000000..84725b1 --- /dev/null +++ b/plat/imx/imx8m/include/imx8m_caam.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2019, NXP. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX8M_CAAM_H +#define IMX8M_CAAM_H + +#include <lib/utils_def.h> + +#include <platform_def.h> + +#define CAAM_JR0MID (IMX_CAAM_BASE + 0x10) +#define CAAM_JR1MID (IMX_CAAM_BASE + 0x18) +#define CAAM_JR2MID (IMX_CAAM_BASE + 0x20) +#define CAAM_NS_MID (0x1) + +#define JR0_BASE (IMX_CAAM_BASE + 0x1000) + +#define SM_P0_PERM (JR0_BASE + 0xa04) +#define SM_P0_SMAG2 (JR0_BASE + 0xa08) +#define SM_P0_SMAG1 (JR0_BASE + 0xa0c) +#define SM_CMD (JR0_BASE + 0xbe4) + +/* secure memory command */ +#define SMC_PAGE_SHIFT 16 +#define SMC_PART_SHIFT 8 + +#define SMC_CMD_ALLOC_PAGE 0x01 /* allocate page to this partition */ +#define SMC_CMD_DEALLOC_PART 0x03 /* deallocate partition */ + +void imx8m_caam_init(void); + +#endif /* IMX8M_CAAM_H */ diff --git a/plat/imx/imx8m/include/imx8m_ccm.h b/plat/imx/imx8m/include/imx8m_ccm.h new file mode 100644 index 0000000..acbd135 --- /dev/null +++ b/plat/imx/imx8m/include/imx8m_ccm.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2023, Pengutronix. All rights reserved. 
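imx8m_caam.h above only provides register addresses and command encodings; the matching imx8m_caam.c is elsewhere in this series. As a rough sketch (assumptions: plain MMIO writes from BL31 are sufficient and no job ring needs to stay secure-only), releasing the three job rings to the normal world could look like this:

/*
 * Illustrative sketch only: program the non-secure master ID into each CAAM
 * job-ring MID register so the normal world can own the job rings.
 */
#include <lib/mmio.h>

#include <imx8m_caam.h>

static void imx8m_caam_release_jr_sketch(void)
{
	mmio_write_32(CAAM_JR0MID, CAAM_NS_MID);
	mmio_write_32(CAAM_JR1MID, CAAM_NS_MID);
	mmio_write_32(CAAM_JR2MID, CAAM_NS_MID);
}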
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX8M_CCM_H +#define IMX8M_CCM_H + +unsigned int imx8m_uart_get_base(void); + +#endif /* IMX8M_CCM_H */ diff --git a/plat/imx/imx8m/include/imx8m_csu.h b/plat/imx/imx8m/include/imx8m_csu.h new file mode 100644 index 0000000..dc634ed --- /dev/null +++ b/plat/imx/imx8m/include/imx8m_csu.h @@ -0,0 +1,74 @@ +/* + * Copyright 2020-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX_CSU_H +#define IMX_CSU_H + +#include <lib/utils_def.h> + +#include <platform_def.h> + +#define CSU_SEC_LEVEL_0 0xff +#define CSU_SEC_LEVEL_1 0xbb +#define CSU_SEC_LEVEL_2 0x3f +#define CSU_SEC_LEVEL_3 0x3b +#define CSU_SEC_LEVEL_4 0x33 +#define CSU_SEC_LEVEL_5 0x22 +#define CSU_SEC_LEVEL_6 0x03 +#define CSU_SEC_LEVEL_7 0x0 + +#define LOCKED 0x1 +#define UNLOCKED 0x0 + +#define CSLx_REG(x) (IMX_CSU_BASE + ((x) / 2) * 4) +#define CSLx_LOCK(x) ((0x1 << (((x) % 2) * 16 + 8))) +#define CSLx_CFG(x, n) ((x) << (((n) % 2) * 16)) + +#define CSU_HP_REG(x) (IMX_CSU_BASE + ((x) / 16) * 4 + 0x200) +#define CSU_HP_LOCK(x) ((0x1 << (((x) % 16) * 2 + 1))) +#define CSU_HP_CFG(x, n) ((x) << (((n) % 16) * 2)) + +#define CSU_SA_REG(x) (IMX_CSU_BASE + 0x218) +#define CSU_SA_LOCK(x) ((0x1 << (((x) % 16) * 2 + 1))) +#define CSU_SA_CFG(x, n) ((x) << (((n) % 16) * 2)) + +#define CSU_HPCONTROL_REG(x) (IMX_CSU_BASE + (((x) / 16) * 4) + 0x358) +#define CSU_HPCONTROL_LOCK(x) ((0x1 << (((x) % 16) * 2 + 1))) +#define CSU_HPCONTROL_CFG(x, n) ((x) << (((n) % 16) * 2)) + +enum csu_cfg_type { + CSU_INVALID, + CSU_CSL, + CSU_HP, + CSU_SA, + CSU_HPCONTROL, +}; + +struct imx_csu_cfg { + enum csu_cfg_type type; + uint16_t idx; + uint16_t lock : 1; + uint16_t csl_level : 8; + uint16_t hp : 1; + uint16_t sa : 1; + uint16_t hpctrl : 1; +}; + +#define CSU_CSLx(i, level, lk) \ + {CSU_CSL, .idx = (i), .csl_level = (level), .lock = (lk),} + +#define CSU_HPx(i, val, lk) \ + {CSU_HP, .idx = (i), .hp = (val), .lock = (lk), } + +#define CSU_SA(i, val, lk) \ + {CSU_SA, .idx = (i), .sa = (val), .lock = (lk), } + +#define CSU_HPCTRL(i, val, lk) \ + {CSU_HPCONTROL, .idx = (i), .hpctrl = (val), .lock = (lk), } + +void imx_csu_init(const struct imx_csu_cfg *csu_cfg); + +#endif /* IMX_CSU_H */ diff --git a/plat/imx/imx8m/include/imx8m_measured_boot.h b/plat/imx/imx8m/include/imx8m_measured_boot.h new file mode 100644 index 0000000..2ec0c46 --- /dev/null +++ b/plat/imx/imx8m/include/imx8m_measured_boot.h @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2022, Linaro + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX8M_MEASURED_BOOT_H +#define IMX8M_MEASURED_BOOT_H + +#include <stdint.h> + +#include <arch_helpers.h> + +int imx8m_set_nt_fw_info(size_t log_size, uintptr_t *ns_log_addr); + +#endif /* IMX8M_MEASURED_BOOT_H */ diff --git a/plat/imx/imx8m/include/imx8m_psci.h b/plat/imx/imx8m/include/imx8m_psci.h new file mode 100644 index 0000000..7d14d11 --- /dev/null +++ b/plat/imx/imx8m/include/imx8m_psci.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. 
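The CSU_CSLx()/CSU_HPx()/CSU_SA()/CSU_HPCTRL() initializers in imx8m_csu.h above build entries of struct imx_csu_cfg for imx_csu_init(), whose implementation is not shown here. The table below is an invented example: the chosen peripherals (IDs from imx_sec_def.h earlier in this section), the security levels and the CSU_INVALID terminator convention are assumptions for illustration, not a platform's real policy.

/*
 * Illustrative sketch only: open a few peripherals fully to the non-secure
 * world; peripheral choice and levels are placeholders.
 */
#include <imx8m_csu.h>
#include <imx_sec_def.h>

static const struct imx_csu_cfg csu_cfg_example[] = {
	CSU_CSLx(CSU_CSL_UART1, CSU_SEC_LEVEL_0, LOCKED),
	CSU_CSLx(CSU_CSL_USDHC1, CSU_SEC_LEVEL_0, LOCKED),
	CSU_CSLx(CSU_CSL_USDHC2, CSU_SEC_LEVEL_0, UNLOCKED),

	/* terminator; imx_csu_init() is assumed to stop at CSU_INVALID */
	{ CSU_INVALID },
};

/* e.g. imx_csu_init(csu_cfg_example); during early BL31 setup */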
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX8M_PSCI_H +#define IMX8M_PSCI_H + +#define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0]) +#define CLUSTER_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL1]) +#define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL]) + +int imx_pwr_domain_on(u_register_t mpidr); +void imx_pwr_domain_on_finish(const psci_power_state_t *target_state); +void imx_pwr_domain_off(const psci_power_state_t *target_state); +int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint); +void imx_cpu_standby(plat_local_state_t cpu_state); +void imx_domain_suspend(const psci_power_state_t *target_state); +void imx_domain_suspend_finish(const psci_power_state_t *target_state); +void __dead2 imx_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state); +int imx_system_reset2(int is_vendor, int reset_type, u_register_t cookie); + +#endif /* IMX8M_PSCI_H */ diff --git a/plat/imx/imx8m/include/imx8m_snvs.h b/plat/imx/imx8m/include/imx8m_snvs.h new file mode 100644 index 0000000..799e1d5 --- /dev/null +++ b/plat/imx/imx8m/include/imx8m_snvs.h @@ -0,0 +1,12 @@ +/* + * Copyright 2022-2023 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX8M_SNVS_H +#define IMX8M_SNVS_H + +void enable_snvs_privileged_access(void); + +#endif diff --git a/plat/imx/imx8m/include/imx_aipstz.h b/plat/imx/imx8m/include/imx_aipstz.h new file mode 100644 index 0000000..7616862 --- /dev/null +++ b/plat/imx/imx8m/include/imx_aipstz.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX_AIPSTZ_H +#define IMX_AIPSTZ_H + +#include <lib/utils_def.h> + +#define AIPSTZ_MPR0 U(0x0) +#define AIPSTZ_MPR1 U(0x4) + +#define AIPSTZ_OPACR_NUM U(0x5) +#define OPACR_OFFSET(i) U((i) * 4 + 0x40) + +struct aipstz_cfg { + uintptr_t base; + uint32_t mpr0; + uint32_t mpr1; + uint32_t opacr[AIPSTZ_OPACR_NUM]; +}; + +void imx_aipstz_init(const struct aipstz_cfg *aipstz_cfg); + +#endif /* IMX_AIPSTZ_H */ diff --git a/plat/imx/imx8m/include/imx_rdc.h b/plat/imx/imx8m/include/imx_rdc.h new file mode 100644 index 0000000..a6e10a7 --- /dev/null +++ b/plat/imx/imx8m/include/imx_rdc.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2019-2022 NXP. All rights reserved. 
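imx_aipstz.h above pairs with imx_aipstz_init() shown earlier in this section: the function walks a table of struct aipstz_cfg entries until it finds base == 0, writing MPR0/MPR1 and the five OPACR words for each bridge. The table below is a permissive, made-up example; the 0x77777777 master permissions and all-zero OPACR values are placeholders, not a recommended security policy.

/*
 * Illustrative sketch only: allow every master and peripheral on all four
 * AIPSTZ bridges (base addresses from platform_def.h above).
 */
#include <imx_aipstz.h>

#include <platform_def.h>

static const struct aipstz_cfg aipstz_example[] = {
	{ AIPSTZ1_BASE, 0x77777777, 0x77777777, .opacr = { 0x0, 0x0, 0x0, 0x0, 0x0 } },
	{ AIPSTZ2_BASE, 0x77777777, 0x77777777, .opacr = { 0x0, 0x0, 0x0, 0x0, 0x0 } },
	{ AIPSTZ3_BASE, 0x77777777, 0x77777777, .opacr = { 0x0, 0x0, 0x0, 0x0, 0x0 } },
	{ AIPSTZ4_BASE, 0x77777777, 0x77777777, .opacr = { 0x0, 0x0, 0x0, 0x0, 0x0 } },
	/* base == 0 terminates the walk in imx_aipstz_init() */
	{ 0 },
};

/* e.g. imx_aipstz_init(aipstz_example); before handing peripherals to BL33 */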
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef IMX_RDC_H +#define IMX_RDC_H + +#include <lib/utils_def.h> + +#include <imx_sec_def.h> +#include <platform_def.h> + +#define MDAn(x) (IMX_RDC_BASE + 0x200 + (x) * 4) +#define PDAPn(x) (IMX_RDC_BASE + 0x400 + (x) * 4) +#define MRSAn(x) (IMX_RDC_BASE + 0x800 + (x) * 0x10) +#define MREAn(x) (IMX_RDC_BASE + 0x804 + (x) * 0x10) +#define MRCn(x) (IMX_RDC_BASE + 0x808 + (x) * 0x10) + +#define LCK BIT(31) +#define SREQ BIT(30) +#define ENA BIT(30) + +#define DID0 U(0x0) +#define DID1 U(0x1) +#define DID2 U(0x2) +#define DID3 U(0x3) + +#define D3R BIT(7) +#define D3W BIT(6) +#define D2R BIT(5) +#define D2W BIT(4) +#define D1R BIT(3) +#define D1W BIT(2) +#define D0R BIT(1) +#define D0W BIT(0) + +union rdc_setting { + uint32_t rdc_mda; /* Master Domain Assignment */ + uint32_t rdc_pdap; /* Peripheral Domain Access Permissions */ + uint32_t rdc_mem_region[3]; /* Memory Region Access Control */ +}; + +enum rdc_type { + RDC_INVALID, + RDC_MDA, + RDC_PDAP, + RDC_MEM_REGION, +}; + +struct imx_rdc_cfg { + enum rdc_type type; /* config type Master, Peripheral or Memory region */ + int index; + union rdc_setting setting; +}; + +#define RDC_MDAn(i, mda) \ + {RDC_MDA, (i), .setting.rdc_mda = (mda), } +#define RDC_PDAPn(i, pdap) \ + {RDC_PDAP, (i), .setting.rdc_pdap = (pdap), } + +#define RDC_MEM_REGIONn(i, msa, mea, mrc) \ + { RDC_MEM_REGION, (i), \ + .setting.rdc_mem_region[0] = (msa), \ + .setting.rdc_mem_region[1] = (mea), \ + .setting.rdc_mem_region[2] = (mrc), \ + } + +void imx_rdc_init(const struct imx_rdc_cfg *cfg); + +#endif /* IMX_RDC_H */ + |
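Analogous to the CSU and AIPSTZ tables sketched above, imx_rdc_init() (earlier in this section) walks an array of struct imx_rdc_cfg until it reaches an RDC_INVALID entry. The fragment below shows the intended use of the RDC_MDAn()/RDC_PDAPn() helpers; the master and peripheral indices are invented for the example, since the real assignments are SoC-specific and not part of this header.

/*
 * Illustrative sketch only: move one bus master into domain 1 and restrict
 * one peripheral to domain-0 read/write. Index values are placeholders.
 */
#include <imx_rdc.h>

#define RDC_MDA_EXAMPLE_MASTER		U(1)	/* assumed master index */
#define RDC_PDAP_EXAMPLE_PERIPH		U(46)	/* assumed peripheral index */

static const struct imx_rdc_cfg rdc_cfg_example[] = {
	/* master domain assignment: attach the master to DID1 */
	RDC_MDAn(RDC_MDA_EXAMPLE_MASTER, DID1),

	/* peripheral access policy: domain 0 read/write only, locked */
	RDC_PDAPn(RDC_PDAP_EXAMPLE_PERIPH, D0R | D0W | LCK),

	/* terminator checked by imx_rdc_init() */
	{ RDC_INVALID },
};

/* e.g. imx_rdc_init(rdc_cfg_example); before starting the normal world */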