Diffstat (limited to 'plat/rockchip')
119 files changed, 20741 insertions, 0 deletions
diff --git a/plat/rockchip/common/aarch32/plat_helpers.S b/plat/rockchip/common/aarch32/plat_helpers.S new file mode 100644 index 0000000..475c297 --- /dev/null +++ b/plat/rockchip/common/aarch32/plat_helpers.S @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <platform_def.h> + +#include <arch.h> +#include <asm_macros.S> +#include <common/bl_common.h> +#include <cortex_a12.h> +#include <plat_private.h> +#include <plat_pmu_macros.S> + + .globl cpuson_entry_point + .globl cpuson_flags + .globl platform_cpu_warmboot + .globl plat_secondary_cold_boot_setup + .globl plat_report_exception + .globl plat_is_my_cpu_primary + .globl plat_my_core_pos + .globl plat_reset_handler + .globl plat_panic_handler + + /* + * void plat_reset_handler(void); + * + * Determine the SOC type and call the appropriate reset + * handler. + * + */ +func plat_reset_handler + bx lr +endfunc plat_reset_handler + +func plat_my_core_pos + ldcopr r0, MPIDR + and r1, r0, #MPIDR_CPU_MASK +#ifdef PLAT_RK_MPIDR_CLUSTER_MASK + and r0, r0, #PLAT_RK_MPIDR_CLUSTER_MASK +#else + and r0, r0, #MPIDR_CLUSTER_MASK +#endif + add r0, r1, r0, LSR #PLAT_RK_CLST_TO_CPUID_SHIFT + bx lr +endfunc plat_my_core_pos + + /* -------------------------------------------------------------------- + * void plat_secondary_cold_boot_setup (void); + * + * This function performs any platform specific actions + * needed for a secondary cpu after a cold reset e.g + * mark the cpu's presence, mechanism to place it in a + * holding pen etc. + * -------------------------------------------------------------------- + */ +func plat_secondary_cold_boot_setup + /* rk3288 does not do cold boot for secondary CPU */ +cb_panic: + b cb_panic +endfunc plat_secondary_cold_boot_setup + +func plat_is_my_cpu_primary + ldcopr r0, MPIDR +#ifdef PLAT_RK_MPIDR_CLUSTER_MASK + ldr r1, =(PLAT_RK_MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK) +#else + ldr r1, =(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK) +#endif + and r0, r1 + cmp r0, #PLAT_RK_PRIMARY_CPU + moveq r0, #1 + movne r0, #0 + bx lr +endfunc plat_is_my_cpu_primary + + /* -------------------------------------------------------------------- + * void plat_panic_handler(void) + * Call system reset function on panic. Set up an emergency stack so we + * can run C functions (it only needs to last for a few calls until we + * reboot anyway). 
+ * -------------------------------------------------------------------- + */ +func plat_panic_handler + bl plat_set_my_stack + b rockchip_soc_soft_reset +endfunc plat_panic_handler + + /* -------------------------------------------------------------------- + * void platform_cpu_warmboot (void); + * cpus online or resume entrypoint + * -------------------------------------------------------------------- + */ +func platform_cpu_warmboot _align=16 + push { r4 - r7, lr } + ldcopr r0, MPIDR + and r5, r0, #MPIDR_CPU_MASK +#ifdef PLAT_RK_MPIDR_CLUSTER_MASK + and r6, r0, #PLAT_RK_MPIDR_CLUSTER_MASK +#else + and r6, r0, #MPIDR_CLUSTER_MASK +#endif + mov r0, r6 + + func_rockchip_clst_warmboot + /* -------------------------------------------------------------------- + * big cluster id is 1 + * big cores id is from 0-3, little cores id 4-7 + * -------------------------------------------------------------------- + */ + add r7, r5, r6, LSR #PLAT_RK_CLST_TO_CPUID_SHIFT + /* -------------------------------------------------------------------- + * get per cpuup flag + * -------------------------------------------------------------------- + */ + ldr r4, =cpuson_flags + add r4, r4, r7, lsl #2 + ldr r1, [r4] + /* -------------------------------------------------------------------- + * check cpuon reason + * -------------------------------------------------------------------- + */ + cmp r1, #PMU_CPU_AUTO_PWRDN + beq boot_entry + cmp r1, #PMU_CPU_HOTPLUG + beq boot_entry + /* -------------------------------------------------------------------- + * If the boot core cpuson_flags or cpuson_entry_point is not + * expection. force the core into wfe. + * -------------------------------------------------------------------- + */ +wfe_loop: + wfe + b wfe_loop +boot_entry: + mov r1, #0 + str r1, [r4] + /* -------------------------------------------------------------------- + * get per cpuup boot addr + * -------------------------------------------------------------------- + */ + ldr r5, =cpuson_entry_point + ldr r2, [r5, r7, lsl #2] /* ehem. #3 */ + pop { r4 - r7, lr } + + bx r2 +endfunc platform_cpu_warmboot + + /* -------------------------------------------------------------------- + * Per-CPU Secure entry point - resume or power up + * -------------------------------------------------------------------- + */ + .section tzfw_coherent_mem, "a" + .align 3 +cpuson_entry_point: + .rept PLATFORM_CORE_COUNT + .quad 0 + .endr +cpuson_flags: + .rept PLATFORM_CORE_COUNT + .word 0 + .endr +rockchip_clst_warmboot_data diff --git a/plat/rockchip/common/aarch32/platform_common.c b/plat/rockchip/common/aarch32/platform_common.c new file mode 100644 index 0000000..9030951 --- /dev/null +++ b/plat/rockchip/common/aarch32/platform_common.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <string.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <lib/utils.h> +#include <lib/xlat_tables/xlat_tables.h> + +#include <plat_private.h> + +void plat_configure_mmu_svc_mon(unsigned long total_base, + unsigned long total_size, + unsigned long ro_start, + unsigned long ro_limit, + unsigned long coh_start, + unsigned long coh_limit) +{ + mmap_add_region(total_base, total_base, total_size, + MT_MEMORY | MT_RW | MT_SECURE); + mmap_add_region(ro_start, ro_start, ro_limit - ro_start, + MT_MEMORY | MT_RO | MT_SECURE); + mmap_add_region(coh_start, coh_start, coh_limit - coh_start, + MT_DEVICE | MT_RW | MT_SECURE); + mmap_add(plat_rk_mmap); + rockchip_plat_mmu_svc_mon(); + init_xlat_tables(); + enable_mmu_svc_mon(0); +} + +unsigned int plat_get_syscnt_freq2(void) +{ + return SYS_COUNTER_FREQ_IN_TICKS; +} + +/* + * generic pm code does cci handling, but rockchip arm32 platforms + * have ever only 1 cluster, so nothing to do. + */ +void plat_cci_init(void) +{ +} + +void plat_cci_enable(void) +{ +} + +void plat_cci_disable(void) +{ +} diff --git a/plat/rockchip/common/aarch32/pmu_sram_cpus_on.S b/plat/rockchip/common/aarch32/pmu_sram_cpus_on.S new file mode 100644 index 0000000..a05ae54 --- /dev/null +++ b/plat/rockchip/common/aarch32/pmu_sram_cpus_on.S @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <platform_def.h> + + .globl pmu_cpuson_entrypoint + .macro pmusram_entry_func _name + .section .pmusram.entry, "ax" + .type \_name, %function + .cfi_startproc + \_name: + .endm + +pmusram_entry_func pmu_cpuson_entrypoint + +#if PSRAM_CHECK_WAKEUP_CPU +check_wake_cpus: + ldcopr r0, MPIDR + and r1, r0, #MPIDR_CPU_MASK +#ifdef PLAT_RK_MPIDR_CLUSTER_MASK + and r0, r0, #PLAT_RK_MPIDR_CLUSTER_MASK +#else + and r0, r0, #MPIDR_CLUSTER_MASK +#endif + orr r0, r0, r1 + + /* primary_cpu */ + ldr r1, boot_mpidr + cmp r0, r1 + beq sys_wakeup + + /* + * If the core is not the primary cpu, + * force the core into wfe. + */ +wfe_loop: + wfe + b wfe_loop +sys_wakeup: +#endif + +#if PSRAM_DO_DDR_RESUME +ddr_resume: + ldr r2, =__bl32_sram_stack_end + mov sp, r2 + bl dmc_resume +#endif + bl sram_restore +sys_resume: + bl sp_min_warm_entrypoint +endfunc pmu_cpuson_entrypoint diff --git a/plat/rockchip/common/aarch64/plat_helpers.S b/plat/rockchip/common/aarch64/plat_helpers.S new file mode 100644 index 0000000..4af052b --- /dev/null +++ b/plat/rockchip/common/aarch64/plat_helpers.S @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <platform_def.h> + +#include <arch.h> +#include <asm_macros.S> +#include <common/bl_common.h> +#include <cortex_a53.h> +#include <cortex_a72.h> +#include <plat_private.h> +#include <plat_pmu_macros.S> + + .globl cpuson_entry_point + .globl cpuson_flags + .globl platform_cpu_warmboot + .globl plat_secondary_cold_boot_setup + .globl plat_report_exception + .globl plat_is_my_cpu_primary + .globl plat_my_core_pos + .globl plat_reset_handler + .globl plat_panic_handler + + /* + * void plat_reset_handler(void); + * + * Determine the SOC type and call the appropriate reset + * handler. 
+ * + */ +func plat_reset_handler + mrs x0, midr_el1 + ubfx x0, x0, MIDR_PN_SHIFT, #12 + cmp w0, #((CORTEX_A72_MIDR >> MIDR_PN_SHIFT) & MIDR_PN_MASK) + b.eq handler_a72 + b handler_end +handler_a72: + /* + * This handler does the following: + * Set the L2 Data RAM latency for Cortex-A72. + * Set the L2 Tag RAM latency to for Cortex-A72. + */ + mov x0, #((5 << CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT) | \ + (0x1 << 5)) + msr CORTEX_A72_L2CTLR_EL1, x0 + isb +handler_end: + ret +endfunc plat_reset_handler + +func plat_my_core_pos + mrs x0, mpidr_el1 + and x1, x0, #MPIDR_CPU_MASK + and x0, x0, #MPIDR_CLUSTER_MASK + add x0, x1, x0, LSR #PLAT_RK_CLST_TO_CPUID_SHIFT + ret +endfunc plat_my_core_pos + + /* -------------------------------------------------------------------- + * void plat_secondary_cold_boot_setup (void); + * + * This function performs any platform specific actions + * needed for a secondary cpu after a cold reset e.g + * mark the cpu's presence, mechanism to place it in a + * holding pen etc. + * -------------------------------------------------------------------- + */ +func plat_secondary_cold_boot_setup + /* rk3368 does not do cold boot for secondary CPU */ +cb_panic: + b cb_panic +endfunc plat_secondary_cold_boot_setup + +func plat_is_my_cpu_primary + mrs x0, mpidr_el1 + and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK) + cmp x0, #PLAT_RK_PRIMARY_CPU + cset x0, eq + ret +endfunc plat_is_my_cpu_primary + + /* -------------------------------------------------------------------- + * void plat_panic_handler(void) + * Call system reset function on panic. Set up an emergency stack so we + * can run C functions (it only needs to last for a few calls until we + * reboot anyway). + * -------------------------------------------------------------------- + */ +func plat_panic_handler + msr spsel, #0 + bl plat_set_my_stack + b rockchip_soc_soft_reset +endfunc plat_panic_handler + + /* -------------------------------------------------------------------- + * void platform_cpu_warmboot (void); + * cpus online or resume enterpoint + * -------------------------------------------------------------------- + */ +func platform_cpu_warmboot _align=16 + mrs x0, MPIDR_EL1 + and x19, x0, #MPIDR_CPU_MASK + and x20, x0, #MPIDR_CLUSTER_MASK + mov x0, x20 + func_rockchip_clst_warmboot + /* -------------------------------------------------------------------- + * big cluster id is 1 + * big cores id is from 0-3, little cores id 4-7 + * -------------------------------------------------------------------- + */ + add x21, x19, x20, lsr #PLAT_RK_CLST_TO_CPUID_SHIFT + /* -------------------------------------------------------------------- + * get per cpuup flag + * -------------------------------------------------------------------- + */ + adr x4, cpuson_flags + add x4, x4, x21, lsl #2 + ldr w1, [x4] + /* -------------------------------------------------------------------- + * check cpuon reason + * -------------------------------------------------------------------- + */ + cmp w1, PMU_CPU_AUTO_PWRDN + b.eq boot_entry + cmp w1, PMU_CPU_HOTPLUG + b.eq boot_entry + /* -------------------------------------------------------------------- + * If the boot core cpuson_flags or cpuson_entry_point is not + * expection. force the core into wfe. 
+ * -------------------------------------------------------------------- + */ +wfe_loop: + wfe + b wfe_loop +boot_entry: + str wzr, [x4] + /* -------------------------------------------------------------------- + * get per cpuup boot addr + * -------------------------------------------------------------------- + */ + adr x5, cpuson_entry_point + ldr x2, [x5, x21, lsl #3] + br x2 +endfunc platform_cpu_warmboot + + /* -------------------------------------------------------------------- + * Per-CPU Secure entry point - resume or power up + * -------------------------------------------------------------------- + */ + .section tzfw_coherent_mem, "a" + .align 3 +cpuson_entry_point: + .rept PLATFORM_CORE_COUNT + .quad 0 + .endr +cpuson_flags: + .rept PLATFORM_CORE_COUNT + .word 0 + .endr +rockchip_clst_warmboot_data diff --git a/plat/rockchip/common/aarch64/platform_common.c b/plat/rockchip/common/aarch64/platform_common.c new file mode 100644 index 0000000..81e8520 --- /dev/null +++ b/plat/rockchip/common/aarch64/platform_common.c @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <string.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <drivers/arm/cci.h> +#include <lib/utils.h> +#include <lib/xlat_tables/xlat_tables.h> + +#include <plat_private.h> + +#ifdef PLAT_RK_CCI_BASE +static const int cci_map[] = { + PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX, + PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX +}; +#endif + +/****************************************************************************** + * Macro generating the code for the function setting up the pagetables as per + * the platform memory map & initialize the mmu, for the given exception level + ******************************************************************************/ +#define DEFINE_CONFIGURE_MMU_EL(_el) \ + void plat_configure_mmu_el ## _el(unsigned long total_base, \ + unsigned long total_size, \ + unsigned long ro_start, \ + unsigned long ro_limit, \ + unsigned long coh_start, \ + unsigned long coh_limit) \ + { \ + mmap_add_region(total_base, total_base, \ + total_size, \ + MT_MEMORY | MT_RW | MT_SECURE); \ + mmap_add_region(ro_start, ro_start, \ + ro_limit - ro_start, \ + MT_MEMORY | MT_RO | MT_SECURE); \ + mmap_add_region(coh_start, coh_start, \ + coh_limit - coh_start, \ + MT_DEVICE | MT_RW | MT_SECURE); \ + mmap_add(plat_rk_mmap); \ + rockchip_plat_mmu_el##_el(); \ + init_xlat_tables(); \ + \ + enable_mmu_el ## _el(0); \ + } + +/* Define EL3 variants of the function initialising the MMU */ +DEFINE_CONFIGURE_MMU_EL(3) + +unsigned int plat_get_syscnt_freq2(void) +{ + return SYS_COUNTER_FREQ_IN_TICKS; +} + +void plat_cci_init(void) +{ +#ifdef PLAT_RK_CCI_BASE + /* Initialize CCI driver */ + cci_init(PLAT_RK_CCI_BASE, cci_map, ARRAY_SIZE(cci_map)); +#endif +} + +void plat_cci_enable(void) +{ + /* + * Enable CCI coherency for this cluster. + * No need for locks as no other cpu is active at the moment. 
+ */ +#ifdef PLAT_RK_CCI_BASE + cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr())); +#endif +} + +void plat_cci_disable(void) +{ +#ifdef PLAT_RK_CCI_BASE + cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr())); +#endif +} diff --git a/plat/rockchip/common/aarch64/pmu_sram_cpus_on.S b/plat/rockchip/common/aarch64/pmu_sram_cpus_on.S new file mode 100644 index 0000000..d91ee0e --- /dev/null +++ b/plat/rockchip/common/aarch64/pmu_sram_cpus_on.S @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <platform_def.h> + + .globl pmu_cpuson_entrypoint + .macro pmusram_entry_func _name + .section .pmusram.entry, "ax" + .type \_name, %function + .cfi_startproc + \_name: + .endm + +pmusram_entry_func pmu_cpuson_entrypoint + +#if PSRAM_CHECK_WAKEUP_CPU +check_wake_cpus: + mrs x0, MPIDR_EL1 + and x1, x0, #MPIDR_CPU_MASK + and x0, x0, #MPIDR_CLUSTER_MASK + orr x0, x0, x1 + + /* primary_cpu */ + ldr w1, boot_mpidr + cmp w0, w1 + b.eq sys_wakeup + + /* + * If the core is not the primary cpu, + * force the core into wfe. + */ +wfe_loop: + wfe + b wfe_loop +sys_wakeup: +#endif + +#if PSRAM_DO_DDR_RESUME +ddr_resume: + ldr x2, =__bl31_sram_stack_end + mov sp, x2 + bl dmc_resume +#endif + bl sram_restore +sys_resume: + bl bl31_warm_entrypoint +endfunc pmu_cpuson_entrypoint diff --git a/plat/rockchip/common/bl31_plat_setup.c b/plat/rockchip/common/bl31_plat_setup.c new file mode 100644 index 0000000..98ef415 --- /dev/null +++ b/plat/rockchip/common/bl31_plat_setup.c @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <platform_def.h> + +#include <common/bl_common.h> +#include <common/debug.h> +#include <common/desc_image_load.h> +#include <drivers/console.h> +#include <drivers/generic_delay_timer.h> +#include <drivers/ti/uart/uart_16550.h> +#include <lib/mmio.h> +#include <plat_private.h> +#include <plat/common/platform.h> + +static entry_point_info_t bl32_ep_info; +static entry_point_info_t bl33_ep_info; + +/******************************************************************************* + * Return a pointer to the 'entry_point_info' structure of the next image for + * the security state specified. BL33 corresponds to the non-secure image type + * while BL32 corresponds to the secure image type. A NULL pointer is returned + * if the image does not exist. + ******************************************************************************/ +entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type) +{ + entry_point_info_t *next_image_info; + + next_image_info = (type == NON_SECURE) ? &bl33_ep_info : &bl32_ep_info; + assert(next_image_info->h.type == PARAM_EP); + + /* None of the images on this platform can have 0x0 as the entrypoint */ + if (next_image_info->pc) + return next_image_info; + else + return NULL; +} + +#pragma weak params_early_setup +void params_early_setup(u_register_t plat_param_from_bl2) +{ +} + +/******************************************************************************* + * Perform any BL3-1 early platform setup. Here is an opportunity to copy + * parameters passed by the calling EL (S-EL1 in BL2 & EL3 in BL1) before they + * are lost (potentially). This needs to be done before the MMU is initialized + * so that the memory layout can be used while creating page tables. 
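The plat_configure_mmu_* helpers above finish by appending the SoC-specific plat_rk_mmap[] table before the translation tables are built and the MMU is enabled. A minimal sketch of such a table, assuming a single made-up device window (the base address and size below are placeholders, not values from this patch):

	const mmap_region_t plat_rk_mmap[] = {
		/* hypothetical SoC register window, identity-mapped as secure device memory */
		MAP_REGION_FLAT(0xff000000, 0x01000000,
				MT_DEVICE | MT_RW | MT_SECURE),
		{ 0 },	/* zero terminator expected by mmap_add() */
	};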
+ * BL2 has flushed this information to memory, so we are guaranteed to pick up + * good data. + ******************************************************************************/ +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + static console_t console; + + params_early_setup(arg1); + + if (rockchip_get_uart_base() != 0) + console_16550_register(rockchip_get_uart_base(), + rockchip_get_uart_clock(), + rockchip_get_uart_baudrate(), &console); + + VERBOSE("bl31_setup\n"); + + bl31_params_parse_helper(arg0, &bl32_ep_info, &bl33_ep_info); +} + +/******************************************************************************* + * Perform any BL3-1 platform setup code + ******************************************************************************/ +void bl31_platform_setup(void) +{ + generic_delay_timer_init(); + plat_rockchip_soc_init(); + + /* Initialize the gic cpu and distributor interfaces */ + plat_rockchip_gic_driver_init(); + plat_rockchip_gic_init(); + plat_rockchip_pmu_init(); +} + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. At the + * moment this is only intializes the mmu in a quick and dirty way. + ******************************************************************************/ +void bl31_plat_arch_setup(void) +{ + plat_cci_init(); + plat_cci_enable(); + plat_configure_mmu_el3(BL_CODE_BASE, + BL_COHERENT_RAM_END - BL_CODE_BASE, + BL_CODE_BASE, + BL_CODE_END, + BL_COHERENT_RAM_BASE, + BL_COHERENT_RAM_END); +} diff --git a/plat/rockchip/common/drivers/parameter/ddr_parameter.c b/plat/rockchip/common/drivers/parameter/ddr_parameter.c new file mode 100644 index 0000000..e89fe1e --- /dev/null +++ b/plat/rockchip/common/drivers/parameter/ddr_parameter.c @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <string.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/console.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include <plat_private.h> +#include <soc.h> + +#include "ddr_parameter.h" + +/* + * The miniloader delivers the parameters about ddr usage info from address + * 0x02000000 and the data format is defined as below figure. It tells ATF the + * areas of ddr that are used by platform, we treat them as non-secure regions + * by default. Then we should parse the other part regions and configurate them + * as secure regions to avoid illegal access. 
+ * + * [ddr usage info data format] + * 0x02000000 + * ----------------------------------------------------------------------------- + * | <name> | <size> | <description> | + * ----------------------------------------------------------------------------- + * | count | 4byte | the array numbers of the | + * | | | 'addr_array' and 'size_array' | + * ----------------------------------------------------------------------------- + * | reserved | 4byte | just for 'addr_array' 8byte aligned | + * ----------------------------------------------------------------------------- + * | addr_array[count] | per 8byte | memory region base address | + * ----------------------------------------------------------------------------- + * | size_array[count] | per 8byte | memory region size (byte) | + * ----------------------------------------------------------------------------- + */ + +/* + * function: read parameters info(ns-regions) and try to parse s-regions info + * + * @addr: head address to the ddr usage struct from miniloader + * @max_mb: the max ddr capacity(MB) that the platform support + */ +struct param_ddr_usage ddr_region_usage_parse(uint64_t addr, uint64_t max_mb) +{ + uint64_t base, top; + uint32_t i, addr_offset, size_offset; + struct param_ddr_usage p; + + memset(&p, 0, sizeof(p)); + + /* read how many blocks of ns-regions, read from offset: 0x0 */ + p.ns_nr = mmio_read_32(addr + REGION_NR_OFFSET); + if ((p.ns_nr > DDR_REGION_NR_MAX) || (p.ns_nr == 0)) { + ERROR("over or zero region, nr=%d, max=%d\n", + p.ns_nr, DDR_REGION_NR_MAX); + return p; + } + + /* whole ddr regions boundary, it will be used when parse s-regions */ + p.boundary = max_mb; + + /* calculate ns-region base addr and size offset */ + addr_offset = REGION_ADDR_OFFSET; + size_offset = REGION_ADDR_OFFSET + p.ns_nr * REGION_DATA_PER_BYTES; + + /* read all ns-regions base and top address */ + for (i = 0; i < p.ns_nr; i++) { + base = mmio_read_64(addr + addr_offset); + top = base + mmio_read_64(addr + size_offset); + /* + * translate byte to MB and store info, + * Miniloader will promise every ns-region is MB aligned. + */ + p.ns_base[i] = RG_SIZE_MB(base); + p.ns_top[i] = RG_SIZE_MB(top); + + addr_offset += REGION_DATA_PER_BYTES; + size_offset += REGION_DATA_PER_BYTES; + } + + /* + * a s-region's base starts from previous ns-region's top, and a + * s-region's top ends with next ns-region's base. maybe like this: + * + * case1: ns-regison start from 0MB + * ----------------------------------------------- + * | ns0 | S0 | ns1 | S1 | ns2 | + * 0----------------------------------------------- max_mb + * + * + * case2: ns-regison not start from 0MB + * ----------------------------------------------- + * | S0 | ns0 | ns1 | ns2 | S1 | + * 0----------------------------------------------- max_mb + */ + + /* like above case2 figure, ns-region is not start from 0MB */ + if (p.ns_base[0] != 0) { + p.s_base[p.s_nr] = 0; + p.s_top[p.s_nr] = p.ns_base[0]; + p.s_nr++; + } + + /* + * notice: if ns-regions not start from 0MB, p.s_nr = 1 now, otherwise 0 + */ + for (i = 0; i < p.ns_nr; i++) { + /* + * if current ns-regions top covers boundary, + * that means s-regions are all parsed yet, so finsh. 
+ */ + if (p.ns_top[i] == p.boundary) + goto out; + + /* s-region's base starts from previous ns-region's top */ + p.s_base[p.s_nr] = p.ns_top[i]; + + /* s-region's top ends with next ns-region's base */ + if (i + 1 < p.ns_nr) + p.s_top[p.s_nr] = p.ns_base[i + 1]; + else + p.s_top[p.s_nr] = p.boundary; + p.s_nr++; + } +out: + return p; +} diff --git a/plat/rockchip/common/drivers/parameter/ddr_parameter.h b/plat/rockchip/common/drivers/parameter/ddr_parameter.h new file mode 100644 index 0000000..25c93a1 --- /dev/null +++ b/plat/rockchip/common/drivers/parameter/ddr_parameter.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef DDR_PARAMETER_H +#define DDR_PARAMETER_H + +#include <string.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/console.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include <plat_private.h> +#include <soc.h> + +#define DDR_REGION_NR_MAX 10 +#define REGION_NR_OFFSET 0 +#define REGION_ADDR_OFFSET 8 +#define REGION_DATA_PER_BYTES 8 +#define RG_SIZE_MB(byte) ((byte) >> 20) + +/* unit: MB */ +struct param_ddr_usage { + uint64_t boundary; + + uint32_t ns_nr; + uint64_t ns_base[DDR_REGION_NR_MAX]; + uint64_t ns_top[DDR_REGION_NR_MAX]; + + uint32_t s_nr; + uint64_t s_base[DDR_REGION_NR_MAX + 1]; + uint64_t s_top[DDR_REGION_NR_MAX + 1]; +}; + +struct param_ddr_usage ddr_region_usage_parse(uint64_t addr, uint64_t max_mb); + +#endif /* DDR_PARAMETER_H */ diff --git a/plat/rockchip/common/drivers/pmu/pmu_com.h b/plat/rockchip/common/drivers/pmu/pmu_com.h new file mode 100644 index 0000000..5359f73 --- /dev/null +++ b/plat/rockchip/common/drivers/pmu/pmu_com.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PMU_COM_H +#define PMU_COM_H + +#ifndef CHECK_CPU_WFIE_BASE +#define CHECK_CPU_WFIE_BASE (PMU_BASE + PMU_CORE_PWR_ST) +#endif +/* + * Use this macro to instantiate lock before it is used in below + * rockchip_pd_lock_xxx() macros + */ +DECLARE_BAKERY_LOCK(rockchip_pd_lock); + +/* + * These are wrapper macros to the powe domain Bakery Lock API. 
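A typical hot-unplug sequence built from the helpers defined further down in this header; a sketch only, with the power-domain ID and wfi mask left as named placeholders rather than constants provided by this patch:

	/* wait for the departing core to reach wfi, then gate its power domain */
	if (check_cpu_wfie(cpu_id, WFI_MSK_PLACEHOLDER) == 0)
		pmu_power_domain_ctr(PD_CPU_PLACEHOLDER, pmu_pd_off);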
+ */ +#define rockchip_pd_lock_init() bakery_lock_init(&rockchip_pd_lock) +#define rockchip_pd_lock_get() bakery_lock_get(&rockchip_pd_lock) +#define rockchip_pd_lock_rls() bakery_lock_release(&rockchip_pd_lock) + +/***************************************************************************** + * power domain on or off + *****************************************************************************/ +enum pmu_pd_state { + pmu_pd_on = 0, + pmu_pd_off = 1 +}; + +#pragma weak plat_ic_get_pending_interrupt_id +#pragma weak pmu_power_domain_ctr +#pragma weak check_cpu_wfie + +static inline uint32_t pmu_power_domain_st(uint32_t pd) +{ + uint32_t pwrdn_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & BIT(pd); + + if (pwrdn_st) + return pmu_pd_off; + else + return pmu_pd_on; +} + +static int pmu_power_domain_ctr(uint32_t pd, uint32_t pd_state) +{ + uint32_t val; + uint32_t loop = 0; + int ret = 0; + + rockchip_pd_lock_get(); + + val = mmio_read_32(PMU_BASE + PMU_PWRDN_CON); + if (pd_state == pmu_pd_off) + val |= BIT(pd); + else + val &= ~BIT(pd); + + mmio_write_32(PMU_BASE + PMU_PWRDN_CON, val); + dsb(); + + while ((pmu_power_domain_st(pd) != pd_state) && (loop < PD_CTR_LOOP)) { + udelay(1); + loop++; + } + + if (pmu_power_domain_st(pd) != pd_state) { + WARN("%s: %d, %d, error!\n", __func__, pd, pd_state); + ret = -EINVAL; + } + + rockchip_pd_lock_rls(); + + return ret; +} + +static int check_cpu_wfie(uint32_t cpu_id, uint32_t wfie_msk) +{ + uint32_t cluster_id, loop = 0; + + if (cpu_id >= PLATFORM_CLUSTER0_CORE_COUNT) { + cluster_id = 1; + cpu_id -= PLATFORM_CLUSTER0_CORE_COUNT; + } else { + cluster_id = 0; + } + + /* + * wfe/wfi tracking not possible, hopefully the host + * was sucessful in enabling wfe/wfi. + * We'll give a bit of additional time, like the kernel does. + */ + if ((cluster_id && clstb_cpu_wfe < 0) || + (!cluster_id && clstl_cpu_wfe < 0)) { + mdelay(1); + return 0; + } + + if (cluster_id) + wfie_msk <<= (clstb_cpu_wfe + cpu_id); + else + wfie_msk <<= (clstl_cpu_wfe + cpu_id); + + while (!(mmio_read_32(CHECK_CPU_WFIE_BASE) & wfie_msk) && + (loop < CHK_CPU_LOOP)) { + udelay(1); + loop++; + } + + if ((mmio_read_32(CHECK_CPU_WFIE_BASE) & wfie_msk) == 0) { + WARN("%s: %d, %d, %d, error!\n", __func__, + cluster_id, cpu_id, wfie_msk); + return -EINVAL; + } + + return 0; +} + +#endif /* PMU_COM_H */ diff --git a/plat/rockchip/common/include/plat_macros.S b/plat/rockchip/common/include/plat_macros.S new file mode 100644 index 0000000..691beeb --- /dev/null +++ b/plat/rockchip/common/include/plat_macros.S @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef ROCKCHIP_PLAT_MACROS_S +#define ROCKCHIP_PLAT_MACROS_S + +#include <drivers/arm/cci.h> +#include <drivers/arm/gic_common.h> +#include <drivers/arm/gicv2.h> +#include <drivers/arm/gicv3.h> +#include <platform_def.h> + +.section .rodata.gic_reg_name, "aS" +/* Applicable only to GICv2 and GICv3 with SRE disabled (legacy mode) */ +gicc_regs: + .asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", "" + +/* Applicable only to GICv3 with SRE enabled */ +icc_regs: + .asciz "icc_hppir0_el1", "icc_hppir1_el1", "icc_ctlr_el3", "" + +/* Registers common to both GICv2 and GICv3 */ +gicd_pend_reg: + .asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n" \ + " Offset:\t\t\tvalue\n" +newline: + .asciz "\n" +spacer: + .asciz ":\t\t0x" + +.section .rodata.cci_reg_name, "aS" +cci_iface_regs: + .asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , "" + + /* --------------------------------------------- + * The below utility macro prints out relevant GIC + * and CCI registers whenever an unhandled + * exception is taken in BL31. + * Expects: GICD base in x26, GICC base in x27 + * Clobbers: x0 - x10, sp + * --------------------------------------------- + */ + .macro plat_crash_print_regs + + mov_imm x26, PLAT_RK_GICD_BASE + mov_imm x27, PLAT_RK_GICC_BASE + + /* Check for GICv3 system register access */ + mrs x7, id_aa64pfr0_el1 + ubfx x7, x7, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_WIDTH + cmp x7, #1 + b.ne print_gicv2 + + /* Check for SRE enable */ + mrs x8, ICC_SRE_EL3 + tst x8, #ICC_SRE_SRE_BIT + b.eq print_gicv2 + + /* Load the icc reg list to x6 */ + adr x6, icc_regs + /* Load the icc regs to gp regs used by str_in_crash_buf_print */ + mrs x8, ICC_HPPIR0_EL1 + mrs x9, ICC_HPPIR1_EL1 + mrs x10, ICC_CTLR_EL3 + /* Store to the crash buf and print to console */ + bl str_in_crash_buf_print + b print_gic_common + +print_gicv2: + /* Load the gicc reg list to x6 */ + adr x6, gicc_regs + /* Load the gicc regs to gp regs used by str_in_crash_buf_print */ + ldr w8, [x27, #GICC_HPPIR] + ldr w9, [x27, #GICC_AHPPIR] + ldr w10, [x27, #GICC_CTLR] + /* Store to the crash buf and print to console */ + bl str_in_crash_buf_print + +print_gic_common: + /* Print the GICD_ISPENDR regs */ + add x7, x26, #GICD_ISPENDR + adr x4, gicd_pend_reg + bl asm_print_str +gicd_ispendr_loop: + sub x4, x7, x26 + cmp x4, #0x280 + b.eq exit_print_gic_regs + bl asm_print_hex + + adr x4, spacer + bl asm_print_str + + ldr x4, [x7], #8 + bl asm_print_hex + + adr x4, newline + bl asm_print_str + b gicd_ispendr_loop +exit_print_gic_regs: + +#if PLATFORM_CLUSTER_COUNT > 1 + adr x6, cci_iface_regs + /* Store in x7 the base address of the first interface */ + mov_imm x7, (PLAT_RK_CCI_BASE + SLAVE_IFACE_OFFSET( \ + PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX)) + ldr w8, [x7, #SNOOP_CTRL_REG] + /* Store in x7 the base address of the second interface */ + mov_imm x7, (PLAT_RK_CCI_BASE + SLAVE_IFACE_OFFSET( \ + PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX)) + ldr w9, [x7, #SNOOP_CTRL_REG] + /* Store to the crash buf and print to console */ + bl str_in_crash_buf_print +#endif + .endm + +#endif /* ROCKCHIP_PLAT_MACROS_S */ diff --git a/plat/rockchip/common/include/plat_params.h b/plat/rockchip/common/include/plat_params.h new file mode 100644 index 0000000..95b850f --- /dev/null +++ b/plat/rockchip/common/include/plat_params.h @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_PARAMS_H +#define PLAT_PARAMS_H + +#include <stdint.h> + +#include <export/plat/rockchip/common/plat_params_exp.h> + +#endif /* PLAT_PARAMS_H */ diff --git a/plat/rockchip/common/include/plat_private.h b/plat/rockchip/common/include/plat_private.h new file mode 100644 index 0000000..990d106 --- /dev/null +++ b/plat/rockchip/common/include/plat_private.h @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_PRIVATE_H +#define PLAT_PRIVATE_H + +#ifndef __ASSEMBLER__ + +#include <stdint.h> + +#include <lib/psci/psci.h> +#include <lib/xlat_tables/xlat_tables.h> +#include <lib/mmio.h> +#include <plat_params.h> + +#define __sramdata __attribute__((section(".sram.data"))) +#define __sramconst __attribute__((section(".sram.rodata"))) +#define __sramfunc __attribute__((section(".sram.text"))) + +#define __pmusramdata __attribute__((section(".pmusram.data"))) +#define __pmusramconst __attribute__((section(".pmusram.rodata"))) +#define __pmusramfunc __attribute__((section(".pmusram.text"))) + +extern uint32_t __bl31_sram_text_start, __bl31_sram_text_end; +extern uint32_t __bl31_sram_data_start, __bl31_sram_data_end; +extern uint32_t __bl31_sram_stack_start, __bl31_sram_stack_end; +extern uint32_t __bl31_sram_text_real_end, __bl31_sram_data_real_end; +extern uint32_t __sram_incbin_start, __sram_incbin_end; +extern uint32_t __sram_incbin_real_end; + +/****************************************************************************** + * The register have write-mask bits, it is mean, if you want to set the bits, + * you needs set the write-mask bits at the same time, + * The write-mask bits is in high 16-bits. + * The fllowing macro definition helps access write-mask bits reg efficient! 
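A usage sketch of the write-mask convention described above, built on the BITS_WITH_WMASK() helper defined just below (the GRF_BASE/GRF_SOC_CON0 names and the field position are invented for illustration):

	/* write 0x2 into bits [5:4]; bits [21:20] carry the matching write-enable mask */
	mmio_write_32(GRF_BASE + GRF_SOC_CON0, BITS_WITH_WMASK(0x2, 0x3, 4));

Because only the masked bits are enabled for writing, the other fields of the register are left untouched without a read-modify-write.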
+ ******************************************************************************/ +#define REG_MSK_SHIFT 16 + +#ifndef WMSK_BIT +#define WMSK_BIT(nr) BIT((nr) + REG_MSK_SHIFT) +#endif + +/* set one bit with write mask */ +#ifndef BIT_WITH_WMSK +#define BIT_WITH_WMSK(nr) (BIT(nr) | WMSK_BIT(nr)) +#endif + +#ifndef BITS_SHIFT +#define BITS_SHIFT(bits, shift) (bits << (shift)) +#endif + +#ifndef BITS_WITH_WMASK +#define BITS_WITH_WMASK(bits, msk, shift)\ + (BITS_SHIFT(bits, shift) | BITS_SHIFT(msk, (shift + REG_MSK_SHIFT))) +#endif + +/****************************************************************************** + * Function and variable prototypes + *****************************************************************************/ +#ifdef __aarch64__ +void plat_configure_mmu_el3(unsigned long total_base, + unsigned long total_size, + unsigned long, + unsigned long, + unsigned long, + unsigned long); + +void rockchip_plat_mmu_el3(void); +#else +void plat_configure_mmu_svc_mon(unsigned long total_base, + unsigned long total_size, + unsigned long, + unsigned long, + unsigned long, + unsigned long); + +void rockchip_plat_mmu_svc_mon(void); +#endif + +void plat_cci_init(void); +void plat_cci_enable(void); +void plat_cci_disable(void); + +void plat_delay_timer_init(void); + +void params_early_setup(u_register_t plat_params_from_bl2); + +void plat_rockchip_gic_driver_init(void); +void plat_rockchip_gic_init(void); +void plat_rockchip_gic_cpuif_enable(void); +void plat_rockchip_gic_cpuif_disable(void); +void plat_rockchip_gic_pcpu_init(void); + +void plat_rockchip_pmu_init(void); +void plat_rockchip_soc_init(void); +uintptr_t plat_get_sec_entrypoint(void); + +void platform_cpu_warmboot(void); + +struct bl_aux_gpio_info *plat_get_rockchip_gpio_reset(void); +struct bl_aux_gpio_info *plat_get_rockchip_gpio_poweroff(void); +struct bl_aux_gpio_info *plat_get_rockchip_suspend_gpio(uint32_t *count); +struct bl_aux_rk_apio_info *plat_get_rockchip_suspend_apio(void); +void plat_rockchip_gpio_init(void); +void plat_rockchip_save_gpio(void); +void plat_rockchip_restore_gpio(void); + +int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint); +int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl, + plat_local_state_t lvl_state); +int rockchip_soc_cores_pwr_dm_off(void); +int rockchip_soc_sys_pwr_dm_suspend(void); +int rockchip_soc_cores_pwr_dm_suspend(void); +int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, + plat_local_state_t lvl_state); +int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl, + plat_local_state_t lvl_state); +int rockchip_soc_cores_pwr_dm_on_finish(void); +int rockchip_soc_sys_pwr_dm_resume(void); + +int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, + plat_local_state_t lvl_state); +int rockchip_soc_cores_pwr_dm_resume(void); +void __dead2 rockchip_soc_soft_reset(void); +void __dead2 rockchip_soc_system_off(void); +void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi( + const psci_power_state_t *target_state); +void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void); + +extern const unsigned char rockchip_power_domain_tree_desc[]; + +extern void *pmu_cpuson_entrypoint; +extern u_register_t cpuson_entry_point[PLATFORM_CORE_COUNT]; +extern uint32_t cpuson_flags[PLATFORM_CORE_COUNT]; + +extern const mmap_region_t plat_rk_mmap[]; + +uint32_t rockchip_get_uart_base(void); +uint32_t rockchip_get_uart_baudrate(void); +uint32_t rockchip_get_uart_clock(void); + +#endif /* __ASSEMBLER__ */ + +/****************************************************************************** + * cpu up status + * The bits of 
macro value is not more than 12 bits for cmp instruction! + ******************************************************************************/ +#define PMU_CPU_HOTPLUG 0xf00 +#define PMU_CPU_AUTO_PWRDN 0xf0 +#define PMU_CLST_RET 0xa5 + +#endif /* PLAT_PRIVATE_H */ diff --git a/plat/rockchip/common/include/rockchip_sip_svc.h b/plat/rockchip/common/include/rockchip_sip_svc.h new file mode 100644 index 0000000..340d653 --- /dev/null +++ b/plat/rockchip/common/include/rockchip_sip_svc.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef ROCKCHIP_SIP_SVC_H +#define ROCKCHIP_SIP_SVC_H + +/* SMC function IDs for SiP Service queries */ +#define SIP_SVC_CALL_COUNT 0x8200ff00 +#define SIP_SVC_UID 0x8200ff01 +#define SIP_SVC_VERSION 0x8200ff03 + +/* rockchip SiP Service Calls version numbers */ +#define RK_SIP_SVC_VERSION_MAJOR 0x0 +#define RK_SIP_SVC_VERSION_MINOR 0x1 + +/* Number of ROCKCHIP SiP Calls implemented */ +#define RK_COMMON_SIP_NUM_CALLS 0x3 + +enum { + RK_SIP_E_SUCCESS = 0, + RK_SIP_E_INVALID_PARAM = -1 +}; + +#endif /* ROCKCHIP_SIP_SVC_H */ diff --git a/plat/rockchip/common/params_setup.c b/plat/rockchip/common/params_setup.c new file mode 100644 index 0000000..68054ad --- /dev/null +++ b/plat/rockchip/common/params_setup.c @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> +#include <limits.h> +#include <string.h> + +#include <lib/bl_aux_params/bl_aux_params.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <drivers/console.h> +#include <drivers/gpio.h> +#include <libfdt.h> +#include <lib/coreboot.h> +#include <lib/mmio.h> +#include <plat/common/platform.h> + +#include <plat_params.h> +#include <plat_private.h> + +static struct bl_aux_gpio_info rst_gpio = { .index = UINT_MAX } ; +static struct bl_aux_gpio_info poweroff_gpio = { .index = UINT_MAX }; +static struct bl_aux_gpio_info suspend_gpio[10]; +uint32_t suspend_gpio_cnt; +static struct bl_aux_rk_apio_info suspend_apio; + +#if COREBOOT +static int dt_process_fdt(u_register_t param_from_bl2) +{ + return -ENODEV; +} +#else +static uint32_t rk_uart_base = PLAT_RK_UART_BASE; +static uint32_t rk_uart_baudrate = PLAT_RK_UART_BAUDRATE; +static uint32_t rk_uart_clock = PLAT_RK_UART_CLOCK; +#define FDT_BUFFER_SIZE 0x20000 +static uint64_t fdt_buffer[FDT_BUFFER_SIZE / 8]; + +void *plat_get_fdt(void) +{ + return &fdt_buffer[0]; +} + +static void plat_rockchip_dt_process_fdt_uart(void *fdt) +{ + const char *path_name = "/chosen"; + const char *prop_name = "stdout-path"; + int node_offset; + int stdout_path_len; + const char *stdout_path; + const char *separator; + const char *baud_start; + char serial_char; + int serial_no; + uint32_t uart_base; + uint32_t baud; + + node_offset = fdt_path_offset(fdt, path_name); + if (node_offset < 0) + return; + + stdout_path = fdt_getprop(fdt, node_offset, prop_name, + &stdout_path_len); + if (stdout_path == NULL) + return; + + /* + * We expect something like: + * "serial0:baudrate" + */ + if (strncmp("serial", stdout_path, 6) != 0) + return; + + serial_char = stdout_path[6]; + serial_no = serial_char - '0'; + + switch (serial_no) { + case 0: + uart_base = UART0_BASE; + break; + case 1: + uart_base = UART1_BASE; + break; + case 2: + uart_base = UART2_BASE; + break; +#ifdef UART3_BASE + case 3: + uart_base = UART3_BASE; + break; +#endif +#ifdef 
UART4_BASE + case 4: + uart_base = UART4_BASE; + break; +#endif +#ifdef UART5_BASE + case 5: + uart_base = UART5_BASE; + break; +#endif + default: + return; + } + + rk_uart_base = uart_base; + + separator = strchr(stdout_path, ':'); + if (!separator) + return; + + baud = 0; + baud_start = separator + 1; + while (*baud_start != '\0') { + /* + * uart binding is <baud>{<parity>{<bits>{...}}} + * So the baudrate either is the whole string, or + * we end in the parity characters. + */ + if (*baud_start == 'n' || *baud_start == 'o' || + *baud_start == 'e') + break; + + baud = baud * 10 + (*baud_start - '0'); + baud_start++; + } + + rk_uart_baudrate = baud; +} + +static int dt_process_fdt(u_register_t param_from_bl2) +{ + void *fdt = plat_get_fdt(); + int ret; + + ret = fdt_open_into((void *)param_from_bl2, fdt, FDT_BUFFER_SIZE); + if (ret < 0) + return ret; + + plat_rockchip_dt_process_fdt_uart(fdt); + + return 0; +} +#endif + +uint32_t rockchip_get_uart_base(void) +{ +#if COREBOOT + return coreboot_serial.baseaddr; +#else + return rk_uart_base; +#endif +} + +uint32_t rockchip_get_uart_baudrate(void) +{ +#if COREBOOT + return coreboot_serial.baud; +#else + return rk_uart_baudrate; +#endif +} + +uint32_t rockchip_get_uart_clock(void) +{ +#if COREBOOT + return coreboot_serial.input_hertz; +#else + return rk_uart_clock; +#endif +} + +struct bl_aux_gpio_info *plat_get_rockchip_gpio_reset(void) +{ + if (rst_gpio.index == UINT_MAX) + return NULL; + + return &rst_gpio; +} + +struct bl_aux_gpio_info *plat_get_rockchip_gpio_poweroff(void) +{ + if (poweroff_gpio.index == UINT_MAX) + return NULL; + + return &poweroff_gpio; +} + +struct bl_aux_gpio_info *plat_get_rockchip_suspend_gpio(uint32_t *count) +{ + *count = suspend_gpio_cnt; + + return &suspend_gpio[0]; +} + +struct bl_aux_rk_apio_info *plat_get_rockchip_suspend_apio(void) +{ + return &suspend_apio; +} + +static bool rk_aux_param_handler(struct bl_aux_param_header *param) +{ + /* Store platform parameters for later processing if needed. */ + switch (param->type) { + case BL_AUX_PARAM_RK_RESET_GPIO: + rst_gpio = ((struct bl_aux_param_gpio *)param)->gpio; + return true; + case BL_AUX_PARAM_RK_POWEROFF_GPIO: + poweroff_gpio = ((struct bl_aux_param_gpio *)param)->gpio; + return true; + case BL_AUX_PARAM_RK_SUSPEND_GPIO: + if (suspend_gpio_cnt >= ARRAY_SIZE(suspend_gpio)) { + ERROR("Exceeded the supported suspend GPIO number.\n"); + return true; + } + suspend_gpio[suspend_gpio_cnt++] = + ((struct bl_aux_param_gpio *)param)->gpio; + return true; + case BL_AUX_PARAM_RK_SUSPEND_APIO: + suspend_apio = ((struct bl_aux_param_rk_apio *)param)->apio; + return true; + } + + return false; +} + +void params_early_setup(u_register_t plat_param_from_bl2) +{ + int ret; + + /* + * Test if this is a FDT passed as a platform-specific parameter + * block. + */ + ret = dt_process_fdt(plat_param_from_bl2); + if (!ret) { + return; + } else if (ret != -FDT_ERR_BADMAGIC) { + /* + * If we found an FDT but couldn't parse it (e.g. corrupt, not + * enough space), return and don't attempt to parse the param + * as something else, since we know that will also fail. All + * we're doing is setting up UART, this doesn't need to be + * fatal. 
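For reference, plat_rockchip_dt_process_fdt_uart() above expects a /chosen stdout-path of the usual shape; a worked example (the exact property value is illustrative, not taken from this patch):

	/*
	 * stdout-path = "serial2:1500000n8"
	 *   -> serial_no = 2, so rk_uart_base = UART2_BASE
	 *   -> digits up to the parity char 'n', so rk_uart_baudrate = 1500000
	 */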
+ */ + WARN("%s: found FDT but could not parse: error %d\n", + __func__, ret); + return; + } + + bl_aux_params_parse(plat_param_from_bl2, rk_aux_param_handler); +} diff --git a/plat/rockchip/common/plat_pm.c b/plat/rockchip/common/plat_pm.c new file mode 100644 index 0000000..6926887 --- /dev/null +++ b/plat/rockchip/common/plat_pm.c @@ -0,0 +1,413 @@ +/* + * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/console.h> +#include <drivers/delay_timer.h> +#include <lib/psci/psci.h> + +#include <plat_private.h> + +/* Macros to read the rk power domain state */ +#define RK_CORE_PWR_STATE(state) \ + ((state)->pwr_domain_state[MPIDR_AFFLVL0]) +#define RK_CLUSTER_PWR_STATE(state) \ + ((state)->pwr_domain_state[MPIDR_AFFLVL1]) +#define RK_SYSTEM_PWR_STATE(state) \ + ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL]) + +static uintptr_t rockchip_sec_entrypoint; + +#pragma weak rockchip_soc_cores_pwr_dm_on +#pragma weak rockchip_soc_hlvl_pwr_dm_off +#pragma weak rockchip_soc_cores_pwr_dm_off +#pragma weak rockchip_soc_sys_pwr_dm_suspend +#pragma weak rockchip_soc_cores_pwr_dm_suspend +#pragma weak rockchip_soc_hlvl_pwr_dm_suspend +#pragma weak rockchip_soc_hlvl_pwr_dm_on_finish +#pragma weak rockchip_soc_cores_pwr_dm_on_finish +#pragma weak rockchip_soc_sys_pwr_dm_resume +#pragma weak rockchip_soc_hlvl_pwr_dm_resume +#pragma weak rockchip_soc_cores_pwr_dm_resume +#pragma weak rockchip_soc_soft_reset +#pragma weak rockchip_soc_system_off +#pragma weak rockchip_soc_sys_pd_pwr_dn_wfi +#pragma weak rockchip_soc_cores_pd_pwr_dn_wfi + +int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl, + plat_local_state_t lvl_state) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int rockchip_soc_cores_pwr_dm_off(void) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int rockchip_soc_sys_pwr_dm_suspend(void) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int rockchip_soc_cores_pwr_dm_suspend(void) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, + plat_local_state_t lvl_state) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl, + plat_local_state_t lvl_state) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int rockchip_soc_cores_pwr_dm_on_finish(void) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int rockchip_soc_sys_pwr_dm_resume(void) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, + plat_local_state_t lvl_state) +{ + return PSCI_E_NOT_SUPPORTED; +} + +int rockchip_soc_cores_pwr_dm_resume(void) +{ + return PSCI_E_NOT_SUPPORTED; +} + +void __dead2 rockchip_soc_soft_reset(void) +{ + while (1) + ; +} + +void __dead2 rockchip_soc_system_off(void) +{ + while (1) + ; +} + +void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi( + const psci_power_state_t *target_state) +{ + psci_power_down_wfi(); +} + +void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void) +{ + psci_power_down_wfi(); +} + +/******************************************************************************* + * Rockchip standard platform handler called to check the validity of the power + * state parameter. 
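rockchip_validate_power_state() below decodes its argument with the generic psci_get_pstate_*() helpers. A worked example, assuming the original (non-extended) PSCI power_state format in which the state type sits in bit 16 and the target power level in bits [25:24]:

	/*
	 * power_state = 0x0                      -> standby at level 0:
	 *                                           level 0 set to PLAT_MAX_RET_STATE
	 * power_state = (1U << 16) | (1U << 24)  -> powerdown up to level 1:
	 *                                           levels 0-1 set to PLAT_MAX_OFF_STATE,
	 *                                           higher levels to PLAT_MAX_RET_STATE
	 */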
+ ******************************************************************************/ +int rockchip_validate_power_state(unsigned int power_state, + psci_power_state_t *req_state) +{ + int pstate = psci_get_pstate_type(power_state); + int pwr_lvl = psci_get_pstate_pwrlvl(power_state); + int i; + + assert(req_state); + + if (pwr_lvl > PLAT_MAX_PWR_LVL) + return PSCI_E_INVALID_PARAMS; + + /* Sanity check the requested state */ + if (pstate == PSTATE_TYPE_STANDBY) { + /* + * It's probably to enter standby only on power level 0 + * ignore any other power level. + */ + if (pwr_lvl != MPIDR_AFFLVL0) + return PSCI_E_INVALID_PARAMS; + + req_state->pwr_domain_state[MPIDR_AFFLVL0] = + PLAT_MAX_RET_STATE; + } else { + for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++) + req_state->pwr_domain_state[i] = + PLAT_MAX_OFF_STATE; + + for (i = (pwr_lvl + 1); i <= PLAT_MAX_PWR_LVL; i++) + req_state->pwr_domain_state[i] = + PLAT_MAX_RET_STATE; + } + + /* We expect the 'state id' to be zero */ + if (psci_get_pstate_id(power_state)) + return PSCI_E_INVALID_PARAMS; + + return PSCI_E_SUCCESS; +} + +void rockchip_get_sys_suspend_power_state(psci_power_state_t *req_state) +{ + int i; + + for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++) + req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE; +} + +/******************************************************************************* + * RockChip handler called when a CPU is about to enter standby. + ******************************************************************************/ +void rockchip_cpu_standby(plat_local_state_t cpu_state) +{ + u_register_t scr; + + assert(cpu_state == PLAT_MAX_RET_STATE); + + scr = read_scr_el3(); + /* Enable PhysicalIRQ bit for NS world to wake the CPU */ + write_scr_el3(scr | SCR_IRQ_BIT); + isb(); + dsb(); + wfi(); + + /* + * Restore SCR to the original value, synchronisation of scr_el3 is + * done by eret while el3_exit to save some execution cycles. + */ + write_scr_el3(scr); +} + +/******************************************************************************* + * RockChip handler called when a power domain is about to be turned on. The + * mpidr determines the CPU to be turned on. + ******************************************************************************/ +int rockchip_pwr_domain_on(u_register_t mpidr) +{ + return rockchip_soc_cores_pwr_dm_on(mpidr, rockchip_sec_entrypoint); +} + +/******************************************************************************* + * RockChip handler called when a power domain is about to be turned off. The + * target_state encodes the power state that each level should transition to. + ******************************************************************************/ +void rockchip_pwr_domain_off(const psci_power_state_t *target_state) +{ + uint32_t lvl; + plat_local_state_t lvl_state; + int ret; + + assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE); + + plat_rockchip_gic_cpuif_disable(); + + if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) + plat_cci_disable(); + + rockchip_soc_cores_pwr_dm_off(); + + for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) { + lvl_state = target_state->pwr_domain_state[lvl]; + ret = rockchip_soc_hlvl_pwr_dm_off(lvl, lvl_state); + if (ret == PSCI_E_NOT_SUPPORTED) + break; + } +} + +/******************************************************************************* + * RockChip handler called when a power domain is about to be suspended. The + * target_state encodes the power state that each level should transition to. 
+ ******************************************************************************/ +void rockchip_pwr_domain_suspend(const psci_power_state_t *target_state) +{ + uint32_t lvl; + plat_local_state_t lvl_state; + int ret; + + if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE) + return; + + /* Prevent interrupts from spuriously waking up this cpu */ + plat_rockchip_gic_cpuif_disable(); + + if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) + rockchip_soc_sys_pwr_dm_suspend(); + else + rockchip_soc_cores_pwr_dm_suspend(); + + /* Perform the common cluster specific operations */ + if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) + plat_cci_disable(); + + if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) + return; + + for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) { + lvl_state = target_state->pwr_domain_state[lvl]; + ret = rockchip_soc_hlvl_pwr_dm_suspend(lvl, lvl_state); + if (ret == PSCI_E_NOT_SUPPORTED) + break; + } +} + +/******************************************************************************* + * RockChip handler called when a power domain has just been powered on after + * being turned off earlier. The target_state encodes the low power state that + * each level has woken up from. + ******************************************************************************/ +void rockchip_pwr_domain_on_finish(const psci_power_state_t *target_state) +{ + uint32_t lvl; + plat_local_state_t lvl_state; + int ret; + + assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE); + + for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) { + lvl_state = target_state->pwr_domain_state[lvl]; + ret = rockchip_soc_hlvl_pwr_dm_on_finish(lvl, lvl_state); + if (ret == PSCI_E_NOT_SUPPORTED) + break; + } + + rockchip_soc_cores_pwr_dm_on_finish(); + + /* Perform the common cluster specific operations */ + if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) { + /* Enable coherency if this cluster was off */ + plat_cci_enable(); + } + + /* Enable the gic cpu interface */ + plat_rockchip_gic_pcpu_init(); + + /* Program the gic per-cpu distributor or re-distributor interface */ + plat_rockchip_gic_cpuif_enable(); +} + +/******************************************************************************* + * RockChip handler called when a power domain has just been powered on after + * having been suspended earlier. The target_state encodes the low power state + * that each level has woken up from. + * TODO: At the moment we reuse the on finisher and reinitialize the secure + * context. Need to implement a separate suspend finisher. + ******************************************************************************/ +void rockchip_pwr_domain_suspend_finish(const psci_power_state_t *target_state) +{ + uint32_t lvl; + plat_local_state_t lvl_state; + int ret; + + /* Nothing to be done on waking up from retention from CPU level */ + if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE) + return; + + if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) { + rockchip_soc_sys_pwr_dm_resume(); + goto comm_finish; + } + + for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) { + lvl_state = target_state->pwr_domain_state[lvl]; + ret = rockchip_soc_hlvl_pwr_dm_resume(lvl, lvl_state); + if (ret == PSCI_E_NOT_SUPPORTED) + break; + } + + rockchip_soc_cores_pwr_dm_resume(); + + /* + * Program the gic per-cpu distributor or re-distributor interface. 
+ * For sys power domain operation, resuming of the gic needs to operate + * in rockchip_soc_sys_pwr_dm_resume(), according to the sys power mode + * implements. + */ + plat_rockchip_gic_cpuif_enable(); + +comm_finish: + /* Perform the common cluster specific operations */ + if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) { + /* Enable coherency if this cluster was off */ + plat_cci_enable(); + } +} + +/******************************************************************************* + * RockChip handlers to reboot the system + ******************************************************************************/ +static void __dead2 rockchip_system_reset(void) +{ + rockchip_soc_soft_reset(); +} + +/******************************************************************************* + * RockChip handlers to power off the system + ******************************************************************************/ +static void __dead2 rockchip_system_poweroff(void) +{ + rockchip_soc_system_off(); +} + +static void __dead2 rockchip_pd_pwr_down_wfi( + const psci_power_state_t *target_state) +{ + if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) + rockchip_soc_sys_pd_pwr_dn_wfi(); + else + rockchip_soc_cores_pd_pwr_dn_wfi(target_state); +} + +/******************************************************************************* + * Export the platform handlers via plat_rockchip_psci_pm_ops. The rockchip + * standard + * platform layer will take care of registering the handlers with PSCI. + ******************************************************************************/ +const plat_psci_ops_t plat_rockchip_psci_pm_ops = { + .cpu_standby = rockchip_cpu_standby, + .pwr_domain_on = rockchip_pwr_domain_on, + .pwr_domain_off = rockchip_pwr_domain_off, + .pwr_domain_suspend = rockchip_pwr_domain_suspend, + .pwr_domain_on_finish = rockchip_pwr_domain_on_finish, + .pwr_domain_suspend_finish = rockchip_pwr_domain_suspend_finish, + .pwr_domain_pwr_down_wfi = rockchip_pd_pwr_down_wfi, + .system_reset = rockchip_system_reset, + .system_off = rockchip_system_poweroff, + .validate_power_state = rockchip_validate_power_state, + .get_sys_suspend_power_state = rockchip_get_sys_suspend_power_state +}; + +int plat_setup_psci_ops(uintptr_t sec_entrypoint, + const plat_psci_ops_t **psci_ops) +{ + *psci_ops = &plat_rockchip_psci_pm_ops; + rockchip_sec_entrypoint = sec_entrypoint; + return 0; +} + +uintptr_t plat_get_sec_entrypoint(void) +{ + assert(rockchip_sec_entrypoint); + return rockchip_sec_entrypoint; +} diff --git a/plat/rockchip/common/plat_topology.c b/plat/rockchip/common/plat_topology.c new file mode 100644 index 0000000..4987eeb --- /dev/null +++ b/plat/rockchip/common/plat_topology.c @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <platform_def.h> + +#include <arch.h> +#include <lib/psci/psci.h> + +#include <plat_private.h> + +/******************************************************************************* + * This function returns the RockChip default topology tree information. 
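plat_core_pos_by_mpidr() below applies the same cluster folding as the assembly plat_my_core_pos helpers. A worked example, assuming PLAT_RK_CLST_TO_CPUID_SHIFT is 6 and the cluster ID held in MPIDR Aff1 (typical of the two-cluster parts such as RK3399; these values are assumptions, not taken from this hunk):

	/*
	 * mpidr      = 0x0102                       cluster 1, cpu 2
	 * cluster_id = mpidr & MPIDR_CLUSTER_MASK   = 0x0100
	 * cpu_id     = mpidr & MPIDR_AFFLVL_MASK    = 0x2
	 * core index = cpu_id + (cluster_id >> 6)   = 2 + 4 = 6
	 */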
+ ******************************************************************************/ +const unsigned char *plat_get_power_domain_tree_desc(void) +{ + return rockchip_power_domain_tree_desc; +} + +int plat_core_pos_by_mpidr(u_register_t mpidr) +{ + unsigned int cluster_id, cpu_id; + + cpu_id = mpidr & MPIDR_AFFLVL_MASK; +#ifdef PLAT_RK_MPIDR_CLUSTER_MASK + cluster_id = mpidr & PLAT_RK_MPIDR_CLUSTER_MASK; +#else + cluster_id = mpidr & MPIDR_CLUSTER_MASK; +#endif + + cpu_id += (cluster_id >> PLAT_RK_CLST_TO_CPUID_SHIFT); + + if (cpu_id >= PLATFORM_CORE_COUNT) + return -1; + + return cpu_id; +} diff --git a/plat/rockchip/common/pmusram/cpus_on_fixed_addr.S b/plat/rockchip/common/pmusram/cpus_on_fixed_addr.S new file mode 100644 index 0000000..6cea2ea --- /dev/null +++ b/plat/rockchip/common/pmusram/cpus_on_fixed_addr.S @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <platform_def.h> +#include <cpus_on_fixed_addr.h> + + .globl sys_sleep_flag_sram + .globl pmu_cpuson_entrypoint + + .macro pmusram_entry_func _name + .section .pmusram.entry, "ax" + .type \_name, %function + .cfi_startproc + \_name: + .endm + +pmusram_entry_func pmu_cpuson_entrypoint + adr x5, sys_sleep_flag_sram + ldr w2, [x5, #PSRAM_DT_PM_FLAG] + + tbz w2, #PM_WARM_BOOT_SHT, sys_resume_sp + ldr x1, =platform_cpu_warmboot + br x1 +sys_resume_sp: + adr x5, sys_sleep_flag_sram + ldr x1, [x5, #PSRAM_DT_SP] + mov sp, x1 +ddr_resume: + ldr x1, [x5, #PSRAM_DT_DDR_FUNC] + cmp x1, #0 + b.eq sys_resume + blr x1 +sys_resume: + ldr x1, =bl31_warm_entrypoint + br x1 +endfunc pmu_cpuson_entrypoint + + .section .pmusram.data, "a" + .align 3 +sys_sleep_flag_sram: + .rept PSRAM_DT_SIZE_WORDS + .word 0 + .endr diff --git a/plat/rockchip/common/pmusram/cpus_on_fixed_addr.h b/plat/rockchip/common/pmusram/cpus_on_fixed_addr.h new file mode 100644 index 0000000..bcd2a7c --- /dev/null +++ b/plat/rockchip/common/pmusram/cpus_on_fixed_addr.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. 
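As a worked example of the mapping in plat_core_pos_by_mpidr() above, take a hypothetical two-cluster part with PLAT_RK_CLST_TO_CPUID_SHIFT equal to 6: MPIDR 0x101 (cluster 1, core 1) gives cluster_id = 0x100 and cpu_id = 1 + (0x100 >> 6) = 5, so the second core of the second cluster lands at linear position 5, and any result at or beyond PLATFORM_CORE_COUNT is rejected with -1.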
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __CPU_ON_FIXED_ADDR_H__ +#define __CPU_ON_FIXED_ADDR_H__ + +/***************************************************************************** + * define data offset in struct psram_data + *****************************************************************************/ +#define PSRAM_DT_SP 0x0 +#define PSRAM_DT_DDR_FUNC 0x8 +#define PSRAM_DT_DDR_DATA 0x10 +#define PSRAM_DT_DDRFLAG 0x18 +#define PSRAM_DT_MPIDR 0x1c +#define PSRAM_DT_PM_FLAG 0x20 +#define PSRAM_DT_END 0x24 + +/* reserve 4 byte */ +#define PSRAM_DT_END_RES4 (PSRAM_DT_END + 4) + +#define PSRAM_DT_SIZE_WORDS (PSRAM_DT_END_RES4 / 4) + +#define PM_WARM_BOOT_SHT 0 +#define PM_WARM_BOOT_BIT (1 << PM_WARM_BOOT_SHT) + +#ifndef __ASSEMBLER__ + +struct psram_data_t { + uint64_t sp; + uint64_t ddr_func; + uint64_t ddr_data; + uint32_t ddr_flag; + uint32_t boot_mpidr; + uint32_t pm_flag; +}; + +CASSERT(__builtin_offsetof(struct psram_data_t, sp) == PSRAM_DT_SP, + assert_psram_dt_sp_offset_mistmatch); +CASSERT(__builtin_offsetof(struct psram_data_t, ddr_func) == PSRAM_DT_DDR_FUNC, + assert_psram_dt_ddr_func_offset_mistmatch); +CASSERT(__builtin_offsetof(struct psram_data_t, ddr_data) == PSRAM_DT_DDR_DATA, + assert_psram_dt_ddr_data_offset_mistmatch); +CASSERT(__builtin_offsetof(struct psram_data_t, ddr_flag) == PSRAM_DT_DDRFLAG, + assert_psram_dt_ddr_flag_offset_mistmatch); +CASSERT(__builtin_offsetof(struct psram_data_t, boot_mpidr) == PSRAM_DT_MPIDR, + assert_psram_dt_mpidr_offset_mistmatch); + +extern struct psram_data_t sys_sleep_flag_sram; + +#endif /* __ASSEMBLER__ */ + +#endif diff --git a/plat/rockchip/common/rockchip_gicv2.c b/plat/rockchip/common/rockchip_gicv2.c new file mode 100644 index 0000000..8db2b30 --- /dev/null +++ b/plat/rockchip/common/rockchip_gicv2.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <platform_def.h> + +#include <common/bl_common.h> +#include <common/interrupt_props.h> +#include <drivers/arm/gicv2.h> +#include <lib/utils.h> + +/****************************************************************************** + * The following functions are defined as weak to allow a platform to override + * the way the GICv2 driver is initialised and used. + *****************************************************************************/ +#pragma weak plat_rockchip_gic_driver_init +#pragma weak plat_rockchip_gic_init +#pragma weak plat_rockchip_gic_cpuif_enable +#pragma weak plat_rockchip_gic_cpuif_disable +#pragma weak plat_rockchip_gic_pcpu_init + +/****************************************************************************** + * List of interrupts. + *****************************************************************************/ +static const interrupt_prop_t g0_interrupt_props[] = { + PLAT_RK_GICV2_G0_IRQS +}; + +/* + * Ideally `rockchip_gic_data` structure definition should be a `const` but it + * is kept as modifiable for overwriting with different GICD and GICC base when + * running on FVP with VE memory map. + */ +gicv2_driver_data_t rockchip_gic_data = { + .gicd_base = PLAT_RK_GICD_BASE, + .gicc_base = PLAT_RK_GICC_BASE, + .interrupt_props = g0_interrupt_props, + .interrupt_props_num = ARRAY_SIZE(g0_interrupt_props), +}; + +/****************************************************************************** + * RockChip common helper to initialize the GICv2 only driver. 
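The psram_data_t layout above is the C half of a handshake with pmu_cpuson_entrypoint in cpus_on_fixed_addr.S: the warm-boot branch is only taken when PM_WARM_BOOT_BIT is set in pm_flag. A sketch of how firmware arms that path before a system suspend (PSRAM_SP_TOP stands in for whatever stack top a platform reserves; the px30 PMU driver later in this patch does the equivalent):

static void arm_warm_boot_path(void)
{
	struct psram_data_t *cfg = &sys_sleep_flag_sram;

	cfg->sp = PSRAM_SP_TOP;            /* stack for the resume path */
	cfg->ddr_func = 0;                 /* no SRAM-resident DDR hook */
	cfg->pm_flag |= PM_WARM_BOOT_BIT;  /* tested by the tbz in the .S */
}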
+ *****************************************************************************/ +void plat_rockchip_gic_driver_init(void) +{ + gicv2_driver_init(&rockchip_gic_data); +} + +void plat_rockchip_gic_init(void) +{ + gicv2_distif_init(); + gicv2_pcpu_distif_init(); + gicv2_cpuif_enable(); +} + +/****************************************************************************** + * RockChip common helper to enable the GICv2 CPU interface + *****************************************************************************/ +void plat_rockchip_gic_cpuif_enable(void) +{ + gicv2_cpuif_enable(); +} + +/****************************************************************************** + * RockChip common helper to disable the GICv2 CPU interface + *****************************************************************************/ +void plat_rockchip_gic_cpuif_disable(void) +{ + gicv2_cpuif_disable(); +} + +/****************************************************************************** + * RockChip common helper to initialize the per cpu distributor interface + * in GICv2 + *****************************************************************************/ +void plat_rockchip_gic_pcpu_init(void) +{ + gicv2_pcpu_distif_init(); +} diff --git a/plat/rockchip/common/rockchip_gicv3.c b/plat/rockchip/common/rockchip_gicv3.c new file mode 100644 index 0000000..edae2ef --- /dev/null +++ b/plat/rockchip/common/rockchip_gicv3.c @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <platform_def.h> + +#include <common/bl_common.h> +#include <common/interrupt_props.h> +#include <drivers/arm/gicv3.h> +#include <lib/utils.h> +#include <plat/common/platform.h> + +/****************************************************************************** + * The following functions are defined as weak to allow a platform to override + * the way the GICv3 driver is initialised and used. + *****************************************************************************/ +#pragma weak plat_rockchip_gic_driver_init +#pragma weak plat_rockchip_gic_init +#pragma weak plat_rockchip_gic_cpuif_enable +#pragma weak plat_rockchip_gic_cpuif_disable +#pragma weak plat_rockchip_gic_pcpu_init + +/* The GICv3 driver only needs to be initialized in EL3 */ +uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT]; + +static const interrupt_prop_t g01s_interrupt_props[] = { + PLAT_RK_GICV3_G0_IRQS, + PLAT_RK_GICV3_G1S_IRQS +}; + +static unsigned int plat_rockchip_mpidr_to_core_pos(unsigned long mpidr) +{ + return (unsigned int)plat_core_pos_by_mpidr(mpidr); +} + +const gicv3_driver_data_t rockchip_gic_data = { + .gicd_base = PLAT_RK_GICD_BASE, + .gicr_base = PLAT_RK_GICR_BASE, + .interrupt_props = g01s_interrupt_props, + .interrupt_props_num = ARRAY_SIZE(g01s_interrupt_props), + .rdistif_num = PLATFORM_CORE_COUNT, + .rdistif_base_addrs = rdistif_base_addrs, + .mpidr_to_core_pos = plat_rockchip_mpidr_to_core_pos, +}; + +void plat_rockchip_gic_driver_init(void) +{ + /* + * The GICv3 driver is initialized in EL3 and does not need + * to be initialized again in SEL1. This is because the S-EL1 + * can use GIC system registers to manage interrupts and does + * not need GIC interface base addresses to be configured. + */ +#ifdef IMAGE_BL31 + gicv3_driver_init(&rockchip_gic_data); +#endif +} + +/****************************************************************************** + * RockChip common helper to initialize the GIC. 
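PLAT_RK_GICV3_G0_IRQS and PLAT_RK_GICV3_G1S_IRQS are expected to expand to comma-separated interrupt_prop_t initialisers supplied by each SoC's platform_def.h. A hypothetical definition, with interrupt numbers chosen purely for illustration, might look like:

#define PLAT_RK_GICV3_G0_IRQS						\
	INTR_PROP_DESC(8, GIC_HIGHEST_SEC_PRIORITY,			\
		       INTR_GROUP0, GIC_INTR_CFG_EDGE)

#define PLAT_RK_GICV3_G1S_IRQS						\
	INTR_PROP_DESC(29, GIC_HIGHEST_SEC_PRIORITY,			\
		       INTR_GROUP1S, GIC_INTR_CFG_LEVEL)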
Only invoked + * by BL31 + *****************************************************************************/ +void plat_rockchip_gic_init(void) +{ + gicv3_distif_init(); + gicv3_rdistif_init(plat_my_core_pos()); + gicv3_cpuif_enable(plat_my_core_pos()); +} + +/****************************************************************************** + * RockChip common helper to enable the GIC CPU interface + *****************************************************************************/ +void plat_rockchip_gic_cpuif_enable(void) +{ + gicv3_cpuif_enable(plat_my_core_pos()); +} + +/****************************************************************************** + * RockChip common helper to disable the GIC CPU interface + *****************************************************************************/ +void plat_rockchip_gic_cpuif_disable(void) +{ + gicv3_cpuif_disable(plat_my_core_pos()); +} + +/****************************************************************************** + * RockChip common helper to initialize the per-cpu redistributor interface + * in GICv3 + *****************************************************************************/ +void plat_rockchip_gic_pcpu_init(void) +{ + gicv3_rdistif_init(plat_my_core_pos()); +} diff --git a/plat/rockchip/common/rockchip_sip_svc.c b/plat/rockchip/common/rockchip_sip_svc.c new file mode 100644 index 0000000..27ef042 --- /dev/null +++ b/plat/rockchip/common/rockchip_sip_svc.c @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <common/debug.h> +#include <common/runtime_svc.h> +#include <lib/mmio.h> +#include <tools_share/uuid.h> + +#include <plat_sip_calls.h> +#include <rockchip_sip_svc.h> + +/* Rockchip SiP Service UUID */ +DEFINE_SVC_UUID2(rk_sip_svc_uid, + 0xe2c76fe8, 0x3e31, 0xe611, 0xb7, 0x0d, + 0x8f, 0x88, 0xee, 0x74, 0x7b, 0x72); + +#pragma weak rockchip_plat_sip_handler +uintptr_t rockchip_plat_sip_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid); + SMC_RET1(handle, SMC_UNK); +} + +/* + * This function is responsible for handling all SiP calls from the NS world + */ +uintptr_t sip_smc_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + uint32_t ns; + + /* Determine which security state this SMC originated from */ + ns = is_caller_non_secure(flags); + if (!ns) + SMC_RET1(handle, SMC_UNK); + + switch (smc_fid) { + case SIP_SVC_CALL_COUNT: + /* Return the number of Rockchip SiP Service Calls. 
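Because rockchip_plat_sip_handler above is declared weak, a SoC port can claim its own function IDs without touching this dispatcher. A sketch of such an override; RK_SIP_EXAMPLE_FID and its echo behaviour are invented purely for illustration:

#include <common/debug.h>
#include <common/runtime_svc.h>

#define RK_SIP_EXAMPLE_FID	0x82000010	/* made-up function ID */

uintptr_t rockchip_plat_sip_handler(uint32_t smc_fid,
				    u_register_t x1, u_register_t x2,
				    u_register_t x3, u_register_t x4,
				    void *cookie, void *handle,
				    u_register_t flags)
{
	switch (smc_fid) {
	case RK_SIP_EXAMPLE_FID:
		/* Trivial demo: hand back the sum of the first two args. */
		SMC_RET2(handle, SMC_OK, x1 + x2);
	default:
		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}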
*/ + SMC_RET1(handle, + RK_COMMON_SIP_NUM_CALLS + RK_PLAT_SIP_NUM_CALLS); + + case SIP_SVC_UID: + /* Return UID to the caller */ + SMC_UUID_RET(handle, rk_sip_svc_uid); + + case SIP_SVC_VERSION: + /* Return the version of current implementation */ + SMC_RET2(handle, RK_SIP_SVC_VERSION_MAJOR, + RK_SIP_SVC_VERSION_MINOR); + + default: + return rockchip_plat_sip_handler(smc_fid, x1, x2, x3, x4, + cookie, handle, flags); + } +} + +/* Define a runtime service descriptor for fast SMC calls */ +DECLARE_RT_SVC( + rockchip_sip_svc, + OEN_SIP_START, + OEN_SIP_END, + SMC_TYPE_FAST, + NULL, + sip_smc_handler +); diff --git a/plat/rockchip/common/rockchip_stack_protector.c b/plat/rockchip/common/rockchip_stack_protector.c new file mode 100644 index 0000000..1898977 --- /dev/null +++ b/plat/rockchip/common/rockchip_stack_protector.c @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdint.h> + +#include <arch_helpers.h> +#include <plat/common/platform.h> + +#define RANDOM_CANARY_VALUE ((u_register_t) 3288484550995823360ULL) + +u_register_t plat_get_stack_protector_canary(void) +{ + /* + * Ideally, a random number should be returned instead of the + * combination of a timer's value and a compile-time constant. + * As the virt platform does not have any random number generator, + * this is better than nothing but not necessarily really secure. + */ + return RANDOM_CANARY_VALUE ^ read_cntpct_el0(); +} + diff --git a/plat/rockchip/common/sp_min_plat_setup.c b/plat/rockchip/common/sp_min_plat_setup.c new file mode 100644 index 0000000..0237b16 --- /dev/null +++ b/plat/rockchip/common/sp_min_plat_setup.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <common/desc_image_load.h> +#include <drivers/console.h> +#include <drivers/generic_delay_timer.h> +#include <drivers/ti/uart/uart_16550.h> +#include <lib/mmio.h> +#include <plat_private.h> +#include <plat/common/platform.h> + +static entry_point_info_t bl33_ep_info; + +/******************************************************************************* + * Return a pointer to the 'entry_point_info' structure of the next image for + * the security state specified. BL33 corresponds to the non-secure image type. + * A NULL pointer is returned if the image does not exist. + ******************************************************************************/ +entry_point_info_t *sp_min_plat_get_bl33_ep_info(void) +{ + entry_point_info_t *next_image_info; + + next_image_info = &bl33_ep_info; + + if (next_image_info->pc == 0U) { + return NULL; + } + + return next_image_info; +} + +#pragma weak params_early_setup +void params_early_setup(u_register_t plat_param_from_bl2) +{ +} + +unsigned int plat_is_my_cpu_primary(void); + +/******************************************************************************* + * Perform any BL32 specific platform actions. 
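The canary above mixes a compile-time constant with the generic timer count, which is better than nothing but not truly random. A port whose SoC has a TRNG could replace that file with something along these lines; plat_rockchip_trng_read() is a hypothetical helper named only to show the shape of such a variant:

#include <stdint.h>
#include <arch_helpers.h>
#include <plat/common/platform.h>

/* Hypothetical: returns 64 random bits from a SoC TRNG. */
uint64_t plat_rockchip_trng_read(void);

u_register_t plat_get_stack_protector_canary(void)
{
	return (u_register_t)plat_rockchip_trng_read() ^ read_cntpct_el0();
}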
+ ******************************************************************************/ +void sp_min_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + static console_t console; + + params_early_setup(arg1); + + if (rockchip_get_uart_base() != 0) + console_16550_register(rockchip_get_uart_base(), + rockchip_get_uart_clock(), + rockchip_get_uart_baudrate(), &console); + + VERBOSE("sp_min_setup\n"); + + bl31_params_parse_helper(arg0, NULL, &bl33_ep_info); +} + +/******************************************************************************* + * Perform any sp_min platform setup code + ******************************************************************************/ +void sp_min_platform_setup(void) +{ + generic_delay_timer_init(); + plat_rockchip_soc_init(); + + /* Initialize the gic cpu and distributor interfaces */ + plat_rockchip_gic_driver_init(); + plat_rockchip_gic_init(); + plat_rockchip_pmu_init(); +} + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. At the + * moment this is only intializes the mmu in a quick and dirty way. + ******************************************************************************/ +void sp_min_plat_arch_setup(void) +{ + plat_cci_init(); + plat_cci_enable(); + + plat_configure_mmu_svc_mon(BL_CODE_BASE, + BL_COHERENT_RAM_END - BL_CODE_BASE, + BL_CODE_BASE, + BL_CODE_END, + BL_COHERENT_RAM_BASE, + BL_COHERENT_RAM_END); +} + +void sp_min_plat_fiq_handler(uint32_t id) +{ + VERBOSE("[sp_min] interrupt #%d\n", id); +} diff --git a/plat/rockchip/px30/drivers/pmu/plat_pmu_macros.S b/plat/rockchip/px30/drivers/pmu/plat_pmu_macros.S new file mode 100644 index 0000000..a757621 --- /dev/null +++ b/plat/rockchip/px30/drivers/pmu/plat_pmu_macros.S @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <platform_def.h> + +.globl clst_warmboot_data + +.macro func_rockchip_clst_warmboot +.endm + +.macro rockchip_clst_warmboot_data +clst_warmboot_data: + .rept PLATFORM_CLUSTER_COUNT + .word 0 + .endr +.endm diff --git a/plat/rockchip/px30/drivers/pmu/pmu.c b/plat/rockchip/px30/drivers/pmu/pmu.c new file mode 100644 index 0000000..5f4e64f --- /dev/null +++ b/plat/rockchip/px30/drivers/pmu/pmu.c @@ -0,0 +1,1071 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <bl31/bl31.h> +#include <common/debug.h> +#include <drivers/console.h> +#include <drivers/delay_timer.h> +#include <lib/bakery_lock.h> +#include <lib/mmio.h> +#include <plat/common/platform.h> + +#include <cpus_on_fixed_addr.h> +#include <plat_private.h> +#include <pmu.h> +#include <px30_def.h> +#include <secure.h> +#include <soc.h> + +DEFINE_BAKERY_LOCK(rockchip_pd_lock); +#define rockchip_pd_lock_init() bakery_lock_init(&rockchip_pd_lock) +#define rockchip_pd_lock_get() bakery_lock_get(&rockchip_pd_lock) +#define rockchip_pd_lock_rls() bakery_lock_release(&rockchip_pd_lock) + +static struct psram_data_t *psram_boot_cfg = + (struct psram_data_t *)&sys_sleep_flag_sram; + +/* + * There are two ways to powering on or off on core. 
+ * 1) Control it power domain into on or off in PMU_PWRDN_CON reg, + * it is core_pwr_pd mode + * 2) Enable the core power manage in PMU_CORE_PM_CON reg, + * then, if the core enter into wfi, it power domain will be + * powered off automatically. it is core_pwr_wfi or core_pwr_wfi_int mode + * so we need core_pm_cfg_info to distinguish which method be used now. + */ + +static uint32_t cores_pd_cfg_info[PLATFORM_CORE_COUNT] +#if USE_COHERENT_MEM +__attribute__ ((section("tzfw_coherent_mem"))) +#endif +; + +struct px30_sleep_ddr_data { + uint32_t clk_sel0; + uint32_t cru_mode_save; + uint32_t cru_pmu_mode_save; + uint32_t ddrc_hwlpctl; + uint32_t ddrc_pwrctrl; + uint32_t ddrgrf_con0; + uint32_t ddrgrf_con1; + uint32_t ddrstdby_con0; + uint32_t gpio0b_iomux; + uint32_t gpio0c_iomux; + uint32_t pmu_pwrmd_core_l; + uint32_t pmu_pwrmd_core_h; + uint32_t pmu_pwrmd_cmm_l; + uint32_t pmu_pwrmd_cmm_h; + uint32_t pmu_wkup_cfg2_l; + uint32_t pmu_cru_clksel_con0; + uint32_t pmugrf_soc_con0; + uint32_t pmusgrf_soc_con0; + uint32_t pmic_slp_iomux; + uint32_t pgrf_pvtm_con[2]; + uint32_t cru_clk_gate[CRU_CLKGATES_CON_CNT]; + uint32_t cru_pmu_clk_gate[CRU_PMU_CLKGATE_CON_CNT]; + uint32_t cru_plls_con_save[END_PLL_ID][PLL_CON_CNT]; + uint32_t cpu_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t gpu_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t isp_128m_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t isp_rd_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t isp_wr_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t isp_m1_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t vip_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t rga_rd_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t rga_wr_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t vop_m0_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t vop_m1_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t vpu_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t vpu_r128_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t dcf_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t dmac_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t crypto_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t gmac_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t emmc_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t nand_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t sdio_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t sfc_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t sdmmc_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t usb_host_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t usb_otg_qos[CPU_AXI_QOS_NUM_REGS]; +}; + +static struct px30_sleep_ddr_data ddr_data +#if USE_COHERENT_MEM +__attribute__ ((section("tzfw_coherent_mem"))) +#endif +; + +static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id) +{ + assert(cpu_id < PLATFORM_CORE_COUNT); + return cores_pd_cfg_info[cpu_id]; +} + +static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value) +{ + assert(cpu_id < PLATFORM_CORE_COUNT); + cores_pd_cfg_info[cpu_id] = value; +#if !USE_COHERENT_MEM + flush_dcache_range((uintptr_t)&cores_pd_cfg_info[cpu_id], + sizeof(uint32_t)); +#endif +} + +static inline uint32_t pmu_power_domain_st(uint32_t pd) +{ + return mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & BIT(pd) ? 
+ pmu_pd_off : + pmu_pd_on; +} + +static int pmu_power_domain_ctr(uint32_t pd, uint32_t pd_state) +{ + uint32_t loop = 0; + int ret = 0; + + rockchip_pd_lock_get(); + + mmio_write_32(PMU_BASE + PMU_PWRDN_CON, + BITS_WITH_WMASK(pd_state, 0x1, pd)); + dsb(); + + while ((pmu_power_domain_st(pd) != pd_state) && (loop < PD_CTR_LOOP)) { + udelay(1); + loop++; + } + + if (pmu_power_domain_st(pd) != pd_state) { + WARN("%s: %d, %d, error!\n", __func__, pd, pd_state); + ret = -EINVAL; + } + + rockchip_pd_lock_rls(); + + return ret; +} + +static inline uint32_t pmu_bus_idle_st(uint32_t bus) +{ + return !!((mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & BIT(bus)) && + (mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & BIT(bus + 16))); +} + +static void pmu_bus_idle_req(uint32_t bus, uint32_t state) +{ + uint32_t wait_cnt = 0; + + mmio_write_32(PMU_BASE + PMU_BUS_IDLE_REQ, + BITS_WITH_WMASK(state, 0x1, bus)); + + while (pmu_bus_idle_st(bus) != state && + wait_cnt < BUS_IDLE_LOOP) { + udelay(1); + wait_cnt++; + } + + if (pmu_bus_idle_st(bus) != state) + WARN("%s:idle_st=0x%x, bus_id=%d\n", + __func__, mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), bus); +} + +static void qos_save(void) +{ + /* scu powerdomain will power off, so cpu qos should be saved */ + SAVE_QOS(ddr_data.cpu_qos, CPU); + + if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) + SAVE_QOS(ddr_data.gpu_qos, GPU); + if (pmu_power_domain_st(PD_VI) == pmu_pd_on) { + SAVE_QOS(ddr_data.isp_128m_qos, ISP_128M); + SAVE_QOS(ddr_data.isp_rd_qos, ISP_RD); + SAVE_QOS(ddr_data.isp_wr_qos, ISP_WR); + SAVE_QOS(ddr_data.isp_m1_qos, ISP_M1); + SAVE_QOS(ddr_data.vip_qos, VIP); + } + if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { + SAVE_QOS(ddr_data.rga_rd_qos, RGA_RD); + SAVE_QOS(ddr_data.rga_wr_qos, RGA_WR); + SAVE_QOS(ddr_data.vop_m0_qos, VOP_M0); + SAVE_QOS(ddr_data.vop_m1_qos, VOP_M1); + } + if (pmu_power_domain_st(PD_VPU) == pmu_pd_on) { + SAVE_QOS(ddr_data.vpu_qos, VPU); + SAVE_QOS(ddr_data.vpu_r128_qos, VPU_R128); + } + if (pmu_power_domain_st(PD_MMC_NAND) == pmu_pd_on) { + SAVE_QOS(ddr_data.emmc_qos, EMMC); + SAVE_QOS(ddr_data.nand_qos, NAND); + SAVE_QOS(ddr_data.sdio_qos, SDIO); + SAVE_QOS(ddr_data.sfc_qos, SFC); + } + if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) + SAVE_QOS(ddr_data.gmac_qos, GMAC); + if (pmu_power_domain_st(PD_CRYPTO) == pmu_pd_on) + SAVE_QOS(ddr_data.crypto_qos, CRYPTO); + if (pmu_power_domain_st(PD_SDCARD) == pmu_pd_on) + SAVE_QOS(ddr_data.sdmmc_qos, SDMMC); + if (pmu_power_domain_st(PD_USB) == pmu_pd_on) { + SAVE_QOS(ddr_data.usb_host_qos, USB_HOST); + SAVE_QOS(ddr_data.usb_otg_qos, USB_OTG); + } +} + +static void qos_restore(void) +{ + RESTORE_QOS(ddr_data.cpu_qos, CPU); + + if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) + RESTORE_QOS(ddr_data.gpu_qos, GPU); + if (pmu_power_domain_st(PD_VI) == pmu_pd_on) { + RESTORE_QOS(ddr_data.isp_128m_qos, ISP_128M); + RESTORE_QOS(ddr_data.isp_rd_qos, ISP_RD); + RESTORE_QOS(ddr_data.isp_wr_qos, ISP_WR); + RESTORE_QOS(ddr_data.isp_m1_qos, ISP_M1); + RESTORE_QOS(ddr_data.vip_qos, VIP); + } + if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { + RESTORE_QOS(ddr_data.rga_rd_qos, RGA_RD); + RESTORE_QOS(ddr_data.rga_wr_qos, RGA_WR); + RESTORE_QOS(ddr_data.vop_m0_qos, VOP_M0); + RESTORE_QOS(ddr_data.vop_m1_qos, VOP_M1); + } + if (pmu_power_domain_st(PD_VPU) == pmu_pd_on) { + RESTORE_QOS(ddr_data.vpu_qos, VPU); + RESTORE_QOS(ddr_data.vpu_r128_qos, VPU_R128); + } + if (pmu_power_domain_st(PD_MMC_NAND) == pmu_pd_on) { + RESTORE_QOS(ddr_data.emmc_qos, EMMC); + RESTORE_QOS(ddr_data.nand_qos, NAND); + 
RESTORE_QOS(ddr_data.sdio_qos, SDIO); + RESTORE_QOS(ddr_data.sfc_qos, SFC); + } + if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) + RESTORE_QOS(ddr_data.gmac_qos, GMAC); + if (pmu_power_domain_st(PD_CRYPTO) == pmu_pd_on) + RESTORE_QOS(ddr_data.crypto_qos, CRYPTO); + if (pmu_power_domain_st(PD_SDCARD) == pmu_pd_on) + RESTORE_QOS(ddr_data.sdmmc_qos, SDMMC); + if (pmu_power_domain_st(PD_USB) == pmu_pd_on) { + RESTORE_QOS(ddr_data.usb_host_qos, USB_HOST); + RESTORE_QOS(ddr_data.usb_otg_qos, USB_OTG); + } +} + +static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state) +{ + uint32_t state; + + if (pmu_power_domain_st(pd_id) == pd_state) + goto out; + + if (pd_state == pmu_pd_on) + pmu_power_domain_ctr(pd_id, pd_state); + + state = (pd_state == pmu_pd_off) ? bus_idle : bus_active; + + switch (pd_id) { + case PD_GPU: + pmu_bus_idle_req(BUS_ID_GPU, state); + break; + case PD_VI: + pmu_bus_idle_req(BUS_ID_VI, state); + break; + case PD_VO: + pmu_bus_idle_req(BUS_ID_VO, state); + break; + case PD_VPU: + pmu_bus_idle_req(BUS_ID_VPU, state); + break; + case PD_MMC_NAND: + pmu_bus_idle_req(BUS_ID_MMC, state); + break; + case PD_GMAC: + pmu_bus_idle_req(BUS_ID_GMAC, state); + break; + case PD_CRYPTO: + pmu_bus_idle_req(BUS_ID_CRYPTO, state); + break; + case PD_SDCARD: + pmu_bus_idle_req(BUS_ID_SDCARD, state); + break; + case PD_USB: + pmu_bus_idle_req(BUS_ID_USB, state); + break; + default: + break; + } + + if (pd_state == pmu_pd_off) + pmu_power_domain_ctr(pd_id, pd_state); + +out: + return 0; +} + +static uint32_t pmu_powerdomain_state; + +static void pmu_power_domains_suspend(void) +{ + uint32_t clkgt_save[CRU_CLKGATES_CON_CNT + CRU_PMU_CLKGATE_CON_CNT]; + + clk_gate_con_save(clkgt_save); + clk_gate_con_disable(); + qos_save(); + + pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); + pmu_set_power_domain(PD_GPU, pmu_pd_off); + pmu_set_power_domain(PD_VI, pmu_pd_off); + pmu_set_power_domain(PD_VO, pmu_pd_off); + pmu_set_power_domain(PD_VPU, pmu_pd_off); + pmu_set_power_domain(PD_MMC_NAND, pmu_pd_off); + pmu_set_power_domain(PD_GMAC, pmu_pd_off); + pmu_set_power_domain(PD_CRYPTO, pmu_pd_off); + pmu_set_power_domain(PD_SDCARD, pmu_pd_off); + pmu_set_power_domain(PD_USB, pmu_pd_off); + + clk_gate_con_restore(clkgt_save); +} + +static void pmu_power_domains_resume(void) +{ + uint32_t clkgt_save[CRU_CLKGATES_CON_CNT + CRU_PMU_CLKGATE_CON_CNT]; + + clk_gate_con_save(clkgt_save); + clk_gate_con_disable(); + + if (!(pmu_powerdomain_state & BIT(PD_USB))) + pmu_set_power_domain(PD_USB, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_SDCARD))) + pmu_set_power_domain(PD_SDCARD, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_CRYPTO))) + pmu_set_power_domain(PD_CRYPTO, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_GMAC))) + pmu_set_power_domain(PD_GMAC, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_MMC_NAND))) + pmu_set_power_domain(PD_MMC_NAND, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_VPU))) + pmu_set_power_domain(PD_VPU, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_VO))) + pmu_set_power_domain(PD_VO, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_VI))) + pmu_set_power_domain(PD_VI, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_GPU))) + pmu_set_power_domain(PD_GPU, pmu_pd_on); + + qos_restore(); + clk_gate_con_restore(clkgt_save); +} + +static int check_cpu_wfie(uint32_t cpu) +{ + uint32_t loop = 0, wfie_msk = CKECK_WFEI_MSK << cpu; + + while (!(mmio_read_32(GRF_BASE + GRF_CPU_STATUS1) & wfie_msk) && + (loop < WFEI_CHECK_LOOP)) { + udelay(1); + loop++; + 
} + + if ((mmio_read_32(GRF_BASE + GRF_CPU_STATUS1) & wfie_msk) == 0) { + WARN("%s: %d, %d, error!\n", __func__, cpu, wfie_msk); + return -EINVAL; + } + + return 0; +} + +static int cpus_power_domain_on(uint32_t cpu_id) +{ + uint32_t cpu_pd, apm_value, cfg_info, loop = 0; + + cpu_pd = PD_CPU0 + cpu_id; + cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id); + + if (cfg_info == core_pwr_pd) { + /* disable apm cfg */ + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + WITH_16BITS_WMSK(CORES_PM_DISABLE)); + if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + WITH_16BITS_WMSK(CORES_PM_DISABLE)); + pmu_power_domain_ctr(cpu_pd, pmu_pd_off); + } + pmu_power_domain_ctr(cpu_pd, pmu_pd_on); + } else { + /* wait cpu down */ + while (pmu_power_domain_st(cpu_pd) == pmu_pd_on && loop < 100) { + udelay(2); + loop++; + } + + /* return error if can't wait cpu down */ + if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { + WARN("%s:can't wait cpu down\n", __func__); + return -EINVAL; + } + + /* power up cpu in power down state */ + apm_value = BIT(core_pm_sft_wakeup_en); + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + WITH_16BITS_WMSK(apm_value)); + } + + return 0; +} + +static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg) +{ + uint32_t cpu_pd, apm_value; + + cpu_pd = PD_CPU0 + cpu_id; + if (pmu_power_domain_st(cpu_pd) == pmu_pd_off) + return 0; + + if (pd_cfg == core_pwr_pd) { + if (check_cpu_wfie(cpu_id)) + return -EINVAL; + /* disable apm cfg */ + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + WITH_16BITS_WMSK(CORES_PM_DISABLE)); + set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); + pmu_power_domain_ctr(cpu_pd, pmu_pd_off); + } else { + set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); + apm_value = BIT(core_pm_en) | BIT(core_pm_dis_int); + if (pd_cfg == core_pwr_wfi_int) + apm_value |= BIT(core_pm_int_wakeup_en); + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + WITH_16BITS_WMSK(apm_value)); + } + + return 0; +} + +static void nonboot_cpus_off(void) +{ + uint32_t boot_cpu, cpu; + + boot_cpu = plat_my_core_pos(); + + for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { + if (cpu == boot_cpu) + continue; + cpus_power_domain_off(cpu, core_pwr_pd); + } +} + +int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, + uint64_t entrypoint) +{ + uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); + + assert(cpu_id < PLATFORM_CORE_COUNT); + assert(cpuson_flags[cpu_id] == 0); + cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; + cpuson_entry_point[cpu_id] = entrypoint; + dsb(); + + cpus_power_domain_on(cpu_id); + + return PSCI_E_SUCCESS; +} + +int rockchip_soc_cores_pwr_dm_on_finish(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + WITH_16BITS_WMSK(CORES_PM_DISABLE)); + return PSCI_E_SUCCESS; +} + +int rockchip_soc_cores_pwr_dm_off(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + cpus_power_domain_off(cpu_id, core_pwr_wfi); + + return PSCI_E_SUCCESS; +} + +int rockchip_soc_cores_pwr_dm_suspend(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + assert(cpu_id < PLATFORM_CORE_COUNT); + assert(cpuson_flags[cpu_id] == 0); + cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN; + cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint(); + dsb(); + + cpus_power_domain_off(cpu_id, core_pwr_wfi_int); + + return PSCI_E_SUCCESS; +} + +int rockchip_soc_cores_pwr_dm_resume(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + /* Disable core_pm */ + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + WITH_16BITS_WMSK(CORES_PM_DISABLE)); + + 
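	/*
	 * A note on the register idiom used throughout these drivers:
	 * Rockchip GRF/CRU/PMU registers treat their upper 16 bits as a
	 * write-enable mask for the lower 16, so individual fields can be
	 * updated without a read-modify-write (CLK_MSK_GATING below writes
	 * ((msk) << 16) | 0xffff for the same reason).  Assuming the common
	 * helper definitions, the wrappers expand roughly as
	 *
	 *   WITH_16BITS_WMSK(v)        -> 0xffff0000 | (v)
	 *   BITS_WITH_WMASK(1, 0x3, 8) -> (1 << 8) | (0x3 << 24)
	 *
	 * so the second form writes 0b01 into bits [9:8] and leaves every
	 * other bit of the register untouched.
	 */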
return PSCI_E_SUCCESS; +} + +#define CLK_MSK_GATING(msk, con) \ + mmio_write_32(CRU_BASE + (con), ((msk) << 16) | 0xffff) +#define CLK_MSK_UNGATING(msk, con) \ + mmio_write_32(CRU_BASE + (con), ((~(msk)) << 16) | 0xffff) + +static uint32_t clk_ungt_msk[CRU_CLKGATES_CON_CNT] = { + 0xe0ff, 0xffff, 0x0000, 0x0000, + 0x0000, 0x0380, 0x0000, 0x0000, + 0x07c0, 0x0000, 0x0000, 0x000f, + 0x0061, 0x1f02, 0x0440, 0x1801, + 0x004b, 0x0000 +}; + +static uint32_t clk_pmu_ungt_msk[CRU_PMU_CLKGATE_CON_CNT] = { + 0xf1ff, 0x0310 +}; + +void clk_gate_suspend(void) +{ + int i; + + for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) { + ddr_data.cru_clk_gate[i] = + mmio_read_32(CRU_BASE + CRU_CLKGATES_CON(i)); + mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), + WITH_16BITS_WMSK(~clk_ungt_msk[i])); + } + + for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++) { + ddr_data.cru_pmu_clk_gate[i] = + mmio_read_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i)); + mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i), + WITH_16BITS_WMSK(~clk_pmu_ungt_msk[i])); + } +} + +void clk_gate_resume(void) +{ + int i; + + for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++) + mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i), + WITH_16BITS_WMSK(ddr_data.cru_pmu_clk_gate[i])); + + for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) + mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), + WITH_16BITS_WMSK(ddr_data.cru_clk_gate[i])); +} + +static void pvtm_32k_config(void) +{ + uint32_t pvtm_freq_khz, pvtm_div; + + ddr_data.pmu_cru_clksel_con0 = + mmio_read_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0)); + + ddr_data.pgrf_pvtm_con[0] = + mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_CON0); + ddr_data.pgrf_pvtm_con[1] = + mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_CON1); + + mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0, + BITS_WITH_WMASK(0, 0x3, pgrf_pvtm_st)); + dsb(); + mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0, + BITS_WITH_WMASK(1, 0x1, pgrf_pvtm_en)); + dsb(); + mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON1, PVTM_CALC_CNT); + dsb(); + + mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0, + BITS_WITH_WMASK(1, 0x1, pgrf_pvtm_st)); + + /* pmugrf_pvtm_st0 will be clear after PVTM start, + * which will cost about 6 cycles of pvtm at least. + * So we wait 30 cycles of pvtm for security. + */ + while (mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST1) < 30) + ; + + dsb(); + while (!(mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST0) & 0x1)) + ; + + pvtm_freq_khz = + (mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST1) * 24000 + + PVTM_CALC_CNT / 2) / PVTM_CALC_CNT; + pvtm_div = (pvtm_freq_khz + 16) / 32; + + /* pvtm_div = div_factor << 2 + 1, + * so div_factor = (pvtm_div - 1) >> 2. 
+ * But the operation ">> 2" will clear the low bit of pvtm_div, + * so we don't have to do "- 1" for compasation + */ + pvtm_div = pvtm_div >> 2; + if (pvtm_div > 0x3f) + pvtm_div = 0x3f; + + mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0, + BITS_WITH_WMASK(pvtm_div, 0x3f, pgrf_pvtm_div)); + + /* select pvtm as 32k source */ + mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0), + BITS_WITH_WMASK(1, 0x3U, 14)); +} + +static void pvtm_32k_config_restore(void) +{ + mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0), + ddr_data.pmu_cru_clksel_con0 | BITS_WMSK(0x3U, 14)); + + mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0, + WITH_16BITS_WMSK(ddr_data.pgrf_pvtm_con[0])); + mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON1, + ddr_data.pgrf_pvtm_con[1]); +} + +static void ddr_sleep_config(void) +{ + /* disable ddr pd, sr */ + ddr_data.ddrc_pwrctrl = mmio_read_32(DDR_UPCTL_BASE + 0x30); + mmio_write_32(DDR_UPCTL_BASE + 0x30, BITS_WITH_WMASK(0x0, 0x3, 0)); + + /* disable ddr auto gt */ + ddr_data.ddrgrf_con1 = mmio_read_32(DDRGRF_BASE + 0x4); + mmio_write_32(DDRGRF_BASE + 0x4, BITS_WITH_WMASK(0x0, 0x1f, 0)); + + /* disable ddr standby */ + ddr_data.ddrstdby_con0 = mmio_read_32(DDR_STDBY_BASE + 0x0); + mmio_write_32(DDR_STDBY_BASE + 0x0, BITS_WITH_WMASK(0x0, 0x1, 0)); + while ((mmio_read_32(DDR_UPCTL_BASE + 0x4) & 0x7) != 1) + ; + + /* ddr pmu ctrl */ + ddr_data.ddrgrf_con0 = mmio_read_32(DDRGRF_BASE + 0x0); + mmio_write_32(DDRGRF_BASE + 0x0, BITS_WITH_WMASK(0x0, 0x1, 5)); + dsb(); + mmio_write_32(DDRGRF_BASE + 0x0, BITS_WITH_WMASK(0x1, 0x1, 4)); + + /* ddr ret sel */ + ddr_data.pmugrf_soc_con0 = + mmio_read_32(PMUGRF_BASE + PMUGRF_SOC_CON(0)); + mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON(0), + BITS_WITH_WMASK(0x0, 0x1, 12)); +} + +static void ddr_sleep_config_restore(void) +{ + /* restore ddr ret sel */ + mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON(0), + ddr_data.pmugrf_soc_con0 | BITS_WMSK(0x1, 12)); + + /* restore ddr pmu ctrl */ + mmio_write_32(DDRGRF_BASE + 0x0, + ddr_data.ddrgrf_con0 | BITS_WMSK(0x1, 4)); + dsb(); + mmio_write_32(DDRGRF_BASE + 0x0, + ddr_data.ddrgrf_con0 | BITS_WMSK(0x1, 5)); + + /* restore ddr standby */ + mmio_write_32(DDR_STDBY_BASE + 0x0, + ddr_data.ddrstdby_con0 | BITS_WMSK(0x1, 0)); + + /* restore ddr auto gt */ + mmio_write_32(DDRGRF_BASE + 0x4, + ddr_data.ddrgrf_con1 | BITS_WMSK(0x1f, 0)); + + /* restore ddr pd, sr */ + mmio_write_32(DDR_UPCTL_BASE + 0x30, + ddr_data.ddrc_pwrctrl | BITS_WMSK(0x3, 0)); +} + +static void pmu_sleep_config(void) +{ + uint32_t pwrmd_core_lo, pwrmd_core_hi, pwrmd_com_lo, pwrmd_com_hi; + uint32_t pmu_wkup_cfg2_lo; + uint32_t clk_freq_khz; + + /* save pmic_sleep iomux gpio0_a4 */ + ddr_data.pmic_slp_iomux = mmio_read_32(PMUGRF_BASE + GPIO0A_IOMUX); + + ddr_data.pmu_pwrmd_core_l = + mmio_read_32(PMU_BASE + PMU_PWRMODE_CORE_LO); + ddr_data.pmu_pwrmd_core_h = + mmio_read_32(PMU_BASE + PMU_PWRMODE_CORE_HI); + ddr_data.pmu_pwrmd_cmm_l = + mmio_read_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO); + ddr_data.pmu_pwrmd_cmm_h = + mmio_read_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI); + ddr_data.pmu_wkup_cfg2_l = mmio_read_32(PMU_BASE + PMU_WKUP_CFG2_LO); + + pwrmd_core_lo = BIT(pmu_global_int_dis) | + BIT(pmu_core_src_gt) | + BIT(pmu_cpu0_pd) | + BIT(pmu_clr_core) | + BIT(pmu_scu_pd) | + BIT(pmu_l2_idle) | + BIT(pmu_l2_flush) | + BIT(pmu_clr_bus2main) | + BIT(pmu_clr_peri2msch); + + pwrmd_core_hi = BIT(pmu_dpll_pd_en) | + BIT(pmu_apll_pd_en) | + BIT(pmu_cpll_pd_en) | + BIT(pmu_gpll_pd_en) | + BIT(pmu_npll_pd_en); + + pwrmd_com_lo = BIT(pmu_mode_en) | + BIT(pmu_pll_pd) | 
+ BIT(pmu_pmu_use_if) | + BIT(pmu_alive_use_if) | + BIT(pmu_osc_dis) | + BIT(pmu_sref_enter) | + BIT(pmu_ddrc_gt) | + BIT(pmu_clr_pmu) | + BIT(pmu_clr_peri_pmu); + + pwrmd_com_hi = BIT(pmu_clr_bus) | + BIT(pmu_clr_msch) | + BIT(pmu_wakeup_begin_cfg); + + pmu_wkup_cfg2_lo = BIT(pmu_cluster_wkup_en) | + BIT(pmu_gpio_wkup_en) | + BIT(pmu_timer_wkup_en); + + /* set pmic_sleep iomux gpio0_a4 */ + mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX, + BITS_WITH_WMASK(1, 0x3, 8)); + + clk_freq_khz = 32; + + mmio_write_32(PMU_BASE + PMU_OSC_CNT_LO, + WITH_16BITS_WMSK(clk_freq_khz * 32 & 0xffff)); + mmio_write_32(PMU_BASE + PMU_OSC_CNT_HI, + WITH_16BITS_WMSK(clk_freq_khz * 32 >> 16)); + + mmio_write_32(PMU_BASE + PMU_STABLE_CNT_LO, + WITH_16BITS_WMSK(clk_freq_khz * 32 & 0xffff)); + mmio_write_32(PMU_BASE + PMU_STABLE_CNT_HI, + WITH_16BITS_WMSK(clk_freq_khz * 32 >> 16)); + + mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_LO, + WITH_16BITS_WMSK(clk_freq_khz * 2 & 0xffff)); + mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_HI, + WITH_16BITS_WMSK(clk_freq_khz * 2 >> 16)); + + /* Pmu's clk has switched to 24M back When pmu FSM counts + * the follow counters, so we should use 24M to calculate + * these counters. + */ + mmio_write_32(PMU_BASE + PMU_SCU_PWRDN_CNT_LO, + WITH_16BITS_WMSK(24000 * 2 & 0xffff)); + mmio_write_32(PMU_BASE + PMU_SCU_PWRDN_CNT_HI, + WITH_16BITS_WMSK(24000 * 2 >> 16)); + + mmio_write_32(PMU_BASE + PMU_SCU_PWRUP_CNT_LO, + WITH_16BITS_WMSK(24000 * 2 & 0xffff)); + mmio_write_32(PMU_BASE + PMU_SCU_PWRUP_CNT_HI, + WITH_16BITS_WMSK(24000 * 2 >> 16)); + + mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT_LO, + WITH_16BITS_WMSK(24000 * 5 & 0xffff)); + mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT_HI, + WITH_16BITS_WMSK(24000 * 5 >> 16)); + + mmio_write_32(PMU_BASE + PMU_PLLRST_CNT_LO, + WITH_16BITS_WMSK(24000 * 2 & 0xffff)); + mmio_write_32(PMU_BASE + PMU_PLLRST_CNT_HI, + WITH_16BITS_WMSK(24000 * 2 >> 16)); + + /* Config pmu power mode and pmu wakeup source */ + mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_LO, + WITH_16BITS_WMSK(pwrmd_core_lo)); + mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_HI, + WITH_16BITS_WMSK(pwrmd_core_hi)); + + mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO, + WITH_16BITS_WMSK(pwrmd_com_lo)); + mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI, + WITH_16BITS_WMSK(pwrmd_com_hi)); + + mmio_write_32(PMU_BASE + PMU_WKUP_CFG2_LO, + WITH_16BITS_WMSK(pmu_wkup_cfg2_lo)); +} + +static void pmu_sleep_restore(void) +{ + mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_LO, + WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_core_l)); + mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_HI, + WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_core_h)); + mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO, + WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_cmm_l)); + mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI, + WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_cmm_h)); + mmio_write_32(PMU_BASE + PMU_WKUP_CFG2_LO, + WITH_16BITS_WMSK(ddr_data.pmu_wkup_cfg2_l)); + + /* restore pmic_sleep iomux */ + mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX, + WITH_16BITS_WMSK(ddr_data.pmic_slp_iomux)); +} + +static void soc_sleep_config(void) +{ + ddr_data.gpio0c_iomux = mmio_read_32(PMUGRF_BASE + GPIO0C_IOMUX); + + pmu_sleep_config(); + + ddr_sleep_config(); + + pvtm_32k_config(); +} + +static void soc_sleep_restore(void) +{ + secure_timer_init(); + + pvtm_32k_config_restore(); + + ddr_sleep_config_restore(); + + pmu_sleep_restore(); + + mmio_write_32(PMUGRF_BASE + GPIO0C_IOMUX, + WITH_16BITS_WMSK(ddr_data.gpio0c_iomux)); +} + +static inline void pm_pll_wait_lock(uint32_t pll_base, uint32_t 
pll_id) +{ + uint32_t delay = PLL_LOCKED_TIMEOUT; + + while (delay > 0) { + if (mmio_read_32(pll_base + PLL_CON(1)) & + PLL_LOCK_MSK) + break; + delay--; + } + + if (delay == 0) + ERROR("Can't wait pll:%d lock\n", pll_id); +} + +static inline void pll_pwr_ctr(uint32_t pll_base, uint32_t pll_id, uint32_t pd) +{ + mmio_write_32(pll_base + PLL_CON(1), + BITS_WITH_WMASK(1, 1U, 15)); + if (pd) + mmio_write_32(pll_base + PLL_CON(1), + BITS_WITH_WMASK(1, 1, 14)); + else + mmio_write_32(pll_base + PLL_CON(1), + BITS_WITH_WMASK(0, 1, 14)); +} + +static inline void pll_set_mode(uint32_t pll_id, uint32_t mode) +{ + uint32_t val = BITS_WITH_WMASK(mode, 0x3, PLL_MODE_SHIFT(pll_id)); + + if (pll_id != GPLL_ID) + mmio_write_32(CRU_BASE + CRU_MODE, val); + else + mmio_write_32(PMUCRU_BASE + CRU_PMU_MODE, + BITS_WITH_WMASK(mode, 0x3, 0)); +} + +static inline void pll_suspend(uint32_t pll_id) +{ + int i; + uint32_t pll_base; + + if (pll_id != GPLL_ID) + pll_base = CRU_BASE + CRU_PLL_CONS(pll_id, 0); + else + pll_base = PMUCRU_BASE + CRU_PLL_CONS(0, 0); + + /* save pll con */ + for (i = 0; i < PLL_CON_CNT; i++) + ddr_data.cru_plls_con_save[pll_id][i] = + mmio_read_32(pll_base + PLL_CON(i)); + + /* slow mode */ + pll_set_mode(pll_id, SLOW_MODE); +} + +static inline void pll_resume(uint32_t pll_id) +{ + uint32_t mode, pll_base; + + if (pll_id != GPLL_ID) { + pll_base = CRU_BASE + CRU_PLL_CONS(pll_id, 0); + mode = (ddr_data.cru_mode_save >> PLL_MODE_SHIFT(pll_id)) & 0x3; + } else { + pll_base = PMUCRU_BASE + CRU_PLL_CONS(0, 0); + mode = ddr_data.cru_pmu_mode_save & 0x3; + } + + /* if pll locked before suspend, we should wait atfer resume */ + if (ddr_data.cru_plls_con_save[pll_id][1] & PLL_LOCK_MSK) + pm_pll_wait_lock(pll_base, pll_id); + + pll_set_mode(pll_id, mode); +} + +static void pm_plls_suspend(void) +{ + ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_MODE); + ddr_data.cru_pmu_mode_save = mmio_read_32(PMUCRU_BASE + CRU_PMU_MODE); + ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSELS_CON(0)); + + pll_suspend(GPLL_ID); + pll_suspend(NPLL_ID); + pll_suspend(CPLL_ID); + pll_suspend(APLL_ID); + + /* core */ + mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0), + BITS_WITH_WMASK(0, 0xf, 0)); + + /* pclk_dbg */ + mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0), + BITS_WITH_WMASK(0, 0xf, 8)); +} + +static void pm_plls_resume(void) +{ + /* pclk_dbg */ + mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0), + ddr_data.clk_sel0 | BITS_WMSK(0xf, 8)); + + /* core */ + mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0), + ddr_data.clk_sel0 | BITS_WMSK(0xf, 0)); + + pll_resume(APLL_ID); + pll_resume(CPLL_ID); + pll_resume(NPLL_ID); + pll_resume(GPLL_ID); +} + +int rockchip_soc_sys_pwr_dm_suspend(void) +{ + pmu_power_domains_suspend(); + + clk_gate_suspend(); + + soc_sleep_config(); + + pm_plls_suspend(); + + psram_boot_cfg->pm_flag &= ~PM_WARM_BOOT_BIT; + + return 0; +} + +int rockchip_soc_sys_pwr_dm_resume(void) +{ + psram_boot_cfg->pm_flag |= PM_WARM_BOOT_BIT; + + pm_plls_resume(); + + soc_sleep_restore(); + + clk_gate_resume(); + + pmu_power_domains_resume(); + + plat_rockchip_gic_cpuif_enable(); + + return 0; +} + +void __dead2 rockchip_soc_soft_reset(void) +{ + pll_set_mode(GPLL_ID, SLOW_MODE); + pll_set_mode(CPLL_ID, SLOW_MODE); + pll_set_mode(NPLL_ID, SLOW_MODE); + pll_set_mode(APLL_ID, SLOW_MODE); + dsb(); + + mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE); + dsb(); + + /* + * Maybe the HW needs some times to reset the system, + * so we do not hope the core to execute valid codes. 
+ */ + psci_power_down_wfi(); +} + +void __dead2 rockchip_soc_system_off(void) +{ + uint32_t val; + + /* set pmic_sleep pin(gpio0_a4) to gpio mode */ + mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX, BITS_WITH_WMASK(0, 0x3, 8)); + + /* config output */ + val = mmio_read_32(GPIO0_BASE + SWPORTA_DDR); + val |= BIT(4); + mmio_write_32(GPIO0_BASE + SWPORTA_DDR, val); + + /* config output high level */ + val = mmio_read_32(GPIO0_BASE); + val |= BIT(4); + mmio_write_32(GPIO0_BASE, val); + dsb(); + + /* + * Maybe the HW needs some times to reset the system, + * so we do not hope the core to execute valid codes. + */ + psci_power_down_wfi(); +} + +void rockchip_plat_mmu_el3(void) +{ + /* TODO: support the el3 for px30 SoCs */ +} + +void plat_rockchip_pmu_init(void) +{ + uint32_t cpu; + + rockchip_pd_lock_init(); + + for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) + cpuson_flags[cpu] = 0; + + psram_boot_cfg->ddr_func = (uint64_t)0; + psram_boot_cfg->ddr_data = (uint64_t)0; + psram_boot_cfg->sp = PSRAM_SP_TOP; + psram_boot_cfg->ddr_flag = 0x0; + psram_boot_cfg->boot_mpidr = read_mpidr_el1() & 0xffff; + psram_boot_cfg->pm_flag = PM_WARM_BOOT_BIT; + + nonboot_cpus_off(); + + /* Remap pmu_sram's base address to boot address */ + mmio_write_32(PMUSGRF_BASE + PMUSGRF_SOC_CON(0), + BITS_WITH_WMASK(1, 0x1, 13)); + + INFO("%s: pd status %x\n", + __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST)); +} diff --git a/plat/rockchip/px30/drivers/pmu/pmu.h b/plat/rockchip/px30/drivers/pmu/pmu.h new file mode 100644 index 0000000..416d1c1 --- /dev/null +++ b/plat/rockchip/px30/drivers/pmu/pmu.h @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __PMU_H__ +#define __PMU_H__ + +/* Needed aligned 16 bytes for sp stack top */ +#define PSRAM_SP_TOP ((PMUSRAM_BASE + PMUSRAM_RSIZE) & ~0xf) + +/***************************************************************************** + * pmu con,reg + *****************************************************************************/ +#define PMU_WKUP_CFG0_LO 0x00 +#define PMU_WKUP_CFG0_HI 0x04 +#define PMU_WKUP_CFG1_LO 0x08 +#define PMU_WKUP_CFG1_HI 0x0c +#define PMU_WKUP_CFG2_LO 0x10 + +#define PMU_PWRDN_CON 0x18 +#define PMU_PWRDN_ST 0x20 + +#define PMU_PWRMODE_CORE_LO 0x24 +#define PMU_PWRMODE_CORE_HI 0x28 +#define PMU_PWRMODE_COMMON_CON_LO 0x2c +#define PMU_PWRMODE_COMMON_CON_HI 0x30 + +#define PMU_SFT_CON 0x34 +#define PMU_INT_ST 0x44 +#define PMU_BUS_IDLE_REQ 0x64 +#define PMU_BUS_IDLE_ST 0x6c + +#define PMU_OSC_CNT_LO 0x74 +#define PMU_OSC_CNT_HI 0x78 +#define PMU_PLLLOCK_CNT_LO 0x7c +#define PMU_PLLLOCK_CNT_HI 0x80 +#define PMU_PLLRST_CNT_LO 0x84 +#define PMU_PLLRST_CNT_HI 0x88 +#define PMU_STABLE_CNT_LO 0x8c +#define PMU_STABLE_CNT_HI 0x90 +#define PMU_WAKEUP_RST_CLR_LO 0x9c +#define PMU_WAKEUP_RST_CLR_HI 0xa0 + +#define PMU_DDR_SREF_ST 0xa4 + +#define PMU_SYS_REG0_LO 0xa8 +#define PMU_SYS_REG0_HI 0xac +#define PMU_SYS_REG1_LO 0xb0 +#define PMU_SYS_REG1_HI 0xb4 +#define PMU_SYS_REG2_LO 0xb8 +#define PMU_SYS_REG2_HI 0xbc +#define PMU_SYS_REG3_LO 0xc0 +#define PMU_SYS_REG3_HI 0xc4 + +#define PMU_SCU_PWRDN_CNT_LO 0xc8 +#define PMU_SCU_PWRDN_CNT_HI 0xcc +#define PMU_SCU_PWRUP_CNT_LO 0xd0 +#define PMU_SCU_PWRUP_CNT_HI 0xd4 + +#define PMU_TIMEOUT_CNT_LO 0xd8 +#define PMU_TIMEOUT_CNT_HI 0xdc + +#define PMU_CPUAPM_CON(cpu) (0xe0 + (cpu) * 0x4) + +#define CORES_PM_DISABLE 0x0 +#define CLST_CPUS_MSK 0xf + +#define PD_CTR_LOOP 500 +#define PD_CHECK_LOOP 500 +#define WFEI_CHECK_LOOP 500 +#define 
BUS_IDLE_LOOP 1000 + +enum pmu_wkup_cfg2 { + pmu_cluster_wkup_en = 0, + pmu_gpio_wkup_en = 2, + pmu_sdio_wkup_en = 3, + pmu_sdmmc_wkup_en = 4, + pmu_uart0_wkup_en = 5, + pmu_timer_wkup_en = 6, + pmu_usbdev_wkup_en = 7, + pmu_sft_wkup_en = 8, + pmu_timeout_wkup_en = 10, +}; + +enum pmu_powermode_core_lo { + pmu_global_int_dis = 0, + pmu_core_src_gt = 1, + pmu_cpu0_pd = 3, + pmu_clr_core = 5, + pmu_scu_pd = 6, + pmu_l2_idle = 8, + pmu_l2_flush = 9, + pmu_clr_bus2main = 10, + pmu_clr_peri2msch = 11, +}; + +enum pmu_powermode_core_hi { + pmu_apll_pd_en = 3, + pmu_dpll_pd_en = 4, + pmu_cpll_pd_en = 5, + pmu_gpll_pd_en = 6, + pmu_npll_pd_en = 7, +}; + +enum pmu_powermode_common_lo { + pmu_mode_en = 0, + pmu_ddr_pd_en = 1, + pmu_wkup_rst = 3, + pmu_pll_pd = 4, + pmu_pmu_use_if = 6, + pmu_alive_use_if = 7, + pmu_osc_dis = 8, + pmu_input_clamp = 9, + pmu_sref_enter = 10, + pmu_ddrc_gt = 11, + pmu_ddrio_ret = 12, + pmu_ddrio_ret_deq = 13, + pmu_clr_pmu = 14, + pmu_clr_peri_pmu = 15, +}; + +enum pmu_powermode_common_hi { + pmu_clr_bus = 0, + pmu_clr_mmc = 1, + pmu_clr_msch = 2, + pmu_clr_nandc = 3, + pmu_clr_gmac = 4, + pmu_clr_vo = 5, + pmu_clr_vi = 6, + pmu_clr_gpu = 7, + pmu_clr_usb = 8, + pmu_clr_vpu = 9, + pmu_clr_crypto = 10, + pmu_wakeup_begin_cfg = 11, + pmu_peri_clk_src_gt = 12, + pmu_bus_clk_src_gt = 13, +}; + +enum pmu_pd_id { + PD_CPU0 = 0, + PD_CPU1 = 1, + PD_CPU2 = 2, + PD_CPU3 = 3, + PD_SCU = 4, + PD_USB = 5, + PD_DDR = 6, + PD_SDCARD = 8, + PD_CRYPTO = 9, + PD_GMAC = 10, + PD_MMC_NAND = 11, + PD_VPU = 12, + PD_VO = 13, + PD_VI = 14, + PD_GPU = 15, + PD_END = 16, +}; + +enum pmu_bus_id { + BUS_ID_BUS = 0, + BUS_ID_BUS2MAIN = 1, + BUS_ID_GPU = 2, + BUS_ID_CORE = 3, + BUS_ID_CRYPTO = 4, + BUS_ID_MMC = 5, + BUS_ID_GMAC = 6, + BUS_ID_VO = 7, + BUS_ID_VI = 8, + BUS_ID_SDCARD = 9, + BUS_ID_USB = 10, + BUS_ID_MSCH = 11, + BUS_ID_PERI = 12, + BUS_ID_PMU = 13, + BUS_ID_VPU = 14, + BUS_ID_PERI2MSCH = 15, +}; + +enum pmu_pd_state { + pmu_pd_on = 0, + pmu_pd_off = 1 +}; + +enum pmu_bus_state { + bus_active = 0, + bus_idle = 1, +}; + +enum cores_pm_ctr_mode { + core_pwr_pd = 0, + core_pwr_wfi = 1, + core_pwr_wfi_int = 2 +}; + +enum pmu_cores_pm_by_wfi { + core_pm_en = 0, + core_pm_int_wakeup_en, + core_pm_dis_int, + core_pm_sft_wakeup_en +}; + +/***************************************************************************** + * pmu_sgrf + *****************************************************************************/ +#define PMUSGRF_SOC_CON(i) ((i) * 0x4) + +/***************************************************************************** + * pmu_grf + *****************************************************************************/ +#define GPIO0A_IOMUX 0x0 +#define GPIO0B_IOMUX 0x4 +#define GPIO0C_IOMUX 0x8 +#define GPIO0A_PULL 0x10 + +#define GPIO0L_SMT 0x38 +#define GPIO0H_SMT 0x3c + +#define PMUGRF_SOC_CON(i) (0x100 + (i) * 4) + +#define PMUGRF_PVTM_CON0 0x180 +#define PMUGRF_PVTM_CON1 0x184 +#define PMUGRF_PVTM_ST0 0x190 +#define PMUGRF_PVTM_ST1 0x194 + +#define PVTM_CALC_CNT 0x200 + +#define PMUGRF_OS_REG(n) (0x200 + (n) * 4) + +#define GPIO0A6_IOMUX_MSK (0x3 << 12) +#define GPIO0A6_IOMUX_GPIO (0x0 << 12) +#define GPIO0A6_IOMUX_RSTOUT (0x1 << 12) +#define GPIO0A6_IOMUX_SHTDN (0x2 << 12) + +enum px30_pmugrf_pvtm_con0 { + pgrf_pvtm_st = 0, + pgrf_pvtm_en = 1, + pgrf_pvtm_div = 2, +}; + +/***************************************************************************** + * pmu_cru + *****************************************************************************/ +#define CRU_PMU_MODE 0x20 +#define 
CRU_PMU_CLKSEL_CON 0x40 +#define CRU_PMU_CLKSELS_CON(i) (CRU_PMU_CLKSEL_CON + (i) * 4) +#define CRU_PMU_CLKSEL_CON_CNT 5 +#define CRU_PMU_CLKGATE_CON 0x80 +#define CRU_PMU_CLKGATES_CON(i) (CRU_PMU_CLKGATE_CON + (i) * 4) +#define CRU_PMU_CLKGATE_CON_CNT 2 +#define CRU_PMU_ATCS_CON 0xc0 +#define CRU_PMU_ATCSS_CON(i) (CRU_PMU_ATCS_CON + (i) * 4) +#define CRU_PMU_ATCS_CON_CNT 2 + +/***************************************************************************** + * pmusgrf + *****************************************************************************/ +#define PMUSGRF_RSTOUT_EN (0x7 << 10) +#define PMUSGRF_RSTOUT_FST 10 +#define PMUSGRF_RSTOUT_TSADC 11 +#define PMUSGRF_RSTOUT_WDT 12 + +#define PMUGRF_SOC_CON2_US_WMSK (0x1fff << 16) +#define PMUGRF_SOC_CON2_MAX_341US 0x1fff +#define PMUGRF_SOC_CON2_200US 0x12c0 + +#define PMUGRF_FAILSAFE_SHTDN_TSADC BIT(0) +#define PMUGRF_FAILSAFE_SHTDN_WDT BIT(1) + +/***************************************************************************** + * QOS + *****************************************************************************/ +#define CPU_AXI_QOS_ID_COREID 0x00 +#define CPU_AXI_QOS_REVISIONID 0x04 +#define CPU_AXI_QOS_PRIORITY 0x08 +#define CPU_AXI_QOS_MODE 0x0c +#define CPU_AXI_QOS_BANDWIDTH 0x10 +#define CPU_AXI_QOS_SATURATION 0x14 +#define CPU_AXI_QOS_EXTCONTROL 0x18 +#define CPU_AXI_QOS_NUM_REGS 0x07 + +#define CPU_AXI_CPU_QOS_BASE 0xff508000 +#define CPU_AXI_GPU_QOS_BASE 0xff520000 +#define CPU_AXI_ISP_128M_QOS_BASE 0xff548000 +#define CPU_AXI_ISP_RD_QOS_BASE 0xff548080 +#define CPU_AXI_ISP_WR_QOS_BASE 0xff548100 +#define CPU_AXI_ISP_M1_QOS_BASE 0xff548180 +#define CPU_AXI_VIP_QOS_BASE 0xff548200 +#define CPU_AXI_RGA_RD_QOS_BASE 0xff550000 +#define CPU_AXI_RGA_WR_QOS_BASE 0xff550080 +#define CPU_AXI_VOP_M0_QOS_BASE 0xff550100 +#define CPU_AXI_VOP_M1_QOS_BASE 0xff550180 +#define CPU_AXI_VPU_QOS_BASE 0xff558000 +#define CPU_AXI_VPU_R128_QOS_BASE 0xff558080 +#define CPU_AXI_DCF_QOS_BASE 0xff500000 +#define CPU_AXI_DMAC_QOS_BASE 0xff500080 +#define CPU_AXI_CRYPTO_QOS_BASE 0xff510000 +#define CPU_AXI_GMAC_QOS_BASE 0xff518000 +#define CPU_AXI_EMMC_QOS_BASE 0xff538000 +#define CPU_AXI_NAND_QOS_BASE 0xff538080 +#define CPU_AXI_SDIO_QOS_BASE 0xff538100 +#define CPU_AXI_SFC_QOS_BASE 0xff538180 +#define CPU_AXI_SDMMC_QOS_BASE 0xff52c000 +#define CPU_AXI_USB_HOST_QOS_BASE 0xff540000 +#define CPU_AXI_USB_OTG_QOS_BASE 0xff540080 + +#define PX30_CPU_AXI_SAVE_QOS(array, base) do { \ + array[0] = mmio_read_32(base + CPU_AXI_QOS_ID_COREID); \ + array[1] = mmio_read_32(base + CPU_AXI_QOS_REVISIONID); \ + array[2] = mmio_read_32(base + CPU_AXI_QOS_PRIORITY); \ + array[3] = mmio_read_32(base + CPU_AXI_QOS_MODE); \ + array[4] = mmio_read_32(base + CPU_AXI_QOS_BANDWIDTH); \ + array[5] = mmio_read_32(base + CPU_AXI_QOS_SATURATION); \ + array[6] = mmio_read_32(base + CPU_AXI_QOS_EXTCONTROL); \ +} while (0) + +#define PX30_CPU_AXI_RESTORE_QOS(array, base) do { \ + mmio_write_32(base + CPU_AXI_QOS_ID_COREID, array[0]); \ + mmio_write_32(base + CPU_AXI_QOS_REVISIONID, array[1]); \ + mmio_write_32(base + CPU_AXI_QOS_PRIORITY, array[2]); \ + mmio_write_32(base + CPU_AXI_QOS_MODE, array[3]); \ + mmio_write_32(base + CPU_AXI_QOS_BANDWIDTH, array[4]); \ + mmio_write_32(base + CPU_AXI_QOS_SATURATION, array[5]); \ + mmio_write_32(base + CPU_AXI_QOS_EXTCONTROL, array[6]); \ +} while (0) + +#define SAVE_QOS(array, NAME) \ + PX30_CPU_AXI_SAVE_QOS(array, CPU_AXI_##NAME##_QOS_BASE) +#define RESTORE_QOS(array, NAME) \ + PX30_CPU_AXI_RESTORE_QOS(array, CPU_AXI_##NAME##_QOS_BASE) + +#endif /* 
__PMU_H__ */ diff --git a/plat/rockchip/px30/drivers/secure/secure.c b/plat/rockchip/px30/drivers/secure/secure.c new file mode 100644 index 0000000..144f945 --- /dev/null +++ b/plat/rockchip/px30/drivers/secure/secure.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <ddr_parameter.h> +#include <plat_private.h> +#include <secure.h> +#include <px30_def.h> + +/** + * There are 8 regions for DDR security control + * @rgn - the DDR regions 0 ~ 7 which are can be configured. + * @st - start address to set as secure + * @sz - length of area to set as secure + * The internal unit is megabytes, so memory areas need to be aligned + * to megabyte borders. + */ +static void secure_ddr_region(uint32_t rgn, + uintptr_t st, size_t sz) +{ + uintptr_t ed = st + sz; + uintptr_t st_mb, ed_mb; + uint32_t val; + + assert(rgn <= 7); + assert(st < ed); + + /* check aligned 1MB */ + assert(st % SIZE_M(1) == 0); + assert(ed % SIZE_M(1) == 0); + + st_mb = st / SIZE_M(1); + ed_mb = ed / SIZE_M(1); + + /* map top and base */ + mmio_write_32(FIREWALL_DDR_BASE + + FIREWALL_DDR_FW_DDR_RGN(rgn), + RG_MAP_SECURE(ed_mb, st_mb)); + + /* enable secure */ + val = mmio_read_32(FIREWALL_DDR_BASE + FIREWALL_DDR_FW_DDR_CON_REG); + val |= BIT(rgn); + mmio_write_32(FIREWALL_DDR_BASE + + FIREWALL_DDR_FW_DDR_CON_REG, val); +} + +void secure_timer_init(void) +{ + mmio_write_32(STIMER_CHN_BASE(1) + TIMER_CONTROL_REG, + TIMER_DIS); + + mmio_write_32(STIMER_CHN_BASE(1) + TIMER_LOAD_COUNT0, 0xffffffff); + mmio_write_32(STIMER_CHN_BASE(1) + TIMER_LOAD_COUNT1, 0xffffffff); + + /* auto reload & enable the timer */ + mmio_write_32(STIMER_CHN_BASE(1) + TIMER_CONTROL_REG, + TIMER_EN | TIMER_FMODE); +} + +void sgrf_init(void) +{ +#ifdef PLAT_RK_SECURE_DDR_MINILOADER + uint32_t i; + struct param_ddr_usage usg; + + /* general secure regions */ + usg = ddr_region_usage_parse(DDR_PARAM_BASE, + PLAT_MAX_DDR_CAPACITY_MB); + + /* region-0 for TF-A, region-1 for optional OP-TEE */ + assert(usg.s_nr < 7); + + for (i = 0; i < usg.s_nr; i++) + secure_ddr_region(7 - i, usg.s_top[i], usg.s_base[i]); +#endif + + /* secure the trustzone ram */ + secure_ddr_region(0, TZRAM_BASE, TZRAM_SIZE); + + /* set all slave ip into no-secure, except stimer */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(4), SGRF_SLV_S_ALL_NS); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(5), SGRF_SLV_S_ALL_NS); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6), SGRF_SLV_S_ALL_NS); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(7), SGRF_SLV_S_ALL_NS); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(8), 0x00030000); + + /* set master crypto to no-secure, dcf to secure */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3), 0x000f0003); + + /* set DMAC into no-secure */ + mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(0), DMA_IRQ_BOOT_NS); + mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(1), DMA_PERI_CH_NS_15_0); + mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(2), DMA_PERI_CH_NS_19_16); + mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(3), DMA_MANAGER_BOOT_NS); + + /* soft reset dma before use */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), DMA_SOFTRST_REQ); + udelay(5); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), DMA_SOFTRST_RLS); +} diff --git a/plat/rockchip/px30/drivers/secure/secure.h b/plat/rockchip/px30/drivers/secure/secure.h new file mode 100644 index 0000000..498027d --- /dev/null +++ b/plat/rockchip/px30/drivers/secure/secure.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. 
All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SECURE_H +#define SECURE_H + +/*************************************************************************** + * SGRF + ***************************************************************************/ +#define SGRF_SOC_CON(i) ((i) * 0x4) +#define SGRF_DMAC_CON(i) (0x30 + (i) * 0x4) + +#define SGRF_MST_S_ALL_NS 0xffffffff +#define SGRF_SLV_S_ALL_NS 0xffff0000 +#define DMA_IRQ_BOOT_NS 0xffffffff +#define DMA_PERI_CH_NS_15_0 0xffffffff +#define DMA_PERI_CH_NS_19_16 0x000f000f +#define DMA_MANAGER_BOOT_NS 0x00010001 +#define DMA_SOFTRST_REQ BITS_WITH_WMASK(1, 0x1, 12) +#define DMA_SOFTRST_RLS BITS_WITH_WMASK(0, 0x1, 12) + +/*************************************************************************** + * DDR FIREWALL + ***************************************************************************/ +#define FIREWALL_DDR_FW_DDR_RGN(i) ((i) * 0x4) +#define FIREWALL_DDR_FW_DDR_MST(i) (0x20 + (i) * 0x4) +#define FIREWALL_DDR_FW_DDR_CON_REG 0x40 +#define FIREWALL_DDR_FW_DDR_RGN_NUM 8 +#define FIREWALL_DDR_FW_DDR_MST_NUM 6 + +#define PLAT_MAX_DDR_CAPACITY_MB 4096 +#define RG_MAP_SECURE(top, base) ((((top) - 1) << 16) | (base)) + +/************************************************** + * secure timer + **************************************************/ + +/* chanal0~5 */ +#define STIMER_CHN_BASE(n) (STIME_BASE + 0x20 * (n)) + +#define TIMER_LOAD_COUNT0 0x0 +#define TIMER_LOAD_COUNT1 0x4 + +#define TIMER_CUR_VALUE0 0x8 +#define TIMER_CUR_VALUE1 0xc + +#define TIMER_CONTROL_REG 0x10 +#define TIMER_INTSTATUS 0x18 + +#define TIMER_DIS 0x0 +#define TIMER_EN 0x1 + +#define TIMER_FMODE (0x0 << 1) +#define TIMER_RMODE (0x1 << 1) + +#define TIMER_LOAD_COUNT0_MSK (0xffffffff) +#define TIMER_LOAD_COUNT1_MSK (0xffffffff00000000) + +void secure_timer_init(void); +void sgrf_init(void); + +#endif /* SECURE_H */ diff --git a/plat/rockchip/px30/drivers/soc/soc.c b/plat/rockchip/px30/drivers/soc/soc.c new file mode 100644 index 0000000..200563d --- /dev/null +++ b/plat/rockchip/px30/drivers/soc/soc.c @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/console.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include <platform_def.h> +#include <pmu.h> +#include <px30_def.h> +#include <secure.h> +#include <soc.h> +#include <rockchip_sip_svc.h> + +/* Aggregate of all devices in the first GB */ +#define PX30_DEV_RNG0_BASE 0xff000000 +#define PX30_DEV_RNG0_SIZE 0x00ff0000 + +const mmap_region_t plat_rk_mmap[] = { + MAP_REGION_FLAT(PX30_DEV_RNG0_BASE, PX30_DEV_RNG0_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SHARE_MEM_BASE, SHARE_MEM_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_PARAM_BASE, DDR_PARAM_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + { 0 } +}; + +/* The RockChip power domain tree descriptor */ +const unsigned char rockchip_power_domain_tree_desc[] = { + /* No of root nodes */ + PLATFORM_SYSTEM_COUNT, + /* No of children for the root node */ + PLATFORM_CLUSTER_COUNT, + /* No of children for the first cluster node */ + PLATFORM_CLUSTER0_CORE_COUNT, +}; + +void clk_gate_con_save(uint32_t *clkgt_save) +{ + uint32_t i, j; + + for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) + clkgt_save[i] = + mmio_read_32(CRU_BASE + CRU_CLKGATES_CON(i)); + j = i; + for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++, j++) + clkgt_save[j] = + mmio_read_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i)); +} + +void clk_gate_con_restore(uint32_t *clkgt_save) +{ + uint32_t i, j; + + for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) + mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), + WITH_16BITS_WMSK(clkgt_save[i])); + + j = i; + for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++, j++) + mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i), + WITH_16BITS_WMSK(clkgt_save[j])); +} + +void clk_gate_con_disable(void) +{ + uint32_t i; + + for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) + mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), + 0xffff0000); + + for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++) + mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i), + 0xffff0000); +} + +static void soc_reset_config_all(void) +{ + uint32_t tmp; + + /* tsadc and wdt can trigger a first rst */ + tmp = mmio_read_32(CRU_BASE + CRU_GLB_RST_CON); + tmp |= CRU_GLB_RST_TSADC_FST | CRU_GLB_RST_WDT_FST; + mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, tmp); + return; + tmp = mmio_read_32(PMUGRF_BASE + PMUGRF_SOC_CON(3)); + tmp &= ~(PMUGRF_FAILSAFE_SHTDN_TSADC | PMUGRF_FAILSAFE_SHTDN_WDT); + mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON(3), tmp); + + /* wdt pin rst eable */ + mmio_write_32(GRF_BASE + GRF_SOC_CON(2), + BIT_WITH_WMSK(GRF_SOC_CON2_NSWDT_RST_EN)); +} + +void px30_soc_reset_config(void) +{ + uint32_t tmp; + + /* enable soc ip rst hold time cfg */ + tmp = mmio_read_32(CRU_BASE + CRU_GLB_RST_CON); + tmp |= BIT(CRU_GLB_RST_TSADC_EXT) | BIT(CRU_GLB_RST_WDT_EXT); + mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, tmp); + /* soc ip rst hold time, 24m */ + tmp = mmio_read_32(CRU_BASE + CRU_GLB_CNT_TH); + tmp &= ~CRU_GLB_CNT_RST_MSK; + tmp |= (CRU_GLB_CNT_RST_1MS / 2); + mmio_write_32(CRU_BASE + CRU_GLB_CNT_TH, tmp); + + mmio_write_32(PMUSGRF_BASE + PMUSGRF_SOC_CON(0), + BIT_WITH_WMSK(PMUSGRF_RSTOUT_FST) | + BIT_WITH_WMSK(PMUSGRF_RSTOUT_TSADC) | + BIT_WITH_WMSK(PMUSGRF_RSTOUT_WDT)); + + /* rst_out pulse time */ + mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON(2), + PMUGRF_SOC_CON2_MAX_341US | PMUGRF_SOC_CON2_US_WMSK); + + soc_reset_config_all(); +} + +void plat_rockchip_soc_init(void) +{ + secure_timer_init(); + sgrf_init(); +} diff --git 
a/plat/rockchip/px30/drivers/soc/soc.h b/plat/rockchip/px30/drivers/soc/soc.h new file mode 100644 index 0000000..648d18b --- /dev/null +++ b/plat/rockchip/px30/drivers/soc/soc.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __SOC_H__ +#define __SOC_H__ + +#include <plat_private.h> + +#ifndef BITS_WMSK +#define BITS_WMSK(msk, shift) ((msk) << (shift + REG_MSK_SHIFT)) +#endif + +enum plls_id { + APLL_ID = 0, + DPLL_ID, + CPLL_ID, + NPLL_ID, + GPLL_ID, + END_PLL_ID, +}; + +enum pll_mode { + SLOW_MODE, + NORM_MODE, + DEEP_SLOW_MODE, +}; + +/*************************************************************************** + * GRF + ***************************************************************************/ +#define GRF_SOC_CON(i) (0x0400 + (i) * 4) +#define GRF_PD_VO_CON0 0x0434 +#define GRF_SOC_STATUS0 0x0480 +#define GRF_CPU_STATUS0 0x0520 +#define GRF_CPU_STATUS1 0x0524 +#define GRF_SOC_NOC_CON0 0x0530 +#define GRF_SOC_NOC_CON1 0x0534 + +#define CKECK_WFE_MSK 0x1 +#define CKECK_WFI_MSK 0x10 +#define CKECK_WFEI_MSK 0x11 + +#define GRF_SOC_CON2_NSWDT_RST_EN 12 + +/*************************************************************************** + * cru + ***************************************************************************/ +#define CRU_MODE 0xa0 +#define CRU_MISC 0xa4 +#define CRU_GLB_CNT_TH 0xb0 +#define CRU_GLB_RST_ST 0xb4 +#define CRU_GLB_SRST_FST 0xb8 +#define CRU_GLB_SRST_SND 0xbc +#define CRU_GLB_RST_CON 0xc0 + +#define CRU_CLKSEL_CON 0x100 +#define CRU_CLKSELS_CON(i) (CRU_CLKSEL_CON + (i) * 4) +#define CRU_CLKSEL_CON_CNT 60 + +#define CRU_CLKGATE_CON 0x200 +#define CRU_CLKGATES_CON(i) (CRU_CLKGATE_CON + (i) * 4) +#define CRU_CLKGATES_CON_CNT 18 + +#define CRU_SOFTRST_CON 0x300 +#define CRU_SOFTRSTS_CON(n) (CRU_SOFTRST_CON + ((n) * 4)) +#define CRU_SOFTRSTS_CON_CNT 12 + +#define CRU_AUTOCS_CON0(id) (0x400 + (id) * 8) +#define CRU_AUTOCS_CON1(id) (0x404 + (id) * 8) + +#define CRU_CONS_GATEID(i) (16 * (i)) +#define GATE_ID(reg, bit) ((reg) * 16 + (bit)) + +#define CRU_GLB_SRST_FST_VALUE 0xfdb9 +#define CRU_GLB_SRST_SND_VALUE 0xeca8 + +#define CRU_GLB_RST_TSADC_EXT 6 +#define CRU_GLB_RST_WDT_EXT 7 + +#define CRU_GLB_CNT_RST_MSK 0xffff +#define CRU_GLB_CNT_RST_1MS 0x5DC0 + +#define CRU_GLB_RST_TSADC_FST BIT(0) +#define CRU_GLB_RST_WDT_FST BIT(1) + +/*************************************************************************** + * pll + ***************************************************************************/ +#define CRU_PLL_CONS(id, i) ((id) * 0x20 + (i) * 4) +#define PLL_CON(i) ((i) * 4) +#define PLL_CON_CNT 5 +#define PLL_LOCK_MSK BIT(10) +#define PLL_MODE_SHIFT(id) ((id) == CPLL_ID ? \ + 2 : \ + ((id) == DPLL_ID ? 
4 : 2 * (id))) +#define PLL_MODE_MSK(id) (0x3 << PLL_MODE_SHIFT(id)) + +#define PLL_LOCKED_TIMEOUT 600000U + +/*************************************************************************** + * GPIO + ***************************************************************************/ +#define SWPORTA_DR 0x00 +#define SWPORTA_DDR 0x04 +#define GPIO_INTEN 0x30 +#define GPIO_INT_STATUS 0x40 +#define GPIO_NUMS 4 + +void clk_gate_con_save(uint32_t *clkgt_save); +void clk_gate_con_restore(uint32_t *clkgt_save); +void clk_gate_con_disable(void); + +void px30_soc_reset_config(void); + +#endif /* __SOC_H__ */ diff --git a/plat/rockchip/px30/include/plat.ld.S b/plat/rockchip/px30/include/plat.ld.S new file mode 100644 index 0000000..44cca0d --- /dev/null +++ b/plat/rockchip/px30/include/plat.ld.S @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __ROCKCHIP_PLAT_LD_S__ +#define __ROCKCHIP_PLAT_LD_S__ + +MEMORY { + PMUSRAM (rwx): ORIGIN = PMUSRAM_BASE, LENGTH = PMUSRAM_RSIZE +} + +SECTIONS +{ + . = PMUSRAM_BASE; + + /* + * pmu_cpuson_entrypoint request address + * align 64K when resume, so put it in the + * start of pmusram + */ + .pmusram : { + ASSERT(. == ALIGN(64 * 1024), + ".pmusram.entry request 64K aligned."); + KEEP(*(.pmusram.entry)) + + __bl31_pmusram_text_start = .; + *(.pmusram.text) + *(.pmusram.rodata) + __bl31_pmusram_text_end = .; + __bl31_pmusram_data_start = .; + *(.pmusram.data) + __bl31_pmusram_data_end = .; + } >PMUSRAM +} + +#endif /* __ROCKCHIP_PLAT_LD_S__ */ diff --git a/plat/rockchip/px30/include/plat_sip_calls.h b/plat/rockchip/px30/include/plat_sip_calls.h new file mode 100644 index 0000000..7b6a6a8 --- /dev/null +++ b/plat/rockchip/px30/include/plat_sip_calls.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __PLAT_SIP_CALLS_H__ +#define __PLAT_SIP_CALLS_H__ + +#define RK_PLAT_SIP_NUM_CALLS 0 + +#endif /* __PLAT_SIP_CALLS_H__ */ diff --git a/plat/rockchip/px30/include/platform_def.h b/plat/rockchip/px30/include/platform_def.h new file mode 100644 index 0000000..a11f84f --- /dev/null +++ b/plat/rockchip/px30/include/platform_def.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __PLATFORM_DEF_H__ +#define __PLATFORM_DEF_H__ + +#include <arch.h> +#include <common_def.h> +#include <px30_def.h> + +#define DEBUG_XLAT_TABLE 0 + +/******************************************************************************* + * Platform binary types for linking + ******************************************************************************/ +#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" +#define PLATFORM_LINKER_ARCH aarch64 + +/******************************************************************************* + * Generic platform constants + ******************************************************************************/ + +/* Size of cacheable stacks */ +#if DEBUG_XLAT_TABLE +#define PLATFORM_STACK_SIZE 0x800 +#elif IMAGE_BL1 +#define PLATFORM_STACK_SIZE 0x440 +#elif IMAGE_BL2 +#define PLATFORM_STACK_SIZE 0x400 +#elif IMAGE_BL31 +#define PLATFORM_STACK_SIZE 0x800 +#elif IMAGE_BL32 +#define PLATFORM_STACK_SIZE 0x440 +#endif + +#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n" + +#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2 +#define PLATFORM_SYSTEM_COUNT U(1) +#define PLATFORM_CLUSTER_COUNT U(1) +#define PLATFORM_CLUSTER0_CORE_COUNT U(4) +#define PLATFORM_CLUSTER1_CORE_COUNT U(0) +#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \ + PLATFORM_CLUSTER0_CORE_COUNT) + +#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \ + PLATFORM_CLUSTER_COUNT + \ + PLATFORM_CORE_COUNT) + +#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2 + +#define PLAT_RK_CLST_TO_CPUID_SHIFT 8 + +/* + * This macro defines the deepest retention state possible. A higher state + * id will represent an invalid or a power down state. + */ +#define PLAT_MAX_RET_STATE 1 + +/* + * This macro defines the deepest power down states possible. Any state ID + * higher than this is invalid. + */ +#define PLAT_MAX_OFF_STATE 2 + +/******************************************************************************* + * Platform memory map related constants + ******************************************************************************/ +/* TF text, ro, rw, Size: 1MB */ +#define TZRAM_BASE (0x0) +#define TZRAM_SIZE (0x100000) + +/******************************************************************************* + * BL31 specific defines. + ******************************************************************************/ +/* + * Put BL3-1 at the top of the Trusted RAM + */ +#define BL31_BASE (TZRAM_BASE + 0x40000) +#define BL31_LIMIT (TZRAM_BASE + TZRAM_SIZE) + +/******************************************************************************* + * Platform specific page table and MMU setup constants + ******************************************************************************/ +#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 32) +#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 32) +#define ADDR_SPACE_SIZE (1ull << 32) +#define MAX_XLAT_TABLES 8 +#define MAX_MMAP_REGIONS 27 + +/******************************************************************************* + * Declarations and constants to access the mailboxes safely. Each mailbox is + * aligned on the biggest cache line size in the platform. This is known only + * to the platform as it might have a combination of integrated and external + * caches. Such alignment ensures that two maiboxes do not sit on the same cache + * line at any cache level. They could belong to different cpus/clusters & + * get written while being protected by different locks causing corruption of + * a valid mailbox address. 
+ ******************************************************************************/ +#define CACHE_WRITEBACK_SHIFT 6 +#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT) + +/* + * Define GICD and GICC and GICR base + */ +#define PLAT_RK_GICD_BASE PX30_GICD_BASE +#define PLAT_RK_GICC_BASE PX30_GICC_BASE + +#define PLAT_RK_UART_BASE PX30_UART_BASE +#define PLAT_RK_UART_CLOCK PX30_UART_CLOCK +#define PLAT_RK_UART_BAUDRATE PX30_BAUDRATE + +#define PLAT_RK_PRIMARY_CPU 0x0 + +#endif /* __PLATFORM_DEF_H__ */ diff --git a/plat/rockchip/px30/plat_sip_calls.c b/plat/rockchip/px30/plat_sip_calls.c new file mode 100644 index 0000000..a4b8e55 --- /dev/null +++ b/plat/rockchip/px30/plat_sip_calls.c @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/debug.h> +#include <common/runtime_svc.h> +#include <lib/mmio.h> + +#include <plat_sip_calls.h> +#include <rockchip_sip_svc.h> + +uintptr_t rockchip_plat_sip_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid); + SMC_RET1(handle, SMC_UNK); +} diff --git a/plat/rockchip/px30/platform.mk b/plat/rockchip/px30/platform.mk new file mode 100644 index 0000000..d14ffc4 --- /dev/null +++ b/plat/rockchip/px30/platform.mk @@ -0,0 +1,73 @@ +# +#Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. +# +#SPDX-License-Identifier: BSD-3-Clause +# + +include drivers/arm/gic/v2/gicv2.mk + +RK_PLAT := plat/rockchip +RK_PLAT_SOC := ${RK_PLAT}/${PLAT} +RK_PLAT_COMMON := ${RK_PLAT}/common + +DISABLE_BIN_GENERATION := 1 + +PLAT_INCLUDES := -Idrivers/arm/gic/common/ \ + -Idrivers/arm/gic/v2/ \ + -Iinclude/plat/common/ \ + -I${RK_PLAT_COMMON}/ \ + -I${RK_PLAT_COMMON}/include/ \ + -I${RK_PLAT_COMMON}/drivers/parameter/ \ + -I${RK_PLAT_COMMON}/pmusram \ + -I${RK_PLAT_SOC}/ \ + -I${RK_PLAT_SOC}/drivers/pmu/ \ + -I${RK_PLAT_SOC}/drivers/secure/ \ + -I${RK_PLAT_SOC}/drivers/soc/ \ + -I${RK_PLAT_SOC}/include/ + +RK_GIC_SOURCES := ${GICV2_SOURCES} \ + plat/common/plat_gicv2.c \ + plat/common/aarch64/crash_console_helpers.S \ + ${RK_PLAT}/common/rockchip_gicv2.c + +PLAT_BL_COMMON_SOURCES := lib/bl_aux_params/bl_aux_params.c \ + lib/xlat_tables/xlat_tables_common.c \ + lib/xlat_tables/aarch64/xlat_tables.c \ + plat/common/plat_psci_common.c + +ifneq (${ENABLE_STACK_PROTECTOR},0) +PLAT_BL_COMMON_SOURCES += ${RK_PLAT_COMMON}/rockchip_stack_protector.c +endif + +BL31_SOURCES += ${RK_GIC_SOURCES} \ + common/desc_image_load.c \ + drivers/arm/cci/cci.c \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + drivers/ti/uart/aarch64/16550_console.S \ + lib/cpus/aarch64/cortex_a35.S \ + ${RK_PLAT_COMMON}/aarch64/plat_helpers.S \ + ${RK_PLAT_COMMON}/aarch64/platform_common.c \ + ${RK_PLAT_COMMON}/bl31_plat_setup.c \ + ${RK_PLAT_COMMON}/params_setup.c \ + ${RK_PLAT_COMMON}/pmusram/cpus_on_fixed_addr.S \ + ${RK_PLAT_COMMON}/plat_pm.c \ + ${RK_PLAT_COMMON}/plat_topology.c \ + ${RK_PLAT_COMMON}/rockchip_sip_svc.c \ + ${RK_PLAT_SOC}/drivers/pmu/pmu.c \ + ${RK_PLAT_SOC}/drivers/secure/secure.c \ + ${RK_PLAT_SOC}/drivers/soc/soc.c \ + ${RK_PLAT_SOC}/plat_sip_calls.c + +ifdef PLAT_RK_SECURE_DDR_MINILOADER +BL31_SOURCES += ${RK_PLAT_COMMON}/drivers/parameter/ddr_parameter.c +endif + +ENABLE_PLAT_COMPAT := 0 +MULTI_CONSOLE_API := 1 + +include lib/libfdt/libfdt.mk + +$(eval $(call 
add_define,PLAT_EXTRA_LD_SCRIPT)) +$(eval $(call add_define,PLAT_SKIP_OPTEE_S_EL1_INT_REGISTER)) +$(eval $(call add_define,PLAT_WARMBOOT_ADDR_NOT_ALIGN)) diff --git a/plat/rockchip/px30/px30_def.h b/plat/rockchip/px30/px30_def.h new file mode 100644 index 0000000..efe789e --- /dev/null +++ b/plat/rockchip/px30/px30_def.h @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __PX30_DEF_H__ +#define __PX30_DEF_H__ + +#define MAJOR_VERSION (1) +#define MINOR_VERSION (0) + +#define SIZE_K(n) ((n) * 1024) +#define SIZE_M(n) ((n) * 1024 * 1024) + +#define WITH_16BITS_WMSK(bits) (0xffff0000 | (bits)) + +/* Special value used to verify platform parameters from BL2 to BL3-1 */ +#define RK_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL + +#define PMU_BASE 0xff000000 +#define PMU_SIZE SIZE_K(64) + +#define PMUGRF_BASE 0xff010000 +#define PMUGRF_SIZE SIZE_K(64) + +#define PMUSRAM_BASE 0xff020000 +#define PMUSRAM_SIZE SIZE_K(64) +#define PMUSRAM_RSIZE SIZE_K(8) + +#define UART0_BASE 0xff030000 +#define UART0_SIZE SIZE_K(64) + +#define GPIO0_BASE 0xff040000 +#define GPIO0_SIZE SIZE_K(64) + +#define PMUSGRF_BASE 0xff050000 +#define PMUSGRF_SIZE SIZE_K(64) + +#define INTSRAM_BASE 0xff0e0000 +#define INTSRAM_SIZE SIZE_K(64) + +#define SGRF_BASE 0xff11c000 +#define SGRF_SIZE SIZE_K(16) + +#define GIC400_BASE 0xff130000 +#define GIC400_SIZE SIZE_K(64) + +#define GRF_BASE 0xff140000 +#define GRF_SIZE SIZE_K(64) + +#define UART1_BASE 0xff158000 +#define UART1_SIZE SIZE_K(64) + +#define UART2_BASE 0xff160000 +#define UART2_SIZE SIZE_K(64) + +#define UART3_BASE 0xff168000 +#define UART3_SIZE SIZE_K(64) + +#define UART5_BASE 0xff178000 +#define UART5_SIZE SIZE_K(64) + +#define I2C0_BASE 0xff180000 +#define I2C0_SIZE SIZE_K(64) + +#define PWM0_BASE 0xff200000 +#define PWM0_SIZE SIZE_K(32) + +#define PWM1_BASE 0xff208000 +#define PWM1_SIZE SIZE_K(32) + +#define NTIME_BASE 0xff210000 +#define NTIME_SIZE SIZE_K(64) + +#define STIME_BASE 0xff220000 +#define STIME_SIZE SIZE_K(64) + +#define DCF_BASE 0xff230000 +#define DCF_SIZE SIZE_K(64) + +#define GPIO1_BASE 0xff250000 +#define GPIO1_SIZE SIZE_K(64) + +#define GPIO2_BASE 0xff260000 +#define GPIO2_SIZE SIZE_K(64) + +#define GPIO3_BASE 0xff270000 +#define GPIO3_SIZE SIZE_K(64) + +#define DDR_PHY_BASE 0xff2a0000 +#define DDR_PHY_SIZE SIZE_K(64) + +#define CRU_BASE 0xff2b0000 +#define CRU_SIZE SIZE_K(32) + +#define CRU_BOOST_BASE 0xff2b8000 +#define CRU_BOOST_SIZE SIZE_K(16) + +#define PMUCRU_BASE 0xff2bc000 +#define PMUCRU_SIZE SIZE_K(16) + +#define VOP_BASE 0xff460000 +#define VOP_SIZE SIZE_K(16) + +#define SERVER_MSCH_BASE 0xff530000 +#define SERVER_MSCH_SIZE SIZE_K(64) + +#define FIREWALL_DDR_BASE 0xff534000 +#define FIREWALL_DDR_SIZE SIZE_K(16) + +#define DDR_UPCTL_BASE 0xff600000 +#define DDR_UPCTL_SIZE SIZE_K(64) + +#define DDR_MNTR_BASE 0xff610000 +#define DDR_MNTR_SIZE SIZE_K(64) + +#define DDR_STDBY_BASE 0xff620000 +#define DDR_STDBY_SIZE SIZE_K(64) + +#define DDRGRF_BASE 0xff630000 +#define DDRGRF_SIZE SIZE_K(32) + +/************************************************************************** + * UART related constants + **************************************************************************/ +#define PX30_UART_BASE UART2_BASE +#define PX30_BAUDRATE 1500000 +#define PX30_UART_CLOCK 24000000 + +/****************************************************************************** + * System counter frequency related constants + 
******************************************************************************/ +#define SYS_COUNTER_FREQ_IN_TICKS 24000000 +#define SYS_COUNTER_FREQ_IN_MHZ 24 + +/****************************************************************************** + * GIC-400 & interrupt handling related constants + ******************************************************************************/ + +/* Base rk_platform compatible GIC memory map */ +#define PX30_GICD_BASE (GIC400_BASE + 0x1000) +#define PX30_GICC_BASE (GIC400_BASE + 0x2000) +#define PX30_GICR_BASE 0 /* no GICR in GIC-400 */ + +/****************************************************************************** + * sgi, ppi + ******************************************************************************/ +#define RK_IRQ_SEC_PHY_TIMER 29 + +#define RK_IRQ_SEC_SGI_0 8 +#define RK_IRQ_SEC_SGI_1 9 +#define RK_IRQ_SEC_SGI_2 10 +#define RK_IRQ_SEC_SGI_3 11 +#define RK_IRQ_SEC_SGI_4 12 +#define RK_IRQ_SEC_SGI_5 13 +#define RK_IRQ_SEC_SGI_6 14 +#define RK_IRQ_SEC_SGI_7 15 + +/* + * Define a list of Group 0 interrupts. + */ +#define PLAT_RK_GICV2_G0_IRQS \ + INTR_PROP_DESC(RK_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, \ + GICV2_INTR_GROUP0, GIC_INTR_CFG_LEVEL), \ + INTR_PROP_DESC(RK_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, \ + GICV2_INTR_GROUP0, GIC_INTR_CFG_LEVEL) + +#define SHARE_MEM_BASE 0x100000 /* [1MB, 1MB+60K] */ +#define SHARE_MEM_PAGE_NUM 15 +#define SHARE_MEM_SIZE SIZE_K(SHARE_MEM_PAGE_NUM * 4) + +#define DDR_PARAM_BASE 0x02000000 +#define DDR_PARAM_SIZE SIZE_K(4) + +#endif /* __PX30_DEF_H__ */ diff --git a/plat/rockchip/rk3288/drivers/pmu/plat_pmu_macros.S b/plat/rockchip/rk3288/drivers/pmu/plat_pmu_macros.S new file mode 100644 index 0000000..2003749 --- /dev/null +++ b/plat/rockchip/rk3288/drivers/pmu/plat_pmu_macros.S @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <platform_def.h> + +.macro func_rockchip_clst_warmboot + /* Nothing to do for rk3288 */ +.endm + +.macro rockchip_clst_warmboot_data + /* Nothing to do for rk3288 */ +.endm diff --git a/plat/rockchip/rk3288/drivers/pmu/pmu.c b/plat/rockchip/rk3288/drivers/pmu/pmu.c new file mode 100644 index 0000000..d6d7098 --- /dev/null +++ b/plat/rockchip/rk3288/drivers/pmu/pmu.c @@ -0,0 +1,391 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <plat/common/platform.h> + +#include <plat_private.h> +#include <pmu.h> +#include <pmu_com.h> +#include <rk3288_def.h> +#include <secure.h> +#include <soc.h> + +DEFINE_BAKERY_LOCK(rockchip_pd_lock); + +static uint32_t cpu_warm_boot_addr; + +static uint32_t store_pmu_pwrmode_con; +static uint32_t store_sgrf_soc_con0; +static uint32_t store_sgrf_cpu_con0; + +/* These enum are variants of low power mode */ +enum { + ROCKCHIP_ARM_OFF_LOGIC_NORMAL = 0, + ROCKCHIP_ARM_OFF_LOGIC_DEEP = 1, +}; + +static inline int rk3288_pmu_bus_idle(uint32_t req, uint32_t idle) +{ + uint32_t mask = BIT(req); + uint32_t idle_mask = 0; + uint32_t idle_target = 0; + uint32_t val; + uint32_t wait_cnt = 0; + + switch (req) { + case bus_ide_req_gpu: + idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu); + idle_target = (idle << pmu_idle_ack_gpu) | + (idle << pmu_idle_gpu); + break; + case bus_ide_req_core: + idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core); + idle_target = (idle << pmu_idle_ack_core) | + (idle << pmu_idle_core); + break; + case bus_ide_req_cpup: + idle_mask = BIT(pmu_idle_ack_cpup) | BIT(pmu_idle_cpup); + idle_target = (idle << pmu_idle_ack_cpup) | + (idle << pmu_idle_cpup); + break; + case bus_ide_req_bus: + idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus); + idle_target = (idle << pmu_idle_ack_bus) | + (idle << pmu_idle_bus); + break; + case bus_ide_req_dma: + idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma); + idle_target = (idle << pmu_idle_ack_dma) | + (idle << pmu_idle_dma); + break; + case bus_ide_req_peri: + idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri); + idle_target = (idle << pmu_idle_ack_peri) | + (idle << pmu_idle_peri); + break; + case bus_ide_req_video: + idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video); + idle_target = (idle << pmu_idle_ack_video) | + (idle << pmu_idle_video); + break; + case bus_ide_req_hevc: + idle_mask = BIT(pmu_idle_ack_hevc) | BIT(pmu_idle_hevc); + idle_target = (idle << pmu_idle_ack_hevc) | + (idle << pmu_idle_hevc); + break; + case bus_ide_req_vio: + idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio); + idle_target = (pmu_idle_ack_vio) | + (idle << pmu_idle_vio); + break; + case bus_ide_req_alive: + idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive); + idle_target = (idle << pmu_idle_ack_alive) | + (idle << pmu_idle_alive); + break; + default: + ERROR("%s: Unsupported the idle request\n", __func__); + break; + } + + val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ); + if (idle) + val |= mask; + else + val &= ~mask; + + mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val); + + while ((mmio_read_32(PMU_BASE + + PMU_BUS_IDE_ST) & idle_mask) != idle_target) { + wait_cnt++; + if (!(wait_cnt % MAX_WAIT_CONUT)) + WARN("%s:st=%x(%x)\n", __func__, + mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST), + idle_mask); + } + + return 0; +} + +static bool rk3288_sleep_disable_osc(void) +{ + static const uint32_t reg_offset[] = { GRF_UOC0_CON0, GRF_UOC1_CON0, + GRF_UOC2_CON0 }; + uint32_t reg, i; + + /* + * if any usb phy is still on(GRF_SIDDQ==0), that means we need the + * function of usb wakeup, so do not switch to 32khz, since the usb phy + * clk does not connect to 32khz osc + */ + for (i = 0; i < ARRAY_SIZE(reg_offset); i++) { + reg = mmio_read_32(GRF_BASE + reg_offset[i]); + if (!(reg & GRF_SIDDQ)) + 
return false; + } + + return true; +} + +static void pmu_set_sleep_mode(int level) +{ + uint32_t mode_set, mode_set1; + bool osc_disable = rk3288_sleep_disable_osc(); + + mode_set = BIT(pmu_mode_glb_int_dis) | BIT(pmu_mode_l2_flush_en) | + BIT(pmu_mode_sref0_enter) | BIT(pmu_mode_sref1_enter) | + BIT(pmu_mode_ddrc0_gt) | BIT(pmu_mode_ddrc1_gt) | + BIT(pmu_mode_en) | BIT(pmu_mode_chip_pd) | + BIT(pmu_mode_scu_pd); + + mode_set1 = BIT(pmu_mode_clr_core) | BIT(pmu_mode_clr_cpup); + + if (level == ROCKCHIP_ARM_OFF_LOGIC_DEEP) { + /* arm off, logic deep sleep */ + mode_set |= BIT(pmu_mode_bus_pd) | BIT(pmu_mode_pmu_use_lf) | + BIT(pmu_mode_ddrio1_ret) | + BIT(pmu_mode_ddrio0_ret) | + BIT(pmu_mode_pmu_alive_use_lf) | + BIT(pmu_mode_pll_pd); + + if (osc_disable) + mode_set |= BIT(pmu_mode_osc_dis); + + mode_set1 |= BIT(pmu_mode_clr_alive) | BIT(pmu_mode_clr_bus) | + BIT(pmu_mode_clr_peri) | BIT(pmu_mode_clr_dma); + + mmio_write_32(PMU_BASE + PMU_WAKEUP_CFG1, + pmu_armint_wakeup_en); + + /* + * In deep suspend we use PMU_PMU_USE_LF to let the rk3288 + * switch its main clock supply to the alternative 32kHz + * source. Therefore set 30ms on a 32kHz clock for pmic + * stabilization. Similar 30ms on 24MHz for the other + * mode below. + */ + mmio_write_32(PMU_BASE + PMU_STABL_CNT, 32 * 30); + + /* only wait for stabilization, if we turned the osc off */ + mmio_write_32(PMU_BASE + PMU_OSC_CNT, + osc_disable ? 32 * 30 : 0); + } else { + /* + * arm off, logic normal + * if pmu_clk_core_src_gate_en is not set, + * wakeup will fail + */ + mode_set |= BIT(pmu_mode_core_src_gt); + + mmio_write_32(PMU_BASE + PMU_WAKEUP_CFG1, + BIT(pmu_armint_wakeup_en) | + BIT(pmu_gpioint_wakeup_en)); + + /* 30ms on a 24MHz clock for pmic stabilization */ + mmio_write_32(PMU_BASE + PMU_STABL_CNT, 24000 * 30); + + /* oscillator is still running, so no need to wait */ + mmio_write_32(PMU_BASE + PMU_OSC_CNT, 0); + } + + mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, mode_set); + mmio_write_32(PMU_BASE + PMU_PWRMODE_CON1, mode_set1); +} + +static int cpus_power_domain_on(uint32_t cpu_id) +{ + uint32_t cpu_pd; + + cpu_pd = PD_CPU0 + cpu_id; + + /* if the core has been on, power it off first */ + if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { + /* put core in reset - some sort of A12/A17 bug */ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(0), + BIT(cpu_id) | (BIT(cpu_id) << 16)); + + pmu_power_domain_ctr(cpu_pd, pmu_pd_off); + } + + pmu_power_domain_ctr(cpu_pd, pmu_pd_on); + + /* pull core out of reset */ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(0), BIT(cpu_id) << 16); + + return 0; +} + +static int cpus_power_domain_off(uint32_t cpu_id) +{ + uint32_t cpu_pd = PD_CPU0 + cpu_id; + + if (pmu_power_domain_st(cpu_pd) == pmu_pd_off) + return 0; + + if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK)) + return -EINVAL; + + /* put core in reset - some sort of A12/A17 bug */ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(0), + BIT(cpu_id) | (BIT(cpu_id) << 16)); + + pmu_power_domain_ctr(cpu_pd, pmu_pd_off); + + return 0; +} + +static void nonboot_cpus_off(void) +{ + uint32_t boot_cpu, cpu; + + boot_cpu = plat_my_core_pos(); + boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr()); + + /* turn off non-boot cpus */ + for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { + if (cpu == boot_cpu) + continue; + + cpus_power_domain_off(cpu); + } +} + +void sram_save(void) +{ + /* TODO: support the sram save for rk3288 SoCs */ +} + +void sram_restore(void) +{ + /* TODO: support the sram restore for rk3288 SoCs */ +} + +int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, 
uint64_t entrypoint) +{ + uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); + + assert(cpu_id < PLATFORM_CORE_COUNT); + assert(cpuson_flags[cpu_id] == 0); + cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; + cpuson_entry_point[cpu_id] = entrypoint; + dsb(); + + cpus_power_domain_on(cpu_id); + + /* + * We communicate with the bootrom to activate the cpus other + * than cpu0. After a blob of initialization code, they will + * stay in a wfe state; once they are activated, they check + * the mailbox: + * sram_base_addr + 4: 0xdeadbeaf + * sram_base_addr + 8: start address for pc + * cpu0 needs to wait for the other cpus to enter the wfe + * state. The wait time is affected by many aspects + * (e.g: cpu frequency, bootrom frequency, sram frequency, ...) + */ + mdelay(1); /* give the cpus other than cpu0 time to start up */ + + /* tell the bootrom mailbox where to start from */ + mmio_write_32(SRAM_BASE + 8, cpu_warm_boot_addr); + mmio_write_32(SRAM_BASE + 4, 0xDEADBEAF); + dsb(); + sev(); + + return 0; +} + +int rockchip_soc_cores_pwr_dm_on_finish(void) +{ + return 0; +} + +int rockchip_soc_sys_pwr_dm_resume(void) +{ + mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, store_pmu_pwrmode_con); + mmio_write_32(SGRF_BASE + SGRF_CPU_CON(0), + store_sgrf_cpu_con0 | SGRF_DAPDEVICE_MSK); + + /* disable fastboot mode */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(0), + store_sgrf_soc_con0 | SGRF_FAST_BOOT_DIS); + + secure_watchdog_ungate(); + clk_gate_con_restore(); + clk_sel_con_restore(); + clk_plls_resume(); + + secure_gic_init(); + plat_rockchip_gic_init(); + + return 0; +} + +int rockchip_soc_sys_pwr_dm_suspend(void) +{ + nonboot_cpus_off(); + + store_sgrf_cpu_con0 = mmio_read_32(SGRF_BASE + SGRF_CPU_CON(0)); + store_sgrf_soc_con0 = mmio_read_32(SGRF_BASE + SGRF_SOC_CON(0)); + store_pmu_pwrmode_con = mmio_read_32(PMU_BASE + PMU_PWRMODE_CON); + + /* save clk-gates and ungate all for suspend */ + clk_gate_con_save(); + clk_gate_con_disable(); + clk_sel_con_save(); + + pmu_set_sleep_mode(ROCKCHIP_ARM_OFF_LOGIC_NORMAL); + + clk_plls_suspend(); + secure_watchdog_gate(); + + /* + * The dapswjdp cannot auto-reset before resume, which may cause it + * to access illegal addresses during resume. Let's disable it before + * suspend; the MASKROM will enable it again. + */ + mmio_write_32(SGRF_BASE + SGRF_CPU_CON(0), SGRF_DAPDEVICE_MSK); + + /* + * SGRF_FAST_BOOT_EN - make the system boot from FAST_BOOT_ADDR + */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(0), SGRF_FAST_BOOT_ENA); + + /* boot-address of resuming system is from this register value */ + mmio_write_32(SGRF_BASE + SGRF_FAST_BOOT_ADDR, + (uint32_t)&pmu_cpuson_entrypoint); + + /* flush all caches - otherwise we might lose the resume address */ + dcsw_op_all(DC_OP_CISW); + + return 0; +} + +void rockchip_plat_mmu_svc_mon(void) +{ +} + +void plat_rockchip_pmu_init(void) +{ + uint32_t cpu; + + cpu_warm_boot_addr = (uint32_t)platform_cpu_warmboot; + + /* on boot all power-domains are on */ + for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) + cpuson_flags[cpu] = pmu_pd_on; + + nonboot_cpus_off(); +} diff --git a/plat/rockchip/rk3288/drivers/pmu/pmu.h b/plat/rockchip/rk3288/drivers/pmu/pmu.h new file mode 100644 index 0000000..06d5528 --- /dev/null +++ b/plat/rockchip/rk3288/drivers/pmu/pmu.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PMU_H +#define PMU_H + +/* Allocate sp reginon in pmusram */ +#define PSRAM_SP_SIZE 0x80 +#define PSRAM_SP_BOTTOM (PSRAM_SP_TOP - PSRAM_SP_SIZE) + +/***************************************************************************** + * pmu con,reg + *****************************************************************************/ +#define PMU_WAKEUP_CFG0 0x0 +#define PMU_WAKEUP_CFG1 0x4 +#define PMU_PWRDN_CON 0x8 +#define PMU_PWRDN_ST 0xc + +#define PMU_PWRMODE_CON 0x18 +#define PMU_BUS_IDE_REQ 0x10 +#define PMU_BUS_IDE_ST 0x14 + +#define PMU_OSC_CNT 0x20 +#define PMU_PLL_CNT 0x24 +#define PMU_STABL_CNT 0x28 +#define PMU_DDRIO0_PWR_CNT 0x2c +#define PMU_DDRIO1_PWR_CNT 0x30 +#define PMU_WKUPRST_CNT 0x44 +#define PMU_SFT_CON 0x48 +#define PMU_PWRMODE_CON1 0x90 + +enum pmu_pdid { + PD_CPU0 = 0, + PD_CPU1, + PD_CPU2, + PD_CPU3, + PD_BUS = 5, + PD_PERI, + PD_VIO, + PD_VIDEO, + PD_GPU, + PD_SCU = 11, + PD_HEVC = 14, + PD_END +}; + +enum pmu_bus_ide { + bus_ide_req_bus = 0, + bus_ide_req_peri, + bus_ide_req_gpu, + bus_ide_req_video, + bus_ide_req_vio, + bus_ide_req_core, + bus_ide_req_alive, + bus_ide_req_dma, + bus_ide_req_cpup, + bus_ide_req_hevc, + bus_ide_req_end +}; + +enum pmu_pwrmode { + pmu_mode_en = 0, + pmu_mode_core_src_gt, + pmu_mode_glb_int_dis, + pmu_mode_l2_flush_en, + pmu_mode_bus_pd, + pmu_mode_cpu0_pd, + pmu_mode_scu_pd, + pmu_mode_pll_pd = 7, + pmu_mode_chip_pd, + pmu_mode_pwr_off_comb, + pmu_mode_pmu_alive_use_lf, + pmu_mode_pmu_use_lf, + pmu_mode_osc_dis = 12, + pmu_mode_input_clamp, + pmu_mode_wkup_rst, + pmu_mode_sref0_enter, + pmu_mode_sref1_enter, + pmu_mode_ddrio0_ret, + pmu_mode_ddrio1_ret, + pmu_mode_ddrc0_gt, + pmu_mode_ddrc1_gt, + pmu_mode_ddrio0_ret_deq, + pmu_mode_ddrio1_ret_deq, +}; + +enum pmu_pwrmode1 { + pmu_mode_clr_bus = 0, + pmu_mode_clr_core, + pmu_mode_clr_cpup, + pmu_mode_clr_alive, + pmu_mode_clr_dma, + pmu_mode_clr_peri, + pmu_mode_clr_gpu, + pmu_mode_clr_video, + pmu_mode_clr_hevc, + pmu_mode_clr_vio +}; + +enum pmu_sft_con { + pmu_sft_ddrio0_ret_cfg = 6, + pmu_sft_ddrio1_ret_cfg = 9, + pmu_sft_l2flsh = 15, +}; + +enum pmu_wakeup_cfg1 { + pmu_armint_wakeup_en = 0, + pmu_gpio_wakeup_negedge, + pmu_sdmmc0_wakeup_en, + pmu_gpioint_wakeup_en, +}; + +enum pmu_bus_idle_st { + pmu_idle_bus = 0, + pmu_idle_peri, + pmu_idle_gpu, + pmu_idle_video, + pmu_idle_vio, + pmu_idle_core, + pmu_idle_alive, + pmu_idle_dma, + pmu_idle_cpup, + pmu_idle_hevc, + pmu_idle_ack_bus = 16, + pmu_idle_ack_peri, + pmu_idle_ack_gpu, + pmu_idle_ack_video, + pmu_idle_ack_vio, + pmu_idle_ack_core, + pmu_idle_ack_alive, + pmu_idle_ack_dma, + pmu_idle_ack_cpup, + pmu_idle_ack_hevc, +}; + +#define CHECK_CPU_WFIE_BASE (0) + +#define clstl_cpu_wfe -1 +#define clstb_cpu_wfe -1 +#define CKECK_WFEI_MSK 0 + + +#define PD_CTR_LOOP 500 +#define CHK_CPU_LOOP 500 + +#define MAX_WAIT_CONUT 1000 + +#endif /* PMU_H */ diff --git a/plat/rockchip/rk3288/drivers/secure/secure.c b/plat/rockchip/rk3288/drivers/secure/secure.c new file mode 100644 index 0000000..25e1cca --- /dev/null +++ b/plat/rockchip/rk3288/drivers/secure/secure.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> + +#include <plat_private.h> +#include <secure.h> +#include <soc.h> + +static void sgrf_ddr_rgn_global_bypass(uint32_t bypass) +{ + if (bypass) + /* set bypass (non-secure regions) for whole ddr regions */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(21), + SGRF_DDR_RGN_BYPS); + else + /* cancel bypass for whole ddr regions */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(21), + SGRF_DDR_RGN_NO_BYPS); +} + +/** + * There are 8 + 1 regions for DDR secure control: + * DDR_RGN_0 ~ DDR_RGN_7: Per DDR_RGNs grain size is 1MB + * DDR_RGN_X - the memories of exclude DDR_RGN_0 ~ DDR_RGN_7 + * + * SGRF_SOC_CON6 - start address of RGN_0 + control + * SGRF_SOC_CON7 - end address of RGN_0 + * ... + * SGRF_SOC_CON20 - start address of the RGN_7 + control + * SGRF_SOC_CON21 - end address of the RGN_7 + RGN_X control + * + * @rgn - the DDR regions 0 ~ 7 which are can be configured. + * @st - start address to set as secure + * @sz - length of area to set as secure + * The @st_mb and @ed_mb indicate the start and end addresses for which to set + * the security, and the unit is megabyte. When the st_mb == 0, ed_mb == 0, the + * address range 0x0 ~ 0xfffff is secure. + * + * For example, if we would like to set the range [0, 32MB) is security via + * DDR_RGN0, then rgn == 0, st_mb == 0, ed_mb == 31. + */ +static void sgrf_ddr_rgn_config(uint32_t rgn, uintptr_t st, size_t sz) +{ + uintptr_t ed = st + sz; + uintptr_t st_mb, ed_mb; + + assert(rgn <= 7); + assert(st < ed); + + /* check aligned 1MB */ + assert(st % SIZE_M(1) == 0); + assert(ed % SIZE_M(1) == 0); + + st_mb = st / SIZE_M(1); + ed_mb = ed / SIZE_M(1); + + /* set ddr region addr start */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6 + (rgn * 2)), + BITS_WITH_WMASK(st_mb, SGRF_DDR_RGN_ADDR_WMSK, 0)); + + /* set ddr region addr end */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6 + (rgn * 2) + 1), + BITS_WITH_WMASK((ed_mb - 1), SGRF_DDR_RGN_ADDR_WMSK, 0)); + + /* select region security */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6 + (rgn * 2)), + SGRF_DDR_RGN_SECURE_SEL); + + /* enable region security */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6 + (rgn * 2)), + SGRF_DDR_RGN_SECURE_EN); +} + +void secure_watchdog_gate(void) +{ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(0), SGRF_PCLK_WDT_GATE); +} + +void secure_watchdog_ungate(void) +{ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(0), SGRF_PCLK_WDT_UNGATE); +} + +__pmusramfunc void sram_secure_timer_init(void) +{ + mmio_write_32(STIMER1_BASE + TIMER_CONTROL_REG, 0); + + mmio_write_32(STIMER1_BASE + TIMER_LOAD_COUNT0, 0xffffffff); + mmio_write_32(STIMER1_BASE + TIMER_LOAD_COUNT1, 0xffffffff); + + /* auto reload & enable the timer */ + mmio_write_32(STIMER1_BASE + TIMER_CONTROL_REG, TIMER_EN); +} + +void secure_gic_init(void) +{ + /* (re-)enable non-secure access to the gic*/ + mmio_write_32(CORE_AXI_BUS_BASE + CORE_AXI_SECURITY0, + AXI_SECURITY0_GIC); +} + +void secure_timer_init(void) +{ + mmio_write_32(STIMER1_BASE + TIMER_CONTROL_REG, 0); + + mmio_write_32(STIMER1_BASE + TIMER_LOAD_COUNT0, 0xffffffff); + mmio_write_32(STIMER1_BASE + TIMER_LOAD_COUNT1, 0xffffffff); + + /* auto reload & enable the timer */ + mmio_write_32(STIMER1_BASE + TIMER_CONTROL_REG, TIMER_EN); +} + +void secure_sgrf_init(void) +{ + /* + * We use the first sram part to talk to the bootrom, + * so make it secure. 
+ */ + mmio_write_32(TZPC_BASE + TZPC_R0SIZE, TZPC_SRAM_SECURE_4K(1)); + + secure_gic_init(); + + /* set all master ip to non-secure */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2), SGRF_SOC_CON2_MST_NS); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3), SGRF_SOC_CON3_MST_NS); + + /* setting all configurable ip into non-secure */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(4), + SGRF_SOC_CON4_SECURE_WMSK /*TODO:|SGRF_STIMER_SECURE*/); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(5), SGRF_SOC_CON5_SECURE_WMSK); + + /* secure dma to non-secure */ + mmio_write_32(TZPC_BASE + TZPC_DECPROT1SET, 0xff); + mmio_write_32(TZPC_BASE + TZPC_DECPROT2SET, 0xff); + mmio_write_32(SGRF_BASE + SGRF_BUSDMAC_CON(1), 0x3800); + dsb(); + + /* rst dma1 */ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1), + RST_DMA1_MSK | (RST_DMA1_MSK << 16)); + /* rst dma2 */ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(4), + RST_DMA2_MSK | (RST_DMA2_MSK << 16)); + + dsb(); + + /* release dma1 rst*/ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1), (RST_DMA1_MSK << 16)); + /* release dma2 rst*/ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(4), (RST_DMA2_MSK << 16)); +} + +void secure_sgrf_ddr_rgn_init(void) +{ + sgrf_ddr_rgn_config(0, TZRAM_BASE, TZRAM_SIZE); + sgrf_ddr_rgn_global_bypass(0); +} diff --git a/plat/rockchip/rk3288/drivers/secure/secure.h b/plat/rockchip/rk3288/drivers/secure/secure.h new file mode 100644 index 0000000..6c0b2b7 --- /dev/null +++ b/plat/rockchip/rk3288/drivers/secure/secure.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SECURE_H +#define SECURE_H + +/****************************************************************************** + * TZPC TrustZone controller + ******************************************************************************/ + +#define TZPC_R0SIZE 0x0 +#define TZPC_SRAM_SECURE_4K(n) ((n) > 0x200 ? 0x200 : (n)) +#define TZPC_DECPROT1STAT 0x80c +#define TZPC_DECPROT1SET 0x810 +#define TZPC_DECPROT1CLR 0x814 +#define TZPC_DECPROT2STAT 0x818 +#define TZPC_DECPROT2SET 0x818 +#define TZPC_DECPROT2CLR 0x820 + +/************************************************** + * sgrf reg, offset + **************************************************/ +/* + * soc_con0-5 start at 0x0, soc_con6-... start art 0x50 + * adjusted for the 5 lower registers + */ +#define SGRF_SOC_CON(n) ((((n) < 6) ? 
0x0 : 0x38) + (n) * 4) +#define SGRF_BUSDMAC_CON(n) (0x20 + (n) * 4) +#define SGRF_CPU_CON(n) (0x40 + (n) * 4) +#define SGRF_SOC_STATUS(n) (0x100 + (n) * 4) +#define SGRF_FAST_BOOT_ADDR 0x120 + +/* SGRF_SOC_CON0 */ +#define SGRF_FAST_BOOT_ENA BIT_WITH_WMSK(8) +#define SGRF_FAST_BOOT_DIS WMSK_BIT(8) +#define SGRF_PCLK_WDT_GATE BIT_WITH_WMSK(6) +#define SGRF_PCLK_WDT_UNGATE WMSK_BIT(6) +#define SGRF_PCLK_STIMER_GATE BIT_WITH_WMSK(4) + +#define SGRF_SOC_CON2_MST_NS 0xffe0ffe0 +#define SGRF_SOC_CON3_MST_NS 0x003f003f + +/* SGRF_SOC_CON4 */ +#define SGRF_SOC_CON4_SECURE_WMSK 0xffff0000 +#define SGRF_DDRC1_SECURE BIT_WITH_WMSK(12) +#define SGRF_DDRC0_SECURE BIT_WITH_WMSK(11) +#define SGRF_PMUSRAM_SECURE BIT_WITH_WMSK(8) +#define SGRF_WDT_SECURE BIT_WITH_WMSK(7) +#define SGRF_STIMER_SECURE BIT_WITH_WMSK(6) + +/* SGRF_SOC_CON5 */ +#define SGRF_SLV_SEC_BYPS BIT_WITH_WMSK(15) +#define SGRF_SLV_SEC_NO_BYPS WMSK_BIT(15) +#define SGRF_SOC_CON5_SECURE_WMSK 0x00ff0000 + +/* ddr regions in SGRF_SOC_CON6 and following */ +#define SGRF_DDR_RGN_SECURE_SEL BIT_WITH_WMSK(15) +#define SGRF_DDR_RGN_SECURE_EN BIT_WITH_WMSK(14) +#define SGRF_DDR_RGN_ADDR_WMSK 0x0fff + +/* SGRF_SOC_CON21 */ +/* All security of the DDR RGNs are bypassed */ +#define SGRF_DDR_RGN_BYPS BIT_WITH_WMSK(15) +#define SGRF_DDR_RGN_NO_BYPS WMSK_BIT(15) + +/* SGRF_CPU_CON0 */ +#define SGRF_DAPDEVICE_ENA BIT_WITH_WMSK(0) +#define SGRF_DAPDEVICE_MSK WMSK_BIT(0) + +/***************************************************************************** + * core-axi + *****************************************************************************/ +#define CORE_AXI_SECURITY0 0x08 +#define AXI_SECURITY0_GIC BIT(0) + +/***************************************************************************** + * secure timer + *****************************************************************************/ +#define TIMER_LOAD_COUNT0 0x00 +#define TIMER_LOAD_COUNT1 0x04 +#define TIMER_CURRENT_VALUE0 0x08 +#define TIMER_CURRENT_VALUE1 0x0C +#define TIMER_CONTROL_REG 0x10 +#define TIMER_INTSTATUS 0x18 + +#define TIMER_EN 0x1 + +#define STIMER1_BASE (STIME_BASE + 0x20) + +/* export secure operating APIs */ +void secure_watchdog_gate(void); +void secure_watchdog_ungate(void); +void secure_gic_init(void); +void secure_timer_init(void); +void secure_sgrf_init(void); +void secure_sgrf_ddr_rgn_init(void); +__pmusramfunc void sram_secure_timer_init(void); + +#endif /* SECURE_H */ diff --git a/plat/rockchip/rk3288/drivers/soc/soc.c b/plat/rockchip/rk3288/drivers/soc/soc.c new file mode 100644 index 0000000..36f410b --- /dev/null +++ b/plat/rockchip/rk3288/drivers/soc/soc.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/mmio.h> + +#include <plat_private.h> +#include <rk3288_def.h> +#include <soc.h> +#include <secure.h> + +/* sleep data for pll suspend */ +static struct deepsleep_data_s slp_data; + +/* Table of regions to map using the MMU. 
*/ +const mmap_region_t plat_rk_mmap[] = { + MAP_REGION_FLAT(GIC400_BASE, GIC400_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(STIME_BASE, STIME_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SGRF_BASE, SGRF_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(TZPC_BASE, TZPC_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE, + MT_MEMORY | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SRAM_BASE, SRAM_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(PMU_BASE, PMU_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART0_BASE, UART0_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART1_BASE, UART1_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART2_BASE, UART2_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART3_BASE, UART3_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART4_BASE, UART4_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(CRU_BASE, CRU_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(GRF_BASE, GRF_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_PCTL0_BASE, DDR_PCTL0_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_PHY0_BASE, DDR_PHY0_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_PCTL1_BASE, DDR_PCTL1_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_PHY1_BASE, DDR_PHY1_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SERVICE_BUS_BASE, SERVICE_BUS_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(CORE_AXI_BUS_BASE, CORE_AXI_BUS_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + { 0 } +}; + +/* The RockChip power domain tree descriptor */ +const unsigned char rockchip_power_domain_tree_desc[] = { + /* No of root nodes */ + PLATFORM_SYSTEM_COUNT, + /* No of children for the root node */ + PLATFORM_CLUSTER_COUNT, + /* No of children for the first cluster node */ + PLATFORM_CLUSTER0_CORE_COUNT, +}; + +void plat_rockchip_soc_init(void) +{ + secure_timer_init(); + secure_sgrf_init(); + /* + * We cannot enable ddr security at this point, as the kernel + * seems to have an issue with it even living in the same 128MB + * memory block. Only when moving the kernel to the second + * 128MB block does it not conflict, but then we'd loose this + * memory area for use. Late maybe enable + * secure_sgrf_ddr_rgn_init(); + */ +} + +void regs_update_bits(uintptr_t addr, uint32_t val, + uint32_t mask, uint32_t shift) +{ + uint32_t tmp, orig; + + orig = mmio_read_32(addr); + + tmp = orig & ~(mask << shift); + tmp |= (val & mask) << shift; + + if (tmp != orig) + mmio_write_32(addr, tmp); + dsb(); +} + +static void pll_save(uint32_t pll_id) +{ + uint32_t *pll = slp_data.pll_con[pll_id]; + + pll[0] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 0)); + pll[1] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 1)); + pll[2] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 2)); + pll[3] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 3)); +} + +void clk_plls_suspend(void) +{ + pll_save(NPLL_ID); + pll_save(CPLL_ID); + pll_save(GPLL_ID); + pll_save(APLL_ID); + slp_data.pll_mode = mmio_read_32(CRU_BASE + PLL_MODE_CON); + + /* + * Switch PLLs other than DPLL (for SDRAM) to slow mode to + * avoid crashes on resume. The Mask ROM on the system will + * put APLL, CPLL, and GPLL into slow mode at resume time + * anyway (which is why we restore them), but we might not + * even make it to the Mask ROM if this isn't done at suspend + * time. + * + * NOTE: only APLL truly matters here, but we'll do them all. 
+ */ + mmio_write_32(CRU_BASE + PLL_MODE_CON, 0xf3030000); +} + +void clk_plls_resume(void) +{ + /* restore pll-modes */ + mmio_write_32(CRU_BASE + PLL_MODE_CON, + slp_data.pll_mode | REG_SOC_WMSK); +} + +void clk_gate_con_save(void) +{ + uint32_t i = 0; + + for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) + slp_data.cru_gate_con[i] = + mmio_read_32(CRU_BASE + CRU_CLKGATES_CON(i)); +} + +void clk_gate_con_disable(void) +{ + uint32_t i; + + for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) + mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), REG_SOC_WMSK); +} + +void clk_gate_con_restore(void) +{ + uint32_t i; + + for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) + mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), + REG_SOC_WMSK | slp_data.cru_gate_con[i]); +} + +void clk_sel_con_save(void) +{ + uint32_t i = 0; + + for (i = 0; i < CRU_CLKSELS_CON_CNT; i++) + slp_data.cru_sel_con[i] = + mmio_read_32(CRU_BASE + CRU_CLKSELS_CON(i)); +} + +void clk_sel_con_restore(void) +{ + uint32_t i, val; + + for (i = 0; i < CRU_CLKSELS_CON_CNT; i++) { + /* fractional dividers don't have write-masks */ + if ((i >= 7 && i <= 9) || + (i >= 17 && i <= 20) || + (i == 23) || (i == 41)) + val = slp_data.cru_sel_con[i]; + else + val = slp_data.cru_sel_con[i] | REG_SOC_WMSK; + + mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(i), val); + } +} + +void __dead2 rockchip_soc_soft_reset(void) +{ + uint32_t temp_val; + + /* + * Switch PLLs other than DPLL (for SDRAM) to slow mode to + * avoid crashes on resume. The Mask ROM on the system will + * put APLL, CPLL, and GPLL into slow mode at resume time + * anyway (which is why we restore them), but we might not + * even make it to the Mask ROM if this isn't done at suspend + * time. + * + * NOTE: only APLL truly matters here, but we'll do them all. + */ + mmio_write_32(CRU_BASE + PLL_MODE_CON, 0xf3030000); + + temp_val = mmio_read_32(CRU_BASE + CRU_GLB_RST_CON); + temp_val &= ~PMU_RST_MASK; + temp_val |= PMU_RST_BY_SECOND_SFT; + mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, temp_val); + mmio_write_32(CRU_BASE + CRU_GLB_SRST_SND, 0xeca8); + + /* + * The HW may need some time to reset the system, + * so we do not expect the core to execute valid code afterwards. + */ + while (1) + ; +} diff --git a/plat/rockchip/rk3288/drivers/soc/soc.h b/plat/rockchip/rk3288/drivers/soc/soc.h new file mode 100644 index 0000000..b96c4dc --- /dev/null +++ b/plat/rockchip/rk3288/drivers/soc/soc.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOC_H +#define SOC_H + +enum plls_id { + APLL_ID = 0, + DPLL_ID, + CPLL_ID, + GPLL_ID, + NPLL_ID, + END_PLL_ID, +}; + + +#define CYCL_24M_CNT_US(us) (24 * (us)) +#define CYCL_24M_CNT_MS(ms) ((ms) * CYCL_24M_CNT_US(1000)) + +/***************************************************************************** + * grf regs + *****************************************************************************/ +#define GRF_UOC0_CON0 0x320 +#define GRF_UOC1_CON0 0x334 +#define GRF_UOC2_CON0 0x348 +#define GRF_SIDDQ BIT(13) + +/***************************************************************************** + * cru reg, offset + *****************************************************************************/ +#define CRU_SOFTRST_CON 0x1b8 +#define CRU_SOFTRSTS_CON(n) (CRU_SOFTRST_CON + ((n) * 4)) +#define CRU_SOFTRSTS_CON_CNT 11 + +#define RST_DMA1_MSK 0x4 +#define RST_DMA2_MSK 0x1 + +#define CRU_CLKSEL_CON 0x60 +#define CRU_CLKSELS_CON(i) (CRU_CLKSEL_CON + ((i) * 4)) +#define CRU_CLKSELS_CON_CNT 42 + +#define CRU_CLKGATE_CON 0x160 +#define CRU_CLKGATES_CON(i) (CRU_CLKGATE_CON + ((i) * 4)) +#define CRU_CLKGATES_CON_CNT 18 + +#define CRU_GLB_SRST_FST 0x1b0 +#define CRU_GLB_SRST_SND 0x1b4 +#define CRU_GLB_RST_CON 0x1f0 + +#define CRU_CONS_GATEID(i) (16 * (i)) +#define GATE_ID(reg, bit) (((reg) * 16) + (bit)) + +#define PMU_RST_MASK 0x3 +#define PMU_RST_BY_FIRST_SFT (0 << 2) +#define PMU_RST_BY_SECOND_SFT (1 << 2) +#define PMU_RST_NOT_BY_SFT (2 << 2) + +/*************************************************************************** + * pll + ***************************************************************************/ +#define PLL_CON_COUNT 4 +#define PLL_CONS(id, i) ((id) * 0x10 + ((i) * 4)) +#define PLL_PWR_DN_MSK BIT(1) +#define PLL_PWR_DN REG_WMSK_BITS(1, 1, 0x1) +#define PLL_PWR_ON REG_WMSK_BITS(0, 1, 0x1) +#define PLL_RESET REG_WMSK_BITS(1, 5, 0x1) +#define PLL_RESET_RESUME REG_WMSK_BITS(0, 5, 0x1) +#define PLL_BYPASS_MSK BIT(0) +#define PLL_BYPASS_W_MSK (PLL_BYPASS_MSK << 16) +#define PLL_BYPASS REG_WMSK_BITS(1, 0, 0x1) +#define PLL_NO_BYPASS REG_WMSK_BITS(0, 0, 0x1) + +#define PLL_MODE_CON 0x50 + +struct deepsleep_data_s { + uint32_t pll_con[END_PLL_ID][PLL_CON_COUNT]; + uint32_t pll_mode; + uint32_t cru_sel_con[CRU_CLKSELS_CON_CNT]; + uint32_t cru_gate_con[CRU_CLKGATES_CON_CNT]; +}; + +#define REG_W_MSK(bits_shift, msk) \ + ((msk) << ((bits_shift) + 16)) +#define REG_VAL_CLRBITS(val, bits_shift, msk) \ + ((val) & (~((msk) << bits_shift))) +#define REG_SET_BITS(bits, bits_shift, msk) \ + (((bits) & (msk)) << (bits_shift)) +#define REG_WMSK_BITS(bits, bits_shift, msk) \ + (REG_W_MSK(bits_shift, msk) | \ + REG_SET_BITS(bits, bits_shift, msk)) +#define REG_SOC_WMSK 0xffff0000 + +#define regs_update_bit_set(addr, shift) \ + regs_update_bits((addr), 0x1, 0x1, (shift)) +#define regs_update_bit_clr(addr, shift) \ + regs_update_bits((addr), 0x0, 0x1, (shift)) + +void regs_update_bits(uintptr_t addr, uint32_t val, + uint32_t mask, uint32_t shift); +void clk_plls_suspend(void); +void clk_plls_resume(void); +void clk_gate_con_save(void); +void clk_gate_con_disable(void); +void clk_gate_con_restore(void); +void clk_sel_con_save(void); +void clk_sel_con_restore(void); +#endif /* SOC_H */ diff --git a/plat/rockchip/rk3288/include/plat_sip_calls.h b/plat/rockchip/rk3288/include/plat_sip_calls.h new file mode 100644 index 0000000..66c4868 --- /dev/null +++ b/plat/rockchip/rk3288/include/plat_sip_calls.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2016, ARM Limited and 
Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_SIP_CALLS_H +#define PLAT_SIP_CALLS_H + +#define RK_PLAT_SIP_NUM_CALLS 0 + +#endif /* PLAT_SIP_CALLS_H */ diff --git a/plat/rockchip/rk3288/include/plat_sp_min.ld.S b/plat/rockchip/rk3288/include/plat_sp_min.ld.S new file mode 100644 index 0000000..2878437 --- /dev/null +++ b/plat/rockchip/rk3288/include/plat_sp_min.ld.S @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef ROCKCHIP_PLAT_LD_S +#define ROCKCHIP_PLAT_LD_S + +#include <lib/xlat_tables/xlat_tables_defs.h> + +MEMORY { + SRAM (rwx): ORIGIN = SRAM_BASE, LENGTH = SRAM_SIZE + PMUSRAM (rwx): ORIGIN = PMUSRAM_BASE, LENGTH = PMUSRAM_RSIZE +} + +SECTIONS +{ + . = SRAM_BASE; + ASSERT(. == ALIGN(PAGE_SIZE), + "SRAM_BASE address is not aligned on a page boundary.") + + .text_sram : ALIGN(PAGE_SIZE) { + __bl32_sram_text_start = .; + *(.sram.text) + *(.sram.rodata) + __bl32_sram_text_real_end = .; + . = ALIGN(PAGE_SIZE); + __bl32_sram_text_end = .; + } >SRAM + ASSERT((__bl32_sram_text_real_end - __bl32_sram_text_start) <= + SRAM_TEXT_LIMIT, ".text_sram has exceeded its limit") + + .data_sram : ALIGN(PAGE_SIZE) { + __bl32_sram_data_start = .; + *(.sram.data) + __bl32_sram_data_real_end = .; + . = ALIGN(PAGE_SIZE); + __bl32_sram_data_end = .; + } >SRAM + ASSERT((__bl32_sram_data_real_end - __bl32_sram_data_start) <= + SRAM_DATA_LIMIT, ".data_sram has exceeded its limit") + + .stack_sram : ALIGN(PAGE_SIZE) { + __bl32_sram_stack_start = .; + . += PAGE_SIZE; + __bl32_sram_stack_end = .; + } >SRAM + + . = PMUSRAM_BASE; + + /* + * pmu_cpuson_entrypoint request address + * align 64K when resume, so put it in the + * start of pmusram + */ + .pmusram : { + ASSERT(. == ALIGN(64 * 1024), + ".pmusram.entry request 64K aligned."); + *(.pmusram.entry) + + __bl32_pmusram_text_start = .; + *(.pmusram.text) + *(.pmusram.rodata) + __bl32_pmusram_text_end = .; + + __bl32_pmusram_data_start = .; + *(.pmusram.data) + __bl32_pmusram_data_end = .; + } >PMUSRAM +} + +#endif /* ROCKCHIP_PLAT_LD_S */ diff --git a/plat/rockchip/rk3288/include/platform_def.h b/plat/rockchip/rk3288/include/platform_def.h new file mode 100644 index 0000000..85ec3fb --- /dev/null +++ b/plat/rockchip/rk3288/include/platform_def.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLATFORM_DEF_H +#define PLATFORM_DEF_H + +#include <arch.h> +#include <lib/utils_def.h> +#include <plat/common/common_def.h> + +#include <bl32_param.h> +#include <rk3288_def.h> + +/******************************************************************************* + * Platform binary types for linking + ******************************************************************************/ +#define PLATFORM_LINKER_FORMAT "elf32-littlearm" +#define PLATFORM_LINKER_ARCH arm + +/******************************************************************************* + * Generic platform constants + ******************************************************************************/ + +/* Size of cacheable stacks */ +#if defined(IMAGE_BL1) +#define PLATFORM_STACK_SIZE 0x440 +#elif defined(IMAGE_BL2) +#define PLATFORM_STACK_SIZE 0x400 +#elif defined(IMAGE_BL32) +#define PLATFORM_STACK_SIZE 0x800 +#endif + +#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n" + +#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2 +#define PLATFORM_SYSTEM_COUNT U(1) +#define PLATFORM_CLUSTER_COUNT U(1) +#define PLATFORM_CLUSTER0_CORE_COUNT U(4) +#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER0_CORE_COUNT) +#define PLATFORM_MAX_CPUS_PER_CLUSTER U(4) +#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \ + PLATFORM_CLUSTER_COUNT + \ + PLATFORM_CORE_COUNT) + +#define PLAT_RK_CLST_TO_CPUID_SHIFT 6 + +#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2 + +/* + * This macro defines the deepest retention state possible. A higher state + * id will represent an invalid or a power down state. + */ +#define PLAT_MAX_RET_STATE U(1) + +/* + * This macro defines the deepest power down states possible. Any state ID + * higher than this is invalid. + */ +#define PLAT_MAX_OFF_STATE U(2) + +/******************************************************************************* + * Platform specific page table and MMU setup constants + ******************************************************************************/ +#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32) +#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32) +#define MAX_XLAT_TABLES 8 +#define MAX_MMAP_REGIONS 18 + +/******************************************************************************* + * Declarations and constants to access the mailboxes safely. Each mailbox is + * aligned on the biggest cache line size in the platform. This is known only + * to the platform as it might have a combination of integrated and external + * caches. Such alignment ensures that two maiboxes do not sit on the same cache + * line at any cache level. They could belong to different cpus/clusters & + * get written while being protected by different locks causing corruption of + * a valid mailbox address. 
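+ * The CACHE_WRITEBACK_GRANULE defined below (64 bytes) is assumed to be at
+ * least as large as the largest cache line on this platform.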
+ ******************************************************************************/ +#define CACHE_WRITEBACK_SHIFT 6 +#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT) + +/* + * Define GICD and GICC and GICR base + */ +#define PLAT_RK_GICD_BASE RK3288_GICD_BASE +#define PLAT_RK_GICC_BASE RK3288_GICC_BASE + +#define PLAT_RK_UART_BASE UART2_BASE +#define PLAT_RK_UART_CLOCK RK3288_UART_CLOCK +#define PLAT_RK_UART_BAUDRATE RK3288_BAUDRATE + +/* ClusterId is always 0x5 on rk3288, filter it */ +#define PLAT_RK_MPIDR_CLUSTER_MASK 0 +#define PLAT_RK_PRIMARY_CPU 0x0 + +#define PSRAM_DO_DDR_RESUME 0 +#define PSRAM_CHECK_WAKEUP_CPU 0 + +#endif /* PLATFORM_DEF_H */ diff --git a/plat/rockchip/rk3288/include/shared/bl32_param.h b/plat/rockchip/rk3288/include/shared/bl32_param.h new file mode 100644 index 0000000..ffdb2f3 --- /dev/null +++ b/plat/rockchip/rk3288/include/shared/bl32_param.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef BL32_PARAM_H +#define BL32_PARAM_H + +/******************************************************************************* + * Platform memory map related constants + ******************************************************************************/ +/* TF text, ro, rw, Size: 1MB */ +#define TZRAM_BASE (0x0) +#define TZRAM_SIZE (0x100000) + +/******************************************************************************* + * BL32 specific defines. + ******************************************************************************/ +/* + * Put BL32 at the top of the Trusted RAM + */ +#define BL32_BASE (TZRAM_BASE + 0x40000) +#define BL32_LIMIT (TZRAM_BASE + TZRAM_SIZE) + +#endif /* BL32_PARAM_H */ diff --git a/plat/rockchip/rk3288/plat_sip_calls.c b/plat/rockchip/rk3288/plat_sip_calls.c new file mode 100644 index 0000000..5918d58 --- /dev/null +++ b/plat/rockchip/rk3288/plat_sip_calls.c @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/debug.h> +#include <common/runtime_svc.h> +#include <lib/mmio.h> + +#include <plat_sip_calls.h> +#include <rockchip_sip_svc.h> + +uintptr_t rockchip_plat_sip_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid); + SMC_RET1(handle, SMC_UNK); +} diff --git a/plat/rockchip/rk3288/platform.mk b/plat/rockchip/rk3288/platform.mk new file mode 100644 index 0000000..b8dd195 --- /dev/null +++ b/plat/rockchip/rk3288/platform.mk @@ -0,0 +1,69 @@ +# +# Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +include drivers/arm/gic/v2/gicv2.mk + +ARM_CORTEX_A12 := yes +ARM_ARCH_MAJOR := 7 + +RK_PLAT := plat/rockchip +RK_PLAT_SOC := ${RK_PLAT}/${PLAT} +RK_PLAT_COMMON := ${RK_PLAT}/common + +DISABLE_BIN_GENERATION := 1 + +PLAT_INCLUDES := -I${RK_PLAT_COMMON}/ \ + -I${RK_PLAT_COMMON}/include/ \ + -I${RK_PLAT_COMMON}/aarch32/ \ + -I${RK_PLAT_COMMON}/drivers/pmu/ \ + -I${RK_PLAT_SOC}/ \ + -I${RK_PLAT_SOC}/drivers/pmu/ \ + -I${RK_PLAT_SOC}/drivers/secure/ \ + -I${RK_PLAT_SOC}/drivers/soc/ \ + -I${RK_PLAT_SOC}/include/ \ + -I${RK_PLAT_SOC}/include/shared/ \ + +RK_GIC_SOURCES := ${GICV2_SOURCES} \ + plat/common/plat_gicv2.c \ + ${RK_PLAT}/common/rockchip_gicv2.c + +PLAT_BL_COMMON_SOURCES := common/desc_image_load.c \ + lib/bl_aux_params/bl_aux_params.c \ + plat/common/aarch32/crash_console_helpers.S \ + plat/common/plat_psci_common.c + +PLAT_BL_COMMON_SOURCES += lib/xlat_tables/xlat_tables_common.c \ + lib/xlat_tables/aarch32/xlat_tables.c + +BL32_SOURCES += ${RK_GIC_SOURCES} \ + drivers/arm/cci/cci.c \ + drivers/ti/uart/aarch32/16550_console.S \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + lib/cpus/aarch32/cortex_a12.S \ + ${RK_PLAT_COMMON}/aarch32/plat_helpers.S \ + ${RK_PLAT_COMMON}/params_setup.c \ + ${RK_PLAT_COMMON}/aarch32/pmu_sram_cpus_on.S \ + ${RK_PLAT_COMMON}/plat_pm.c \ + ${RK_PLAT_COMMON}/plat_topology.c \ + ${RK_PLAT_COMMON}/aarch32/platform_common.c \ + ${RK_PLAT_COMMON}/rockchip_sip_svc.c \ + ${RK_PLAT_SOC}/plat_sip_calls.c \ + ${RK_PLAT_SOC}/drivers/pmu/pmu.c \ + ${RK_PLAT_SOC}/drivers/secure/secure.c \ + ${RK_PLAT_SOC}/drivers/soc/soc.c \ + +MULTI_CONSOLE_API := 1 + +include lib/coreboot/coreboot.mk +include lib/libfdt/libfdt.mk + +$(eval $(call add_define,PLAT_SP_MIN_EXTRA_LD_SCRIPT)) + +# Do not enable SVE +ENABLE_SVE_FOR_NS := 0 + +WORKAROUND_CVE_2017_5715 := 0 diff --git a/plat/rockchip/rk3288/rk3288_def.h b/plat/rockchip/rk3288/rk3288_def.h new file mode 100644 index 0000000..7bff865 --- /dev/null +++ b/plat/rockchip/rk3288/rk3288_def.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RK3288_DEF_H +#define RK3288_DEF_H + +/* Special value used to verify platform parameters from BL2 to BL31 */ +#define RK_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL + +#define SIZE_K(n) ((n) * 1024) +#define SIZE_M(n) ((n) * 1024 * 1024) + +#define SRAM_TEXT_LIMIT (4 * 1024) +#define SRAM_DATA_LIMIT (4 * 1024) + +#define DDR_PCTL0_BASE 0xff610000 +#define DDR_PCTL0_SIZE SIZE_K(64) + +#define DDR_PHY0_BASE 0xff620000 +#define DDR_PHY0_SIZE SIZE_K(64) + +#define DDR_PCTL1_BASE 0xff630000 +#define DDR_PCTL1_SIZE SIZE_K(64) + +#define DDR_PHY1_BASE 0xff640000 +#define DDR_PHY1_SIZE SIZE_K(64) + +#define UART0_BASE 0xff180000 +#define UART0_SIZE SIZE_K(64) + +#define UART1_BASE 0xff190000 +#define UART1_SIZE SIZE_K(64) + +#define UART2_BASE 0xff690000 +#define UART2_SIZE SIZE_K(64) + +#define UART3_BASE 0xff1b0000 +#define UART3_SIZE SIZE_K(64) + +#define UART4_BASE 0xff1c0000 +#define UART4_SIZE SIZE_K(64) + +/* 96k instead of 64k? 
*/ +#define SRAM_BASE 0xff700000 +#define SRAM_SIZE SIZE_K(64) + +#define PMUSRAM_BASE 0xff720000 +#define PMUSRAM_SIZE SIZE_K(4) +#define PMUSRAM_RSIZE SIZE_K(4) + +#define PMU_BASE 0xff730000 +#define PMU_SIZE SIZE_K(64) + +#define SGRF_BASE 0xff740000 +#define SGRF_SIZE SIZE_K(64) + +#define CRU_BASE 0xff760000 +#define CRU_SIZE SIZE_K(64) + +#define GRF_BASE 0xff770000 +#define GRF_SIZE SIZE_K(64) + +/* timer 6+7 can be set as secure in SGRF */ +#define STIME_BASE 0xff810000 +#define STIME_SIZE SIZE_K(64) + +#define SERVICE_BUS_BASE 0xffac0000 +#define SERVICE_BUS_SIZE SIZE_K(64) + +#define TZPC_BASE 0xffb00000 +#define TZPC_SIZE SIZE_K(64) + +#define GIC400_BASE 0xffc00000 +#define GIC400_SIZE SIZE_K(64) + +#define CORE_AXI_BUS_BASE 0xffd00000 +#define CORE_AXI_BUS_SIZE SIZE_M(1) + +#define COLD_BOOT_BASE 0xffff0000 +/************************************************************************** + * UART related constants + **************************************************************************/ +#define RK3288_BAUDRATE 115200 +#define RK3288_UART_CLOCK 24000000 + +/****************************************************************************** + * System counter frequency related constants + ******************************************************************************/ +#define SYS_COUNTER_FREQ_IN_TICKS 24000000 + +/****************************************************************************** + * GIC-400 & interrupt handling related constants + ******************************************************************************/ + +/* Base rk_platform compatible GIC memory map */ +#define RK3288_GICD_BASE (GIC400_BASE + 0x1000) +#define RK3288_GICC_BASE (GIC400_BASE + 0x2000) +#define RK3288_GICR_BASE 0 /* no GICR in GIC-400 */ + +/****************************************************************************** + * sgi, ppi + ******************************************************************************/ +#define RK_IRQ_SEC_PHY_TIMER 29 + +/* what are these, and are they present on rk3288? */ +#define RK_IRQ_SEC_SGI_0 8 +#define RK_IRQ_SEC_SGI_1 9 +#define RK_IRQ_SEC_SGI_2 10 +#define RK_IRQ_SEC_SGI_3 11 +#define RK_IRQ_SEC_SGI_4 12 +#define RK_IRQ_SEC_SGI_5 13 +#define RK_IRQ_SEC_SGI_6 14 +#define RK_IRQ_SEC_SGI_7 15 + +/* + * Define a list of Group 0 interrupts. + */ +#define PLAT_RK_GICV2_G0_IRQS \ + INTR_PROP_DESC(RK_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, \ + GICV2_INTR_GROUP0, GIC_INTR_CFG_LEVEL), \ + INTR_PROP_DESC(RK_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, \ + GICV2_INTR_GROUP0, GIC_INTR_CFG_LEVEL) + +#endif /* RK3288_DEF_H */ diff --git a/plat/rockchip/rk3288/sp_min/sp_min-rk3288.mk b/plat/rockchip/rk3288/sp_min/sp_min-rk3288.mk new file mode 100644 index 0000000..befdca3 --- /dev/null +++ b/plat/rockchip/rk3288/sp_min/sp_min-rk3288.mk @@ -0,0 +1,8 @@ +# +# Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +BL32_SOURCES += plat/common/aarch32/platform_mp_stack.S \ + plat/rockchip/common/sp_min_plat_setup.c diff --git a/plat/rockchip/rk3328/drivers/pmu/plat_pmu_macros.S b/plat/rockchip/rk3328/drivers/pmu/plat_pmu_macros.S new file mode 100644 index 0000000..cd604d2 --- /dev/null +++ b/plat/rockchip/rk3328/drivers/pmu/plat_pmu_macros.S @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <platform_def.h> + +.globl clst_warmboot_data + +.macro func_rockchip_clst_warmboot +.endm + +.macro rockchip_clst_warmboot_data +clst_warmboot_data: + .rept PLATFORM_CLUSTER_COUNT + .word 0 + .endr +.endm diff --git a/plat/rockchip/rk3328/drivers/pmu/pmu.c b/plat/rockchip/rk3328/drivers/pmu/pmu.c new file mode 100644 index 0000000..a17fef9 --- /dev/null +++ b/plat/rockchip/rk3328/drivers/pmu/pmu.c @@ -0,0 +1,667 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <bl31/bl31.h> +#include <common/debug.h> +#include <drivers/console.h> +#include <drivers/delay_timer.h> +#include <lib/bakery_lock.h> +#include <lib/mmio.h> +#include <plat/common/platform.h> + +#include <plat_private.h> +#include <pmu.h> +#include <pmu_com.h> +#include <rk3328_def.h> + +DEFINE_BAKERY_LOCK(rockchip_pd_lock); + +static struct rk3328_sleep_ddr_data ddr_data; +static __sramdata struct rk3328_sleep_sram_data sram_data; + +static uint32_t cpu_warm_boot_addr; + +#pragma weak rk3328_pmic_suspend +#pragma weak rk3328_pmic_resume + +static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id) +{ + uint32_t pd_reg, apm_reg; + + pd_reg = mmio_read_32(PMU_BASE + PMU_PWRDN_CON) & BIT(cpu_id); + apm_reg = mmio_read_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id)) & + BIT(core_pm_en); + + if (pd_reg && !apm_reg) + return core_pwr_pd; + else if (!pd_reg && apm_reg) + return core_pwr_wfi; + + ERROR("%s: 0x%x, 0x%x\n", __func__, pd_reg, apm_reg); + while (1) + ; +} + +static int cpus_power_domain_on(uint32_t cpu_id) +{ + uint32_t cpu_pd, cfg_info; + + cpu_pd = PD_CPU0 + cpu_id; + cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id); + + if (cfg_info == core_pwr_pd) { + /* disable apm cfg */ + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + CORES_PM_DISABLE); + + /* if the cores have be on, power off it firstly */ + if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + CORES_PM_DISABLE); + pmu_power_domain_ctr(cpu_pd, pmu_pd_off); + } + pmu_power_domain_ctr(cpu_pd, pmu_pd_on); + } else { + if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { + WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id); + return -EINVAL; + } + + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + BIT(core_pm_sft_wakeup_en)); + } + + return 0; +} + +static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg) +{ + uint32_t cpu_pd, core_pm_value; + + cpu_pd = PD_CPU0 + cpu_id; + if (pmu_power_domain_st(cpu_pd) == pmu_pd_off) + return 0; + + if (pd_cfg == core_pwr_pd) { + if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK)) + return -EINVAL; + /* disable apm cfg */ + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + CORES_PM_DISABLE); + pmu_power_domain_ctr(cpu_pd, pmu_pd_off); + } else { + core_pm_value = BIT(core_pm_en) | BIT(core_pm_dis_int); + if (pd_cfg == core_pwr_wfi_int) + core_pm_value |= BIT(core_pm_int_wakeup_en); + + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), + core_pm_value); + } + + return 0; +} + +static void nonboot_cpus_off(void) +{ + uint32_t boot_cpu, cpu; + + /* turn off noboot cpus */ + boot_cpu = plat_my_core_pos(); + for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { + if (cpu == boot_cpu) + continue; + cpus_power_domain_off(cpu, core_pwr_pd); + } +} + +void sram_save(void) +{ + /* TODO: support the 
sdram save for rk3328 SoCs*/ +} + +void sram_restore(void) +{ + /* TODO: support the sdram restore for rk3328 SoCs */ +} + +int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint) +{ + uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); + + assert(cpu_id < PLATFORM_CORE_COUNT); + assert(cpuson_flags[cpu_id] == 0); + cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; + cpuson_entry_point[cpu_id] = entrypoint; + dsb(); + + cpus_power_domain_on(cpu_id); + + return 0; +} + +int rockchip_soc_cores_pwr_dm_off(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + cpus_power_domain_off(cpu_id, core_pwr_wfi); + + return 0; +} + +int rockchip_soc_cores_pwr_dm_suspend(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + assert(cpu_id < PLATFORM_CORE_COUNT); + assert(cpuson_flags[cpu_id] == 0); + cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN; + cpuson_entry_point[cpu_id] = (uintptr_t)plat_get_sec_entrypoint(); + dsb(); + + cpus_power_domain_off(cpu_id, core_pwr_wfi_int); + + return 0; +} + +int rockchip_soc_cores_pwr_dm_on_finish(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE); + + return 0; +} + +int rockchip_soc_cores_pwr_dm_resume(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE); + + return 0; +} + +void __dead2 rockchip_soc_soft_reset(void) +{ + mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(CPLL_ID)); + mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(GPLL_ID)); + mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(NPLL_ID)); + mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(APLL_ID)); + dsb(); + + mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE); + dsb(); + /* + * Maybe the HW needs some times to reset the system, + * so we do not hope the core to excute valid codes. + */ + while (1) + ; +} + +/* + * For PMIC RK805, its sleep pin is connect with gpio2_d2 from rk3328. + * If the PMIC is configed for responding the sleep pin to power off it, + * once the pin is output high, it will get the pmic power off. 
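+ * The function below therefore muxes gpio2_d2 as a GPIO, configures it as an
+ * output and drives it high.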
+ */ +void __dead2 rockchip_soc_system_off(void) +{ + uint32_t val; + + /* gpio config */ + val = mmio_read_32(GRF_BASE + GRF_GPIO2D_IOMUX); + val &= ~GPIO2_D2_GPIO_MODE; + mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, val); + + /* config output */ + val = mmio_read_32(GPIO2_BASE + SWPORTA_DDR); + val |= GPIO2_D2; + mmio_write_32(GPIO2_BASE + SWPORTA_DDR, val); + + /* config output high level */ + val = mmio_read_32(GPIO2_BASE); + val |= GPIO2_D2; + mmio_write_32(GPIO2_BASE, val); + dsb(); + + while (1) + ; +} + +static uint32_t clk_ungt_msk[CRU_CLKGATE_NUMS] = { + 0x187f, 0x0000, 0x010c, 0x0000, 0x0200, + 0x0010, 0x0000, 0x0017, 0x001f, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0003, 0x0000, + 0xf001, 0x27c0, 0x04D9, 0x03ff, 0x0000, + 0x0000, 0x0000, 0x0010, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0003, 0x0008 +}; + +static void clks_gating_suspend(uint32_t *ungt_msk) +{ + int i; + + for (i = 0; i < CRU_CLKGATE_NUMS; i++) { + ddr_data.clk_ungt_save[i] = + mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(i)); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i), + ((~ungt_msk[i]) << 16) | 0xffff); + } +} + +static void clks_gating_resume(void) +{ + int i; + + for (i = 0; i < CRU_CLKGATE_NUMS; i++) + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i), + ddr_data.clk_ungt_save[i] | 0xffff0000); +} + +static inline void pm_pll_wait_lock(uint32_t pll_id) +{ + uint32_t delay = PLL_LOCKED_TIMEOUT; + + while (delay > 0) { + if (mmio_read_32(CRU_BASE + PLL_CONS(pll_id, 1)) & + PLL_IS_LOCKED) + break; + delay--; + } + if (delay == 0) + ERROR("lock-pll: %d\n", pll_id); +} + +static inline void pll_pwr_dwn(uint32_t pll_id, uint32_t pd) +{ + mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1), + BITS_WITH_WMASK(1U, 1U, 15)); + if (pd) + mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1), + BITS_WITH_WMASK(1, 1, 14)); + else + mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1), + BITS_WITH_WMASK(0, 1, 14)); +} + +static __sramfunc void dpll_suspend(void) +{ + int i; + + /* slow mode */ + mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(DPLL_ID)); + + /* save pll con */ + for (i = 0; i < CRU_PLL_CON_NUMS; i++) + sram_data.dpll_con_save[i] = + mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, i)); + mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1), + BITS_WITH_WMASK(1U, 1U, 15)); + mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1), + BITS_WITH_WMASK(1, 1, 14)); +} + +static __sramfunc void dpll_resume(void) +{ + uint32_t delay = PLL_LOCKED_TIMEOUT; + + mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1), + BITS_WITH_WMASK(1U, 1U, 15)); + mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1), + BITS_WITH_WMASK(0, 1, 14)); + mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1), + sram_data.dpll_con_save[1] | 0xc0000000); + + dsb(); + + while (delay > 0) { + if (mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, 1)) & + PLL_IS_LOCKED) + break; + delay--; + } + if (delay == 0) + while (1) + ; + + mmio_write_32(CRU_BASE + CRU_CRU_MODE, + PLL_NORM_MODE(DPLL_ID)); +} + +static inline void pll_suspend(uint32_t pll_id) +{ + int i; + + /* slow mode */ + mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(pll_id)); + + /* save pll con */ + for (i = 0; i < CRU_PLL_CON_NUMS; i++) + ddr_data.cru_plls_con_save[pll_id][i] = + mmio_read_32(CRU_BASE + PLL_CONS(pll_id, i)); + + /* powerdown pll */ + pll_pwr_dwn(pll_id, pmu_pd_off); +} + +static inline void pll_resume(uint32_t pll_id) +{ + mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1), + ddr_data.cru_plls_con_save[pll_id][1] | 0xc0000000); + + pm_pll_wait_lock(pll_id); + + if (PLL_IS_NORM_MODE(ddr_data.cru_mode_save, pll_id)) + mmio_write_32(CRU_BASE + CRU_CRU_MODE, + 
PLL_NORM_MODE(pll_id)); +} + +static void pm_plls_suspend(void) +{ + ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_CRU_MODE); + ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(0)); + ddr_data.clk_sel1 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(1)); + ddr_data.clk_sel18 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(18)); + ddr_data.clk_sel20 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(20)); + ddr_data.clk_sel24 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(24)); + ddr_data.clk_sel38 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(38)); + pll_suspend(NPLL_ID); + pll_suspend(CPLL_ID); + pll_suspend(GPLL_ID); + pll_suspend(APLL_ID); + + /* core */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0), + BITS_WITH_WMASK(0, 0x1f, 0)); + + /* pclk_dbg */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1), + BITS_WITH_WMASK(0, 0xf, 0)); + + /* crypto */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20), + BITS_WITH_WMASK(0, 0x1f, 0)); + + /* pwm0 */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24), + BITS_WITH_WMASK(0, 0x7f, 8)); + + /* uart2 from 24M */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18), + BITS_WITH_WMASK(2, 0x3, 8)); + + /* clk_rtc32k */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38), + BITS_WITH_WMASK(767, 0x3fff, 0) | + BITS_WITH_WMASK(2U, 0x3u, 14)); +} + +static void pm_plls_resume(void) +{ + /* clk_rtc32k */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38), + ddr_data.clk_sel38 | + BITS_WMSK(0x3fff, 0) | + BITS_WMSK(0x3u, 14)); + + /* uart2 */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18), + ddr_data.clk_sel18 | BITS_WMSK(0x3, 8)); + + /* pwm0 */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24), + ddr_data.clk_sel24 | BITS_WMSK(0x7f, 8)); + + /* crypto */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20), + ddr_data.clk_sel20 | BITS_WMSK(0x1f, 0)); + + /* pclk_dbg */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1), + ddr_data.clk_sel1 | BITS_WMSK(0xf, 0)); + + /* core */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0), + ddr_data.clk_sel0 | BITS_WMSK(0x1f, 0)); + + pll_pwr_dwn(APLL_ID, pmu_pd_on); + pll_pwr_dwn(GPLL_ID, pmu_pd_on); + pll_pwr_dwn(CPLL_ID, pmu_pd_on); + pll_pwr_dwn(NPLL_ID, pmu_pd_on); + + pll_resume(APLL_ID); + pll_resume(GPLL_ID); + pll_resume(CPLL_ID); + pll_resume(NPLL_ID); +} + +#define ARCH_TIMER_TICKS_PER_US (SYS_COUNTER_FREQ_IN_TICKS / 1000000) + +static __sramfunc void sram_udelay(uint32_t us) +{ + uint64_t pct_orig, pct_now; + uint64_t to_wait = ARCH_TIMER_TICKS_PER_US * us; + + isb(); + pct_orig = read_cntpct_el0(); + + do { + isb(); + pct_now = read_cntpct_el0(); + } while ((pct_now - pct_orig) <= to_wait); +} + +/* + * For PMIC RK805, its sleep pin is connect with gpio2_d2 from rk3328. + * If the PMIC is configed for responding the sleep pin + * to get it into sleep mode, + * once the pin is output high, it will get the pmic into sleep mode. 
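+ * rk3328_pmic_suspend() below saves the iomux and GPIO state before driving
+ * the sleep pin high; rk3328_pmic_resume() restores the saved state.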
+ */ +__sramfunc void rk3328_pmic_suspend(void) +{ + sram_data.pmic_sleep_save = mmio_read_32(GRF_BASE + PMIC_SLEEP_REG); + sram_data.pmic_sleep_gpio_save[1] = mmio_read_32(GPIO2_BASE + 4); + sram_data.pmic_sleep_gpio_save[0] = mmio_read_32(GPIO2_BASE); + mmio_write_32(GRF_BASE + PMIC_SLEEP_REG, BITS_WITH_WMASK(0, 0x3, 4)); + mmio_write_32(GPIO2_BASE + 4, + sram_data.pmic_sleep_gpio_save[1] | BIT(26)); + mmio_write_32(GPIO2_BASE, + sram_data.pmic_sleep_gpio_save[0] | BIT(26)); +} + +__sramfunc void rk3328_pmic_resume(void) +{ + mmio_write_32(GPIO2_BASE, sram_data.pmic_sleep_gpio_save[0]); + mmio_write_32(GPIO2_BASE + 4, sram_data.pmic_sleep_gpio_save[1]); + mmio_write_32(GRF_BASE + PMIC_SLEEP_REG, + sram_data.pmic_sleep_save | BITS_WMSK(0xffffu, 0)); + /* Resuming volt need a lot of time */ + sram_udelay(100); +} + +static __sramfunc void ddr_suspend(void) +{ + sram_data.pd_sr_idle_save = mmio_read_32(DDR_UPCTL_BASE + + DDR_PCTL2_PWRCTL); + sram_data.pd_sr_idle_save &= SELFREF_EN; + + mmio_clrbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL, SELFREF_EN); + sram_data.ddr_grf_con0 = mmio_read_32(DDR_GRF_BASE + + DDRGRF_SOC_CON(0)); + mmio_write_32(DDR_GRF_BASE, BIT_WITH_WMSK(14) | WMSK_BIT(15)); + + /* + * Override csysreq from ddrc and + * send valid csysreq signal to PMU, + * csysreq is controlled by ddrc only + */ + + /* in self-refresh */ + mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(0)); + while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) & + (0x03 << 12)) != (0x02 << 12)) + ; + /* ddr retention */ + mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(2)); + + /* ddr gating */ + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0), + BITS_WITH_WMASK(0x7, 0x7, 4)); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7), + BITS_WITH_WMASK(1, 1, 4)); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18), + BITS_WITH_WMASK(0x1ff, 0x1ff, 1)); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27), + BITS_WITH_WMASK(0x3, 0x3, 0)); + + dpll_suspend(); +} + +__sramfunc void dmc_restore(void) +{ + dpll_resume(); + + /* ddr gating */ + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0), + BITS_WITH_WMASK(0, 0x7, 4)); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7), + BITS_WITH_WMASK(0, 1, 4)); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18), + BITS_WITH_WMASK(0, 0x1ff, 1)); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27), + BITS_WITH_WMASK(0, 0x3, 0)); + + /* ddr de_retention */ + mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(2)); + /* exit self-refresh */ + mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(0)); + while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) & + (0x03 << 12)) != (0x00 << 12)) + ; + + mmio_write_32(DDR_GRF_BASE, sram_data.ddr_grf_con0 | 0xc0000000); + if (sram_data.pd_sr_idle_save) + mmio_setbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL, + SELFREF_EN); +} + +static __sramfunc void sram_dbg_uart_suspend(void) +{ + sram_data.uart2_ier = mmio_read_32(UART2_BASE + UART_IER); + mmio_write_32(UART2_BASE + UART_IER, UART_INT_DISABLE); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20002000); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040004); +} + +__sramfunc void sram_dbg_uart_resume(void) +{ + /* restore uart clk and reset fifo */ + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20000000); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040000); + mmio_write_32(UART2_BASE + UART_FCR, UART_FIFO_RESET); + mmio_write_32(UART2_BASE + UART_IER, sram_data.uart2_ier); +} + +static __sramfunc void sram_soc_enter_lp(void) +{ + uint32_t apm_value; + + apm_value = BIT(core_pm_en) | + BIT(core_pm_dis_int) | + 
BIT(core_pm_int_wakeup_en); + mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(PD_CPU0), apm_value); + + dsb(); + isb(); +err_loop: + wfi(); + /* + *Soc will enter low power mode and + *do not return to here. + */ + goto err_loop; +} + +__sramfunc void sram_suspend(void) +{ + /* disable mmu and icache */ + disable_mmu_icache_el3(); + tlbialle3(); + dsbsy(); + isb(); + + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), + ((uintptr_t)&pmu_cpuson_entrypoint >> CPU_BOOT_ADDR_ALIGN) | + CPU_BOOT_ADDR_WMASK); + + /* ddr self-refresh and gating phy */ + ddr_suspend(); + + rk3328_pmic_suspend(); + + sram_dbg_uart_suspend(); + + sram_soc_enter_lp(); +} + +void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void) +{ + sram_suspend(); + + /* should never reach here */ + psci_power_down_wfi(); +} + +int rockchip_soc_sys_pwr_dm_suspend(void) +{ + clks_gating_suspend(clk_ungt_msk); + + pm_plls_suspend(); + + return 0; +} + +int rockchip_soc_sys_pwr_dm_resume(void) +{ + pm_plls_resume(); + + clks_gating_resume(); + + plat_rockchip_gic_cpuif_enable(); + + return 0; +} + +void rockchip_plat_mmu_el3(void) +{ + /* TODO: support the el3 for rk3328 SoCs */ +} + +void plat_rockchip_pmu_init(void) +{ + uint32_t cpu; + + for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) + cpuson_flags[cpu] = 0; + + cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot; + + /* the warm booting address of cpus */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), + (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) | + CPU_BOOT_ADDR_WMASK); + + nonboot_cpus_off(); + + INFO("%s: pd status 0x%x\n", + __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST)); +} diff --git a/plat/rockchip/rk3328/drivers/pmu/pmu.h b/plat/rockchip/rk3328/drivers/pmu/pmu.h new file mode 100644 index 0000000..dfb8912 --- /dev/null +++ b/plat/rockchip/rk3328/drivers/pmu/pmu.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PMU_H +#define PMU_H + +#include <soc.h> + +struct rk3328_sleep_ddr_data { + uint32_t pmu_debug_enable; + uint32_t debug_iomux_save; + uint32_t pmic_sleep_save; + uint32_t pmu_wakeup_conf0; + uint32_t pmu_pwrmd_com; + uint32_t cru_mode_save; + uint32_t clk_sel0, clk_sel1, clk_sel18, + clk_sel20, clk_sel24, clk_sel38; + uint32_t clk_ungt_save[CRU_CLKGATE_NUMS]; + uint32_t cru_plls_con_save[MAX_PLL][CRU_PLL_CON_NUMS]; +}; + +struct rk3328_sleep_sram_data { + uint32_t pmic_sleep_save; + uint32_t pmic_sleep_gpio_save[2]; + uint32_t ddr_grf_con0; + uint32_t dpll_con_save[CRU_PLL_CON_NUMS]; + uint32_t pd_sr_idle_save; + uint32_t uart2_ier; +}; + +/***************************************************************************** + * The ways of cores power domain contorlling + *****************************************************************************/ +enum cores_pm_ctr_mode { + core_pwr_pd = 0, + core_pwr_wfi = 1, + core_pwr_wfi_int = 2 +}; + +enum pmu_cores_pm_by_wfi { + core_pm_en = 0, + core_pm_int_wakeup_en, + core_pm_dis_int, + core_pm_sft_wakeup_en +}; + +extern void *pmu_cpuson_entrypoint_start; +extern void *pmu_cpuson_entrypoint_end; + +#define CORES_PM_DISABLE 0x0 + +/***************************************************************************** + * pmu con,reg + *****************************************************************************/ +#define PMU_WAKEUP_CFG0 0x00 +#define PMU_PWRDN_CON 0x0c +#define PMU_PWRDN_ST 0x10 +#define PMU_PWRMD_COM 0x18 +#define PMU_SFT_CON 0x1c +#define PMU_INT_CON 0x20 +#define PMU_INT_ST 0x24 +#define PMU_POWER_ST 0x44 +#define PMU_CPUAPM_CON(n) (0x80 + (n) * 4) +#define PMU_SYS_REG(n) (0xa0 + (n) * 4) + +#define CHECK_CPU_WFIE_BASE (GRF_BASE + GRF_CPU_STATUS(1)) + +enum pmu_core_pwrst_shift { + clst_cpu_wfe = 0, + clst_cpu_wfi = 4, +}; + +#define clstl_cpu_wfe (clst_cpu_wfe) +#define clstb_cpu_wfe (clst_cpu_wfe) + +enum pmu_pd_id { + PD_CPU0 = 0, + PD_CPU1, + PD_CPU2, + PD_CPU3, +}; + +enum pmu_power_mode_common { + pmu_mode_en = 0, + sref_enter_en, + global_int_disable_cfg, + cpu0_pd_en, + wait_wakeup_begin_cfg = 4, + l2_flush_en, + l2_idle_en, + ddrio_ret_de_req, + ddrio_ret_en = 8, +}; + +enum pmu_sft_con { + upctl_c_sysreq_cfg = 0, + l2flushreq_req, + ddr_io_ret_cfg, + pmu_sft_ret_cfg, +}; + +#define CKECK_WFE_MSK 0x1 +#define CKECK_WFI_MSK 0x10 +#define CKECK_WFEI_MSK 0x11 + +#define PD_CTR_LOOP 500 +#define CHK_CPU_LOOP 500 +#define MAX_WAIT_CONUT 1000 + +#define WAKEUP_INT_CLUSTER_EN 0x1 +#define PMIC_SLEEP_REG 0x34 + +#define PLL_IS_NORM_MODE(mode, pll_id) \ + ((mode & (PLL_NORM_MODE(pll_id)) & 0xffff) != 0) + +#define CTLR_ENABLE_G1_BIT BIT(1) +#define UART_FIFO_EMPTY BIT(6) + +#define UART_IER 0x04 +#define UART_FCR 0x08 +#define UART_LSR 0x14 + +#define UART_INT_DISABLE 0x00 +#define UART_FIFO_RESET 0x07 + +#endif /* PMU_H */ diff --git a/plat/rockchip/rk3328/drivers/soc/soc.c b/plat/rockchip/rk3328/drivers/soc/soc.c new file mode 100644 index 0000000..306308f --- /dev/null +++ b/plat/rockchip/rk3328/drivers/soc/soc.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/console.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include <ddr_parameter.h> +#include <plat_private.h> +#include <rk3328_def.h> +#include <soc.h> + +/* Table of regions to map using the MMU. 
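 * All regions are mapped as secure device memory, except PMUSRAM which is
 * mapped as secure normal memory.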
*/ +const mmap_region_t plat_rk_mmap[] = { + MAP_REGION_FLAT(UART0_BASE, UART0_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART1_BASE, UART1_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART2_BASE, UART2_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(PMU_BASE, PMU_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SGRF_BASE, SGRF_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(GPIO0_BASE, GPIO0_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(GPIO1_BASE, GPIO1_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(GPIO2_BASE, GPIO2_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(GPIO3_BASE, GPIO3_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(CRU_BASE, CRU_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(GRF_BASE, GRF_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(FIREWALL_DDR_BASE, FIREWALL_DDR_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(FIREWALL_CFG_BASE, FIREWALL_CFG_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(STIME_BASE, STIME_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(GIC400_BASE, GIC400_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE, + MT_MEMORY | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SHARE_MEM_BASE, SHARE_MEM_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_GRF_BASE, DDR_GRF_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_UPCTL_BASE, DDR_UPCTL_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(PWM_BASE, PWM_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_PARAM_BASE, DDR_PARAM_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(EFUSE8_BASE, EFUSE8_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(EFUSE32_BASE, EFUSE32_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_PHY_BASE, DDR_PHY_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SERVER_MSCH_BASE, SERVER_MSCH_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_MONITOR_BASE, DDR_MONITOR_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(VOP_BASE, VOP_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + + { 0 } +}; + +/* The RockChip power domain tree descriptor */ +const unsigned char rockchip_power_domain_tree_desc[] = { + /* No of root nodes */ + PLATFORM_SYSTEM_COUNT, + /* No of children for the root node */ + PLATFORM_CLUSTER_COUNT, + /* No of children for the first cluster node */ + PLATFORM_CLUSTER0_CORE_COUNT, +}; + +void secure_timer_init(void) +{ + mmio_write_32(STIMER_CHN_BASE(1) + TIMER_LOADE_COUNT0, 0xffffffff); + mmio_write_32(STIMER_CHN_BASE(1) + TIMER_LOADE_COUNT1, 0xffffffff); + /* auto reload & enable the timer */ + mmio_write_32(STIMER_CHN_BASE(1) + TIMER_CONTROL_REG, TIMER_EN); +} + +void sgrf_init(void) +{ +#ifdef PLAT_RK_SECURE_DDR_MINILOADER + uint32_t i, val; + struct param_ddr_usage usg; + + /* general secure regions */ + usg = ddr_region_usage_parse(DDR_PARAM_BASE, + PLAT_MAX_DDR_CAPACITY_MB); + for (i = 0; i < usg.s_nr; i++) { + /* enable secure */ + val = mmio_read_32(FIREWALL_DDR_BASE + + FIREWALL_DDR_FW_DDR_CON_REG); + val |= BIT(7 - i); + mmio_write_32(FIREWALL_DDR_BASE + + FIREWALL_DDR_FW_DDR_CON_REG, val); + /* map top and base */ + mmio_write_32(FIREWALL_DDR_BASE + + FIREWALL_DDR_FW_DDR_RGN(7 - i), + RG_MAP_SECURE(usg.s_top[i], usg.s_base[i])); + } +#endif + + /* set ddr rgn0_top and rga0_top as 0 */ + mmio_write_32(FIREWALL_DDR_BASE + FIREWALL_DDR_FW_DDR_RGN(0), 0x0); + + /* set all slave ip into no-secure, except stimer */ + 
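	/* FW_SYS_CON(2) below keeps the secure timer (STIMER_S) in the secure world */
+	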
mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(0), + SGRF_SLV_S_ALL_NS); + mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(1), + SGRF_SLV_S_ALL_NS); + mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(2), + SGRF_SLV_S_ALL_NS | STIMER_S); + mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(3), + SGRF_SLV_S_ALL_NS); + + /* set all master ip into no-secure */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2), 0xf0000000); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3), SGRF_MST_S_ALL_NS); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(4), SGRF_MST_S_ALL_NS); + + /* set DMAC into no-secure */ + mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(3), DMA_IRQ_BOOT_NS); + mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(4), DMA_PERI_CH_NS_15_0); + mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(5), DMA_PERI_CH_NS_19_16); + mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(5), DMA_MANAGER_BOOT_NS); + + /* soft reset dma before use */ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(3), DMA_SOFTRST_REQ); + udelay(5); + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(3), DMA_SOFTRST_RLS); +} + +void plat_rockchip_soc_init(void) +{ + secure_timer_init(); + sgrf_init(); + + NOTICE("BL31:Rockchip release version: v%d.%d\n", + MAJOR_VERSION, MINOR_VERSION); +} diff --git a/plat/rockchip/rk3328/drivers/soc/soc.h b/plat/rockchip/rk3328/drivers/soc/soc.h new file mode 100644 index 0000000..e8cbc09 --- /dev/null +++ b/plat/rockchip/rk3328/drivers/soc/soc.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOC_H +#define SOC_H + +/******************************* stimer ***************************************/ +#define TIMER_LOADE_COUNT0 0x00 +#define TIMER_LOADE_COUNT1 0x04 +#define TIMER_CURRENT_VALUE0 0x08 +#define TIMER_CURRENT_VALUE1 0x0C +#define TIMER_CONTROL_REG 0x10 +#define TIMER_INTSTATUS 0x18 +#define TIMER_EN 0x1 + +/**************************** read/write **************************************/ +#ifndef BITS_WMSK +#define BITS_WMSK(msk, shift) ((msk) << (shift + REG_MSK_SHIFT)) +#endif + +/**************************** cru *********************************************/ +enum plls_id { + APLL_ID = 0, + DPLL_ID, + CPLL_ID, + GPLL_ID, + REVERVE, + NPLL_ID, + MAX_PLL, +}; + +#define CRU_CRU_MODE 0x0080 +#define CRU_CRU_MISC 0x0084 +#define CRU_GLB_SRST_FST 0x009c +#define CRU_GLB_SRST_FST_VALUE 0xfdb9 +#define PLL_CONS(id, i) (0x020 * (id) + ((i) * 4)) +#define CRU_CLKSEL_CON(i) (0x100 + ((i) * 4)) +#define CRU_CLKSEL_NUMS 53 +#define CRU_CLKGATE_CON(i) (0x200 + ((i) * 4)) +#define CRU_CLKGATE_NUMS 29 +#define CRU_SOFTRSTS_CON(n) (0x300 + ((n) * 4)) +#define CRU_SOFTRSTS_NUMS 12 +#define CRU_PLL_CON_NUMS 5 + +/* PLLn_CON1 */ +#define PLL_IS_LOCKED BIT(10) +/* PLLn_CON0 */ +#define PLL_BYPASS BITS_WITH_WMASK(1, 0x1, 15) +#define PLL_NO_BYPASS BITS_WITH_WMASK(0, 0x1, 15) +/* CRU_MODE */ +#define PLL_SLOW_MODE(id) ((id) == NPLL_ID) ? \ + BITS_WITH_WMASK(0, 0x1, 1) : \ + BITS_WITH_WMASK(0, 0x1, ((id) * 4)) +#define PLL_NORM_MODE(id) ((id) == NPLL_ID) ? 
\ + BITS_WITH_WMASK(1, 0x1, 1) : \ + BITS_WITH_WMASK(1, 0x1, ((id) * 4)) + +#define CRU_GATEID_CONS(ID) (0x200 + (ID / 16) * 4) +#define CRU_CONS_GATEID(i) (16 * (i)) +#define GATE_ID(reg, bit) ((reg * 16) + bit) + +#define PLL_LOCKED_TIMEOUT 600000U + +#define STIMER_CHN_BASE(n) (STIME_BASE + 0x20 * (n)) +/************************** config regs ***************************************/ +#define FIREWALL_CFG_FW_SYS_CON(n) (0x000 + (n) * 4) +#define FIREWALL_DDR_FW_DDR_RGN(n) (0x000 + (n) * 4) +#define FIREWALL_DDR_FW_DDR_MST(n) (0x020 + (n) * 4) +#define FIREWALL_DDR_FW_DDR_CON_REG (0x040) +#define GRF_SOC_CON(n) (0x400 + (n) * 4) +#define GRF_SOC_STATUS(n) (0x480 + (n) * 4) +#define GRF_CPU_STATUS(n) (0x520 + (n) * 4) +#define GRF_OS_REG(n) (0x5c8 + (n) * 4) +#define DDRGRF_SOC_CON(n) (0x000 + (n) * 4) +#define DDRGRF_SOC_STATUS(n) (0x100 + (n) * 4) +#define SGRF_SOC_CON(n) (0x000 + (n) * 4) +#define SGRF_DMAC_CON(n) (0x100 + (n) * 4) +#define SGRF_HDCP_KEY_CON(n) (0x280 + (n) * 4) + +#define DDR_PCTL2_PWRCTL 0x30 +/************************** regs func *****************************************/ +#define STIMER_S BIT(23) +#define SGRF_SLV_S_ALL_NS 0x0 +#define SGRF_MST_S_ALL_NS 0xffffffff +#define DMA_IRQ_BOOT_NS 0xffffffff +#define DMA_MANAGER_BOOT_NS 0x80008000 +#define DMA_PERI_CH_NS_15_0 0xffffffff +#define DMA_PERI_CH_NS_19_16 0x000f000f +#define DMA_SOFTRST_REQ 0x01000100 +#define DMA_SOFTRST_RLS 0x01000000 + +#define SELFREF_EN BIT(0) +/************************** cpu ***********************************************/ +#define CPU_BOOT_ADDR_WMASK 0xffff0000 +#define CPU_BOOT_ADDR_ALIGN 16 + +/************************** ddr secure region *********************************/ +#define PLAT_MAX_DDR_CAPACITY_MB 4096 +#define RG_MAP_SECURE(top, base) ((((top) - 1) << 16) | (base)) + +/************************** gpio2_d2 ******************************************/ +#define SWPORTA_DR 0x00 +#define SWPORTA_DDR 0x04 +#define GPIO2_D2 BIT(26) +#define GPIO2_D2_GPIO_MODE 0x30 +#define GRF_GPIO2D_IOMUX 0x34 + +#endif /* SOC_H */ diff --git a/plat/rockchip/rk3328/include/plat.ld.S b/plat/rockchip/rk3328/include/plat.ld.S new file mode 100644 index 0000000..e9bb3a2 --- /dev/null +++ b/plat/rockchip/rk3328/include/plat.ld.S @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef ROCKCHIP_PLAT_LD_S +#define ROCKCHIP_PLAT_LD_S + +MEMORY { + PMUSRAM (rwx): ORIGIN = PMUSRAM_BASE, LENGTH = PMUSRAM_RSIZE +} + +SECTIONS +{ + . = PMUSRAM_BASE; + + /* + * pmu_cpuson_entrypoint request address + * align 64K when resume, so put it in the + * start of pmusram + */ + .text_pmusram : { + ASSERT(. == ALIGN(64 * 1024), + ".pmusram.entry request 64K aligned."); + *(.pmusram.entry) + __bl31_pmusram_text_start = .; + *(.pmusram.text) + *(.pmusram.rodata) + __bl31_pmusram_text_end = .; + __bl31_pmusram_data_start = .; + *(.pmusram.data) + __bl31_pmusram_data_end = .; + + } >PMUSRAM +} + +#endif /* ROCKCHIP_PLAT_LD_S */ diff --git a/plat/rockchip/rk3328/include/platform_def.h b/plat/rockchip/rk3328/include/platform_def.h new file mode 100644 index 0000000..6579756 --- /dev/null +++ b/plat/rockchip/rk3328/include/platform_def.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLATFORM_DEF_H +#define PLATFORM_DEF_H + +#include <arch.h> +#include <plat/common/common_def.h> + +#include <rk3328_def.h> + +/******************************************************************************* + * Platform binary types for linking + ******************************************************************************/ +#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" +#define PLATFORM_LINKER_ARCH aarch64 + +/******************************************************************************* + * Generic platform constants + ******************************************************************************/ + +/* Size of cacheable stacks */ +#if defined(IMAGE_BL1) +#define PLATFORM_STACK_SIZE 0x440 +#elif defined(IMAGE_BL2) +#define PLATFORM_STACK_SIZE 0x400 +#elif defined(IMAGE_BL31) +#define PLATFORM_STACK_SIZE 0x800 +#elif defined(IMAGE_BL32) +#define PLATFORM_STACK_SIZE 0x440 +#endif + +#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n" + +#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2 +#define PLATFORM_SYSTEM_COUNT 1 +#define PLATFORM_CLUSTER_COUNT U(1) +#define PLATFORM_CLUSTER0_CORE_COUNT U(4) +#define PLATFORM_CLUSTER1_CORE_COUNT U(0) +#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \ + PLATFORM_CLUSTER0_CORE_COUNT) + +#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \ + PLATFORM_CLUSTER_COUNT + \ + PLATFORM_CORE_COUNT) + +#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2 + +#define PLAT_RK_CLST_TO_CPUID_SHIFT 6 + +/* + * This macro defines the deepest retention state possible. A higher state + * id will represent an invalid or a power down state. + */ +#define PLAT_MAX_RET_STATE U(1) + +/* + * This macro defines the deepest power down states possible. Any state ID + * higher than this is invalid. + */ +#define PLAT_MAX_OFF_STATE U(2) + +/******************************************************************************* + * Platform memory map related constants + ******************************************************************************/ +/* TF text, ro, rw, Size: 1MB */ +#define TZRAM_BASE (0x0) +#define TZRAM_SIZE (0x100000) + +/******************************************************************************* + * BL31 specific defines. + ******************************************************************************/ +/* + * Put BL3-1 at the top of the Trusted RAM + */ +#define BL31_BASE (TZRAM_BASE + 0x40000) +#define BL31_LIMIT (TZRAM_BASE + TZRAM_SIZE) + +/******************************************************************************* + * Platform specific page table and MMU setup constants + ******************************************************************************/ +#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32) +#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32) +#define MAX_XLAT_TABLES 9 +#define MAX_MMAP_REGIONS 33 + +/******************************************************************************* + * Declarations and constants to access the mailboxes safely. Each mailbox is + * aligned on the biggest cache line size in the platform. This is known only + * to the platform as it might have a combination of integrated and external + * caches. Such alignment ensures that two maiboxes do not sit on the same cache + * line at any cache level. They could belong to different cpus/clusters & + * get written while being protected by different locks causing corruption of + * a valid mailbox address. 
+ ******************************************************************************/ +#define CACHE_WRITEBACK_SHIFT 6 +#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT) + +/* + * Define GICD and GICC and GICR base + */ +#define PLAT_RK_GICD_BASE RK3328_GICD_BASE +#define PLAT_RK_GICC_BASE RK3328_GICC_BASE + +#define PLAT_RK_UART_BASE UART2_BASE +#define PLAT_RK_UART_CLOCK RK3328_UART_CLOCK +#define PLAT_RK_UART_BAUDRATE RK3328_BAUDRATE + +#define PLAT_RK_PRIMARY_CPU 0x0 + +#define PSRAM_DO_DDR_RESUME 0 +#define PSRAM_CHECK_WAKEUP_CPU 0 + +#endif /* PLATFORM_DEF_H */ diff --git a/plat/rockchip/rk3328/platform.mk b/plat/rockchip/rk3328/platform.mk new file mode 100644 index 0000000..5b4766d --- /dev/null +++ b/plat/rockchip/rk3328/platform.mk @@ -0,0 +1,75 @@ +# +# Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +include drivers/arm/gic/v2/gicv2.mk + +RK_PLAT := plat/rockchip +RK_PLAT_SOC := ${RK_PLAT}/${PLAT} +RK_PLAT_COMMON := ${RK_PLAT}/common + +DISABLE_BIN_GENERATION := 1 + +PLAT_INCLUDES := -Idrivers/arm/gic/common/ \ + -Idrivers/arm/gic/v2/ \ + -I${RK_PLAT_COMMON}/ \ + -I${RK_PLAT_COMMON}/include/ \ + -I${RK_PLAT_COMMON}/aarch64/ \ + -I${RK_PLAT_COMMON}/drivers/pmu/ \ + -I${RK_PLAT_COMMON}/drivers/parameter/ \ + -I${RK_PLAT_SOC}/ \ + -I${RK_PLAT_SOC}/drivers/pmu/ \ + -I${RK_PLAT_SOC}/drivers/soc/ \ + -I${RK_PLAT_SOC}/include/ + +RK_GIC_SOURCES := ${GICV2_SOURCES} \ + plat/common/plat_gicv2.c \ + ${RK_PLAT}/common/rockchip_gicv2.c + +PLAT_BL_COMMON_SOURCES := common/desc_image_load.c \ + lib/bl_aux_params/bl_aux_params.c \ + lib/xlat_tables/aarch64/xlat_tables.c \ + lib/xlat_tables/xlat_tables_common.c \ + plat/common/aarch64/crash_console_helpers.S \ + plat/common/plat_psci_common.c + +ifneq (${ENABLE_STACK_PROTECTOR},0) +PLAT_BL_COMMON_SOURCES += ${RK_PLAT_COMMON}/rockchip_stack_protector.c +endif + +BL31_SOURCES += ${RK_GIC_SOURCES} \ + drivers/arm/cci/cci.c \ + drivers/ti/uart/aarch64/16550_console.S \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + lib/cpus/aarch64/aem_generic.S \ + lib/cpus/aarch64/cortex_a53.S \ + ${RK_PLAT_COMMON}/aarch64/plat_helpers.S \ + ${RK_PLAT_COMMON}/params_setup.c \ + ${RK_PLAT_COMMON}/bl31_plat_setup.c \ + ${RK_PLAT_COMMON}/aarch64/pmu_sram_cpus_on.S \ + ${RK_PLAT_COMMON}/plat_pm.c \ + ${RK_PLAT_COMMON}/plat_topology.c \ + ${RK_PLAT_COMMON}/aarch64/platform_common.c \ + ${RK_PLAT_SOC}/drivers/pmu/pmu.c \ + ${RK_PLAT_SOC}/drivers/soc/soc.c + +ifdef PLAT_RK_SECURE_DDR_MINILOADER +BL31_SOURCES += ${RK_PLAT_COMMON}/drivers/parameter/ddr_parameter.c +endif + +include lib/coreboot/coreboot.mk +include lib/libfdt/libfdt.mk + +# Enable workarounds for selected Cortex-A53 errata +ERRATA_A53_855873 := 1 + +$(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT)) +$(eval $(call add_define,PLAT_SKIP_OPTEE_S_EL1_INT_REGISTER)) + +# Do not enable SVE +ENABLE_SVE_FOR_NS := 0 + +WORKAROUND_CVE_2017_5715 := 0 diff --git a/plat/rockchip/rk3328/rk3328_def.h b/plat/rockchip/rk3328/rk3328_def.h new file mode 100644 index 0000000..60055e8 --- /dev/null +++ b/plat/rockchip/rk3328/rk3328_def.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RK3328_DEF_H +#define RK3328_DEF_H + +#define MAJOR_VERSION (1) +#define MINOR_VERSION (2) + +#define SIZE_K(n) ((n) * 1024) + +/* Special value used to verify platform parameters from BL2 to BL3-1 */ +#define RK_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL + +#define UART0_BASE 0xff110000 +#define UART0_SIZE SIZE_K(64) + +#define UART1_BASE 0xff120000 +#define UART1_SIZE SIZE_K(64) + +#define UART2_BASE 0xff130000 +#define UART2_SIZE SIZE_K(64) + +#define PMU_BASE 0xff140000 +#define PMU_SIZE SIZE_K(64) + +#define SGRF_BASE 0xff0d0000 +#define SGRF_SIZE SIZE_K(64) + +#define CRU_BASE 0xff440000 +#define CRU_SIZE SIZE_K(64) + +#define GRF_BASE 0xff100000 +#define GRF_SIZE SIZE_K(64) + +#define GPIO0_BASE 0xff210000 +#define GPIO0_SIZE SIZE_K(32) + +#define GPIO1_BASE 0xff220000 +#define GPIO1_SIZE SIZE_K(32) + +#define GPIO2_BASE 0xff230000 +#define GPIO2_SIZE SIZE_K(64) + +#define GPIO3_BASE 0xff240000 +#define GPIO3_SIZE SIZE_K(64) + +#define STIME_BASE 0xff1d0000 +#define STIME_SIZE SIZE_K(64) + +#define INTMEM_BASE 0xff090000 +#define INTMEM_SIZE SIZE_K(32) + +#define SRAM_LDS_BASE (INTMEM_BASE + SIZE_K(4)) +#define SRAM_LDS_SIZE (INTMEM_SIZE - SIZE_K(4)) + +#define PMUSRAM_BASE INTMEM_BASE +#define PMUSRAM_SIZE SIZE_K(4) +#define PMUSRAM_RSIZE SIZE_K(4) + +#define VOP_BASE 0xff370000 +#define VOP_SIZE SIZE_K(16) + +#define DDR_PHY_BASE 0xff400000 +#define DDR_PHY_SIZE SIZE_K(4) + +#define SERVER_MSCH_BASE 0xff720000 +#define SERVER_MSCH_SIZE SIZE_K(4) + +#define DDR_UPCTL_BASE 0xff780000 +#define DDR_UPCTL_SIZE SIZE_K(12) + +#define DDR_MONITOR_BASE 0xff790000 +#define DDR_MONITOR_SIZE SIZE_K(4) + +#define FIREWALL_DDR_BASE 0xff7c0000 +#define FIREWALL_DDR_SIZE SIZE_K(64) + +#define FIREWALL_CFG_BASE 0xff7d0000 +#define FIREWALL_CFG_SIZE SIZE_K(64) + +#define GIC400_BASE 0xff810000 +#define GIC400_SIZE SIZE_K(64) + +#define DDR_GRF_BASE 0xff798000 +#define DDR_GRF_SIZE SIZE_K(16) + +#define PWM_BASE 0xff1b0000 +#define PWM_SIZE SIZE_K(64) + +#define DDR_PARAM_BASE 0x02000000 +#define DDR_PARAM_SIZE SIZE_K(4) + +#define EFUSE8_BASE 0xff260000 +#define EFUSE8_SIZE SIZE_K(4) + +#define EFUSE32_BASE 0xff0b0000 +#define EFUSE32_SIZE SIZE_K(4) + +/************************************************************************** + * UART related constants + **************************************************************************/ +#define RK3328_BAUDRATE 1500000 +#define RK3328_UART_CLOCK 24000000 + +/****************************************************************************** + * System counter frequency related constants + ******************************************************************************/ +#define SYS_COUNTER_FREQ_IN_TICKS 24000000U +#define SYS_COUNTER_FREQ_IN_MHZ 24 + +/****************************************************************************** + * GIC-400 & interrupt handling related constants + ******************************************************************************/ + +/* Base rk_platform compatible GIC memory map */ +#define RK3328_GICD_BASE (GIC400_BASE + 0x1000) +#define RK3328_GICC_BASE (GIC400_BASE + 0x2000) +#define RK3328_GICR_BASE 0 /* no GICR in GIC-400 */ + +/****************************************************************************** + * sgi, ppi + ******************************************************************************/ +#define RK_IRQ_SEC_PHY_TIMER 29 + +#define RK_IRQ_SEC_SGI_0 8 +#define RK_IRQ_SEC_SGI_1 9 +#define RK_IRQ_SEC_SGI_2 10 +#define RK_IRQ_SEC_SGI_3 11 +#define 
RK_IRQ_SEC_SGI_4 12 +#define RK_IRQ_SEC_SGI_5 13 +#define RK_IRQ_SEC_SGI_6 14 +#define RK_IRQ_SEC_SGI_7 15 + +/* + * Define a list of Group 0 interrupts. + */ +#define PLAT_RK_GICV2_G0_IRQS \ + INTR_PROP_DESC(RK_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, \ + GICV2_INTR_GROUP0, GIC_INTR_CFG_LEVEL), \ + INTR_PROP_DESC(RK_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, \ + GICV2_INTR_GROUP0, GIC_INTR_CFG_LEVEL) + +#define SHARE_MEM_BASE 0x100000/* [1MB, 1MB+60K]*/ +#define SHARE_MEM_PAGE_NUM 15 +#define SHARE_MEM_SIZE SIZE_K(SHARE_MEM_PAGE_NUM * 4) + +#endif /* RK3328_DEF_H */ diff --git a/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.c b/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.c new file mode 100644 index 0000000..fa98eb3 --- /dev/null +++ b/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.c @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdint.h> +#include <string.h> + +#include <platform_def.h> + +#include <common/debug.h> +#include <lib/mmio.h> + +#include <ddr_rk3368.h> +#include <pmu.h> +#include <rk3368_def.h> +#include <soc.h> + +/* GRF_SOC_STATUS0 */ +#define DPLL_LOCK (0x1 << 2) + +/* GRF_DDRC0_CON0 */ +#define GRF_DDR_16BIT_EN (((0x1 << 3) << 16) | (0x1 << 3)) +#define GRF_DDR_32BIT_EN (((0x1 << 3) << 16) | (0x0 << 3)) +#define GRF_MOBILE_DDR_EN (((0x1 << 4) << 16) | (0x1 << 4)) +#define GRF_MOBILE_DDR_DISB (((0x1 << 4) << 16) | (0x0 << 4)) +#define GRF_DDR3_EN (((0x1 << 2) << 16) | (0x1 << 2)) +#define GRF_LPDDR2_3_EN (((0x1 << 2) << 16) | (0x0 << 2)) + +/* PMUGRF_SOC_CON0 */ +#define ddrphy_bufferen_io_en(n) ((0x1 << (9 + 16)) | (n << 9)) +#define ddrphy_bufferen_core_en(n) ((0x1 << (8 + 16)) | (n << 8)) + +struct PCTRL_TIMING_TAG { + uint32_t ddrfreq; + uint32_t TOGCNT1U; + uint32_t TINIT; + uint32_t TRSTH; + uint32_t TOGCNT100N; + uint32_t TREFI; + uint32_t TMRD; + uint32_t TRFC; + uint32_t TRP; + uint32_t TRTW; + uint32_t TAL; + uint32_t TCL; + uint32_t TCWL; + uint32_t TRAS; + uint32_t TRC; + uint32_t TRCD; + uint32_t TRRD; + uint32_t TRTP; + uint32_t TWR; + uint32_t TWTR; + uint32_t TEXSR; + uint32_t TXP; + uint32_t TXPDLL; + uint32_t TZQCS; + uint32_t TZQCSI; + uint32_t TDQS; + uint32_t TCKSRE; + uint32_t TCKSRX; + uint32_t TCKE; + uint32_t TMOD; + uint32_t TRSTL; + uint32_t TZQCL; + uint32_t TMRR; + uint32_t TCKESR; + uint32_t TDPD; + uint32_t TREFI_MEM_DDR3; +}; + +struct MSCH_SAVE_REG_TAG { + uint32_t ddrconf; + uint32_t ddrtiming; + uint32_t ddrmode; + uint32_t readlatency; + uint32_t activate; + uint32_t devtodev; +}; + +/* ddr suspend need save reg */ +struct PCTL_SAVE_REG_TAG { + uint32_t SCFG; + uint32_t CMDTSTATEN; + uint32_t MCFG1; + uint32_t MCFG; + uint32_t PPCFG; + struct PCTRL_TIMING_TAG pctl_timing; + /* DFI Control Registers */ + uint32_t DFITCTRLDELAY; + uint32_t DFIODTCFG; + uint32_t DFIODTCFG1; + uint32_t DFIODTRANKMAP; + /* DFI Write Data Registers */ + uint32_t DFITPHYWRDATA; + uint32_t DFITPHYWRLAT; + uint32_t DFITPHYWRDATALAT; + /* DFI Read Data Registers */ + uint32_t DFITRDDATAEN; + uint32_t DFITPHYRDLAT; + /* DFI Update Registers */ + uint32_t DFITPHYUPDTYPE0; + uint32_t DFITPHYUPDTYPE1; + uint32_t DFITPHYUPDTYPE2; + uint32_t DFITPHYUPDTYPE3; + uint32_t DFITCTRLUPDMIN; + uint32_t DFITCTRLUPDMAX; + uint32_t DFITCTRLUPDDLY; + uint32_t DFIUPDCFG; + uint32_t DFITREFMSKI; + uint32_t DFITCTRLUPDI; + /* DFI Status Registers */ + uint32_t DFISTCFG0; + uint32_t DFISTCFG1; + uint32_t DFITDRAMCLKEN; + uint32_t DFITDRAMCLKDIS; + uint32_t DFISTCFG2; + /* 
DFI Low Power Register */ + uint32_t DFILPCFG0; +}; + +struct DDRPHY_SAVE_REG_TAG { + uint32_t PHY_REG0; + uint32_t PHY_REG1; + uint32_t PHY_REGB; + uint32_t PHY_REGC; + uint32_t PHY_REG11; + uint32_t PHY_REG13; + uint32_t PHY_REG14; + uint32_t PHY_REG16; + uint32_t PHY_REG20; + uint32_t PHY_REG21; + uint32_t PHY_REG26; + uint32_t PHY_REG27; + uint32_t PHY_REG28; + uint32_t PHY_REG30; + uint32_t PHY_REG31; + uint32_t PHY_REG36; + uint32_t PHY_REG37; + uint32_t PHY_REG38; + uint32_t PHY_REG40; + uint32_t PHY_REG41; + uint32_t PHY_REG46; + uint32_t PHY_REG47; + uint32_t PHY_REG48; + uint32_t PHY_REG50; + uint32_t PHY_REG51; + uint32_t PHY_REG56; + uint32_t PHY_REG57; + uint32_t PHY_REG58; + uint32_t PHY_REGDLL; + uint32_t PHY_REGEC; + uint32_t PHY_REGED; + uint32_t PHY_REGEE; + uint32_t PHY_REGEF; + uint32_t PHY_REGFB; + uint32_t PHY_REGFC; + uint32_t PHY_REGFD; + uint32_t PHY_REGFE; +}; + +struct BACKUP_REG_TAG { + uint32_t tag; + uint32_t pctladdr; + struct PCTL_SAVE_REG_TAG pctl; + uint32_t phyaddr; + struct DDRPHY_SAVE_REG_TAG phy; + uint32_t nocaddr; + struct MSCH_SAVE_REG_TAG noc; + uint32_t pllselect; + uint32_t phypllockaddr; + uint32_t phyplllockmask; + uint32_t phyplllockval; + uint32_t pllpdstat; + uint32_t dpllmodeaddr; + uint32_t dpllslowmode; + uint32_t dpllnormalmode; + uint32_t dpllresetaddr; + uint32_t dpllreset; + uint32_t dplldereset; + uint32_t dpllconaddr; + uint32_t dpllcon[4]; + uint32_t dplllockaddr; + uint32_t dplllockmask; + uint32_t dplllockval; + uint32_t ddrpllsrcdivaddr; + uint32_t ddrpllsrcdiv; + uint32_t retendisaddr; + uint32_t retendisval; + uint32_t grfregaddr; + uint32_t grfddrcreg; + uint32_t crupctlphysoftrstaddr; + uint32_t cruresetpctlphy; + uint32_t cruderesetphy; + uint32_t cruderesetpctlphy; + uint32_t physoftrstaddr; + uint32_t endtag; +}; + +static uint32_t ddr_get_phy_pll_freq(void) +{ + uint32_t ret = 0; + uint32_t fb_div, pre_div; + + fb_div = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEC); + fb_div |= (mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGED) & 0x1) << 8; + + pre_div = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE) & 0xff; + ret = 2 * 24 * fb_div / (4 * pre_div); + + return ret; +} + +static void ddr_copy(uint32_t *pdest, uint32_t *psrc, uint32_t words) +{ + uint32_t i; + + for (i = 0; i < words; i++) + pdest[i] = psrc[i]; +} + +static void ddr_get_dpll_cfg(uint32_t *p) +{ + uint32_t nmhz, NO, NF, NR; + + nmhz = ddr_get_phy_pll_freq(); + if (nmhz <= 150) + NO = 6; + else if (nmhz <= 250) + NO = 4; + else if (nmhz <= 500) + NO = 2; + else + NO = 1; + + NR = 1; + NF = 2 * nmhz * NR * NO / 24; + + p[0] = SET_NR(NR) | SET_NO(NO); + p[1] = SET_NF(NF); + p[2] = SET_NB(NF / 2); +} + +void ddr_reg_save(uint32_t pllpdstat, uint64_t base_addr) +{ + struct BACKUP_REG_TAG *p_ddr_reg = (struct BACKUP_REG_TAG *)base_addr; + struct PCTL_SAVE_REG_TAG *pctl_tim = &p_ddr_reg->pctl; + + p_ddr_reg->tag = 0x56313031; + p_ddr_reg->pctladdr = DDR_PCTL_BASE; + p_ddr_reg->phyaddr = DDR_PHY_BASE; + p_ddr_reg->nocaddr = SERVICE_BUS_BASE; + + /* PCTLR */ + ddr_copy((uint32_t *)&pctl_tim->pctl_timing.TOGCNT1U, + (uint32_t *)(DDR_PCTL_BASE + DDR_PCTL_TOGCNT1U), 35); + pctl_tim->pctl_timing.TREFI |= DDR_UPD_REF_ENABLE; + pctl_tim->SCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_SCFG); + pctl_tim->CMDTSTATEN = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_CMDTSTATEN); + pctl_tim->MCFG1 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_MCFG1); + pctl_tim->MCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_MCFG); + pctl_tim->PPCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_PPCFG); + 
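	/* ddrfreq and the DFI interface registers */
+	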
pctl_tim->pctl_timing.ddrfreq = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_TOGCNT1U * 2); + pctl_tim->DFITCTRLDELAY = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITCTRLDELAY); + pctl_tim->DFIODTCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFIODTCFG); + pctl_tim->DFIODTCFG1 = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFIODTCFG1); + pctl_tim->DFIODTRANKMAP = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFIODTRANKMAP); + pctl_tim->DFITPHYWRDATA = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITPHYWRDATA); + pctl_tim->DFITPHYWRLAT = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITPHYWRLAT); + pctl_tim->DFITPHYWRDATALAT = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITPHYWRDATALAT); + pctl_tim->DFITRDDATAEN = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITRDDATAEN); + pctl_tim->DFITPHYRDLAT = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITPHYRDLAT); + pctl_tim->DFITPHYUPDTYPE0 = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITPHYUPDTYPE0); + pctl_tim->DFITPHYUPDTYPE1 = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITPHYUPDTYPE1); + pctl_tim->DFITPHYUPDTYPE2 = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITPHYUPDTYPE2); + pctl_tim->DFITPHYUPDTYPE3 = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITPHYUPDTYPE3); + pctl_tim->DFITCTRLUPDMIN = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITCTRLUPDMIN); + pctl_tim->DFITCTRLUPDMAX = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITCTRLUPDMAX); + pctl_tim->DFITCTRLUPDDLY = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITCTRLUPDDLY); + + pctl_tim->DFIUPDCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFIUPDCFG); + pctl_tim->DFITREFMSKI = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITREFMSKI); + pctl_tim->DFITCTRLUPDI = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITCTRLUPDI); + pctl_tim->DFISTCFG0 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG0); + pctl_tim->DFISTCFG1 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG1); + pctl_tim->DFITDRAMCLKEN = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITDRAMCLKEN); + pctl_tim->DFITDRAMCLKDIS = mmio_read_32(DDR_PCTL_BASE + + DDR_PCTL_DFITDRAMCLKDIS); + pctl_tim->DFISTCFG2 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG2); + pctl_tim->DFILPCFG0 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFILPCFG0); + + /* PHY */ + p_ddr_reg->phy.PHY_REG0 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG0); + p_ddr_reg->phy.PHY_REG1 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG1); + p_ddr_reg->phy.PHY_REGB = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGB); + p_ddr_reg->phy.PHY_REGC = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGC); + p_ddr_reg->phy.PHY_REG11 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG11); + p_ddr_reg->phy.PHY_REG13 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG13); + p_ddr_reg->phy.PHY_REG14 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG14); + p_ddr_reg->phy.PHY_REG16 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG16); + p_ddr_reg->phy.PHY_REG20 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG20); + p_ddr_reg->phy.PHY_REG21 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG21); + p_ddr_reg->phy.PHY_REG26 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG26); + p_ddr_reg->phy.PHY_REG27 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG27); + p_ddr_reg->phy.PHY_REG28 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG28); + p_ddr_reg->phy.PHY_REG30 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG30); + p_ddr_reg->phy.PHY_REG31 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG31); + p_ddr_reg->phy.PHY_REG36 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG36); + p_ddr_reg->phy.PHY_REG37 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG37); + p_ddr_reg->phy.PHY_REG38 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG38); + p_ddr_reg->phy.PHY_REG40 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG40); + 
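+	/* the upper byte-lane timing registers and the PHY DLL/PLL settings follow */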
p_ddr_reg->phy.PHY_REG41 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG41); + p_ddr_reg->phy.PHY_REG46 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG46); + p_ddr_reg->phy.PHY_REG47 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG47); + p_ddr_reg->phy.PHY_REG48 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG48); + p_ddr_reg->phy.PHY_REG50 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG50); + p_ddr_reg->phy.PHY_REG51 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG51); + p_ddr_reg->phy.PHY_REG56 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG56); + p_ddr_reg->phy.PHY_REG57 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG57); + p_ddr_reg->phy.PHY_REG58 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG58); + p_ddr_reg->phy.PHY_REGDLL = mmio_read_32(DDR_PHY_BASE + + DDR_PHY_REGDLL); + p_ddr_reg->phy.PHY_REGEC = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEC); + p_ddr_reg->phy.PHY_REGED = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGED); + p_ddr_reg->phy.PHY_REGEE = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE); + p_ddr_reg->phy.PHY_REGEF = 0; + + if (mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG2) & 0x2) { + p_ddr_reg->phy.PHY_REGFB = mmio_read_32(DDR_PHY_BASE + + DDR_PHY_REG2C); + p_ddr_reg->phy.PHY_REGFC = mmio_read_32(DDR_PHY_BASE + + DDR_PHY_REG3C); + p_ddr_reg->phy.PHY_REGFD = mmio_read_32(DDR_PHY_BASE + + DDR_PHY_REG4C); + p_ddr_reg->phy.PHY_REGFE = mmio_read_32(DDR_PHY_BASE + + DDR_PHY_REG5C); + } else { + p_ddr_reg->phy.PHY_REGFB = mmio_read_32(DDR_PHY_BASE + + DDR_PHY_REGFB); + p_ddr_reg->phy.PHY_REGFC = mmio_read_32(DDR_PHY_BASE + + DDR_PHY_REGFC); + p_ddr_reg->phy.PHY_REGFD = mmio_read_32(DDR_PHY_BASE + + DDR_PHY_REGFD); + p_ddr_reg->phy.PHY_REGFE = mmio_read_32(DDR_PHY_BASE + + DDR_PHY_REGFE); + } + + /* NOC */ + p_ddr_reg->noc.ddrconf = mmio_read_32(SERVICE_BUS_BASE + MSCH_DDRCONF); + p_ddr_reg->noc.ddrtiming = mmio_read_32(SERVICE_BUS_BASE + + MSCH_DDRTIMING); + p_ddr_reg->noc.ddrmode = mmio_read_32(SERVICE_BUS_BASE + MSCH_DDRMODE); + p_ddr_reg->noc.readlatency = mmio_read_32(SERVICE_BUS_BASE + + MSCH_READLATENCY); + p_ddr_reg->noc.activate = mmio_read_32(SERVICE_BUS_BASE + + MSCH_ACTIVATE); + p_ddr_reg->noc.devtodev = mmio_read_32(SERVICE_BUS_BASE + + MSCH_DEVTODEV); + + p_ddr_reg->pllselect = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE) * 0x1; + p_ddr_reg->phypllockaddr = GRF_BASE + GRF_SOC_STATUS0; + p_ddr_reg->phyplllockmask = GRF_DDRPHY_LOCK; + p_ddr_reg->phyplllockval = 0; + + /* PLLPD */ + p_ddr_reg->pllpdstat = pllpdstat; + /* DPLL */ + p_ddr_reg->dpllmodeaddr = CRU_BASE + PLL_CONS(DPLL_ID, 3); + /* slow mode and power on */ + p_ddr_reg->dpllslowmode = DPLL_WORK_SLOW_MODE | DPLL_POWER_DOWN; + p_ddr_reg->dpllnormalmode = DPLL_WORK_NORMAL_MODE; + p_ddr_reg->dpllresetaddr = CRU_BASE + PLL_CONS(DPLL_ID, 3); + p_ddr_reg->dpllreset = DPLL_RESET_CONTROL_NORMAL; + p_ddr_reg->dplldereset = DPLL_RESET_CONTROL_RESET; + p_ddr_reg->dpllconaddr = CRU_BASE + PLL_CONS(DPLL_ID, 0); + + if (p_ddr_reg->pllselect == 0) { + p_ddr_reg->dpllcon[0] = (mmio_read_32(CRU_BASE + + PLL_CONS(DPLL_ID, 0)) + & 0xffff) | + (0xFFFFu << 16); + p_ddr_reg->dpllcon[1] = (mmio_read_32(CRU_BASE + + PLL_CONS(DPLL_ID, 1)) + & 0xffff); + p_ddr_reg->dpllcon[2] = (mmio_read_32(CRU_BASE + + PLL_CONS(DPLL_ID, 2)) + & 0xffff); + p_ddr_reg->dpllcon[3] = (mmio_read_32(CRU_BASE + + PLL_CONS(DPLL_ID, 3)) + & 0xffff) | + (0xFFFFu << 16); + } else { + ddr_get_dpll_cfg(&p_ddr_reg->dpllcon[0]); + } + + p_ddr_reg->pllselect = 0; + p_ddr_reg->dplllockaddr = CRU_BASE + PLL_CONS(DPLL_ID, 1); + p_ddr_reg->dplllockmask = DPLL_STATUS_LOCK; + p_ddr_reg->dplllockval = DPLL_STATUS_LOCK; + + /* SET_DDR_PLL_SRC */ + 
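+	/*
+	 * Save the clock-select register together with its write-enable mask
+	 * (upper 16 bits) so the resume code can restore the DDR clock source
+	 * and divider in one masked write.
+	 */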
p_ddr_reg->ddrpllsrcdivaddr = CRU_BASE + CRU_CLKSELS_CON(13); + p_ddr_reg->ddrpllsrcdiv = (mmio_read_32(CRU_BASE + CRU_CLKSELS_CON(13)) + & DDR_PLL_SRC_MASK) + | (DDR_PLL_SRC_MASK << 16); + p_ddr_reg->retendisaddr = PMU_BASE + PMU_PWRMD_COM; + p_ddr_reg->retendisval = PD_PERI_PWRDN_ENABLE; + p_ddr_reg->grfregaddr = GRF_BASE + GRF_DDRC0_CON0; + p_ddr_reg->grfddrcreg = (mmio_read_32(GRF_BASE + GRF_DDRC0_CON0) & + DDR_PLL_SRC_MASK) | + (DDR_PLL_SRC_MASK << 16); + + /* pctl phy soft reset */ + p_ddr_reg->crupctlphysoftrstaddr = CRU_BASE + CRU_SOFTRSTS_CON(10); + p_ddr_reg->cruresetpctlphy = DDRCTRL0_PSRSTN_REQ(1) | + DDRCTRL0_SRSTN_REQ(1) | + DDRPHY0_PSRSTN_REQ(1) | + DDRPHY0_SRSTN_REQ(1); + p_ddr_reg->cruderesetphy = DDRCTRL0_PSRSTN_REQ(1) | + DDRCTRL0_SRSTN_REQ(1) | + DDRPHY0_PSRSTN_REQ(0) | + DDRPHY0_SRSTN_REQ(0); + + p_ddr_reg->cruderesetpctlphy = DDRCTRL0_PSRSTN_REQ(0) | + DDRCTRL0_SRSTN_REQ(0) | + DDRPHY0_PSRSTN_REQ(0) | + DDRPHY0_SRSTN_REQ(0); + + p_ddr_reg->physoftrstaddr = DDR_PHY_BASE + DDR_PHY_REG0; + + p_ddr_reg->endtag = 0xFFFFFFFF; +} + +/* + * "rk3368_ddr_reg_resume_V1.05.bin" is an executable bin which is generated + * by ARM DS5 for resuming ddr controller. If the soc wakes up from system + * suspend, ddr needs to be resumed and the resuming code needs to be run in + * sram. But there is not a way to pointing the resuming code to the PMUSRAM + * when linking .o files of bl31, so we use the + * "rk3368_ddr_reg_resume_V1.05.bin" whose code is position-independent and + * it can be loaded anywhere and run. + */ +static __aligned(4) unsigned int ddr_reg_resume[] = { + #include "rk3368_ddr_reg_resume_V1.05.bin" +}; + +uint32_t ddr_get_resume_code_size(void) +{ + return sizeof(ddr_reg_resume); +} + +uint32_t ddr_get_resume_data_size(void) +{ + return sizeof(struct BACKUP_REG_TAG); +} + +uint32_t *ddr_get_resume_code_base(void) +{ + return (unsigned int *)ddr_reg_resume; +} diff --git a/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.h b/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.h new file mode 100644 index 0000000..6663bcb --- /dev/null +++ b/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.h @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef DDR_RK3368_H +#define DDR_RK3368_H + +#define DDR_PCTL_SCFG 0x0 +#define DDR_PCTL_SCTL 0x4 +#define DDR_PCTL_STAT 0x8 +#define DDR_PCTL_INTRSTAT 0xc + +#define DDR_PCTL_MCMD 0x40 +#define DDR_PCTL_POWCTL 0x44 +#define DDR_PCTL_POWSTAT 0x48 +#define DDR_PCTL_CMDTSTAT 0x4c +#define DDR_PCTL_CMDTSTATEN 0x50 +#define DDR_PCTL_MRRCFG0 0x60 +#define DDR_PCTL_MRRSTAT0 0x64 +#define DDR_PCTL_MRRSTAT1 0x68 +#define DDR_PCTL_MCFG1 0x7c +#define DDR_PCTL_MCFG 0x80 +#define DDR_PCTL_PPCFG 0x84 +#define DDR_PCTL_MSTAT 0x88 +#define DDR_PCTL_LPDDR2ZQCFG 0x8c +#define DDR_PCTL_DTUPDES 0x94 +#define DDR_PCTL_DTUNA 0x98 +#define DDR_PCTL_DTUNE 0x9c +#define DDR_PCTL_DTUPRD0 0xa0 +#define DDR_PCTL_DTUPRD1 0xa4 +#define DDR_PCTL_DTUPRD2 0xa8 +#define DDR_PCTL_DTUPRD3 0xac +#define DDR_PCTL_DTUAWDT 0xb0 +#define DDR_PCTL_TOGCNT1U 0xc0 +#define DDR_PCTL_TINIT 0xc4 +#define DDR_PCTL_TRSTH 0xc8 +#define DDR_PCTL_TOGCNT100N 0xcc +#define DDR_PCTL_TREFI 0xd0 +#define DDR_PCTL_TMRD 0xd4 +#define DDR_PCTL_TRFC 0xd8 +#define DDR_PCTL_TRP 0xdc +#define DDR_PCTL_TRTW 0xe0 +#define DDR_PCTL_TAL 0xe4 +#define DDR_PCTL_TCL 0xe8 +#define DDR_PCTL_TCWL 0xec +#define DDR_PCTL_TRAS 0xf0 +#define DDR_PCTL_TRC 0xf4 +#define DDR_PCTL_TRCD 0xf8 +#define DDR_PCTL_TRRD 0xfc +#define DDR_PCTL_TRTP 0x100 +#define DDR_PCTL_TWR 0x104 +#define DDR_PCTL_TWTR 0x108 +#define DDR_PCTL_TEXSR 0x10c +#define DDR_PCTL_TXP 0x110 +#define DDR_PCTL_TXPDLL 0x114 +#define DDR_PCTL_TZQCS 0x118 +#define DDR_PCTL_TZQCSI 0x11c +#define DDR_PCTL_TDQS 0x120 +#define DDR_PCTL_TCKSRE 0x124 +#define DDR_PCTL_TCKSRX 0x128 +#define DDR_PCTL_TCKE 0x12c +#define DDR_PCTL_TMOD 0x130 +#define DDR_PCTL_TRSTL 0x134 +#define DDR_PCTL_TZQCL 0x138 +#define DDR_PCTL_TMRR 0x13c +#define DDR_PCTL_TCKESR 0x140 +#define DDR_PCTL_TDPD 0x144 +#define DDR_PCTL_TREFI_MEM_DDR3 0x148 +#define DDR_PCTL_ECCCFG 0x180 +#define DDR_PCTL_ECCTST 0x184 +#define DDR_PCTL_ECCCLR 0x188 +#define DDR_PCTL_ECCLOG 0x18c +#define DDR_PCTL_DTUWACTL 0x200 +#define DDR_PCTL_DTURACTL 0x204 +#define DDR_PCTL_DTUCFG 0x208 +#define DDR_PCTL_DTUECTL 0x20c +#define DDR_PCTL_DTUWD0 0x210 +#define DDR_PCTL_DTUWD1 0x214 +#define DDR_PCTL_DTUWD2 0x218 +#define DDR_PCTL_DTUWD3 0x21c +#define DDR_PCTL_DTUWDM 0x220 +#define DDR_PCTL_DTURD0 0x224 +#define DDR_PCTL_DTURD1 0x228 +#define DDR_PCTL_DTURD2 0x22c +#define DDR_PCTL_DTURD3 0x230 +#define DDR_PCTL_DTULFSRWD 0x234 +#define DDR_PCTL_DTULFSRRD 0x238 +#define DDR_PCTL_DTUEAF 0x23c +#define DDR_PCTL_DFITCTRLDELAY 0x240 +#define DDR_PCTL_DFIODTCFG 0x244 +#define DDR_PCTL_DFIODTCFG1 0x248 +#define DDR_PCTL_DFIODTRANKMAP 0x24c +#define DDR_PCTL_DFITPHYWRDATA 0x250 +#define DDR_PCTL_DFITPHYWRLAT 0x254 +#define DDR_PCTL_DFITPHYWRDATALAT 0x258 +#define DDR_PCTL_DFITRDDATAEN 0x260 +#define DDR_PCTL_DFITPHYRDLAT 0x264 +#define DDR_PCTL_DFITPHYUPDTYPE0 0x270 +#define DDR_PCTL_DFITPHYUPDTYPE1 0x274 +#define DDR_PCTL_DFITPHYUPDTYPE2 0x278 +#define DDR_PCTL_DFITPHYUPDTYPE3 0x27c +#define DDR_PCTL_DFITCTRLUPDMIN 0x280 +#define DDR_PCTL_DFITCTRLUPDMAX 0x284 +#define DDR_PCTL_DFITCTRLUPDDLY 0x288 +#define DDR_PCTL_DFIUPDCFG 0x290 +#define DDR_PCTL_DFITREFMSKI 0x294 +#define DDR_PCTL_DFITCTRLUPDI 0x298 +#define DDR_PCTL_DFITRCFG0 0x2ac +#define DDR_PCTL_DFITRSTAT0 0x2b0 +#define DDR_PCTL_DFITRWRLVLEN 0x2b4 +#define DDR_PCTL_DFITRRDLVLEN 0x2b8 +#define DDR_PCTL_DFITRRDLVLGATEEN 0x2bc +#define DDR_PCTL_DFISTSTAT0 0x2c0 +#define DDR_PCTL_DFISTCFG0 0x2c4 +#define DDR_PCTL_DFISTCFG1 0x2c8 +#define 
DDR_PCTL_DFITDRAMCLKEN 0x2d0 +#define DDR_PCTL_DFITDRAMCLKDIS 0x2d4 +#define DDR_PCTL_DFISTCFG2 0x2d8 +#define DDR_PCTL_DFISTPARCLR 0x2dc +#define DDR_PCTL_DFISTPARLOG 0x2e0 +#define DDR_PCTL_DFILPCFG0 0x2f0 +#define DDR_PCTL_DFITRWRLVLRESP0 0x300 +#define DDR_PCTL_DFITRWRLVLRESP1 0x304 +#define DDR_PCTL_DFITRWRLVLRESP2 0x308 +#define DDR_PCTL_DFITRRDLVLRESP0 0x30c +#define DDR_PCTL_DFITRRDLVLRESP1 0x310 +#define DDR_PCTL_DFITRRDLVLRESP2 0x314 +#define DDR_PCTL_DFITRWRLVLDELAY0 0x318 +#define DDR_PCTL_DFITRWRLVLDELAY1 0x31c +#define DDR_PCTL_DFITRWRLVLDELAY2 0x320 +#define DDR_PCTL_DFITRRDLVLDELAY0 0x324 +#define DDR_PCTL_DFITRRDLVLDELAY1 0x328 +#define DDR_PCTL_DFITRRDLVLDELAY2 0x32c +#define DDR_PCTL_DFITRRDLVLGATEDELAY0 0x330 +#define DDR_PCTL_DFITRRDLVLGATEDELAY1 0x334 +#define DDR_PCTL_DFITRRDLVLGATEDELAY2 0x338 +#define DDR_PCTL_DFITRCMD 0x33c +#define DDR_PCTL_IPVR 0x3f8 +#define DDR_PCTL_IPTR 0x3fc + +/* DDR PHY REG */ +#define DDR_PHY_REG0 0x0 +#define DDR_PHY_REG1 0x4 +#define DDR_PHY_REG2 0x8 +#define DDR_PHY_REG3 0xc +#define DDR_PHY_REG4 0x10 +#define DDR_PHY_REG5 0x14 +#define DDR_PHY_REG6 0x18 +#define DDR_PHY_REGB 0x2c +#define DDR_PHY_REGC 0x30 +#define DDR_PHY_REG11 0x44 +#define DDR_PHY_REG12 0x48 +#define DDR_PHY_REG13 0x4c +#define DDR_PHY_REG14 0x50 +#define DDR_PHY_REG16 0x58 +#define DDR_PHY_REG20 0x80 +#define DDR_PHY_REG21 0x84 +#define DDR_PHY_REG26 0x98 +#define DDR_PHY_REG27 0x9c +#define DDR_PHY_REG28 0xa0 +#define DDR_PHY_REG2C 0xb0 +#define DDR_PHY_REG30 0xc0 +#define DDR_PHY_REG31 0xc4 +#define DDR_PHY_REG36 0xd8 +#define DDR_PHY_REG37 0xdc +#define DDR_PHY_REG38 0xe0 +#define DDR_PHY_REG3C 0xf0 +#define DDR_PHY_REG40 0x100 +#define DDR_PHY_REG41 0x104 +#define DDR_PHY_REG46 0x118 +#define DDR_PHY_REG47 0x11c +#define DDR_PHY_REG48 0x120 +#define DDR_PHY_REG4C 0x130 +#define DDR_PHY_REG50 0x140 +#define DDR_PHY_REG51 0x144 +#define DDR_PHY_REG56 0x158 +#define DDR_PHY_REG57 0x15c +#define DDR_PHY_REG58 0x160 +#define DDR_PHY_REG5C 0x170 +#define DDR_PHY_REGDLL 0x290 +#define DDR_PHY_REGEC 0x3b0 +#define DDR_PHY_REGED 0x3b4 +#define DDR_PHY_REGEE 0x3b8 +#define DDR_PHY_REGEF 0x3bc +#define DDR_PHY_REGF0 0x3c0 +#define DDR_PHY_REGF1 0x3c4 +#define DDR_PHY_REGF2 0x3c8 +#define DDR_PHY_REGFA 0x3e8 +#define DDR_PHY_REGFB 0x3ec +#define DDR_PHY_REGFC 0x3f0 +#define DDR_PHY_REGFD 0x3f4 +#define DDR_PHY_REGFE 0x3f8 +#define DDR_PHY_REGFF 0x3fc + +/* MSCH REG define */ +#define MSCH_COREID 0x0 +#define MSCH_DDRCONF 0x8 +#define MSCH_DDRTIMING 0xc +#define MSCH_DDRMODE 0x10 +#define MSCH_READLATENCY 0x14 +#define MSCH_ACTIVATE 0x38 +#define MSCH_DEVTODEV 0x3c + +#define SET_NR(n) ((0x3f << (8 + 16)) | ((n - 1) << 8)) +#define SET_NO(n) ((0xf << (0 + 16)) | ((n - 1) << 0)) +#define SET_NF(n) ((n - 1) & 0x1fff) +#define SET_NB(n) ((n - 1) & 0xfff) +#define PLLMODE(n) ((0x3 << (8 + 16)) | (n << 8)) + +/* GRF REG define */ +#define GRF_SOC_STATUS0 0x480 +#define GRF_DDRPHY_LOCK (0x1 << 15) +#define GRF_DDRC0_CON0 0x600 + +/* CRU softreset ddr pctl, phy */ +#define DDRMSCH0_SRSTN_REQ(n) (((0x1 << 10) << 16) | (n << 10)) +#define DDRCTRL0_PSRSTN_REQ(n) (((0x1 << 3) << 16) | (n << 3)) +#define DDRCTRL0_SRSTN_REQ(n) (((0x1 << 2) << 16) | (n << 2)) +#define DDRPHY0_PSRSTN_REQ(n) (((0x1 << 1) << 16) | (n << 1)) +#define DDRPHY0_SRSTN_REQ(n) (((0x1 << 0) << 16) | (n << 0)) + +/* CRU_DPLL_CON2 */ +#define DPLL_STATUS_LOCK (1U << 31) + +/* CRU_DPLL_CON3 */ +#define DPLL_POWER_DOWN ((0x1 << (1 + 16)) | (0 << 1)) +#define DPLL_WORK_NORMAL_MODE ((0x3 << (8 + 16)) | (0 << 8)) 
+#define DPLL_WORK_SLOW_MODE ((0x3 << (8 + 16)) | (1 << 8)) +#define DPLL_RESET_CONTROL_NORMAL ((0x1 << (5 + 16)) | (0x0 << 5)) +#define DPLL_RESET_CONTROL_RESET ((0x1 << (5 + 16)) | (0x1 << 5)) + +/* PMU_PWRDN_CON */ +#define PD_PERI_PWRDN_ENABLE (1 << 13) + +#define DDR_PLL_SRC_MASK 0x13 + +/* DDR_PCTL_TREFI */ +#define DDR_UPD_REF_ENABLE (0X1u << 31) + +uint32_t ddr_get_resume_code_size(void); +uint32_t ddr_get_resume_data_size(void); +uint32_t *ddr_get_resume_code_base(void); +void ddr_reg_save(uint32_t pllpdstat, uint64_t base_addr); + +#endif /* DDR_RK3368_H */ diff --git a/plat/rockchip/rk3368/drivers/pmu/plat_pmu_macros.S b/plat/rockchip/rk3368/drivers/pmu/plat_pmu_macros.S new file mode 100644 index 0000000..399f61c --- /dev/null +++ b/plat/rockchip/rk3368/drivers/pmu/plat_pmu_macros.S @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <platform_def.h> + +.macro func_rockchip_clst_warmboot + /* Nothing to do for rk3368 */ +.endm + +.macro rockchip_clst_warmboot_data + /* Nothing to do for rk3368 */ +.endm diff --git a/plat/rockchip/rk3368/drivers/pmu/pmu.c b/plat/rockchip/rk3368/drivers/pmu/pmu.c new file mode 100644 index 0000000..e277a18 --- /dev/null +++ b/plat/rockchip/rk3368/drivers/pmu/pmu.c @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <plat/common/platform.h> + +#include <ddr_rk3368.h> +#include <plat_private.h> +#include <pmu.h> +#include <pmu_com.h> +#include <rk3368_def.h> +#include <soc.h> + +DEFINE_BAKERY_LOCK(rockchip_pd_lock); + +static uint32_t cpu_warm_boot_addr; + +void rk3368_flash_l2_b(void) +{ + uint32_t wait_cnt = 0; + + regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b); + dsb(); + + while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) + & BIT(clst_b_l2_flsh_done))) { + wait_cnt++; + if (!(wait_cnt % MAX_WAIT_CONUT)) + WARN("%s:reg %x,wait\n", __func__, + mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)); + } + + regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b); +} + +static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle) +{ + uint32_t mask = BIT(req); + uint32_t idle_mask = 0; + uint32_t idle_target = 0; + uint32_t val; + uint32_t wait_cnt = 0; + + switch (req) { + case bus_ide_req_clst_l: + idle_mask = BIT(pmu_idle_ack_cluster_l); + idle_target = (idle << pmu_idle_ack_cluster_l); + break; + + case bus_ide_req_clst_b: + idle_mask = BIT(pmu_idle_ack_cluster_b); + idle_target = (idle << pmu_idle_ack_cluster_b); + break; + + case bus_ide_req_cxcs: + idle_mask = BIT(pmu_idle_ack_cxcs); + idle_target = ((!idle) << pmu_idle_ack_cxcs); + break; + + case bus_ide_req_cci400: + idle_mask = BIT(pmu_idle_ack_cci400); + idle_target = ((!idle) << pmu_idle_ack_cci400); + break; + + case bus_ide_req_gpu: + idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu); + idle_target = (idle << pmu_idle_ack_gpu) | + (idle << pmu_idle_gpu); + break; + + case bus_ide_req_core: + idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core); + idle_target = (idle << pmu_idle_ack_core) | + (idle << pmu_idle_core); + break; + + case bus_ide_req_bus: + idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus); + idle_target 
= (idle << pmu_idle_ack_bus) | + (idle << pmu_idle_bus); + break; + case bus_ide_req_dma: + idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma); + idle_target = (idle << pmu_idle_ack_dma) | + (idle << pmu_idle_dma); + break; + + case bus_ide_req_peri: + idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri); + idle_target = (idle << pmu_idle_ack_peri) | + (idle << pmu_idle_peri); + break; + + case bus_ide_req_video: + idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video); + idle_target = (idle << pmu_idle_ack_video) | + (idle << pmu_idle_video); + break; + + case bus_ide_req_vio: + idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio); + idle_target = (pmu_idle_ack_vio) | + (idle << pmu_idle_vio); + break; + + case bus_ide_req_alive: + idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive); + idle_target = (idle << pmu_idle_ack_alive) | + (idle << pmu_idle_alive); + break; + + case bus_ide_req_pmu: + idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu); + idle_target = (idle << pmu_idle_ack_pmu) | + (idle << pmu_idle_pmu); + break; + + case bus_ide_req_msch: + idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch); + idle_target = (idle << pmu_idle_ack_msch) | + (idle << pmu_idle_msch); + break; + + case bus_ide_req_cci: + idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci); + idle_target = (idle << pmu_idle_ack_cci) | + (idle << pmu_idle_cci); + break; + + default: + ERROR("%s: Unsupported the idle request\n", __func__); + break; + } + + val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ); + if (idle) + val |= mask; + else + val &= ~mask; + + mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val); + + while ((mmio_read_32(PMU_BASE + + PMU_BUS_IDE_ST) & idle_mask) != idle_target) { + wait_cnt++; + if (!(wait_cnt % MAX_WAIT_CONUT)) + WARN("%s:st=%x(%x)\n", __func__, + mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST), + idle_mask); + } + + return 0; +} + +void pmu_scu_b_pwrup(void) +{ + regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b); + rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0); +} + +static void pmu_scu_b_pwrdn(void) +{ + uint32_t wait_cnt = 0; + + if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & + PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) { + ERROR("%s: not all cpus is off\n", __func__); + return; + } + + rk3368_flash_l2_b(); + + regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b); + + while (!(mmio_read_32(PMU_BASE + + PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) { + wait_cnt++; + if (!(wait_cnt % MAX_WAIT_CONUT)) + ERROR("%s:wait cluster-b l2(%x)\n", __func__, + mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)); + } + rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1); +} + +static void pmu_sleep_mode_config(void) +{ + uint32_t pwrmd_core, pwrmd_com; + + pwrmd_core = BIT(pmu_mdcr_cpu0_pd) | + BIT(pmu_mdcr_scu_l_pd) | + BIT(pmu_mdcr_l2_flush) | + BIT(pmu_mdcr_l2_idle) | + BIT(pmu_mdcr_clr_clst_l) | + BIT(pmu_mdcr_clr_core) | + BIT(pmu_mdcr_clr_cci) | + BIT(pmu_mdcr_core_pd); + + pwrmd_com = BIT(pmu_mode_en) | + BIT(pmu_mode_sref_enter) | + BIT(pmu_mode_pwr_off); + + regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en); + regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en); + regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en); + + mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2)); + mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100)); + mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2)); + mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core); + mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com); + dsb(); 
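+	/*
+	 * The dsb() above ensures all the PMU wakeup/mode/counter writes have
+	 * landed before the suspend sequence continues in the caller.
+	 */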
+} + +static void pmu_set_sleep_mode(void) +{ + pmu_sleep_mode_config(); + soc_sleep_config(); + regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis); + regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b); + pmu_scu_b_pwrdn(); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), + ((uintptr_t)&pmu_cpuson_entrypoint >> + CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2), + ((uintptr_t)&pmu_cpuson_entrypoint >> + CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK); +} + +static int cpus_id_power_domain(uint32_t cluster, + uint32_t cpu, + uint32_t pd_state, + uint32_t wfie_msk) +{ + uint32_t pd; + uint64_t mpidr; + + if (cluster) + pd = PD_CPUB0 + cpu; + else + pd = PD_CPUL0 + cpu; + + if (pmu_power_domain_st(pd) == pd_state) + return 0; + + if (pd_state == pmu_pd_off) { + mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu; + if (check_cpu_wfie(mpidr, wfie_msk)) + return -EINVAL; + } + + return pmu_power_domain_ctr(pd, pd_state); +} + +static void nonboot_cpus_off(void) +{ + uint32_t boot_cpu, boot_cluster, cpu; + + boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1()); + boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1()); + + /* turn off non-boot cpus */ + for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) { + if (!boot_cluster && (cpu == boot_cpu)) + continue; + cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK); + } + + for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) { + if (boot_cluster && (cpu == boot_cpu)) + continue; + cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK); + } +} + +void sram_save(void) +{ + /* TODO: support the sram save for rk3368 SoCs */ +} + +void sram_restore(void) +{ + /* TODO: support the sram restore for rk3368 SoCs */ +} + +int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint) +{ + uint32_t cpu, cluster; + uint32_t cpuon_id; + + cpu = MPIDR_AFFLVL0_VAL(mpidr); + cluster = MPIDR_AFFLVL1_VAL(mpidr); + + /* Make sure the cpu is off before powering it up. 
*/ + cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK); + + cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu; + assert(cpuon_id < PLATFORM_CORE_COUNT); + assert(cpuson_flags[cpuon_id] == 0); + cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG; + cpuson_entry_point[cpuon_id] = entrypoint; + + /* Switch boot addr to pmusram */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster), + (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) | + CPU_BOOT_ADDR_WMASK); + dsb(); + + cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK); + + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster), + (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) | + CPU_BOOT_ADDR_WMASK); + + return 0; +} + +int rockchip_soc_cores_pwr_dm_on_finish(void) +{ + return 0; +} + +int rockchip_soc_sys_pwr_dm_resume(void) +{ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), + (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) | + CPU_BOOT_ADDR_WMASK); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2), + (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) | + CPU_BOOT_ADDR_WMASK); + pm_plls_resume(); + pmu_scu_b_pwrup(); + + return 0; +} + +int rockchip_soc_sys_pwr_dm_suspend(void) +{ + nonboot_cpus_off(); + pmu_set_sleep_mode(); + + return 0; +} + +void rockchip_plat_mmu_el3(void) +{ + /* TODO: support the el3 for rk3368 SoCs */ +} + +void plat_rockchip_pmu_init(void) +{ + uint32_t cpu; + + /* register requires 32bits mode, switch it to 32 bits */ + cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot; + + for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) + cpuson_flags[cpu] = 0; + + nonboot_cpus_off(); + INFO("%s(%d): pd status %x\n", __func__, __LINE__, + mmio_read_32(PMU_BASE + PMU_PWRDN_ST)); +} diff --git a/plat/rockchip/rk3368/drivers/pmu/pmu.h b/plat/rockchip/rk3368/drivers/pmu/pmu.h new file mode 100644 index 0000000..b4d4807 --- /dev/null +++ b/plat/rockchip/rk3368/drivers/pmu/pmu.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PMU_H +#define PMU_H + +/* Allocate sp reginon in pmusram */ +#define PSRAM_SP_SIZE 0x80 +#define PSRAM_SP_BOTTOM (PSRAM_SP_TOP - PSRAM_SP_SIZE) + +/***************************************************************************** + * pmu con,reg + *****************************************************************************/ +#define PMU_WKUP_CFG0 0x0 +#define PMU_WKUP_CFG1 0x4 +#define PMU_WKUP_CFG2 0x8 +#define PMU_TIMEOUT_CNT 0x7c +#define PMU_PWRDN_CON 0xc +#define PMU_PWRDN_ST 0x10 +#define PMU_CORE_PWR_ST 0x38 + +#define PMU_PWRMD_CORE 0x14 +#define PMU_PWRMD_COM 0x18 +#define PMU_SFT_CON 0x1c +#define PMU_BUS_IDE_REQ 0x3c +#define PMU_BUS_IDE_ST 0x40 +#define PMU_OSC_CNT 0x48 +#define PMU_PLLLOCK_CNT 0x4c +#define PMU_PLLRST_CNT 0x50 +#define PMU_STABLE_CNT 0x54 +#define PMU_DDRIO_PWR_CNT 0x58 +#define PMU_WKUPRST_CNT 0x5c + +enum pmu_powermode_core { + pmu_mdcr_global_int_dis = 0, + pmu_mdcr_core_src_gt, + pmu_mdcr_clr_cci, + pmu_mdcr_cpu0_pd, + pmu_mdcr_clr_clst_l = 4, + pmu_mdcr_clr_core, + pmu_mdcr_scu_l_pd, + pmu_mdcr_core_pd, + pmu_mdcr_l2_idle = 8, + pmu_mdcr_l2_flush +}; + +/* + * the shift of bits for cores status + */ +enum pmu_core_pwrst_shift { + clstl_cpu_wfe = 2, + clstl_cpu_wfi = 6, + clstb_cpu_wfe = 12, + clstb_cpu_wfi = 16 +}; + +enum pmu_pdid { + PD_CPUL0 = 0, + PD_CPUL1, + PD_CPUL2, + PD_CPUL3, + PD_SCUL, + PD_CPUB0 = 5, + PD_CPUB1, + PD_CPUB2, + PD_CPUB3, + PD_SCUB = 9, + PD_PERI = 13, + PD_VIDEO, + PD_VIO, + PD_GPU0, + PD_GPU1, + PD_END +}; + +enum pmu_bus_ide { + bus_ide_req_clst_l = 0, + bus_ide_req_clst_b, + bus_ide_req_gpu, + bus_ide_req_core, + bus_ide_req_bus = 4, + bus_ide_req_dma, + bus_ide_req_peri, + bus_ide_req_video, + bus_ide_req_vio = 8, + bus_ide_req_res0, + bus_ide_req_cxcs, + bus_ide_req_alive, + bus_ide_req_pmu = 12, + bus_ide_req_msch, + bus_ide_req_cci, + bus_ide_req_cci400 = 15, + bus_ide_req_end +}; + +enum pmu_powermode_common { + pmu_mode_en = 0, + pmu_mode_res0, + pmu_mode_bus_pd, + pmu_mode_wkup_rst, + pmu_mode_pll_pd = 4, + pmu_mode_pwr_off, + pmu_mode_pmu_use_if, + pmu_mode_pmu_alive_use_if, + pmu_mode_osc_dis = 8, + pmu_mode_input_clamp, + pmu_mode_sref_enter, + pmu_mode_ddrc_gt, + pmu_mode_ddrio_ret = 12, + pmu_mode_ddrio_ret_deq, + pmu_mode_clr_pmu, + pmu_mode_clr_alive, + pmu_mode_clr_bus = 16, + pmu_mode_clr_dma, + pmu_mode_clr_msch, + pmu_mode_clr_peri, + pmu_mode_clr_video = 20, + pmu_mode_clr_vio, + pmu_mode_clr_gpu, + pmu_mode_clr_mcu, + pmu_mode_clr_cxcs = 24, + pmu_mode_clr_cci400, + pmu_mode_res1, + pmu_mode_res2, + pmu_mode_res3 = 28, + pmu_mode_mclst +}; + +enum pmu_core_power_st { + clst_l_cpu_wfe = 2, + clst_l_cpu_wfi = 6, + clst_b_l2_flsh_done = 10, + clst_b_l2_wfi = 11, + clst_b_cpu_wfe = 12, + clst_b_cpu_wfi = 16, + mcu_sleeping = 20, +}; + +enum pmu_sft_con { + pmu_sft_acinactm_clst_b = 5, + pmu_sft_l2flsh_clst_b, + pmu_sft_glbl_int_dis_b = 9, + pmu_sft_ddrio_ret_cfg = 11, +}; + +enum pmu_wkup_cfg2 { + pmu_cluster_l_wkup_en = 0, + pmu_cluster_b_wkup_en, + pmu_gpio_wkup_en, + pmu_sdio_wkup_en, + pmu_sdmmc_wkup_en, + pmu_sim_wkup_en, + pmu_timer_wkup_en, + pmu_usbdev_wkup_en, + pmu_sft_wkup_en, + pmu_wdt_mcu_wkup_en, + pmu_timeout_wkup_en, +}; + +enum pmu_bus_idle_st { + pmu_idle_ack_cluster_l = 0, + pmu_idle_ack_cluster_b, + pmu_idle_ack_gpu, + pmu_idle_ack_core, + pmu_idle_ack_bus, + pmu_idle_ack_dma, + pmu_idle_ack_peri, + pmu_idle_ack_video, + pmu_idle_ack_vio, + pmu_idle_ack_cci = 10, + pmu_idle_ack_msch, + pmu_idle_ack_alive, + pmu_idle_ack_pmu, + 
pmu_idle_ack_cxcs, + pmu_idle_ack_cci400, + pmu_inactive_cluster_l, + pmu_inactive_cluster_b, + pmu_idle_gpu, + pmu_idle_core, + pmu_idle_bus, + pmu_idle_dma, + pmu_idle_peri, + pmu_idle_video, + pmu_idle_vio, + pmu_idle_cci = 26, + pmu_idle_msch, + pmu_idle_alive, + pmu_idle_pmu, + pmu_active_cxcs, + pmu_active_cci, +}; + +#define PM_PWRDM_CPUSB_MSK (0xf << 5) + +#define CKECK_WFE_MSK 0x1 +#define CKECK_WFI_MSK 0x10 +#define CKECK_WFEI_MSK 0x11 + +#define PD_CTR_LOOP 500 +#define CHK_CPU_LOOP 500 + +#define MAX_WAIT_CONUT 1000 + +#endif /* PMU_H */ diff --git a/plat/rockchip/rk3368/drivers/soc/soc.c b/plat/rockchip/rk3368/drivers/soc/soc.c new file mode 100644 index 0000000..7d51bb8 --- /dev/null +++ b/plat/rockchip/rk3368/drivers/soc/soc.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/mmio.h> + +#include <plat_private.h> +#include <rk3368_def.h> +#include <soc.h> + +static uint32_t plls_con[END_PLL_ID][4]; + +/* Table of regions to map using the MMU. */ +const mmap_region_t plat_rk_mmap[] = { + MAP_REGION_FLAT(CCI400_BASE, CCI400_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(GIC400_BASE, GIC400_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(STIME_BASE, STIME_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SGRF_BASE, SGRF_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE, + MT_MEMORY | MT_RW | MT_SECURE), + MAP_REGION_FLAT(PMU_BASE, PMU_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART0_BASE, UART0_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART1_BASE, UART1_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART2_BASE, UART2_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART3_BASE, UART3_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(UART4_BASE, UART4_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(CRU_BASE, CRU_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_PCTL_BASE, DDR_PCTL_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(DDR_PHY_BASE, DDR_PHY_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(GRF_BASE, GRF_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(SERVICE_BUS_BASE, SERVICE_BUS_SISE, + MT_DEVICE | MT_RW | MT_SECURE), + { 0 } +}; + +/* The RockChip power domain tree descriptor */ +const unsigned char rockchip_power_domain_tree_desc[] = { + /* No of root nodes */ + PLATFORM_SYSTEM_COUNT, + /* No of children for the root node */ + PLATFORM_CLUSTER_COUNT, + /* No of children for the first cluster node */ + PLATFORM_CLUSTER0_CORE_COUNT, + /* No of children for the second cluster node */ + PLATFORM_CLUSTER1_CORE_COUNT +}; + +void secure_timer_init(void) +{ + mmio_write_32(STIMER1_BASE + TIMER_LOADE_COUNT0, 0xffffffff); + mmio_write_32(STIMER1_BASE + TIMER_LOADE_COUNT1, 0xffffffff); + + /* auto reload & enable the timer */ + mmio_write_32(STIMER1_BASE + TIMER_CONTROL_REG, TIMER_EN); +} + +void sgrf_init(void) +{ + /* setting all configurable ip into no-secure */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(5), SGRF_SOC_CON_NS); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6), SGRF_SOC_CON7_BITS); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(7), SGRF_SOC_CON_NS); + + /* secure dma to no sesure */ + mmio_write_32(SGRF_BASE + SGRF_BUSDMAC_CON(0), SGRF_BUSDMAC_CON0_NS); + mmio_write_32(SGRF_BASE + SGRF_BUSDMAC_CON(1), 
SGRF_BUSDMAC_CON1_NS); + dsb(); + + /* rst dma1 */ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1), + RST_DMA1_MSK | (RST_DMA1_MSK << 16)); + /* rst dma2 */ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(4), + RST_DMA2_MSK | (RST_DMA2_MSK << 16)); + + dsb(); + + /* release dma1 rst*/ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1), (RST_DMA1_MSK << 16)); + /* release dma2 rst*/ + mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(4), (RST_DMA2_MSK << 16)); +} + +void plat_rockchip_soc_init(void) +{ + secure_timer_init(); + sgrf_init(); +} + +void regs_updata_bits(uintptr_t addr, uint32_t val, + uint32_t mask, uint32_t shift) +{ + uint32_t tmp, orig; + + orig = mmio_read_32(addr); + + tmp = orig & ~(mask << shift); + tmp |= (val & mask) << shift; + + if (tmp != orig) + mmio_write_32(addr, tmp); + dsb(); +} + +static void plls_suspend(uint32_t pll_id) +{ + plls_con[pll_id][0] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 0)); + plls_con[pll_id][1] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 1)); + plls_con[pll_id][2] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 2)); + plls_con[pll_id][3] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 3)); + + mmio_write_32(CRU_BASE + PLL_CONS((pll_id), 3), PLL_SLOW_BITS); + mmio_write_32(CRU_BASE + PLL_CONS((pll_id), 3), PLL_BYPASS); +} + +static void pm_plls_suspend(void) +{ + plls_suspend(NPLL_ID); + plls_suspend(CPLL_ID); + plls_suspend(GPLL_ID); + plls_suspend(ABPLL_ID); + plls_suspend(ALPLL_ID); +} + +static inline void plls_resume(void) +{ + mmio_write_32(CRU_BASE + PLL_CONS(ABPLL_ID, 3), + plls_con[ABPLL_ID][3] | PLL_BYPASS_W_MSK); + mmio_write_32(CRU_BASE + PLL_CONS(ALPLL_ID, 3), + plls_con[ALPLL_ID][3] | PLL_BYPASS_W_MSK); + mmio_write_32(CRU_BASE + PLL_CONS(GPLL_ID, 3), + plls_con[GPLL_ID][3] | PLL_BYPASS_W_MSK); + mmio_write_32(CRU_BASE + PLL_CONS(CPLL_ID, 3), + plls_con[CPLL_ID][3] | PLL_BYPASS_W_MSK); + mmio_write_32(CRU_BASE + PLL_CONS(NPLL_ID, 3), + plls_con[NPLL_ID][3] | PLL_BYPASS_W_MSK); +} + +void soc_sleep_config(void) +{ + int i = 0; + + for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) + mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), 0xffff0000); + pm_plls_suspend(); + + for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) + mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), 0xffff0000); +} + +void pm_plls_resume(void) +{ + plls_resume(); + + mmio_write_32(CRU_BASE + PLL_CONS(ABPLL_ID, 3), + plls_con[ABPLL_ID][3] | PLLS_MODE_WMASK); + mmio_write_32(CRU_BASE + PLL_CONS(ALPLL_ID, 3), + plls_con[ALPLL_ID][3] | PLLS_MODE_WMASK); + mmio_write_32(CRU_BASE + PLL_CONS(GPLL_ID, 3), + plls_con[GPLL_ID][3] | PLLS_MODE_WMASK); + mmio_write_32(CRU_BASE + PLL_CONS(CPLL_ID, 3), + plls_con[CPLL_ID][3] | PLLS_MODE_WMASK); + mmio_write_32(CRU_BASE + PLL_CONS(NPLL_ID, 3), + plls_con[NPLL_ID][3] | PLLS_MODE_WMASK); +} + +void __dead2 rockchip_soc_soft_reset(void) +{ + uint32_t temp_val; + + mmio_write_32(CRU_BASE + PLL_CONS((GPLL_ID), 3), PLL_SLOW_BITS); + mmio_write_32(CRU_BASE + PLL_CONS((CPLL_ID), 3), PLL_SLOW_BITS); + mmio_write_32(CRU_BASE + PLL_CONS((NPLL_ID), 3), PLL_SLOW_BITS); + mmio_write_32(CRU_BASE + PLL_CONS((ABPLL_ID), 3), PLL_SLOW_BITS); + mmio_write_32(CRU_BASE + PLL_CONS((ALPLL_ID), 3), PLL_SLOW_BITS); + + temp_val = mmio_read_32(CRU_BASE + CRU_GLB_RST_CON) | + PMU_RST_BY_SECOND_SFT; + + mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, temp_val); + mmio_write_32(CRU_BASE + CRU_GLB_SRST_SND, 0xeca8); + + /* + * Maybe the HW needs some times to reset the system, + * so we do not hope the core to excute valid codes. 
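+	 * Just spin here until the global soft reset takes effect.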
+ */ + while (1) + ; +} diff --git a/plat/rockchip/rk3368/drivers/soc/soc.h b/plat/rockchip/rk3368/drivers/soc/soc.h new file mode 100644 index 0000000..6c7a01b --- /dev/null +++ b/plat/rockchip/rk3368/drivers/soc/soc.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOC_H +#define SOC_H + +enum plls_id { + ABPLL_ID = 0, + ALPLL_ID, + DPLL_ID, + CPLL_ID, + GPLL_ID, + NPLL_ID, + END_PLL_ID, +}; + +/***************************************************************************** + * secure timer + *****************************************************************************/ +#define TIMER_LOADE_COUNT0 0x00 +#define TIMER_LOADE_COUNT1 0x04 +#define TIMER_CURRENT_VALUE0 0x08 +#define TIMER_CURRENT_VALUE1 0x0C +#define TIMER_CONTROL_REG 0x10 +#define TIMER_INTSTATUS 0x18 + +#define TIMER_EN 0x1 + +#define STIMER1_BASE (STIME_BASE + 0x20) + +#define CYCL_24M_CNT_US(us) (24 * us) +#define CYCL_24M_CNT_MS(ms) (ms * CYCL_24M_CNT_US(1000)) + +/***************************************************************************** + * sgrf reg, offset + *****************************************************************************/ +#define SGRF_SOC_CON(n) (0x0 + (n) * 4) +#define SGRF_BUSDMAC_CON(n) (0x100 + (n) * 4) + +#define SGRF_SOC_CON_NS 0xffff0000 + +/***************************************************************************** + * con6[2]pmusram is security. + * con6[6]stimer is security. + *****************************************************************************/ +#define PMUSRAM_S_SHIFT 2 +#define PMUSRAM_S 1 +#define STIMER_S_SHIFT 6 +#define STIMER_S 1 +#define SGRF_SOC_CON7_BITS ((0xffffu << 16) | \ + (PMUSRAM_S << PMUSRAM_S_SHIFT) | \ + (STIMER_S << STIMER_S_SHIFT)) + +#define SGRF_BUSDMAC_CON0_NS 0xfffcfff8 +#define SGRF_BUSDMAC_CON1_NS 0xffff0fff + +/* + * sgrf_soc_con1~2, mask and offset + */ +#define CPU_BOOT_ADDR_WMASK 0xffff0000 +#define CPU_BOOT_ADDR_ALIGN 16 + +/***************************************************************************** + * cru reg, offset + *****************************************************************************/ +#define CRU_SOFTRST_CON 0x300 +#define CRU_SOFTRSTS_CON(n) (CRU_SOFTRST_CON + ((n) * 4)) +#define CRU_SOFTRSTS_CON_CNT 15 + +#define SOFTRST_DMA1 0x40004 +#define SOFTRST_DMA2 0x10001 + +#define RST_DMA1_MSK 0x4 +#define RST_DMA2_MSK 0x0 + +#define CRU_CLKSEL_CON 0x100 +#define CRU_CLKSELS_CON(i) (CRU_CLKSEL_CON + ((i) * 4)) +#define CRU_CLKSEL_CON_CNT 56 + +#define CRU_CLKGATE_CON 0x200 +#define CRU_CLKGATES_CON(i) (CRU_CLKGATE_CON + ((i) * 4)) +#define CRU_CLKGATES_CON_CNT 25 + +#define CRU_GLB_SRST_FST 0x280 +#define CRU_GLB_SRST_SND 0x284 +#define CRU_GLB_RST_CON 0x388 + +#define CRU_CONS_GATEID(i) (16 * (i)) +#define GATE_ID(reg, bit) ((reg * 16) + bit) + +#define PMU_RST_BY_SECOND_SFT (BIT(1) << 2) +#define PMU_RST_NOT_BY_SFT (BIT(1) << 2) + +/*************************************************************************** + * pll + ***************************************************************************/ +#define PLL_PWR_DN_MSK (0x1 << 1) +#define PLL_PWR_DN REG_WMSK_BITS(1, 1, 0x1) +#define PLL_PWR_ON REG_WMSK_BITS(0, 1, 0x1) +#define PLL_RESET REG_WMSK_BITS(1, 5, 0x1) +#define PLL_RESET_RESUME REG_WMSK_BITS(0, 5, 0x1) +#define PLL_BYPASS_MSK (0x1 << 0) +#define PLL_BYPASS_W_MSK (PLL_BYPASS_MSK << 16) +#define PLL_BYPASS REG_WMSK_BITS(1, 0, 0x1) +#define PLL_NO_BYPASS REG_WMSK_BITS(0, 0, 0x1) +#define PLL_MODE_SHIFT 8 +#define 
PLL_MODE_MSK 0x3 +#define PLLS_MODE_WMASK (PLL_MODE_MSK << (16 + PLL_MODE_SHIFT)) +#define PLL_SLOW 0x0 +#define PLL_NORM 0x1 +#define PLL_DEEP 0x2 +#define PLL_SLOW_BITS REG_WMSK_BITS(PLL_SLOW, 8, 0x3) +#define PLL_NORM_BITS REG_WMSK_BITS(PLL_NORM, 8, 0x3) +#define PLL_DEEP_BITS REG_WMSK_BITS(PLL_DEEP, 8, 0x3) + +#define PLL_CONS(id, i) ((id) * 0x10 + ((i) * 4)) + +#define REG_W_MSK(bits_shift, msk) \ + ((msk) << ((bits_shift) + 16)) +#define REG_VAL_CLRBITS(val, bits_shift, msk) \ + (val & (~(msk << bits_shift))) +#define REG_SET_BITS(bits, bits_shift, msk) \ + (((bits) & (msk)) << (bits_shift)) +#define REG_WMSK_BITS(bits, bits_shift, msk) \ + (REG_W_MSK(bits_shift, msk) | \ + REG_SET_BITS(bits, bits_shift, msk)) + +#define regs_updata_bit_set(addr, shift) \ + regs_updata_bits((addr), 0x1, 0x1, (shift)) +#define regs_updata_bit_clr(addr, shift) \ + regs_updata_bits((addr), 0x0, 0x1, (shift)) + +void regs_updata_bits(uintptr_t addr, uint32_t val, + uint32_t mask, uint32_t shift); +void soc_sleep_config(void); +void pm_plls_resume(void); + +#endif /* SOC_H */ diff --git a/plat/rockchip/rk3368/include/plat.ld.S b/plat/rockchip/rk3368/include/plat.ld.S new file mode 100644 index 0000000..e9bb3a2 --- /dev/null +++ b/plat/rockchip/rk3368/include/plat.ld.S @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef ROCKCHIP_PLAT_LD_S +#define ROCKCHIP_PLAT_LD_S + +MEMORY { + PMUSRAM (rwx): ORIGIN = PMUSRAM_BASE, LENGTH = PMUSRAM_RSIZE +} + +SECTIONS +{ + . = PMUSRAM_BASE; + + /* + * pmu_cpuson_entrypoint request address + * align 64K when resume, so put it in the + * start of pmusram + */ + .text_pmusram : { + ASSERT(. == ALIGN(64 * 1024), + ".pmusram.entry request 64K aligned."); + *(.pmusram.entry) + __bl31_pmusram_text_start = .; + *(.pmusram.text) + *(.pmusram.rodata) + __bl31_pmusram_text_end = .; + __bl31_pmusram_data_start = .; + *(.pmusram.data) + __bl31_pmusram_data_end = .; + + } >PMUSRAM +} + +#endif /* ROCKCHIP_PLAT_LD_S */ diff --git a/plat/rockchip/rk3368/include/plat_sip_calls.h b/plat/rockchip/rk3368/include/plat_sip_calls.h new file mode 100644 index 0000000..66c4868 --- /dev/null +++ b/plat/rockchip/rk3368/include/plat_sip_calls.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_SIP_CALLS_H +#define PLAT_SIP_CALLS_H + +#define RK_PLAT_SIP_NUM_CALLS 0 + +#endif /* PLAT_SIP_CALLS_H */ diff --git a/plat/rockchip/rk3368/include/platform_def.h b/plat/rockchip/rk3368/include/platform_def.h new file mode 100644 index 0000000..519a025 --- /dev/null +++ b/plat/rockchip/rk3368/include/platform_def.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLATFORM_DEF_H +#define PLATFORM_DEF_H + +#include <arch.h> +#include <lib/utils_def.h> +#include <plat/common/common_def.h> + +#include <rk3368_def.h> + +/******************************************************************************* + * Platform binary types for linking + ******************************************************************************/ +#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" +#define PLATFORM_LINKER_ARCH aarch64 + +/******************************************************************************* + * Generic platform constants + ******************************************************************************/ + +/* Size of cacheable stacks */ +#if defined(IMAGE_BL1) +#define PLATFORM_STACK_SIZE 0x440 +#elif defined(IMAGE_BL2) +#define PLATFORM_STACK_SIZE 0x400 +#elif defined(IMAGE_BL31) +#define PLATFORM_STACK_SIZE 0x800 +#elif defined(IMAGE_BL32) +#define PLATFORM_STACK_SIZE 0x440 +#endif + +#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n" + +#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2 +#define PLATFORM_SYSTEM_COUNT U(1) +#define PLATFORM_CLUSTER_COUNT U(2) +#define PLATFORM_CLUSTER0_CORE_COUNT U(4) +#define PLATFORM_CLUSTER1_CORE_COUNT U(4) +#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \ + PLATFORM_CLUSTER0_CORE_COUNT) +#define PLATFORM_MAX_CPUS_PER_CLUSTER U(4) +#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \ + PLATFORM_CLUSTER_COUNT + \ + PLATFORM_CORE_COUNT) + +#define PLAT_RK_CLST_TO_CPUID_SHIFT 6 + +#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2 + +/* + * This macro defines the deepest retention state possible. A higher state + * id will represent an invalid or a power down state. + */ +#define PLAT_MAX_RET_STATE U(1) + +/* + * This macro defines the deepest power down states possible. Any state ID + * higher than this is invalid. + */ +#define PLAT_MAX_OFF_STATE U(2) + +/******************************************************************************* + * Platform memory map related constants + ******************************************************************************/ +/* TF text, ro, rw, Size: 1MB */ +#define TZRAM_BASE (0x0) +#define TZRAM_SIZE (0x100000) + +/******************************************************************************* + * BL31 specific defines. + ******************************************************************************/ +/* + * Put BL3-1 at the top of the Trusted RAM + */ +#define BL31_BASE (TZRAM_BASE + 0x40000) +#define BL31_LIMIT (TZRAM_BASE + TZRAM_SIZE) + +/******************************************************************************* + * Platform specific page table and MMU setup constants + ******************************************************************************/ +#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32) +#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32) +#define MAX_XLAT_TABLES 8 +#define MAX_MMAP_REGIONS 20 + +/******************************************************************************* + * Declarations and constants to access the mailboxes safely. Each mailbox is + * aligned on the biggest cache line size in the platform. This is known only + * to the platform as it might have a combination of integrated and external + * caches. Such alignment ensures that two maiboxes do not sit on the same cache + * line at any cache level. They could belong to different cpus/clusters & + * get written while being protected by different locks causing corruption of + * a valid mailbox address. 
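+ * On this platform the write-back granule is taken to be 64 bytes
+ * (CACHE_WRITEBACK_SHIFT = 6 below), so mailboxes end up at least 64 bytes
+ * apart.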
+ ******************************************************************************/ +#define CACHE_WRITEBACK_SHIFT 6 +#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT) + +/* + * Define GICD and GICC and GICR base + */ +#define PLAT_RK_GICD_BASE RK3368_GICD_BASE +#define PLAT_RK_GICC_BASE RK3368_GICC_BASE + +#define PLAT_RK_UART_BASE UART2_BASE +#define PLAT_RK_UART_CLOCK RK3368_UART_CLOCK +#define PLAT_RK_UART_BAUDRATE RK3368_BAUDRATE + +#define PLAT_RK_CCI_BASE CCI400_BASE + +#define PLAT_RK_PRIMARY_CPU 0x0 + +#define PSRAM_DO_DDR_RESUME 0 +#define PSRAM_CHECK_WAKEUP_CPU 0 + +#endif /* PLATFORM_DEF_H */ diff --git a/plat/rockchip/rk3368/plat_sip_calls.c b/plat/rockchip/rk3368/plat_sip_calls.c new file mode 100644 index 0000000..5918d58 --- /dev/null +++ b/plat/rockchip/rk3368/plat_sip_calls.c @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/debug.h> +#include <common/runtime_svc.h> +#include <lib/mmio.h> + +#include <plat_sip_calls.h> +#include <rockchip_sip_svc.h> + +uintptr_t rockchip_plat_sip_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid); + SMC_RET1(handle, SMC_UNK); +} diff --git a/plat/rockchip/rk3368/platform.mk b/plat/rockchip/rk3368/platform.mk new file mode 100644 index 0000000..e6c62de --- /dev/null +++ b/plat/rockchip/rk3368/platform.mk @@ -0,0 +1,67 @@ +# +# Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +include drivers/arm/gic/v2/gicv2.mk + +RK_PLAT := plat/rockchip +RK_PLAT_SOC := ${RK_PLAT}/${PLAT} +RK_PLAT_COMMON := ${RK_PLAT}/common + +DISABLE_BIN_GENERATION := 1 + +PLAT_INCLUDES := -I${RK_PLAT_COMMON}/ \ + -I${RK_PLAT_COMMON}/include/ \ + -I${RK_PLAT_COMMON}/aarch64/ \ + -I${RK_PLAT_COMMON}/drivers/pmu/ \ + -I${RK_PLAT_SOC}/ \ + -I${RK_PLAT_SOC}/drivers/pmu/ \ + -I${RK_PLAT_SOC}/drivers/soc/ \ + -I${RK_PLAT_SOC}/drivers/ddr/ \ + -I${RK_PLAT_SOC}/include/ + +RK_GIC_SOURCES := ${GICV2_SOURCES} \ + plat/common/plat_gicv2.c \ + ${RK_PLAT}/common/rockchip_gicv2.c + +PLAT_BL_COMMON_SOURCES := common/desc_image_load.c \ + lib/bl_aux_params/bl_aux_params.c \ + lib/xlat_tables/xlat_tables_common.c \ + lib/xlat_tables/aarch64/xlat_tables.c \ + plat/common/aarch64/crash_console_helpers.S \ + plat/common/plat_psci_common.c + +ifneq (${ENABLE_STACK_PROTECTOR},0) +PLAT_BL_COMMON_SOURCES += ${RK_PLAT_COMMON}/rockchip_stack_protector.c +endif + +BL31_SOURCES += ${RK_GIC_SOURCES} \ + drivers/arm/cci/cci.c \ + drivers/ti/uart/aarch64/16550_console.S \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + lib/cpus/aarch64/cortex_a53.S \ + ${RK_PLAT_COMMON}/aarch64/plat_helpers.S \ + ${RK_PLAT_COMMON}/bl31_plat_setup.c \ + ${RK_PLAT_COMMON}/params_setup.c \ + ${RK_PLAT_COMMON}/aarch64/pmu_sram_cpus_on.S \ + ${RK_PLAT_COMMON}/plat_pm.c \ + ${RK_PLAT_COMMON}/plat_topology.c \ + ${RK_PLAT_COMMON}/aarch64/platform_common.c \ + ${RK_PLAT_COMMON}/rockchip_sip_svc.c \ + ${RK_PLAT_SOC}/plat_sip_calls.c \ + ${RK_PLAT_SOC}/drivers/pmu/pmu.c \ + ${RK_PLAT_SOC}/drivers/soc/soc.c \ + ${RK_PLAT_SOC}/drivers/ddr/ddr_rk3368.c \ + +include lib/coreboot/coreboot.mk +include lib/libfdt/libfdt.mk + +$(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT)) + +# Do not enable SVE +ENABLE_SVE_FOR_NS := 0 + 
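+# Cortex-A53 (the only core type on rk3368) is not reported as affected by
+# CVE-2017-5715, so the workaround is left disabled.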
+WORKAROUND_CVE_2017_5715 := 0 diff --git a/plat/rockchip/rk3368/rk3368_def.h b/plat/rockchip/rk3368/rk3368_def.h new file mode 100644 index 0000000..4b0fbab --- /dev/null +++ b/plat/rockchip/rk3368/rk3368_def.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RK3368_DEF_H +#define RK3368_DEF_H + +/* Special value used to verify platform parameters from BL2 to BL3-1 */ +#define RK_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL + +#define CCI400_BASE 0xffb90000 +#define CCI400_SIZE 0x10000 + +#define GIC400_BASE 0xffb70000 +#define GIC400_SIZE 0x10000 + +#define STIME_BASE 0xff830000 +#define STIME_SIZE 0x10000 + +#define CRU_BASE 0xff760000 +#define CRU_SIZE 0x10000 + +#define GRF_BASE 0xff770000 +#define GRF_SIZE 0x10000 + +#define SGRF_BASE 0xff740000 +#define SGRF_SIZE 0x10000 + +#define PMU_BASE 0xff730000 +#define PMU_GRF_BASE 0xff738000 +#define PMU_SIZE 0x10000 + +#define RK_INTMEM_BASE 0xff8c0000 +#define RK_INTMEM_SIZE 0x10000 + +#define UART0_BASE 0xff180000 +#define UART0_SIZE 0x10000 + +#define UART1_BASE 0xff190000 +#define UART1_SIZE 0x10000 + +#define UART2_BASE 0xff690000 +#define UART2_SIZE 0x10000 + +#define UART3_BASE 0xff1b0000 +#define UART3_SIZE 0x10000 + +#define UART4_BASE 0xff1c0000 +#define UART4_SIZE 0x10000 + +#define CRU_BASE 0xff760000 + +#define PMUSRAM_BASE 0xff720000 +#define PMUSRAM_SIZE 0x10000 +#define PMUSRAM_RSIZE 0x1000 + +#define DDR_PCTL_BASE 0xff610000 +#define DDR_PCTL_SIZE 0x10000 + +#define DDR_PHY_BASE 0xff620000 +#define DDR_PHY_SIZE 0x10000 + +#define SERVICE_BUS_BASE 0xffac0000 +#define SERVICE_BUS_SISE 0x50000 + +#define COLD_BOOT_BASE 0xffff0000 +/************************************************************************** + * UART related constants + **************************************************************************/ +#define RK3368_BAUDRATE 115200 +#define RK3368_UART_CLOCK 24000000 + +/****************************************************************************** + * System counter frequency related constants + ******************************************************************************/ +#define SYS_COUNTER_FREQ_IN_TICKS 24000000 + +/****************************************************************************** + * GIC-400 & interrupt handling related constants + ******************************************************************************/ + +/* Base rk_platform compatible GIC memory map */ +#define RK3368_GICD_BASE (GIC400_BASE + 0x1000) +#define RK3368_GICC_BASE (GIC400_BASE + 0x2000) +#define RK3368_GICR_BASE 0 /* no GICR in GIC-400 */ + +/***************************************************************************** + * CCI-400 related constants + ******************************************************************************/ +#define PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX 3 +#define PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX 4 + +/****************************************************************************** + * sgi, ppi + ******************************************************************************/ +#define RK_IRQ_SEC_PHY_TIMER 29 + +#define RK_IRQ_SEC_SGI_0 8 +#define RK_IRQ_SEC_SGI_1 9 +#define RK_IRQ_SEC_SGI_2 10 +#define RK_IRQ_SEC_SGI_3 11 +#define RK_IRQ_SEC_SGI_4 12 +#define RK_IRQ_SEC_SGI_5 13 +#define RK_IRQ_SEC_SGI_6 14 +#define RK_IRQ_SEC_SGI_7 15 + +/* + * Define a list of Group 0 interrupts. 
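+ * Only the secure physical timer PPI is routed to Group 0 on rk3368.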
+ */ +#define PLAT_RK_GICV2_G0_IRQS \ + INTR_PROP_DESC(RK_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, \ + GICV2_INTR_GROUP0, GIC_INTR_CFG_LEVEL) + +#endif /* RK3368_DEF_H */ diff --git a/plat/rockchip/rk3399/drivers/dp/cdn_dp.c b/plat/rockchip/rk3399/drivers/dp/cdn_dp.c new file mode 100644 index 0000000..a8773f4 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/dp/cdn_dp.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <cdefs.h> +#include <stdlib.h> +#include <string.h> + +#include <lib/smccc.h> + +#include <cdn_dp.h> + +__asm__( + ".pushsection .text.hdcp_handler, \"ax\", %progbits\n" + ".global hdcp_handler\n" + ".balign 4\n" + "hdcp_handler:\n" + ".incbin \"" HDCPFW "\"\n" + ".type hdcp_handler, %function\n" + ".size hdcp_handler, .- hdcp_handler\n" + ".popsection\n" +); + +static uint64_t *hdcp_key_pdata; +static struct cdn_dp_hdcp_key_1x key; + +int hdcp_handler(struct cdn_dp_hdcp_key_1x *key); + +uint64_t dp_hdcp_ctrl(uint64_t type) +{ + switch (type) { + case HDCP_KEY_DATA_START_TRANSFER: + memset(&key, 0x00, sizeof(key)); + hdcp_key_pdata = (uint64_t *)&key; + return 0; + case HDCP_KEY_DATA_START_DECRYPT: + if (hdcp_key_pdata == (uint64_t *)(&key + 1)) + return hdcp_handler(&key); + else + return PSCI_E_INVALID_PARAMS; + assert(0); /* Unreachable */ + default: + return SMC_UNK; + } +} + +uint64_t dp_hdcp_store_key(uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + uint64_t x5, + uint64_t x6) +{ + if (hdcp_key_pdata < (uint64_t *)&key || + hdcp_key_pdata + 6 > (uint64_t *)(&key + 1)) + return PSCI_E_INVALID_PARAMS; + + hdcp_key_pdata[0] = x1; + hdcp_key_pdata[1] = x2; + hdcp_key_pdata[2] = x3; + hdcp_key_pdata[3] = x4; + hdcp_key_pdata[4] = x5; + hdcp_key_pdata[5] = x6; + hdcp_key_pdata += 6; + + return 0; +} diff --git a/plat/rockchip/rk3399/drivers/dp/cdn_dp.h b/plat/rockchip/rk3399/drivers/dp/cdn_dp.h new file mode 100644 index 0000000..c5cbae2 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/dp/cdn_dp.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef CDN_DP_H +#define CDN_DP_H + +#include <plat_private.h> + +enum { + CDN_DP_HDCP_1X_KSV_LEN = 5, + CDN_DP_HDCP_KSV_LEN = 8, + CDN_DP_HDCP_RESERVED_LEN = 10, + CDN_DP_HDCP_UID_LEN = 16, + CDN_DP_HDCP_SHA_LEN = 20, + CDN_DP_HDCP_DPK_LEN = 280, + CDN_DP_HDCP_1X_KEYS_LEN = 285, + CDN_DP_HDCP_KEY_LEN = 326, +}; + +struct cdn_dp_hdcp_key_1x { + uint8_t ksv[CDN_DP_HDCP_KSV_LEN]; + uint8_t device_key[CDN_DP_HDCP_DPK_LEN]; + uint8_t sha1[CDN_DP_HDCP_SHA_LEN]; + uint8_t uid[CDN_DP_HDCP_UID_LEN]; + uint16_t seed; + uint8_t reserved[CDN_DP_HDCP_RESERVED_LEN]; +}; + +#define HDCP_KEY_DATA_START_TRANSFER 0 +#define HDCP_KEY_DATA_START_DECRYPT 1 +#define HDCP_KEY_1X_STORE_DATA_ALIGN_SIZE (6 * 64) / 8 + +/* Checks the cdn_dp_hdcp_key_1x must be aligned on 6 x 64-bit word boundary */ +CASSERT(sizeof(struct cdn_dp_hdcp_key_1x) % HDCP_KEY_1X_STORE_DATA_ALIGN_SIZE, \ + assert_hdcp_key_1x_store_data_align_size_mismatch); + +uint64_t dp_hdcp_ctrl(uint64_t type); + +uint64_t dp_hdcp_store_key(uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + uint64_t x5, + uint64_t x6); + +#endif /* CDN_DP_H */ diff --git a/plat/rockchip/rk3399/drivers/dram/dfs.c b/plat/rockchip/rk3399/drivers/dram/dfs.c new file mode 100644 index 0000000..816372b --- /dev/null +++ b/plat/rockchip/rk3399/drivers/dram/dfs.c @@ -0,0 +1,2114 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include <m0_ctl.h> +#include <plat_private.h> +#include "dfs.h" +#include "dram.h" +#include "dram_spec_timing.h" +#include "pmu.h" +#include "soc.h" +#include "string.h" + +#define ENPER_CS_TRAINING_FREQ (666) +#define TDFI_LAT_THRESHOLD_FREQ (928) +#define PHY_DLL_BYPASS_FREQ (260) + +static const struct pll_div dpll_rates_table[] = { + + /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2 */ + {.mhz = 928, .refdiv = 1, .fbdiv = 116, .postdiv1 = 3, .postdiv2 = 1}, + {.mhz = 800, .refdiv = 1, .fbdiv = 100, .postdiv1 = 3, .postdiv2 = 1}, + {.mhz = 732, .refdiv = 1, .fbdiv = 61, .postdiv1 = 2, .postdiv2 = 1}, + {.mhz = 666, .refdiv = 1, .fbdiv = 111, .postdiv1 = 4, .postdiv2 = 1}, + {.mhz = 600, .refdiv = 1, .fbdiv = 50, .postdiv1 = 2, .postdiv2 = 1}, + {.mhz = 528, .refdiv = 1, .fbdiv = 66, .postdiv1 = 3, .postdiv2 = 1}, + {.mhz = 400, .refdiv = 1, .fbdiv = 50, .postdiv1 = 3, .postdiv2 = 1}, + {.mhz = 300, .refdiv = 1, .fbdiv = 50, .postdiv1 = 4, .postdiv2 = 1}, + {.mhz = 200, .refdiv = 1, .fbdiv = 50, .postdiv1 = 3, .postdiv2 = 2}, +}; + +struct rk3399_dram_status { + uint32_t current_index; + uint32_t index_freq[2]; + uint32_t boot_freq; + uint32_t low_power_stat; + struct timing_related_config timing_config; + struct drv_odt_lp_config drv_odt_lp_cfg; +}; + +struct rk3399_saved_status { + uint32_t freq; + uint32_t low_power_stat; + uint32_t odt; +}; + +static struct rk3399_dram_status rk3399_dram_status; +static struct rk3399_saved_status rk3399_suspend_status; +static uint32_t wrdqs_delay_val[2][2][4]; +static uint32_t rddqs_delay_ps; + +static struct rk3399_sdram_default_config ddr3_default_config = { + .bl = 8, + .ap = 0, + .burst_ref_cnt = 1, + .zqcsi = 0 +}; + +static struct rk3399_sdram_default_config lpddr3_default_config = { + .bl = 8, + .ap = 0, + .burst_ref_cnt = 1, + .zqcsi = 0 +}; + +static struct rk3399_sdram_default_config lpddr4_default_config = { + .bl = 16, + .ap = 0, + .caodt 
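+	/* command/address bus ODT in ohms; only the LPDDR4 configuration carries a CA ODT value */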
= 240, + .burst_ref_cnt = 1, + .zqcsi = 0 +}; + +static uint32_t get_cs_die_capability(struct rk3399_sdram_params *ram_config, + uint8_t channel, uint8_t cs) +{ + struct rk3399_sdram_channel *ch = &ram_config->ch[channel]; + uint32_t bandwidth; + uint32_t die_bandwidth; + uint32_t die; + uint32_t cs_cap; + uint32_t row; + + row = cs == 0 ? ch->cs0_row : ch->cs1_row; + bandwidth = 8 * (1 << ch->bw); + die_bandwidth = 8 * (1 << ch->dbw); + die = bandwidth / die_bandwidth; + cs_cap = (1 << (row + ((1 << ch->bk) / 4 + 1) + ch->col + + (bandwidth / 16))); + if (ch->row_3_4) + cs_cap = cs_cap * 3 / 4; + + return (cs_cap / die); +} + +static void get_dram_drv_odt_val(uint32_t dram_type, + struct drv_odt_lp_config *drv_config) +{ + uint32_t tmp; + uint32_t mr1_val, mr3_val, mr11_val; + + switch (dram_type) { + case DDR3: + mr1_val = (mmio_read_32(CTL_REG(0, 133)) >> 16) & 0xffff; + tmp = ((mr1_val >> 1) & 1) | ((mr1_val >> 4) & 1); + if (tmp) + drv_config->dram_side_drv = 34; + else + drv_config->dram_side_drv = 40; + tmp = ((mr1_val >> 2) & 1) | ((mr1_val >> 5) & 1) | + ((mr1_val >> 7) & 1); + if (tmp == 0) + drv_config->dram_side_dq_odt = 0; + else if (tmp == 1) + drv_config->dram_side_dq_odt = 60; + else if (tmp == 3) + drv_config->dram_side_dq_odt = 40; + else + drv_config->dram_side_dq_odt = 120; + break; + case LPDDR3: + mr3_val = mmio_read_32(CTL_REG(0, 138)) & 0xf; + mr11_val = (mmio_read_32(CTL_REG(0, 139)) >> 24) & 0x3; + if (mr3_val == 0xb) + drv_config->dram_side_drv = 3448; + else if (mr3_val == 0xa) + drv_config->dram_side_drv = 4048; + else if (mr3_val == 0x9) + drv_config->dram_side_drv = 3440; + else if (mr3_val == 0x4) + drv_config->dram_side_drv = 60; + else if (mr3_val == 0x3) + drv_config->dram_side_drv = 48; + else if (mr3_val == 0x2) + drv_config->dram_side_drv = 40; + else + drv_config->dram_side_drv = 34; + + if (mr11_val == 1) + drv_config->dram_side_dq_odt = 60; + else if (mr11_val == 2) + drv_config->dram_side_dq_odt = 120; + else if (mr11_val == 0) + drv_config->dram_side_dq_odt = 0; + else + drv_config->dram_side_dq_odt = 240; + break; + case LPDDR4: + default: + mr3_val = (mmio_read_32(CTL_REG(0, 138)) >> 3) & 0x7; + mr11_val = (mmio_read_32(CTL_REG(0, 139)) >> 24) & 0xff; + + if ((mr3_val == 0) || (mr3_val == 7)) + drv_config->dram_side_drv = 40; + else + drv_config->dram_side_drv = 240 / mr3_val; + + tmp = mr11_val & 0x7; + if ((tmp == 7) || (tmp == 0)) + drv_config->dram_side_dq_odt = 0; + else + drv_config->dram_side_dq_odt = 240 / tmp; + + tmp = (mr11_val >> 4) & 0x7; + if ((tmp == 7) || (tmp == 0)) + drv_config->dram_side_ca_odt = 0; + else + drv_config->dram_side_ca_odt = 240 / tmp; + break; + } +} + +static void sdram_timing_cfg_init(struct timing_related_config *ptiming_config, + struct rk3399_sdram_params *sdram_params, + struct drv_odt_lp_config *drv_config) +{ + uint32_t i, j; + + for (i = 0; i < sdram_params->num_channels; i++) { + ptiming_config->dram_info[i].speed_rate = DDR3_DEFAULT; + ptiming_config->dram_info[i].cs_cnt = sdram_params->ch[i].rank; + for (j = 0; j < sdram_params->ch[i].rank; j++) { + ptiming_config->dram_info[i].per_die_capability[j] = + get_cs_die_capability(sdram_params, i, j); + } + } + ptiming_config->dram_type = sdram_params->dramtype; + ptiming_config->ch_cnt = sdram_params->num_channels; + switch (sdram_params->dramtype) { + case DDR3: + ptiming_config->bl = ddr3_default_config.bl; + ptiming_config->ap = ddr3_default_config.ap; + break; + case LPDDR3: + ptiming_config->bl = lpddr3_default_config.bl; + ptiming_config->ap = 
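+		/* ap: auto pre-charge setting, taken from the per-type default table above */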
lpddr3_default_config.ap; + break; + case LPDDR4: + ptiming_config->bl = lpddr4_default_config.bl; + ptiming_config->ap = lpddr4_default_config.ap; + ptiming_config->rdbi = 0; + ptiming_config->wdbi = 0; + break; + default: + /* Do nothing in default case */ + break; + } + ptiming_config->dramds = drv_config->dram_side_drv; + ptiming_config->dramodt = drv_config->dram_side_dq_odt; + ptiming_config->caodt = drv_config->dram_side_ca_odt; + ptiming_config->odt = (mmio_read_32(PHY_REG(0, 5)) >> 16) & 0x1; +} + +struct lat_adj_pair { + uint32_t cl; + uint32_t rdlat_adj; + uint32_t cwl; + uint32_t wrlat_adj; +}; + +const struct lat_adj_pair ddr3_lat_adj[] = { + {6, 5, 5, 4}, + {8, 7, 6, 5}, + {10, 9, 7, 6}, + {11, 9, 8, 7}, + {13, 0xb, 9, 8}, + {14, 0xb, 0xa, 9} +}; + +const struct lat_adj_pair lpddr3_lat_adj[] = { + {3, 2, 1, 0}, + {6, 5, 3, 2}, + {8, 7, 4, 3}, + {9, 8, 5, 4}, + {10, 9, 6, 5}, + {11, 9, 6, 5}, + {12, 0xa, 6, 5}, + {14, 0xc, 8, 7}, + {16, 0xd, 8, 7} +}; + +const struct lat_adj_pair lpddr4_lat_adj[] = { + {6, 5, 4, 2}, + {10, 9, 6, 4}, + {14, 0xc, 8, 6}, + {20, 0x11, 0xa, 8}, + {24, 0x15, 0xc, 0xa}, + {28, 0x18, 0xe, 0xc}, + {32, 0x1b, 0x10, 0xe}, + {36, 0x1e, 0x12, 0x10} +}; + +static uint32_t get_rdlat_adj(uint32_t dram_type, uint32_t cl) +{ + const struct lat_adj_pair *p; + uint32_t cnt; + uint32_t i; + + if (dram_type == DDR3) { + p = ddr3_lat_adj; + cnt = ARRAY_SIZE(ddr3_lat_adj); + } else if (dram_type == LPDDR3) { + p = lpddr3_lat_adj; + cnt = ARRAY_SIZE(lpddr3_lat_adj); + } else { + p = lpddr4_lat_adj; + cnt = ARRAY_SIZE(lpddr4_lat_adj); + } + + for (i = 0; i < cnt; i++) { + if (cl == p[i].cl) + return p[i].rdlat_adj; + } + /* fail */ + return 0xff; +} + +static uint32_t get_wrlat_adj(uint32_t dram_type, uint32_t cwl) +{ + const struct lat_adj_pair *p; + uint32_t cnt; + uint32_t i; + + if (dram_type == DDR3) { + p = ddr3_lat_adj; + cnt = ARRAY_SIZE(ddr3_lat_adj); + } else if (dram_type == LPDDR3) { + p = lpddr3_lat_adj; + cnt = ARRAY_SIZE(lpddr3_lat_adj); + } else { + p = lpddr4_lat_adj; + cnt = ARRAY_SIZE(lpddr4_lat_adj); + } + + for (i = 0; i < cnt; i++) { + if (cwl == p[i].cwl) + return p[i].wrlat_adj; + } + /* fail */ + return 0xff; +} + +#define PI_REGS_DIMM_SUPPORT (0) +#define PI_ADD_LATENCY (0) +#define PI_DOUBLEFREEK (1) + +#define PI_PAD_DELAY_PS_VALUE (1000) +#define PI_IE_ENABLE_VALUE (3000) +#define PI_TSEL_ENABLE_VALUE (700) + +static uint32_t get_pi_rdlat_adj(struct dram_timing_t *pdram_timing) +{ + /*[DLLSUBTYPE2] == "STD_DENALI_HS" */ + uint32_t rdlat, delay_adder, ie_enable, hs_offset, tsel_adder, + extra_adder, tsel_enable; + + ie_enable = PI_IE_ENABLE_VALUE; + tsel_enable = PI_TSEL_ENABLE_VALUE; + + rdlat = pdram_timing->cl + PI_ADD_LATENCY; + delay_adder = ie_enable / (1000000 / pdram_timing->mhz); + if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0) + delay_adder++; + hs_offset = 0; + tsel_adder = 0; + extra_adder = 0; + /* rdlat = rdlat - (PREAMBLE_SUPPORT & 0x1); */ + tsel_adder = tsel_enable / (1000000 / pdram_timing->mhz); + if ((tsel_enable % (1000000 / pdram_timing->mhz)) != 0) + tsel_adder++; + delay_adder = delay_adder - 1; + if (tsel_adder > delay_adder) + extra_adder = tsel_adder - delay_adder; + else + extra_adder = 0; + if (PI_REGS_DIMM_SUPPORT && PI_DOUBLEFREEK) + hs_offset = 2; + else + hs_offset = 1; + + if (delay_adder > (rdlat - 1 - hs_offset)) { + rdlat = rdlat - tsel_adder; + } else { + if ((rdlat - delay_adder) < 2) + rdlat = 2; + else + rdlat = rdlat - delay_adder - extra_adder; + } + + return rdlat; +} + +static uint32_t 
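+/* LPDDR3 only: map read latency (RL) to write latency (WL, apparently the JEDEC "set A" pairs); other types use 1 */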
get_pi_wrlat(struct dram_timing_t *pdram_timing, + struct timing_related_config *timing_config) +{ + uint32_t tmp; + + if (timing_config->dram_type == LPDDR3) { + tmp = pdram_timing->cl; + if (tmp >= 14) + tmp = 8; + else if (tmp >= 10) + tmp = 6; + else if (tmp == 9) + tmp = 5; + else if (tmp == 8) + tmp = 4; + else if (tmp == 6) + tmp = 3; + else + tmp = 1; + } else { + tmp = 1; + } + + return tmp; +} + +static uint32_t get_pi_wrlat_adj(struct dram_timing_t *pdram_timing, + struct timing_related_config *timing_config) +{ + return get_pi_wrlat(pdram_timing, timing_config) + PI_ADD_LATENCY - 1; +} + +static uint32_t get_pi_tdfi_phy_rdlat(struct dram_timing_t *pdram_timing, + struct timing_related_config *timing_config) +{ + /* [DLLSUBTYPE2] == "STD_DENALI_HS" */ + uint32_t cas_lat, delay_adder, ie_enable, hs_offset, ie_delay_adder; + uint32_t mem_delay_ps, round_trip_ps; + uint32_t phy_internal_delay, lpddr_adder, dfi_adder, rdlat_delay; + + ie_enable = PI_IE_ENABLE_VALUE; + + delay_adder = ie_enable / (1000000 / pdram_timing->mhz); + if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0) + delay_adder++; + delay_adder = delay_adder - 1; + if (PI_REGS_DIMM_SUPPORT && PI_DOUBLEFREEK) + hs_offset = 2; + else + hs_offset = 1; + + cas_lat = pdram_timing->cl + PI_ADD_LATENCY; + + if (delay_adder > (cas_lat - 1 - hs_offset)) { + ie_delay_adder = 0; + } else { + ie_delay_adder = ie_enable / (1000000 / pdram_timing->mhz); + if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0) + ie_delay_adder++; + } + + if (timing_config->dram_type == DDR3) { + mem_delay_ps = 0; + } else if (timing_config->dram_type == LPDDR4) { + mem_delay_ps = 3600; + } else if (timing_config->dram_type == LPDDR3) { + mem_delay_ps = 5500; + } else { + NOTICE("get_pi_tdfi_phy_rdlat:dramtype unsupport\n"); + return 0; + } + round_trip_ps = 1100 + 500 + mem_delay_ps + 500 + 600; + delay_adder = round_trip_ps / (1000000 / pdram_timing->mhz); + if ((round_trip_ps % (1000000 / pdram_timing->mhz)) != 0) + delay_adder++; + + phy_internal_delay = 5 + 2 + 4; + lpddr_adder = mem_delay_ps / (1000000 / pdram_timing->mhz); + if ((mem_delay_ps % (1000000 / pdram_timing->mhz)) != 0) + lpddr_adder++; + dfi_adder = 0; + phy_internal_delay = phy_internal_delay + 2; + rdlat_delay = delay_adder + phy_internal_delay + + ie_delay_adder + lpddr_adder + dfi_adder; + + rdlat_delay = rdlat_delay + 2; + return rdlat_delay; +} + +static uint32_t get_pi_todtoff_min(struct dram_timing_t *pdram_timing, + struct timing_related_config *timing_config) +{ + uint32_t tmp, todtoff_min_ps; + + if (timing_config->dram_type == LPDDR3) + todtoff_min_ps = 2500; + else if (timing_config->dram_type == LPDDR4) + todtoff_min_ps = 1500; + else + todtoff_min_ps = 0; + /* todtoff_min */ + tmp = todtoff_min_ps / (1000000 / pdram_timing->mhz); + if ((todtoff_min_ps % (1000000 / pdram_timing->mhz)) != 0) + tmp++; + return tmp; +} + +static uint32_t get_pi_todtoff_max(struct dram_timing_t *pdram_timing, + struct timing_related_config *timing_config) +{ + uint32_t tmp, todtoff_max_ps; + + if ((timing_config->dram_type == LPDDR4) + || (timing_config->dram_type == LPDDR3)) + todtoff_max_ps = 3500; + else + todtoff_max_ps = 0; + + /* todtoff_max */ + tmp = todtoff_max_ps / (1000000 / pdram_timing->mhz); + if ((todtoff_max_ps % (1000000 / pdram_timing->mhz)) != 0) + tmp++; + return tmp; +} + +static void gen_rk3399_ctl_params_f0(struct timing_related_config + *timing_config, + struct dram_timing_t *pdram_timing) +{ + uint32_t i; + uint32_t tmp, tmp1; + + for (i = 0; i < 
timing_config->ch_cnt; i++) { + if (timing_config->dram_type == DDR3) { + tmp = ((700000 + 10) * timing_config->freq + + 999) / 1000; + tmp += pdram_timing->txsnr + (pdram_timing->tmrd * 3) + + pdram_timing->tmod + pdram_timing->tzqinit; + mmio_write_32(CTL_REG(i, 5), tmp); + + mmio_clrsetbits_32(CTL_REG(i, 22), 0xffff, + pdram_timing->tdllk); + + mmio_write_32(CTL_REG(i, 32), + (pdram_timing->tmod << 8) | + pdram_timing->tmrd); + + mmio_clrsetbits_32(CTL_REG(i, 59), 0xffffu << 16, + (pdram_timing->txsr - + pdram_timing->trcd) << 16); + } else if (timing_config->dram_type == LPDDR4) { + mmio_write_32(CTL_REG(i, 5), pdram_timing->tinit1 + + pdram_timing->tinit3); + mmio_write_32(CTL_REG(i, 32), + (pdram_timing->tmrd << 8) | + pdram_timing->tmrd); + mmio_clrsetbits_32(CTL_REG(i, 59), 0xffffu << 16, + pdram_timing->txsr << 16); + } else { + mmio_write_32(CTL_REG(i, 5), pdram_timing->tinit1); + mmio_write_32(CTL_REG(i, 7), pdram_timing->tinit4); + mmio_write_32(CTL_REG(i, 32), + (pdram_timing->tmrd << 8) | + pdram_timing->tmrd); + mmio_clrsetbits_32(CTL_REG(i, 59), 0xffffu << 16, + pdram_timing->txsr << 16); + } + mmio_write_32(CTL_REG(i, 6), pdram_timing->tinit3); + mmio_write_32(CTL_REG(i, 8), pdram_timing->tinit5); + mmio_clrsetbits_32(CTL_REG(i, 23), (0x7f << 16), + ((pdram_timing->cl * 2) << 16)); + mmio_clrsetbits_32(CTL_REG(i, 23), (0x1f << 24), + (pdram_timing->cwl << 24)); + mmio_clrsetbits_32(CTL_REG(i, 24), 0x3f, pdram_timing->al); + mmio_clrsetbits_32(CTL_REG(i, 26), 0xffffu << 16, + (pdram_timing->trc << 24) | + (pdram_timing->trrd << 16)); + mmio_write_32(CTL_REG(i, 27), + (pdram_timing->tfaw << 24) | + (pdram_timing->trppb << 16) | + (pdram_timing->twtr << 8) | + pdram_timing->tras_min); + + mmio_clrsetbits_32(CTL_REG(i, 31), 0xffu << 24, + max(4, pdram_timing->trtp) << 24); + mmio_write_32(CTL_REG(i, 33), (pdram_timing->tcke << 24) | + pdram_timing->tras_max); + mmio_clrsetbits_32(CTL_REG(i, 34), 0xff, + max(1, pdram_timing->tckesr)); + mmio_clrsetbits_32(CTL_REG(i, 39), + (0x3f << 16) | (0xff << 8), + (pdram_timing->twr << 16) | + (pdram_timing->trcd << 8)); + mmio_clrsetbits_32(CTL_REG(i, 42), 0x1f << 16, + pdram_timing->tmrz << 16); + tmp = pdram_timing->tdal ? 
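+		/* use tDAL directly when provided, otherwise fall back to tWR + tRP */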
pdram_timing->tdal : + (pdram_timing->twr + pdram_timing->trp); + mmio_clrsetbits_32(CTL_REG(i, 44), 0xff, tmp); + mmio_clrsetbits_32(CTL_REG(i, 45), 0xff, pdram_timing->trp); + mmio_write_32(CTL_REG(i, 48), + ((pdram_timing->trefi - 8) << 16) | + pdram_timing->trfc); + mmio_clrsetbits_32(CTL_REG(i, 52), 0xffff, pdram_timing->txp); + mmio_clrsetbits_32(CTL_REG(i, 53), 0xffffu << 16, + pdram_timing->txpdll << 16); + mmio_clrsetbits_32(CTL_REG(i, 55), 0xf << 24, + pdram_timing->tcscke << 24); + mmio_clrsetbits_32(CTL_REG(i, 55), 0xff, pdram_timing->tmrri); + mmio_write_32(CTL_REG(i, 56), + (pdram_timing->tzqcke << 24) | + (pdram_timing->tmrwckel << 16) | + (pdram_timing->tckehcs << 8) | + pdram_timing->tckelcs); + mmio_clrsetbits_32(CTL_REG(i, 60), 0xffff, pdram_timing->txsnr); + mmio_clrsetbits_32(CTL_REG(i, 62), 0xffffu << 16, + (pdram_timing->tckehcmd << 24) | + (pdram_timing->tckelcmd << 16)); + mmio_write_32(CTL_REG(i, 63), + (pdram_timing->tckelpd << 24) | + (pdram_timing->tescke << 16) | + (pdram_timing->tsr << 8) | + pdram_timing->tckckel); + mmio_clrsetbits_32(CTL_REG(i, 64), 0xfff, + (pdram_timing->tcmdcke << 8) | + pdram_timing->tcsckeh); + mmio_clrsetbits_32(CTL_REG(i, 92), 0xffff << 8, + (pdram_timing->tcksrx << 16) | + (pdram_timing->tcksre << 8)); + mmio_clrsetbits_32(CTL_REG(i, 108), 0x1 << 24, + (timing_config->dllbp << 24)); + mmio_clrsetbits_32(CTL_REG(i, 122), 0x3ff << 16, + (pdram_timing->tvrcg_enable << 16)); + mmio_write_32(CTL_REG(i, 123), (pdram_timing->tfc_long << 16) | + pdram_timing->tvrcg_disable); + mmio_write_32(CTL_REG(i, 124), + (pdram_timing->tvref_long << 16) | + (pdram_timing->tckfspx << 8) | + pdram_timing->tckfspe); + mmio_write_32(CTL_REG(i, 133), (pdram_timing->mr[1] << 16) | + pdram_timing->mr[0]); + mmio_clrsetbits_32(CTL_REG(i, 134), 0xffff, + pdram_timing->mr[2]); + mmio_clrsetbits_32(CTL_REG(i, 138), 0xffff, + pdram_timing->mr[3]); + mmio_clrsetbits_32(CTL_REG(i, 139), 0xffu << 24, + pdram_timing->mr11 << 24); + mmio_write_32(CTL_REG(i, 147), + (pdram_timing->mr[1] << 16) | + pdram_timing->mr[0]); + mmio_clrsetbits_32(CTL_REG(i, 148), 0xffff, + pdram_timing->mr[2]); + mmio_clrsetbits_32(CTL_REG(i, 152), 0xffff, + pdram_timing->mr[3]); + mmio_clrsetbits_32(CTL_REG(i, 153), 0xffu << 24, + pdram_timing->mr11 << 24); + if (timing_config->dram_type == LPDDR4) { + mmio_clrsetbits_32(CTL_REG(i, 140), 0xffffu << 16, + pdram_timing->mr12 << 16); + mmio_clrsetbits_32(CTL_REG(i, 142), 0xffffu << 16, + pdram_timing->mr14 << 16); + mmio_clrsetbits_32(CTL_REG(i, 145), 0xffffu << 16, + pdram_timing->mr22 << 16); + mmio_clrsetbits_32(CTL_REG(i, 154), 0xffffu << 16, + pdram_timing->mr12 << 16); + mmio_clrsetbits_32(CTL_REG(i, 156), 0xffffu << 16, + pdram_timing->mr14 << 16); + mmio_clrsetbits_32(CTL_REG(i, 159), 0xffffu << 16, + pdram_timing->mr22 << 16); + } + mmio_clrsetbits_32(CTL_REG(i, 179), 0xfff << 8, + pdram_timing->tzqinit << 8); + mmio_write_32(CTL_REG(i, 180), (pdram_timing->tzqcs << 16) | + (pdram_timing->tzqinit / 2)); + mmio_write_32(CTL_REG(i, 181), (pdram_timing->tzqlat << 16) | + pdram_timing->tzqcal); + mmio_clrsetbits_32(CTL_REG(i, 212), 0xff << 8, + pdram_timing->todton << 8); + + if (timing_config->odt) { + mmio_setbits_32(CTL_REG(i, 213), 1 << 16); + if (timing_config->freq < 400) + tmp = 4 << 24; + else + tmp = 8 << 24; + } else { + mmio_clrbits_32(CTL_REG(i, 213), 1 << 16); + tmp = 2 << 24; + } + + mmio_clrsetbits_32(CTL_REG(i, 216), 0x1f << 24, tmp); + mmio_clrsetbits_32(CTL_REG(i, 221), (0x3 << 16) | (0xf << 8), + (pdram_timing->tdqsck << 
16) | + (pdram_timing->tdqsck_max << 8)); + tmp = + (get_wrlat_adj(timing_config->dram_type, pdram_timing->cwl) + << 8) | get_rdlat_adj(timing_config->dram_type, + pdram_timing->cl); + mmio_clrsetbits_32(CTL_REG(i, 284), 0xffff, tmp); + mmio_clrsetbits_32(CTL_REG(i, 82), 0xffffu << 16, + (4 * pdram_timing->trefi) << 16); + + mmio_clrsetbits_32(CTL_REG(i, 83), 0xffff, + (2 * pdram_timing->trefi) & 0xffff); + + if ((timing_config->dram_type == LPDDR3) || + (timing_config->dram_type == LPDDR4)) { + tmp = get_pi_wrlat(pdram_timing, timing_config); + tmp1 = get_pi_todtoff_max(pdram_timing, timing_config); + tmp = (tmp > tmp1) ? (tmp - tmp1) : 0; + } else { + tmp = 0; + } + mmio_clrsetbits_32(CTL_REG(i, 214), 0x3f << 16, + (tmp & 0x3f) << 16); + + if ((timing_config->dram_type == LPDDR3) || + (timing_config->dram_type == LPDDR4)) { + /* min_rl_preamble = cl+TDQSCK_MIN -1 */ + tmp = pdram_timing->cl + + get_pi_todtoff_min(pdram_timing, timing_config) - 1; + /* todtoff_max */ + tmp1 = get_pi_todtoff_max(pdram_timing, timing_config); + tmp = (tmp > tmp1) ? (tmp - tmp1) : 0; + } else { + tmp = pdram_timing->cl - pdram_timing->cwl; + } + mmio_clrsetbits_32(CTL_REG(i, 215), 0x3f << 8, + (tmp & 0x3f) << 8); + + mmio_clrsetbits_32(CTL_REG(i, 275), 0xff << 16, + (get_pi_tdfi_phy_rdlat(pdram_timing, + timing_config) & + 0xff) << 16); + + mmio_clrsetbits_32(CTL_REG(i, 277), 0xffff, + (2 * pdram_timing->trefi) & 0xffff); + + mmio_clrsetbits_32(CTL_REG(i, 282), 0xffff, + (2 * pdram_timing->trefi) & 0xffff); + + mmio_write_32(CTL_REG(i, 283), 20 * pdram_timing->trefi); + + /* CTL_308 TDFI_CALVL_CAPTURE_F0:RW:16:10 */ + tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1; + if ((20000 % (1000000 / pdram_timing->mhz)) != 0) + tmp1++; + tmp = (tmp1 >> 1) + (tmp1 % 2) + 5; + mmio_clrsetbits_32(CTL_REG(i, 308), 0x3ff << 16, tmp << 16); + + /* CTL_308 TDFI_CALVL_CC_F0:RW:0:10 */ + tmp = tmp + 18; + mmio_clrsetbits_32(CTL_REG(i, 308), 0x3ff, tmp); + + /* CTL_314 TDFI_WRCSLAT_F0:RW:8:8 */ + tmp1 = get_pi_wrlat_adj(pdram_timing, timing_config); + if (timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) { + if (tmp1 == 0) + tmp = 0; + else if (tmp1 < 5) + tmp = tmp1 - 1; + else + tmp = tmp1 - 5; + } else { + tmp = tmp1 - 2; + } + mmio_clrsetbits_32(CTL_REG(i, 314), 0xff << 8, tmp << 8); + + /* CTL_314 TDFI_RDCSLAT_F0:RW:0:8 */ + if ((timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) && + (pdram_timing->cl >= 5)) + tmp = pdram_timing->cl - 5; + else + tmp = pdram_timing->cl - 2; + mmio_clrsetbits_32(CTL_REG(i, 314), 0xff, tmp); + } +} + +static void gen_rk3399_ctl_params_f1(struct timing_related_config + *timing_config, + struct dram_timing_t *pdram_timing) +{ + uint32_t i; + uint32_t tmp, tmp1; + + for (i = 0; i < timing_config->ch_cnt; i++) { + if (timing_config->dram_type == DDR3) { + tmp = + ((700000 + 10) * timing_config->freq + 999) / 1000; + tmp += pdram_timing->txsnr + (pdram_timing->tmrd * 3) + + pdram_timing->tmod + pdram_timing->tzqinit; + mmio_write_32(CTL_REG(i, 9), tmp); + mmio_clrsetbits_32(CTL_REG(i, 22), 0xffffu << 16, + pdram_timing->tdllk << 16); + mmio_clrsetbits_32(CTL_REG(i, 34), 0xffffff00, + (pdram_timing->tmod << 24) | + (pdram_timing->tmrd << 16) | + (pdram_timing->trtp << 8)); + mmio_clrsetbits_32(CTL_REG(i, 60), 0xffffu << 16, + (pdram_timing->txsr - + pdram_timing->trcd) << 16); + } else if (timing_config->dram_type == LPDDR4) { + mmio_write_32(CTL_REG(i, 9), pdram_timing->tinit1 + + pdram_timing->tinit3); + mmio_clrsetbits_32(CTL_REG(i, 34), 0xffffff00, + (pdram_timing->tmrd << 24) | + 
(pdram_timing->tmrd << 16) | + (pdram_timing->trtp << 8)); + mmio_clrsetbits_32(CTL_REG(i, 60), 0xffffu << 16, + pdram_timing->txsr << 16); + } else { + mmio_write_32(CTL_REG(i, 9), pdram_timing->tinit1); + mmio_write_32(CTL_REG(i, 11), pdram_timing->tinit4); + mmio_clrsetbits_32(CTL_REG(i, 34), 0xffffff00, + (pdram_timing->tmrd << 24) | + (pdram_timing->tmrd << 16) | + (pdram_timing->trtp << 8)); + mmio_clrsetbits_32(CTL_REG(i, 60), 0xffffu << 16, + pdram_timing->txsr << 16); + } + mmio_write_32(CTL_REG(i, 10), pdram_timing->tinit3); + mmio_write_32(CTL_REG(i, 12), pdram_timing->tinit5); + mmio_clrsetbits_32(CTL_REG(i, 24), (0x7f << 8), + ((pdram_timing->cl * 2) << 8)); + mmio_clrsetbits_32(CTL_REG(i, 24), (0x1f << 16), + (pdram_timing->cwl << 16)); + mmio_clrsetbits_32(CTL_REG(i, 24), 0x3f << 24, + pdram_timing->al << 24); + mmio_clrsetbits_32(CTL_REG(i, 28), 0xffffff00, + (pdram_timing->tras_min << 24) | + (pdram_timing->trc << 16) | + (pdram_timing->trrd << 8)); + mmio_clrsetbits_32(CTL_REG(i, 29), 0xffffff, + (pdram_timing->tfaw << 16) | + (pdram_timing->trppb << 8) | + pdram_timing->twtr); + mmio_write_32(CTL_REG(i, 35), (pdram_timing->tcke << 24) | + pdram_timing->tras_max); + mmio_clrsetbits_32(CTL_REG(i, 36), 0xff, + max(1, pdram_timing->tckesr)); + mmio_clrsetbits_32(CTL_REG(i, 39), (0xffu << 24), + (pdram_timing->trcd << 24)); + mmio_clrsetbits_32(CTL_REG(i, 40), 0x3f, pdram_timing->twr); + mmio_clrsetbits_32(CTL_REG(i, 42), 0x1f << 24, + pdram_timing->tmrz << 24); + tmp = pdram_timing->tdal ? pdram_timing->tdal : + (pdram_timing->twr + pdram_timing->trp); + mmio_clrsetbits_32(CTL_REG(i, 44), 0xff << 8, tmp << 8); + mmio_clrsetbits_32(CTL_REG(i, 45), 0xff << 8, + pdram_timing->trp << 8); + mmio_write_32(CTL_REG(i, 49), + ((pdram_timing->trefi - 8) << 16) | + pdram_timing->trfc); + mmio_clrsetbits_32(CTL_REG(i, 52), 0xffffu << 16, + pdram_timing->txp << 16); + mmio_clrsetbits_32(CTL_REG(i, 54), 0xffff, + pdram_timing->txpdll); + mmio_clrsetbits_32(CTL_REG(i, 55), 0xff << 8, + pdram_timing->tmrri << 8); + mmio_write_32(CTL_REG(i, 57), (pdram_timing->tmrwckel << 24) | + (pdram_timing->tckehcs << 16) | + (pdram_timing->tckelcs << 8) | + pdram_timing->tcscke); + mmio_clrsetbits_32(CTL_REG(i, 58), 0xf, pdram_timing->tzqcke); + mmio_clrsetbits_32(CTL_REG(i, 61), 0xffff, pdram_timing->txsnr); + mmio_clrsetbits_32(CTL_REG(i, 64), 0xffffu << 16, + (pdram_timing->tckehcmd << 24) | + (pdram_timing->tckelcmd << 16)); + mmio_write_32(CTL_REG(i, 65), (pdram_timing->tckelpd << 24) | + (pdram_timing->tescke << 16) | + (pdram_timing->tsr << 8) | + pdram_timing->tckckel); + mmio_clrsetbits_32(CTL_REG(i, 66), 0xfff, + (pdram_timing->tcmdcke << 8) | + pdram_timing->tcsckeh); + mmio_clrsetbits_32(CTL_REG(i, 92), (0xffu << 24), + (pdram_timing->tcksre << 24)); + mmio_clrsetbits_32(CTL_REG(i, 93), 0xff, + pdram_timing->tcksrx); + mmio_clrsetbits_32(CTL_REG(i, 108), (0x1 << 25), + (timing_config->dllbp << 25)); + mmio_write_32(CTL_REG(i, 125), + (pdram_timing->tvrcg_disable << 16) | + pdram_timing->tvrcg_enable); + mmio_write_32(CTL_REG(i, 126), (pdram_timing->tckfspx << 24) | + (pdram_timing->tckfspe << 16) | + pdram_timing->tfc_long); + mmio_clrsetbits_32(CTL_REG(i, 127), 0xffff, + pdram_timing->tvref_long); + mmio_clrsetbits_32(CTL_REG(i, 134), 0xffffu << 16, + pdram_timing->mr[0] << 16); + mmio_write_32(CTL_REG(i, 135), (pdram_timing->mr[2] << 16) | + pdram_timing->mr[1]); + mmio_clrsetbits_32(CTL_REG(i, 138), 0xffffu << 16, + pdram_timing->mr[3] << 16); + mmio_clrsetbits_32(CTL_REG(i, 140), 0xff, 
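+		/* MR11 holds the LPDDR3/LPDDR4 DQ and CA ODT configuration (cf. get_dram_drv_odt_val()) */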
pdram_timing->mr11); + mmio_clrsetbits_32(CTL_REG(i, 148), 0xffffu << 16, + pdram_timing->mr[0] << 16); + mmio_write_32(CTL_REG(i, 149), (pdram_timing->mr[2] << 16) | + pdram_timing->mr[1]); + mmio_clrsetbits_32(CTL_REG(i, 152), 0xffffu << 16, + pdram_timing->mr[3] << 16); + mmio_clrsetbits_32(CTL_REG(i, 154), 0xff, pdram_timing->mr11); + if (timing_config->dram_type == LPDDR4) { + mmio_clrsetbits_32(CTL_REG(i, 141), 0xffff, + pdram_timing->mr12); + mmio_clrsetbits_32(CTL_REG(i, 143), 0xffff, + pdram_timing->mr14); + mmio_clrsetbits_32(CTL_REG(i, 146), 0xffff, + pdram_timing->mr22); + mmio_clrsetbits_32(CTL_REG(i, 155), 0xffff, + pdram_timing->mr12); + mmio_clrsetbits_32(CTL_REG(i, 157), 0xffff, + pdram_timing->mr14); + mmio_clrsetbits_32(CTL_REG(i, 160), 0xffff, + pdram_timing->mr22); + } + mmio_write_32(CTL_REG(i, 182), + ((pdram_timing->tzqinit / 2) << 16) | + pdram_timing->tzqinit); + mmio_write_32(CTL_REG(i, 183), (pdram_timing->tzqcal << 16) | + pdram_timing->tzqcs); + mmio_clrsetbits_32(CTL_REG(i, 184), 0x3f, pdram_timing->tzqlat); + mmio_clrsetbits_32(CTL_REG(i, 188), 0xfff, + pdram_timing->tzqreset); + mmio_clrsetbits_32(CTL_REG(i, 212), 0xff << 16, + pdram_timing->todton << 16); + + if (timing_config->odt) { + mmio_setbits_32(CTL_REG(i, 213), (1 << 24)); + if (timing_config->freq < 400) + tmp = 4 << 24; + else + tmp = 8 << 24; + } else { + mmio_clrbits_32(CTL_REG(i, 213), (1 << 24)); + tmp = 2 << 24; + } + mmio_clrsetbits_32(CTL_REG(i, 217), 0x1f << 24, tmp); + mmio_clrsetbits_32(CTL_REG(i, 221), 0xf << 24, + (pdram_timing->tdqsck_max << 24)); + mmio_clrsetbits_32(CTL_REG(i, 222), 0x3, pdram_timing->tdqsck); + mmio_clrsetbits_32(CTL_REG(i, 291), 0xffff, + (get_wrlat_adj(timing_config->dram_type, + pdram_timing->cwl) << 8) | + get_rdlat_adj(timing_config->dram_type, + pdram_timing->cl)); + + mmio_clrsetbits_32(CTL_REG(i, 84), 0xffff, + (4 * pdram_timing->trefi) & 0xffff); + + mmio_clrsetbits_32(CTL_REG(i, 84), 0xffffu << 16, + ((2 * pdram_timing->trefi) & 0xffff) << 16); + + if ((timing_config->dram_type == LPDDR3) || + (timing_config->dram_type == LPDDR4)) { + tmp = get_pi_wrlat(pdram_timing, timing_config); + tmp1 = get_pi_todtoff_max(pdram_timing, timing_config); + tmp = (tmp > tmp1) ? (tmp - tmp1) : 0; + } else { + tmp = 0; + } + mmio_clrsetbits_32(CTL_REG(i, 214), 0x3f << 24, + (tmp & 0x3f) << 24); + + if ((timing_config->dram_type == LPDDR3) || + (timing_config->dram_type == LPDDR4)) { + /* min_rl_preamble = cl + TDQSCK_MIN - 1 */ + tmp = pdram_timing->cl + + get_pi_todtoff_min(pdram_timing, timing_config); + tmp--; + /* todtoff_max */ + tmp1 = get_pi_todtoff_max(pdram_timing, timing_config); + tmp = (tmp > tmp1) ? 
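+			/* subtract, clamping at zero so the programmed gap never goes negative */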
(tmp - tmp1) : 0; + } else { + tmp = pdram_timing->cl - pdram_timing->cwl; + } + mmio_clrsetbits_32(CTL_REG(i, 215), 0x3f << 16, + (tmp & 0x3f) << 16); + + mmio_clrsetbits_32(CTL_REG(i, 275), 0xffu << 24, + (get_pi_tdfi_phy_rdlat(pdram_timing, + timing_config) & + 0xff) << 24); + + mmio_clrsetbits_32(CTL_REG(i, 284), 0xffffu << 16, + ((2 * pdram_timing->trefi) & 0xffff) << 16); + + mmio_clrsetbits_32(CTL_REG(i, 289), 0xffff, + (2 * pdram_timing->trefi) & 0xffff); + + mmio_write_32(CTL_REG(i, 290), 20 * pdram_timing->trefi); + + /* CTL_309 TDFI_CALVL_CAPTURE_F1:RW:16:10 */ + tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1; + if ((20000 % (1000000 / pdram_timing->mhz)) != 0) + tmp1++; + tmp = (tmp1 >> 1) + (tmp1 % 2) + 5; + mmio_clrsetbits_32(CTL_REG(i, 309), 0x3ff << 16, tmp << 16); + + /* CTL_309 TDFI_CALVL_CC_F1:RW:0:10 */ + tmp = tmp + 18; + mmio_clrsetbits_32(CTL_REG(i, 309), 0x3ff, tmp); + + /* CTL_314 TDFI_WRCSLAT_F1:RW:24:8 */ + tmp1 = get_pi_wrlat_adj(pdram_timing, timing_config); + if (timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) { + if (tmp1 == 0) + tmp = 0; + else if (tmp1 < 5) + tmp = tmp1 - 1; + else + tmp = tmp1 - 5; + } else { + tmp = tmp1 - 2; + } + + mmio_clrsetbits_32(CTL_REG(i, 314), 0xffu << 24, tmp << 24); + + /* CTL_314 TDFI_RDCSLAT_F1:RW:16:8 */ + if ((timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) && + (pdram_timing->cl >= 5)) + tmp = pdram_timing->cl - 5; + else + tmp = pdram_timing->cl - 2; + mmio_clrsetbits_32(CTL_REG(i, 314), 0xff << 16, tmp << 16); + } +} + +static void gen_rk3399_enable_training(uint32_t ch_cnt, uint32_t nmhz) +{ + uint32_t i, tmp; + + if (nmhz <= PHY_DLL_BYPASS_FREQ) + tmp = 0; + else + tmp = 1; + + for (i = 0; i < ch_cnt; i++) { + mmio_clrsetbits_32(CTL_REG(i, 305), 1 << 16, tmp << 16); + mmio_clrsetbits_32(CTL_REG(i, 71), 1, tmp); + mmio_clrsetbits_32(CTL_REG(i, 70), 1 << 8, 1 << 8); + } +} + +static void gen_rk3399_disable_training(uint32_t ch_cnt) +{ + uint32_t i; + + for (i = 0; i < ch_cnt; i++) { + mmio_clrbits_32(CTL_REG(i, 305), 1 << 16); + mmio_clrbits_32(CTL_REG(i, 71), 1); + mmio_clrbits_32(CTL_REG(i, 70), 1 << 8); + } +} + +static void gen_rk3399_ctl_params(struct timing_related_config *timing_config, + struct dram_timing_t *pdram_timing, + uint32_t fn) +{ + if (fn == 0) + gen_rk3399_ctl_params_f0(timing_config, pdram_timing); + else + gen_rk3399_ctl_params_f1(timing_config, pdram_timing); +} + +static void gen_rk3399_pi_params_f0(struct timing_related_config *timing_config, + struct dram_timing_t *pdram_timing) +{ + uint32_t tmp, tmp1, tmp2; + uint32_t i; + + for (i = 0; i < timing_config->ch_cnt; i++) { + /* PI_02 PI_TDFI_PHYMSTR_MAX_F0:RW:0:32 */ + tmp = 4 * pdram_timing->trefi; + mmio_write_32(PI_REG(i, 2), tmp); + /* PI_03 PI_TDFI_PHYMSTR_RESP_F0:RW:0:16 */ + tmp = 2 * pdram_timing->trefi; + mmio_clrsetbits_32(PI_REG(i, 3), 0xffff, tmp); + /* PI_07 PI_TDFI_PHYUPD_RESP_F0:RW:16:16 */ + mmio_clrsetbits_32(PI_REG(i, 7), 0xffffu << 16, tmp << 16); + + /* PI_42 PI_TDELAY_RDWR_2_BUS_IDLE_F0:RW:0:8 */ + if (timing_config->dram_type == LPDDR4) + tmp = 2; + else + tmp = 0; + tmp = (pdram_timing->bl / 2) + 4 + + (get_pi_rdlat_adj(pdram_timing) - 2) + tmp + + get_pi_tdfi_phy_rdlat(pdram_timing, timing_config); + mmio_clrsetbits_32(PI_REG(i, 42), 0xff, tmp); + /* PI_43 PI_WRLAT_F0:RW:0:5 */ + if (timing_config->dram_type == LPDDR3) { + tmp = get_pi_wrlat(pdram_timing, timing_config); + mmio_clrsetbits_32(PI_REG(i, 43), 0x1f, tmp); + } + /* PI_43 PI_ADDITIVE_LAT_F0:RW:8:6 */ + mmio_clrsetbits_32(PI_REG(i, 43), 0x3f << 8, + 
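+				   /* additive latency is fixed to 0 on this platform (PI_ADD_LATENCY) */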
PI_ADD_LATENCY << 8); + + /* PI_43 PI_CASLAT_LIN_F0:RW:16:7 */ + mmio_clrsetbits_32(PI_REG(i, 43), 0x7f << 16, + (pdram_timing->cl * 2) << 16); + /* PI_46 PI_TREF_F0:RW:16:16 */ + mmio_clrsetbits_32(PI_REG(i, 46), 0xffffu << 16, + pdram_timing->trefi << 16); + /* PI_46 PI_TRFC_F0:RW:0:10 */ + mmio_clrsetbits_32(PI_REG(i, 46), 0x3ff, pdram_timing->trfc); + /* PI_66 PI_TODTL_2CMD_F0:RW:24:8 */ + if (timing_config->dram_type == LPDDR3) { + tmp = get_pi_todtoff_max(pdram_timing, timing_config); + mmio_clrsetbits_32(PI_REG(i, 66), 0xffu << 24, + tmp << 24); + } + /* PI_72 PI_WR_TO_ODTH_F0:RW:16:6 */ + if ((timing_config->dram_type == LPDDR3) || + (timing_config->dram_type == LPDDR4)) { + tmp1 = get_pi_wrlat(pdram_timing, timing_config); + tmp2 = get_pi_todtoff_max(pdram_timing, timing_config); + if (tmp1 > tmp2) + tmp = tmp1 - tmp2; + else + tmp = 0; + } else if (timing_config->dram_type == DDR3) { + tmp = 0; + } + mmio_clrsetbits_32(PI_REG(i, 72), 0x3f << 16, tmp << 16); + /* PI_73 PI_RD_TO_ODTH_F0:RW:8:6 */ + if ((timing_config->dram_type == LPDDR3) || + (timing_config->dram_type == LPDDR4)) { + /* min_rl_preamble = cl + TDQSCK_MIN - 1 */ + tmp1 = pdram_timing->cl; + tmp1 += get_pi_todtoff_min(pdram_timing, timing_config); + tmp1--; + /* todtoff_max */ + tmp2 = get_pi_todtoff_max(pdram_timing, timing_config); + if (tmp1 > tmp2) + tmp = tmp1 - tmp2; + else + tmp = 0; + } else if (timing_config->dram_type == DDR3) { + tmp = pdram_timing->cl - pdram_timing->cwl; + } + mmio_clrsetbits_32(PI_REG(i, 73), 0x3f << 8, tmp << 8); + /* PI_89 PI_RDLAT_ADJ_F0:RW:16:8 */ + tmp = get_pi_rdlat_adj(pdram_timing); + mmio_clrsetbits_32(PI_REG(i, 89), 0xff << 16, tmp << 16); + /* PI_90 PI_WRLAT_ADJ_F0:RW:16:8 */ + tmp = get_pi_wrlat_adj(pdram_timing, timing_config); + mmio_clrsetbits_32(PI_REG(i, 90), 0xff << 16, tmp << 16); + /* PI_91 PI_TDFI_WRCSLAT_F0:RW:16:8 */ + tmp1 = tmp; + if (tmp1 == 0) + tmp = 0; + else if (tmp1 < 5) + tmp = tmp1 - 1; + else + tmp = tmp1 - 5; + mmio_clrsetbits_32(PI_REG(i, 91), 0xff << 16, tmp << 16); + /* PI_95 PI_TDFI_CALVL_CAPTURE_F0:RW:16:10 */ + tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1; + if ((20000 % (1000000 / pdram_timing->mhz)) != 0) + tmp1++; + tmp = (tmp1 >> 1) + (tmp1 % 2) + 5; + mmio_clrsetbits_32(PI_REG(i, 95), 0x3ff << 16, tmp << 16); + /* PI_95 PI_TDFI_CALVL_CC_F0:RW:0:10 */ + mmio_clrsetbits_32(PI_REG(i, 95), 0x3ff, tmp + 18); + /* PI_102 PI_TMRZ_F0:RW:8:5 */ + mmio_clrsetbits_32(PI_REG(i, 102), 0x1f << 8, + pdram_timing->tmrz << 8); + /* PI_111 PI_TDFI_CALVL_STROBE_F0:RW:8:4 */ + tmp1 = 2 * 1000 / (1000000 / pdram_timing->mhz); + if ((2 * 1000 % (1000000 / pdram_timing->mhz)) != 0) + tmp1++; + /* pi_tdfi_calvl_strobe=tds_train+5 */ + tmp = tmp1 + 5; + mmio_clrsetbits_32(PI_REG(i, 111), 0xf << 8, tmp << 8); + /* PI_116 PI_TCKEHDQS_F0:RW:16:6 */ + tmp = 10000 / (1000000 / pdram_timing->mhz); + if ((10000 % (1000000 / pdram_timing->mhz)) != 0) + tmp++; + if (pdram_timing->mhz <= 100) + tmp = tmp + 1; + else + tmp = tmp + 8; + mmio_clrsetbits_32(PI_REG(i, 116), 0x3f << 16, tmp << 16); + /* PI_125 PI_MR1_DATA_F0_0:RW+:8:16 */ + mmio_clrsetbits_32(PI_REG(i, 125), 0xffff << 8, + pdram_timing->mr[1] << 8); + /* PI_133 PI_MR1_DATA_F0_1:RW+:0:16 */ + mmio_clrsetbits_32(PI_REG(i, 133), 0xffff, pdram_timing->mr[1]); + /* PI_140 PI_MR1_DATA_F0_2:RW+:16:16 */ + mmio_clrsetbits_32(PI_REG(i, 140), 0xffffu << 16, + pdram_timing->mr[1] << 16); + /* PI_148 PI_MR1_DATA_F0_3:RW+:0:16 */ + mmio_clrsetbits_32(PI_REG(i, 148), 0xffff, pdram_timing->mr[1]); + /* PI_126 
PI_MR2_DATA_F0_0:RW+:0:16 */ + mmio_clrsetbits_32(PI_REG(i, 126), 0xffff, pdram_timing->mr[2]); + /* PI_133 PI_MR2_DATA_F0_1:RW+:16:16 */ + mmio_clrsetbits_32(PI_REG(i, 133), 0xffffu << 16, + pdram_timing->mr[2] << 16); + /* PI_141 PI_MR2_DATA_F0_2:RW+:0:16 */ + mmio_clrsetbits_32(PI_REG(i, 141), 0xffff, pdram_timing->mr[2]); + /* PI_148 PI_MR2_DATA_F0_3:RW+:16:16 */ + mmio_clrsetbits_32(PI_REG(i, 148), 0xffffu << 16, + pdram_timing->mr[2] << 16); + /* PI_156 PI_TFC_F0:RW:0:10 */ + mmio_clrsetbits_32(PI_REG(i, 156), 0x3ff, + pdram_timing->tfc_long); + /* PI_158 PI_TWR_F0:RW:24:6 */ + mmio_clrsetbits_32(PI_REG(i, 158), 0x3f << 24, + pdram_timing->twr << 24); + /* PI_158 PI_TWTR_F0:RW:16:6 */ + mmio_clrsetbits_32(PI_REG(i, 158), 0x3f << 16, + pdram_timing->twtr << 16); + /* PI_158 PI_TRCD_F0:RW:8:8 */ + mmio_clrsetbits_32(PI_REG(i, 158), 0xff << 8, + pdram_timing->trcd << 8); + /* PI_158 PI_TRP_F0:RW:0:8 */ + mmio_clrsetbits_32(PI_REG(i, 158), 0xff, pdram_timing->trp); + /* PI_157 PI_TRTP_F0:RW:24:8 */ + mmio_clrsetbits_32(PI_REG(i, 157), 0xffu << 24, + pdram_timing->trtp << 24); + /* PI_159 PI_TRAS_MIN_F0:RW:24:8 */ + mmio_clrsetbits_32(PI_REG(i, 159), 0xffu << 24, + pdram_timing->tras_min << 24); + /* PI_159 PI_TRAS_MAX_F0:RW:0:17 */ + tmp = pdram_timing->tras_max * 99 / 100; + mmio_clrsetbits_32(PI_REG(i, 159), 0x1ffff, tmp); + /* PI_160 PI_TMRD_F0:RW:16:6 */ + mmio_clrsetbits_32(PI_REG(i, 160), 0x3f << 16, + pdram_timing->tmrd << 16); + /*PI_160 PI_TDQSCK_MAX_F0:RW:0:4 */ + mmio_clrsetbits_32(PI_REG(i, 160), 0xf, + pdram_timing->tdqsck_max); + /* PI_187 PI_TDFI_CTRLUPD_MAX_F0:RW:8:16 */ + mmio_clrsetbits_32(PI_REG(i, 187), 0xffff << 8, + (2 * pdram_timing->trefi) << 8); + /* PI_188 PI_TDFI_CTRLUPD_INTERVAL_F0:RW:0:32 */ + mmio_clrsetbits_32(PI_REG(i, 188), 0xffffffff, + 20 * pdram_timing->trefi); + } +} + +static void gen_rk3399_pi_params_f1(struct timing_related_config *timing_config, + struct dram_timing_t *pdram_timing) +{ + uint32_t tmp, tmp1, tmp2; + uint32_t i; + + for (i = 0; i < timing_config->ch_cnt; i++) { + /* PI_04 PI_TDFI_PHYMSTR_MAX_F1:RW:0:32 */ + tmp = 4 * pdram_timing->trefi; + mmio_write_32(PI_REG(i, 4), tmp); + /* PI_05 PI_TDFI_PHYMSTR_RESP_F1:RW:0:16 */ + tmp = 2 * pdram_timing->trefi; + mmio_clrsetbits_32(PI_REG(i, 5), 0xffff, tmp); + /* PI_12 PI_TDFI_PHYUPD_RESP_F1:RW:0:16 */ + mmio_clrsetbits_32(PI_REG(i, 12), 0xffff, tmp); + + /* PI_42 PI_TDELAY_RDWR_2_BUS_IDLE_F1:RW:8:8 */ + if (timing_config->dram_type == LPDDR4) + tmp = 2; + else + tmp = 0; + tmp = (pdram_timing->bl / 2) + 4 + + (get_pi_rdlat_adj(pdram_timing) - 2) + tmp + + get_pi_tdfi_phy_rdlat(pdram_timing, timing_config); + mmio_clrsetbits_32(PI_REG(i, 42), 0xff << 8, tmp << 8); + /* PI_43 PI_WRLAT_F1:RW:24:5 */ + if (timing_config->dram_type == LPDDR3) { + tmp = get_pi_wrlat(pdram_timing, timing_config); + mmio_clrsetbits_32(PI_REG(i, 43), 0x1f << 24, + tmp << 24); + } + /* PI_44 PI_ADDITIVE_LAT_F1:RW:0:6 */ + mmio_clrsetbits_32(PI_REG(i, 44), 0x3f, PI_ADD_LATENCY); + /* PI_44 PI_CASLAT_LIN_F1:RW:8:7:=0x18 */ + mmio_clrsetbits_32(PI_REG(i, 44), 0x7f << 8, + (pdram_timing->cl * 2) << 8); + /* PI_47 PI_TREF_F1:RW:16:16 */ + mmio_clrsetbits_32(PI_REG(i, 47), 0xffffu << 16, + pdram_timing->trefi << 16); + /* PI_47 PI_TRFC_F1:RW:0:10 */ + mmio_clrsetbits_32(PI_REG(i, 47), 0x3ff, pdram_timing->trfc); + /* PI_67 PI_TODTL_2CMD_F1:RW:8:8 */ + if (timing_config->dram_type == LPDDR3) { + tmp = get_pi_todtoff_max(pdram_timing, timing_config); + mmio_clrsetbits_32(PI_REG(i, 67), 0xff << 8, tmp << 8); + } + /* PI_72 
PI_WR_TO_ODTH_F1:RW:24:6 */ + if ((timing_config->dram_type == LPDDR3) || + (timing_config->dram_type == LPDDR4)) { + tmp1 = get_pi_wrlat(pdram_timing, timing_config); + tmp2 = get_pi_todtoff_max(pdram_timing, timing_config); + if (tmp1 > tmp2) + tmp = tmp1 - tmp2; + else + tmp = 0; + } else if (timing_config->dram_type == DDR3) { + tmp = 0; + } + mmio_clrsetbits_32(PI_REG(i, 72), 0x3f << 24, tmp << 24); + /* PI_73 PI_RD_TO_ODTH_F1:RW:16:6 */ + if ((timing_config->dram_type == LPDDR3) || + (timing_config->dram_type == LPDDR4)) { + /* min_rl_preamble = cl + TDQSCK_MIN - 1 */ + tmp1 = pdram_timing->cl + + get_pi_todtoff_min(pdram_timing, timing_config); + tmp1--; + /* todtoff_max */ + tmp2 = get_pi_todtoff_max(pdram_timing, timing_config); + if (tmp1 > tmp2) + tmp = tmp1 - tmp2; + else + tmp = 0; + } else if (timing_config->dram_type == DDR3) + tmp = pdram_timing->cl - pdram_timing->cwl; + + mmio_clrsetbits_32(PI_REG(i, 73), 0x3f << 16, tmp << 16); + /*P I_89 PI_RDLAT_ADJ_F1:RW:24:8 */ + tmp = get_pi_rdlat_adj(pdram_timing); + mmio_clrsetbits_32(PI_REG(i, 89), 0xffu << 24, tmp << 24); + /* PI_90 PI_WRLAT_ADJ_F1:RW:24:8 */ + tmp = get_pi_wrlat_adj(pdram_timing, timing_config); + mmio_clrsetbits_32(PI_REG(i, 90), 0xffu << 24, tmp << 24); + /* PI_91 PI_TDFI_WRCSLAT_F1:RW:24:8 */ + tmp1 = tmp; + if (tmp1 == 0) + tmp = 0; + else if (tmp1 < 5) + tmp = tmp1 - 1; + else + tmp = tmp1 - 5; + mmio_clrsetbits_32(PI_REG(i, 91), 0xffu << 24, tmp << 24); + /*PI_96 PI_TDFI_CALVL_CAPTURE_F1:RW:16:10 */ + /* tadr=20ns */ + tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1; + if ((20000 % (1000000 / pdram_timing->mhz)) != 0) + tmp1++; + tmp = (tmp1 >> 1) + (tmp1 % 2) + 5; + mmio_clrsetbits_32(PI_REG(i, 96), 0x3ff << 16, tmp << 16); + /* PI_96 PI_TDFI_CALVL_CC_F1:RW:0:10 */ + tmp = tmp + 18; + mmio_clrsetbits_32(PI_REG(i, 96), 0x3ff, tmp); + /*PI_103 PI_TMRZ_F1:RW:0:5 */ + mmio_clrsetbits_32(PI_REG(i, 103), 0x1f, pdram_timing->tmrz); + /*PI_111 PI_TDFI_CALVL_STROBE_F1:RW:16:4 */ + /* tds_train=ceil(2/ns) */ + tmp1 = 2 * 1000 / (1000000 / pdram_timing->mhz); + if ((2 * 1000 % (1000000 / pdram_timing->mhz)) != 0) + tmp1++; + /* pi_tdfi_calvl_strobe=tds_train+5 */ + tmp = tmp1 + 5; + mmio_clrsetbits_32(PI_REG(i, 111), 0xf << 16, + tmp << 16); + /* PI_116 PI_TCKEHDQS_F1:RW:24:6 */ + tmp = 10000 / (1000000 / pdram_timing->mhz); + if ((10000 % (1000000 / pdram_timing->mhz)) != 0) + tmp++; + if (pdram_timing->mhz <= 100) + tmp = tmp + 1; + else + tmp = tmp + 8; + mmio_clrsetbits_32(PI_REG(i, 116), 0x3f << 24, + tmp << 24); + /* PI_128 PI_MR1_DATA_F1_0:RW+:0:16 */ + mmio_clrsetbits_32(PI_REG(i, 128), 0xffff, pdram_timing->mr[1]); + /* PI_135 PI_MR1_DATA_F1_1:RW+:8:16 */ + mmio_clrsetbits_32(PI_REG(i, 135), 0xffff << 8, + pdram_timing->mr[1] << 8); + /* PI_143 PI_MR1_DATA_F1_2:RW+:0:16 */ + mmio_clrsetbits_32(PI_REG(i, 143), 0xffff, pdram_timing->mr[1]); + /* PI_150 PI_MR1_DATA_F1_3:RW+:8:16 */ + mmio_clrsetbits_32(PI_REG(i, 150), 0xffff << 8, + pdram_timing->mr[1] << 8); + /* PI_128 PI_MR2_DATA_F1_0:RW+:16:16 */ + mmio_clrsetbits_32(PI_REG(i, 128), 0xffffu << 16, + pdram_timing->mr[2] << 16); + /* PI_136 PI_MR2_DATA_F1_1:RW+:0:16 */ + mmio_clrsetbits_32(PI_REG(i, 136), 0xffff, pdram_timing->mr[2]); + /* PI_143 PI_MR2_DATA_F1_2:RW+:16:16 */ + mmio_clrsetbits_32(PI_REG(i, 143), 0xffffu << 16, + pdram_timing->mr[2] << 16); + /* PI_151 PI_MR2_DATA_F1_3:RW+:0:16 */ + mmio_clrsetbits_32(PI_REG(i, 151), 0xffff, pdram_timing->mr[2]); + /* PI_156 PI_TFC_F1:RW:16:10 */ + mmio_clrsetbits_32(PI_REG(i, 156), 0x3ff << 16, + 
pdram_timing->tfc_long << 16); + /* PI_162 PI_TWR_F1:RW:8:6 */ + mmio_clrsetbits_32(PI_REG(i, 162), 0x3f << 8, + pdram_timing->twr << 8); + /* PI_162 PI_TWTR_F1:RW:0:6 */ + mmio_clrsetbits_32(PI_REG(i, 162), 0x3f, pdram_timing->twtr); + /* PI_161 PI_TRCD_F1:RW:24:8 */ + mmio_clrsetbits_32(PI_REG(i, 161), 0xffu << 24, + pdram_timing->trcd << 24); + /* PI_161 PI_TRP_F1:RW:16:8 */ + mmio_clrsetbits_32(PI_REG(i, 161), 0xff << 16, + pdram_timing->trp << 16); + /* PI_161 PI_TRTP_F1:RW:8:8 */ + mmio_clrsetbits_32(PI_REG(i, 161), 0xff << 8, + pdram_timing->trtp << 8); + /* PI_163 PI_TRAS_MIN_F1:RW:24:8 */ + mmio_clrsetbits_32(PI_REG(i, 163), 0xffu << 24, + pdram_timing->tras_min << 24); + /* PI_163 PI_TRAS_MAX_F1:RW:0:17 */ + mmio_clrsetbits_32(PI_REG(i, 163), 0x1ffff, + pdram_timing->tras_max * 99 / 100); + /* PI_164 PI_TMRD_F1:RW:16:6 */ + mmio_clrsetbits_32(PI_REG(i, 164), 0x3f << 16, + pdram_timing->tmrd << 16); + /* PI_164 PI_TDQSCK_MAX_F1:RW:0:4 */ + mmio_clrsetbits_32(PI_REG(i, 164), 0xf, + pdram_timing->tdqsck_max); + /* PI_189 PI_TDFI_CTRLUPD_MAX_F1:RW:0:16 */ + mmio_clrsetbits_32(PI_REG(i, 189), 0xffff, + 2 * pdram_timing->trefi); + /* PI_190 PI_TDFI_CTRLUPD_INTERVAL_F1:RW:0:32 */ + mmio_clrsetbits_32(PI_REG(i, 190), 0xffffffff, + 20 * pdram_timing->trefi); + } +} + +static void gen_rk3399_pi_params(struct timing_related_config *timing_config, + struct dram_timing_t *pdram_timing, + uint32_t fn) +{ + if (fn == 0) + gen_rk3399_pi_params_f0(timing_config, pdram_timing); + else + gen_rk3399_pi_params_f1(timing_config, pdram_timing); +} + +static void gen_rk3399_set_odt(uint32_t odt_en) +{ + uint32_t drv_odt_val; + uint32_t i; + + for (i = 0; i < rk3399_dram_status.timing_config.ch_cnt; i++) { + drv_odt_val = (odt_en | (0 << 1) | (0 << 2)) << 16; + mmio_clrsetbits_32(PHY_REG(i, 5), 0x7 << 16, drv_odt_val); + mmio_clrsetbits_32(PHY_REG(i, 133), 0x7 << 16, drv_odt_val); + mmio_clrsetbits_32(PHY_REG(i, 261), 0x7 << 16, drv_odt_val); + mmio_clrsetbits_32(PHY_REG(i, 389), 0x7 << 16, drv_odt_val); + drv_odt_val = (odt_en | (0 << 1) | (0 << 2)) << 24; + mmio_clrsetbits_32(PHY_REG(i, 6), 0x7 << 24, drv_odt_val); + mmio_clrsetbits_32(PHY_REG(i, 134), 0x7 << 24, drv_odt_val); + mmio_clrsetbits_32(PHY_REG(i, 262), 0x7 << 24, drv_odt_val); + mmio_clrsetbits_32(PHY_REG(i, 390), 0x7 << 24, drv_odt_val); + } +} + +static void gen_rk3399_phy_dll_bypass(uint32_t mhz, uint32_t ch, + uint32_t index, uint32_t dram_type) +{ + uint32_t sw_master_mode = 0; + uint32_t rddqs_gate_delay, rddqs_latency, total_delay; + uint32_t i; + + if (dram_type == DDR3) + total_delay = PI_PAD_DELAY_PS_VALUE; + else if (dram_type == LPDDR3) + total_delay = PI_PAD_DELAY_PS_VALUE + 2500; + else + total_delay = PI_PAD_DELAY_PS_VALUE + 1500; + /* total_delay + 0.55tck */ + total_delay += (55 * 10000)/mhz; + rddqs_latency = total_delay * mhz / 1000000; + total_delay -= rddqs_latency * 1000000 / mhz; + rddqs_gate_delay = total_delay * 0x200 * mhz / 1000000; + if (mhz <= PHY_DLL_BYPASS_FREQ) { + sw_master_mode = 0xc; + mmio_setbits_32(PHY_REG(ch, 514), 1); + mmio_setbits_32(PHY_REG(ch, 642), 1); + mmio_setbits_32(PHY_REG(ch, 770), 1); + + /* setting bypass mode slave delay */ + for (i = 0; i < 4; i++) { + /* wr dq delay = -180deg + (0x60 / 4) * 20ps */ + mmio_clrsetbits_32(PHY_REG(ch, 1 + 128 * i), 0x7ff << 8, + 0x4a0 << 8); + /* rd dqs/dq delay = (0x60 / 4) * 20ps */ + mmio_clrsetbits_32(PHY_REG(ch, 11 + 128 * i), 0x3ff, + 0xa0); + /* rd rddqs_gate delay */ + mmio_clrsetbits_32(PHY_REG(ch, 2 + 128 * i), 0x3ff, + rddqs_gate_delay); + 
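+			/*
+			 * The total gate delay is split into whole clock
+			 * cycles (rddqs_latency, programmed just below) and a
+			 * sub-cycle remainder (rddqs_gate_delay, programmed
+			 * above in 1/512-cycle steps). A worked example,
+			 * assuming LPDDR3 at 200 MHz: 1000 + 2500 + 2750 ps
+			 * = 6250 ps, i.e. one whole 5000 ps cycle plus
+			 * 1250 ps = 128/512 of a cycle.
+			 */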
mmio_clrsetbits_32(PHY_REG(ch, 78 + 128 * i), 0xf, + rddqs_latency); + } + for (i = 0; i < 3; i++) + /* adr delay */ + mmio_clrsetbits_32(PHY_REG(ch, 513 + 128 * i), + 0x7ff << 16, 0x80 << 16); + + if ((mmio_read_32(PHY_REG(ch, 86)) & 0xc00) == 0) { + /* + * old status is normal mode, + * and saving the wrdqs slave delay + */ + for (i = 0; i < 4; i++) { + /* save and clear wr dqs slave delay */ + wrdqs_delay_val[ch][index][i] = 0x3ff & + (mmio_read_32(PHY_REG(ch, 63 + i * 128)) + >> 16); + mmio_clrsetbits_32(PHY_REG(ch, 63 + i * 128), + 0x03ff << 16, 0 << 16); + /* + * in normal mode the cmd may delay 1cycle by + * wrlvl and in bypass mode making dqs also + * delay 1cycle. + */ + mmio_clrsetbits_32(PHY_REG(ch, 78 + i * 128), + 0x07 << 8, 0x1 << 8); + } + } + } else if (mmio_read_32(PHY_REG(ch, 86)) & 0xc00) { + /* old status is bypass mode and restore wrlvl resume */ + for (i = 0; i < 4; i++) { + mmio_clrsetbits_32(PHY_REG(ch, 63 + i * 128), + 0x03ff << 16, + (wrdqs_delay_val[ch][index][i] & + 0x3ff) << 16); + /* resume phy_write_path_lat_add */ + mmio_clrbits_32(PHY_REG(ch, 78 + i * 128), 0x07 << 8); + } + } + + /* phy_sw_master_mode_X PHY_86/214/342/470 4bits offset_8 */ + mmio_clrsetbits_32(PHY_REG(ch, 86), 0xf << 8, sw_master_mode << 8); + mmio_clrsetbits_32(PHY_REG(ch, 214), 0xf << 8, sw_master_mode << 8); + mmio_clrsetbits_32(PHY_REG(ch, 342), 0xf << 8, sw_master_mode << 8); + mmio_clrsetbits_32(PHY_REG(ch, 470), 0xf << 8, sw_master_mode << 8); + + /* phy_adrctl_sw_master_mode PHY_547/675/803 4bits offset_16 */ + mmio_clrsetbits_32(PHY_REG(ch, 547), 0xf << 16, sw_master_mode << 16); + mmio_clrsetbits_32(PHY_REG(ch, 675), 0xf << 16, sw_master_mode << 16); + mmio_clrsetbits_32(PHY_REG(ch, 803), 0xf << 16, sw_master_mode << 16); +} + +static void gen_rk3399_phy_params(struct timing_related_config *timing_config, + struct drv_odt_lp_config *drv_config, + struct dram_timing_t *pdram_timing, + uint32_t fn) +{ + uint32_t tmp, i, div, j; + uint32_t mem_delay_ps, pad_delay_ps, total_delay_ps, delay_frac_ps; + uint32_t trpre_min_ps, gate_delay_ps, gate_delay_frac_ps; + uint32_t ie_enable, tsel_enable, cas_lat, rddata_en_ie_dly, tsel_adder; + uint32_t extra_adder, delta, hs_offset; + + for (i = 0; i < timing_config->ch_cnt; i++) { + + pad_delay_ps = PI_PAD_DELAY_PS_VALUE; + ie_enable = PI_IE_ENABLE_VALUE; + tsel_enable = PI_TSEL_ENABLE_VALUE; + + mmio_clrsetbits_32(PHY_REG(i, 896), (0x3 << 8) | 1, fn << 8); + + /* PHY_LOW_FREQ_SEL */ + /* DENALI_PHY_913 1bit offset_0 */ + if (timing_config->freq > 400) + mmio_clrbits_32(PHY_REG(i, 913), 1); + else + mmio_setbits_32(PHY_REG(i, 913), 1); + + /* PHY_RPTR_UPDATE_x */ + /* DENALI_PHY_87/215/343/471 4bit offset_16 */ + tmp = 2500 / (1000000 / pdram_timing->mhz) + 3; + if ((2500 % (1000000 / pdram_timing->mhz)) != 0) + tmp++; + mmio_clrsetbits_32(PHY_REG(i, 87), 0xf << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 215), 0xf << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 343), 0xf << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 471), 0xf << 16, tmp << 16); + + /* PHY_PLL_CTRL */ + /* DENALI_PHY_911 13bits offset_0 */ + /* PHY_LP4_BOOT_PLL_CTRL */ + /* DENALI_PHY_919 13bits offset_0 */ + tmp = (1 << 12) | (2 << 7) | (1 << 1); + mmio_clrsetbits_32(PHY_REG(i, 911), 0x1fff, tmp); + mmio_clrsetbits_32(PHY_REG(i, 919), 0x1fff, tmp); + + /* PHY_PLL_CTRL_CA */ + /* DENALI_PHY_911 13bits offset_16 */ + /* PHY_LP4_BOOT_PLL_CTRL_CA */ + /* DENALI_PHY_919 13bits offset_16 */ + tmp = (2 << 7) | (1 << 5) | (1 << 1); + mmio_clrsetbits_32(PHY_REG(i, 911), 
0x1fff << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 919), 0x1fff << 16, tmp << 16); + + /* PHY_TCKSRE_WAIT */ + /* DENALI_PHY_922 4bits offset_24 */ + if (pdram_timing->mhz <= 400) + tmp = 1; + else if (pdram_timing->mhz <= 800) + tmp = 3; + else if (pdram_timing->mhz <= 1000) + tmp = 4; + else + tmp = 5; + mmio_clrsetbits_32(PHY_REG(i, 922), 0xf << 24, tmp << 24); + /* PHY_CAL_CLK_SELECT_0:RW8:3 */ + div = pdram_timing->mhz / (2 * 20); + for (j = 2, tmp = 1; j <= 128; j <<= 1, tmp++) { + if (div < j) + break; + } + mmio_clrsetbits_32(PHY_REG(i, 947), 0x7 << 8, tmp << 8); + + if (timing_config->dram_type == DDR3) { + mem_delay_ps = 0; + trpre_min_ps = 1000; + } else if (timing_config->dram_type == LPDDR4) { + mem_delay_ps = 1500; + trpre_min_ps = 900; + } else if (timing_config->dram_type == LPDDR3) { + mem_delay_ps = 2500; + trpre_min_ps = 900; + } else { + ERROR("gen_rk3399_phy_params:dramtype unsupport\n"); + return; + } + total_delay_ps = mem_delay_ps + pad_delay_ps; + delay_frac_ps = 1000 * total_delay_ps / + (1000000 / pdram_timing->mhz); + gate_delay_ps = delay_frac_ps + 1000 - (trpre_min_ps / 2); + gate_delay_frac_ps = gate_delay_ps % 1000; + tmp = gate_delay_frac_ps * 0x200 / 1000; + /* PHY_RDDQS_GATE_SLAVE_DELAY */ + /* DENALI_PHY_77/205/333/461 10bits offset_16 */ + mmio_clrsetbits_32(PHY_REG(i, 77), 0x2ff << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 205), 0x2ff << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 333), 0x2ff << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 461), 0x2ff << 16, tmp << 16); + + tmp = gate_delay_ps / 1000; + /* PHY_LP4_BOOT_RDDQS_LATENCY_ADJUST */ + /* DENALI_PHY_10/138/266/394 4bit offset_0 */ + mmio_clrsetbits_32(PHY_REG(i, 10), 0xf, tmp); + mmio_clrsetbits_32(PHY_REG(i, 138), 0xf, tmp); + mmio_clrsetbits_32(PHY_REG(i, 266), 0xf, tmp); + mmio_clrsetbits_32(PHY_REG(i, 394), 0xf, tmp); + /* PHY_GTLVL_LAT_ADJ_START */ + /* DENALI_PHY_80/208/336/464 4bits offset_16 */ + tmp = rddqs_delay_ps / (1000000 / pdram_timing->mhz) + 2; + mmio_clrsetbits_32(PHY_REG(i, 80), 0xf << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 208), 0xf << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 336), 0xf << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 464), 0xf << 16, tmp << 16); + + cas_lat = pdram_timing->cl + PI_ADD_LATENCY; + rddata_en_ie_dly = ie_enable / (1000000 / pdram_timing->mhz); + if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0) + rddata_en_ie_dly++; + rddata_en_ie_dly = rddata_en_ie_dly - 1; + tsel_adder = tsel_enable / (1000000 / pdram_timing->mhz); + if ((tsel_enable % (1000000 / pdram_timing->mhz)) != 0) + tsel_adder++; + if (rddata_en_ie_dly > tsel_adder) + extra_adder = rddata_en_ie_dly - tsel_adder; + else + extra_adder = 0; + delta = cas_lat - rddata_en_ie_dly; + if (PI_REGS_DIMM_SUPPORT && PI_DOUBLEFREEK) + hs_offset = 2; + else + hs_offset = 1; + if (rddata_en_ie_dly > (cas_lat - 1 - hs_offset)) + tmp = 0; + else if ((delta == 2) || (delta == 1)) + tmp = rddata_en_ie_dly - 0 - extra_adder; + else + tmp = extra_adder; + /* PHY_LP4_BOOT_RDDATA_EN_TSEL_DLY */ + /* DENALI_PHY_9/137/265/393 4bit offset_16 */ + mmio_clrsetbits_32(PHY_REG(i, 9), 0xf << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 137), 0xf << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 265), 0xf << 16, tmp << 16); + mmio_clrsetbits_32(PHY_REG(i, 393), 0xf << 16, tmp << 16); + /* PHY_RDDATA_EN_TSEL_DLY */ + /* DENALI_PHY_86/214/342/470 4bit offset_0 */ + mmio_clrsetbits_32(PHY_REG(i, 86), 0xf, tmp); + mmio_clrsetbits_32(PHY_REG(i, 214), 0xf, tmp); + 
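+		/* each PHY data slice has its own copy of this field, 128 registers apart (86/214/342/470) */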
mmio_clrsetbits_32(PHY_REG(i, 342), 0xf, tmp); + mmio_clrsetbits_32(PHY_REG(i, 470), 0xf, tmp); + + if (tsel_adder > rddata_en_ie_dly) + extra_adder = tsel_adder - rddata_en_ie_dly; + else + extra_adder = 0; + if (rddata_en_ie_dly > (cas_lat - 1 - hs_offset)) + tmp = tsel_adder; + else + tmp = rddata_en_ie_dly - 0 + extra_adder; + /* PHY_LP4_BOOT_RDDATA_EN_DLY */ + /* DENALI_PHY_9/137/265/393 4bit offset_8 */ + mmio_clrsetbits_32(PHY_REG(i, 9), 0xf << 8, tmp << 8); + mmio_clrsetbits_32(PHY_REG(i, 137), 0xf << 8, tmp << 8); + mmio_clrsetbits_32(PHY_REG(i, 265), 0xf << 8, tmp << 8); + mmio_clrsetbits_32(PHY_REG(i, 393), 0xf << 8, tmp << 8); + /* PHY_RDDATA_EN_DLY */ + /* DENALI_PHY_85/213/341/469 4bit offset_24 */ + mmio_clrsetbits_32(PHY_REG(i, 85), 0xf << 24, tmp << 24); + mmio_clrsetbits_32(PHY_REG(i, 213), 0xf << 24, tmp << 24); + mmio_clrsetbits_32(PHY_REG(i, 341), 0xf << 24, tmp << 24); + mmio_clrsetbits_32(PHY_REG(i, 469), 0xf << 24, tmp << 24); + + if (pdram_timing->mhz <= ENPER_CS_TRAINING_FREQ) { + /* + * Note:Per-CS Training is not compatible at speeds + * under 533 MHz. If the PHY is running at a speed + * less than 533MHz, all phy_per_cs_training_en_X + * parameters must be cleared to 0. + */ + + /*DENALI_PHY_84/212/340/468 1bit offset_16 */ + mmio_clrbits_32(PHY_REG(i, 84), 0x1 << 16); + mmio_clrbits_32(PHY_REG(i, 212), 0x1 << 16); + mmio_clrbits_32(PHY_REG(i, 340), 0x1 << 16); + mmio_clrbits_32(PHY_REG(i, 468), 0x1 << 16); + } else { + mmio_setbits_32(PHY_REG(i, 84), 0x1 << 16); + mmio_setbits_32(PHY_REG(i, 212), 0x1 << 16); + mmio_setbits_32(PHY_REG(i, 340), 0x1 << 16); + mmio_setbits_32(PHY_REG(i, 468), 0x1 << 16); + } + gen_rk3399_phy_dll_bypass(pdram_timing->mhz, i, fn, + timing_config->dram_type); + } +} + +static int to_get_clk_index(unsigned int mhz) +{ + int pll_cnt, i; + + pll_cnt = ARRAY_SIZE(dpll_rates_table); + + /* Assumming rate_table is in descending order */ + for (i = 0; i < pll_cnt; i++) { + if (mhz >= dpll_rates_table[i].mhz) + break; + } + + /* if mhz lower than lowest frequency in table, use lowest frequency */ + if (i == pll_cnt) + i = pll_cnt - 1; + + return i; +} + +uint32_t ddr_get_rate(void) +{ + uint32_t refdiv, postdiv1, fbdiv, postdiv2; + + refdiv = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1)) & 0x3f; + fbdiv = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 0)) & 0xfff; + postdiv1 = + (mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1)) >> 8) & 0x7; + postdiv2 = + (mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1)) >> 12) & 0x7; + + return (24 / refdiv * fbdiv / postdiv1 / postdiv2) * 1000 * 1000; +} + +/* + * return: bit12: channel 1, external self-refresh + * bit11: channel 1, stdby_mode + * bit10: channel 1, self-refresh with controller and memory clock gate + * bit9: channel 1, self-refresh + * bit8: channel 1, power-down + * + * bit4: channel 1, external self-refresh + * bit3: channel 0, stdby_mode + * bit2: channel 0, self-refresh with controller and memory clock gate + * bit1: channel 0, self-refresh + * bit0: channel 0, power-down + */ +uint32_t exit_low_power(void) +{ + uint32_t low_power = 0; + uint32_t channel_mask; + uint32_t tmp, i; + + channel_mask = (mmio_read_32(PMUGRF_BASE + PMUGRF_OSREG(2)) >> 28) & + 0x3; + for (i = 0; i < 2; i++) { + if (!(channel_mask & (1 << i))) + continue; + + /* exit stdby mode */ + mmio_write_32(CIC_BASE + CIC_CTRL1, + (1 << (i + 16)) | (0 << i)); + /* exit external self-refresh */ + tmp = i ? 
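+		/* PMU_SFT_CON: bit 12 is the external self-refresh request for channel 1, bit 8 for channel 0 */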
12 : 8; + low_power |= ((mmio_read_32(PMU_BASE + PMU_SFT_CON) >> tmp) & + 0x1) << (4 + 8 * i); + mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, 1 << tmp); + while (!(mmio_read_32(PMU_BASE + PMU_DDR_SREF_ST) & (1 << i))) + ; + /* exit auto low-power */ + mmio_clrbits_32(CTL_REG(i, 101), 0x7); + /* lp_cmd to exit */ + if (((mmio_read_32(CTL_REG(i, 100)) >> 24) & 0x7f) != + 0x40) { + while (mmio_read_32(CTL_REG(i, 200)) & 0x1) + ; + mmio_clrsetbits_32(CTL_REG(i, 93), 0xffu << 24, + 0x69 << 24); + while (((mmio_read_32(CTL_REG(i, 100)) >> 24) & 0x7f) != + 0x40) + ; + } + } + return low_power; +} + +void resume_low_power(uint32_t low_power) +{ + uint32_t channel_mask; + uint32_t tmp, i, val; + + channel_mask = (mmio_read_32(PMUGRF_BASE + PMUGRF_OSREG(2)) >> 28) & + 0x3; + for (i = 0; i < 2; i++) { + if (!(channel_mask & (1 << i))) + continue; + + /* resume external self-refresh */ + tmp = i ? 12 : 8; + val = (low_power >> (4 + 8 * i)) & 0x1; + mmio_setbits_32(PMU_BASE + PMU_SFT_CON, val << tmp); + /* resume auto low-power */ + val = (low_power >> (8 * i)) & 0x7; + mmio_setbits_32(CTL_REG(i, 101), val); + /* resume stdby mode */ + val = (low_power >> (3 + 8 * i)) & 0x1; + mmio_write_32(CIC_BASE + CIC_CTRL1, + (1 << (i + 16)) | (val << i)); + } +} + +static void dram_low_power_config(void) +{ + uint32_t tmp, i; + uint32_t ch_cnt = rk3399_dram_status.timing_config.ch_cnt; + uint32_t dram_type = rk3399_dram_status.timing_config.dram_type; + + if (dram_type == DDR3) + tmp = (2 << 16) | (0x7 << 8); + else + tmp = (3 << 16) | (0x7 << 8); + + for (i = 0; i < ch_cnt; i++) + mmio_clrsetbits_32(CTL_REG(i, 101), 0x70f0f, tmp); + + /* standby idle */ + mmio_write_32(CIC_BASE + CIC_CG_WAIT_TH, 0x640008); + + if (ch_cnt == 2) { + mmio_write_32(GRF_BASE + GRF_DDRC1_CON1, + (((0x1<<4) | (0x1<<5) | (0x1<<6) | + (0x1<<7)) << 16) | + ((0x1<<4) | (0x0<<5) | (0x1<<6) | (0x1<<7))); + mmio_write_32(CIC_BASE + CIC_CTRL1, 0x002a0028); + } + + mmio_write_32(GRF_BASE + GRF_DDRC0_CON1, + (((0x1<<4) | (0x1<<5) | (0x1<<6) | (0x1<<7)) << 16) | + ((0x1<<4) | (0x0<<5) | (0x1<<6) | (0x1<<7))); + mmio_write_32(CIC_BASE + CIC_CTRL1, 0x00150014); +} + +void dram_dfs_init(void) +{ + uint32_t trefi0, trefi1, boot_freq; + uint32_t rddqs_adjust, rddqs_slave; + + /* get sdram config for os reg */ + get_dram_drv_odt_val(sdram_config.dramtype, + &rk3399_dram_status.drv_odt_lp_cfg); + sdram_timing_cfg_init(&rk3399_dram_status.timing_config, + &sdram_config, + &rk3399_dram_status.drv_odt_lp_cfg); + + trefi0 = ((mmio_read_32(CTL_REG(0, 48)) >> 16) & 0xffff) + 8; + trefi1 = ((mmio_read_32(CTL_REG(0, 49)) >> 16) & 0xffff) + 8; + + rk3399_dram_status.index_freq[0] = trefi0 * 10 / 39; + rk3399_dram_status.index_freq[1] = trefi1 * 10 / 39; + rk3399_dram_status.current_index = + (mmio_read_32(CTL_REG(0, 111)) >> 16) & 0x3; + if (rk3399_dram_status.timing_config.dram_type == DDR3) { + rk3399_dram_status.index_freq[0] /= 2; + rk3399_dram_status.index_freq[1] /= 2; + } + boot_freq = + rk3399_dram_status.index_freq[rk3399_dram_status.current_index]; + boot_freq = dpll_rates_table[to_get_clk_index(boot_freq)].mhz; + rk3399_dram_status.boot_freq = boot_freq; + rk3399_dram_status.index_freq[rk3399_dram_status.current_index] = + boot_freq; + rk3399_dram_status.index_freq[(rk3399_dram_status.current_index + 1) & + 0x1] = 0; + rk3399_dram_status.low_power_stat = 0; + /* + * following register decide if NOC stall the access request + * or return error when NOC being idled. 
when doing ddr frequency + * scaling in M0 or DCF, we need to make sure noc stall the access + * request, if return error cpu may data abort when ddr frequency + * changing. it don't need to set this register every times, + * so we init this register in function dram_dfs_init(). + */ + mmio_write_32(GRF_BASE + GRF_SOC_CON(0), 0xffffffff); + mmio_write_32(GRF_BASE + GRF_SOC_CON(1), 0xffffffff); + mmio_write_32(GRF_BASE + GRF_SOC_CON(2), 0xffffffff); + mmio_write_32(GRF_BASE + GRF_SOC_CON(3), 0xffffffff); + mmio_write_32(GRF_BASE + GRF_SOC_CON(4), 0x70007000); + + /* Disable multicast */ + mmio_clrbits_32(PHY_REG(0, 896), 1); + mmio_clrbits_32(PHY_REG(1, 896), 1); + dram_low_power_config(); + + /* + * If boot_freq isn't in the bypass mode, it can get the + * rddqs_delay_ps from the result of gate training + */ + if (((mmio_read_32(PHY_REG(0, 86)) >> 8) & 0xf) != 0xc) { + + /* + * Select PHY's frequency set to current_index + * index for get the result of gate Training + * from registers + */ + mmio_clrsetbits_32(PHY_REG(0, 896), 0x3 << 8, + rk3399_dram_status.current_index << 8); + rddqs_slave = (mmio_read_32(PHY_REG(0, 77)) >> 16) & 0x3ff; + rddqs_slave = rddqs_slave * 1000000 / boot_freq / 512; + + rddqs_adjust = mmio_read_32(PHY_REG(0, 78)) & 0xf; + rddqs_adjust = rddqs_adjust * 1000000 / boot_freq; + rddqs_delay_ps = rddqs_slave + rddqs_adjust - + (1000000 / boot_freq / 2); + } else { + rddqs_delay_ps = 3500; + } +} + +/* + * arg0: bit0-7: sr_idle; bit8-15:sr_mc_gate_idle; bit16-31: standby idle + * arg1: bit0-11: pd_idle; bit 16-27: srpd_lite_idle + * arg2: bit0: if odt en + */ +uint32_t dram_set_odt_pd(uint32_t arg0, uint32_t arg1, uint32_t arg2) +{ + struct drv_odt_lp_config *lp_cfg = &rk3399_dram_status.drv_odt_lp_cfg; + uint32_t *low_power = &rk3399_dram_status.low_power_stat; + uint32_t dram_type, ch_count, pd_tmp, sr_tmp, i; + + dram_type = rk3399_dram_status.timing_config.dram_type; + ch_count = rk3399_dram_status.timing_config.ch_cnt; + + lp_cfg->sr_idle = arg0 & 0xff; + lp_cfg->sr_mc_gate_idle = (arg0 >> 8) & 0xff; + lp_cfg->standby_idle = (arg0 >> 16) & 0xffff; + lp_cfg->pd_idle = arg1 & 0xfff; + lp_cfg->srpd_lite_idle = (arg1 >> 16) & 0xfff; + + rk3399_dram_status.timing_config.odt = arg2 & 0x1; + + exit_low_power(); + + *low_power = 0; + + /* pd_idle en */ + if (lp_cfg->pd_idle) + *low_power |= ((1 << 0) | (1 << 8)); + /* sr_idle en srpd_lite_idle */ + if (lp_cfg->sr_idle | lp_cfg->srpd_lite_idle) + *low_power |= ((1 << 1) | (1 << 9)); + /* sr_mc_gate_idle */ + if (lp_cfg->sr_mc_gate_idle) + *low_power |= ((1 << 2) | (1 << 10)); + /* standbyidle */ + if (lp_cfg->standby_idle) { + if (rk3399_dram_status.timing_config.ch_cnt == 2) + *low_power |= ((1 << 3) | (1 << 11)); + else + *low_power |= (1 << 3); + } + + pd_tmp = arg1; + if (dram_type != LPDDR4) + pd_tmp = arg1 & 0xfff; + sr_tmp = arg0 & 0xffff; + for (i = 0; i < ch_count; i++) { + mmio_write_32(CTL_REG(i, 102), pd_tmp); + mmio_clrsetbits_32(CTL_REG(i, 103), 0xffff, sr_tmp); + } + mmio_write_32(CIC_BASE + CIC_IDLE_TH, (arg0 >> 16) & 0xffff); + + return 0; +} + +static void m0_configure_ddr(struct pll_div pll_div, uint32_t ddr_index) +{ + mmio_write_32(M0_PARAM_ADDR + PARAM_DPLL_CON0, FBDIV(pll_div.fbdiv)); + mmio_write_32(M0_PARAM_ADDR + PARAM_DPLL_CON1, + POSTDIV2(pll_div.postdiv2) | POSTDIV1(pll_div.postdiv1) | + REFDIV(pll_div.refdiv)); + + mmio_write_32(M0_PARAM_ADDR + PARAM_DRAM_FREQ, pll_div.mhz); + + mmio_write_32(M0_PARAM_ADDR + PARAM_FREQ_SELECT, ddr_index << 4); + dmbst(); + 
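	/*
	 * dmbst() is a store barrier: the parameter-block writes above
	 * (DPLL settings, target frequency and frequency-set index) must be
	 * visible before the M0 is given its entry address below and later
	 * started by the caller via m0_start().
	 */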
m0_configure_execute_addr(M0_BINCODE_BASE); +} + +static uint32_t prepare_ddr_timing(uint32_t mhz) +{ + uint32_t index; + struct dram_timing_t dram_timing; + + rk3399_dram_status.timing_config.freq = mhz; + + if (mhz < 300) + rk3399_dram_status.timing_config.dllbp = 1; + else + rk3399_dram_status.timing_config.dllbp = 0; + + if (rk3399_dram_status.timing_config.odt == 1) + gen_rk3399_set_odt(1); + + index = (rk3399_dram_status.current_index + 1) & 0x1; + + /* + * checking if having available gate traiing timing for + * target freq. + */ + dram_get_parameter(&rk3399_dram_status.timing_config, &dram_timing); + gen_rk3399_ctl_params(&rk3399_dram_status.timing_config, + &dram_timing, index); + gen_rk3399_pi_params(&rk3399_dram_status.timing_config, + &dram_timing, index); + gen_rk3399_phy_params(&rk3399_dram_status.timing_config, + &rk3399_dram_status.drv_odt_lp_cfg, + &dram_timing, index); + rk3399_dram_status.index_freq[index] = mhz; + + return index; +} + +uint32_t ddr_set_rate(uint32_t hz) +{ + uint32_t low_power, index, ddr_index; + uint32_t mhz = hz / (1000 * 1000); + + if (mhz == + rk3399_dram_status.index_freq[rk3399_dram_status.current_index]) + return mhz; + + index = to_get_clk_index(mhz); + mhz = dpll_rates_table[index].mhz; + + ddr_index = prepare_ddr_timing(mhz); + gen_rk3399_enable_training(rk3399_dram_status.timing_config.ch_cnt, + mhz); + if (ddr_index > 1) + goto out; + + /* + * Make sure the clock is enabled. The M0 clocks should be on all of the + * time during S0. + */ + m0_configure_ddr(dpll_rates_table[index], ddr_index); + m0_start(); + m0_wait_done(); + m0_stop(); + + if (rk3399_dram_status.timing_config.odt == 0) + gen_rk3399_set_odt(0); + + rk3399_dram_status.current_index = ddr_index; + low_power = rk3399_dram_status.low_power_stat; + resume_low_power(low_power); +out: + gen_rk3399_disable_training(rk3399_dram_status.timing_config.ch_cnt); + return mhz; +} + +uint32_t ddr_round_rate(uint32_t hz) +{ + int index; + uint32_t mhz = hz / (1000 * 1000); + + index = to_get_clk_index(mhz); + + return dpll_rates_table[index].mhz * 1000 * 1000; +} + +void ddr_prepare_for_sys_suspend(void) +{ + uint32_t mhz = + rk3399_dram_status.index_freq[rk3399_dram_status.current_index]; + + /* + * If we're not currently at the boot (assumed highest) frequency, we + * need to change frequencies to configure out current index. + */ + rk3399_suspend_status.freq = mhz; + exit_low_power(); + rk3399_suspend_status.low_power_stat = + rk3399_dram_status.low_power_stat; + rk3399_suspend_status.odt = rk3399_dram_status.timing_config.odt; + rk3399_dram_status.low_power_stat = 0; + rk3399_dram_status.timing_config.odt = 1; + if (mhz != rk3399_dram_status.boot_freq) + ddr_set_rate(rk3399_dram_status.boot_freq * 1000 * 1000); + + /* + * This will configure the other index to be the same frequency as the + * current one. We retrain both indices on resume, so both have to be + * setup for the same frequency. + */ + prepare_ddr_timing(rk3399_dram_status.boot_freq); +} + +void ddr_prepare_for_sys_resume(void) +{ + /* Disable multicast */ + mmio_clrbits_32(PHY_REG(0, 896), 1); + mmio_clrbits_32(PHY_REG(1, 896), 1); + + /* The suspend code changes the current index, so reset it now. */ + rk3399_dram_status.current_index = + (mmio_read_32(CTL_REG(0, 111)) >> 16) & 0x3; + rk3399_dram_status.low_power_stat = + rk3399_suspend_status.low_power_stat; + rk3399_dram_status.timing_config.odt = rk3399_suspend_status.odt; + + /* + * Set the saved frequency from suspend if it's different than the + * current frequency. 
+ */ + if (rk3399_suspend_status.freq != + rk3399_dram_status.index_freq[rk3399_dram_status.current_index]) { + ddr_set_rate(rk3399_suspend_status.freq * 1000 * 1000); + return; + } + + gen_rk3399_set_odt(rk3399_dram_status.timing_config.odt); + resume_low_power(rk3399_dram_status.low_power_stat); +} diff --git a/plat/rockchip/rk3399/drivers/dram/dfs.h b/plat/rockchip/rk3399/drivers/dram/dfs.h new file mode 100644 index 0000000..172b2a7 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/dram/dfs.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef DFS_H +#define DFS_H + +#include <stdint.h> + +struct rk3399_sdram_default_config { + unsigned char bl; + /* 1:auto precharge, 0:never auto precharge */ + unsigned char ap; + /* dram driver strength */ + unsigned char dramds; + /* dram ODT, if odt=0, this parameter invalid */ + unsigned char dramodt; + /* ca ODT, if odt=0, this parameter invalid + * only used by LPDDR4 + */ + unsigned char caodt; + unsigned char burst_ref_cnt; + /* zqcs period, unit(s) */ + unsigned char zqcsi; +}; + +struct drv_odt_lp_config { + uint32_t pd_idle; + uint32_t sr_idle; + uint32_t sr_mc_gate_idle; + uint32_t srpd_lite_idle; + uint32_t standby_idle; + uint32_t odt_en; + + uint32_t dram_side_drv; + uint32_t dram_side_dq_odt; + uint32_t dram_side_ca_odt; +}; + +uint32_t ddr_set_rate(uint32_t hz); +uint32_t ddr_round_rate(uint32_t hz); +uint32_t ddr_get_rate(void); +uint32_t dram_set_odt_pd(uint32_t arg0, uint32_t arg1, uint32_t arg2); +void dram_dfs_init(void); +void ddr_prepare_for_sys_suspend(void); +void ddr_prepare_for_sys_resume(void); + +#endif /* DFS_H */ diff --git a/plat/rockchip/rk3399/drivers/dram/dram.c b/plat/rockchip/rk3399/drivers/dram/dram.c new file mode 100644 index 0000000..42b6294 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/dram/dram.c @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <dram.h> +#include <plat_private.h> +#include <rk3399_def.h> +#include <secure.h> +#include <soc.h> + +__pmusramdata struct rk3399_sdram_params sdram_config; + +void dram_init(void) +{ + uint32_t os_reg2_val, i; + + os_reg2_val = mmio_read_32(PMUGRF_BASE + PMUGRF_OSREG(2)); + sdram_config.dramtype = SYS_REG_DEC_DDRTYPE(os_reg2_val); + sdram_config.num_channels = SYS_REG_DEC_NUM_CH(os_reg2_val); + sdram_config.stride = (mmio_read_32(SGRF_BASE + SGRF_SOC_CON3_7(4)) >> + 10) & 0x1f; + + for (i = 0; i < 2; i++) { + struct rk3399_sdram_channel *ch = &sdram_config.ch[i]; + struct rk3399_msch_timings *noc = &ch->noc_timings; + + if (!(SYS_REG_DEC_CHINFO(os_reg2_val, i))) + continue; + + ch->rank = SYS_REG_DEC_RANK(os_reg2_val, i); + ch->col = SYS_REG_DEC_COL(os_reg2_val, i); + ch->bk = SYS_REG_DEC_BK(os_reg2_val, i); + ch->bw = SYS_REG_DEC_BW(os_reg2_val, i); + ch->dbw = SYS_REG_DEC_DBW(os_reg2_val, i); + ch->row_3_4 = SYS_REG_DEC_ROW_3_4(os_reg2_val, i); + ch->cs0_row = SYS_REG_DEC_CS0_ROW(os_reg2_val, i); + ch->cs1_row = SYS_REG_DEC_CS1_ROW(os_reg2_val, i); + ch->ddrconfig = mmio_read_32(MSCH_BASE(i) + MSCH_DEVICECONF); + + noc->ddrtiminga0.d32 = mmio_read_32(MSCH_BASE(i) + + MSCH_DDRTIMINGA0); + noc->ddrtimingb0.d32 = mmio_read_32(MSCH_BASE(i) + + MSCH_DDRTIMINGB0); + noc->ddrtimingc0.d32 = mmio_read_32(MSCH_BASE(i) + + MSCH_DDRTIMINGC0); + noc->devtodev0.d32 = mmio_read_32(MSCH_BASE(i) + + MSCH_DEVTODEV0); + noc->ddrmode.d32 = mmio_read_32(MSCH_BASE(i) + MSCH_DDRMODE); + noc->agingx0 = mmio_read_32(MSCH_BASE(i) + MSCH_AGINGX0); + } +} diff --git a/plat/rockchip/rk3399/drivers/dram/dram.h b/plat/rockchip/rk3399/drivers/dram/dram.h new file mode 100644 index 0000000..5572b16 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/dram/dram.h @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef DRAM_H +#define DRAM_H + +#include <stdint.h> + +#include <dram_regs.h> +#include <plat_private.h> + +enum { + DDR3 = 3, + LPDDR2 = 5, + LPDDR3 = 6, + LPDDR4 = 7, + UNUSED = 0xff +}; + +struct rk3399_ddr_pctl_regs { + uint32_t denali_ctl[CTL_REG_NUM]; +}; + +struct rk3399_ddr_publ_regs { + /* + * PHY registers from 0 to 90 for slice1. + * These are used to restore slice1-4 on resume. + */ + uint32_t phy0[91]; + /* + * PHY registers from 512 to 895. + * Only registers 0-37 of each 128 register range are used. 
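	 * That is, three 128-register blocks starting at 512, 640 and 768,
	 * of which the first 38 (0-37) are saved, hence phy512[3][38].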
+ */ + uint32_t phy512[3][38]; + uint32_t phy896[63]; +}; + +struct rk3399_ddr_pi_regs { + uint32_t denali_pi[PI_REG_NUM]; +}; +union noc_ddrtiminga0 { + uint32_t d32; + struct { + unsigned acttoact : 6; + unsigned reserved0 : 2; + unsigned rdtomiss : 6; + unsigned reserved1 : 2; + unsigned wrtomiss : 6; + unsigned reserved2 : 2; + unsigned readlatency : 8; + } b; +}; + +union noc_ddrtimingb0 { + uint32_t d32; + struct { + unsigned rdtowr : 5; + unsigned reserved0 : 3; + unsigned wrtord : 5; + unsigned reserved1 : 3; + unsigned rrd : 4; + unsigned reserved2 : 4; + unsigned faw : 6; + unsigned reserved3 : 2; + } b; +}; + +union noc_ddrtimingc0 { + uint32_t d32; + struct { + unsigned burstpenalty : 4; + unsigned reserved0 : 4; + unsigned wrtomwr : 6; + unsigned reserved1 : 18; + } b; +}; + +union noc_devtodev0 { + uint32_t d32; + struct { + unsigned busrdtord : 3; + unsigned reserved0 : 1; + unsigned busrdtowr : 3; + unsigned reserved1 : 1; + unsigned buswrtord : 3; + unsigned reserved2 : 1; + unsigned buswrtowr : 3; + unsigned reserved3 : 17; + } b; +}; + +union noc_ddrmode { + uint32_t d32; + struct { + unsigned autoprecharge : 1; + unsigned bypassfiltering : 1; + unsigned fawbank : 1; + unsigned burstsize : 2; + unsigned mwrsize : 2; + unsigned reserved2 : 1; + unsigned forceorder : 8; + unsigned forceorderstate : 8; + unsigned reserved3 : 8; + } b; +}; + +struct rk3399_msch_timings { + union noc_ddrtiminga0 ddrtiminga0; + union noc_ddrtimingb0 ddrtimingb0; + union noc_ddrtimingc0 ddrtimingc0; + union noc_devtodev0 devtodev0; + union noc_ddrmode ddrmode; + uint32_t agingx0; +}; + +struct rk3399_sdram_channel { + unsigned char rank; + /* col = 0, means this channel is invalid */ + unsigned char col; + /* 3:8bank, 2:4bank */ + unsigned char bk; + /* channel buswidth, 2:32bit, 1:16bit, 0:8bit */ + unsigned char bw; + /* die buswidth, 2:32bit, 1:16bit, 0:8bit */ + unsigned char dbw; + /* row_3_4 = 1: 6Gb or 12Gb die + * row_3_4 = 0: normal die, power of 2 + */ + unsigned char row_3_4; + unsigned char cs0_row; + unsigned char cs1_row; + uint32_t ddrconfig; + struct rk3399_msch_timings noc_timings; +}; + +struct rk3399_sdram_params { + struct rk3399_sdram_channel ch[2]; + uint32_t ddr_freq; + unsigned char dramtype; + unsigned char num_channels; + unsigned char stride; + unsigned char odt; + struct rk3399_ddr_pctl_regs pctl_regs; + struct rk3399_ddr_pi_regs pi_regs; + struct rk3399_ddr_publ_regs phy_regs; + uint32_t rx_cal_dqs[2][4]; +}; + +extern struct rk3399_sdram_params sdram_config; + +void dram_init(void); + +#endif /* DRAM_H */ diff --git a/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.c b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.c new file mode 100644 index 0000000..3cdb7a2 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.c @@ -0,0 +1,1324 @@ +/* + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <stdint.h> +#include <string.h> + +#include <lib/utils.h> + +#include <dram.h> + +#include "dram_spec_timing.h" + +static const uint8_t ddr3_cl_cwl[][7] = { + /* + * speed 0~330 331 ~ 400 401 ~ 533 534~666 667~800 801~933 934~1066 + * tCK>3 2.5~3 1.875~2.5 1.5~1.875 1.25~1.5 1.07~1.25 0.938~1.07 + * cl<<4, cwl cl<<4, cwl cl<<4, cwl + */ + /* DDR3_800D (5-5-5) */ + {((5 << 4) | 5), ((5 << 4) | 5), 0, 0, 0, 0, 0}, + /* DDR3_800E (6-6-6) */ + {((5 << 4) | 5), ((6 << 4) | 5), 0, 0, 0, 0, 0}, + /* DDR3_1066E (6-6-6) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), 0, 0, 0, 0}, + /* DDR3_1066F (7-7-7) */ + {((5 << 4) | 5), ((6 << 4) | 5), ((7 << 4) | 6), 0, 0, 0, 0}, + /* DDR3_1066G (8-8-8) */ + {((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), 0, 0, 0, 0}, + /* DDR3_1333F (7-7-7) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((7 << 4) | 7), + 0, 0, 0}, + /* DDR3_1333G (8-8-8) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((8 << 4) | 7), + 0, 0, 0}, + /* DDR3_1333H (9-9-9) */ + {((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((9 << 4) | 7), + 0, 0, 0}, + /* DDR3_1333J (10-10-10) */ + {((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7), + 0, 0, 0}, + /* DDR3_1600G (8-8-8) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((7 << 4) | 7), + ((8 << 4) | 8), 0, 0}, + /* DDR3_1600H (9-9-9) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((8 << 4) | 7), + ((9 << 4) | 8), 0, 0}, + /* DDR3_1600J (10-10-10) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7), + ((10 << 4) | 8), 0, 0}, + /* DDR3_1600K (11-11-11) */ + {((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7), + ((11 << 4) | 8), 0, 0}, + /* DDR3_1866J (10-10-10) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((8 << 4) | 7), + ((9 << 4) | 8), ((11 << 4) | 9), 0}, + /* DDR3_1866K (11-11-11) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((8 << 4) | 7), + ((10 << 4) | 8), ((11 << 4) | 9), 0}, + /* DDR3_1866L (12-12-12) */ + {((6 << 4) | 5), ((6 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7), + ((11 << 4) | 8), ((12 << 4) | 9), 0}, + /* DDR3_1866M (13-13-13) */ + {((6 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7), + ((11 << 4) | 8), ((13 << 4) | 9), 0}, + /* DDR3_2133K (11-11-11) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((7 << 4) | 7), + ((9 << 4) | 8), ((10 << 4) | 9), ((11 << 4) | 10)}, + /* DDR3_2133L (12-12-12) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((8 << 4) | 7), + ((9 << 4) | 8), ((11 << 4) | 9), ((12 << 4) | 10)}, + /* DDR3_2133M (13-13-13) */ + {((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7), + ((10 << 4) | 8), ((12 << 4) | 9), ((13 << 4) | 10)}, + /* DDR3_2133N (14-14-14) */ + {((6 << 4) | 5), ((6 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7), + ((11 << 4) | 8), ((13 << 4) | 9), ((14 << 4) | 10)}, + /* DDR3_DEFAULT */ + {((6 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7), + ((11 << 4) | 8), ((13 << 4) | 9), ((14 << 4) | 10)} +}; + +static const uint16_t ddr3_trc_tfaw[] = { + /* tRC tFAW */ + ((50 << 8) | 50), /* DDR3_800D (5-5-5) */ + ((53 << 8) | 50), /* DDR3_800E (6-6-6) */ + + ((49 << 8) | 50), /* DDR3_1066E (6-6-6) */ + ((51 << 8) | 50), /* DDR3_1066F (7-7-7) */ + ((53 << 8) | 50), /* DDR3_1066G (8-8-8) */ + + ((47 << 8) | 45), /* DDR3_1333F (7-7-7) */ + ((48 << 8) | 45), /* DDR3_1333G (8-8-8) */ + ((50 << 8) | 45), /* DDR3_1333H (9-9-9) */ + ((51 << 8) | 45), /* DDR3_1333J (10-10-10) */ + + ((45 << 8) | 40), /* DDR3_1600G 
(8-8-8) */ + ((47 << 8) | 40), /* DDR3_1600H (9-9-9)*/ + ((48 << 8) | 40), /* DDR3_1600J (10-10-10) */ + ((49 << 8) | 40), /* DDR3_1600K (11-11-11) */ + + ((45 << 8) | 35), /* DDR3_1866J (10-10-10) */ + ((46 << 8) | 35), /* DDR3_1866K (11-11-11) */ + ((47 << 8) | 35), /* DDR3_1866L (12-12-12) */ + ((48 << 8) | 35), /* DDR3_1866M (13-13-13) */ + + ((44 << 8) | 35), /* DDR3_2133K (11-11-11) */ + ((45 << 8) | 35), /* DDR3_2133L (12-12-12) */ + ((46 << 8) | 35), /* DDR3_2133M (13-13-13) */ + ((47 << 8) | 35), /* DDR3_2133N (14-14-14) */ + + ((53 << 8) | 50) /* DDR3_DEFAULT */ +}; + +static uint32_t get_max_speed_rate(struct timing_related_config *timing_config) +{ + if (timing_config->ch_cnt > 1) + return max(timing_config->dram_info[0].speed_rate, + timing_config->dram_info[1].speed_rate); + else + return timing_config->dram_info[0].speed_rate; +} + +static uint32_t +get_max_die_capability(struct timing_related_config *timing_config) +{ + uint32_t die_cap = 0; + uint32_t cs, ch; + + for (ch = 0; ch < timing_config->ch_cnt; ch++) { + for (cs = 0; cs < timing_config->dram_info[ch].cs_cnt; cs++) { + die_cap = max(die_cap, + timing_config-> + dram_info[ch].per_die_capability[cs]); + } + } + return die_cap; +} + +/* tRSTL, 100ns */ +#define DDR3_TRSTL (100) +/* trsth, 500us */ +#define DDR3_TRSTH (500000) +/* trefi, 7.8us */ +#define DDR3_TREFI_7_8_US (7800) +/* tWR, 15ns */ +#define DDR3_TWR (15) +/* tRTP, max(4 tCK,7.5ns) */ +#define DDR3_TRTP (7) +/* tRRD = max(4nCK, 10ns) */ +#define DDR3_TRRD (10) +/* tCK */ +#define DDR3_TCCD (4) +/*tWTR, max(4 tCK,7.5ns)*/ +#define DDR3_TWTR (7) +/* tCK */ +#define DDR3_TRTW (0) +/* tRAS, 37.5ns(400MHz) 37.5ns(533MHz) */ +#define DDR3_TRAS (37) +/* ns */ +#define DDR3_TRFC_512MBIT (90) +/* ns */ +#define DDR3_TRFC_1GBIT (110) +/* ns */ +#define DDR3_TRFC_2GBIT (160) +/* ns */ +#define DDR3_TRFC_4GBIT (300) +/* ns */ +#define DDR3_TRFC_8GBIT (350) + +/*pd and sr*/ +#define DDR3_TXP (7) /* tXP, max(3 tCK, 7.5ns)( < 933MHz) */ +#define DDR3_TXPDLL (24) /* tXPDLL, max(10 tCK, 24ns) */ +#define DDR3_TDLLK (512) /* tXSR, tDLLK=512 tCK */ +#define DDR3_TCKE_400MHZ (7) /* tCKE, max(3 tCK,7.5ns)(400MHz) */ +#define DDR3_TCKE_533MHZ (6) /* tCKE, max(3 tCK,5.625ns)(533MHz) */ +#define DDR3_TCKSRE (10) /* tCKSRX, max(5 tCK, 10ns) */ + +/*mode register timing*/ +#define DDR3_TMOD (15) /* tMOD, max(12 tCK,15ns) */ +#define DDR3_TMRD (4) /* tMRD, 4 tCK */ + +/* ZQ */ +#define DDR3_TZQINIT (640) /* tZQinit, max(512 tCK, 640ns) */ +#define DDR3_TZQCS (80) /* tZQCS, max(64 tCK, 80ns) */ +#define DDR3_TZQOPER (320) /* tZQoper, max(256 tCK, 320ns) */ + +/* Write leveling */ +#define DDR3_TWLMRD (40) /* tCK */ +#define DDR3_TWLO (9) /* max 7.5ns */ +#define DDR3_TWLDQSEN (25) /* tCK */ + +/* + * Description: depend on input parameter "timing_config", + * and calculate all ddr3 + * spec timing to "pdram_timing" + * parameters: + * input: timing_config + * output: pdram_timing + */ +static void ddr3_get_parameter(struct timing_related_config *timing_config, + struct dram_timing_t *pdram_timing) +{ + uint32_t nmhz = timing_config->freq; + uint32_t ddr_speed_bin = get_max_speed_rate(timing_config); + uint32_t ddr_capability_per_die = get_max_die_capability(timing_config); + uint32_t tmp; + + zeromem((void *)pdram_timing, sizeof(struct dram_timing_t)); + pdram_timing->mhz = nmhz; + pdram_timing->al = 0; + pdram_timing->bl = timing_config->bl; + if (nmhz <= 330) + tmp = 0; + else if (nmhz <= 400) + tmp = 1; + else if (nmhz <= 533) + tmp = 2; + else if (nmhz <= 666) + tmp = 3; + else 
if (nmhz <= 800) + tmp = 4; + else if (nmhz <= 933) + tmp = 5; + else + tmp = 6; + + /* when dll bypss cl = cwl = 6 */ + if (nmhz < 300) { + pdram_timing->cl = 6; + pdram_timing->cwl = 6; + } else { + pdram_timing->cl = (ddr3_cl_cwl[ddr_speed_bin][tmp] >> 4) & 0xf; + pdram_timing->cwl = ddr3_cl_cwl[ddr_speed_bin][tmp] & 0xf; + } + + switch (timing_config->dramds) { + case 40: + tmp = DDR3_DS_40; + break; + case 34: + default: + tmp = DDR3_DS_34; + break; + } + + if (timing_config->odt) + switch (timing_config->dramodt) { + case 60: + pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_60; + break; + case 40: + pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_40; + break; + case 120: + pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_120; + break; + case 0: + default: + pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_DIS; + break; + } + else + pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_DIS; + + pdram_timing->mr[2] = DDR3_MR2_CWL(pdram_timing->cwl); + pdram_timing->mr[3] = 0; + + pdram_timing->trstl = ((DDR3_TRSTL * nmhz + 999) / 1000); + pdram_timing->trsth = ((DDR3_TRSTH * nmhz + 999) / 1000); + /* tREFI, average periodic refresh interval, 7.8us */ + pdram_timing->trefi = ((DDR3_TREFI_7_8_US * nmhz + 999) / 1000); + /* base timing */ + pdram_timing->trcd = pdram_timing->cl; + pdram_timing->trp = pdram_timing->cl; + pdram_timing->trppb = pdram_timing->cl; + tmp = ((DDR3_TWR * nmhz + 999) / 1000); + pdram_timing->twr = tmp; + pdram_timing->tdal = tmp + pdram_timing->trp; + if (tmp < 9) { + tmp = tmp - 4; + } else { + tmp += (tmp & 0x1) ? 1 : 0; + tmp = tmp >> 1; + } + if (pdram_timing->bl == 4) + pdram_timing->mr[0] = DDR3_BC4 + | DDR3_CL(pdram_timing->cl) + | DDR3_WR(tmp); + else + pdram_timing->mr[0] = DDR3_BL8 + | DDR3_CL(pdram_timing->cl) + | DDR3_WR(tmp); + tmp = ((DDR3_TRTP * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->trtp = max(4, tmp); + pdram_timing->trc = + (((ddr3_trc_tfaw[ddr_speed_bin] >> 8) * nmhz + 999) / 1000); + tmp = ((DDR3_TRRD * nmhz + 999) / 1000); + pdram_timing->trrd = max(4, tmp); + pdram_timing->tccd = DDR3_TCCD; + tmp = ((DDR3_TWTR * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->twtr = max(4, tmp); + pdram_timing->trtw = DDR3_TRTW; + pdram_timing->tras_max = 9 * pdram_timing->trefi; + pdram_timing->tras_min = ((DDR3_TRAS * nmhz + (nmhz >> 1) + 999) + / 1000); + pdram_timing->tfaw = + (((ddr3_trc_tfaw[ddr_speed_bin] & 0x0ff) * nmhz + 999) + / 1000); + /* tRFC, 90ns(512Mb),110ns(1Gb),160ns(2Gb),300ns(4Gb),350ns(8Gb) */ + if (ddr_capability_per_die <= 0x4000000) + tmp = DDR3_TRFC_512MBIT; + else if (ddr_capability_per_die <= 0x8000000) + tmp = DDR3_TRFC_1GBIT; + else if (ddr_capability_per_die <= 0x10000000) + tmp = DDR3_TRFC_2GBIT; + else if (ddr_capability_per_die <= 0x20000000) + tmp = DDR3_TRFC_4GBIT; + else + tmp = DDR3_TRFC_8GBIT; + pdram_timing->trfc = (tmp * nmhz + 999) / 1000; + pdram_timing->txsnr = max(5, (((tmp + 10) * nmhz + 999) / 1000)); + pdram_timing->tdqsck_max = 0; + /*pd and sr*/ + pdram_timing->txsr = DDR3_TDLLK; + tmp = ((DDR3_TXP * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->txp = max(3, tmp); + tmp = ((DDR3_TXPDLL * nmhz + 999) / 1000); + pdram_timing->txpdll = max(10, tmp); + pdram_timing->tdllk = DDR3_TDLLK; + if (nmhz >= 533) + tmp = ((DDR3_TCKE_533MHZ * nmhz + 999) / 1000); + else + tmp = ((DDR3_TCKE_400MHZ * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->tcke = max(3, tmp); + pdram_timing->tckesr = (pdram_timing->tcke + 1); + tmp = ((DDR3_TCKSRE * nmhz + 999) / 1000); + pdram_timing->tcksre = max(5, tmp); + pdram_timing->tcksrx = max(5, tmp); + /*mode 
register timing*/ + tmp = ((DDR3_TMOD * nmhz + 999) / 1000); + pdram_timing->tmod = max(12, tmp); + pdram_timing->tmrd = DDR3_TMRD; + pdram_timing->tmrr = 0; + /*ODT*/ + pdram_timing->todton = pdram_timing->cwl - 2; + /*ZQ*/ + tmp = ((DDR3_TZQINIT * nmhz + 999) / 1000); + pdram_timing->tzqinit = max(512, tmp); + tmp = ((DDR3_TZQCS * nmhz + 999) / 1000); + pdram_timing->tzqcs = max(64, tmp); + tmp = ((DDR3_TZQOPER * nmhz + 999) / 1000); + pdram_timing->tzqoper = max(256, tmp); + /* write leveling */ + pdram_timing->twlmrd = DDR3_TWLMRD; + pdram_timing->twldqsen = DDR3_TWLDQSEN; + pdram_timing->twlo = ((DDR3_TWLO * nmhz + (nmhz >> 1) + 999) / 1000); +} + +#define LPDDR2_TINIT1 (100) /* ns */ +#define LPDDR2_TINIT2 (5) /* tCK */ +#define LPDDR2_TINIT3 (200000) /* 200us */ +#define LPDDR2_TINIT4 (1000) /* 1us */ +#define LPDDR2_TINIT5 (10000) /* 10us */ +#define LPDDR2_TRSTL (0) /* tCK */ +#define LPDDR2_TRSTH (500000) /* 500us */ +#define LPDDR2_TREFI_3_9_US (3900) /* 3.9us */ +#define LPDDR2_TREFI_7_8_US (7800) /* 7.8us */ + +/* base timing */ +#define LPDDR2_TRCD (24) /* tRCD,15ns(Fast)18ns(Typ)24ns(Slow) */ +#define LPDDR2_TRP_PB (18) /* tRPpb,15ns(Fast)18ns(Typ)24ns(Slow) */ +#define LPDDR2_TRP_AB_8_BANK (21) /* tRPab,18ns(Fast)21ns(Typ)27ns(Slow) */ +#define LPDDR2_TWR (15) /* tWR, max(3tCK,15ns) */ +#define LPDDR2_TRTP (7) /* tRTP, max(2tCK, 7.5ns) */ +#define LPDDR2_TRRD (10) /* tRRD, max(2tCK,10ns) */ +#define LPDDR2_TCCD (2) /* tCK */ +#define LPDDR2_TWTR_GREAT_200MHZ (7) /* ns */ +#define LPDDR2_TWTR_LITTLE_200MHZ (10) /* ns */ +#define LPDDR2_TRTW (0) /* tCK */ +#define LPDDR2_TRAS_MAX (70000) /* 70us */ +#define LPDDR2_TRAS (42) /* tRAS, max(3tCK,42ns) */ +#define LPDDR2_TFAW_GREAT_200MHZ (50) /* max(8tCK,50ns) */ +#define LPDDR2_TFAW_LITTLE_200MHZ (60) /* max(8tCK,60ns) */ +#define LPDDR2_TRFC_8GBIT (210) /* ns */ +#define LPDDR2_TRFC_4GBIT (130) /* ns */ +#define LPDDR2_TDQSCK_MIN (2) /* tDQSCKmin, 2.5ns */ +#define LPDDR2_TDQSCK_MAX (5) /* tDQSCKmax, 5.5ns */ + +/*pd and sr*/ +#define LPDDR2_TXP (7) /* tXP, max(2tCK,7.5ns) */ +#define LPDDR2_TXPDLL (0) +#define LPDDR2_TDLLK (0) /* tCK */ +#define LPDDR2_TCKE (3) /* tCK */ +#define LPDDR2_TCKESR (15) /* tCKESR, max(3tCK,15ns) */ +#define LPDDR2_TCKSRE (1) /* tCK */ +#define LPDDR2_TCKSRX (2) /* tCK */ + +/*mode register timing*/ +#define LPDDR2_TMOD (0) +#define LPDDR2_TMRD (5) /* tMRD, (=tMRW), 5 tCK */ +#define LPDDR2_TMRR (2) /* tCK */ + +/*ZQ*/ +#define LPDDR2_TZQINIT (1000) /* ns */ +#define LPDDR2_TZQCS (90) /* tZQCS, max(6tCK,90ns) */ +#define LPDDR2_TZQCL (360) /* tZQCL, max(6tCK,360ns) */ +#define LPDDR2_TZQRESET (50) /* ZQreset, max(3tCK,50ns) */ + +/* + * Description: depend on input parameter "timing_config", + * and calculate all lpddr2 + * spec timing to "pdram_timing" + * parameters: + * input: timing_config + * output: pdram_timing + */ +static void lpddr2_get_parameter(struct timing_related_config *timing_config, + struct dram_timing_t *pdram_timing) +{ + uint32_t nmhz = timing_config->freq; + uint32_t ddr_capability_per_die = get_max_die_capability(timing_config); + uint32_t tmp, trp_tmp, trppb_tmp, tras_tmp, twr_tmp, bl_tmp; + + zeromem((void *)pdram_timing, sizeof(struct dram_timing_t)); + pdram_timing->mhz = nmhz; + pdram_timing->al = 0; + pdram_timing->bl = timing_config->bl; + + /* 1066 933 800 667 533 400 333 + * RL, 8 7 6 5 4 3 3 + * WL, 4 4 3 2 2 1 1 + */ + if (nmhz <= 266) { + pdram_timing->cl = 4; + pdram_timing->cwl = 2; + pdram_timing->mr[2] = LPDDR2_RL4_WL2; + } else if (nmhz <= 333) { + 
pdram_timing->cl = 5; + pdram_timing->cwl = 2; + pdram_timing->mr[2] = LPDDR2_RL5_WL2; + } else if (nmhz <= 400) { + pdram_timing->cl = 6; + pdram_timing->cwl = 3; + pdram_timing->mr[2] = LPDDR2_RL6_WL3; + } else if (nmhz <= 466) { + pdram_timing->cl = 7; + pdram_timing->cwl = 4; + pdram_timing->mr[2] = LPDDR2_RL7_WL4; + } else { + pdram_timing->cl = 8; + pdram_timing->cwl = 4; + pdram_timing->mr[2] = LPDDR2_RL8_WL4; + } + switch (timing_config->dramds) { + case 120: + pdram_timing->mr[3] = LPDDR2_DS_120; + break; + case 80: + pdram_timing->mr[3] = LPDDR2_DS_80; + break; + case 60: + pdram_timing->mr[3] = LPDDR2_DS_60; + break; + case 48: + pdram_timing->mr[3] = LPDDR2_DS_48; + break; + case 40: + pdram_timing->mr[3] = LPDDR2_DS_40; + break; + case 34: + default: + pdram_timing->mr[3] = LPDDR2_DS_34; + break; + } + pdram_timing->mr[0] = 0; + + pdram_timing->tinit1 = (LPDDR2_TINIT1 * nmhz + 999) / 1000; + pdram_timing->tinit2 = LPDDR2_TINIT2; + pdram_timing->tinit3 = (LPDDR2_TINIT3 * nmhz + 999) / 1000; + pdram_timing->tinit4 = (LPDDR2_TINIT4 * nmhz + 999) / 1000; + pdram_timing->tinit5 = (LPDDR2_TINIT5 * nmhz + 999) / 1000; + pdram_timing->trstl = LPDDR2_TRSTL; + pdram_timing->trsth = (LPDDR2_TRSTH * nmhz + 999) / 1000; + /* + * tREFI, average periodic refresh interval, + * 15.6us(<256Mb) 7.8us(256Mb-1Gb) 3.9us(2Gb-8Gb) + */ + if (ddr_capability_per_die >= 0x10000000) + pdram_timing->trefi = (LPDDR2_TREFI_3_9_US * nmhz + 999) + / 1000; + else + pdram_timing->trefi = (LPDDR2_TREFI_7_8_US * nmhz + 999) + / 1000; + /* base timing */ + tmp = ((LPDDR2_TRCD * nmhz + 999) / 1000); + pdram_timing->trcd = max(3, tmp); + /* + * tRPpb, max(3tCK, 15ns(Fast) 18ns(Typ) 24ns(Slow), + */ + trppb_tmp = ((LPDDR2_TRP_PB * nmhz + 999) / 1000); + trppb_tmp = max(3, trppb_tmp); + pdram_timing->trppb = trppb_tmp; + /* + * tRPab, max(3tCK, 4-bank:15ns(Fast) 18ns(Typ) 24ns(Slow), + * 8-bank:18ns(Fast) 21ns(Typ) 27ns(Slow)) + */ + trp_tmp = ((LPDDR2_TRP_AB_8_BANK * nmhz + 999) / 1000); + trp_tmp = max(3, trp_tmp); + pdram_timing->trp = trp_tmp; + twr_tmp = ((LPDDR2_TWR * nmhz + 999) / 1000); + twr_tmp = max(3, twr_tmp); + pdram_timing->twr = twr_tmp; + bl_tmp = (pdram_timing->bl == 16) ? LPDDR2_BL16 : + ((pdram_timing->bl == 8) ? 
LPDDR2_BL8 : LPDDR2_BL4); + pdram_timing->mr[1] = bl_tmp | LPDDR2_N_WR(twr_tmp); + tmp = ((LPDDR2_TRTP * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->trtp = max(2, tmp); + tras_tmp = ((LPDDR2_TRAS * nmhz + 999) / 1000); + tras_tmp = max(3, tras_tmp); + pdram_timing->tras_min = tras_tmp; + pdram_timing->tras_max = ((LPDDR2_TRAS_MAX * nmhz + 999) / 1000); + pdram_timing->trc = (tras_tmp + trp_tmp); + tmp = ((LPDDR2_TRRD * nmhz + 999) / 1000); + pdram_timing->trrd = max(2, tmp); + pdram_timing->tccd = LPDDR2_TCCD; + /* tWTR, max(2tCK, 7.5ns(533-266MHz) 10ns(200-166MHz)) */ + if (nmhz > 200) + tmp = ((LPDDR2_TWTR_GREAT_200MHZ * nmhz + (nmhz >> 1) + + 999) / 1000); + else + tmp = ((LPDDR2_TWTR_LITTLE_200MHZ * nmhz + 999) / 1000); + pdram_timing->twtr = max(2, tmp); + pdram_timing->trtw = LPDDR2_TRTW; + if (nmhz <= 200) + pdram_timing->tfaw = (LPDDR2_TFAW_LITTLE_200MHZ * nmhz + 999) + / 1000; + else + pdram_timing->tfaw = (LPDDR2_TFAW_GREAT_200MHZ * nmhz + 999) + / 1000; + /* tRFC, 90ns(<=512Mb) 130ns(1Gb-4Gb) 210ns(8Gb) */ + if (ddr_capability_per_die >= 0x40000000) { + pdram_timing->trfc = + (LPDDR2_TRFC_8GBIT * nmhz + 999) / 1000; + tmp = (((LPDDR2_TRFC_8GBIT + 10) * nmhz + 999) / 1000); + } else { + pdram_timing->trfc = + (LPDDR2_TRFC_4GBIT * nmhz + 999) / 1000; + tmp = (((LPDDR2_TRFC_4GBIT + 10) * nmhz + 999) / 1000); + } + if (tmp < 2) + tmp = 2; + pdram_timing->txsr = tmp; + pdram_timing->txsnr = tmp; + /* tdqsck use rounded down */ + pdram_timing->tdqsck = ((LPDDR2_TDQSCK_MIN * nmhz + (nmhz >> 1)) + / 1000); + pdram_timing->tdqsck_max = + ((LPDDR2_TDQSCK_MAX * nmhz + (nmhz >> 1) + 999) + / 1000); + /* pd and sr */ + tmp = ((LPDDR2_TXP * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->txp = max(2, tmp); + pdram_timing->txpdll = LPDDR2_TXPDLL; + pdram_timing->tdllk = LPDDR2_TDLLK; + pdram_timing->tcke = LPDDR2_TCKE; + tmp = ((LPDDR2_TCKESR * nmhz + 999) / 1000); + pdram_timing->tckesr = max(3, tmp); + pdram_timing->tcksre = LPDDR2_TCKSRE; + pdram_timing->tcksrx = LPDDR2_TCKSRX; + /* mode register timing */ + pdram_timing->tmod = LPDDR2_TMOD; + pdram_timing->tmrd = LPDDR2_TMRD; + pdram_timing->tmrr = LPDDR2_TMRR; + /* ZQ */ + pdram_timing->tzqinit = (LPDDR2_TZQINIT * nmhz + 999) / 1000; + tmp = ((LPDDR2_TZQCS * nmhz + 999) / 1000); + pdram_timing->tzqcs = max(6, tmp); + tmp = ((LPDDR2_TZQCL * nmhz + 999) / 1000); + pdram_timing->tzqoper = max(6, tmp); + tmp = ((LPDDR2_TZQRESET * nmhz + 999) / 1000); + pdram_timing->tzqreset = max(3, tmp); +} + +#define LPDDR3_TINIT1 (100) /* ns */ +#define LPDDR3_TINIT2 (5) /* tCK */ +#define LPDDR3_TINIT3 (200000) /* 200us */ +#define LPDDR3_TINIT4 (1000) /* 1us */ +#define LPDDR3_TINIT5 (10000) /* 10us */ +#define LPDDR3_TRSTL (0) +#define LPDDR3_TRSTH (0) /* 500us */ +#define LPDDR3_TREFI_3_9_US (3900) /* 3.9us */ + +/* base timging */ +#define LPDDR3_TRCD (18) /* tRCD,15ns(Fast)18ns(Typ)24ns(Slow) */ +#define LPDDR3_TRP_PB (18) /* tRPpb, 15ns(Fast) 18ns(Typ) 24ns(Slow) */ +#define LPDDR3_TRP_AB (21) /* tRPab, 18ns(Fast) 21ns(Typ) 27ns(Slow) */ +#define LPDDR3_TWR (15) /* tWR, max(4tCK,15ns) */ +#define LPDDR3_TRTP (7) /* tRTP, max(4tCK, 7.5ns) */ +#define LPDDR3_TRRD (10) /* tRRD, max(2tCK,10ns) */ +#define LPDDR3_TCCD (4) /* tCK */ +#define LPDDR3_TWTR (7) /* tWTR, max(4tCK, 7.5ns) */ +#define LPDDR3_TRTW (0) /* tCK register min valid value */ +#define LPDDR3_TRAS_MAX (70000) /* 70us */ +#define LPDDR3_TRAS (42) /* tRAS, max(3tCK,42ns) */ +#define LPDDR3_TFAW (50) /* tFAW,max(8tCK, 50ns) */ +#define LPDDR3_TRFC_8GBIT (210) /* tRFC, 130ns(4Gb) 
210ns(>4Gb) */ +#define LPDDR3_TRFC_4GBIT (130) /* ns */ +#define LPDDR3_TDQSCK_MIN (2) /* tDQSCKmin,2.5ns */ +#define LPDDR3_TDQSCK_MAX (5) /* tDQSCKmax,5.5ns */ + +/* pd and sr */ +#define LPDDR3_TXP (7) /* tXP, max(3tCK,7.5ns) */ +#define LPDDR3_TXPDLL (0) +#define LPDDR3_TCKE (7) /* tCKE, (max 7.5ns,3 tCK) */ +#define LPDDR3_TCKESR (15) /* tCKESR, max(3tCK,15ns) */ +#define LPDDR3_TCKSRE (2) /* tCKSRE=tCPDED, 2 tCK */ +#define LPDDR3_TCKSRX (2) /* tCKSRX, 2 tCK */ + +/* mode register timing */ +#define LPDDR3_TMOD (0) +#define LPDDR3_TMRD (14) /* tMRD, (=tMRW), max(14ns, 10 tCK) */ +#define LPDDR3_TMRR (4) /* tMRR, 4 tCK */ +#define LPDDR3_TMRRI LPDDR3_TRCD + +/* ODT */ +#define LPDDR3_TODTON (3) /* 3.5ns */ + +/* ZQ */ +#define LPDDR3_TZQINIT (1000) /* 1us */ +#define LPDDR3_TZQCS (90) /* tZQCS, 90ns */ +#define LPDDR3_TZQCL (360) /* 360ns */ +#define LPDDR3_TZQRESET (50) /* ZQreset, max(3tCK,50ns) */ +/* write leveling */ +#define LPDDR3_TWLMRD (40) /* ns */ +#define LPDDR3_TWLO (20) /* ns */ +#define LPDDR3_TWLDQSEN (25) /* ns */ +/* CA training */ +#define LPDDR3_TCACKEL (10) /* tCK */ +#define LPDDR3_TCAENT (10) /* tCK */ +#define LPDDR3_TCAMRD (20) /* tCK */ +#define LPDDR3_TCACKEH (10) /* tCK */ +#define LPDDR3_TCAEXT (10) /* tCK */ +#define LPDDR3_TADR (20) /* ns */ +#define LPDDR3_TMRZ (3) /* ns */ + +/* FSP */ +#define LPDDR3_TFC_LONG (250) /* ns */ + +/* + * Description: depend on input parameter "timing_config", + * and calculate all lpddr3 + * spec timing to "pdram_timing" + * parameters: + * input: timing_config + * output: pdram_timing + */ +static void lpddr3_get_parameter(struct timing_related_config *timing_config, + struct dram_timing_t *pdram_timing) +{ + uint32_t nmhz = timing_config->freq; + uint32_t ddr_capability_per_die = get_max_die_capability(timing_config); + uint32_t tmp, trp_tmp, trppb_tmp, tras_tmp, twr_tmp, bl_tmp; + + zeromem((void *)pdram_timing, sizeof(struct dram_timing_t)); + pdram_timing->mhz = nmhz; + pdram_timing->al = 0; + pdram_timing->bl = timing_config->bl; + + /* + * Only support Write Latency Set A here + * 1066 933 800 733 667 600 533 400 166 + * RL, 16 14 12 11 10 9 8 6 3 + * WL, 8 8 6 6 6 5 4 3 1 + */ + if (nmhz <= 400) { + pdram_timing->cl = 6; + pdram_timing->cwl = 3; + pdram_timing->mr[2] = LPDDR3_RL6_WL3; + } else if (nmhz <= 533) { + pdram_timing->cl = 8; + pdram_timing->cwl = 4; + pdram_timing->mr[2] = LPDDR3_RL8_WL4; + } else if (nmhz <= 600) { + pdram_timing->cl = 9; + pdram_timing->cwl = 5; + pdram_timing->mr[2] = LPDDR3_RL9_WL5; + } else if (nmhz <= 667) { + pdram_timing->cl = 10; + pdram_timing->cwl = 6; + pdram_timing->mr[2] = LPDDR3_RL10_WL6; + } else if (nmhz <= 733) { + pdram_timing->cl = 11; + pdram_timing->cwl = 6; + pdram_timing->mr[2] = LPDDR3_RL11_WL6; + } else if (nmhz <= 800) { + pdram_timing->cl = 12; + pdram_timing->cwl = 6; + pdram_timing->mr[2] = LPDDR3_RL12_WL6; + } else if (nmhz <= 933) { + pdram_timing->cl = 14; + pdram_timing->cwl = 8; + pdram_timing->mr[2] = LPDDR3_RL14_WL8; + } else { + pdram_timing->cl = 16; + pdram_timing->cwl = 8; + pdram_timing->mr[2] = LPDDR3_RL16_WL8; + } + switch (timing_config->dramds) { + case 80: + pdram_timing->mr[3] = LPDDR3_DS_80; + break; + case 60: + pdram_timing->mr[3] = LPDDR3_DS_60; + break; + case 48: + pdram_timing->mr[3] = LPDDR3_DS_48; + break; + case 40: + pdram_timing->mr[3] = LPDDR3_DS_40; + break; + case 3440: + pdram_timing->mr[3] = LPDDR3_DS_34D_40U; + break; + case 4048: + pdram_timing->mr[3] = LPDDR3_DS_40D_48U; + break; + case 3448: + pdram_timing->mr[3] = 
LPDDR3_DS_34D_48U; + break; + case 34: + default: + pdram_timing->mr[3] = LPDDR3_DS_34; + break; + } + pdram_timing->mr[0] = 0; + if (timing_config->odt) + switch (timing_config->dramodt) { + case 60: + pdram_timing->mr11 = LPDDR3_ODT_60; + break; + case 120: + pdram_timing->mr11 = LPDDR3_ODT_120; + break; + case 240: + default: + pdram_timing->mr11 = LPDDR3_ODT_240; + break; + } + else + pdram_timing->mr11 = LPDDR3_ODT_DIS; + + pdram_timing->tinit1 = (LPDDR3_TINIT1 * nmhz + 999) / 1000; + pdram_timing->tinit2 = LPDDR3_TINIT2; + pdram_timing->tinit3 = (LPDDR3_TINIT3 * nmhz + 999) / 1000; + pdram_timing->tinit4 = (LPDDR3_TINIT4 * nmhz + 999) / 1000; + pdram_timing->tinit5 = (LPDDR3_TINIT5 * nmhz + 999) / 1000; + pdram_timing->trstl = LPDDR3_TRSTL; + pdram_timing->trsth = (LPDDR3_TRSTH * nmhz + 999) / 1000; + /* tREFI, average periodic refresh interval, 3.9us(4Gb-16Gb) */ + pdram_timing->trefi = (LPDDR3_TREFI_3_9_US * nmhz + 999) / 1000; + /* base timing */ + tmp = ((LPDDR3_TRCD * nmhz + 999) / 1000); + pdram_timing->trcd = max(3, tmp); + trppb_tmp = ((LPDDR3_TRP_PB * nmhz + 999) / 1000); + trppb_tmp = max(3, trppb_tmp); + pdram_timing->trppb = trppb_tmp; + trp_tmp = ((LPDDR3_TRP_AB * nmhz + 999) / 1000); + trp_tmp = max(3, trp_tmp); + pdram_timing->trp = trp_tmp; + twr_tmp = ((LPDDR3_TWR * nmhz + 999) / 1000); + twr_tmp = max(4, twr_tmp); + pdram_timing->twr = twr_tmp; + if (twr_tmp <= 6) + twr_tmp = 6; + else if (twr_tmp <= 8) + twr_tmp = 8; + else if (twr_tmp <= 12) + twr_tmp = twr_tmp; + else if (twr_tmp <= 14) + twr_tmp = 14; + else + twr_tmp = 16; + if (twr_tmp > 9) + pdram_timing->mr[2] |= (1 << 4); /*enable nWR > 9*/ + twr_tmp = (twr_tmp > 9) ? (twr_tmp - 10) : (twr_tmp - 2); + bl_tmp = LPDDR3_BL8; + pdram_timing->mr[1] = bl_tmp | LPDDR3_N_WR(twr_tmp); + tmp = ((LPDDR3_TRTP * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->trtp = max(4, tmp); + tras_tmp = ((LPDDR3_TRAS * nmhz + 999) / 1000); + tras_tmp = max(3, tras_tmp); + pdram_timing->tras_min = tras_tmp; + pdram_timing->trc = (tras_tmp + trp_tmp); + tmp = ((LPDDR3_TRRD * nmhz + 999) / 1000); + pdram_timing->trrd = max(2, tmp); + pdram_timing->tccd = LPDDR3_TCCD; + tmp = ((LPDDR3_TWTR * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->twtr = max(4, tmp); + pdram_timing->trtw = ((LPDDR3_TRTW * nmhz + 999) / 1000); + pdram_timing->tras_max = ((LPDDR3_TRAS_MAX * nmhz + 999) / 1000); + tmp = (LPDDR3_TFAW * nmhz + 999) / 1000; + pdram_timing->tfaw = max(8, tmp); + if (ddr_capability_per_die > 0x20000000) { + pdram_timing->trfc = + (LPDDR3_TRFC_8GBIT * nmhz + 999) / 1000; + tmp = (((LPDDR3_TRFC_8GBIT + 10) * nmhz + 999) / 1000); + } else { + pdram_timing->trfc = + (LPDDR3_TRFC_4GBIT * nmhz + 999) / 1000; + tmp = (((LPDDR3_TRFC_4GBIT + 10) * nmhz + 999) / 1000); + } + pdram_timing->txsr = max(2, tmp); + pdram_timing->txsnr = max(2, tmp); + /* tdqsck use rounded down */ + pdram_timing->tdqsck = + ((LPDDR3_TDQSCK_MIN * nmhz + (nmhz >> 1)) + / 1000); + pdram_timing->tdqsck_max = + ((LPDDR3_TDQSCK_MAX * nmhz + (nmhz >> 1) + 999) + / 1000); + /*pd and sr*/ + tmp = ((LPDDR3_TXP * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->txp = max(3, tmp); + pdram_timing->txpdll = LPDDR3_TXPDLL; + tmp = ((LPDDR3_TCKE * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->tcke = max(3, tmp); + tmp = ((LPDDR3_TCKESR * nmhz + 999) / 1000); + pdram_timing->tckesr = max(3, tmp); + pdram_timing->tcksre = LPDDR3_TCKSRE; + pdram_timing->tcksrx = LPDDR3_TCKSRX; + /*mode register timing*/ + pdram_timing->tmod = LPDDR3_TMOD; + tmp = ((LPDDR3_TMRD * nmhz + 
999) / 1000); + pdram_timing->tmrd = max(10, tmp); + pdram_timing->tmrr = LPDDR3_TMRR; + tmp = ((LPDDR3_TRCD * nmhz + 999) / 1000); + pdram_timing->tmrri = max(3, tmp); + /* ODT */ + pdram_timing->todton = (LPDDR3_TODTON * nmhz + (nmhz >> 1) + 999) + / 1000; + /* ZQ */ + pdram_timing->tzqinit = (LPDDR3_TZQINIT * nmhz + 999) / 1000; + pdram_timing->tzqcs = + ((LPDDR3_TZQCS * nmhz + 999) / 1000); + pdram_timing->tzqoper = + ((LPDDR3_TZQCL * nmhz + 999) / 1000); + tmp = ((LPDDR3_TZQRESET * nmhz + 999) / 1000); + pdram_timing->tzqreset = max(3, tmp); + /* write leveling */ + pdram_timing->twlmrd = (LPDDR3_TWLMRD * nmhz + 999) / 1000; + pdram_timing->twlo = (LPDDR3_TWLO * nmhz + 999) / 1000; + pdram_timing->twldqsen = (LPDDR3_TWLDQSEN * nmhz + 999) / 1000; + /* CA training */ + pdram_timing->tcackel = LPDDR3_TCACKEL; + pdram_timing->tcaent = LPDDR3_TCAENT; + pdram_timing->tcamrd = LPDDR3_TCAMRD; + pdram_timing->tcackeh = LPDDR3_TCACKEH; + pdram_timing->tcaext = LPDDR3_TCAEXT; + pdram_timing->tadr = (LPDDR3_TADR * nmhz + 999) / 1000; + pdram_timing->tmrz = (LPDDR3_TMRZ * nmhz + 999) / 1000; + pdram_timing->tcacd = pdram_timing->tadr + 2; + + /* FSP */ + pdram_timing->tfc_long = (LPDDR3_TFC_LONG * nmhz + 999) / 1000; +} + +#define LPDDR4_TINIT1 (200000) /* 200us */ +#define LPDDR4_TINIT2 (10) /* 10ns */ +#define LPDDR4_TINIT3 (2000000) /* 2ms */ +#define LPDDR4_TINIT4 (5) /* tCK */ +#define LPDDR4_TINIT5 (2000) /* 2us */ +#define LPDDR4_TRSTL LPDDR4_TINIT1 +#define LPDDR4_TRSTH LPDDR4_TINIT3 +#define LPDDR4_TREFI_3_9_US (3900) /* 3.9us */ + +/* base timging */ +#define LPDDR4_TRCD (18) /* tRCD, max(18ns,4tCK) */ +#define LPDDR4_TRP_PB (18) /* tRPpb, max(18ns, 4tCK) */ +#define LPDDR4_TRP_AB (21) /* tRPab, max(21ns, 4tCK) */ +#define LPDDR4_TRRD (10) /* tRRD, max(4tCK,10ns) */ +#define LPDDR4_TCCD_BL16 (8) /* tCK */ +#define LPDDR4_TCCD_BL32 (16) /* tCK */ +#define LPDDR4_TWTR (10) /* tWTR, max(8tCK, 10ns) */ +#define LPDDR4_TRTW (0) /* tCK register min valid value */ +#define LPDDR4_TRAS_MAX (70000) /* 70us */ +#define LPDDR4_TRAS (42) /* tRAS, max(3tCK,42ns) */ +#define LPDDR4_TFAW (40) /* tFAW,min 40ns) */ +#define LPDDR4_TRFC_12GBIT (280) /* tRFC, 280ns(>=12Gb) */ +#define LPDDR4_TRFC_6GBIT (180) /* 6Gb/8Gb 180ns */ +#define LPDDR4_TRFC_4GBIT (130) /* 4Gb 130ns */ +#define LPDDR4_TDQSCK_MIN (1) /* tDQSCKmin,1.5ns */ +#define LPDDR4_TDQSCK_MAX (3) /* tDQSCKmax,3.5ns */ +#define LPDDR4_TPPD (4) /* tCK */ + +/* pd and sr */ +#define LPDDR4_TXP (7) /* tXP, max(5tCK,7.5ns) */ +#define LPDDR4_TCKE (7) /* tCKE, max(7.5ns,4 tCK) */ +#define LPDDR4_TESCKE (1) /* tESCKE, max(1.75ns, 3tCK) */ +#define LPDDR4_TSR (15) /* tSR, max(15ns, 3tCK) */ +#define LPDDR4_TCMDCKE (1) /* max(1.75ns, 3tCK) */ +#define LPDDR4_TCSCKE (1) /* 1.75ns */ +#define LPDDR4_TCKELCS (5) /* max(5ns, 5tCK) */ +#define LPDDR4_TCSCKEH (1) /* 1.75ns */ +#define LPDDR4_TCKEHCS (7) /* max(7.5ns, 5tCK) */ +#define LPDDR4_TMRWCKEL (14) /* max(14ns, 10tCK) */ +#define LPDDR4_TCKELCMD (7) /* max(7.5ns, 3tCK) */ +#define LPDDR4_TCKEHCMD (7) /* max(7.5ns, 3tCK) */ +#define LPDDR4_TCKELPD (7) /* max(7.5ns, 3tCK) */ +#define LPDDR4_TCKCKEL (7) /* max(7.5ns, 3tCK) */ + +/* mode register timing */ +#define LPDDR4_TMRD (14) /* tMRD, (=tMRW), max(14ns, 10 tCK) */ +#define LPDDR4_TMRR (8) /* tMRR, 8 tCK */ + +/* ODT */ +#define LPDDR4_TODTON (3) /* 3.5ns */ + +/* ZQ */ +#define LPDDR4_TZQCAL (1000) /* 1us */ +#define LPDDR4_TZQLAT (30) /* tZQLAT, max(30ns,8tCK) */ +#define LPDDR4_TZQRESET (50) /* ZQreset, max(3tCK,50ns) */ +#define LPDDR4_TZQCKE (1) 
/* tZQCKE, max(1.75ns, 3tCK) */ + +/* write leveling */ +#define LPDDR4_TWLMRD (40) /* tCK */ +#define LPDDR4_TWLO (20) /* ns */ +#define LPDDR4_TWLDQSEN (20) /* tCK */ + +/* CA training */ +#define LPDDR4_TCAENT (250) /* ns */ +#define LPDDR4_TADR (20) /* ns */ +#define LPDDR4_TMRZ (1) /* 1.5ns */ +#define LPDDR4_TVREF_LONG (250) /* ns */ +#define LPDDR4_TVREF_SHORT (100) /* ns */ + +/* VRCG */ +#define LPDDR4_TVRCG_ENABLE (200) /* ns */ +#define LPDDR4_TVRCG_DISABLE (100) /* ns */ + +/* FSP */ +#define LPDDR4_TFC_LONG (250) /* ns */ +#define LPDDR4_TCKFSPE (7) /* max(7.5ns, 4tCK) */ +#define LPDDR4_TCKFSPX (7) /* max(7.5ns, 4tCK) */ + +/* + * Description: depend on input parameter "timing_config", + * and calculate all lpddr4 + * spec timing to "pdram_timing" + * parameters: + * input: timing_config + * output: pdram_timing + */ +static void lpddr4_get_parameter(struct timing_related_config *timing_config, + struct dram_timing_t *pdram_timing) +{ + uint32_t nmhz = timing_config->freq; + uint32_t ddr_capability_per_die = get_max_die_capability(timing_config); + uint32_t tmp, trp_tmp, trppb_tmp, tras_tmp; + + zeromem((void *)pdram_timing, sizeof(struct dram_timing_t)); + pdram_timing->mhz = nmhz; + pdram_timing->al = 0; + pdram_timing->bl = timing_config->bl; + + /* + * Only support Write Latency Set A here + * 2133 1866 1600 1333 1066 800 533 266 + * RL, 36 32 28 24 20 14 10 6 + * WL, 18 16 14 12 10 8 6 4 + * nWR, 40 34 30 24 20 16 10 6 + * nRTP,16 14 12 10 8 8 8 8 + */ + tmp = (timing_config->bl == 32) ? 1 : 0; + + /* + * we always use WR preamble = 2tCK + * RD preamble = Static + */ + tmp |= (1 << 2); + if (nmhz <= 266) { + pdram_timing->cl = 6; + pdram_timing->cwl = 4; + pdram_timing->twr = 6; + pdram_timing->trtp = 8; + pdram_timing->mr[2] = LPDDR4_RL6_NRTP8 | LPDDR4_A_WL4; + } else if (nmhz <= 533) { + if (timing_config->rdbi) { + pdram_timing->cl = 12; + pdram_timing->mr[2] = LPDDR4_RL12_NRTP8 | LPDDR4_A_WL6; + } else { + pdram_timing->cl = 10; + pdram_timing->mr[2] = LPDDR4_RL10_NRTP8 | LPDDR4_A_WL6; + } + pdram_timing->cwl = 6; + pdram_timing->twr = 10; + pdram_timing->trtp = 8; + tmp |= (1 << 4); + } else if (nmhz <= 800) { + if (timing_config->rdbi) { + pdram_timing->cl = 16; + pdram_timing->mr[2] = LPDDR4_RL16_NRTP8 | LPDDR4_A_WL8; + } else { + pdram_timing->cl = 14; + pdram_timing->mr[2] = LPDDR4_RL14_NRTP8 | LPDDR4_A_WL8; + } + pdram_timing->cwl = 8; + pdram_timing->twr = 16; + pdram_timing->trtp = 8; + tmp |= (2 << 4); + } else if (nmhz <= 1066) { + if (timing_config->rdbi) { + pdram_timing->cl = 22; + pdram_timing->mr[2] = LPDDR4_RL22_NRTP8 | LPDDR4_A_WL10; + } else { + pdram_timing->cl = 20; + pdram_timing->mr[2] = LPDDR4_RL20_NRTP8 | LPDDR4_A_WL10; + } + pdram_timing->cwl = 10; + pdram_timing->twr = 20; + pdram_timing->trtp = 8; + tmp |= (3 << 4); + } else if (nmhz <= 1333) { + if (timing_config->rdbi) { + pdram_timing->cl = 28; + pdram_timing->mr[2] = LPDDR4_RL28_NRTP10 | + LPDDR4_A_WL12; + } else { + pdram_timing->cl = 24; + pdram_timing->mr[2] = LPDDR4_RL24_NRTP10 | + LPDDR4_A_WL12; + } + pdram_timing->cwl = 12; + pdram_timing->twr = 24; + pdram_timing->trtp = 10; + tmp |= (4 << 4); + } else if (nmhz <= 1600) { + if (timing_config->rdbi) { + pdram_timing->cl = 32; + pdram_timing->mr[2] = LPDDR4_RL32_NRTP12 | + LPDDR4_A_WL14; + } else { + pdram_timing->cl = 28; + pdram_timing->mr[2] = LPDDR4_RL28_NRTP12 | + LPDDR4_A_WL14; + } + pdram_timing->cwl = 14; + pdram_timing->twr = 30; + pdram_timing->trtp = 12; + tmp |= (5 << 4); + } else if (nmhz <= 1866) { + if 
(timing_config->rdbi) { + pdram_timing->cl = 36; + pdram_timing->mr[2] = LPDDR4_RL36_NRTP14 | + LPDDR4_A_WL16; + } else { + pdram_timing->cl = 32; + pdram_timing->mr[2] = LPDDR4_RL32_NRTP14 | + LPDDR4_A_WL16; + } + pdram_timing->cwl = 16; + pdram_timing->twr = 34; + pdram_timing->trtp = 14; + tmp |= (6 << 4); + } else { + if (timing_config->rdbi) { + pdram_timing->cl = 40; + pdram_timing->mr[2] = LPDDR4_RL40_NRTP16 | + LPDDR4_A_WL18; + } else { + pdram_timing->cl = 36; + pdram_timing->mr[2] = LPDDR4_RL36_NRTP16 | + LPDDR4_A_WL18; + } + pdram_timing->cwl = 18; + pdram_timing->twr = 40; + pdram_timing->trtp = 16; + tmp |= (7 << 4); + } + pdram_timing->mr[1] = tmp; + tmp = (timing_config->rdbi ? LPDDR4_DBI_RD_EN : 0) | + (timing_config->wdbi ? LPDDR4_DBI_WR_EN : 0); + switch (timing_config->dramds) { + case 240: + pdram_timing->mr[3] = LPDDR4_PDDS_240 | tmp; + break; + case 120: + pdram_timing->mr[3] = LPDDR4_PDDS_120 | tmp; + break; + case 80: + pdram_timing->mr[3] = LPDDR4_PDDS_80 | tmp; + break; + case 60: + pdram_timing->mr[3] = LPDDR4_PDDS_60 | tmp; + break; + case 48: + pdram_timing->mr[3] = LPDDR4_PDDS_48 | tmp; + break; + case 40: + default: + pdram_timing->mr[3] = LPDDR4_PDDS_40 | tmp; + break; + } + pdram_timing->mr[0] = 0; + if (timing_config->odt) { + switch (timing_config->dramodt) { + case 240: + tmp = LPDDR4_DQODT_240; + break; + case 120: + tmp = LPDDR4_DQODT_120; + break; + case 80: + tmp = LPDDR4_DQODT_80; + break; + case 60: + tmp = LPDDR4_DQODT_60; + break; + case 48: + tmp = LPDDR4_DQODT_48; + break; + case 40: + default: + tmp = LPDDR4_DQODT_40; + break; + } + + switch (timing_config->caodt) { + case 240: + pdram_timing->mr11 = LPDDR4_CAODT_240 | tmp; + break; + case 120: + pdram_timing->mr11 = LPDDR4_CAODT_120 | tmp; + break; + case 80: + pdram_timing->mr11 = LPDDR4_CAODT_80 | tmp; + break; + case 60: + pdram_timing->mr11 = LPDDR4_CAODT_60 | tmp; + break; + case 48: + pdram_timing->mr11 = LPDDR4_CAODT_48 | tmp; + break; + case 40: + default: + pdram_timing->mr11 = LPDDR4_CAODT_40 | tmp; + break; + } + } else { + pdram_timing->mr11 = LPDDR4_CAODT_DIS | tmp; + } + + pdram_timing->tinit1 = (LPDDR4_TINIT1 * nmhz + 999) / 1000; + pdram_timing->tinit2 = (LPDDR4_TINIT2 * nmhz + 999) / 1000; + pdram_timing->tinit3 = (LPDDR4_TINIT3 * nmhz + 999) / 1000; + pdram_timing->tinit4 = (LPDDR4_TINIT4 * nmhz + 999) / 1000; + pdram_timing->tinit5 = (LPDDR4_TINIT5 * nmhz + 999) / 1000; + pdram_timing->trstl = (LPDDR4_TRSTL * nmhz + 999) / 1000; + pdram_timing->trsth = (LPDDR4_TRSTH * nmhz + 999) / 1000; + /* tREFI, average periodic refresh interval, 3.9us(4Gb-16Gb) */ + pdram_timing->trefi = (LPDDR4_TREFI_3_9_US * nmhz + 999) / 1000; + /* base timing */ + tmp = ((LPDDR4_TRCD * nmhz + 999) / 1000); + pdram_timing->trcd = max(4, tmp); + trppb_tmp = ((LPDDR4_TRP_PB * nmhz + 999) / 1000); + trppb_tmp = max(4, trppb_tmp); + pdram_timing->trppb = trppb_tmp; + trp_tmp = ((LPDDR4_TRP_AB * nmhz + 999) / 1000); + trp_tmp = max(4, trp_tmp); + pdram_timing->trp = trp_tmp; + tras_tmp = ((LPDDR4_TRAS * nmhz + 999) / 1000); + tras_tmp = max(3, tras_tmp); + pdram_timing->tras_min = tras_tmp; + pdram_timing->trc = (tras_tmp + trp_tmp); + tmp = ((LPDDR4_TRRD * nmhz + 999) / 1000); + pdram_timing->trrd = max(4, tmp); + if (timing_config->bl == 32) + pdram_timing->tccd = LPDDR4_TCCD_BL16; + else + pdram_timing->tccd = LPDDR4_TCCD_BL32; + pdram_timing->tccdmw = 4 * pdram_timing->tccd; + tmp = ((LPDDR4_TWTR * nmhz + 999) / 1000); + pdram_timing->twtr = max(8, tmp); + pdram_timing->trtw = ((LPDDR4_TRTW * nmhz + 
999) / 1000); + pdram_timing->tras_max = ((LPDDR4_TRAS_MAX * nmhz + 999) / 1000); + pdram_timing->tfaw = (LPDDR4_TFAW * nmhz + 999) / 1000; + if (ddr_capability_per_die > 0x60000000) { + /* >= 12Gb */ + pdram_timing->trfc = + (LPDDR4_TRFC_12GBIT * nmhz + 999) / 1000; + tmp = (((LPDDR4_TRFC_12GBIT + 7) * nmhz + (nmhz >> 1) + + 999) / 1000); + } else if (ddr_capability_per_die > 0x30000000) { + pdram_timing->trfc = + (LPDDR4_TRFC_6GBIT * nmhz + 999) / 1000; + tmp = (((LPDDR4_TRFC_6GBIT + 7) * nmhz + (nmhz >> 1) + + 999) / 1000); + } else { + pdram_timing->trfc = + (LPDDR4_TRFC_4GBIT * nmhz + 999) / 1000; + tmp = (((LPDDR4_TRFC_4GBIT + 7) * nmhz + (nmhz >> 1) + + 999) / 1000); + } + pdram_timing->txsr = max(2, tmp); + pdram_timing->txsnr = max(2, tmp); + /* tdqsck use rounded down */ + pdram_timing->tdqsck = ((LPDDR4_TDQSCK_MIN * nmhz + + (nmhz >> 1)) / 1000); + pdram_timing->tdqsck_max = ((LPDDR4_TDQSCK_MAX * nmhz + + (nmhz >> 1) + 999) / 1000); + pdram_timing->tppd = LPDDR4_TPPD; + /* pd and sr */ + tmp = ((LPDDR4_TXP * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->txp = max(5, tmp); + tmp = ((LPDDR4_TCKE * nmhz + (nmhz >> 1) + 999) / 1000); + pdram_timing->tcke = max(4, tmp); + tmp = ((LPDDR4_TESCKE * nmhz + + ((nmhz * 3) / 4) + + 999) / 1000); + pdram_timing->tescke = max(3, tmp); + tmp = ((LPDDR4_TSR * nmhz + 999) / 1000); + pdram_timing->tsr = max(3, tmp); + tmp = ((LPDDR4_TCMDCKE * nmhz + + ((nmhz * 3) / 4) + + 999) / 1000); + pdram_timing->tcmdcke = max(3, tmp); + pdram_timing->tcscke = ((LPDDR4_TCSCKE * nmhz + + ((nmhz * 3) / 4) + + 999) / 1000); + tmp = ((LPDDR4_TCKELCS * nmhz + 999) / 1000); + pdram_timing->tckelcs = max(5, tmp); + pdram_timing->tcsckeh = ((LPDDR4_TCSCKEH * nmhz + + ((nmhz * 3) / 4) + + 999) / 1000); + tmp = ((LPDDR4_TCKEHCS * nmhz + + (nmhz >> 1) + 999) / 1000); + pdram_timing->tckehcs = max(5, tmp); + tmp = ((LPDDR4_TMRWCKEL * nmhz + 999) / 1000); + pdram_timing->tmrwckel = max(10, tmp); + tmp = ((LPDDR4_TCKELCMD * nmhz + (nmhz >> 1) + + 999) / 1000); + pdram_timing->tckelcmd = max(3, tmp); + tmp = ((LPDDR4_TCKEHCMD * nmhz + (nmhz >> 1) + + 999) / 1000); + pdram_timing->tckehcmd = max(3, tmp); + tmp = ((LPDDR4_TCKELPD * nmhz + (nmhz >> 1) + + 999) / 1000); + pdram_timing->tckelpd = max(3, tmp); + tmp = ((LPDDR4_TCKCKEL * nmhz + (nmhz >> 1) + + 999) / 1000); + pdram_timing->tckckel = max(3, tmp); + /* mode register timing */ + tmp = ((LPDDR4_TMRD * nmhz + 999) / 1000); + pdram_timing->tmrd = max(10, tmp); + pdram_timing->tmrr = LPDDR4_TMRR; + pdram_timing->tmrri = pdram_timing->trcd + 3; + /* ODT */ + pdram_timing->todton = (LPDDR4_TODTON * nmhz + (nmhz >> 1) + 999) + / 1000; + /* ZQ */ + pdram_timing->tzqcal = (LPDDR4_TZQCAL * nmhz + 999) / 1000; + tmp = ((LPDDR4_TZQLAT * nmhz + 999) / 1000); + pdram_timing->tzqlat = max(8, tmp); + tmp = ((LPDDR4_TZQRESET * nmhz + 999) / 1000); + pdram_timing->tzqreset = max(3, tmp); + tmp = ((LPDDR4_TZQCKE * nmhz + + ((nmhz * 3) / 4) + + 999) / 1000); + pdram_timing->tzqcke = max(3, tmp); + /* write leveling */ + pdram_timing->twlmrd = LPDDR4_TWLMRD; + pdram_timing->twlo = (LPDDR4_TWLO * nmhz + 999) / 1000; + pdram_timing->twldqsen = LPDDR4_TWLDQSEN; + /* CA training */ + pdram_timing->tcaent = (LPDDR4_TCAENT * nmhz + 999) / 1000; + pdram_timing->tadr = (LPDDR4_TADR * nmhz + 999) / 1000; + pdram_timing->tmrz = (LPDDR4_TMRZ * nmhz + (nmhz >> 1) + 999) / 1000; + pdram_timing->tvref_long = (LPDDR4_TVREF_LONG * nmhz + 999) / 1000; + pdram_timing->tvref_short = (LPDDR4_TVREF_SHORT * nmhz + 999) / 1000; + /* VRCG */ + 
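	/*
	 * VRCG: VREF current generator settle times (assumed LPDDR4 MR13
	 * usage).  The 200ns enable / 100ns disable constants are converted
	 * to clock cycles with the usual round-up (x * nmhz + 999) / 1000.
	 */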
pdram_timing->tvrcg_enable = (LPDDR4_TVRCG_ENABLE * nmhz + + 999) / 1000; + pdram_timing->tvrcg_disable = (LPDDR4_TVRCG_DISABLE * nmhz + + 999) / 1000; + /* FSP */ + pdram_timing->tfc_long = (LPDDR4_TFC_LONG * nmhz + 999) / 1000; + tmp = (LPDDR4_TCKFSPE * nmhz + (nmhz >> 1) + 999) / 1000; + pdram_timing->tckfspe = max(4, tmp); + tmp = (LPDDR4_TCKFSPX * nmhz + (nmhz >> 1) + 999) / 1000; + pdram_timing->tckfspx = max(4, tmp); +} + +/* + * Description: depend on input parameter "timing_config", + * and calculate correspond "dram_type" + * spec timing to "pdram_timing" + * parameters: + * input: timing_config + * output: pdram_timing + * NOTE: MR ODT is set, need to disable by controller + */ +void dram_get_parameter(struct timing_related_config *timing_config, + struct dram_timing_t *pdram_timing) +{ + switch (timing_config->dram_type) { + case DDR3: + ddr3_get_parameter(timing_config, pdram_timing); + break; + case LPDDR2: + lpddr2_get_parameter(timing_config, pdram_timing); + break; + case LPDDR3: + lpddr3_get_parameter(timing_config, pdram_timing); + break; + case LPDDR4: + lpddr4_get_parameter(timing_config, pdram_timing); + break; + default: + /* Do nothing in default case */ + break; + } +} diff --git a/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.h b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.h new file mode 100644 index 0000000..9cda22c --- /dev/null +++ b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.h @@ -0,0 +1,507 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef DRAM_SPEC_TIMING_H +#define DRAM_SPEC_TIMING_H + +#include <stdint.h> + +enum ddr3_speed_rate { + /* 5-5-5 */ + DDR3_800D = 0, + /* 6-6-6 */ + DDR3_800E = 1, + /* 6-6-6 */ + DDR3_1066E = 2, + /* 7-7-7 */ + DDR3_1066F = 3, + /* 8-8-8 */ + DDR3_1066G = 4, + /* 7-7-7 */ + DDR3_1333F = 5, + /* 8-8-8 */ + DDR3_1333G = 6, + /* 9-9-9 */ + DDR3_1333H = 7, + /* 10-10-10 */ + DDR3_1333J = 8, + /* 8-8-8 */ + DDR3_1600G = 9, + /* 9-9-9 */ + DDR3_1600H = 10, + /* 10-10-10 */ + DDR3_1600J = 11, + /* 11-11-11 */ + DDR3_1600K = 12, + /* 10-10-10 */ + DDR3_1866J = 13, + /* 11-11-11 */ + DDR3_1866K = 14, + /* 12-12-12 */ + DDR3_1866L = 15, + /* 13-13-13 */ + DDR3_1866M = 16, + /* 11-11-11 */ + DDR3_2133K = 17, + /* 12-12-12 */ + DDR3_2133L = 18, + /* 13-13-13 */ + DDR3_2133M = 19, + /* 14-14-14 */ + DDR3_2133N = 20, + DDR3_DEFAULT = 21, +}; + +#define max(a, b) (((a) > (b)) ? (a) : (b)) +#define range(mi, val, ma) (((ma) > (val)) ? 
(max(mi, val)) : (ma)) + +struct dram_timing_t { + /* unit MHz */ + uint32_t mhz; + /* some timing unit is us */ + uint32_t tinit1; + uint32_t tinit2; + uint32_t tinit3; + uint32_t tinit4; + uint32_t tinit5; + /* reset low, DDR3:200us */ + uint32_t trstl; + /* reset high to CKE high, DDR3:500us */ + uint32_t trsth; + uint32_t trefi; + /* base */ + uint32_t trcd; + /* trp per bank */ + uint32_t trppb; + /* trp all bank */ + uint32_t trp; + uint32_t twr; + uint32_t tdal; + uint32_t trtp; + uint32_t trc; + uint32_t trrd; + uint32_t tccd; + uint32_t twtr; + uint32_t trtw; + uint32_t tras_max; + uint32_t tras_min; + uint32_t tfaw; + uint32_t trfc; + uint32_t tdqsck; + uint32_t tdqsck_max; + /* pd or sr */ + uint32_t txsr; + uint32_t txsnr; + uint32_t txp; + uint32_t txpdll; + uint32_t tdllk; + uint32_t tcke; + uint32_t tckesr; + uint32_t tcksre; + uint32_t tcksrx; + uint32_t tdpd; + /* mode regiter timing */ + uint32_t tmod; + uint32_t tmrd; + uint32_t tmrr; + uint32_t tmrri; + /* ODT */ + uint32_t todton; + /* ZQ */ + uint32_t tzqinit; + uint32_t tzqcs; + uint32_t tzqoper; + uint32_t tzqreset; + /* Write Leveling */ + uint32_t twlmrd; + uint32_t twlo; + uint32_t twldqsen; + /* CA Training */ + uint32_t tcackel; + uint32_t tcaent; + uint32_t tcamrd; + uint32_t tcackeh; + uint32_t tcaext; + uint32_t tadr; + uint32_t tmrz; + uint32_t tcacd; + /* mode register */ + uint32_t mr[4]; + uint32_t mr11; + /* lpddr4 spec */ + uint32_t mr12; + uint32_t mr13; + uint32_t mr14; + uint32_t mr16; + uint32_t mr17; + uint32_t mr20; + uint32_t mr22; + uint32_t tccdmw; + uint32_t tppd; + uint32_t tescke; + uint32_t tsr; + uint32_t tcmdcke; + uint32_t tcscke; + uint32_t tckelcs; + uint32_t tcsckeh; + uint32_t tckehcs; + uint32_t tmrwckel; + uint32_t tzqcal; + uint32_t tzqlat; + uint32_t tzqcke; + uint32_t tvref_long; + uint32_t tvref_short; + uint32_t tvrcg_enable; + uint32_t tvrcg_disable; + uint32_t tfc_long; + uint32_t tckfspe; + uint32_t tckfspx; + uint32_t tckehcmd; + uint32_t tckelcmd; + uint32_t tckelpd; + uint32_t tckckel; + /* other */ + uint32_t al; + uint32_t cl; + uint32_t cwl; + uint32_t bl; +}; + +struct dram_info_t { + /* speed_rate only used when DDR3 */ + enum ddr3_speed_rate speed_rate; + /* 1: use CS0, 2: use CS0 and CS1 */ + uint32_t cs_cnt; + /* give the max per-die capability on each rank/cs */ + uint32_t per_die_capability[2]; +}; + +struct timing_related_config { + struct dram_info_t dram_info[2]; + uint32_t dram_type; + /* MHz */ + uint32_t freq; + uint32_t ch_cnt; + uint32_t bl; + /* 1:auto precharge, 0:never auto precharge */ + uint32_t ap; + /* + * 1:dll bypass, 0:dll normal + * dram and controller dll bypass at the same time + */ + uint32_t dllbp; + /* 1:odt enable, 0:odt disable */ + uint32_t odt; + /* 1:enable, 0:disabe */ + uint32_t rdbi; + uint32_t wdbi; + /* dram driver strength */ + uint32_t dramds; + /* dram ODT, if odt=0, this parameter invalid */ + uint32_t dramodt; + /* + * ca ODT, if odt=0, this parameter invalid + * it only used by LPDDR4 + */ + uint32_t caodt; +}; + +/* mr0 for ddr3 */ +#define DDR3_BL8 (0) +#define DDR3_BC4_8 (1) +#define DDR3_BC4 (2) +#define DDR3_CL(n) (((((n) - 4) & 0x7) << 4)\ + | ((((n) - 4) & 0x8) >> 1)) +#define DDR3_WR(n) (((n) & 0x7) << 9) +#define DDR3_DLL_RESET (1 << 8) +#define DDR3_DLL_DERESET (0 << 8) + +/* mr1 for ddr3 */ +#define DDR3_DLL_ENABLE (0) +#define DDR3_DLL_DISABLE (1) +#define DDR3_MR1_AL(n) (((n) & 0x3) << 3) + +#define DDR3_DS_40 (0) +#define DDR3_DS_34 (1 << 1) +#define DDR3_RTT_NOM_DIS (0) +#define DDR3_RTT_NOM_60 (1 << 2) 
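/*
 * Worked example (editorial aside, not part of the original patch): DDR3_CL()
 * above splits (CL - 4) across two MR0 bit groups -- its low three bits land
 * in bits [6:4] and its fourth bit lands in bit 2 -- so for instance:
 *
 *	DDR3_CL(11) == (0x7 << 4) | 0x0 == 0x70
 *	DDR3_CL(12) == (0x0 << 4) | 0x4 == 0x04
 *
 * The encoded value is then OR-ed together with the burst-length,
 * write-recovery and DLL-reset fields when the DDR3 code assembles mr[0].
 */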
+#define DDR3_RTT_NOM_120 (1 << 6) +#define DDR3_RTT_NOM_40 ((1 << 2) | (1 << 6)) +#define DDR3_TDQS (1 << 11) + +/* mr2 for ddr3 */ +#define DDR3_MR2_CWL(n) ((((n) - 5) & 0x7) << 3) +#define DDR3_RTT_WR_DIS (0) +#define DDR3_RTT_WR_60 (1 << 9) +#define DDR3_RTT_WR_120 (2 << 9) + +/* + * MR0 (Device Information) + * 0:DAI complete, 1:DAI still in progress + */ +#define LPDDR2_DAI (0x1) +/* 0:S2 or S4 SDRAM, 1:NVM */ +#define LPDDR2_DI (0x1 << 1) +/* 0:DNV not supported, 1:DNV supported */ +#define LPDDR2_DNVI (0x1 << 2) +#define LPDDR2_RZQI (0x3 << 3) + +/* + * 00:RZQ self test not supported, + * 01:ZQ-pin may connect to VDDCA or float + * 10:ZQ-pin may short to GND. + * 11:ZQ-pin self test completed, no error condition detected. + */ + +/* MR1 (Device Feature) */ +#define LPDDR2_BL4 (0x2) +#define LPDDR2_BL8 (0x3) +#define LPDDR2_BL16 (0x4) +#define LPDDR2_N_WR(n) (((n) - 2) << 5) + +/* MR2 (Device Feature 2) */ +#define LPDDR2_RL3_WL1 (0x1) +#define LPDDR2_RL4_WL2 (0x2) +#define LPDDR2_RL5_WL2 (0x3) +#define LPDDR2_RL6_WL3 (0x4) +#define LPDDR2_RL7_WL4 (0x5) +#define LPDDR2_RL8_WL4 (0x6) + +/* MR3 (IO Configuration 1) */ +#define LPDDR2_DS_34 (0x1) +#define LPDDR2_DS_40 (0x2) +#define LPDDR2_DS_48 (0x3) +#define LPDDR2_DS_60 (0x4) +#define LPDDR2_DS_80 (0x6) +/* optional */ +#define LPDDR2_DS_120 (0x7) + +/* MR4 (Device Temperature) */ +#define LPDDR2_TREF_MASK (0x7) +#define LPDDR2_4_TREF (0x1) +#define LPDDR2_2_TREF (0x2) +#define LPDDR2_1_TREF (0x3) +#define LPDDR2_025_TREF (0x5) +#define LPDDR2_025_TREF_DERATE (0x6) + +#define LPDDR2_TUF (0x1 << 7) + +/* MR8 (Basic configuration 4) */ +#define LPDDR2_S4 (0x0) +#define LPDDR2_S2 (0x1) +#define LPDDR2_N (0x2) +/* Unit:MB */ +#define LPDDR2_DENSITY(mr8) (8 << (((mr8) >> 2) & 0xf)) +#define LPDDR2_IO_WIDTH(mr8) (32 >> (((mr8) >> 6) & 0x3)) + +/* MR10 (Calibration) */ +#define LPDDR2_ZQINIT (0xff) +#define LPDDR2_ZQCL (0xab) +#define LPDDR2_ZQCS (0x56) +#define LPDDR2_ZQRESET (0xc3) + +/* MR16 (PASR Bank Mask), S2 SDRAM Only */ +#define LPDDR2_PASR_FULL (0x0) +#define LPDDR2_PASR_1_2 (0x1) +#define LPDDR2_PASR_1_4 (0x2) +#define LPDDR2_PASR_1_8 (0x3) + +/* + * MR0 (Device Information) + * 0:DAI complete, + * 1:DAI still in progress + */ +#define LPDDR3_DAI (0x1) +/* + * 00:RZQ self test not supported, + * 01:ZQ-pin may connect to VDDCA or float + * 10:ZQ-pin may short to GND. + * 11:ZQ-pin self test completed, no error condition detected. 
+ */ +#define LPDDR3_RZQI (0x3 << 3) +/* + * 0:DRAM does not support WL(Set B), + * 1:DRAM support WL(Set B) + */ +#define LPDDR3_WL_SUPOT (1 << 6) +/* + * 0:DRAM does not support RL=3,nWR=3,WL=1; + * 1:DRAM supports RL=3,nWR=3,WL=1 for frequencies <=166 + */ +#define LPDDR3_RL3_SUPOT (1 << 7) + +/* MR1 (Device Feature) */ +#define LPDDR3_BL8 (0x3) +#define LPDDR3_N_WR(n) ((n) << 5) + +/* MR2 (Device Feature 2), WL Set A,default */ +/* <=166MHz,optional*/ +#define LPDDR3_RL3_WL1 (0x1) +/* <=400MHz*/ +#define LPDDR3_RL6_WL3 (0x4) +/* <=533MHz*/ +#define LPDDR3_RL8_WL4 (0x6) +/* <=600MHz*/ +#define LPDDR3_RL9_WL5 (0x7) +/* <=667MHz,default*/ +#define LPDDR3_RL10_WL6 (0x8) +/* <=733MHz*/ +#define LPDDR3_RL11_WL6 (0x9) +/* <=800MHz*/ +#define LPDDR3_RL12_WL6 (0xa) +/* <=933MHz*/ +#define LPDDR3_RL14_WL8 (0xc) +/* <=1066MHz*/ +#define LPDDR3_RL16_WL8 (0xe) + +/* WL Set B, optional */ +/* <=667MHz,default*/ +#define LPDDR3_RL10_WL8 (0x8) +/* <=733MHz*/ +#define LPDDR3_RL11_WL9 (0x9) +/* <=800MHz*/ +#define LPDDR3_RL12_WL9 (0xa) +/* <=933MHz*/ +#define LPDDR3_RL14_WL11 (0xc) +/* <=1066MHz*/ +#define LPDDR3_RL16_WL13 (0xe) + +/* 1:enable nWR programming > 9(default)*/ +#define LPDDR3_N_WRE (1 << 4) +/* 1:Select WL Set B*/ +#define LPDDR3_WL_S (1 << 6) +/* 1:enable*/ +#define LPDDR3_WR_LEVEL (1 << 7) + +/* MR3 (IO Configuration 1) */ +#define LPDDR3_DS_34 (0x1) +#define LPDDR3_DS_40 (0x2) +#define LPDDR3_DS_48 (0x3) +#define LPDDR3_DS_60 (0x4) +#define LPDDR3_DS_80 (0x6) +#define LPDDR3_DS_34D_40U (0x9) +#define LPDDR3_DS_40D_48U (0xa) +#define LPDDR3_DS_34D_48U (0xb) + +/* MR4 (Device Temperature) */ +#define LPDDR3_TREF_MASK (0x7) +/* SDRAM Low temperature operating limit exceeded */ +#define LPDDR3_LT_EXED (0x0) +#define LPDDR3_4_TREF (0x1) +#define LPDDR3_2_TREF (0x2) +#define LPDDR3_1_TREF (0x3) +#define LPDDR3_05_TREF (0x4) +#define LPDDR3_025_TREF (0x5) +#define LPDDR3_025_TREF_DERATE (0x6) +/* SDRAM High temperature operating limit exceeded */ +#define LPDDR3_HT_EXED (0x7) + +/* 1:value has changed since last read of MR4 */ +#define LPDDR3_TUF (0x1 << 7) + +/* MR8 (Basic configuration 4) */ +#define LPDDR3_S8 (0x3) +#define LPDDR3_DENSITY(mr8) (8 << (((mr8) >> 2) & 0xf)) +#define LPDDR3_IO_WIDTH(mr8) (32 >> (((mr8) >> 6) & 0x3)) + +/* MR10 (Calibration) */ +#define LPDDR3_ZQINIT (0xff) +#define LPDDR3_ZQCL (0xab) +#define LPDDR3_ZQCS (0x56) +#define LPDDR3_ZQRESET (0xc3) + +/* MR11 (ODT Control) */ +#define LPDDR3_ODT_60 (1) +#define LPDDR3_ODT_120 (2) +#define LPDDR3_ODT_240 (3) +#define LPDDR3_ODT_DIS (0) + +/* MR2 (Device Feature 2) */ +/* RL & nRTP for DBI-RD Disabled */ +#define LPDDR4_RL6_NRTP8 (0x0) +#define LPDDR4_RL10_NRTP8 (0x1) +#define LPDDR4_RL14_NRTP8 (0x2) +#define LPDDR4_RL20_NRTP8 (0x3) +#define LPDDR4_RL24_NRTP10 (0x4) +#define LPDDR4_RL28_NRTP12 (0x5) +#define LPDDR4_RL32_NRTP14 (0x6) +#define LPDDR4_RL36_NRTP16 (0x7) +/* RL & nRTP for DBI-RD Disabled */ +#define LPDDR4_RL12_NRTP8 (0x1) +#define LPDDR4_RL16_NRTP8 (0x2) +#define LPDDR4_RL22_NRTP8 (0x3) +#define LPDDR4_RL28_NRTP10 (0x4) +#define LPDDR4_RL32_NRTP12 (0x5) +#define LPDDR4_RL36_NRTP14 (0x6) +#define LPDDR4_RL40_NRTP16 (0x7) +/* WL Set A,default */ +#define LPDDR4_A_WL4 (0x0) +#define LPDDR4_A_WL6 (0x1) +#define LPDDR4_A_WL8 (0x2) +#define LPDDR4_A_WL10 (0x3) +#define LPDDR4_A_WL12 (0x4) +#define LPDDR4_A_WL14 (0x5) +#define LPDDR4_A_WL16 (0x6) +#define LPDDR4_A_WL18 (0x7) +/* WL Set B, optional */ +#define LPDDR4_B_WL4 (0x0 << 3) +#define LPDDR4_B_WL8 (0x1 << 3) +#define LPDDR4_B_WL12 (0x2 << 3) +#define 
LPDDR4_B_WL18 (0x3 << 3) +#define LPDDR4_B_WL22 (0x4 << 3) +#define LPDDR4_B_WL26 (0x5 << 3) +#define LPDDR4_B_WL30 (0x6 << 3) +#define LPDDR4_B_WL34 (0x7 << 3) +/* 1:Select WL Set B*/ +#define LPDDR4_WL_B (1 << 6) +/* 1:enable*/ +#define LPDDR4_WR_LEVEL (1 << 7) + +/* MR3 */ +#define LPDDR4_VDDQ_2_5 (0) +#define LPDDR4_VDDQ_3 (1) +#define LPDDR4_WRPST_0_5_TCK (0 << 1) +#define LPDDR4_WRPST_1_5_TCK (1 << 1) +#define LPDDR4_PPR_EN (1 << 2) +/* PDDS */ +#define LPDDR4_PDDS_240 (0x1 << 3) +#define LPDDR4_PDDS_120 (0x2 << 3) +#define LPDDR4_PDDS_80 (0x3 << 3) +#define LPDDR4_PDDS_60 (0x4 << 3) +#define LPDDR4_PDDS_48 (0x5 << 3) +#define LPDDR4_PDDS_40 (0x6 << 3) +#define LPDDR4_DBI_RD_EN (1 << 6) +#define LPDDR4_DBI_WR_EN (1 << 7) + +/* MR11 (ODT Control) */ +#define LPDDR4_DQODT_240 (1) +#define LPDDR4_DQODT_120 (2) +#define LPDDR4_DQODT_80 (3) +#define LPDDR4_DQODT_60 (4) +#define LPDDR4_DQODT_48 (5) +#define LPDDR4_DQODT_40 (6) +#define LPDDR4_DQODT_DIS (0) +#define LPDDR4_CAODT_240 (1 << 4) +#define LPDDR4_CAODT_120 (2 << 4) +#define LPDDR4_CAODT_80 (3 << 4) +#define LPDDR4_CAODT_60 (4 << 4) +#define LPDDR4_CAODT_48 (5 << 4) +#define LPDDR4_CAODT_40 (6 << 4) +#define LPDDR4_CAODT_DIS (0 << 4) + +/* + * Description: depend on input parameter "timing_config", + * and calculate correspond "dram_type" + * spec timing to "pdram_timing" + * parameters: + * input: timing_config + * output: pdram_timing + * NOTE: MR ODT is set, need to disable by controller + */ +void dram_get_parameter(struct timing_related_config *timing_config, + struct dram_timing_t *pdram_timing); + +#endif /* DRAM_SPEC_TIMING_H */ diff --git a/plat/rockchip/rk3399/drivers/dram/suspend.c b/plat/rockchip/rk3399/drivers/dram/suspend.c new file mode 100644 index 0000000..a8b1c32 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/dram/suspend.c @@ -0,0 +1,852 @@ +/* + * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> + +#include <dram.h> +#include <plat_private.h> +#include <pmu.h> +#include <pmu_bits.h> +#include <pmu_regs.h> +#include <rk3399_def.h> +#include <secure.h> +#include <soc.h> +#include <suspend.h> + +#define PMUGRF_OS_REG0 0x300 +#define PMUGRF_OS_REG1 0x304 +#define PMUGRF_OS_REG2 0x308 +#define PMUGRF_OS_REG3 0x30c + +#define CRU_SFTRST_DDR_CTRL(ch, n) ((0x1 << (8 + 16 + (ch) * 4)) | \ + ((n) << (8 + (ch) * 4))) +#define CRU_SFTRST_DDR_PHY(ch, n) ((0x1 << (9 + 16 + (ch) * 4)) | \ + ((n) << (9 + (ch) * 4))) + +#define FBDIV_ENC(n) ((n) << 16) +#define FBDIV_DEC(n) (((n) >> 16) & 0xfff) +#define POSTDIV2_ENC(n) ((n) << 12) +#define POSTDIV2_DEC(n) (((n) >> 12) & 0x7) +#define POSTDIV1_ENC(n) ((n) << 8) +#define POSTDIV1_DEC(n) (((n) >> 8) & 0x7) +#define REFDIV_ENC(n) (n) +#define REFDIV_DEC(n) ((n) & 0x3f) + +/* PMU CRU */ +#define PMUCRU_RSTNHOLD_CON0 0x120 +#define PMUCRU_RSTNHOLD_CON1 0x124 + +#define PRESET_GPIO0_HOLD(n) (((n) << 7) | WMSK_BIT(7)) +#define PRESET_GPIO1_HOLD(n) (((n) << 8) | WMSK_BIT(8)) + +#define SYS_COUNTER_FREQ_IN_MHZ (SYS_COUNTER_FREQ_IN_TICKS / 1000000) + +__pmusramdata uint32_t dpll_data[PLL_CON_COUNT]; +__pmusramdata uint32_t cru_clksel_con6; +__pmusramdata uint8_t pmu_enable_watchdog0; + +/* + * Copy @num registers from @src to @dst + */ +static __pmusramfunc void sram_regcpy(uintptr_t dst, uintptr_t src, + uint32_t num) +{ + while (num--) { + mmio_write_32(dst, mmio_read_32(src)); + dst += sizeof(uint32_t); + src += sizeof(uint32_t); + } +} + +/* + * Copy @num registers from @src to @dst + * This is intentionally a copy of the sram_regcpy function. PMUSRAM functions + * cannot be called from code running in DRAM. + */ +static void dram_regcpy(uintptr_t dst, uintptr_t src, uint32_t num) +{ + while (num--) { + mmio_write_32(dst, mmio_read_32(src)); + dst += sizeof(uint32_t); + src += sizeof(uint32_t); + } +} + +static __pmusramfunc uint32_t sram_get_timer_value(void) +{ + /* + * Generic delay timer implementation expects the timer to be a down + * counter. We apply bitwise NOT operator to the tick values returned + * by read_cntpct_el0() to simulate the down counter. + */ + return (uint32_t)(~read_cntpct_el0()); +} + +static __pmusramfunc void sram_udelay(uint32_t usec) +{ + uint32_t start, cnt, delta, total_ticks; + + /* counter is decreasing */ + start = sram_get_timer_value(); + total_ticks = usec * SYS_COUNTER_FREQ_IN_MHZ; + do { + cnt = sram_get_timer_value(); + if (cnt > start) { + delta = UINT32_MAX - cnt; + delta += start; + } else + delta = start - cnt; + } while (delta <= total_ticks); +} + +static __pmusramfunc void configure_sgrf(void) +{ + /* + * SGRF_DDR_RGN_DPLL_CLK and SGRF_DDR_RGN_RTC_CLK: + * IC ECO bug, need to set this register. + * + * SGRF_DDR_RGN_BYPS: + * After the PD_CENTER suspend/resume, the DDR region + * related registers in the SGRF will be reset, we + * need to re-initialize them. 
+ */ + mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16), + SGRF_DDR_RGN_DPLL_CLK | + SGRF_DDR_RGN_RTC_CLK | + SGRF_DDR_RGN_BYPS); +} + +static __pmusramfunc void rkclk_ddr_reset(uint32_t channel, uint32_t ctl, + uint32_t phy) +{ + channel &= 0x1; + ctl &= 0x1; + phy &= 0x1; + mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(4), + CRU_SFTRST_DDR_CTRL(channel, ctl) | + CRU_SFTRST_DDR_PHY(channel, phy)); +} + +static __pmusramfunc void phy_pctrl_reset(uint32_t ch) +{ + rkclk_ddr_reset(ch, 1, 1); + sram_udelay(10); + rkclk_ddr_reset(ch, 1, 0); + sram_udelay(10); + rkclk_ddr_reset(ch, 0, 0); + sram_udelay(10); +} + +static __pmusramfunc void set_cs_training_index(uint32_t ch, uint32_t rank) +{ + uint32_t byte; + + /* PHY_8/136/264/392 phy_per_cs_training_index_X 1bit offset_24 */ + for (byte = 0; byte < 4; byte++) + mmio_clrsetbits_32(PHY_REG(ch, 8 + (128 * byte)), 0x1 << 24, + rank << 24); +} + +static __pmusramfunc void select_per_cs_training_index(uint32_t ch, + uint32_t rank) +{ + /* PHY_84 PHY_PER_CS_TRAINING_EN_0 1bit offset_16 */ + if ((mmio_read_32(PHY_REG(ch, 84)) >> 16) & 1) + set_cs_training_index(ch, rank); +} + +static __pmusramfunc void override_write_leveling_value(uint32_t ch) +{ + uint32_t byte; + + for (byte = 0; byte < 4; byte++) { + /* + * PHY_8/136/264/392 + * phy_per_cs_training_multicast_en_X 1bit offset_16 + */ + mmio_clrsetbits_32(PHY_REG(ch, 8 + (128 * byte)), 0x1 << 16, + 1 << 16); + mmio_clrsetbits_32(PHY_REG(ch, 63 + (128 * byte)), + 0xffffu << 16, + 0x200 << 16); + } + + /* CTL_200 ctrlupd_req 1bit offset_8 */ + mmio_clrsetbits_32(CTL_REG(ch, 200), 0x1 << 8, 0x1 << 8); +} + +static __pmusramfunc int data_training(uint32_t ch, + struct rk3399_sdram_params *sdram_params, + uint32_t training_flag) +{ + uint32_t obs_0, obs_1, obs_2, obs_3, obs_err = 0; + uint32_t rank = sdram_params->ch[ch].rank; + uint32_t rank_mask; + uint32_t i, tmp; + + if (sdram_params->dramtype == LPDDR4) + rank_mask = (rank == 1) ? 0x5 : 0xf; + else + rank_mask = (rank == 1) ? 
0x1 : 0x3; + + /* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */ + mmio_setbits_32(PHY_REG(ch, 927), (1 << 22)); + + if (training_flag == PI_FULL_TRAINING) { + if (sdram_params->dramtype == LPDDR4) { + training_flag = PI_WRITE_LEVELING | + PI_READ_GATE_TRAINING | + PI_READ_LEVELING | + PI_WDQ_LEVELING; + } else if (sdram_params->dramtype == LPDDR3) { + training_flag = PI_CA_TRAINING | PI_WRITE_LEVELING | + PI_READ_GATE_TRAINING; + } else if (sdram_params->dramtype == DDR3) { + training_flag = PI_WRITE_LEVELING | + PI_READ_GATE_TRAINING | + PI_READ_LEVELING; + } + } + + /* ca training(LPDDR4,LPDDR3 support) */ + if ((training_flag & PI_CA_TRAINING) == PI_CA_TRAINING) { + for (i = 0; i < 4; i++) { + if (!(rank_mask & (1 << i))) + continue; + + select_per_cs_training_index(ch, i); + /* PI_100 PI_CALVL_EN:RW:8:2 */ + mmio_clrsetbits_32(PI_REG(ch, 100), 0x3 << 8, 0x2 << 8); + + /* PI_92 PI_CALVL_REQ:WR:16:1,PI_CALVL_CS:RW:24:2 */ + mmio_clrsetbits_32(PI_REG(ch, 92), + (0x1 << 16) | (0x3 << 24), + (0x1 << 16) | (i << 24)); + while (1) { + /* PI_174 PI_INT_STATUS:RD:8:18 */ + tmp = mmio_read_32(PI_REG(ch, 174)) >> 8; + + /* + * check status obs + * PHY_532/660/788 phy_adr_calvl_obs1_:0:32 + */ + obs_0 = mmio_read_32(PHY_REG(ch, 532)); + obs_1 = mmio_read_32(PHY_REG(ch, 660)); + obs_2 = mmio_read_32(PHY_REG(ch, 788)); + if (((obs_0 >> 30) & 0x3) || + ((obs_1 >> 30) & 0x3) || + ((obs_2 >> 30) & 0x3)) + obs_err = 1; + if ((((tmp >> 11) & 0x1) == 0x1) && + (((tmp >> 13) & 0x1) == 0x1) && + (((tmp >> 5) & 0x1) == 0x0) && + (obs_err == 0)) + break; + else if ((((tmp >> 5) & 0x1) == 0x1) || + (obs_err == 1)) + return -1; + } + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ + mmio_write_32(PI_REG(ch, 175), 0x00003f7c); + } + mmio_clrbits_32(PI_REG(ch, 100), 0x3 << 8); + } + + /* write leveling(LPDDR4,LPDDR3,DDR3 support) */ + if ((training_flag & PI_WRITE_LEVELING) == PI_WRITE_LEVELING) { + for (i = 0; i < rank; i++) { + select_per_cs_training_index(ch, i); + /* PI_60 PI_WRLVL_EN:RW:8:2 */ + mmio_clrsetbits_32(PI_REG(ch, 60), 0x3 << 8, 0x2 << 8); + /* PI_59 PI_WRLVL_REQ:WR:8:1,PI_WRLVL_CS:RW:16:2 */ + mmio_clrsetbits_32(PI_REG(ch, 59), + (0x1 << 8) | (0x3 << 16), + (0x1 << 8) | (i << 16)); + + while (1) { + /* PI_174 PI_INT_STATUS:RD:8:18 */ + tmp = mmio_read_32(PI_REG(ch, 174)) >> 8; + + /* + * check status obs, if error maybe can not + * get leveling done PHY_40/168/296/424 + * phy_wrlvl_status_obs_X:0:13 + */ + obs_0 = mmio_read_32(PHY_REG(ch, 40)); + obs_1 = mmio_read_32(PHY_REG(ch, 168)); + obs_2 = mmio_read_32(PHY_REG(ch, 296)); + obs_3 = mmio_read_32(PHY_REG(ch, 424)); + if (((obs_0 >> 12) & 0x1) || + ((obs_1 >> 12) & 0x1) || + ((obs_2 >> 12) & 0x1) || + ((obs_3 >> 12) & 0x1)) + obs_err = 1; + if ((((tmp >> 10) & 0x1) == 0x1) && + (((tmp >> 13) & 0x1) == 0x1) && + (((tmp >> 4) & 0x1) == 0x0) && + (obs_err == 0)) + break; + else if ((((tmp >> 4) & 0x1) == 0x1) || + (obs_err == 1)) + return -1; + } + + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ + mmio_write_32(PI_REG(ch, 175), 0x00003f7c); + } + override_write_leveling_value(ch); + mmio_clrbits_32(PI_REG(ch, 60), 0x3 << 8); + } + + /* read gate training(LPDDR4,LPDDR3,DDR3 support) */ + if ((training_flag & PI_READ_GATE_TRAINING) == PI_READ_GATE_TRAINING) { + for (i = 0; i < rank; i++) { + select_per_cs_training_index(ch, i); + /* PI_80 PI_RDLVL_GATE_EN:RW:24:2 */ + mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 24, + 0x2 << 24); + /* + * PI_74 PI_RDLVL_GATE_REQ:WR:16:1 + * PI_RDLVL_CS:RW:24:2 + */ + mmio_clrsetbits_32(PI_REG(ch, 74), + (0x1 << 16) | 
(0x3 << 24), + (0x1 << 16) | (i << 24)); + + while (1) { + /* PI_174 PI_INT_STATUS:RD:8:18 */ + tmp = mmio_read_32(PI_REG(ch, 174)) >> 8; + + /* + * check status obs + * PHY_43/171/299/427 + * PHY_GTLVL_STATUS_OBS_x:16:8 + */ + obs_0 = mmio_read_32(PHY_REG(ch, 43)); + obs_1 = mmio_read_32(PHY_REG(ch, 171)); + obs_2 = mmio_read_32(PHY_REG(ch, 299)); + obs_3 = mmio_read_32(PHY_REG(ch, 427)); + if (((obs_0 >> (16 + 6)) & 0x3) || + ((obs_1 >> (16 + 6)) & 0x3) || + ((obs_2 >> (16 + 6)) & 0x3) || + ((obs_3 >> (16 + 6)) & 0x3)) + obs_err = 1; + if ((((tmp >> 9) & 0x1) == 0x1) && + (((tmp >> 13) & 0x1) == 0x1) && + (((tmp >> 3) & 0x1) == 0x0) && + (obs_err == 0)) + break; + else if ((((tmp >> 3) & 0x1) == 0x1) || + (obs_err == 1)) + return -1; + } + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ + mmio_write_32(PI_REG(ch, 175), 0x00003f7c); + } + mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 24); + } + + /* read leveling(LPDDR4,LPDDR3,DDR3 support) */ + if ((training_flag & PI_READ_LEVELING) == PI_READ_LEVELING) { + for (i = 0; i < rank; i++) { + select_per_cs_training_index(ch, i); + /* PI_80 PI_RDLVL_EN:RW:16:2 */ + mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 16, + 0x2 << 16); + /* PI_74 PI_RDLVL_REQ:WR:8:1,PI_RDLVL_CS:RW:24:2 */ + mmio_clrsetbits_32(PI_REG(ch, 74), + (0x1 << 8) | (0x3 << 24), + (0x1 << 8) | (i << 24)); + while (1) { + /* PI_174 PI_INT_STATUS:RD:8:18 */ + tmp = mmio_read_32(PI_REG(ch, 174)) >> 8; + + /* + * make sure status obs not report error bit + * PHY_46/174/302/430 + * phy_rdlvl_status_obs_X:16:8 + */ + if ((((tmp >> 8) & 0x1) == 0x1) && + (((tmp >> 13) & 0x1) == 0x1) && + (((tmp >> 2) & 0x1) == 0x0)) + break; + else if (((tmp >> 2) & 0x1) == 0x1) + return -1; + } + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ + mmio_write_32(PI_REG(ch, 175), 0x00003f7c); + } + mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 16); + } + + /* wdq leveling(LPDDR4 support) */ + if ((training_flag & PI_WDQ_LEVELING) == PI_WDQ_LEVELING) { + for (i = 0; i < 4; i++) { + if (!(rank_mask & (1 << i))) + continue; + + select_per_cs_training_index(ch, i); + /* + * disable PI_WDQLVL_VREF_EN before wdq leveling? 
+ * PI_181 PI_WDQLVL_VREF_EN:RW:8:1 + */ + mmio_clrbits_32(PI_REG(ch, 181), 0x1 << 8); + /* PI_124 PI_WDQLVL_EN:RW:16:2 */ + mmio_clrsetbits_32(PI_REG(ch, 124), 0x3 << 16, + 0x2 << 16); + /* PI_121 PI_WDQLVL_REQ:WR:8:1,PI_WDQLVL_CS:RW:16:2 */ + mmio_clrsetbits_32(PI_REG(ch, 121), + (0x1 << 8) | (0x3 << 16), + (0x1 << 8) | (i << 16)); + while (1) { + /* PI_174 PI_INT_STATUS:RD:8:18 */ + tmp = mmio_read_32(PI_REG(ch, 174)) >> 8; + if ((((tmp >> 12) & 0x1) == 0x1) && + (((tmp >> 13) & 0x1) == 0x1) && + (((tmp >> 6) & 0x1) == 0x0)) + break; + else if (((tmp >> 6) & 0x1) == 0x1) + return -1; + } + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ + mmio_write_32(PI_REG(ch, 175), 0x00003f7c); + } + mmio_clrbits_32(PI_REG(ch, 124), 0x3 << 16); + } + + /* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */ + mmio_clrbits_32(PHY_REG(ch, 927), (1 << 22)); + + return 0; +} + +static __pmusramfunc void set_ddrconfig( + struct rk3399_sdram_params *sdram_params, + unsigned char channel, uint32_t ddrconfig) +{ + /* only need to set ddrconfig */ + struct rk3399_sdram_channel *ch = &sdram_params->ch[channel]; + unsigned int cs0_cap = 0; + unsigned int cs1_cap = 0; + + cs0_cap = (1 << (ch->cs0_row + ch->col + ch->bk + ch->bw - 20)); + if (ch->rank > 1) + cs1_cap = cs0_cap >> (ch->cs0_row - ch->cs1_row); + if (ch->row_3_4) { + cs0_cap = cs0_cap * 3 / 4; + cs1_cap = cs1_cap * 3 / 4; + } + + mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICECONF, + ddrconfig | (ddrconfig << 6)); + mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICESIZE, + ((cs0_cap / 32) & 0xff) | (((cs1_cap / 32) & 0xff) << 8)); +} + +static __pmusramfunc void dram_all_config( + struct rk3399_sdram_params *sdram_params) +{ + unsigned int i; + + for (i = 0; i < 2; i++) { + struct rk3399_sdram_channel *info = &sdram_params->ch[i]; + struct rk3399_msch_timings *noc = &info->noc_timings; + + if (sdram_params->ch[i].col == 0) + continue; + + mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGA0, + noc->ddrtiminga0.d32); + mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGB0, + noc->ddrtimingb0.d32); + mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGC0, + noc->ddrtimingc0.d32); + mmio_write_32(MSCH_BASE(i) + MSCH_DEVTODEV0, + noc->devtodev0.d32); + mmio_write_32(MSCH_BASE(i) + MSCH_DDRMODE, noc->ddrmode.d32); + + /* rank 1 memory clock disable (dfi_dram_clk_disable = 1) */ + if (sdram_params->ch[i].rank == 1) + mmio_setbits_32(CTL_REG(i, 276), 1 << 17); + } + + DDR_STRIDE(sdram_params->stride); + + /* reboot hold register set */ + mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1), + CRU_PMU_SGRF_RST_RLS | + PRESET_GPIO0_HOLD(1) | + PRESET_GPIO1_HOLD(1)); + mmio_clrsetbits_32(CRU_BASE + CRU_GLB_RST_CON, 0x3, 0x3); +} + +static __pmusramfunc void pctl_cfg(uint32_t ch, + struct rk3399_sdram_params *sdram_params) +{ + const uint32_t *params_ctl = sdram_params->pctl_regs.denali_ctl; + const uint32_t *params_pi = sdram_params->pi_regs.denali_pi; + const struct rk3399_ddr_publ_regs *phy_regs = &sdram_params->phy_regs; + uint32_t tmp, tmp1, tmp2, i; + + /* + * Workaround controller bug: + * Do not program DRAM_CLASS until NO_PHY_IND_TRAIN_INT is programmed + */ + sram_regcpy(CTL_REG(ch, 1), (uintptr_t)¶ms_ctl[1], + CTL_REG_NUM - 1); + mmio_write_32(CTL_REG(ch, 0), params_ctl[0]); + sram_regcpy(PI_REG(ch, 0), (uintptr_t)¶ms_pi[0], + PI_REG_NUM); + + sram_regcpy(PHY_REG(ch, 910), (uintptr_t)&phy_regs->phy896[910 - 896], + 3); + + mmio_clrsetbits_32(CTL_REG(ch, 68), PWRUP_SREFRESH_EXIT, + PWRUP_SREFRESH_EXIT); + + /* PHY_DLL_RST_EN */ + mmio_clrsetbits_32(PHY_REG(ch, 957), 0x3 << 24, 1 << 24); + 
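/*
 * Illustrative note (editorial aside, not part of the original patch): the
 * mmio_clrsetbits_32(addr, clear, set) helpers used throughout this driver
 * are read-modify-write accessors, equivalent to:
 *
 *	uint32_t v = mmio_read_32(addr);
 *
 *	v &= ~clear;		/* clear the whole field first */
 *	v |= set;		/* then OR in the new field value */
 *	mmio_write_32(addr, v);
 *
 * so the call above clears the two-bit PHY_DLL_RST_EN field at bits [25:24]
 * and sets it to 0x1. The rk3399_mcu.h header later in this series defines
 * the same helpers as macros with exactly this expansion.
 */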
dmbst(); + + mmio_setbits_32(PI_REG(ch, 0), START); + mmio_setbits_32(CTL_REG(ch, 0), START); + + /* wait lock */ + while (1) { + tmp = mmio_read_32(PHY_REG(ch, 920)); + tmp1 = mmio_read_32(PHY_REG(ch, 921)); + tmp2 = mmio_read_32(PHY_REG(ch, 922)); + if ((((tmp >> 16) & 0x1) == 0x1) && + (((tmp1 >> 16) & 0x1) == 0x1) && + (((tmp1 >> 0) & 0x1) == 0x1) && + (((tmp2 >> 0) & 0x1) == 0x1)) + break; + /* if PLL bypass,don't need wait lock */ + if (mmio_read_32(PHY_REG(ch, 911)) & 0x1) + break; + } + + sram_regcpy(PHY_REG(ch, 896), (uintptr_t)&phy_regs->phy896[0], 63); + + for (i = 0; i < 4; i++) + sram_regcpy(PHY_REG(ch, 128 * i), + (uintptr_t)&phy_regs->phy0[0], 91); + + for (i = 0; i < 3; i++) + sram_regcpy(PHY_REG(ch, 512 + 128 * i), + (uintptr_t)&phy_regs->phy512[i][0], 38); +} + +static __pmusramfunc int dram_switch_to_next_index( + struct rk3399_sdram_params *sdram_params) +{ + uint32_t ch, ch_count; + uint32_t fn = ((mmio_read_32(CTL_REG(0, 111)) >> 16) + 1) & 0x1; + + mmio_write_32(CIC_BASE + CIC_CTRL0, + (((0x3 << 4) | (1 << 2) | 1) << 16) | + (fn << 4) | (1 << 2) | 1); + while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 2))) + ; + + mmio_write_32(CIC_BASE + CIC_CTRL0, 0x20002); + while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 0))) + ; + + ch_count = sdram_params->num_channels; + + /* LPDDR4 f2 cann't do training, all training will fail */ + for (ch = 0; ch < ch_count; ch++) { + /* + * Without this disabled for LPDDR4 we end up writing 0's + * in place of real data in an interesting pattern. + */ + if (sdram_params->dramtype != LPDDR4) { + mmio_clrsetbits_32(PHY_REG(ch, 896), (0x3 << 8) | 1, + fn << 8); + } + + /* data_training failed */ + if (data_training(ch, sdram_params, PI_FULL_TRAINING)) + return -1; + } + + return 0; +} + +/* + * Needs to be done for both channels at once in case of a shared reset signal + * between channels. 
+ */ +static __pmusramfunc int pctl_start(uint32_t channel_mask, + struct rk3399_sdram_params *sdram_params) +{ + uint32_t count; + uint32_t byte; + + mmio_setbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT); + mmio_setbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT); + + /* need de-access IO retention before controller START */ + if (channel_mask & (1 << 0)) + mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 19)); + if (channel_mask & (1 << 1)) + mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 23)); + + /* PHY_DLL_RST_EN */ + if (channel_mask & (1 << 0)) + mmio_clrsetbits_32(PHY_REG(0, 957), 0x3 << 24, + 0x2 << 24); + if (channel_mask & (1 << 1)) + mmio_clrsetbits_32(PHY_REG(1, 957), 0x3 << 24, + 0x2 << 24); + + /* check ERROR bit */ + if (channel_mask & (1 << 0)) { + count = 0; + while (!(mmio_read_32(CTL_REG(0, 203)) & (1 << 3))) { + /* CKE is low, loop 10ms */ + if (count > 100) + return -1; + + sram_udelay(100); + count++; + } + + mmio_clrbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT); + + /* Restore the PHY_RX_CAL_DQS value */ + for (byte = 0; byte < 4; byte++) + mmio_clrsetbits_32(PHY_REG(0, 57 + 128 * byte), + 0xfff << 16, + sdram_params->rx_cal_dqs[0][byte]); + } + if (channel_mask & (1 << 1)) { + count = 0; + while (!(mmio_read_32(CTL_REG(1, 203)) & (1 << 3))) { + /* CKE is low, loop 10ms */ + if (count > 100) + return -1; + + sram_udelay(100); + count++; + } + + mmio_clrbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT); + + /* Restore the PHY_RX_CAL_DQS value */ + for (byte = 0; byte < 4; byte++) + mmio_clrsetbits_32(PHY_REG(1, 57 + 128 * byte), + 0xfff << 16, + sdram_params->rx_cal_dqs[1][byte]); + } + + return 0; +} + +__pmusramfunc static void pmusram_restore_pll(int pll_id, uint32_t *src) +{ + mmio_write_32((CRU_BASE + CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE); + + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 0), src[0] | REG_SOC_WMSK); + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 1), src[1] | REG_SOC_WMSK); + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 2), src[2]); + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 4), src[4] | REG_SOC_WMSK); + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 5), src[5] | REG_SOC_WMSK); + + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), src[3] | REG_SOC_WMSK); + + while ((mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 2)) & + (1U << 31)) == 0x0) + ; +} + +__pmusramfunc static void pmusram_enable_watchdog(void) +{ + /* Make the watchdog use the first global reset. */ + mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, 1 << 1); + + /* + * This gives the system ~8 seconds before reset. The pclk for the + * watchdog is 4MHz on reset. The value of 0x9 in WDT_TORR means that + * the watchdog will wait for 0x1ffffff cycles before resetting. + */ + mmio_write_32(WDT0_BASE + 4, 0x9); + + /* Enable the watchdog */ + mmio_setbits_32(WDT0_BASE, 0x1); + + /* Magic reset the watchdog timer value for WDT_CRR. */ + mmio_write_32(WDT0_BASE + 0xc, 0x76); + + secure_watchdog_ungate(); + + /* The watchdog is in PD_ALIVE, so deidle it. 
*/ + mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, PMU_CLR_ALIVE); +} + +void dmc_suspend(void) +{ + struct rk3399_sdram_params *sdram_params = &sdram_config; + struct rk3399_ddr_publ_regs *phy_regs; + uint32_t *params_ctl; + uint32_t *params_pi; + uint32_t refdiv, postdiv2, postdiv1, fbdiv; + uint32_t ch, byte, i; + + phy_regs = &sdram_params->phy_regs; + params_ctl = sdram_params->pctl_regs.denali_ctl; + params_pi = sdram_params->pi_regs.denali_pi; + + /* save dpll register and ddr clock register value to pmusram */ + cru_clksel_con6 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON6); + for (i = 0; i < PLL_CON_COUNT; i++) + dpll_data[i] = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, i)); + + fbdiv = dpll_data[0] & 0xfff; + postdiv2 = POSTDIV2_DEC(dpll_data[1]); + postdiv1 = POSTDIV1_DEC(dpll_data[1]); + refdiv = REFDIV_DEC(dpll_data[1]); + + sdram_params->ddr_freq = ((fbdiv * 24) / + (refdiv * postdiv1 * postdiv2)) * MHz; + + INFO("sdram_params->ddr_freq = %d\n", sdram_params->ddr_freq); + sdram_params->odt = (((mmio_read_32(PHY_REG(0, 5)) >> 16) & + 0x7) != 0) ? 1 : 0; + + /* copy the registers CTL PI and PHY */ + dram_regcpy((uintptr_t)¶ms_ctl[0], CTL_REG(0, 0), CTL_REG_NUM); + + /* mask DENALI_CTL_00_DATA.START, only copy here, will trigger later */ + params_ctl[0] &= ~(0x1 << 0); + + dram_regcpy((uintptr_t)¶ms_pi[0], PI_REG(0, 0), + PI_REG_NUM); + + /* mask DENALI_PI_00_DATA.START, only copy here, will trigger later*/ + params_pi[0] &= ~(0x1 << 0); + + dram_regcpy((uintptr_t)&phy_regs->phy0[0], + PHY_REG(0, 0), 91); + + for (i = 0; i < 3; i++) + dram_regcpy((uintptr_t)&phy_regs->phy512[i][0], + PHY_REG(0, 512 + 128 * i), 38); + + dram_regcpy((uintptr_t)&phy_regs->phy896[0], PHY_REG(0, 896), 63); + + for (ch = 0; ch < sdram_params->num_channels; ch++) { + for (byte = 0; byte < 4; byte++) + sdram_params->rx_cal_dqs[ch][byte] = (0xfff << 16) & + mmio_read_32(PHY_REG(ch, 57 + byte * 128)); + } + + /* set DENALI_PHY_957_DATA.PHY_DLL_RST_EN = 0x1 */ + phy_regs->phy896[957 - 896] &= ~(0x3 << 24); + phy_regs->phy896[957 - 896] |= 1 << 24; + phy_regs->phy896[0] |= 1; + phy_regs->phy896[0] &= ~(0x3 << 8); +} + +__pmusramfunc void phy_dll_bypass_set(uint32_t ch, uint32_t freq) +{ + if (freq <= (125 * 1000 * 1000)) { + /* Set master mode to SW for slices*/ + mmio_setbits_32(PHY_REG(ch, 86), 3 << 10); + mmio_setbits_32(PHY_REG(ch, 214), 3 << 10); + mmio_setbits_32(PHY_REG(ch, 342), 3 << 10); + mmio_setbits_32(PHY_REG(ch, 470), 3 << 10); + /* Set master mode to SW for address slices*/ + mmio_setbits_32(PHY_REG(ch, 547), 3 << 18); + mmio_setbits_32(PHY_REG(ch, 675), 3 << 18); + mmio_setbits_32(PHY_REG(ch, 803), 3 << 18); + } else { + /* Clear SW master mode for slices*/ + mmio_clrbits_32(PHY_REG(ch, 86), 3 << 10); + mmio_clrbits_32(PHY_REG(ch, 214), 3 << 10); + mmio_clrbits_32(PHY_REG(ch, 342), 3 << 10); + mmio_clrbits_32(PHY_REG(ch, 470), 3 << 10); + /* Clear SW master mode for address slices*/ + mmio_clrbits_32(PHY_REG(ch, 547), 3 << 18); + mmio_clrbits_32(PHY_REG(ch, 675), 3 << 18); + mmio_clrbits_32(PHY_REG(ch, 803), 3 << 18); + } +} + +__pmusramfunc void dmc_resume(void) +{ + struct rk3399_sdram_params *sdram_params = &sdram_config; + uint32_t channel_mask = 0; + uint32_t channel; + + /* + * We can't turn off the watchdog, so if we have not turned it on before + * we should not turn it on here. 
+ */ + if ((pmu_enable_watchdog0 & 0x1) == 0x1) { + pmusram_enable_watchdog(); + } + pmu_sgrf_rst_hld_release(); + restore_pmu_rsthold(); + sram_secure_timer_init(); + + /* + * we switch ddr clock to abpll when suspend, + * we set back to dpll here + */ + mmio_write_32(CRU_BASE + CRU_CLKSEL_CON6, + cru_clksel_con6 | REG_SOC_WMSK); + pmusram_restore_pll(DPLL_ID, dpll_data); + + configure_sgrf(); + +retry: + for (channel = 0; channel < sdram_params->num_channels; channel++) { + phy_pctrl_reset(channel); + /* + * Without this, LPDDR4 will write 0's in place of real data + * in a strange pattern. + */ + if (sdram_params->dramtype == LPDDR4) { + phy_dll_bypass_set(channel, sdram_params->ddr_freq); + } + pctl_cfg(channel, sdram_params); + } + + for (channel = 0; channel < 2; channel++) { + if (sdram_params->ch[channel].col) + channel_mask |= 1 << channel; + } + + if (pctl_start(channel_mask, sdram_params) < 0) + goto retry; + + for (channel = 0; channel < sdram_params->num_channels; channel++) { + /* LPDDR2/LPDDR3 need to wait DAI complete, max 10us */ + if (sdram_params->dramtype == LPDDR3) + sram_udelay(10); + + /* + * Training here will always fail for LPDDR4, so skip it + * If traning fail, retry to do it again. + */ + if (sdram_params->dramtype != LPDDR4 && + data_training(channel, sdram_params, PI_FULL_TRAINING)) + goto retry; + + set_ddrconfig(sdram_params, channel, + sdram_params->ch[channel].ddrconfig); + } + + dram_all_config(sdram_params); + + /* Switch to index 1 and prepare for DDR frequency switch. */ + dram_switch_to_next_index(sdram_params); +} diff --git a/plat/rockchip/rk3399/drivers/dram/suspend.h b/plat/rockchip/rk3399/drivers/dram/suspend.h new file mode 100644 index 0000000..1389944 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/dram/suspend.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SUSPEND_H +#define SUSPEND_H + +#include <stdint.h> +#include <dram.h> + +#define KHz (1000) +#define MHz (1000 * KHz) +#define GHz (1000 * MHz) + +#define PI_CA_TRAINING (1 << 0) +#define PI_WRITE_LEVELING (1 << 1) +#define PI_READ_GATE_TRAINING (1 << 2) +#define PI_READ_LEVELING (1 << 3) +#define PI_WDQ_LEVELING (1 << 4) +#define PI_FULL_TRAINING (0xff) + +void dmc_suspend(void); +__pmusramfunc void dmc_resume(void); +extern __pmusramdata uint8_t pmu_enable_watchdog0; + +#endif /* SUSPEND_H */ diff --git a/plat/rockchip/rk3399/drivers/gpio/rk3399_gpio.c b/plat/rockchip/rk3399/drivers/gpio/rk3399_gpio.c new file mode 100644 index 0000000..724968f --- /dev/null +++ b/plat/rockchip/rk3399/drivers/gpio/rk3399_gpio.c @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> + +#include <platform_def.h> + +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <drivers/gpio.h> +#include <lib/mmio.h> +#include <plat/common/platform.h> + +#include <plat_private.h> +#include <soc.h> + +struct gpio_save { + uint32_t swporta_dr; + uint32_t swporta_ddr; + uint32_t inten; + uint32_t intmask; + uint32_t inttype_level; + uint32_t int_polarity; + uint32_t debounce; + uint32_t ls_sync; +} store_gpio[3]; + +static uint32_t store_grf_gpio[(GRF_GPIO2D_HE - GRF_GPIO2A_IOMUX) / 4 + 1]; + +#define SWPORTA_DR 0x00 +#define SWPORTA_DDR 0x04 +#define INTEN 0x30 +#define INTMASK 0x34 +#define INTTYPE_LEVEL 0x38 +#define INT_POLARITY 0x3c +#define DEBOUNCE 0x48 +#define LS_SYNC 0x60 + +#define EXT_PORTA 0x50 +#define PMU_GPIO_PORT0 0 +#define PMU_GPIO_PORT1 1 +#define GPIO_PORT2 2 +#define GPIO_PORT3 3 +#define GPIO_PORT4 4 + +#define PMU_GRF_GPIO0A_P 0x40 +#define GRF_GPIO2A_P 0xe040 +#define GPIO_P_MASK 0x03 + +#define GET_GPIO_PORT(pin) (pin / 32) +#define GET_GPIO_NUM(pin) (pin % 32) +#define GET_GPIO_BANK(pin) ((pin % 32) / 8) +#define GET_GPIO_ID(pin) ((pin % 32) % 8) + +enum { + ENC_ZDZU, + ENC_ZUDR, + ENC_ZUDZ, + NUM_ENC +}; + +static const struct port_info { + uint32_t clkgate_reg; + uint32_t pull_base; + uint32_t port_base; + /* + * Selects the pull mode encoding per bank, + * first index for pull_type_{hw2sw,sw2hw} + */ + uint8_t pull_enc[4]; + uint8_t clkgate_bit; + uint8_t max_bank; +} port_info[] = { + { + .clkgate_reg = PMUCRU_BASE + CRU_PMU_CLKGATE_CON(1), + .pull_base = PMUGRF_BASE + PMUGRF_GPIO0A_P, + .port_base = GPIO0_BASE, + .pull_enc = {ENC_ZDZU, ENC_ZDZU}, + .clkgate_bit = PCLK_GPIO0_GATE_SHIFT, + .max_bank = 1, + }, { + .clkgate_reg = PMUCRU_BASE + CRU_PMU_CLKGATE_CON(1), + .pull_base = PMUGRF_BASE + PMUGRF_GPIO1A_P, + .port_base = GPIO1_BASE, + .pull_enc = {ENC_ZUDR, ENC_ZUDR, ENC_ZUDR, ENC_ZUDR}, + .clkgate_bit = PCLK_GPIO1_GATE_SHIFT, + .max_bank = 3, + }, { + .clkgate_reg = CRU_BASE + CRU_CLKGATE_CON(31), + .pull_base = GRF_BASE + GRF_GPIO2A_P, + .port_base = GPIO2_BASE, + .pull_enc = {ENC_ZUDR, ENC_ZUDR, ENC_ZDZU, ENC_ZDZU}, + .clkgate_bit = PCLK_GPIO2_GATE_SHIFT, + .max_bank = 3, + }, { + .clkgate_reg = CRU_BASE + CRU_CLKGATE_CON(31), + .pull_base = GRF_BASE + GRF_GPIO3A_P, + .port_base = GPIO3_BASE, + .pull_enc = {ENC_ZUDR, ENC_ZUDR, ENC_ZUDR, ENC_ZUDR}, + .clkgate_bit = PCLK_GPIO3_GATE_SHIFT, + .max_bank = 3, + }, { + .clkgate_reg = CRU_BASE + CRU_CLKGATE_CON(31), + .pull_base = GRF_BASE + GRF_GPIO4A_P, + .port_base = GPIO4_BASE, + .pull_enc = {ENC_ZUDR, ENC_ZUDR, ENC_ZUDR, ENC_ZUDR}, + .clkgate_bit = PCLK_GPIO4_GATE_SHIFT, + .max_bank = 3, + } +}; + +/* + * Mappings between TF-A constants and hardware encodings: + * there are 3 different encoding schemes that may differ between + * banks of the same port: the corresponding value of the pull_enc array + * in port_info is used as the first index + */ +static const uint8_t pull_type_hw2sw[NUM_ENC][4] = { + [ENC_ZDZU] = {GPIO_PULL_NONE, GPIO_PULL_DOWN, GPIO_PULL_NONE, GPIO_PULL_UP}, + [ENC_ZUDR] = {GPIO_PULL_NONE, GPIO_PULL_UP, GPIO_PULL_DOWN, GPIO_PULL_REPEATER}, + [ENC_ZUDZ] = {GPIO_PULL_NONE, GPIO_PULL_UP, GPIO_PULL_DOWN, GPIO_PULL_NONE} +}; +static const uint8_t pull_type_sw2hw[NUM_ENC][4] = { + [ENC_ZDZU] = { + [GPIO_PULL_NONE] = 0, + [GPIO_PULL_DOWN] = 1, + [GPIO_PULL_UP] = 3, + [GPIO_PULL_REPEATER] = -1 + }, + [ENC_ZUDR] = { + [GPIO_PULL_NONE] = 0, + [GPIO_PULL_DOWN] = 2, + [GPIO_PULL_UP] 
= 1, + [GPIO_PULL_REPEATER] = 3 + }, + [ENC_ZUDZ] = { + [GPIO_PULL_NONE] = 0, + [GPIO_PULL_DOWN] = 2, + [GPIO_PULL_UP] = 1, + [GPIO_PULL_REPEATER] = -1 + } +}; + +/* Return old clock state, enables clock, in order to do GPIO access */ +static int gpio_get_clock(uint32_t gpio_number) +{ + uint32_t port = GET_GPIO_PORT(gpio_number); + assert(port < 5U); + + const struct port_info *info = &port_info[port]; + + if ((mmio_read_32(info->clkgate_reg) & (1U << info->clkgate_bit)) == 0U) { + return 0; + } + mmio_write_32( + info->clkgate_reg, + BITS_WITH_WMASK(0, 1, info->clkgate_bit) + ); + return 1; +} + +/* Restore old state of gpio clock, assuming it is running now */ +void gpio_put_clock(uint32_t gpio_number, uint32_t clock_state) +{ + if (clock_state == 0) { + return; + } + uint32_t port = GET_GPIO_PORT(gpio_number); + const struct port_info *info = &port_info[port]; + + mmio_write_32(info->clkgate_reg, BITS_WITH_WMASK(1, 1, info->clkgate_bit)); +} + +static int get_pull(int gpio) +{ + uint32_t port = GET_GPIO_PORT(gpio); + uint32_t bank = GET_GPIO_BANK(gpio); + uint32_t id = GET_GPIO_ID(gpio); + uint32_t val, clock_state; + + assert(port < 5U); + const struct port_info *info = &port_info[port]; + + assert(bank <= info->max_bank); + + clock_state = gpio_get_clock(gpio); + val = (mmio_read_32(info->pull_base + 4 * bank) >> (id * 2)) & GPIO_P_MASK; + gpio_put_clock(gpio, clock_state); + + return pull_type_hw2sw[info->pull_enc[bank]][val]; +} + +static void set_pull(int gpio, int pull) +{ + uint32_t port = GET_GPIO_PORT(gpio); + uint32_t bank = GET_GPIO_BANK(gpio); + uint32_t id = GET_GPIO_ID(gpio); + uint32_t clock_state; + + assert(port < 5U); + const struct port_info *info = &port_info[port]; + + assert(bank <= info->max_bank); + + uint8_t val = pull_type_sw2hw[info->pull_enc[bank]][pull]; + + assert(val != (uint8_t)-1); + + clock_state = gpio_get_clock(gpio); + mmio_write_32( + info->pull_base + 4 * bank, + BITS_WITH_WMASK(val, GPIO_P_MASK, id * 2) + ); + gpio_put_clock(gpio, clock_state); +} + +static void set_direction(int gpio, int direction) +{ + uint32_t port = GET_GPIO_PORT(gpio); + uint32_t num = GET_GPIO_NUM(gpio); + uint32_t clock_state; + + assert((port < 5) && (num < 32)); + + clock_state = gpio_get_clock(gpio); + + /* + * in gpio.h + * #define GPIO_DIR_OUT 0 + * #define GPIO_DIR_IN 1 + * but rk3399 gpio direction 1: output, 0: input + * so need to revert direction value + */ + mmio_setbits_32( + port_info[port].port_base + SWPORTA_DDR, + ((direction == 0) ? 1 : 0) << num + ); + gpio_put_clock(gpio, clock_state); +} + +static int get_direction(int gpio) +{ + uint32_t port = GET_GPIO_PORT(gpio); + uint32_t num = GET_GPIO_NUM(gpio); + int direction, clock_state; + + assert((port < 5U) && (num < 32U)); + + clock_state = gpio_get_clock(gpio); + + /* + * in gpio.h + * #define GPIO_DIR_OUT 0 + * #define GPIO_DIR_IN 1 + * but rk3399 gpio direction 1: output, 0: input + * so need to revert direction value + */ + direction = (((mmio_read_32( + port_info[port].port_base + SWPORTA_DDR + ) >> num) & 1U) == 0) ? 
1 : 0;
+	gpio_put_clock(gpio, clock_state);
+
+	return direction;
+}
+
+static int get_value(int gpio)
+{
+	uint32_t port = GET_GPIO_PORT(gpio);
+	uint32_t num = GET_GPIO_NUM(gpio);
+	int value, clock_state;
+
+	assert((port < 5) && (num < 32));
+
+	clock_state = gpio_get_clock(gpio);
+	value = (mmio_read_32(port_info[port].port_base + EXT_PORTA) >> num) &
+		0x1U;
+	gpio_put_clock(gpio, clock_state);
+
+	return value;
+}
+
+static void set_value(int gpio, int value)
+{
+	uint32_t port = GET_GPIO_PORT(gpio);
+	uint32_t num = GET_GPIO_NUM(gpio);
+	uint32_t clock_state;
+
+	assert((port < 5U) && (num < 32U));
+
+	clock_state = gpio_get_clock(gpio);
+	mmio_clrsetbits_32(
+		port_info[port].port_base + SWPORTA_DR,
+		1 << num,
+		((value == 0) ? 0 : 1) << num
+	);
+	gpio_put_clock(gpio, clock_state);
+}
+
+void plat_rockchip_save_gpio(void)
+{
+	unsigned int i;
+	uint32_t cru_gate_save;
+
+	cru_gate_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31));
+
+	/*
+	 * When shutting down the logic power domain, we need to save the
+	 * gpio2 ~ gpio4 registers, so enable the gpio2 ~ gpio4 clocks here
+	 * since they may be gated. We do not care about the gpio0 and gpio1
+	 * clock gates, since we never gate them.
+	 */
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));
+
+	/*
+	 * Since gpio0 and gpio1 are PMU GPIOs, they keep their values when
+	 * the logic power rail is shut down, so we only need to save the
+	 * gpio2 ~ gpio4 register values.
+	 */
+	for (i = 2; i < 5; i++) {
+		uint32_t base = port_info[i].port_base;
+
+		store_gpio[i - 2] = (struct gpio_save) {
+			.swporta_dr = mmio_read_32(base + SWPORTA_DR),
+			.swporta_ddr = mmio_read_32(base + SWPORTA_DDR),
+			.inten = mmio_read_32(base + INTEN),
+			.intmask = mmio_read_32(base + INTMASK),
+			.inttype_level = mmio_read_32(base + INTTYPE_LEVEL),
+			.int_polarity = mmio_read_32(base + INT_POLARITY),
+			.debounce = mmio_read_32(base + DEBOUNCE),
+			.ls_sync = mmio_read_32(base + LS_SYNC),
+		};
+	}
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+		      cru_gate_save | REG_SOC_WMSK);
+
+	/*
+	 * The gpio0 and gpio1 iomuxes live in the PMU GRF and keep their
+	 * values when the logic power rail is shut down, so we only need to
+	 * save the gpio2 ~ gpio4 iomux register values.
+	 */
+	for (i = 0; i < ARRAY_SIZE(store_grf_gpio); i++)
+		store_grf_gpio[i] =
+			mmio_read_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4);
+}
+
+void plat_rockchip_restore_gpio(void)
+{
+	int i;
+	uint32_t cru_gate_save;
+
+	for (i = 0; i < ARRAY_SIZE(store_grf_gpio); i++)
+		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
+			      REG_SOC_WMSK | store_grf_gpio[i]);
+
+	cru_gate_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31));
+
+	/*
+	 * When the logic power domain comes back up, we need to restore the
+	 * gpio2 ~ gpio4 registers, so enable the gpio2 ~ gpio4 clocks here
+	 * since they may be gated. We do not care about the gpio0 and gpio1
+	 * clock gates, since we never gate them.
+	 */
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));
+
+	for (i = 2; i < 5; i++) {
+		uint32_t base = port_info[i].port_base;
+		const struct gpio_save *save = &store_gpio[i - 2];
+
+		mmio_write_32(base + SWPORTA_DR, save->swporta_dr);
+		mmio_write_32(base + SWPORTA_DDR, save->swporta_ddr);
+		mmio_write_32(base + INTEN, save->inten);
+		mmio_write_32(base + INTMASK, save->intmask);
+		mmio_write_32(base + INTTYPE_LEVEL, save->inttype_level);
+		mmio_write_32(base + INT_POLARITY, save->int_polarity);
+		mmio_write_32(base + DEBOUNCE, save->debounce);
+		mmio_write_32(base + LS_SYNC, save->ls_sync);
+	}
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+		      cru_gate_save | REG_SOC_WMSK);
+}
+
+const gpio_ops_t rk3399_gpio_ops = {
+	.get_direction = get_direction,
+	.set_direction = set_direction,
+	.get_value = get_value,
+	.set_value = set_value,
+	.set_pull = set_pull,
+	.get_pull = get_pull,
+};
+
+void plat_rockchip_gpio_init(void)
+{
+	gpio_init(&rk3399_gpio_ops);
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/Makefile b/plat/rockchip/rk3399/drivers/m0/Makefile
new file mode 100644
index 0000000..79e09f0
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/Makefile
@@ -0,0 +1,125 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Cross Compile
+M0_CROSS_COMPILE ?= arm-none-eabi-
+
+# Build architecture
+ARCH := cortex-m0
+
+# Build platform
+PLAT_M0 ?= rk3399m0
+PLAT_M0_PMU ?= rk3399m0pmu
+
+ifeq (${V},0)
+	Q=@
+else
+	Q=
+endif
+export Q
+
+.SUFFIXES:
+
+INCLUDES += -Iinclude/ \
+		-I../../include/shared/
+
+# NOTE: Add C source files here
+C_SOURCES_COMMON := src/startup.c
+C_SOURCES := src/dram.c \
+		src/stopwatch.c
+C_SOURCES_PMU := src/suspend.c
+
+# Flags definition
+COMMON_FLAGS := -g -mcpu=$(ARCH) -mthumb -Wall -O3 -nostdlib -mfloat-abi=soft
+CFLAGS := -ffunction-sections -fdata-sections -fomit-frame-pointer -fno-common
+ASFLAGS := -Wa,--gdwarf-2
+LDFLAGS := -Wl,--gc-sections -Wl,--build-id=none
+
+# Cross tool
+CC := ${M0_CROSS_COMPILE}gcc
+CPP := ${M0_CROSS_COMPILE}cpp
+AR := ${M0_CROSS_COMPILE}ar
+OC := ${M0_CROSS_COMPILE}objcopy
+OD := ${M0_CROSS_COMPILE}objdump
+NM := ${M0_CROSS_COMPILE}nm
+
+# NOTE: The line continuation '\' is required in the next define otherwise we
+# end up with a line-feed character at the end of the last C filename.
+# Also bear this issue in mind if extending the list of supported filetypes.
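# Illustrative sketch (editorial aside, not part of the original Makefile):
# the define below strips the directory part and rewrites the .c/.S suffix to
# .o, so with the sources listed above
#   $(call SOURCES_TO_OBJS,src/dram.c src/stopwatch.c)
# expands to "dram.o stopwatch.o", which is then prefixed with $(BUILD)/ to
# form the object lists.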
+define SOURCES_TO_OBJS + $(notdir $(patsubst %.c,%.o,$(filter %.c,$(1)))) \ + $(notdir $(patsubst %.S,%.o,$(filter %.S,$(1)))) +endef + +SOURCES_COMMON := $(C_SOURCES_COMMON) +SOURCES := $(C_SOURCES) +SOURCES_PMU := $(C_SOURCES_PMU) +OBJS_COMMON := $(addprefix $(BUILD)/,$(call SOURCES_TO_OBJS,$(SOURCES_COMMON))) +OBJS := $(addprefix $(BUILD)/,$(call SOURCES_TO_OBJS,$(SOURCES))) +OBJS_PMU := $(addprefix $(BUILD)/,$(call SOURCES_TO_OBJS,$(SOURCES_PMU))) +LINKERFILE := $(BUILD)/$(PLAT_M0).ld +MAPFILE := $(BUILD)/$(PLAT_M0).map +MAPFILE_PMU := $(BUILD)/$(PLAT_M0_PMU).map +ELF := $(BUILD)/$(PLAT_M0).elf +ELF_PMU := $(BUILD)/$(PLAT_M0_PMU).elf +BIN := $(BUILD)/$(PLAT_M0).bin +BIN_PMU := $(BUILD)/$(PLAT_M0_PMU).bin +LINKERFILE_SRC := src/$(PLAT_M0).ld.S + +# Function definition related compilation +define MAKE_C +$(eval OBJ := $(1)/$(patsubst %.c,%.o,$(notdir $(2)))) +-include $(patsubst %.o,%.d,$(OBJ)) + +$(OBJ) : $(2) + @echo " CC $$<" + $$(Q)$$(CC) $$(COMMON_FLAGS) $$(CFLAGS) $$(INCLUDES) -MMD -MT $$@ -c $$< -o $$@ +endef + +define MAKE_S +$(eval OBJ := $(1)/$(patsubst %.S,%.o,$(notdir $(2)))) + +$(OBJ) : $(2) + @echo " AS $$<" + $$(Q)$$(CC) -x assembler-with-cpp $$(COMMON_FLAGS) $$(ASFLAGS) -c $$< -o $$@ +endef + +define MAKE_OBJS + $(eval C_OBJS := $(filter %.c,$(2))) + $(eval REMAIN := $(filter-out %.c,$(2))) + $(eval $(foreach obj,$(C_OBJS),$(call MAKE_C,$(1),$(obj),$(3)))) + + $(eval S_OBJS := $(filter %.S,$(REMAIN))) + $(eval REMAIN := $(filter-out %.S,$(REMAIN))) + $(eval $(foreach obj,$(S_OBJS),$(call MAKE_S,$(1),$(obj),$(3)))) + + $(and $(REMAIN),$(error Unexpected source files present: $(REMAIN))) +endef + +.PHONY: all +all: $(BIN) $(BIN_PMU) + +.DEFAULT_GOAL := all + +$(LINKERFILE): $(LINKERFILE_SRC) + $(CC) $(COMMON_FLAGS) $(INCLUDES) -P -E -D__LINKER__ -MMD -MF $@.d -MT $@ -o $@ $< +-include $(LINKERFILE).d + +$(ELF) : $(OBJS) $(OBJS_COMMON) $(LINKERFILE) + @echo " LD $@" + $(Q)$(CC) -o $@ $(COMMON_FLAGS) $(LDFLAGS) -Wl,-Map=$(MAPFILE) -Wl,-T$(LINKERFILE) $(OBJS) $(OBJS_COMMON) + +%.bin : %.elf + @echo " BIN $@" + $(Q)$(OC) -O binary $< $@ + +$(ELF_PMU) : $(OBJS_COMMON) $(OBJS_PMU) $(LINKERFILE) + @echo " LD $@" + $(Q)$(CC) -o $@ $(COMMON_FLAGS) $(LDFLAGS) -Wl,-Map=$(MAPFILE_PMU) -Wl,-T$(LINKERFILE) $(OBJS_PMU) $(OBJS_COMMON) + +$(eval $(call MAKE_OBJS,$(BUILD),$(SOURCES_COMMON),$(1))) +$(eval $(call MAKE_OBJS,$(BUILD),$(SOURCES),$(1))) +$(eval $(call MAKE_OBJS,$(BUILD),$(SOURCES_PMU),$(1))) diff --git a/plat/rockchip/rk3399/drivers/m0/include/addressmap.h b/plat/rockchip/rk3399/drivers/m0/include/addressmap.h new file mode 100644 index 0000000..d431437 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/m0/include/addressmap.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef ADDRESSMAP_H +#define ADDRESSMAP_H + +#include <addressmap_shared.h> + +/* Registers base address for M0 */ +#define MMIO_BASE 0x40000000 + +#endif /* ADDRESSMAP_H */ diff --git a/plat/rockchip/rk3399/drivers/m0/include/rk3399_mcu.h b/plat/rockchip/rk3399/drivers/m0/include/rk3399_mcu.h new file mode 100644 index 0000000..2e90694 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/m0/include/rk3399_mcu.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RK3399_MCU_H +#define RK3399_MCU_H + +#include <addressmap.h> + +typedef unsigned int uint32_t; + +#define mmio_read_32(c) ({unsigned int __v = \ + (*(volatile unsigned int *)(c)); __v; }) +#define mmio_write_32(c, v) ((*(volatile unsigned int *)(c)) = (v)) + +#define mmio_clrbits_32(addr, clear) \ + mmio_write_32(addr, (mmio_read_32(addr) & ~(clear))) +#define mmio_setbits_32(addr, set) \ + mmio_write_32(addr, (mmio_read_32(addr)) | (set)) +#define mmio_clrsetbits_32(addr, clear, set) \ + mmio_write_32(addr, (mmio_read_32(addr) & ~(clear)) | (set)) + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + +void stopwatch_init_usecs_expire(unsigned int usecs); +int stopwatch_expired(void); +void stopwatch_reset(void); + +#endif /* RK3399_MCU_H */ diff --git a/plat/rockchip/rk3399/drivers/m0/src/dram.c b/plat/rockchip/rk3399/drivers/m0/src/dram.c new file mode 100644 index 0000000..84e8884 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/m0/src/dram.c @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <dram_regs.h> +#include <m0_param.h> +#include <pmu_bits.h> +#include <pmu_regs.h> +#include "misc_regs.h" +#include "rk3399_mcu.h" + +static uint32_t gatedis_con0; + +static void idle_port(void) +{ + gatedis_con0 = mmio_read_32(PMUCRU_BASE + PMU_CRU_GATEDIS_CON0); + mmio_write_32(PMUCRU_BASE + PMU_CRU_GATEDIS_CON0, 0x3fffffff); + + mmio_setbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, + (1 << PMU_IDLE_REQ_MSCH0) | (1 << PMU_IDLE_REQ_MSCH1)); + while ((mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & + ((1 << PMU_IDLE_ST_MSCH1) | (1 << PMU_IDLE_ST_MSCH0))) != + ((1 << PMU_IDLE_ST_MSCH1) | (1 << PMU_IDLE_ST_MSCH0))) + continue; +} + +static void deidle_port(void) +{ + mmio_clrbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, + (1 << PMU_IDLE_REQ_MSCH0) | (1 << PMU_IDLE_REQ_MSCH1)); + while (mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & + ((1 << PMU_IDLE_ST_MSCH1) | (1 << PMU_IDLE_ST_MSCH0))) + continue; + + /* document is wrong, PMU_CRU_GATEDIS_CON0 do not need set MASK BIT */ + mmio_write_32(PMUCRU_BASE + PMU_CRU_GATEDIS_CON0, gatedis_con0); +} + +static void ddr_set_pll(void) +{ + mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_MODE(PLL_SLOW_MODE)); + + mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_POWER_DOWN(1)); + mmio_write_32(CRU_BASE + CRU_DPLL_CON0, + mmio_read_32(PARAM_ADDR + PARAM_DPLL_CON0)); + mmio_write_32(CRU_BASE + CRU_DPLL_CON1, + mmio_read_32(PARAM_ADDR + PARAM_DPLL_CON1)); + mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_POWER_DOWN(0)); + + while ((mmio_read_32(CRU_BASE + CRU_DPLL_CON2) & (1u << 31)) == 0) + continue; + + mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_MODE(PLL_NORMAL_MODE)); +} + +__attribute__((noreturn)) void m0_main(void) +{ + mmio_setbits_32(PHY_REG(0, 927), (1 << 22)); + mmio_setbits_32(PHY_REG(1, 927), (1 << 22)); + idle_port(); + + mmio_write_32(CIC_BASE + CIC_CTRL0, + (((0x3 << 4) | (1 << 2) | 1) << 16) | + (1 << 2) | 1 | + mmio_read_32(PARAM_ADDR + PARAM_FREQ_SELECT)); + while ((mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 2)) == 0) + continue; + + ddr_set_pll(); + mmio_write_32(CIC_BASE + CIC_CTRL0, 0x20002); + while ((mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 0)) == 0) + continue; + + deidle_port(); + mmio_clrbits_32(PHY_REG(0, 927), (1 << 22)); + mmio_clrbits_32(PHY_REG(1, 927), (1 << 22)); + + mmio_write_32(PARAM_ADDR + PARAM_M0_DONE, M0_DONE_FLAG); + + for (;;) + __asm__ volatile 
("wfi"); +} diff --git a/plat/rockchip/rk3399/drivers/m0/src/rk3399m0.ld.S b/plat/rockchip/rk3399/drivers/m0/src/rk3399m0.ld.S new file mode 100644 index 0000000..bfe054e --- /dev/null +++ b/plat/rockchip/rk3399/drivers/m0/src/rk3399m0.ld.S @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <m0_param.h> + +OUTPUT_FORMAT("elf32-littlearm") + +SECTIONS { + .m0_bin 0 : { + KEEP(*(.isr_vector)) + ASSERT(. == 0xc0, "ISR vector has the wrong size."); + ASSERT(. == PARAM_ADDR, "M0 params should go right behind ISR table."); + . += PARAM_M0_SIZE; + *(.text*) + *(.rodata*) + *(.data*) + *(.bss*) + . = ALIGN(8); + *(.co_stack*) + } + + /DISCARD/ : { *(.comment) *(.note*) } +} diff --git a/plat/rockchip/rk3399/drivers/m0/src/startup.c b/plat/rockchip/rk3399/drivers/m0/src/startup.c new file mode 100644 index 0000000..dfd8af2 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/m0/src/startup.c @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include "rk3399_mcu.h" + +/* Stack configuration */ +#define STACK_SIZE 0x00000040 +__attribute__ ((section(".co_stack"))) +unsigned long pstack[STACK_SIZE]; + +/* Macro definition */ +#define WEAK __attribute__ ((weak)) + +/* System exception vector handler */ +__attribute__ ((used)) +void WEAK reset_handler(void); +void WEAK nmi_handler(void); +void WEAK hardware_fault_handler(void); +void WEAK svc_handler(void); +void WEAK pend_sv_handler(void); +void WEAK systick_handler(void); + +extern int m0_main(void); + +/* Function prototypes */ +static void default_reset_handler(void); +static void default_handler(void); + +/* + * The minimal vector table for a Cortex M3. Note that the proper constructs + * must be placed on this to ensure that it ends up at physical address + * 0x00000000. + */ +__attribute__ ((used, section(".isr_vector"))) +void (* const g_pfnVectors[])(void) = { + /* core Exceptions */ + (void *)&pstack[STACK_SIZE], /* the initial stack pointer */ + reset_handler, + nmi_handler, + hardware_fault_handler, + 0, 0, 0, 0, 0, 0, 0, + svc_handler, + 0, 0, + pend_sv_handler, + systick_handler, + + /* external exceptions */ + 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 +}; + +/** + * This is the code that gets called when the processor first + * starts execution following a reset event. Only the absolutely + * necessary set is performed, after which the application + * supplied m0_main() routine is called. + */ +static void default_reset_handler(void) +{ + /* call the application's entry point */ + m0_main(); +} + +/** + * Provide weak aliases for each Exception handler to the Default_Handler. + * As they are weak aliases, any function with the same name will override + * this definition. + */ +#pragma weak reset_handler = default_reset_handler +#pragma weak nmi_handler = default_handler +#pragma weak hardware_fault_handler = default_handler +#pragma weak svc_handler = default_handler +#pragma weak pend_sv_handler = default_handler +#pragma weak systick_handler = default_handler + +/** + * This is the code that gets called when the processor receives + * an unexpected interrupt. This simply enters an infinite loop, + * preserving the system state for examination by a debugger. + */ +static void default_handler(void) +{ + /* go into an infinite loop. 
*/ + while (1) + ; +} diff --git a/plat/rockchip/rk3399/drivers/m0/src/stopwatch.c b/plat/rockchip/rk3399/drivers/m0/src/stopwatch.c new file mode 100644 index 0000000..5af8caa --- /dev/null +++ b/plat/rockchip/rk3399/drivers/m0/src/stopwatch.c @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <m0_param.h> +#include "rk3399_mcu.h" + +/* use 24MHz SysTick */ +#define US_TO_CYCLE(US) (US * 24) + +#define SYST_CST 0xe000e010 +/* enable counter */ +#define ENABLE (1 << 0) +/* count down to 0 does not cause SysTick exception to pend */ +#define TICKINT (1 << 1) +/* core clock used for SysTick */ +#define CLKSOURCE (1 << 2) + +#define COUNTFLAG (1 << 16) +#define SYST_RVR 0xe000e014 +#define MAX_VALUE 0xffffff +#define MAX_USECS (MAX_VALUE / US_TO_CYCLE(1)) +#define SYST_CVR 0xe000e018 +#define SYST_CALIB 0xe000e01c + +unsigned int remaining_usecs; + +static inline void stopwatch_set_usecs(void) +{ + unsigned int cycle; + unsigned int usecs = MIN(MAX_USECS, remaining_usecs); + + remaining_usecs -= usecs; + cycle = US_TO_CYCLE(usecs); + mmio_write_32(SYST_RVR, cycle); + mmio_write_32(SYST_CVR, 0); + + mmio_write_32(SYST_CST, ENABLE | TICKINT | CLKSOURCE); +} + +void stopwatch_init_usecs_expire(unsigned int usecs) +{ + /* + * Enter an inifite loop if the stopwatch is in use. This will allow the + * state to be analyzed with a debugger. + */ + if (mmio_read_32(SYST_CST) & ENABLE) + while (1) + ; + + remaining_usecs = usecs; + stopwatch_set_usecs(); +} + +int stopwatch_expired(void) +{ + int val = mmio_read_32(SYST_CST); + if ((val & COUNTFLAG) || !(val & ENABLE)) { + if (!remaining_usecs) + return 1; + + stopwatch_set_usecs(); + } + + return 0; +} + +void stopwatch_reset(void) +{ + mmio_clrbits_32(SYST_CST, ENABLE); + remaining_usecs = 0; +} diff --git a/plat/rockchip/rk3399/drivers/m0/src/suspend.c b/plat/rockchip/rk3399/drivers/m0/src/suspend.c new file mode 100644 index 0000000..9ad2fa2 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/m0/src/suspend.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <pmu_regs.h> +#include "rk3399_mcu.h" + +#define M0_SCR 0xe000ed10 /* System Control Register (SCR) */ + +#define SCR_SLEEPDEEP_SHIFT (1 << 2) + +__attribute__((noreturn)) void m0_main(void) +{ + unsigned int status_value; + + /* + * PMU sometimes doesn't clear power mode bit as it's supposed to due + * to a hardware bug. Make the M0 clear it manually to be sure, + * otherwise interrupts some cases with concurrent wake interrupts + * we stay asleep forever. + */ + while (1) { + status_value = mmio_read_32(PMU_BASE + PMU_POWER_ST); + if (status_value) { + mmio_clrbits_32(PMU_BASE + PMU_PWRMODE_CON, 0x01); + break; + } + } + + /* + * FSM power secquence is .. -> ST_INPUT_CLAMP(step.17) -> .. -> + * ST_WAKEUP_RESET -> ST_EXT_PWRUP-> ST_RELEASE_CLAMP -> + * ST_24M_OSC_EN -> .. -> ST_WAKEUP_RESET_CLR(step.26) -> .., + * INPUT_CLAMP and WAKEUP_RESET will hold the SOC not affect by + * power or other single glitch, but WAKEUP_RESET need work with 24MHz, + * so between RELEASE_CLAMP and 24M_OSC_EN, there have a chance + * that glitch will affect SOC, and mess up SOC status, so we + * addressmap_shared software clamp between ST_INPUT_CLAMP and + * ST_WAKEUP_RESET_CLR to avoid this happen. 
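+ *
+ * The two polling loops below implement this: bit 1 of PMU_SFT_CON is
+ * asserted once PMU_POWER_ST reaches step 17 and released again once it
+ * reaches step 26.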
+ */ + while (1) { + status_value = mmio_read_32(PMU_BASE + PMU_POWER_ST); + if (status_value >= 17) { + mmio_setbits_32(PMU_BASE + PMU_SFT_CON, 0x02); + break; + } + + } + + while (1) { + status_value = mmio_read_32(PMU_BASE + PMU_POWER_ST); + if (status_value >= 26) { + mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, 0x02); + break; + } + } + + for (;;) + __asm__ volatile ("wfi"); +} diff --git a/plat/rockchip/rk3399/drivers/pmu/m0_ctl.c b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.c new file mode 100644 index 0000000..cad76ac --- /dev/null +++ b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.c @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include <m0_ctl.h> +#include <plat_private.h> +#include <rk3399_def.h> +#include <secure.h> +#include <soc.h> + +void m0_init(void) +{ + /* secure config for M0 */ + mmio_write_32(SGRF_BASE + SGRF_PMU_CON(0), WMSK_BIT(7)); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6), WMSK_BIT(12)); + + /* document is wrong, PMU_CRU_GATEDIS_CON0 do not need set MASK BIT */ + mmio_setbits_32(PMUCRU_BASE + PMUCRU_GATEDIS_CON0, 0x02); + + /* + * To switch the parent to xin24M and div == 1, + * + * We need to close most of the PLLs and clocks except the OSC 24MHz + * durning suspend, and this should be enough to supplies the ddrfreq, + * For the simple handle, we just keep the fixed 24MHz to supply the + * suspend and ddrfreq directly. + */ + mmio_write_32(PMUCRU_BASE + PMUCRU_CLKSEL_CON0, + BIT_WITH_WMSK(15) | BITS_WITH_WMASK(0x0, 0x1f, 8)); + + mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2, WMSK_BIT(5)); +} + +void m0_configure_execute_addr(uintptr_t addr) +{ + /* set the execute address for M0 */ + mmio_write_32(SGRF_BASE + SGRF_PMU_CON(3), + BITS_WITH_WMASK((addr >> 12) & 0xffff, + 0xffffu, 0)); + mmio_write_32(SGRF_BASE + SGRF_PMU_CON(7), + BITS_WITH_WMASK((addr >> 28) & 0xf, + 0xfu, 0)); +} + +void m0_start(void) +{ + /* enable clocks for M0 */ + mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2, + BITS_WITH_WMASK(0x0, 0xf, 0)); + + /* clean the PARAM_M0_DONE flag, mean that M0 will start working */ + mmio_write_32(M0_PARAM_ADDR + PARAM_M0_DONE, 0); + dmbst(); + + mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0, + BITS_WITH_WMASK(0x0, 0x4, 0)); + + udelay(5); + /* start M0 */ + mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0, + BITS_WITH_WMASK(0x0, 0x20, 0)); + dmbst(); +} + +void m0_stop(void) +{ + /* stop M0 */ + mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0, + BITS_WITH_WMASK(0x24, 0x24, 0)); + + /* disable clocks for M0 */ + mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2, + BITS_WITH_WMASK(0xf, 0xf, 0)); +} + +void m0_wait_done(void) +{ + do { + /* + * Don't starve the M0 for access to SRAM, so delay before + * reading the PARAM_M0_DONE value again. + */ + udelay(5); + dsb(); + } while (mmio_read_32(M0_PARAM_ADDR + PARAM_M0_DONE) != M0_DONE_FLAG); + + /* + * Let the M0 settle into WFI before we leave. This is so we don't reset + * the M0 in a bad spot which can cause problems with the M0. + */ + udelay(10); + dsb(); +} diff --git a/plat/rockchip/rk3399/drivers/pmu/m0_ctl.h b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.h new file mode 100644 index 0000000..7542e22 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef M0_CTL_H +#define M0_CTL_H + +#include <m0_param.h> + +#define M0_BINCODE_BASE ((uintptr_t)rk3399m0_bin) +#define M0_PARAM_ADDR (M0_BINCODE_BASE + PARAM_ADDR) +#define M0PMU_BINCODE_BASE ((uintptr_t)rk3399m0pmu_bin) + +/* pmu_fw.c */ +extern char rk3399m0_bin[]; +extern char rk3399m0_bin_end[]; + +extern char rk3399m0pmu_bin[]; +extern char rk3399m0pmu_bin_end[]; + +extern void m0_init(void); +extern void m0_start(void); +extern void m0_stop(void); +extern void m0_wait_done(void); +extern void m0_configure_execute_addr(uintptr_t addr); + +#endif /* M0_CTL_H */ diff --git a/plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S b/plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S new file mode 100644 index 0000000..546c09a --- /dev/null +++ b/plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <platform_def.h> +#include <pmu_regs.h> + + .globl clst_warmboot_data + + .macro sram_func _name + .cfi_sections .debug_frame + .section .sram.text, "ax" + .type \_name, %function + .cfi_startproc + \_name: + .endm + +#define CRU_CLKSEL_CON6 0x118 + +#define DDRCTL0_C_SYSREQ_CFG 0x0100 +#define DDRCTL1_C_SYSREQ_CFG 0x1000 + +#define DDRC0_SREF_DONE_EXT 0x01 +#define DDRC1_SREF_DONE_EXT 0x04 + +#define PLL_MODE_SHIFT (0x8) +#define PLL_NORMAL_MODE ((0x3 << (PLL_MODE_SHIFT + 16)) | \ + (0x1 << PLL_MODE_SHIFT)) +#define MPIDR_CLST_L_BITS 0x0 + /* + * For different socs, if we want to speed up warmboot, + * we need to config some regs here. + * If scu was suspend, we must resume related clk + * from slow (24M) mode to normal mode first. 
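+ * The macro below reads the per-cluster clst_warmboot_data word and, when
+ * it equals PMU_CLST_RET, switches that cluster's PLL back to normal mode
+ * through its CRU mode register.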
+ * X0: MPIDR_EL1 & MPIDR_CLUSTER_MASK + */ +.macro func_rockchip_clst_warmboot + adr x4, clst_warmboot_data + lsr x5, x0, #6 + ldr w3, [x4, x5] + str wzr, [x4, x5] + cmp w3, #PMU_CLST_RET + b.ne clst_warmboot_end + ldr w6, =(PLL_NORMAL_MODE) + /* + * core_l offset is CRU_BASE + 0xc, + * core_b offset is CRU_BASE + 0x2c + */ + ldr x7, =(CRU_BASE + 0xc) + lsr x2, x0, #3 + str w6, [x7, x2] +clst_warmboot_end: +.endm + +.macro rockchip_clst_warmboot_data +clst_warmboot_data: + .rept PLATFORM_CLUSTER_COUNT + .word 0 + .endr +.endm + + /* ----------------------------------------------- + * void sram_func_set_ddrctl_pll(uint32_t pll_src) + * Function to switch the PLL source for ddrctrl + * In: x0 - The PLL of the clk_ddrc clock source + * out: None + * Clobber list : x0 - x3, x5, x8 - x10 + * ----------------------------------------------- + */ + + .globl sram_func_set_ddrctl_pll + +sram_func sram_func_set_ddrctl_pll + /* backup parameter */ + mov x8, x0 + + /* disable the MMU at EL3 */ + mrs x9, sctlr_el3 + bic x10, x9, #(SCTLR_M_BIT) + msr sctlr_el3, x10 + isb + dsb sy + + /* enable ddrctl0_1 idle request */ + mov x5, PMU_BASE + ldr w0, [x5, #PMU_SFT_CON] + orr w0, w0, #DDRCTL0_C_SYSREQ_CFG + orr w0, w0, #DDRCTL1_C_SYSREQ_CFG + str w0, [x5, #PMU_SFT_CON] + +check_ddrc0_1_sref_enter: + ldr w1, [x5, #PMU_DDR_SREF_ST] + and w2, w1, #DDRC0_SREF_DONE_EXT + and w3, w1, #DDRC1_SREF_DONE_EXT + orr w2, w2, w3 + cmp w2, #(DDRC0_SREF_DONE_EXT | DDRC1_SREF_DONE_EXT) + b.eq check_ddrc0_1_sref_enter + + /* + * select a PLL for ddrctrl: + * x0 = 0: ALPLL + * x0 = 1: ABPLL + * x0 = 2: DPLL + * x0 = 3: GPLLL + */ + mov x5, CRU_BASE + lsl w0, w8, #4 + orr w0, w0, #0x00300000 + str w0, [x5, #CRU_CLKSEL_CON6] + + /* disable ddrctl0_1 idle request */ + mov x5, PMU_BASE + ldr w0, [x5, #PMU_SFT_CON] + bic w0, w0, #DDRCTL0_C_SYSREQ_CFG + bic w0, w0, #DDRCTL1_C_SYSREQ_CFG + str w0, [x5, #PMU_SFT_CON] + +check_ddrc0_1_sref_exit: + ldr w1, [x5, #PMU_DDR_SREF_ST] + and w2, w1, #DDRC0_SREF_DONE_EXT + and w3, w1, #DDRC1_SREF_DONE_EXT + orr w2, w2, w3 + cmp w2, #0x0 + b.eq check_ddrc0_1_sref_exit + + /* reenable the MMU at EL3 */ + msr sctlr_el3, x9 + isb + dsb sy + + ret +endfunc sram_func_set_ddrctl_pll diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.c b/plat/rockchip/rk3399/drivers/pmu/pmu.c new file mode 100644 index 0000000..3084c4f --- /dev/null +++ b/plat/rockchip/rk3399/drivers/pmu/pmu.c @@ -0,0 +1,1626 @@ +/* + * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> +#include <string.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <bl31/bl31.h> +#include <common/debug.h> +#include <drivers/arm/gicv3.h> +#include <drivers/delay_timer.h> +#include <drivers/gpio.h> +#include <lib/bakery_lock.h> +#include <lib/mmio.h> +#include <plat/common/platform.h> + +#include <dfs.h> +#include <m0_ctl.h> +#include <plat_params.h> +#include <plat_private.h> +#include <pmu.h> +#include <pmu_com.h> +#include <pwm.h> +#include <rk3399_def.h> +#include <secure.h> +#include <soc.h> +#include <suspend.h> + +DEFINE_BAKERY_LOCK(rockchip_pd_lock); + +static uint32_t cpu_warm_boot_addr; +static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT]; +static uint32_t store_cru[CRU_SDIO0_CON1 / 4 + 1]; +static uint32_t store_usbphy0[7]; +static uint32_t store_usbphy1[7]; +static uint32_t store_grf_io_vsel; +static uint32_t store_grf_soc_con0; +static uint32_t store_grf_soc_con1; +static uint32_t store_grf_soc_con2; +static uint32_t store_grf_soc_con3; +static uint32_t store_grf_soc_con4; +static uint32_t store_grf_soc_con7; +static uint32_t store_grf_ddrc_con[4]; +static uint32_t store_wdt0[2]; +static uint32_t store_wdt1[2]; +static gicv3_dist_ctx_t dist_ctx; +static gicv3_redist_ctx_t rdist_ctx; + +/* + * There are two ways to powering on or off on core. + * 1) Control it power domain into on or off in PMU_PWRDN_CON reg, + * it is core_pwr_pd mode + * 2) Enable the core power manage in PMU_CORE_PM_CON reg, + * then, if the core enter into wfi, it power domain will be + * powered off automatically. it is core_pwr_wfi or core_pwr_wfi_int mode + * so we need core_pm_cfg_info to distinguish which method be used now. 
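+ * cpus_power_domain_on() and cpus_power_domain_off() below record and
+ * consult this per-cpu value.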
+ */ + +static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT] +#if USE_COHERENT_MEM +__attribute__ ((section("tzfw_coherent_mem"))) +#endif +;/* coheront */ + +static void pmu_bus_idle_req(uint32_t bus, uint32_t state) +{ + uint32_t bus_id = BIT(bus); + uint32_t bus_req; + uint32_t wait_cnt = 0; + uint32_t bus_state, bus_ack; + + if (state) + bus_req = BIT(bus); + else + bus_req = 0; + + mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req); + + do { + bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id; + bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id; + if (bus_state == bus_req && bus_ack == bus_req) + break; + + wait_cnt++; + udelay(1); + } while (wait_cnt < MAX_WAIT_COUNT); + + if (bus_state != bus_req || bus_ack != bus_req) { + INFO("%s:st=%x(%x)\n", __func__, + mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), + bus_state); + INFO("%s:st=%x(%x)\n", __func__, + mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK), + bus_ack); + } +} + +struct pmu_slpdata_s pmu_slpdata; + +static void qos_restore(void) +{ + if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) + RESTORE_QOS(pmu_slpdata.gpu_qos, GPU); + if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { + RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); + RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); + } + if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { + RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); + RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); + } + if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { + RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); + RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); + RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); + } + if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) + RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP); + if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) + RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC); + if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { + RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); + RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); + } + if (pmu_power_domain_st(PD_SD) == pmu_pd_on) + RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); + if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) + RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC); + if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) + RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO); + if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) + RESTORE_QOS(pmu_slpdata.gic_qos, GIC); + if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { + RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R); + RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W); + } + if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) + RESTORE_QOS(pmu_slpdata.iep_qos, IEP); + if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { + RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); + RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); + } + if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { + RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); + RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); + RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); + } + if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { + RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0); + RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1); + RESTORE_QOS(pmu_slpdata.dcf_qos, DCF); + RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); + RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); + RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); + RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); + RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); + } + if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) + RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); + if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { + 
RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); + RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); + } +} + +static void qos_save(void) +{ + if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) + SAVE_QOS(pmu_slpdata.gpu_qos, GPU); + if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { + SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); + SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); + } + if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { + SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); + SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); + } + if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { + SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); + SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); + SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); + } + if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) + SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP); + if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) + SAVE_QOS(pmu_slpdata.gmac_qos, GMAC); + if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { + SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); + SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); + } + if (pmu_power_domain_st(PD_SD) == pmu_pd_on) + SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); + if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) + SAVE_QOS(pmu_slpdata.emmc_qos, EMMC); + if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) + SAVE_QOS(pmu_slpdata.sdio_qos, SDIO); + if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) + SAVE_QOS(pmu_slpdata.gic_qos, GIC); + if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { + SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R); + SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W); + } + if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) + SAVE_QOS(pmu_slpdata.iep_qos, IEP); + if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { + SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); + SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); + } + if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { + SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); + SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); + SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); + } + if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { + SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0); + SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1); + SAVE_QOS(pmu_slpdata.dcf_qos, DCF); + SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); + SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); + SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); + SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); + SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); + } + if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) + SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); + if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { + SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); + SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); + } +} + +static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state) +{ + uint32_t state; + + if (pmu_power_domain_st(pd_id) == pd_state) + goto out; + + if (pd_state == pmu_pd_on) + pmu_power_domain_ctr(pd_id, pd_state); + + state = (pd_state == pmu_pd_off) ? 
BUS_IDLE : BUS_ACTIVE; + + switch (pd_id) { + case PD_GPU: + pmu_bus_idle_req(BUS_ID_GPU, state); + break; + case PD_VIO: + pmu_bus_idle_req(BUS_ID_VIO, state); + break; + case PD_ISP0: + pmu_bus_idle_req(BUS_ID_ISP0, state); + break; + case PD_ISP1: + pmu_bus_idle_req(BUS_ID_ISP1, state); + break; + case PD_VO: + pmu_bus_idle_req(BUS_ID_VOPB, state); + pmu_bus_idle_req(BUS_ID_VOPL, state); + break; + case PD_HDCP: + pmu_bus_idle_req(BUS_ID_HDCP, state); + break; + case PD_TCPD0: + break; + case PD_TCPD1: + break; + case PD_GMAC: + pmu_bus_idle_req(BUS_ID_GMAC, state); + break; + case PD_CCI: + pmu_bus_idle_req(BUS_ID_CCIM0, state); + pmu_bus_idle_req(BUS_ID_CCIM1, state); + break; + case PD_SD: + pmu_bus_idle_req(BUS_ID_SD, state); + break; + case PD_EMMC: + pmu_bus_idle_req(BUS_ID_EMMC, state); + break; + case PD_EDP: + pmu_bus_idle_req(BUS_ID_EDP, state); + break; + case PD_SDIOAUDIO: + pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state); + break; + case PD_GIC: + pmu_bus_idle_req(BUS_ID_GIC, state); + break; + case PD_RGA: + pmu_bus_idle_req(BUS_ID_RGA, state); + break; + case PD_VCODEC: + pmu_bus_idle_req(BUS_ID_VCODEC, state); + break; + case PD_VDU: + pmu_bus_idle_req(BUS_ID_VDU, state); + break; + case PD_IEP: + pmu_bus_idle_req(BUS_ID_IEP, state); + break; + case PD_USB3: + pmu_bus_idle_req(BUS_ID_USB3, state); + break; + case PD_PERIHP: + pmu_bus_idle_req(BUS_ID_PERIHP, state); + break; + default: + /* Do nothing in default case */ + break; + } + + if (pd_state == pmu_pd_off) + pmu_power_domain_ctr(pd_id, pd_state); + +out: + return 0; +} + +static uint32_t pmu_powerdomain_state; + +static void pmu_power_domains_suspend(void) +{ + clk_gate_con_save(); + clk_gate_con_disable(); + qos_save(); + pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); + pmu_set_power_domain(PD_GPU, pmu_pd_off); + pmu_set_power_domain(PD_TCPD0, pmu_pd_off); + pmu_set_power_domain(PD_TCPD1, pmu_pd_off); + pmu_set_power_domain(PD_VO, pmu_pd_off); + pmu_set_power_domain(PD_ISP0, pmu_pd_off); + pmu_set_power_domain(PD_ISP1, pmu_pd_off); + pmu_set_power_domain(PD_HDCP, pmu_pd_off); + pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off); + pmu_set_power_domain(PD_GMAC, pmu_pd_off); + pmu_set_power_domain(PD_EDP, pmu_pd_off); + pmu_set_power_domain(PD_IEP, pmu_pd_off); + pmu_set_power_domain(PD_RGA, pmu_pd_off); + pmu_set_power_domain(PD_VCODEC, pmu_pd_off); + pmu_set_power_domain(PD_VDU, pmu_pd_off); + pmu_set_power_domain(PD_USB3, pmu_pd_off); + pmu_set_power_domain(PD_EMMC, pmu_pd_off); + pmu_set_power_domain(PD_VIO, pmu_pd_off); + pmu_set_power_domain(PD_SD, pmu_pd_off); + pmu_set_power_domain(PD_PERIHP, pmu_pd_off); + clk_gate_con_restore(); +} + +static void pmu_power_domains_resume(void) +{ + clk_gate_con_save(); + clk_gate_con_disable(); + if (!(pmu_powerdomain_state & BIT(PD_VDU))) + pmu_set_power_domain(PD_VDU, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_VCODEC))) + pmu_set_power_domain(PD_VCODEC, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_RGA))) + pmu_set_power_domain(PD_RGA, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_IEP))) + pmu_set_power_domain(PD_IEP, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_EDP))) + pmu_set_power_domain(PD_EDP, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_GMAC))) + pmu_set_power_domain(PD_GMAC, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO))) + pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_HDCP))) + pmu_set_power_domain(PD_HDCP, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_ISP1))) + 
pmu_set_power_domain(PD_ISP1, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_ISP0))) + pmu_set_power_domain(PD_ISP0, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_VO))) + pmu_set_power_domain(PD_VO, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_TCPD1))) + pmu_set_power_domain(PD_TCPD1, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_TCPD0))) + pmu_set_power_domain(PD_TCPD0, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_GPU))) + pmu_set_power_domain(PD_GPU, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_USB3))) + pmu_set_power_domain(PD_USB3, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_EMMC))) + pmu_set_power_domain(PD_EMMC, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_VIO))) + pmu_set_power_domain(PD_VIO, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_SD))) + pmu_set_power_domain(PD_SD, pmu_pd_on); + if (!(pmu_powerdomain_state & BIT(PD_PERIHP))) + pmu_set_power_domain(PD_PERIHP, pmu_pd_on); + qos_restore(); + clk_gate_con_restore(); +} + +void pmu_power_domains_on(void) +{ + clk_gate_con_disable(); + pmu_set_power_domain(PD_VDU, pmu_pd_on); + pmu_set_power_domain(PD_VCODEC, pmu_pd_on); + pmu_set_power_domain(PD_RGA, pmu_pd_on); + pmu_set_power_domain(PD_IEP, pmu_pd_on); + pmu_set_power_domain(PD_EDP, pmu_pd_on); + pmu_set_power_domain(PD_GMAC, pmu_pd_on); + pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on); + pmu_set_power_domain(PD_HDCP, pmu_pd_on); + pmu_set_power_domain(PD_ISP1, pmu_pd_on); + pmu_set_power_domain(PD_ISP0, pmu_pd_on); + pmu_set_power_domain(PD_VO, pmu_pd_on); + pmu_set_power_domain(PD_TCPD1, pmu_pd_on); + pmu_set_power_domain(PD_TCPD0, pmu_pd_on); + pmu_set_power_domain(PD_GPU, pmu_pd_on); +} + +void rk3399_flush_l2_b(void) +{ + uint32_t wait_cnt = 0; + + mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); + dsb(); + + /* + * The Big cluster flush L2 cache took ~4ms by default, give 10ms for + * the enough margin. 
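+ * The loop below polls L2_FLUSHDONE_CLUSTER_B in 10 us steps and warns
+ * once roughly 10 ms (1000 iterations) have elapsed.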
+ */ + while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & + BIT(L2_FLUSHDONE_CLUSTER_B))) { + wait_cnt++; + udelay(10); + if (wait_cnt == 10000 / 10) + WARN("L2 cache flush on suspend took longer than 10ms\n"); + } + + mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); +} + +static void pmu_scu_b_pwrdn(void) +{ + uint32_t wait_cnt = 0; + + if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & + (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) != + (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) { + ERROR("%s: not all cpus is off\n", __func__); + return; + } + + rk3399_flush_l2_b(); + + mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); + + while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & + BIT(STANDBY_BY_WFIL2_CLUSTER_B))) { + wait_cnt++; + udelay(1); + if (wait_cnt >= MAX_WAIT_COUNT) + ERROR("%s:wait cluster-b l2(%x)\n", __func__, + mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)); + } +} + +static void pmu_scu_b_pwrup(void) +{ + mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); +} + +static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id) +{ + assert(cpu_id < PLATFORM_CORE_COUNT); + return core_pm_cfg_info[cpu_id]; +} + +static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value) +{ + assert(cpu_id < PLATFORM_CORE_COUNT); + core_pm_cfg_info[cpu_id] = value; +#if !USE_COHERENT_MEM + flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id], + sizeof(uint32_t)); +#endif +} + +static int cpus_power_domain_on(uint32_t cpu_id) +{ + uint32_t cfg_info; + uint32_t cpu_pd = PD_CPUL0 + cpu_id; + /* + * There are two ways to powering on or off on core. + * 1) Control it power domain into on or off in PMU_PWRDN_CON reg + * 2) Enable the core power manage in PMU_CORE_PM_CON reg, + * then, if the core enter into wfi, it power domain will be + * powered off automatically. 
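+ * The method recorded in core_pm_cfg_info decides which path is taken
+ * below.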
+ */ + + cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id); + + if (cfg_info == core_pwr_pd) { + /* disable core_pm cfg */ + mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), + CORES_PM_DISABLE); + /* if the cores have be on, power off it firstly */ + if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { + mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0); + pmu_power_domain_ctr(cpu_pd, pmu_pd_off); + } + + pmu_power_domain_ctr(cpu_pd, pmu_pd_on); + } else { + if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { + WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id); + return -EINVAL; + } + + mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), + BIT(core_pm_sft_wakeup_en)); + dsb(); + } + + return 0; +} + +static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg) +{ + uint32_t cpu_pd; + uint32_t core_pm_value; + + cpu_pd = PD_CPUL0 + cpu_id; + if (pmu_power_domain_st(cpu_pd) == pmu_pd_off) + return 0; + + if (pd_cfg == core_pwr_pd) { + if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK)) + return -EINVAL; + + /* disable core_pm cfg */ + mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), + CORES_PM_DISABLE); + + set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); + pmu_power_domain_ctr(cpu_pd, pmu_pd_off); + } else { + set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); + + core_pm_value = BIT(core_pm_en); + if (pd_cfg == core_pwr_wfi_int) + core_pm_value |= BIT(core_pm_int_wakeup_en); + mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), + core_pm_value); + dsb(); + } + + return 0; +} + +static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state) +{ + uint32_t cpu_id = plat_my_core_pos(); + uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st; + + assert(cpu_id < PLATFORM_CORE_COUNT); + + if (lvl_state == PLAT_MAX_OFF_STATE) { + if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) { + pll_id = ALPLL_ID; + clst_st_msk = CLST_L_CPUS_MSK; + } else { + pll_id = ABPLL_ID; + clst_st_msk = CLST_B_CPUS_MSK << + PLATFORM_CLUSTER0_CORE_COUNT; + } + + clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id)); + + pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); + + pmu_st &= clst_st_msk; + + if (pmu_st == clst_st_chk_msk) { + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), + PLL_SLOW_MODE); + + clst_warmboot_data[pll_id] = PMU_CLST_RET; + + pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); + pmu_st &= clst_st_msk; + if (pmu_st == clst_st_chk_msk) + return; + /* + * it is mean that others cpu is up again, + * we must resume the cfg at once. 
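+ * (i.e. another core in this cluster powered back up between the two
+ * PMU_PWRDN_ST reads, so switch the PLL back to normal mode and clear
+ * the warmboot flag straight away.)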
+ */ + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), + PLL_NOMAL_MODE); + clst_warmboot_data[pll_id] = 0; + } + } +} + +static int clst_pwr_domain_resume(plat_local_state_t lvl_state) +{ + uint32_t cpu_id = plat_my_core_pos(); + uint32_t pll_id, pll_st; + + assert(cpu_id < PLATFORM_CORE_COUNT); + + if (lvl_state == PLAT_MAX_OFF_STATE) { + if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) + pll_id = ALPLL_ID; + else + pll_id = ABPLL_ID; + + pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >> + PLL_MODE_SHIFT; + + if (pll_st != NORMAL_MODE) { + WARN("%s: clst (%d) is in error mode (%d)\n", + __func__, pll_id, pll_st); + return -1; + } + } + + return 0; +} + +static void nonboot_cpus_off(void) +{ + uint32_t boot_cpu, cpu; + + boot_cpu = plat_my_core_pos(); + + /* turn off noboot cpus */ + for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { + if (cpu == boot_cpu) + continue; + cpus_power_domain_off(cpu, core_pwr_pd); + } +} + +int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint) +{ + uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); + + assert(cpu_id < PLATFORM_CORE_COUNT); + assert(cpuson_flags[cpu_id] == 0); + cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; + cpuson_entry_point[cpu_id] = entrypoint; + dsb(); + + cpus_power_domain_on(cpu_id); + + return PSCI_E_SUCCESS; +} + +int rockchip_soc_cores_pwr_dm_off(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + cpus_power_domain_off(cpu_id, core_pwr_wfi); + + return PSCI_E_SUCCESS; +} + +int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl, + plat_local_state_t lvl_state) +{ + if (lvl == MPIDR_AFFLVL1) { + clst_pwr_domain_suspend(lvl_state); + } + + return PSCI_E_SUCCESS; +} + +int rockchip_soc_cores_pwr_dm_suspend(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + assert(cpu_id < PLATFORM_CORE_COUNT); + assert(cpuson_flags[cpu_id] == 0); + cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN; + cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint(); + dsb(); + + cpus_power_domain_off(cpu_id, core_pwr_wfi_int); + + return PSCI_E_SUCCESS; +} + +int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state) +{ + if (lvl == MPIDR_AFFLVL1) { + clst_pwr_domain_suspend(lvl_state); + } + + return PSCI_E_SUCCESS; +} + +int rockchip_soc_cores_pwr_dm_on_finish(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), + CORES_PM_DISABLE); + return PSCI_E_SUCCESS; +} + +int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl, + plat_local_state_t lvl_state) +{ + if (lvl == MPIDR_AFFLVL1) { + clst_pwr_domain_resume(lvl_state); + } + + return PSCI_E_SUCCESS; +} + +int rockchip_soc_cores_pwr_dm_resume(void) +{ + uint32_t cpu_id = plat_my_core_pos(); + + /* Disable core_pm */ + mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE); + + return PSCI_E_SUCCESS; +} + +int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state) +{ + if (lvl == MPIDR_AFFLVL1) { + clst_pwr_domain_resume(lvl_state); + } + + return PSCI_E_SUCCESS; +} + +/** + * init_pmu_counts - Init timing counts in the PMU register area + * + * At various points when we power up or down parts of the system we need + * a delay to wait for power / clocks to become stable. The PMU has counters + * to help software do the delay properly. Basically, it works like this: + * - Software sets up counter values + * - When software turns on something in the PMU, the counter kicks off + * - The hardware sets a bit automatically when the counter has finished and + * software knows that the initialization is done. 
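+ *
+ * The CYCL_32K_CNT_MS() / CYCL_24M_CNT_MS() / CYCL_24M_CNT_US() helpers
+ * used below convert milliseconds or microseconds into these counter
+ * units.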
+ * + * It's software's job to setup these counters. The hardware power on default + * for these settings is conservative, setting everything to 0x5dc0 + * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts). + * + * Note that some of these counters are only really used at suspend/resume + * time (for instance, that's the only time we turn off/on the oscillator) and + * others are used during normal runtime (like turning on/off a CPU or GPU) but + * it doesn't hurt to init everything at boot. + * + * Also note that these counters can run off the 32 kHz clock or the 24 MHz + * clock. While the 24 MHz clock can give us more precision, it's not always + * available (like when we turn the oscillator off at sleep time). The + * pmu_use_lf (lf: low freq) is available in power mode. Current understanding + * is that counts work like this: + * IF (pmu_use_lf == 0) || (power_mode_en == 0) + * use the 24M OSC for counts + * ELSE + * use the 32K OSC for counts + * + * Notes: + * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the moment + * we always keep that 0. This apparently choose between using the PLL as + * the source for the PMU vs. the 24M clock. If we ever set it to 1 we + * should consider how it affects these counts (if at all). + * - The power_mode_en is documented to auto-clear automatically when we leave + * "power mode". That's why most clocks are on 24M. Only timings used when + * in "power mode" are 32k. + * - In some cases the kernel may override these counts. + * + * The PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are important CNTs + * in power mode, we need to ensure that they are available. + */ +static void init_pmu_counts(void) +{ + /* COUNTS FOR INSIDE POWER MODE */ + + /* + * From limited testing, need PMU stable >= 2ms, but go overkill + * and choose 30 ms to match testing on past SoCs. Also let + * OSC have 30 ms for stabilization. + */ + mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30)); + mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30)); + + /* Unclear what these should be; try 3 ms */ + mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3)); + + /* Unclear what this should be, but set the default explicitly */ + mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0); + + /* COUNTS FOR OUTSIDE POWER MODE */ + + /* Put something sorta conservative here until we know better */ + mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3)); + mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1)); + mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1)); + mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1)); + + /* + * when we enable PMU_CLR_PERILP, it will shut down the SRAM, but + * M0 code run in SRAM, and we need it to check whether cpu enter + * FSM status, so we must wait M0 finish their code and enter WFI, + * then we can shutdown SRAM, according FSM order: + * ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN + * we can add delay when shutdown ST_SCU_L_PWRDN to guarantee M0 get + * the FSM status and enter WFI, then enable PMU_CLR_PERILP. + */ + mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5)); + mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1)); + + /* + * Set CPU/GPU to 1 us. + * + * NOTE: Even though ATF doesn't configure the GPU we'll still setup + * counts here. After all ATF controls all these other bits and also + * chooses which clock these counters use. 
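+ * The GPU power up/down counters therefore get the same 1 us value as
+ * the big-cluster SCU counters.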
+ */ + mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1)); + mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1)); + mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1)); + mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1)); +} + +static uint32_t clk_ddrc_save; + +static void sys_slp_config(void) +{ + uint32_t slp_mode_cfg = 0; + + /* keep enabling clk_ddrc_bpll_src_en gate for DDRC */ + clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3)); + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1)); + + prepare_abpll_for_ddrctrl(); + sram_func_set_ddrctl_pll(ABPLL_ID); + + mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP); + mmio_write_32(PMU_BASE + PMU_CCI500_CON, + BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) | + BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) | + BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG)); + + mmio_write_32(PMU_BASE + PMU_ADB400_CON, + BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) | + BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) | + BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW)); + + slp_mode_cfg = BIT(PMU_PWR_MODE_EN) | + BIT(PMU_WKUP_RST_EN) | + BIT(PMU_INPUT_CLAMP_EN) | + BIT(PMU_POWER_OFF_REQ_CFG) | + BIT(PMU_CPU0_PD_EN) | + BIT(PMU_L2_FLUSH_EN) | + BIT(PMU_L2_IDLE_EN) | + BIT(PMU_SCU_PD_EN) | + BIT(PMU_CCI_PD_EN) | + BIT(PMU_CLK_CORE_SRC_GATE_EN) | + BIT(PMU_ALIVE_USE_LF) | + BIT(PMU_SREF0_ENTER_EN) | + BIT(PMU_SREF1_ENTER_EN) | + BIT(PMU_DDRC0_GATING_EN) | + BIT(PMU_DDRC1_GATING_EN) | + BIT(PMU_DDRIO0_RET_EN) | + BIT(PMU_DDRIO0_RET_DE_REQ) | + BIT(PMU_DDRIO1_RET_EN) | + BIT(PMU_DDRIO1_RET_DE_REQ) | + BIT(PMU_CENTER_PD_EN) | + BIT(PMU_PERILP_PD_EN) | + BIT(PMU_CLK_PERILP_SRC_GATE_EN) | + BIT(PMU_PLL_PD_EN) | + BIT(PMU_CLK_CENTER_SRC_GATE_EN) | + BIT(PMU_OSC_DIS) | + BIT(PMU_PMU_USE_LF); + + mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN)); + mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg); + + mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW); + mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K); + mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */ +} + +static void set_hw_idle(uint32_t hw_idle) +{ + mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle); +} + +static void clr_hw_idle(uint32_t hw_idle) +{ + mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle); +} + +static uint32_t iomux_status[12]; +static uint32_t pull_mode_status[12]; +static uint32_t gpio_direction[3]; +static uint32_t gpio_2_4_clk_gate; + +static void suspend_apio(void) +{ + struct bl_aux_rk_apio_info *suspend_apio; + int i; + + suspend_apio = plat_get_rockchip_suspend_apio(); + + if (!suspend_apio) + return; + + /* save gpio2 ~ gpio4 iomux and pull mode */ + for (i = 0; i < 12; i++) { + iomux_status[i] = mmio_read_32(GRF_BASE + + GRF_GPIO2A_IOMUX + i * 4); + pull_mode_status[i] = mmio_read_32(GRF_BASE + + GRF_GPIO2A_P + i * 4); + } + + /* store gpio2 ~ gpio4 clock gate state */ + gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >> + PCLK_GPIO2_GATE_SHIFT) & 0x07; + + /* enable gpio2 ~ gpio4 clock gate */ + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31), + BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT)); + + /* save gpio2 ~ gpio4 direction */ + gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04); + gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04); + gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04); + + /* apio1 charge gpio3a0 ~ gpio3c7 */ + if (suspend_apio->apio1) { + + /* set gpio3a0 ~ gpio3c7 iomux to gpio */ + mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + mmio_write_32(GRF_BASE + 
GRF_GPIO3B_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + + /* set gpio3a0 ~ gpio3c7 pull mode to pull none */ + mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0); + mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0); + mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0); + + /* set gpio3a0 ~ gpio3c7 to input */ + mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff); + } + + /* apio2 charge gpio2a0 ~ gpio2b4 */ + if (suspend_apio->apio2) { + + /* set gpio2a0 ~ gpio2b4 iomux to gpio */ + mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + + /* set gpio2a0 ~ gpio2b4 pull mode to pull none */ + mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0); + mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0); + + /* set gpio2a0 ~ gpio2b4 to input */ + mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff); + } + + /* apio3 charge gpio2c0 ~ gpio2d4*/ + if (suspend_apio->apio3) { + + /* set gpio2a0 ~ gpio2b4 iomux to gpio */ + mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + + /* set gpio2c0 ~ gpio2d4 pull mode to pull none */ + mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0); + mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0); + + /* set gpio2c0 ~ gpio2d4 to input */ + mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000); + } + + /* apio4 charge gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */ + if (suspend_apio->apio4) { + + /* set gpio4c0 ~ gpio4d6 iomux to gpio */ + mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + + /* set gpio4c0 ~ gpio4d6 pull mode to pull none */ + mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0); + mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0); + + /* set gpio4c0 ~ gpio4d6 to input */ + mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000); + } + + /* apio5 charge gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7*/ + if (suspend_apio->apio5) { + /* set gpio3d0 ~ gpio4a7 iomux to gpio */ + mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX, + REG_SOC_WMSK | GRF_IOMUX_GPIO); + + /* set gpio3d0 ~ gpio4a7 pull mode to pull none */ + mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0); + mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0); + + /* set gpio4c0 ~ gpio4d6 to input */ + mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000); + mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff); + } +} + +static void resume_apio(void) +{ + struct bl_aux_rk_apio_info *suspend_apio; + int i; + + suspend_apio = plat_get_rockchip_suspend_apio(); + + if (!suspend_apio) + return; + + for (i = 0; i < 12; i++) { + mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4, + REG_SOC_WMSK | pull_mode_status[i]); + mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4, + REG_SOC_WMSK | iomux_status[i]); + } + + /* set gpio2 ~ gpio4 direction back to store value */ + mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]); + mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]); + mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]); + + /* set gpio2 ~ gpio4 clock gate back to store value */ + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31), + BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07, + PCLK_GPIO2_GATE_SHIFT)); +} + +static void suspend_gpio(void) +{ + 
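+	/* drive each board-specified suspend GPIO to its suspend polarity */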
struct bl_aux_gpio_info *suspend_gpio; + uint32_t count; + int i; + + suspend_gpio = plat_get_rockchip_suspend_gpio(&count); + + for (i = 0; i < count; i++) { + gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity); + gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT); + udelay(1); + } +} + +static void resume_gpio(void) +{ + struct bl_aux_gpio_info *suspend_gpio; + uint32_t count; + int i; + + suspend_gpio = plat_get_rockchip_suspend_gpio(&count); + + for (i = count - 1; i >= 0; i--) { + gpio_set_value(suspend_gpio[i].index, + !suspend_gpio[i].polarity); + gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT); + udelay(1); + } +} + +void sram_save(void) +{ + size_t text_size = (char *)&__bl31_sram_text_real_end - + (char *)&__bl31_sram_text_start; + size_t data_size = (char *)&__bl31_sram_data_real_end - + (char *)&__bl31_sram_data_start; + size_t incbin_size = (char *)&__sram_incbin_real_end - + (char *)&__sram_incbin_start; + + memcpy(&store_sram[0], &__bl31_sram_text_start, text_size); + memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size); + memcpy(&store_sram[text_size + data_size], &__sram_incbin_start, + incbin_size); +} + +void sram_restore(void) +{ + size_t text_size = (char *)&__bl31_sram_text_real_end - + (char *)&__bl31_sram_text_start; + size_t data_size = (char *)&__bl31_sram_data_real_end - + (char *)&__bl31_sram_data_start; + size_t incbin_size = (char *)&__sram_incbin_real_end - + (char *)&__sram_incbin_start; + + memcpy(&__bl31_sram_text_start, &store_sram[0], text_size); + memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size); + memcpy(&__sram_incbin_start, &store_sram[text_size + data_size], + incbin_size); +} + +struct uart_debug { + uint32_t uart_dll; + uint32_t uart_dlh; + uint32_t uart_ier; + uint32_t uart_fcr; + uint32_t uart_mcr; + uint32_t uart_lcr; +}; + +#define UART_DLL 0x00 +#define UART_DLH 0x04 +#define UART_IER 0x04 +#define UART_FCR 0x08 +#define UART_LCR 0x0c +#define UART_MCR 0x10 +#define UARTSRR 0x88 + +#define UART_RESET BIT(0) +#define UARTFCR_FIFOEN BIT(0) +#define RCVR_FIFO_RESET BIT(1) +#define XMIT_FIFO_RESET BIT(2) +#define DIAGNOSTIC_MODE BIT(4) +#define UARTLCR_DLAB BIT(7) + +static struct uart_debug uart_save; + +void suspend_uart(void) +{ + uint32_t uart_base = rockchip_get_uart_base(); + + if (uart_base == 0) + return; + + uart_save.uart_lcr = mmio_read_32(uart_base + UART_LCR); + uart_save.uart_ier = mmio_read_32(uart_base + UART_IER); + uart_save.uart_mcr = mmio_read_32(uart_base + UART_MCR); + mmio_write_32(uart_base + UART_LCR, + uart_save.uart_lcr | UARTLCR_DLAB); + uart_save.uart_dll = mmio_read_32(uart_base + UART_DLL); + uart_save.uart_dlh = mmio_read_32(uart_base + UART_DLH); + mmio_write_32(uart_base + UART_LCR, uart_save.uart_lcr); +} + +void resume_uart(void) +{ + uint32_t uart_base = rockchip_get_uart_base(); + uint32_t uart_lcr; + + if (uart_base == 0) + return; + + mmio_write_32(uart_base + UARTSRR, + XMIT_FIFO_RESET | RCVR_FIFO_RESET | UART_RESET); + + uart_lcr = mmio_read_32(uart_base + UART_LCR); + mmio_write_32(uart_base + UART_MCR, DIAGNOSTIC_MODE); + mmio_write_32(uart_base + UART_LCR, uart_lcr | UARTLCR_DLAB); + mmio_write_32(uart_base + UART_DLL, uart_save.uart_dll); + mmio_write_32(uart_base + UART_DLH, uart_save.uart_dlh); + mmio_write_32(uart_base + UART_LCR, uart_save.uart_lcr); + mmio_write_32(uart_base + UART_IER, uart_save.uart_ier); + mmio_write_32(uart_base + UART_FCR, UARTFCR_FIFOEN); + mmio_write_32(uart_base + UART_MCR, uart_save.uart_mcr); +} + +void 
save_usbphy(void) +{ + store_usbphy0[0] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL0); + store_usbphy0[1] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL2); + store_usbphy0[2] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL3); + store_usbphy0[3] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL12); + store_usbphy0[4] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL13); + store_usbphy0[5] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL15); + store_usbphy0[6] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL16); + + store_usbphy1[0] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL0); + store_usbphy1[1] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL2); + store_usbphy1[2] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL3); + store_usbphy1[3] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL12); + store_usbphy1[4] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL13); + store_usbphy1[5] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL15); + store_usbphy1[6] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL16); +} + +void restore_usbphy(void) +{ + mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL0, + REG_SOC_WMSK | store_usbphy0[0]); + mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL2, + REG_SOC_WMSK | store_usbphy0[1]); + mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL3, + REG_SOC_WMSK | store_usbphy0[2]); + mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL12, + REG_SOC_WMSK | store_usbphy0[3]); + mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL13, + REG_SOC_WMSK | store_usbphy0[4]); + mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL15, + REG_SOC_WMSK | store_usbphy0[5]); + mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL16, + REG_SOC_WMSK | store_usbphy0[6]); + + mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL0, + REG_SOC_WMSK | store_usbphy1[0]); + mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL2, + REG_SOC_WMSK | store_usbphy1[1]); + mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL3, + REG_SOC_WMSK | store_usbphy1[2]); + mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL12, + REG_SOC_WMSK | store_usbphy1[3]); + mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL13, + REG_SOC_WMSK | store_usbphy1[4]); + mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL15, + REG_SOC_WMSK | store_usbphy1[5]); + mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL16, + REG_SOC_WMSK | store_usbphy1[6]); +} + +void grf_register_save(void) +{ + int i; + + store_grf_soc_con0 = mmio_read_32(GRF_BASE + GRF_SOC_CON(0)); + store_grf_soc_con1 = mmio_read_32(GRF_BASE + GRF_SOC_CON(1)); + store_grf_soc_con2 = mmio_read_32(GRF_BASE + GRF_SOC_CON(2)); + store_grf_soc_con3 = mmio_read_32(GRF_BASE + GRF_SOC_CON(3)); + store_grf_soc_con4 = mmio_read_32(GRF_BASE + GRF_SOC_CON(4)); + store_grf_soc_con7 = mmio_read_32(GRF_BASE + GRF_SOC_CON(7)); + + for (i = 0; i < 4; i++) + store_grf_ddrc_con[i] = + mmio_read_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4); + + store_grf_io_vsel = mmio_read_32(GRF_BASE + GRF_IO_VSEL); +} + +void grf_register_restore(void) +{ + int i; + + mmio_write_32(GRF_BASE + GRF_SOC_CON(0), + REG_SOC_WMSK | store_grf_soc_con0); + mmio_write_32(GRF_BASE + GRF_SOC_CON(1), + REG_SOC_WMSK | store_grf_soc_con1); + mmio_write_32(GRF_BASE + GRF_SOC_CON(2), + REG_SOC_WMSK | store_grf_soc_con2); + mmio_write_32(GRF_BASE + GRF_SOC_CON(3), + REG_SOC_WMSK | store_grf_soc_con3); + mmio_write_32(GRF_BASE + GRF_SOC_CON(4), + REG_SOC_WMSK | store_grf_soc_con4); + mmio_write_32(GRF_BASE + GRF_SOC_CON(7), + REG_SOC_WMSK | store_grf_soc_con7); + + for (i = 0; i < 4; i++) + mmio_write_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4, + REG_SOC_WMSK | store_grf_ddrc_con[i]); + + mmio_write_32(GRF_BASE + GRF_IO_VSEL, REG_SOC_WMSK | store_grf_io_vsel); +} + +void cru_register_save(void) +{ + int i; + + for (i = 0; i <= CRU_SDIO0_CON1; i 
= i + 4) + store_cru[i / 4] = mmio_read_32(CRU_BASE + i); +} + +void cru_register_restore(void) +{ + int i; + + for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) { + + /* + * since DPLL, CRU_CLKSEL_CON6 have been restore in + * dmc_resume, ABPLL will resote later, so skip them + */ + if ((i == CRU_CLKSEL_CON6) || + (i >= CRU_PLL_CON(ABPLL_ID, 0) && + i <= CRU_PLL_CON(DPLL_ID, 5))) + continue; + + if ((i == CRU_PLL_CON(ALPLL_ID, 2)) || + (i == CRU_PLL_CON(CPLL_ID, 2)) || + (i == CRU_PLL_CON(GPLL_ID, 2)) || + (i == CRU_PLL_CON(NPLL_ID, 2)) || + (i == CRU_PLL_CON(VPLL_ID, 2))) + mmio_write_32(CRU_BASE + i, store_cru[i / 4]); + /* + * CRU_GLB_CNT_TH and CRU_CLKSEL_CON97~CRU_CLKSEL_CON107 + * not need do high 16bit mask + */ + else if ((i > 0x27c && i < 0x2b0) || (i == 0x508)) + mmio_write_32(CRU_BASE + i, store_cru[i / 4]); + else + mmio_write_32(CRU_BASE + i, + REG_SOC_WMSK | store_cru[i / 4]); + } +} + +void wdt_register_save(void) +{ + int i; + + for (i = 0; i < 2; i++) { + store_wdt0[i] = mmio_read_32(WDT0_BASE + i * 4); + store_wdt1[i] = mmio_read_32(WDT1_BASE + i * 4); + } + pmu_enable_watchdog0 = (uint8_t) store_wdt0[0] & 0x1; +} + +void wdt_register_restore(void) +{ + int i; + + for (i = 1; i >= 0; i--) { + mmio_write_32(WDT0_BASE + i * 4, store_wdt0[i]); + mmio_write_32(WDT1_BASE + i * 4, store_wdt1[i]); + } + + /* write 0x76 to cnt_restart to keep watchdog alive */ + mmio_write_32(WDT0_BASE + 0x0c, 0x76); + mmio_write_32(WDT1_BASE + 0x0c, 0x76); +} + +int rockchip_soc_sys_pwr_dm_suspend(void) +{ + uint32_t wait_cnt = 0; + uint32_t status = 0; + + ddr_prepare_for_sys_suspend(); + dmc_suspend(); + pmu_scu_b_pwrdn(); + + gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx); + gicv3_distif_save(&dist_ctx); + + /* need to save usbphy before shutdown PERIHP PD */ + save_usbphy(); + + pmu_power_domains_suspend(); + set_hw_idle(BIT(PMU_CLR_CENTER1) | + BIT(PMU_CLR_ALIVE) | + BIT(PMU_CLR_MSCH0) | + BIT(PMU_CLR_MSCH1) | + BIT(PMU_CLR_CCIM0) | + BIT(PMU_CLR_CCIM1) | + BIT(PMU_CLR_CENTER) | + BIT(PMU_CLR_PERILP) | + BIT(PMU_CLR_PERILPM0) | + BIT(PMU_CLR_GIC)); + set_pmu_rsthold(); + sys_slp_config(); + + m0_configure_execute_addr(M0PMU_BINCODE_BASE); + m0_start(); + + pmu_sgrf_rst_hld(); + + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), + ((uintptr_t)&pmu_cpuson_entrypoint >> + CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK); + + mmio_write_32(PMU_BASE + PMU_ADB400_CON, + BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) | + BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) | + BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW)); + dsb(); + status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) | + BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) | + BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST); + while ((mmio_read_32(PMU_BASE + + PMU_ADB400_ST) & status) != status) { + wait_cnt++; + if (wait_cnt >= MAX_WAIT_COUNT) { + ERROR("%s:wait cluster-b l2(%x)\n", __func__, + mmio_read_32(PMU_BASE + PMU_ADB400_ST)); + panic(); + } + udelay(1); + } + mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN)); + + wdt_register_save(); + secure_watchdog_gate(); + + /* + * Disabling PLLs/PWM/DVFS is approaching WFI which is + * the last steps in suspend. 
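+ *
+ * After that, the register state needed for resume (APIO, GPIO, UART,
+ * GRF, CRU and SRAM contents) is saved below.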
+ */ + disable_dvfs_plls(); + disable_pwms(); + disable_nodvfs_plls(); + + suspend_apio(); + suspend_gpio(); + suspend_uart(); + grf_register_save(); + cru_register_save(); + sram_save(); + plat_rockchip_save_gpio(); + + return 0; +} + +int rockchip_soc_sys_pwr_dm_resume(void) +{ + uint32_t wait_cnt = 0; + uint32_t status = 0; + + plat_rockchip_restore_gpio(); + cru_register_restore(); + grf_register_restore(); + wdt_register_restore(); + resume_uart(); + resume_apio(); + resume_gpio(); + enable_nodvfs_plls(); + enable_pwms(); + /* PWM regulators take time to come up; give 300us to be safe. */ + udelay(300); + enable_dvfs_plls(); + + secure_sgrf_init(); + secure_sgrf_ddr_rgn_init(); + + /* restore clk_ddrc_bpll_src_en gate */ + mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), + BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0)); + + /* + * The wakeup status is not cleared by itself, we need to clear it + * manually. Otherwise we will alway query some interrupt next time. + * + * NOTE: If the kernel needs to query this, we might want to stash it + * somewhere. + */ + mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff); + mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00); + + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), + (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) | + CPU_BOOT_ADDR_WMASK); + + mmio_write_32(PMU_BASE + PMU_CCI500_CON, + WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) | + WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) | + WMSK_BIT(PMU_QGATING_CCI500_CFG)); + dsb(); + mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON, + BIT(PMU_SCU_B_PWRDWN_EN)); + + mmio_write_32(PMU_BASE + PMU_ADB400_CON, + WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) | + WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) | + WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) | + WMSK_BIT(PMU_CLR_CORE_L_HW) | + WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) | + WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW)); + + status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) | + BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) | + BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST); + + while ((mmio_read_32(PMU_BASE + + PMU_ADB400_ST) & status)) { + wait_cnt++; + if (wait_cnt >= MAX_WAIT_COUNT) { + ERROR("%s:wait cluster-b l2(%x)\n", __func__, + mmio_read_32(PMU_BASE + PMU_ADB400_ST)); + panic(); + } + udelay(1); + } + + pmu_scu_b_pwrup(); + pmu_power_domains_resume(); + + restore_abpll(); + clr_hw_idle(BIT(PMU_CLR_CENTER1) | + BIT(PMU_CLR_ALIVE) | + BIT(PMU_CLR_MSCH0) | + BIT(PMU_CLR_MSCH1) | + BIT(PMU_CLR_CCIM0) | + BIT(PMU_CLR_CCIM1) | + BIT(PMU_CLR_CENTER) | + BIT(PMU_CLR_PERILP) | + BIT(PMU_CLR_PERILPM0) | + BIT(PMU_CLR_GIC)); + + gicv3_distif_init_restore(&dist_ctx); + gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx); + plat_rockchip_gic_cpuif_enable(); + m0_stop(); + + restore_usbphy(); + + ddr_prepare_for_sys_resume(); + + return 0; +} + +void __dead2 rockchip_soc_soft_reset(void) +{ + struct bl_aux_gpio_info *rst_gpio; + + rst_gpio = plat_get_rockchip_gpio_reset(); + + if (rst_gpio) { + gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT); + gpio_set_value(rst_gpio->index, rst_gpio->polarity); + } else { + soc_global_soft_reset(); + } + + while (1) + ; +} + +void __dead2 rockchip_soc_system_off(void) +{ + struct bl_aux_gpio_info *poweroff_gpio; + + poweroff_gpio = plat_get_rockchip_gpio_poweroff(); + + if (poweroff_gpio) { + /* + * if use tsadc over temp pin(GPIO1A6) as shutdown gpio, + * need to set this pin iomux back to gpio function + */ + if (poweroff_gpio->index == TSADC_INT_PIN) { + mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX, + GPIO1A6_IOMUX); + } + gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT); + 
gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity); + } else { + WARN("Do nothing when system off\n"); + } + + while (1) + ; +} + +void rockchip_plat_mmu_el3(void) +{ + size_t sram_size; + + /* sram.text size */ + sram_size = (char *)&__bl31_sram_text_end - + (char *)&__bl31_sram_text_start; + mmap_add_region((unsigned long)&__bl31_sram_text_start, + (unsigned long)&__bl31_sram_text_start, + sram_size, MT_MEMORY | MT_RO | MT_SECURE); + + /* sram.data size */ + sram_size = (char *)&__bl31_sram_data_end - + (char *)&__bl31_sram_data_start; + mmap_add_region((unsigned long)&__bl31_sram_data_start, + (unsigned long)&__bl31_sram_data_start, + sram_size, MT_MEMORY | MT_RW | MT_SECURE); + + sram_size = (char *)&__bl31_sram_stack_end - + (char *)&__bl31_sram_stack_start; + mmap_add_region((unsigned long)&__bl31_sram_stack_start, + (unsigned long)&__bl31_sram_stack_start, + sram_size, MT_MEMORY | MT_RW | MT_SECURE); + + sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start; + mmap_add_region((unsigned long)&__sram_incbin_start, + (unsigned long)&__sram_incbin_start, + sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE); +} + +void plat_rockchip_pmu_init(void) +{ + uint32_t cpu; + + rockchip_pd_lock_init(); + + /* register requires 32bits mode, switch it to 32 bits */ + cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot; + + for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) + cpuson_flags[cpu] = 0; + + for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++) + clst_warmboot_data[cpu] = 0; + + /* config cpu's warm boot address */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), + (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) | + CPU_BOOT_ADDR_WMASK); + mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE); + + /* + * Enable Schmitt trigger for better 32 kHz input signal, which is + * important for suspend/resume reliability among other things. + */ + mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE); + + init_pmu_counts(); + + nonboot_cpus_off(); + + INFO("%s(%d): pd status %x\n", __func__, __LINE__, + mmio_read_32(PMU_BASE + PMU_PWRDN_ST)); +} diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.h b/plat/rockchip/rk3399/drivers/pmu/pmu.h new file mode 100644 index 0000000..bb7de50 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/pmu/pmu.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PMU_H +#define PMU_H + +#include <pmu_bits.h> +#include <pmu_regs.h> +#include <soc.h> + +/* Allocate sp reginon in pmusram */ +#define PSRAM_SP_SIZE 0x80 +#define PSRAM_SP_BOTTOM (PSRAM_SP_TOP - PSRAM_SP_SIZE) + +/***************************************************************************** + * Common define for per soc pmu.h + *****************************************************************************/ +/* The ways of cores power domain contorlling */ +enum cores_pm_ctr_mode { + core_pwr_pd = 0, + core_pwr_wfi = 1, + core_pwr_wfi_int = 2 +}; + +/***************************************************************************** + * pmu con,reg + *****************************************************************************/ +#define PMU_WKUP_CFG(n) ((n) * 4) + +#define PMU_CORE_PM_CON(cpu) (0xc0 + (cpu * 4)) + +/* the shift of bits for cores status */ +enum pmu_core_pwrst_shift { + clstl_cpu_wfe = 2, + clstl_cpu_wfi = 6, + clstb_cpu_wfe = 12, + clstb_cpu_wfi = 16 +}; + +#define CKECK_WFE_MSK 0x1 +#define CKECK_WFI_MSK 0x10 +#define CKECK_WFEI_MSK 0x11 + +/* Specific features required */ +#define AP_PWROFF 0x0a + +#define GPIO0A0_SMT_ENABLE BITS_WITH_WMASK(1, 3, 0) +#define GPIO1A6_IOMUX BITS_WITH_WMASK(0, 3, 12) + +#define TSADC_INT_PIN 38 +#define CORES_PM_DISABLE 0x0 + +#define PD_CTR_LOOP 10000 +#define CHK_CPU_LOOP 500 +#define MAX_WAIT_COUNT 1000 + +#define GRF_SOC_CON4 0x0e210 + +#define PMUGRF_GPIO0A_SMT 0x0120 +#define PMUGRF_SOC_CON0 0x0180 + +#define CCI_FORCE_WAKEUP WMSK_BIT(8) +#define EXTERNAL_32K WMSK_BIT(0) + +#define PLL_PD_HW 0xff +#define IOMUX_CLK_32K 0x00030002 +#define NOC_AUTO_ENABLE 0x3fffffff + +#define SAVE_QOS(array, NAME) \ + RK3399_CPU_AXI_SAVE_QOS(array, CPU_AXI_##NAME##_QOS_BASE) +#define RESTORE_QOS(array, NAME) \ + RK3399_CPU_AXI_RESTORE_QOS(array, CPU_AXI_##NAME##_QOS_BASE) + +#define RK3399_CPU_AXI_SAVE_QOS(array, base) do { \ + array[0] = mmio_read_32(base + CPU_AXI_QOS_ID_COREID); \ + array[1] = mmio_read_32(base + CPU_AXI_QOS_REVISIONID); \ + array[2] = mmio_read_32(base + CPU_AXI_QOS_PRIORITY); \ + array[3] = mmio_read_32(base + CPU_AXI_QOS_MODE); \ + array[4] = mmio_read_32(base + CPU_AXI_QOS_BANDWIDTH); \ + array[5] = mmio_read_32(base + CPU_AXI_QOS_SATURATION); \ + array[6] = mmio_read_32(base + CPU_AXI_QOS_EXTCONTROL); \ +} while (0) + +#define RK3399_CPU_AXI_RESTORE_QOS(array, base) do { \ + mmio_write_32(base + CPU_AXI_QOS_ID_COREID, array[0]); \ + mmio_write_32(base + CPU_AXI_QOS_REVISIONID, array[1]); \ + mmio_write_32(base + CPU_AXI_QOS_PRIORITY, array[2]); \ + mmio_write_32(base + CPU_AXI_QOS_MODE, array[3]); \ + mmio_write_32(base + CPU_AXI_QOS_BANDWIDTH, array[4]); \ + mmio_write_32(base + CPU_AXI_QOS_SATURATION, array[5]); \ + mmio_write_32(base + CPU_AXI_QOS_EXTCONTROL, array[6]); \ +} while (0) + +struct pmu_slpdata_s { + uint32_t cci_m0_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t cci_m1_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t dmac0_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t dmac1_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t dcf_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t crypto0_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t crypto1_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t pmu_cm0_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t peri_cm1_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t gic_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t sdmmc_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t gmac_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t emmc_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t usb_otg0_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t usb_otg1_qos[CPU_AXI_QOS_NUM_REGS]; + 
uint32_t usb_host0_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t usb_host1_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t gpu_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t video_m0_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t video_m1_r_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t video_m1_w_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t rga_r_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t rga_w_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t vop_big_r[CPU_AXI_QOS_NUM_REGS]; + uint32_t vop_big_w[CPU_AXI_QOS_NUM_REGS]; + uint32_t vop_little[CPU_AXI_QOS_NUM_REGS]; + uint32_t iep_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t isp1_m0_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t isp1_m1_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t isp0_m0_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t isp0_m1_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t hdcp_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t perihp_nsp_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t perilp_nsp_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t perilpslv_nsp_qos[CPU_AXI_QOS_NUM_REGS]; + uint32_t sdio_qos[CPU_AXI_QOS_NUM_REGS]; +}; + +extern uint32_t clst_warmboot_data[PLATFORM_CLUSTER_COUNT]; + +extern void sram_func_set_ddrctl_pll(uint32_t pll_src); +void pmu_power_domains_on(void); + +#endif /* PMU_H */ diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu_fw.c b/plat/rockchip/rk3399/drivers/pmu/pmu_fw.c new file mode 100644 index 0000000..25596b1 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/pmu/pmu_fw.c @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/* convoluted way to make sure that the define is pasted just the right way */ +#define INCBIN(file, sym, sec) \ + __asm__( \ + ".section " sec "\n" \ + ".global " sym "\n" \ + ".type " sym ", %object\n" \ + ".align 4\n" \ + sym ":\n" \ + ".incbin \"" file "\"\n" \ + ".size " sym ", .-" sym "\n" \ + ".global " sym "_end\n" \ + sym "_end:\n" \ + ) + +INCBIN(RK3399M0FW, "rk3399m0_bin", ".sram.incbin"); +INCBIN(RK3399M0PMUFW, "rk3399m0pmu_bin", ".pmusram.incbin"); diff --git a/plat/rockchip/rk3399/drivers/pwm/pwm.c b/plat/rockchip/rk3399/drivers/pwm/pwm.c new file mode 100644 index 0000000..11c1565 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/pwm/pwm.c @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <plat_private.h> +#include <pmu.h> +#include <pwm.h> +#include <soc.h> + +#define PWM0_IOMUX_PWM_EN (1 << 0) +#define PWM1_IOMUX_PWM_EN (1 << 1) +#define PWM2_IOMUX_PWM_EN (1 << 2) +#define PWM3_IOMUX_PWM_EN (1 << 3) + +struct pwm_data_s { + uint32_t iomux_bitmask; + uint32_t enable_bitmask; +}; + +static struct pwm_data_s pwm_data; + +/* + * Disable the PWMs. 
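+ *
+ * Saves the current pinmux of the four PWM outputs, switches those
+ * pins to GPIO and clears the enable bit of any running PWM channel;
+ * enable_pwms() uses the saved state to undo this on resume.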
+ */ +void disable_pwms(void) +{ + uint32_t i, val; + + pwm_data.iomux_bitmask = 0; + + /* Save PWMs pinmux and change PWMs pinmux to GPIOs */ + val = mmio_read_32(GRF_BASE + GRF_GPIO4C_IOMUX); + if (((val >> GRF_GPIO4C2_IOMUX_SHIFT) & + GRF_IOMUX_2BIT_MASK) == GRF_GPIO4C2_IOMUX_PWM) { + pwm_data.iomux_bitmask |= PWM0_IOMUX_PWM_EN; + val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK, + GRF_GPIO4C2_IOMUX_SHIFT); + mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val); + } + + val = mmio_read_32(GRF_BASE + GRF_GPIO4C_IOMUX); + if (((val >> GRF_GPIO4C6_IOMUX_SHIFT) & + GRF_IOMUX_2BIT_MASK) == GRF_GPIO4C6_IOMUX_PWM) { + pwm_data.iomux_bitmask |= PWM1_IOMUX_PWM_EN; + val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK, + GRF_GPIO4C6_IOMUX_SHIFT); + mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val); + } + + val = mmio_read_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX); + if (((val >> PMUGRF_GPIO1C3_IOMUX_SHIFT) & + GRF_IOMUX_2BIT_MASK) == PMUGRF_GPIO1C3_IOMUX_PWM) { + pwm_data.iomux_bitmask |= PWM2_IOMUX_PWM_EN; + val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK, + PMUGRF_GPIO1C3_IOMUX_SHIFT); + mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX, val); + } + + val = mmio_read_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX); + if (((val >> PMUGRF_GPIO0A6_IOMUX_SHIFT) & + GRF_IOMUX_2BIT_MASK) == PMUGRF_GPIO0A6_IOMUX_PWM) { + pwm_data.iomux_bitmask |= PWM3_IOMUX_PWM_EN; + val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK, + PMUGRF_GPIO0A6_IOMUX_SHIFT); + mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX, val); + } + + /* Disable the pwm channel */ + pwm_data.enable_bitmask = 0; + for (i = 0; i < 4; i++) { + val = mmio_read_32(PWM_BASE + PWM_CTRL(i)); + if ((val & PWM_ENABLE) != PWM_ENABLE) + continue; + pwm_data.enable_bitmask |= (1 << i); + mmio_write_32(PWM_BASE + PWM_CTRL(i), val & ~PWM_ENABLE); + } +} + +/* + * Enable the PWMs. + */ +void enable_pwms(void) +{ + uint32_t i, val; + + for (i = 0; i < 4; i++) { + val = mmio_read_32(PWM_BASE + PWM_CTRL(i)); + if (!(pwm_data.enable_bitmask & (1 << i))) + continue; + mmio_write_32(PWM_BASE + PWM_CTRL(i), val | PWM_ENABLE); + } + + /* Restore all IOMUXes */ + if (pwm_data.iomux_bitmask & PWM3_IOMUX_PWM_EN) { + val = BITS_WITH_WMASK(PMUGRF_GPIO0A6_IOMUX_PWM, + GRF_IOMUX_2BIT_MASK, + PMUGRF_GPIO0A6_IOMUX_SHIFT); + mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX, val); + } + + if (pwm_data.iomux_bitmask & PWM2_IOMUX_PWM_EN) { + val = BITS_WITH_WMASK(PMUGRF_GPIO1C3_IOMUX_PWM, + GRF_IOMUX_2BIT_MASK, + PMUGRF_GPIO1C3_IOMUX_SHIFT); + mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX, val); + } + + if (pwm_data.iomux_bitmask & PWM1_IOMUX_PWM_EN) { + val = BITS_WITH_WMASK(GRF_GPIO4C6_IOMUX_PWM, + GRF_IOMUX_2BIT_MASK, + GRF_GPIO4C6_IOMUX_SHIFT); + mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val); + } + + if (pwm_data.iomux_bitmask & PWM0_IOMUX_PWM_EN) { + val = BITS_WITH_WMASK(GRF_GPIO4C2_IOMUX_PWM, + GRF_IOMUX_2BIT_MASK, + GRF_GPIO4C2_IOMUX_SHIFT); + mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val); + } +} diff --git a/plat/rockchip/rk3399/drivers/pwm/pwm.h b/plat/rockchip/rk3399/drivers/pwm/pwm.h new file mode 100644 index 0000000..d665392 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/pwm/pwm.h @@ -0,0 +1,13 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PWM_H +#define PWM_H + +void disable_pwms(void); +void enable_pwms(void); + +#endif /* PWM_H */ diff --git a/plat/rockchip/rk3399/drivers/secure/secure.c b/plat/rockchip/rk3399/drivers/secure/secure.c new file mode 100644 index 0000000..13c83ca --- /dev/null +++ b/plat/rockchip/rk3399/drivers/secure/secure.c @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> + +#include <plat_private.h> +#include <secure.h> +#include <soc.h> + +static void sgrf_ddr_rgn_global_bypass(uint32_t bypass) +{ + if (bypass) + /* set bypass (non-secure regions) for whole ddr regions */ + mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16), + SGRF_DDR_RGN_BYPS); + else + /* cancel bypass for whole ddr regions */ + mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16), + SGRF_DDR_RGN_NO_BYPS); +} + +/** + * There are 8 + 1 regions for DDR secure control: + * DDR_RGN_0 ~ DDR_RGN_7: Per DDR_RGNs grain size is 1MB + * DDR_RGN_X - the memories of exclude DDR_RGN_0 ~ DDR_RGN_7 + * + * DDR_RGN_0 - start address of the RGN0 + * DDR_RGN_8 - end address of the RGN0 + * DDR_RGN_1 - start address of the RGN1 + * DDR_RGN_9 - end address of the RGN1 + * ... + * DDR_RGN_7 - start address of the RGN7 + * DDR_RGN_15 - end address of the RGN7 + * DDR_RGN_16 - bit 0 ~ 7 is bitmap for RGN0~7 secure,0: disable, 1: enable + * bit 8 is setting for RGNx, the rest of the memory and region + * which excludes RGN0~7, 0: disable, 1: enable + * bit 9, the global secure configuration via bypass, 0: disable + * bypass, 1: enable bypass + * + * @rgn - the DDR regions 0 ~ 7 which are can be configured. + * @st - start address to set as secure + * @sz - length of area to set as secure + * The @st_mb and @ed_mb indicate the start and end addresses for which to set + * the security, and the unit is megabyte. When the st_mb == 0, ed_mb == 0, the + * address range 0x0 ~ 0xfffff is secure. + * + * For example, if we would like to set the range [0, 32MB) is security via + * DDR_RGN0, then rgn == 0, st_mb == 0, ed_mb == 31. 
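+ *
+ * An illustrative call for that example (the arguments are a byte
+ * address and size, converted to MB internally) would be:
+ *   sgrf_ddr_rgn_config(0, 0x0, SIZE_M(32));
+ * secure_sgrf_ddr_rgn_init() below uses the same helper to secure
+ * [TZRAM_BASE, TZRAM_BASE + TZRAM_SIZE).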
+ */ +static void sgrf_ddr_rgn_config(uint32_t rgn, + uintptr_t st, size_t sz) +{ + uintptr_t ed = st + sz; + uintptr_t st_mb, ed_mb; + + assert(rgn <= 7); + assert(st < ed); + + /* check aligned 1MB */ + assert(st % SIZE_M(1) == 0); + assert(ed % SIZE_M(1) == 0); + + st_mb = st / SIZE_M(1); + ed_mb = ed / SIZE_M(1); + + /* set ddr region addr start */ + mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(rgn), + BITS_WITH_WMASK(st_mb, SGRF_DDR_RGN_0_16_WMSK, 0)); + + /* set ddr region addr end */ + mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(rgn + 8), + BITS_WITH_WMASK((ed_mb - 1), SGRF_DDR_RGN_0_16_WMSK, 0)); + + mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16), + BIT_WITH_WMSK(rgn)); +} + +void secure_watchdog_gate(void) +{ + /** + * Disable CA53 and CM0 wdt pclk + * BIT[8]: ca53 wdt pclk, 0: enable 1: disable + * BIT[10]: cm0 wdt pclk, 0: enable 1: disable + */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3), + BIT_WITH_WMSK(PCLK_WDT_CA53_GATE_SHIFT) | + BIT_WITH_WMSK(PCLK_WDT_CM0_GATE_SHIFT)); +} + +__pmusramfunc void secure_watchdog_ungate(void) +{ + /** + * Enable CA53 and CM0 wdt pclk + * BIT[8]: ca53 wdt pclk, 0: enable 1: disable + * BIT[10]: cm0 wdt pclk, 0: enable 1: disable + */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3), + WMSK_BIT(PCLK_WDT_CA53_GATE_SHIFT) | + WMSK_BIT(PCLK_WDT_CM0_GATE_SHIFT)); +} + +__pmusramfunc void sram_secure_timer_init(void) +{ + mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT0, 0xffffffff); + mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT1, 0xffffffff); + + mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0); + mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0); + + /* auto reload & enable the timer */ + mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_CONTROL_REG, + TIMER_EN | TIMER_FMODE); +} + +void secure_timer_init(void) +{ + mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT0, 0xffffffff); + mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT1, 0xffffffff); + + mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0); + mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0); + + /* auto reload & enable the timer */ + mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_CONTROL_REG, + TIMER_EN | TIMER_FMODE); +} + +void secure_sgrf_init(void) +{ + /* security config for master */ + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(5), + REG_SOC_WMSK | SGRF_SOC_ALLMST_NS); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6), + REG_SOC_WMSK | SGRF_SOC_ALLMST_NS); + mmio_write_32(SGRF_BASE + SGRF_SOC_CON(7), + REG_SOC_WMSK | SGRF_SOC_ALLMST_NS); + + /* security config for slave */ + mmio_write_32(SGRF_BASE + SGRF_PMU_SLV_CON0_1(0), + SGRF_PMU_SLV_S_CFGED | + SGRF_PMU_SLV_CRYPTO1_NS); + mmio_write_32(SGRF_BASE + SGRF_PMU_SLV_CON0_1(1), + SGRF_SLV_S_WMSK | SGRF_PMUSRAM_S); + mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(0), + SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS); + mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(1), + SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS); + mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(2), + SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS); + mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(3), + SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS); + mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(4), + SGRF_SLV_S_WMSK | SGRF_INTSRAM_S); +} + +void secure_sgrf_ddr_rgn_init(void) +{ + sgrf_ddr_rgn_config(0, TZRAM_BASE, TZRAM_SIZE); + sgrf_ddr_rgn_global_bypass(0); +} diff --git a/plat/rockchip/rk3399/drivers/secure/secure.h b/plat/rockchip/rk3399/drivers/secure/secure.h new file mode 100644 index 0000000..e31c999 --- /dev/null +++ 
b/plat/rockchip/rk3399/drivers/secure/secure.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SECURE_H +#define SECURE_H + +/************************************************** + * sgrf reg, offset + **************************************************/ +#define SGRF_SOC_CON0_1(n) (0xc000 + (n) * 4) +#define SGRF_SOC_CON3_7(n) (0xe00c + ((n) - 3) * 4) +#define SGRF_SOC_CON8_15(n) (0x8020 + ((n) - 8) * 4) +#define SGRF_SOC_CON(n) (n < 3 ? SGRF_SOC_CON0_1(n) :\ + (n < 8 ? SGRF_SOC_CON3_7(n) :\ + SGRF_SOC_CON8_15(n))) + +#define SGRF_PMU_SLV_CON0_1(n) (0xc240 + ((n) - 0) * 4) +#define SGRF_SLV_SECURE_CON0_4(n) (0xe3c0 + ((n) - 0) * 4) +#define SGRF_DDRRGN_CON0_16(n) ((n) * 4) +#define SGRF_DDRRGN_CON20_34(n) (0x50 + ((n) - 20) * 4) + +/* All of master in ns */ +#define SGRF_SOC_ALLMST_NS 0xffff + +/* security config for slave */ +#define SGRF_SLV_S_WMSK 0xffff0000 +#define SGRF_SLV_S_ALL_NS 0x0 + +/* security config pmu slave ip */ +/* All of slaves is ns */ +#define SGRF_PMU_SLV_S_NS BIT_WITH_WMSK(0) +/* slaves secure attr is configed */ +#define SGRF_PMU_SLV_S_CFGED WMSK_BIT(0) +#define SGRF_PMU_SLV_CRYPTO1_NS WMSK_BIT(1) + +#define SGRF_PMUSRAM_S BIT(8) + +#define SGRF_INTSRAM_S BIT(13) + +/* ddr region */ +#define SGRF_DDR_RGN_0_16_WMSK 0x0fff /* DDR RGN 0~16 size mask */ + +#define SGRF_DDR_RGN_DPLL_CLK BIT_WITH_WMSK(15) /* DDR PLL output clock */ +#define SGRF_DDR_RGN_RTC_CLK BIT_WITH_WMSK(14) /* 32K clock for DDR PLL */ + +/* All security of the DDR RGNs are bypass */ +#define SGRF_DDR_RGN_BYPS BIT_WITH_WMSK(9) +/* All security of the DDR RGNs are not bypass */ +#define SGRF_DDR_RGN_NO_BYPS WMSK_BIT(9) + +/* The MST access the ddr rgn n with secure attribution */ +#define SGRF_L_MST_S_DDR_RGN(n) BIT_WITH_WMSK((n)) +/* bits[16:8]*/ +#define SGRF_H_MST_S_DDR_RGN(n) BIT_WITH_WMSK((n) + 8) + +#define SGRF_PMU_CON0 0x0c100 +#define SGRF_PMU_CON(n) (SGRF_PMU_CON0 + (n) * 4) + +/************************************************** + * secure timer + **************************************************/ +/* chanal0~5 */ +#define STIMER0_CHN_BASE(n) (STIME_BASE + 0x20 * (n)) +/* chanal6~11 */ +#define STIMER1_CHN_BASE(n) (STIME_BASE + 0x8000 + 0x20 * (n)) + + /* low 32 bits */ +#define TIMER_END_COUNT0 0x00 + /* high 32 bits */ +#define TIMER_END_COUNT1 0x04 + +#define TIMER_CURRENT_VALUE0 0x08 +#define TIMER_CURRENT_VALUE1 0x0C + + /* low 32 bits */ +#define TIMER_INIT_COUNT0 0x10 + /* high 32 bits */ +#define TIMER_INIT_COUNT1 0x14 + +#define TIMER_INTSTATUS 0x18 +#define TIMER_CONTROL_REG 0x1c + +#define TIMER_EN 0x1 + +#define TIMER_FMODE (0x0 << 1) +#define TIMER_RMODE (0x1 << 1) + +/************************************************** + * secure WDT + **************************************************/ +#define PCLK_WDT_CA53_GATE_SHIFT 8 +#define PCLK_WDT_CM0_GATE_SHIFT 10 + +/* export secure operating APIs */ +void secure_watchdog_gate(void); +__pmusramfunc void secure_watchdog_ungate(void); +void secure_timer_init(void); +void secure_sgrf_init(void); +void secure_sgrf_ddr_rgn_init(void); +__pmusramfunc void sram_secure_timer_init(void); + +#endif /* SECURE_H */ diff --git a/plat/rockchip/rk3399/drivers/soc/soc.c b/plat/rockchip/rk3399/drivers/soc/soc.c new file mode 100644 index 0000000..98b5ad6 --- /dev/null +++ b/plat/rockchip/rk3399/drivers/soc/soc.c @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> + +#include <dfs.h> +#include <dram.h> +#include <m0_ctl.h> +#include <plat_private.h> +#include <pmu.h> +#include <rk3399_def.h> +#include <secure.h> +#include <soc.h> + +/* Table of regions to map using the MMU. */ +const mmap_region_t plat_rk_mmap[] = { + MAP_REGION_FLAT(DEV_RNG0_BASE, DEV_RNG0_SIZE, + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE, + MT_MEMORY | MT_RW | MT_SECURE), + + { 0 } +}; + +/* The RockChip power domain tree descriptor */ +const unsigned char rockchip_power_domain_tree_desc[] = { + /* No of root nodes */ + PLATFORM_SYSTEM_COUNT, + /* No of children for the root node */ + PLATFORM_CLUSTER_COUNT, + /* No of children for the first cluster node */ + PLATFORM_CLUSTER0_CORE_COUNT, + /* No of children for the second cluster node */ + PLATFORM_CLUSTER1_CORE_COUNT +}; + +/* sleep data for pll suspend */ +static struct deepsleep_data_s slp_data; + +/* sleep data that needs to be accessed from pmusram */ +__pmusramdata struct pmu_sleep_data pmu_slp_data; + +static void set_pll_slow_mode(uint32_t pll_id) +{ + if (pll_id == PPLL_ID) + mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3), PLL_SLOW_MODE); + else + mmio_write_32((CRU_BASE + + CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE); +} + +static void set_pll_normal_mode(uint32_t pll_id) +{ + if (pll_id == PPLL_ID) + mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3), PLL_NOMAL_MODE); + else + mmio_write_32(CRU_BASE + + CRU_PLL_CON(pll_id, 3), PLL_NOMAL_MODE); +} + +static void set_pll_bypass(uint32_t pll_id) +{ + if (pll_id == PPLL_ID) + mmio_write_32(PMUCRU_BASE + + PMUCRU_PPLL_CON(3), PLL_BYPASS_MODE); + else + mmio_write_32(CRU_BASE + + CRU_PLL_CON(pll_id, 3), PLL_BYPASS_MODE); +} + +static void _pll_suspend(uint32_t pll_id) +{ + set_pll_slow_mode(pll_id); + set_pll_bypass(pll_id); +} + +/** + * disable_dvfs_plls - To suspend the specific PLLs + * + * When we close the center logic, the DPLL will be closed, + * so we need to keep the ABPLL and switch to it to supply + * clock for DDR during suspend, then we should not close + * the ABPLL and exclude ABPLL_ID. + */ +void disable_dvfs_plls(void) +{ + _pll_suspend(CPLL_ID); + _pll_suspend(NPLL_ID); + _pll_suspend(VPLL_ID); + _pll_suspend(GPLL_ID); + _pll_suspend(ALPLL_ID); +} + +/** + * disable_nodvfs_plls - To suspend the PPLL + */ +void disable_nodvfs_plls(void) +{ + _pll_suspend(PPLL_ID); +} + +/** + * restore_pll - Copy PLL settings from memory to a PLL. + * + * This will copy PLL settings from an array in memory to the memory mapped + * registers for a PLL. + * + * Note that: above the PLL exclude PPLL. 
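+ *
+ * Most PLL_CON registers follow the Rockchip write-mask convention:
+ * the upper 16 bits of a write enable the corresponding lower 16
+ * bits, so the saved values are OR-ed with REG_SOC_WMSK (0xffff0000)
+ * to make the whole low half-word take effect. PLL_CON2, which also
+ * holds the lock bit polled at the end of this function, is written
+ * back as-is.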
+ * + * pll_id: One of the values from enum plls_id + * src: Pointer to the array of values to restore from + */ +static void restore_pll(int pll_id, uint32_t *src) +{ + /* Nice to have PLL off while configuring */ + mmio_write_32((CRU_BASE + CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE); + + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 0), src[0] | REG_SOC_WMSK); + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 1), src[1] | REG_SOC_WMSK); + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 2), src[2]); + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 4), src[4] | REG_SOC_WMSK); + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 5), src[5] | REG_SOC_WMSK); + + /* Do PLL_CON3 since that will enable things */ + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), src[3] | REG_SOC_WMSK); + + /* Wait for PLL lock done */ + while ((mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 2)) & + 0x80000000) == 0x0) + ; +} + +/** + * save_pll - Copy PLL settings a PLL to memory + * + * This will copy PLL settings from the memory mapped registers for a PLL to + * an array in memory. + * + * Note that: above the PLL exclude PPLL. + * + * pll_id: One of the values from enum plls_id + * src: Pointer to the array of values to save to. + */ +static void save_pll(uint32_t *dst, int pll_id) +{ + int i; + + for (i = 0; i < PLL_CON_COUNT; i++) + dst[i] = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, i)); +} + +/** + * prepare_abpll_for_ddrctrl - Copy DPLL settings to ABPLL + * + * This will copy DPLL settings from the memory mapped registers for a PLL to + * an array in memory. + */ +void prepare_abpll_for_ddrctrl(void) +{ + save_pll(slp_data.plls_con[ABPLL_ID], ABPLL_ID); + save_pll(slp_data.plls_con[DPLL_ID], DPLL_ID); + + restore_pll(ABPLL_ID, slp_data.plls_con[DPLL_ID]); +} + +void restore_abpll(void) +{ + restore_pll(ABPLL_ID, slp_data.plls_con[ABPLL_ID]); +} + +void clk_gate_con_save(void) +{ + uint32_t i = 0; + + for (i = 0; i < PMUCRU_GATE_COUNT; i++) + slp_data.pmucru_gate_con[i] = + mmio_read_32(PMUCRU_BASE + PMUCRU_GATE_CON(i)); + + for (i = 0; i < CRU_GATE_COUNT; i++) + slp_data.cru_gate_con[i] = + mmio_read_32(CRU_BASE + CRU_GATE_CON(i)); +} + +void clk_gate_con_disable(void) +{ + uint32_t i; + + for (i = 0; i < PMUCRU_GATE_COUNT; i++) + mmio_write_32(PMUCRU_BASE + PMUCRU_GATE_CON(i), REG_SOC_WMSK); + + for (i = 0; i < CRU_GATE_COUNT; i++) + mmio_write_32(CRU_BASE + CRU_GATE_CON(i), REG_SOC_WMSK); +} + +void clk_gate_con_restore(void) +{ + uint32_t i; + + for (i = 0; i < PMUCRU_GATE_COUNT; i++) + mmio_write_32(PMUCRU_BASE + PMUCRU_GATE_CON(i), + REG_SOC_WMSK | slp_data.pmucru_gate_con[i]); + + for (i = 0; i < CRU_GATE_COUNT; i++) + mmio_write_32(CRU_BASE + CRU_GATE_CON(i), + REG_SOC_WMSK | slp_data.cru_gate_con[i]); +} + +static void set_plls_nobypass(uint32_t pll_id) +{ + if (pll_id == PPLL_ID) + mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3), + PLL_NO_BYPASS_MODE); + else + mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), + PLL_NO_BYPASS_MODE); +} + +static void _pll_resume(uint32_t pll_id) +{ + set_plls_nobypass(pll_id); + set_pll_normal_mode(pll_id); +} + +void set_pmu_rsthold(void) +{ + uint32_t rstnhold_cofig0; + uint32_t rstnhold_cofig1; + + pmu_slp_data.pmucru_rstnhold_con0 = mmio_read_32(PMUCRU_BASE + + PMUCRU_RSTNHOLD_CON0); + pmu_slp_data.pmucru_rstnhold_con1 = mmio_read_32(PMUCRU_BASE + + PMUCRU_RSTNHOLD_CON1); + rstnhold_cofig0 = BIT_WITH_WMSK(PRESETN_NOC_PMU_HOLD) | + BIT_WITH_WMSK(PRESETN_INTMEM_PMU_HOLD) | + BIT_WITH_WMSK(HRESETN_CM0S_PMU_HOLD) | + BIT_WITH_WMSK(HRESETN_CM0S_NOC_PMU_HOLD) | + 
BIT_WITH_WMSK(DRESETN_CM0S_PMU_HOLD) | + BIT_WITH_WMSK(POESETN_CM0S_PMU_HOLD) | + BIT_WITH_WMSK(PRESETN_TIMER_PMU_0_1_HOLD) | + BIT_WITH_WMSK(RESETN_TIMER_PMU_0_HOLD) | + BIT_WITH_WMSK(RESETN_TIMER_PMU_1_HOLD) | + BIT_WITH_WMSK(PRESETN_UART_M0_PMU_HOLD) | + BIT_WITH_WMSK(RESETN_UART_M0_PMU_HOLD) | + BIT_WITH_WMSK(PRESETN_WDT_PMU_HOLD); + rstnhold_cofig1 = BIT_WITH_WMSK(PRESETN_RKPWM_PMU_HOLD) | + BIT_WITH_WMSK(PRESETN_PMUGRF_HOLD) | + BIT_WITH_WMSK(PRESETN_SGRF_HOLD) | + BIT_WITH_WMSK(PRESETN_GPIO0_HOLD) | + BIT_WITH_WMSK(PRESETN_GPIO1_HOLD) | + BIT_WITH_WMSK(PRESETN_CRU_PMU_HOLD) | + BIT_WITH_WMSK(PRESETN_PVTM_PMU_HOLD); + + mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON0, rstnhold_cofig0); + mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON1, rstnhold_cofig1); +} + +void pmu_sgrf_rst_hld(void) +{ + mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1), + CRU_PMU_SGRF_RST_HOLD); +} + +/* + * When system reset in running state, we want the cpus to be reboot + * from maskrom (system reboot), + * the pmusgrf reset-hold bits needs to be released. + * When system wake up from system deep suspend, some soc will be reset + * when waked up, + * we want the bootcpu to be reboot from pmusram, + * the pmusgrf reset-hold bits needs to be held. + */ +__pmusramfunc void pmu_sgrf_rst_hld_release(void) +{ + mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1), + CRU_PMU_SGRF_RST_RLS); +} + +__pmusramfunc void restore_pmu_rsthold(void) +{ + mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON0, + pmu_slp_data.pmucru_rstnhold_con0 | REG_SOC_WMSK); + mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON1, + pmu_slp_data.pmucru_rstnhold_con1 | REG_SOC_WMSK); +} + +/** + * enable_dvfs_plls - To resume the specific PLLs + * + * Please see the comment at the disable_dvfs_plls() + * we don't suspend the ABPLL, so don't need resume + * it too. + */ +void enable_dvfs_plls(void) +{ + _pll_resume(ALPLL_ID); + _pll_resume(GPLL_ID); + _pll_resume(VPLL_ID); + _pll_resume(NPLL_ID); + _pll_resume(CPLL_ID); +} + +/** + * enable_nodvfs_plls - To resume the PPLL + */ +void enable_nodvfs_plls(void) +{ + _pll_resume(PPLL_ID); +} + +void soc_global_soft_reset_init(void) +{ + mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1), + CRU_PMU_SGRF_RST_RLS); + + mmio_clrbits_32(CRU_BASE + CRU_GLB_RST_CON, + CRU_PMU_WDTRST_MSK | CRU_PMU_FIRST_SFTRST_MSK); +} + +void __dead2 soc_global_soft_reset(void) +{ + pmu_power_domains_on(); + set_pll_slow_mode(VPLL_ID); + set_pll_slow_mode(NPLL_ID); + set_pll_slow_mode(GPLL_ID); + set_pll_slow_mode(CPLL_ID); + set_pll_slow_mode(PPLL_ID); + set_pll_slow_mode(ABPLL_ID); + set_pll_slow_mode(ALPLL_ID); + + dsb(); + + mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, GLB_SRST_FST_CFG_VAL); + + /* + * Maybe the HW needs some times to reset the system, + * so we do not hope the core to excute valid codes. + */ + while (1) + ; +} + +void plat_rockchip_soc_init(void) +{ + secure_timer_init(); + secure_sgrf_init(); + secure_sgrf_ddr_rgn_init(); + soc_global_soft_reset_init(); + plat_rockchip_gpio_init(); + m0_init(); + dram_init(); + dram_dfs_init(); +} diff --git a/plat/rockchip/rk3399/drivers/soc/soc.h b/plat/rockchip/rk3399/drivers/soc/soc.h new file mode 100644 index 0000000..8daa5bb --- /dev/null +++ b/plat/rockchip/rk3399/drivers/soc/soc.h @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SOC_H +#define SOC_H + +#include <lib/utils.h> + +#define GLB_SRST_FST_CFG_VAL 0xfdb9 +#define GLB_SRST_SND_CFG_VAL 0xeca8 + +#define PMUCRU_PPLL_CON(n) ((n) * 4) +#define CRU_PLL_CON(pll_id, n) ((pll_id) * 0x20 + (n) * 4) +#define PLL_MODE_MSK 0x03 +#define PLL_MODE_SHIFT 0x08 +#define PLL_BYPASS_MSK 0x01 +#define PLL_BYPASS_SHIFT 0x01 +#define PLL_PWRDN_MSK 0x01 +#define PLL_PWRDN_SHIFT 0x0 +#define PLL_BYPASS BIT(1) +#define PLL_PWRDN BIT(0) + +#define NO_PLL_BYPASS (0x00) +#define NO_PLL_PWRDN (0x00) + +#define FBDIV(n) ((0xfff << 16) | n) +#define POSTDIV2(n) ((0x7 << (12 + 16)) | (n << 12)) +#define POSTDIV1(n) ((0x7 << (8 + 16)) | (n << 8)) +#define REFDIV(n) ((0x3F << 16) | n) +#define PLL_LOCK(n) ((n >> 31) & 0x1) + +#define PLL_SLOW_MODE BITS_WITH_WMASK(SLOW_MODE,\ + PLL_MODE_MSK, PLL_MODE_SHIFT) + +#define PLL_NOMAL_MODE BITS_WITH_WMASK(NORMAL_MODE,\ + PLL_MODE_MSK, PLL_MODE_SHIFT) + +#define PLL_BYPASS_MODE BIT_WITH_WMSK(PLL_BYPASS_SHIFT) +#define PLL_NO_BYPASS_MODE WMSK_BIT(PLL_BYPASS_SHIFT) + +#define PLL_CON_COUNT 0x06 +#define CRU_CLKSEL_COUNT 108 +#define CRU_CLKSEL_CON(n) (0x100 + (n) * 4) + +#define PMUCRU_CLKSEL_CONUT 0x06 +#define PMUCRU_CLKSEL_OFFSET 0x080 +#define REG_SIZE 0x04 +#define REG_SOC_WMSK 0xffff0000 +#define CLK_GATE_MASK 0x01 + +#define PMUCRU_GATE_COUNT 0x03 +#define CRU_GATE_COUNT 0x23 +#define PMUCRU_GATE_CON(n) (0x100 + (n) * 4) +#define CRU_GATE_CON(n) (0x300 + (n) * 4) + +#define PMUCRU_RSTNHOLD_CON0 0x120 +enum { + PRESETN_NOC_PMU_HOLD = 1, + PRESETN_INTMEM_PMU_HOLD, + HRESETN_CM0S_PMU_HOLD, + HRESETN_CM0S_NOC_PMU_HOLD, + DRESETN_CM0S_PMU_HOLD, + POESETN_CM0S_PMU_HOLD, + PRESETN_SPI3_HOLD, + RESETN_SPI3_HOLD, + PRESETN_TIMER_PMU_0_1_HOLD, + RESETN_TIMER_PMU_0_HOLD, + RESETN_TIMER_PMU_1_HOLD, + PRESETN_UART_M0_PMU_HOLD, + RESETN_UART_M0_PMU_HOLD, + PRESETN_WDT_PMU_HOLD +}; + +#define PMUCRU_RSTNHOLD_CON1 0x124 +enum { + PRESETN_I2C0_HOLD, + PRESETN_I2C4_HOLD, + PRESETN_I2C8_HOLD, + PRESETN_MAILBOX_PMU_HOLD, + PRESETN_RKPWM_PMU_HOLD, + PRESETN_PMUGRF_HOLD, + PRESETN_SGRF_HOLD, + PRESETN_GPIO0_HOLD, + PRESETN_GPIO1_HOLD, + PRESETN_CRU_PMU_HOLD, + PRESETN_INTR_ARB_HOLD, + PRESETN_PVTM_PMU_HOLD, + RESETN_I2C0_HOLD, + RESETN_I2C4_HOLD, + RESETN_I2C8_HOLD +}; + +enum plls_id { + ALPLL_ID = 0, + ABPLL_ID, + DPLL_ID, + CPLL_ID, + GPLL_ID, + NPLL_ID, + VPLL_ID, + PPLL_ID, + END_PLL_ID, +}; + +#define CLST_L_CPUS_MSK (0xf) +#define CLST_B_CPUS_MSK (0x3) + +enum pll_work_mode { + SLOW_MODE = 0x00, + NORMAL_MODE = 0x01, + DEEP_SLOW_MODE = 0x02, +}; + +enum glb_sft_reset { + PMU_RST_BY_FIRST_SFT, + PMU_RST_BY_SECOND_SFT = BIT(2), + PMU_RST_NOT_BY_SFT = BIT(3), +}; + +struct pll_div { + uint32_t mhz; + uint32_t refdiv; + uint32_t fbdiv; + uint32_t postdiv1; + uint32_t postdiv2; + uint32_t frac; + uint32_t freq; +}; + +struct deepsleep_data_s { + uint32_t plls_con[END_PLL_ID][PLL_CON_COUNT]; + uint32_t cru_gate_con[CRU_GATE_COUNT]; + uint32_t pmucru_gate_con[PMUCRU_GATE_COUNT]; +}; + +struct pmu_sleep_data { + uint32_t pmucru_rstnhold_con0; + uint32_t pmucru_rstnhold_con1; +}; + +/************************************************** + * pmugrf reg, offset + **************************************************/ +#define PMUGRF_OSREG(n) (0x300 + (n) * 4) +#define PMUGRF_GPIO0A_P 0x040 +#define PMUGRF_GPIO1A_P 0x050 + +/************************************************** + * DCF reg, offset + **************************************************/ +#define DCF_DCF_CTRL 0x0 +#define DCF_DCF_ADDR 0x8 +#define 
DCF_DCF_ISR 0xc +#define DCF_DCF_TOSET 0x14 +#define DCF_DCF_TOCMD 0x18 +#define DCF_DCF_CMD_CFG 0x1c + +/* DCF_DCF_ISR */ +#define DCF_TIMEOUT (1 << 2) +#define DCF_ERR (1 << 1) +#define DCF_DONE (1 << 0) + +/* DCF_DCF_CTRL */ +#define DCF_VOP_HW_EN (1 << 2) +#define DCF_STOP (1 << 1) +#define DCF_START (1 << 0) + +#define CYCL_24M_CNT_US(us) (24 * us) +#define CYCL_24M_CNT_MS(ms) (ms * CYCL_24M_CNT_US(1000)) +#define CYCL_32K_CNT_MS(ms) (ms * 32) + +/************************************************** + * cru reg, offset + **************************************************/ +#define CRU_SOFTRST_CON(n) (0x400 + (n) * 4) + +#define CRU_DMAC0_RST BIT_WITH_WMSK(3) + /* reset release*/ +#define CRU_DMAC0_RST_RLS WMSK_BIT(3) + +#define CRU_DMAC1_RST BIT_WITH_WMSK(4) + /* reset release*/ +#define CRU_DMAC1_RST_RLS WMSK_BIT(4) + +#define CRU_GLB_RST_CON 0x0510 +#define CRU_GLB_SRST_FST 0x0500 +#define CRU_GLB_SRST_SND 0x0504 + +#define CRU_CLKGATE_CON(n) (0x300 + n * 4) +#define PCLK_GPIO2_GATE_SHIFT 3 +#define PCLK_GPIO3_GATE_SHIFT 4 +#define PCLK_GPIO4_GATE_SHIFT 5 + +/************************************************** + * pmu cru reg, offset + **************************************************/ +#define CRU_PMU_RSTHOLD_CON(n) (0x120 + n * 4) +/* reset hold*/ +#define CRU_PMU_SGRF_RST_HOLD BIT_WITH_WMSK(6) +/* reset hold release*/ +#define CRU_PMU_SGRF_RST_RLS WMSK_BIT(6) + +#define CRU_PMU_WDTRST_MSK (0x1 << 4) +#define CRU_PMU_WDTRST_EN 0x0 + +#define CRU_PMU_FIRST_SFTRST_MSK (0x3 << 2) +#define CRU_PMU_FIRST_SFTRST_EN 0x0 + +#define CRU_PMU_CLKGATE_CON(n) (0x100 + n * 4) +#define PCLK_GPIO0_GATE_SHIFT 3 +#define PCLK_GPIO1_GATE_SHIFT 4 + +#define CPU_BOOT_ADDR_WMASK 0xffff0000 +#define CPU_BOOT_ADDR_ALIGN 16 + +#define GRF_IOMUX_2BIT_MASK 0x3 +#define GRF_IOMUX_GPIO 0x0 + +#define GRF_GPIO4C2_IOMUX_SHIFT 4 +#define GRF_GPIO4C2_IOMUX_PWM 0x1 +#define GRF_GPIO4C6_IOMUX_SHIFT 12 +#define GRF_GPIO4C6_IOMUX_PWM 0x1 + +#define PWM_CNT(n) (0x0000 + 0x10 * (n)) +#define PWM_PERIOD_HPR(n) (0x0004 + 0x10 * (n)) +#define PWM_DUTY_LPR(n) (0x0008 + 0x10 * (n)) +#define PWM_CTRL(n) (0x000c + 0x10 * (n)) + +#define PWM_DISABLE (0 << 0) +#define PWM_ENABLE (1 << 0) + +/* grf reg offset */ +#define GRF_USBPHY0_CTRL0 0x4480 +#define GRF_USBPHY0_CTRL2 0x4488 +#define GRF_USBPHY0_CTRL3 0x448c +#define GRF_USBPHY0_CTRL12 0x44b0 +#define GRF_USBPHY0_CTRL13 0x44b4 +#define GRF_USBPHY0_CTRL15 0x44bc +#define GRF_USBPHY0_CTRL16 0x44c0 + +#define GRF_USBPHY1_CTRL0 0x4500 +#define GRF_USBPHY1_CTRL2 0x4508 +#define GRF_USBPHY1_CTRL3 0x450c +#define GRF_USBPHY1_CTRL12 0x4530 +#define GRF_USBPHY1_CTRL13 0x4534 +#define GRF_USBPHY1_CTRL15 0x453c +#define GRF_USBPHY1_CTRL16 0x4540 + +#define GRF_GPIO2A_IOMUX 0xe000 +#define GRF_GPIO2A_P 0xe040 +#define GRF_GPIO3A_P 0xe050 +#define GRF_GPIO4A_P 0xe060 +#define GRF_GPIO2D_HE 0xe18c +#define GRF_DDRC0_CON0 0xe380 +#define GRF_DDRC0_CON1 0xe384 +#define GRF_DDRC1_CON0 0xe388 +#define GRF_DDRC1_CON1 0xe38c +#define GRF_SOC_CON_BASE 0xe200 +#define GRF_SOC_CON(n) (GRF_SOC_CON_BASE + (n) * 4) +#define GRF_IO_VSEL 0xe640 + +#define CRU_CLKSEL_CON0 0x0100 +#define CRU_CLKSEL_CON6 0x0118 +#define CRU_SDIO0_CON1 0x058c +#define PMUCRU_CLKSEL_CON0 0x0080 +#define PMUCRU_CLKGATE_CON2 0x0108 +#define PMUCRU_SOFTRST_CON0 0x0110 +#define PMUCRU_GATEDIS_CON0 0x0130 +#define PMUCRU_SOFTRST_CON(n) (PMUCRU_SOFTRST_CON0 + (n) * 4) + +/* export related and operating SoC APIs */ +void __dead2 soc_global_soft_reset(void); +void disable_dvfs_plls(void); +void disable_nodvfs_plls(void); +void 
enable_dvfs_plls(void); +void enable_nodvfs_plls(void); +void prepare_abpll_for_ddrctrl(void); +void restore_abpll(void); +void clk_gate_con_save(void); +void clk_gate_con_disable(void); +void clk_gate_con_restore(void); +void set_pmu_rsthold(void); +void pmu_sgrf_rst_hld(void); +__pmusramfunc void pmu_sgrf_rst_hld_release(void); +__pmusramfunc void restore_pmu_rsthold(void); +#endif /* SOC_H */ diff --git a/plat/rockchip/rk3399/include/addressmap.h b/plat/rockchip/rk3399/include/addressmap.h new file mode 100644 index 0000000..dc1c703 --- /dev/null +++ b/plat/rockchip/rk3399/include/addressmap.h @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef ADDRESSMAP_H +#define ADDRESSMAP_H + +#include <addressmap_shared.h> + +/* Registers base address */ +#define MMIO_BASE 0xF8000000 + +/* Aggregate of all devices in the first GB */ +#define DEV_RNG0_BASE MMIO_BASE +#define DEV_RNG0_SIZE SIZE_M(125) + +#endif /* ADDRESSMAP_H */ diff --git a/plat/rockchip/rk3399/include/plat.ld.S b/plat/rockchip/rk3399/include/plat.ld.S new file mode 100644 index 0000000..cfa912f --- /dev/null +++ b/plat/rockchip/rk3399/include/plat.ld.S @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef ROCKCHIP_PLAT_LD_S +#define ROCKCHIP_PLAT_LD_S + +#include <lib/xlat_tables/xlat_tables_defs.h> + +MEMORY { + SRAM (rwx): ORIGIN = SRAM_BASE, LENGTH = SRAM_SIZE + PMUSRAM (rwx): ORIGIN = PMUSRAM_BASE, LENGTH = PMUSRAM_RSIZE +} + +SECTIONS +{ + . = SRAM_BASE; + ASSERT(. == ALIGN(PAGE_SIZE), + "SRAM_BASE address is not aligned on a page boundary.") + + /* + * The SRAM space allocation for RK3399 + * ---------------- + * | m0 code bin + * ---------------- + * | sram text + * ---------------- + * | sram data + * ---------------- + */ + .incbin_sram : ALIGN(PAGE_SIZE) { + __sram_incbin_start = .; + *(.sram.incbin) + __sram_incbin_real_end = .; + . = ALIGN(PAGE_SIZE); + __sram_incbin_end = .; + } >SRAM + ASSERT((__sram_incbin_real_end - __sram_incbin_start) <= + SRAM_BIN_LIMIT, ".incbin_sram has exceeded its limit") + + .text_sram : ALIGN(PAGE_SIZE) { + __bl31_sram_text_start = .; + *(.sram.text) + *(.sram.rodata) + __bl31_sram_text_real_end = .; + . = ALIGN(PAGE_SIZE); + __bl31_sram_text_end = .; + } >SRAM + ASSERT((__bl31_sram_text_real_end - __bl31_sram_text_start) <= + SRAM_TEXT_LIMIT, ".text_sram has exceeded its limit") + + .data_sram : ALIGN(PAGE_SIZE) { + __bl31_sram_data_start = .; + *(.sram.data) + __bl31_sram_data_real_end = .; + . = ALIGN(PAGE_SIZE); + __bl31_sram_data_end = .; + } >SRAM + ASSERT((__bl31_sram_data_real_end - __bl31_sram_data_start) <= + SRAM_DATA_LIMIT, ".data_sram has exceeded its limit") + + .stack_sram : ALIGN(PAGE_SIZE) { + __bl31_sram_stack_start = .; + . += PAGE_SIZE; + __bl31_sram_stack_end = .; + } >SRAM + + . = PMUSRAM_BASE; + + /* + * pmu_cpuson_entrypoint request address + * align 64K when resume, so put it in the + * start of pmusram + */ + .pmusram : { + ASSERT(. == ALIGN(64 * 1024), + ".pmusram.entry request 64K aligned."); + *(.pmusram.entry) + + __bl31_pmusram_text_start = .; + *(.pmusram.text) + *(.pmusram.rodata) + __bl31_pmusram_text_end = .; + + /* M0 start address request 4K align */ + . 
= ALIGN(4096); + __pmusram_incbin_start = .; + *(.pmusram.incbin) + __pmusram_incbin_end = .; + + __bl31_pmusram_data_start = .; + *(.pmusram.data) + __bl31_pmusram_data_end = .; + } >PMUSRAM +} + +#endif /* ROCKCHIP_PLAT_LD_S */ diff --git a/plat/rockchip/rk3399/include/plat_sip_calls.h b/plat/rockchip/rk3399/include/plat_sip_calls.h new file mode 100644 index 0000000..66c4868 --- /dev/null +++ b/plat/rockchip/rk3399/include/plat_sip_calls.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLAT_SIP_CALLS_H +#define PLAT_SIP_CALLS_H + +#define RK_PLAT_SIP_NUM_CALLS 0 + +#endif /* PLAT_SIP_CALLS_H */ diff --git a/plat/rockchip/rk3399/include/platform_def.h b/plat/rockchip/rk3399/include/platform_def.h new file mode 100644 index 0000000..78269b6 --- /dev/null +++ b/plat/rockchip/rk3399/include/platform_def.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLATFORM_DEF_H +#define PLATFORM_DEF_H + +#include <arch.h> +#include <lib/utils_def.h> +#include <plat/common/common_def.h> + +#include <bl31_param.h> +#include <rk3399_def.h> + +/******************************************************************************* + * Platform binary types for linking + ******************************************************************************/ +#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" +#define PLATFORM_LINKER_ARCH aarch64 + +/******************************************************************************* + * Generic platform constants + ******************************************************************************/ + +/* Size of cacheable stacks */ +#if defined(IMAGE_BL1) +#define PLATFORM_STACK_SIZE 0x440 +#elif defined(IMAGE_BL2) +#define PLATFORM_STACK_SIZE 0x400 +#elif defined(IMAGE_BL31) +#define PLATFORM_STACK_SIZE 0x800 +#elif defined(IMAGE_BL32) +#define PLATFORM_STACK_SIZE 0x440 +#endif + +#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n" + +#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2 +#define PLATFORM_SYSTEM_COUNT U(1) +#define PLATFORM_CLUSTER_COUNT U(2) +#define PLATFORM_CLUSTER0_CORE_COUNT U(4) +#define PLATFORM_CLUSTER1_CORE_COUNT U(2) +#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \ + PLATFORM_CLUSTER0_CORE_COUNT) +#define PLATFORM_MAX_CPUS_PER_CLUSTER U(4) +#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \ + PLATFORM_CLUSTER_COUNT + \ + PLATFORM_CORE_COUNT) +#define PLAT_RK_CLST_TO_CPUID_SHIFT 6 +#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2 + +/* + * This macro defines the deepest retention state possible. A higher state + * id will represent an invalid or a power down state. + */ +#define PLAT_MAX_RET_STATE U(1) + +/* + * This macro defines the deepest power down states possible. Any state ID + * higher than this is invalid. + */ +#define PLAT_MAX_OFF_STATE U(2) + +/******************************************************************************* + * Platform specific page table and MMU setup constants + ******************************************************************************/ +#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32) +#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32) +#define MAX_XLAT_TABLES 20 +#define MAX_MMAP_REGIONS 25 + +/******************************************************************************* + * Declarations and constants to access the mailboxes safely. 
Each mailbox is + * aligned on the biggest cache line size in the platform. This is known only + * to the platform as it might have a combination of integrated and external + * caches. Such alignment ensures that two maiboxes do not sit on the same cache + * line at any cache level. They could belong to different cpus/clusters & + * get written while being protected by different locks causing corruption of + * a valid mailbox address. + ******************************************************************************/ +#define CACHE_WRITEBACK_SHIFT 6 +#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT) + +/* + * Define GICD and GICC and GICR base + */ +#define PLAT_RK_GICD_BASE BASE_GICD_BASE +#define PLAT_RK_GICR_BASE BASE_GICR_BASE +#define PLAT_RK_GICC_BASE 0 + +#define PLAT_RK_UART_BASE UART2_BASE +#define PLAT_RK_UART_CLOCK RK3399_UART_CLOCK +#define PLAT_RK_UART_BAUDRATE RK3399_BAUDRATE + +#define PLAT_RK_CCI_BASE CCI500_BASE + +#define PLAT_RK_PRIMARY_CPU 0x0 + +#define PSRAM_DO_DDR_RESUME 1 +#define PSRAM_CHECK_WAKEUP_CPU 0 + +#endif /* PLATFORM_DEF_H */ diff --git a/plat/rockchip/rk3399/include/shared/addressmap_shared.h b/plat/rockchip/rk3399/include/shared/addressmap_shared.h new file mode 100644 index 0000000..84a31b2 --- /dev/null +++ b/plat/rockchip/rk3399/include/shared/addressmap_shared.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef ADDRESSMAP_SHARED_H +#define ADDRESSMAP_SHARED_H + +#define SIZE_K(n) ((n) * 1024) +#define SIZE_M(n) ((n) * 1024 * 1024) +#define SRAM_TEXT_LIMIT (4 * 1024) +#define SRAM_DATA_LIMIT (4 * 1024) +#define SRAM_BIN_LIMIT (4 * 1024) + +/* + * The parts of the shared defined registers address with AP and M0, + * let's note and mark the previous defines like this: + */ +#define GIC500_BASE (MMIO_BASE + 0x06E00000) +#define UART0_BASE (MMIO_BASE + 0x07180000) +#define UART1_BASE (MMIO_BASE + 0x07190000) +#define UART2_BASE (MMIO_BASE + 0x071A0000) +#define UART3_BASE (MMIO_BASE + 0x071B0000) + +#define PMU_BASE (MMIO_BASE + 0x07310000) +#define PMUGRF_BASE (MMIO_BASE + 0x07320000) +#define SGRF_BASE (MMIO_BASE + 0x07330000) +#define PMUSRAM_BASE (MMIO_BASE + 0x073B0000) +#define PWM_BASE (MMIO_BASE + 0x07420000) + +#define CIC_BASE (MMIO_BASE + 0x07620000) +#define PD_BUS0_BASE (MMIO_BASE + 0x07650000) +#define DCF_BASE (MMIO_BASE + 0x076A0000) +#define GPIO0_BASE (MMIO_BASE + 0x07720000) +#define GPIO1_BASE (MMIO_BASE + 0x07730000) +#define PMUCRU_BASE (MMIO_BASE + 0x07750000) +#define CRU_BASE (MMIO_BASE + 0x07760000) +#define GRF_BASE (MMIO_BASE + 0x07770000) +#define GPIO2_BASE (MMIO_BASE + 0x07780000) +#define GPIO3_BASE (MMIO_BASE + 0x07788000) +#define GPIO4_BASE (MMIO_BASE + 0x07790000) +#define WDT1_BASE (MMIO_BASE + 0x07840000) +#define WDT0_BASE (MMIO_BASE + 0x07848000) +#define TIMER_BASE (MMIO_BASE + 0x07850000) +#define STIME_BASE (MMIO_BASE + 0x07860000) +#define SRAM_BASE (MMIO_BASE + 0x078C0000) +#define SERVICE_NOC_0_BASE (MMIO_BASE + 0x07A50000) +#define DDRC0_BASE (MMIO_BASE + 0x07A80000) +#define SERVICE_NOC_1_BASE (MMIO_BASE + 0x07A84000) +#define DDRC1_BASE (MMIO_BASE + 0x07A88000) +#define SERVICE_NOC_2_BASE (MMIO_BASE + 0x07A8C000) +#define SERVICE_NOC_3_BASE (MMIO_BASE + 0x07A90000) +#define CCI500_BASE (MMIO_BASE + 0x07B00000) +#define COLD_BOOT_BASE (MMIO_BASE + 0x07FF0000) + +/* Registers size */ +#define GIC500_SIZE SIZE_M(2) +#define UART0_SIZE SIZE_K(64) +#define UART1_SIZE SIZE_K(64) +#define 
UART2_SIZE SIZE_K(64) +#define UART3_SIZE SIZE_K(64) +#define PMU_SIZE SIZE_K(64) +#define PMUGRF_SIZE SIZE_K(64) +#define SGRF_SIZE SIZE_K(64) +#define PMUSRAM_SIZE SIZE_K(64) +#define PMUSRAM_RSIZE SIZE_K(8) +#define PWM_SIZE SIZE_K(64) +#define CIC_SIZE SIZE_K(4) +#define DCF_SIZE SIZE_K(4) +#define GPIO0_SIZE SIZE_K(64) +#define GPIO1_SIZE SIZE_K(64) +#define PMUCRU_SIZE SIZE_K(64) +#define CRU_SIZE SIZE_K(64) +#define GRF_SIZE SIZE_K(64) +#define GPIO2_SIZE SIZE_K(32) +#define GPIO3_SIZE SIZE_K(32) +#define GPIO4_SIZE SIZE_K(32) +#define STIME_SIZE SIZE_K(64) +#define SRAM_SIZE SIZE_K(192) +#define SERVICE_NOC_0_SIZE SIZE_K(192) +#define DDRC0_SIZE SIZE_K(32) +#define SERVICE_NOC_1_SIZE SIZE_K(16) +#define DDRC1_SIZE SIZE_K(32) +#define SERVICE_NOC_2_SIZE SIZE_K(16) +#define SERVICE_NOC_3_SIZE SIZE_K(448) +#define CCI500_SIZE SIZE_M(1) +#define PD_BUS0_SIZE SIZE_K(448) + +/* DDR Registers address */ +#define CTL_BASE(ch) (DDRC0_BASE + (ch) * 0x8000) +#define CTL_REG(ch, n) (CTL_BASE(ch) + (n) * 0x4) + +#define PI_OFFSET 0x800 +#define PI_BASE(ch) (CTL_BASE(ch) + PI_OFFSET) +#define PI_REG(ch, n) (PI_BASE(ch) + (n) * 0x4) + +#define PHY_OFFSET 0x2000 +#define PHY_BASE(ch) (CTL_BASE(ch) + PHY_OFFSET) +#define PHY_REG(ch, n) (PHY_BASE(ch) + (n) * 0x4) + +#define MSCH_BASE(ch) (SERVICE_NOC_1_BASE + (ch) * 0x8000) + +#endif /* ADDRESSMAP_SHARED_H */ diff --git a/plat/rockchip/rk3399/include/shared/bl31_param.h b/plat/rockchip/rk3399/include/shared/bl31_param.h new file mode 100644 index 0000000..6e7e8ba --- /dev/null +++ b/plat/rockchip/rk3399/include/shared/bl31_param.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef BL31_PARAM_H +#define BL31_PARAM_H + +/******************************************************************************* + * Platform memory map related constants + ******************************************************************************/ +/* TF text, ro, rw, Size: 1MB */ +#define TZRAM_BASE (0x0) +#define TZRAM_SIZE (0x100000) + +/******************************************************************************* + * BL31 specific defines. + ******************************************************************************/ +/* + * Put BL31 at the top of the Trusted RAM + */ +#define BL31_BASE (TZRAM_BASE + 0x40000) +#define BL31_LIMIT (TZRAM_BASE + TZRAM_SIZE) + +#endif /* BL31_PARAM_H */ diff --git a/plat/rockchip/rk3399/include/shared/dram_regs.h b/plat/rockchip/rk3399/include/shared/dram_regs.h new file mode 100644 index 0000000..4d4ebf6 --- /dev/null +++ b/plat/rockchip/rk3399/include/shared/dram_regs.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef DRAM_REGS_H +#define DRAM_REGS_H + +#define CTL_REG_NUM 332 +#define PHY_REG_NUM 959 +#define PI_REG_NUM 200 + +#define MSCH_ID_COREID 0x0 +#define MSCH_ID_REVISIONID 0x4 +#define MSCH_DEVICECONF 0x8 +#define MSCH_DEVICESIZE 0xc +#define MSCH_DDRTIMINGA0 0x10 +#define MSCH_DDRTIMINGB0 0x14 +#define MSCH_DDRTIMINGC0 0x18 +#define MSCH_DEVTODEV0 0x1c +#define MSCH_DDRMODE 0x110 +#define MSCH_AGINGX0 0x1000 + +#define CIC_CTRL0 0x0 +#define CIC_CTRL1 0x4 +#define CIC_IDLE_TH 0x8 +#define CIC_CG_WAIT_TH 0xc +#define CIC_STATUS0 0x10 +#define CIC_STATUS1 0x14 +#define CIC_CTRL2 0x18 +#define CIC_CTRL3 0x1c +#define CIC_CTRL4 0x20 + +/* DENALI_CTL_00 */ +#define START 1 + +/* DENALI_CTL_68 */ +#define PWRUP_SREFRESH_EXIT (1 << 16) + +/* DENALI_CTL_274 */ +#define MEM_RST_VALID 1 + +#define PHY_DRV_ODT_Hi_Z 0x0 +#define PHY_DRV_ODT_240 0x1 +#define PHY_DRV_ODT_120 0x8 +#define PHY_DRV_ODT_80 0x9 +#define PHY_DRV_ODT_60 0xc +#define PHY_DRV_ODT_48 0xd +#define PHY_DRV_ODT_40 0xe +#define PHY_DRV_ODT_34_3 0xf + +/* + * sys_reg bitfield struct + * [31] row_3_4_ch1 + * [30] row_3_4_ch0 + * [29:28] chinfo + * [27] rank_ch1 + * [26:25] col_ch1 + * [24] bk_ch1 + * [23:22] cs0_row_ch1 + * [21:20] cs1_row_ch1 + * [19:18] bw_ch1 + * [17:16] dbw_ch1; + * [15:13] ddrtype + * [12] channelnum + * [11] rank_ch0 + * [10:9] col_ch0 + * [8] bk_ch0 + * [7:6] cs0_row_ch0 + * [5:4] cs1_row_ch0 + * [3:2] bw_ch0 + * [1:0] dbw_ch0 + */ +#define SYS_REG_ENC_ROW_3_4(n, ch) ((n) << (30 + (ch))) +#define SYS_REG_DEC_ROW_3_4(n, ch) (((n) >> (30 + (ch))) & 0x1) +#define SYS_REG_ENC_CHINFO(ch) (1 << (28 + (ch))) +#define SYS_REG_DEC_CHINFO(n, ch) (((n) >> (28 + (ch))) & 0x1) +#define SYS_REG_ENC_DDRTYPE(n) ((n) << 13) +#define SYS_REG_DEC_DDRTYPE(n) (((n) >> 13) & 0x7) +#define SYS_REG_ENC_NUM_CH(n) (((n) - 1) << 12) +#define SYS_REG_DEC_NUM_CH(n) (1 + (((n) >> 12) & 0x1)) +#define SYS_REG_ENC_RANK(n, ch) (((n) - 1) << (11 + (ch) * 16)) +#define SYS_REG_DEC_RANK(n, ch) (1 + (((n) >> (11 + (ch) * 16)) & 0x1)) +#define SYS_REG_ENC_COL(n, ch) (((n) - 9) << (9 + (ch) * 16)) +#define SYS_REG_DEC_COL(n, ch) (9 + (((n) >> (9 + (ch) * 16)) & 0x3)) +#define SYS_REG_ENC_BK(n, ch) (((n) == 3 ? 0 : 1) << (8 + (ch) * 16)) +#define SYS_REG_DEC_BK(n, ch) (3 - (((n) >> (8 + (ch) * 16)) & 0x1)) +#define SYS_REG_ENC_CS0_ROW(n, ch) (((n) - 13) << (6 + (ch) * 16)) +#define SYS_REG_DEC_CS0_ROW(n, ch) (13 + (((n) >> (6 + (ch) * 16)) & 0x3)) +#define SYS_REG_ENC_CS1_ROW(n, ch) (((n) - 13) << (4 + (ch) * 16)) +#define SYS_REG_DEC_CS1_ROW(n, ch) (13 + (((n) >> (4 + (ch) * 16)) & 0x3)) +#define SYS_REG_ENC_BW(n, ch) ((2 >> (n)) << (2 + (ch) * 16)) +#define SYS_REG_DEC_BW(n, ch) (2 >> (((n) >> (2 + (ch) * 16)) & 0x3)) +#define SYS_REG_ENC_DBW(n, ch) ((2 >> (n)) << (0 + (ch) * 16)) +#define SYS_REG_DEC_DBW(n, ch) (2 >> (((n) >> (0 + (ch) * 16)) & 0x3)) +#define DDR_STRIDE(n) mmio_write_32(SGRF_BASE + SGRF_SOC_CON3_7(4), \ + (0x1f<<(10+16))|((n)<<10)) + +#endif /* DRAM_REGS_H */ diff --git a/plat/rockchip/rk3399/include/shared/m0_param.h b/plat/rockchip/rk3399/include/shared/m0_param.h new file mode 100644 index 0000000..a5311c9 --- /dev/null +++ b/plat/rockchip/rk3399/include/shared/m0_param.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef M0_PARAM_H +#define M0_PARAM_H + +#define PARAM_ADDR 0xc0 + +#define PARAM_M0_FUNC 0x00 +#define PARAM_DRAM_FREQ 0x04 +#define PARAM_DPLL_CON0 0x08 +#define PARAM_DPLL_CON1 0x0c +#define PARAM_DPLL_CON2 0x10 +#define PARAM_DPLL_CON3 0x14 +#define PARAM_DPLL_CON4 0x18 +#define PARAM_DPLL_CON5 0x1c +#define PARAM_FREQ_SELECT 0x20 +#define PARAM_M0_DONE 0x24 +#define PARAM_M0_SIZE 0x28 +#define M0_DONE_FLAG 0xf59ec39a + +#endif /* M0_PARAM_H */ diff --git a/plat/rockchip/rk3399/include/shared/misc_regs.h b/plat/rockchip/rk3399/include/shared/misc_regs.h new file mode 100644 index 0000000..0160453 --- /dev/null +++ b/plat/rockchip/rk3399/include/shared/misc_regs.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef MISC_REGS_H +#define MISC_REGS_H + +/* CRU */ +#define CRU_DPLL_CON0 0x40 +#define CRU_DPLL_CON1 0x44 +#define CRU_DPLL_CON2 0x48 +#define CRU_DPLL_CON3 0x4c +#define CRU_DPLL_CON4 0x50 +#define CRU_DPLL_CON5 0x54 + +/* CRU_PLL_CON3 */ +#define PLL_SLOW_MODE 0 +#define PLL_NORMAL_MODE 1 +#define PLL_MODE(n) ((0x3 << (8 + 16)) | ((n) << 8)) +#define PLL_POWER_DOWN(n) ((0x1 << (0 + 16)) | ((n) << 0)) + +/* PMU CRU */ +#define PMU_CRU_GATEDIS_CON0 0x130 + +#endif /* MISC_REGS_H */ diff --git a/plat/rockchip/rk3399/include/shared/pmu_bits.h b/plat/rockchip/rk3399/include/shared/pmu_bits.h new file mode 100644 index 0000000..2968d5b --- /dev/null +++ b/plat/rockchip/rk3399/include/shared/pmu_bits.h @@ -0,0 +1,697 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PMU_BITS_H +#define PMU_BITS_H + +enum pmu_powerdomain_id { + PD_CPUL0 = 0, + PD_CPUL1, + PD_CPUL2, + PD_CPUL3, + PD_CPUB0, + PD_CPUB1, + PD_SCUL, + PD_SCUB, + PD_TCPD0, + PD_TCPD1, + PD_CCI, + PD_PERILP, + PD_PERIHP, + PD_CENTER, + PD_VIO, + PD_GPU, + PD_VCODEC, + PD_VDU, + PD_RGA, + PD_IEP, + PD_VO, + PD_ISP0 = 22, + PD_ISP1, + PD_HDCP, + PD_GMAC, + PD_EMMC, + PD_USB3, + PD_EDP, + PD_GIC, + PD_SD, + PD_SDIOAUDIO, + PD_END +}; + +enum powerdomain_state { + PMU_POWER_ON = 0, + PMU_POWER_OFF, +}; + +enum pmu_bus_id { + BUS_ID_GPU = 0, + BUS_ID_PERILP, + BUS_ID_PERIHP, + BUS_ID_VCODEC, + BUS_ID_VDU, + BUS_ID_RGA, + BUS_ID_IEP, + BUS_ID_VOPB, + BUS_ID_VOPL, + BUS_ID_ISP0, + BUS_ID_ISP1, + BUS_ID_HDCP, + BUS_ID_USB3, + BUS_ID_PERILPM0, + BUS_ID_CENTER, + BUS_ID_CCIM0, + BUS_ID_CCIM1, + BUS_ID_VIO, + BUS_ID_MSCH0, + BUS_ID_MSCH1, + BUS_ID_ALIVE, + BUS_ID_PMU, + BUS_ID_EDP, + BUS_ID_GMAC, + BUS_ID_EMMC, + BUS_ID_CENTER1, + BUS_ID_PMUM0, + BUS_ID_GIC, + BUS_ID_SD, + BUS_ID_SDIOAUDIO, +}; + +enum pmu_bus_state { + BUS_ACTIVE, + BUS_IDLE, +}; + +/* pmu_cpuapm bit */ +enum pmu_cores_pm_by_wfi { + core_pm_en = 0, + core_pm_int_wakeup_en, + core_pm_resv, + core_pm_sft_wakeup_en +}; + +enum pmu_wkup_cfg0 { + PMU_GPIO0A_POSE_WKUP_EN = 0, + PMU_GPIO0B_POSE_WKUP_EN = 8, + PMU_GPIO0C_POSE_WKUP_EN = 16, + PMU_GPIO0D_POSE_WKUP_EN = 24, +}; + +enum pmu_wkup_cfg1 { + PMU_GPIO0A_NEGEDGE_WKUP_EN = 0, + PMU_GPIO0B_NEGEDGE_WKUP_EN = 7, + PMU_GPIO0C_NEGEDGE_WKUP_EN = 16, + PMU_GPIO0D_NEGEDGE_WKUP_EN = 24, +}; + +enum pmu_wkup_cfg2 { + PMU_GPIO1A_POSE_WKUP_EN = 0, + PMU_GPIO1B_POSE_WKUP_EN = 7, + PMU_GPIO1C_POSE_WKUP_EN = 16, + PMU_GPIO1D_POSE_WKUP_EN = 24, +}; + +enum pmu_wkup_cfg3 { + PMU_GPIO1A_NEGEDGE_WKUP_EN = 0, + PMU_GPIO1B_NEGEDGE_WKUP_EN = 7, + PMU_GPIO1C_NEGEDGE_WKUP_EN = 16, + 
PMU_GPIO1D_NEGEDGE_WKUP_EN = 24, +}; + +/* pmu_wkup_cfg4 */ +enum pmu_wkup_cfg4 { + PMU_CLUSTER_L_WKUP_EN = 0, + PMU_CLUSTER_B_WKUP_EN, + PMU_GPIO_WKUP_EN, + PMU_SDIO_WKUP_EN, + + PMU_SDMMC_WKUP_EN, + PMU_TIMER_WKUP_EN = 6, + PMU_USBDEV_WKUP_EN, + + PMU_SFT_WKUP_EN, + PMU_M0_WDT_WKUP_EN, + PMU_TIMEOUT_WKUP_EN, + PMU_PWM_WKUP_EN, + + PMU_PCIE_WKUP_EN = 13, +}; + +enum pmu_pwrdn_con { + PMU_A53_L0_PWRDWN_EN = 0, + PMU_A53_L1_PWRDWN_EN, + PMU_A53_L2_PWRDWN_EN, + PMU_A53_L3_PWRDWN_EN, + + PMU_A72_B0_PWRDWN_EN, + PMU_A72_B1_PWRDWN_EN, + PMU_SCU_L_PWRDWN_EN, + PMU_SCU_B_PWRDWN_EN, + + PMU_TCPD0_PWRDWN_EN, + PMU_TCPD1_PWRDWN_EN, + PMU_CCI_PWRDWN_EN, + PMU_PERILP_PWRDWN_EN, + + PMU_PERIHP_PWRDWN_EN, + PMU_CENTER_PWRDWN_EN, + PMU_VIO_PWRDWN_EN, + PMU_GPU_PWRDWN_EN, + + PMU_VCODEC_PWRDWN_EN, + PMU_VDU_PWRDWN_EN, + PMU_RGA_PWRDWN_EN, + PMU_IEP_PWRDWN_EN, + + PMU_VO_PWRDWN_EN, + PMU_ISP0_PWRDWN_EN = 22, + PMU_ISP1_PWRDWN_EN, + + PMU_HDCP_PWRDWN_EN, + PMU_GMAC_PWRDWN_EN, + PMU_EMMC_PWRDWN_EN, + PMU_USB3_PWRDWN_EN, + + PMU_EDP_PWRDWN_EN, + PMU_GIC_PWRDWN_EN, + PMU_SD_PWRDWN_EN, + PMU_SDIOAUDIO_PWRDWN_EN, +}; + +enum pmu_pwrdn_st { + PMU_A53_L0_PWRDWN_ST = 0, + PMU_A53_L1_PWRDWN_ST, + PMU_A53_L2_PWRDWN_ST, + PMU_A53_L3_PWRDWN_ST, + + PMU_A72_B0_PWRDWN_ST, + PMU_A72_B1_PWRDWN_ST, + PMU_SCU_L_PWRDWN_ST, + PMU_SCU_B_PWRDWN_ST, + + PMU_TCPD0_PWRDWN_ST, + PMU_TCPD1_PWRDWN_ST, + PMU_CCI_PWRDWN_ST, + PMU_PERILP_PWRDWN_ST, + + PMU_PERIHP_PWRDWN_ST, + PMU_CENTER_PWRDWN_ST, + PMU_VIO_PWRDWN_ST, + PMU_GPU_PWRDWN_ST, + + PMU_VCODEC_PWRDWN_ST, + PMU_VDU_PWRDWN_ST, + PMU_RGA_PWRDWN_ST, + PMU_IEP_PWRDWN_ST, + + PMU_VO_PWRDWN_ST, + PMU_ISP0_PWRDWN_ST = 22, + PMU_ISP1_PWRDWN_ST, + + PMU_HDCP_PWRDWN_ST, + PMU_GMAC_PWRDWN_ST, + PMU_EMMC_PWRDWN_ST, + PMU_USB3_PWRDWN_ST, + + PMU_EDP_PWRDWN_ST, + PMU_GIC_PWRDWN_ST, + PMU_SD_PWRDWN_ST, + PMU_SDIOAUDIO_PWRDWN_ST, + +}; + +enum pmu_pll_con { + PMU_PLL_PD_CFG = 0, + PMU_SFT_PLL_PD = 8, +}; + +enum pmu_pwermode_con { + PMU_PWR_MODE_EN = 0, + PMU_WKUP_RST_EN, + PMU_INPUT_CLAMP_EN, + PMU_OSC_DIS, + + PMU_ALIVE_USE_LF, + PMU_PMU_USE_LF, + PMU_POWER_OFF_REQ_CFG, + PMU_CHIP_PD_EN, + + PMU_PLL_PD_EN, + PMU_CPU0_PD_EN, + PMU_L2_FLUSH_EN, + PMU_L2_IDLE_EN, + + PMU_SCU_PD_EN, + PMU_CCI_PD_EN, + PMU_PERILP_PD_EN, + PMU_CENTER_PD_EN, + + PMU_SREF0_ENTER_EN, + PMU_DDRC0_GATING_EN, + PMU_DDRIO0_RET_EN, + PMU_DDRIO0_RET_DE_REQ, + + PMU_SREF1_ENTER_EN, + PMU_DDRC1_GATING_EN, + PMU_DDRIO1_RET_EN, + PMU_DDRIO1_RET_DE_REQ, + + PMU_CLK_CENTER_SRC_GATE_EN = 26, + PMU_CLK_PERILP_SRC_GATE_EN, + + PMU_CLK_CORE_SRC_GATE_EN, + PMU_DDRIO_RET_HW_DE_REQ, + PMU_SLP_OUTPUT_CFG, + PMU_MAIN_CLUSTER, +}; + +enum pmu_sft_con { + PMU_WKUP_SFT = 0, + PMU_INPUT_CLAMP_CFG, + PMU_OSC_DIS_CFG, + PMU_PMU_LF_EN_CFG, + + PMU_ALIVE_LF_EN_CFG, + PMU_24M_EN_CFG, + PMU_DBG_PWRUP_L0_CFG, + PMU_WKUP_SFT_M0, + + PMU_DDRCTL0_C_SYSREQ_CFG, + PMU_DDR0_IO_RET_CFG, + + PMU_DDRCTL1_C_SYSREQ_CFG = 12, + PMU_DDR1_IO_RET_CFG, + DBG_PWRUP_B0_CFG = 15, + + DBG_NOPWERDWN_L0_EN, + DBG_NOPWERDWN_L1_EN, + DBG_NOPWERDWN_L2_EN, + DBG_NOPWERDWN_L3_EN, + + DBG_PWRUP_REQ_L_EN = 20, + CLUSTER_L_CLK_SRC_GATING_CFG, + L2_FLUSH_REQ_CLUSTER_L, + ACINACTM_CLUSTER_L_CFG, + + DBG_NO_PWERDWN_B0_EN, + DBG_NO_PWERDWN_B1_EN, + + DBG_PWRUP_REQ_B_EN = 28, + CLUSTER_B_CLK_SRC_GATING_CFG, + L2_FLUSH_REQ_CLUSTER_B, + ACINACTM_CLUSTER_B_CFG, +}; + +enum pmu_int_con { + PMU_PMU_INT_EN = 0, + PMU_PWRMD_WKUP_INT_EN, + PMU_WKUP_GPIO0_NEG_INT_EN, + PMU_WKUP_GPIO0_POS_INT_EN, + PMU_WKUP_GPIO1_NEG_INT_EN, + PMU_WKUP_GPIO1_POS_INT_EN, +}; + +enum pmu_int_st { + 
PMU_PWRMD_WKUP_INT_ST = 1, + PMU_WKUP_GPIO0_NEG_INT_ST, + PMU_WKUP_GPIO0_POS_INT_ST, + PMU_WKUP_GPIO1_NEG_INT_ST, + PMU_WKUP_GPIO1_POS_INT_ST, +}; + +enum pmu_gpio0_pos_int_con { + PMU_GPIO0A_POS_INT_EN = 0, + PMU_GPIO0B_POS_INT_EN = 8, + PMU_GPIO0C_POS_INT_EN = 16, + PMU_GPIO0D_POS_INT_EN = 24, +}; + +enum pmu_gpio0_neg_int_con { + PMU_GPIO0A_NEG_INT_EN = 0, + PMU_GPIO0B_NEG_INT_EN = 8, + PMU_GPIO0C_NEG_INT_EN = 16, + PMU_GPIO0D_NEG_INT_EN = 24, +}; + +enum pmu_gpio1_pos_int_con { + PMU_GPIO1A_POS_INT_EN = 0, + PMU_GPIO1B_POS_INT_EN = 8, + PMU_GPIO1C_POS_INT_EN = 16, + PMU_GPIO1D_POS_INT_EN = 24, +}; + +enum pmu_gpio1_neg_int_con { + PMU_GPIO1A_NEG_INT_EN = 0, + PMU_GPIO1B_NEG_INT_EN = 8, + PMU_GPIO1C_NEG_INT_EN = 16, + PMU_GPIO1D_NEG_INT_EN = 24, +}; + +enum pmu_gpio0_pos_int_st { + PMU_GPIO0A_POS_INT_ST = 0, + PMU_GPIO0B_POS_INT_ST = 8, + PMU_GPIO0C_POS_INT_ST = 16, + PMU_GPIO0D_POS_INT_ST = 24, +}; + +enum pmu_gpio0_neg_int_st { + PMU_GPIO0A_NEG_INT_ST = 0, + PMU_GPIO0B_NEG_INT_ST = 8, + PMU_GPIO0C_NEG_INT_ST = 16, + PMU_GPIO0D_NEG_INT_ST = 24, +}; + +enum pmu_gpio1_pos_int_st { + PMU_GPIO1A_POS_INT_ST = 0, + PMU_GPIO1B_POS_INT_ST = 8, + PMU_GPIO1C_POS_INT_ST = 16, + PMU_GPIO1D_POS_INT_ST = 24, +}; + +enum pmu_gpio1_neg_int_st { + PMU_GPIO1A_NEG_INT_ST = 0, + PMU_GPIO1B_NEG_INT_ST = 8, + PMU_GPIO1C_NEG_INT_ST = 16, + PMU_GPIO1D_NEG_INT_ST = 24, +}; + +/* pmu power down configure register 0x0050 */ +enum pmu_pwrdn_inten { + PMU_A53_L0_PWR_SWITCH_INT_EN = 0, + PMU_A53_L1_PWR_SWITCH_INT_EN, + PMU_A53_L2_PWR_SWITCH_INT_EN, + PMU_A53_L3_PWR_SWITCH_INT_EN, + + PMU_A72_B0_PWR_SWITCH_INT_EN, + PMU_A72_B1_PWR_SWITCH_INT_EN, + PMU_SCU_L_PWR_SWITCH_INT_EN, + PMU_SCU_B_PWR_SWITCH_INT_EN, + + PMU_TCPD0_PWR_SWITCH_INT_EN, + PMU_TCPD1_PWR_SWITCH_INT_EN, + PMU_CCI_PWR_SWITCH_INT_EN, + PMU_PERILP_PWR_SWITCH_INT_EN, + + PMU_PERIHP_PWR_SWITCH_INT_EN, + PMU_CENTER_PWR_SWITCH_INT_EN, + PMU_VIO_PWR_SWITCH_INT_EN, + PMU_GPU_PWR_SWITCH_INT_EN, + + PMU_VCODEC_PWR_SWITCH_INT_EN, + PMU_VDU_PWR_SWITCH_INT_EN, + PMU_RGA_PWR_SWITCH_INT_EN, + PMU_IEP_PWR_SWITCH_INT_EN, + + PMU_VO_PWR_SWITCH_INT_EN, + PMU_ISP0_PWR_SWITCH_INT_EN = 22, + PMU_ISP1_PWR_SWITCH_INT_EN, + + PMU_HDCP_PWR_SWITCH_INT_EN, + PMU_GMAC_PWR_SWITCH_INT_EN, + PMU_EMMC_PWR_SWITCH_INT_EN, + PMU_USB3_PWR_SWITCH_INT_EN, + + PMU_EDP_PWR_SWITCH_INT_EN, + PMU_GIC_PWR_SWITCH_INT_EN, + PMU_SD_PWR_SWITCH_INT_EN, + PMU_SDIOAUDIO_PWR_SWITCH_INT_EN, +}; + +enum pmu_wkup_status { + PMU_WKUP_BY_CLSTER_L_INT = 0, + PMU_WKUP_BY_CLSTER_b_INT, + PMU_WKUP_BY_GPIO_INT, + PMU_WKUP_BY_SDIO_DET, + + PMU_WKUP_BY_SDMMC_DET, + PMU_WKUP_BY_TIMER = 6, + PMU_WKUP_BY_USBDEV_DET, + + PMU_WKUP_BY_M0_SFT, + PMU_WKUP_BY_M0_WDT_INT, + PMU_WKUP_BY_TIMEOUT, + PMU_WKUP_BY_PWM, + + PMU_WKUP_BY_PCIE = 13, +}; + +enum pmu_bus_clr { + PMU_CLR_GPU = 0, + PMU_CLR_PERILP, + PMU_CLR_PERIHP, + PMU_CLR_VCODEC, + + PMU_CLR_VDU, + PMU_CLR_RGA, + PMU_CLR_IEP, + PMU_CLR_VOPB, + + PMU_CLR_VOPL, + PMU_CLR_ISP0, + PMU_CLR_ISP1, + PMU_CLR_HDCP, + + PMU_CLR_USB3, + PMU_CLR_PERILPM0, + PMU_CLR_CENTER, + PMU_CLR_CCIM1, + + PMU_CLR_CCIM0, + PMU_CLR_VIO, + PMU_CLR_MSCH0, + PMU_CLR_MSCH1, + + PMU_CLR_ALIVE, + PMU_CLR_PMU, + PMU_CLR_EDP, + PMU_CLR_GMAC, + + PMU_CLR_EMMC, + PMU_CLR_CENTER1, + PMU_CLR_PMUM0, + PMU_CLR_GIC, + + PMU_CLR_SD, + PMU_CLR_SDIOAUDIO, +}; + +/* PMU bus idle request register */ +enum pmu_bus_idle_req { + PMU_IDLE_REQ_GPU = 0, + PMU_IDLE_REQ_PERILP, + PMU_IDLE_REQ_PERIHP, + PMU_IDLE_REQ_VCODEC, + + PMU_IDLE_REQ_VDU, + PMU_IDLE_REQ_RGA, + PMU_IDLE_REQ_IEP, + PMU_IDLE_REQ_VOPB, + + 
PMU_IDLE_REQ_VOPL, + PMU_IDLE_REQ_ISP0, + PMU_IDLE_REQ_ISP1, + PMU_IDLE_REQ_HDCP, + + PMU_IDLE_REQ_USB3, + PMU_IDLE_REQ_PERILPM0, + PMU_IDLE_REQ_CENTER, + PMU_IDLE_REQ_CCIM0, + + PMU_IDLE_REQ_CCIM1, + PMU_IDLE_REQ_VIO, + PMU_IDLE_REQ_MSCH0, + PMU_IDLE_REQ_MSCH1, + + PMU_IDLE_REQ_ALIVE, + PMU_IDLE_REQ_PMU, + PMU_IDLE_REQ_EDP, + PMU_IDLE_REQ_GMAC, + + PMU_IDLE_REQ_EMMC, + PMU_IDLE_REQ_CENTER1, + PMU_IDLE_REQ_PMUM0, + PMU_IDLE_REQ_GIC, + + PMU_IDLE_REQ_SD, + PMU_IDLE_REQ_SDIOAUDIO, +}; + +/* pmu bus idle status register */ +enum pmu_bus_idle_st { + PMU_IDLE_ST_GPU = 0, + PMU_IDLE_ST_PERILP, + PMU_IDLE_ST_PERIHP, + PMU_IDLE_ST_VCODEC, + + PMU_IDLE_ST_VDU, + PMU_IDLE_ST_RGA, + PMU_IDLE_ST_IEP, + PMU_IDLE_ST_VOPB, + + PMU_IDLE_ST_VOPL, + PMU_IDLE_ST_ISP0, + PMU_IDLE_ST_ISP1, + PMU_IDLE_ST_HDCP, + + PMU_IDLE_ST_USB3, + PMU_IDLE_ST_PERILPM0, + PMU_IDLE_ST_CENTER, + PMU_IDLE_ST_CCIM0, + + PMU_IDLE_ST_CCIM1, + PMU_IDLE_ST_VIO, + PMU_IDLE_ST_MSCH0, + PMU_IDLE_ST_MSCH1, + + PMU_IDLE_ST_ALIVE, + PMU_IDLE_ST_PMU, + PMU_IDLE_ST_EDP, + PMU_IDLE_ST_GMAC, + + PMU_IDLE_ST_EMMC, + PMU_IDLE_ST_CENTER1, + PMU_IDLE_ST_PMUM0, + PMU_IDLE_ST_GIC, + + PMU_IDLE_ST_SD, + PMU_IDLE_ST_SDIOAUDIO, +}; + +enum pmu_bus_idle_ack { + PMU_IDLE_ACK_GPU = 0, + PMU_IDLE_ACK_PERILP, + PMU_IDLE_ACK_PERIHP, + PMU_IDLE_ACK_VCODEC, + + PMU_IDLE_ACK_VDU, + PMU_IDLE_ACK_RGA, + PMU_IDLE_ACK_IEP, + PMU_IDLE_ACK_VOPB, + + PMU_IDLE_ACK_VOPL, + PMU_IDLE_ACK_ISP0, + PMU_IDLE_ACK_ISP1, + PMU_IDLE_ACK_HDCP, + + PMU_IDLE_ACK_USB3, + PMU_IDLE_ACK_PERILPM0, + PMU_IDLE_ACK_CENTER, + PMU_IDLE_ACK_CCIM0, + + PMU_IDLE_ACK_CCIM1, + PMU_IDLE_ACK_VIO, + PMU_IDLE_ACK_MSCH0, + PMU_IDLE_ACK_MSCH1, + + PMU_IDLE_ACK_ALIVE, + PMU_IDLE_ACK_PMU, + PMU_IDLE_ACK_EDP, + PMU_IDLE_ACK_GMAC, + + PMU_IDLE_ACK_EMMC, + PMU_IDLE_ACK_CENTER1, + PMU_IDLE_ACK_PMUM0, + PMU_IDLE_ACK_GIC, + + PMU_IDLE_ACK_SD, + PMU_IDLE_ACK_SDIOAUDIO, +}; + +enum pmu_cci500_con { + PMU_PREQ_CCI500_CFG_SW = 0, + PMU_CLR_PREQ_CCI500_HW, + PMU_PSTATE_CCI500_0, + PMU_PSTATE_CCI500_1, + + PMU_PSTATE_CCI500_2, + PMU_QREQ_CCI500_CFG_SW, + PMU_CLR_QREQ_CCI500_HW, + PMU_QGATING_CCI500_CFG, + + PMU_PREQ_CCI500_CFG_SW_WMSK = 16, + PMU_CLR_PREQ_CCI500_HW_WMSK, + PMU_PSTATE_CCI500_0_WMSK, + PMU_PSTATE_CCI500_1_WMSK, + + PMU_PSTATE_CCI500_2_WMSK, + PMU_QREQ_CCI500_CFG_SW_WMSK, + PMU_CLR_QREQ_CCI500_HW_WMSK, + PMU_QGATING_CCI500_CFG_WMSK, +}; + +enum pmu_adb400_con { + PMU_PWRDWN_REQ_CXCS_SW = 0, + PMU_PWRDWN_REQ_CORE_L_SW, + PMU_PWRDWN_REQ_CORE_L_2GIC_SW, + PMU_PWRDWN_REQ_GIC2_CORE_L_SW, + + PMU_PWRDWN_REQ_CORE_B_SW, + PMU_PWRDWN_REQ_CORE_B_2GIC_SW, + PMU_PWRDWN_REQ_GIC2_CORE_B_SW, + + PMU_CLR_CXCS_HW = 8, + PMU_CLR_CORE_L_HW, + PMU_CLR_CORE_L_2GIC_HW, + PMU_CLR_GIC2_CORE_L_HW, + + PMU_CLR_CORE_B_HW, + PMU_CLR_CORE_B_2GIC_HW, + PMU_CLR_GIC2_CORE_B_HW, + + PMU_PWRDWN_REQ_CXCS_SW_WMSK = 16, + PMU_PWRDWN_REQ_CORE_L_SW_WMSK, + PMU_PWRDWN_REQ_CORE_L_2GIC_SW_WMSK, + PMU_PWRDWN_REQ_GIC2_CORE_L_SW_WMSK, + + PMU_PWRDWN_REQ_CORE_B_SW_WMSK, + PMU_PWRDWN_REQ_CORE_B_2GIC_SW_WMSK, + PMU_PWRDWN_REQ_GIC2_CORE_B_SW_WMSK, + + PMU_CLR_CXCS_HW_WMSK = 24, + PMU_CLR_CORE_L_HW_WMSK, + PMU_CLR_CORE_L_2GIC_HW_WMSK, + PMU_CLR_GIC2_CORE_L_HW_WMSK, + + PMU_CLR_CORE_B_HW_WMSK, + PMU_CLR_CORE_B_2GIC_HW_WMSK, + PMU_CLR_GIC2_CORE_B_HW_WMSK, +}; + +enum pmu_adb400_st { + PMU_PWRDWN_REQ_CXCS_SW_ST = 0, + PMU_PWRDWN_REQ_CORE_L_SW_ST, + PMU_PWRDWN_REQ_CORE_L_2GIC_SW_ST, + PMU_PWRDWN_REQ_GIC2_CORE_L_SW_ST, + + PMU_PWRDWN_REQ_CORE_B_SW_ST, + PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST, + PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST, + + PMU_CLR_CXCS_HW_ST = 8, + 
PMU_CLR_CORE_L_HW_ST, + PMU_CLR_CORE_L_2GIC_HW_ST, + PMU_CLR_GIC2_CORE_L_HW_ST, + + PMU_CLR_CORE_B_HW_ST, + PMU_CLR_CORE_B_2GIC_HW_ST, + PMU_CLR_GIC2_CORE_B_HW_ST, +}; + +enum pmu_pwrdn_con1 { + PMU_VD_SCU_L_PWRDN_EN = 0, + PMU_VD_SCU_B_PWRDN_EN, + PMU_VD_CENTER_PWRDN_EN, +}; + +enum pmu_core_pwr_st { + L2_FLUSHDONE_CLUSTER_L = 0, + STANDBY_BY_WFIL2_CLUSTER_L, + + L2_FLUSHDONE_CLUSTER_B = 10, + STANDBY_BY_WFIL2_CLUSTER_B, +}; + +#endif /* PMU_BITS_H */ diff --git a/plat/rockchip/rk3399/include/shared/pmu_regs.h b/plat/rockchip/rk3399/include/shared/pmu_regs.h new file mode 100644 index 0000000..43e785e --- /dev/null +++ b/plat/rockchip/rk3399/include/shared/pmu_regs.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PMU_REGS_H +#define PMU_REGS_H + +#define PMU_WKUP_CFG0 0x00 +#define PMU_WKUP_CFG1 0x04 +#define PMU_WKUP_CFG2 0x08 +#define PMU_WKUP_CFG3 0x0c +#define PMU_WKUP_CFG4 0x10 +#define PMU_PWRDN_CON 0x14 +#define PMU_PWRDN_ST 0x18 +#define PMU_PLL_CON 0x1c +#define PMU_PWRMODE_CON 0x20 +#define PMU_SFT_CON 0x24 +#define PMU_INT_CON 0x28 +#define PMU_INT_ST 0x2c +#define PMU_GPIO0_POS_INT_CON 0x30 +#define PMU_GPIO0_NEG_INT_CON 0x34 +#define PMU_GPIO1_POS_INT_CON 0x38 +#define PMU_GPIO1_NEG_INT_CON 0x3c +#define PMU_GPIO0_POS_INT_ST 0x40 +#define PMU_GPIO0_NEG_INT_ST 0x44 +#define PMU_GPIO1_POS_INT_ST 0x48 +#define PMU_GPIO1_NEG_INT_ST 0x4c +#define PMU_PWRDN_INTEN 0x50 +#define PMU_PWRDN_STATUS 0x54 +#define PMU_WAKEUP_STATUS 0x58 +#define PMU_BUS_CLR 0x5c +#define PMU_BUS_IDLE_REQ 0x60 +#define PMU_BUS_IDLE_ST 0x64 +#define PMU_BUS_IDLE_ACK 0x68 +#define PMU_CCI500_CON 0x6c +#define PMU_ADB400_CON 0x70 +#define PMU_ADB400_ST 0x74 +#define PMU_POWER_ST 0x78 +#define PMU_CORE_PWR_ST 0x7c +#define PMU_OSC_CNT 0x80 +#define PMU_PLLLOCK_CNT 0x84 +#define PMU_PLLRST_CNT 0x88 +#define PMU_STABLE_CNT 0x8c +#define PMU_DDRIO_PWRON_CNT 0x90 +#define PMU_WAKEUP_RST_CLR_CNT 0x94 +#define PMU_DDR_SREF_ST 0x98 +#define PMU_SCU_L_PWRDN_CNT 0x9c +#define PMU_SCU_L_PWRUP_CNT 0xa0 +#define PMU_SCU_B_PWRDN_CNT 0xa4 +#define PMU_SCU_B_PWRUP_CNT 0xa8 +#define PMU_GPU_PWRDN_CNT 0xac +#define PMU_GPU_PWRUP_CNT 0xb0 +#define PMU_CENTER_PWRDN_CNT 0xb4 +#define PMU_CENTER_PWRUP_CNT 0xb8 +#define PMU_TIMEOUT_CNT 0xbc +#define PMU_CPU0APM_CON 0xc0 +#define PMU_CPU1APM_CON 0xc4 +#define PMU_CPU2APM_CON 0xc8 +#define PMU_CPU3APM_CON 0xcc +#define PMU_CPU0BPM_CON 0xd0 +#define PMU_CPU1BPM_CON 0xd4 +#define PMU_NOC_AUTO_ENA 0xd8 +#define PMU_PWRDN_CON1 0xdc + +#define PMUGRF_GPIO0A_IOMUX 0x00 +#define PMUGRF_GPIO1A_IOMUX 0x10 +#define PMUGRF_GPIO1C_IOMUX 0x18 + +#define PMUGRF_GPIO0A6_IOMUX_SHIFT 12 +#define PMUGRF_GPIO0A6_IOMUX_PWM 0x1 +#define PMUGRF_GPIO1C3_IOMUX_SHIFT 6 +#define PMUGRF_GPIO1C3_IOMUX_PWM 0x1 + +#define CPU_AXI_QOS_ID_COREID 0x00 +#define CPU_AXI_QOS_REVISIONID 0x04 +#define CPU_AXI_QOS_PRIORITY 0x08 +#define CPU_AXI_QOS_MODE 0x0c +#define CPU_AXI_QOS_BANDWIDTH 0x10 +#define CPU_AXI_QOS_SATURATION 0x14 +#define CPU_AXI_QOS_EXTCONTROL 0x18 +#define CPU_AXI_QOS_NUM_REGS 0x07 + +#define CPU_AXI_CCI_M0_QOS_BASE 0xffa50000 +#define CPU_AXI_CCI_M1_QOS_BASE 0xffad8000 +#define CPU_AXI_DMAC0_QOS_BASE 0xffa64200 +#define CPU_AXI_DMAC1_QOS_BASE 0xffa64280 +#define CPU_AXI_DCF_QOS_BASE 0xffa64180 +#define CPU_AXI_CRYPTO0_QOS_BASE 0xffa64100 +#define CPU_AXI_CRYPTO1_QOS_BASE 0xffa64080 +#define CPU_AXI_PMU_CM0_QOS_BASE 0xffa68000 +#define CPU_AXI_PERI_CM1_QOS_BASE 0xffa64300 +#define 
CPU_AXI_GIC_QOS_BASE 0xffa78000 +#define CPU_AXI_SDIO_QOS_BASE 0xffa76000 +#define CPU_AXI_SDMMC_QOS_BASE 0xffa74000 +#define CPU_AXI_EMMC_QOS_BASE 0xffa58000 +#define CPU_AXI_GMAC_QOS_BASE 0xffa5c000 +#define CPU_AXI_USB_OTG0_QOS_BASE 0xffa70000 +#define CPU_AXI_USB_OTG1_QOS_BASE 0xffa70080 +#define CPU_AXI_USB_HOST0_QOS_BASE 0xffa60100 +#define CPU_AXI_USB_HOST1_QOS_BASE 0xffa60180 +#define CPU_AXI_GPU_QOS_BASE 0xffae0000 +#define CPU_AXI_VIDEO_M0_QOS_BASE 0xffab8000 +#define CPU_AXI_VIDEO_M1_R_QOS_BASE 0xffac0000 +#define CPU_AXI_VIDEO_M1_W_QOS_BASE 0xffac0080 +#define CPU_AXI_RGA_R_QOS_BASE 0xffab0000 +#define CPU_AXI_RGA_W_QOS_BASE 0xffab0080 +#define CPU_AXI_IEP_QOS_BASE 0xffa98000 +#define CPU_AXI_VOP_BIG_R_QOS_BASE 0xffac8000 +#define CPU_AXI_VOP_BIG_W_QOS_BASE 0xffac8080 +#define CPU_AXI_VOP_LITTLE_QOS_BASE 0xffad0000 +#define CPU_AXI_ISP0_M0_QOS_BASE 0xffaa0000 +#define CPU_AXI_ISP0_M1_QOS_BASE 0xffaa0080 +#define CPU_AXI_ISP1_M0_QOS_BASE 0xffaa8000 +#define CPU_AXI_ISP1_M1_QOS_BASE 0xffaa8080 +#define CPU_AXI_HDCP_QOS_BASE 0xffa90000 +#define CPU_AXI_PERIHP_NSP_QOS_BASE 0xffad8080 +#define CPU_AXI_PERILP_NSP_QOS_BASE 0xffad8180 +#define CPU_AXI_PERILPSLV_NSP_QOS_BASE 0xffad8100 + +#define GRF_GPIO2A_IOMUX 0xe000 +#define GRF_GPIO2B_IOMUX 0xe004 +#define GRF_GPIO2C_IOMUX 0xe008 +#define GRF_GPIO2D_IOMUX 0xe00c +#define GRF_GPIO3A_IOMUX 0xe010 +#define GRF_GPIO3B_IOMUX 0xe014 +#define GRF_GPIO3C_IOMUX 0xe018 +#define GRF_GPIO3D_IOMUX 0xe01c +#define GRF_GPIO4A_IOMUX 0xe020 +#define GRF_GPIO4B_IOMUX 0xe024 +#define GRF_GPIO4C_IOMUX 0xe028 +#define GRF_GPIO4D_IOMUX 0xe02c + +#define GRF_GPIO2A_P 0xe040 +#define GRF_GPIO2B_P 0xe044 +#define GRF_GPIO2C_P 0xe048 +#define GRF_GPIO2D_P 0xe04C +#define GRF_GPIO3A_P 0xe050 +#define GRF_GPIO3B_P 0xe054 +#define GRF_GPIO3C_P 0xe058 +#define GRF_GPIO3D_P 0xe05C +#define GRF_GPIO4A_P 0xe060 +#define GRF_GPIO4B_P 0xe064 +#define GRF_GPIO4C_P 0xe068 +#define GRF_GPIO4D_P 0xe06C + +#endif /* PMU_REGS_H */ diff --git a/plat/rockchip/rk3399/plat_sip_calls.c b/plat/rockchip/rk3399/plat_sip_calls.c new file mode 100644 index 0000000..ce8476c --- /dev/null +++ b/plat/rockchip/rk3399/plat_sip_calls.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/debug.h> +#include <common/runtime_svc.h> +#include <lib/mmio.h> + +#include <cdn_dp.h> +#include <dfs.h> +#include <plat_sip_calls.h> +#include <rockchip_sip_svc.h> + +#define RK_SIP_DDR_CFG 0x82000008 +#define DRAM_INIT 0x00 +#define DRAM_SET_RATE 0x01 +#define DRAM_ROUND_RATE 0x02 +#define DRAM_SET_AT_SR 0x03 +#define DRAM_GET_BW 0x04 +#define DRAM_GET_RATE 0x05 +#define DRAM_CLR_IRQ 0x06 +#define DRAM_SET_PARAM 0x07 +#define DRAM_SET_ODT_PD 0x08 + +#define RK_SIP_HDCP_CONTROL 0x82000009 +#define RK_SIP_HDCP_KEY_DATA64 0xC200000A + +uint32_t ddr_smc_handler(uint64_t arg0, uint64_t arg1, + uint64_t id, uint64_t arg2) +{ + switch (id) { + case DRAM_SET_RATE: + return ddr_set_rate((uint32_t)arg0); + case DRAM_ROUND_RATE: + return ddr_round_rate((uint32_t)arg0); + case DRAM_GET_RATE: + return ddr_get_rate(); + case DRAM_SET_ODT_PD: + dram_set_odt_pd(arg0, arg1, arg2); + break; + default: + break; + } + + return 0; +} + +uintptr_t rockchip_plat_sip_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ +#ifdef PLAT_RK_DP_HDCP + uint64_t x5, x6; +#endif + + switch (smc_fid) { + case RK_SIP_DDR_CFG: + SMC_RET1(handle, ddr_smc_handler(x1, x2, x3, x4)); +#ifdef PLAT_RK_DP_HDCP + case RK_SIP_HDCP_CONTROL: + SMC_RET1(handle, dp_hdcp_ctrl(x1)); + case RK_SIP_HDCP_KEY_DATA64: + x5 = read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X5); + x6 = read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X6); + SMC_RET1(handle, dp_hdcp_store_key(x1, x2, x3, x4, x5, x6)); +#endif + default: + ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid); + SMC_RET1(handle, SMC_UNK); + } +} diff --git a/plat/rockchip/rk3399/platform.mk b/plat/rockchip/rk3399/platform.mk new file mode 100644 index 0000000..aba67c2 --- /dev/null +++ b/plat/rockchip/rk3399/platform.mk @@ -0,0 +1,113 @@ +# +# Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +RK_PLAT := plat/rockchip +RK_PLAT_SOC := ${RK_PLAT}/${PLAT} +RK_PLAT_COMMON := ${RK_PLAT}/common + +DISABLE_BIN_GENERATION := 1 + +PLAT_INCLUDES := -I${RK_PLAT_COMMON}/ \ + -I${RK_PLAT_COMMON}/include/ \ + -I${RK_PLAT_COMMON}/aarch64/ \ + -I${RK_PLAT_COMMON}/drivers/pmu/ \ + -I${RK_PLAT_SOC}/ \ + -I${RK_PLAT_SOC}/drivers/pmu/ \ + -I${RK_PLAT_SOC}/drivers/pwm/ \ + -I${RK_PLAT_SOC}/drivers/secure/ \ + -I${RK_PLAT_SOC}/drivers/soc/ \ + -I${RK_PLAT_SOC}/drivers/dram/ \ + -I${RK_PLAT_SOC}/drivers/dp/ \ + -I${RK_PLAT_SOC}/include/ \ + -I${RK_PLAT_SOC}/include/shared/ \ + +# Include GICv3 driver files +include drivers/arm/gic/v3/gicv3.mk + +RK_GIC_SOURCES := ${GICV3_SOURCES} \ + plat/common/plat_gicv3.c \ + ${RK_PLAT}/common/rockchip_gicv3.c + +PLAT_BL_COMMON_SOURCES := common/desc_image_load.c \ + lib/bl_aux_params/bl_aux_params.c \ + lib/xlat_tables/xlat_tables_common.c \ + lib/xlat_tables/aarch64/xlat_tables.c \ + plat/common/aarch64/crash_console_helpers.S \ + plat/common/plat_psci_common.c + +ifneq (${ENABLE_STACK_PROTECTOR},0) +PLAT_BL_COMMON_SOURCES += ${RK_PLAT_COMMON}/rockchip_stack_protector.c +endif + +BL31_SOURCES += ${RK_GIC_SOURCES} \ + drivers/arm/cci/cci.c \ + drivers/ti/uart/aarch64/16550_console.S \ + drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + drivers/gpio/gpio.c \ + lib/cpus/aarch64/cortex_a53.S \ + lib/cpus/aarch64/cortex_a72.S \ + ${RK_PLAT_COMMON}/aarch64/plat_helpers.S \ + ${RK_PLAT_COMMON}/bl31_plat_setup.c \ + ${RK_PLAT_COMMON}/params_setup.c \ + ${RK_PLAT_COMMON}/aarch64/pmu_sram_cpus_on.S \ + ${RK_PLAT_COMMON}/plat_pm.c \ + ${RK_PLAT_COMMON}/plat_topology.c \ + ${RK_PLAT_COMMON}/aarch64/platform_common.c \ + ${RK_PLAT_COMMON}/rockchip_sip_svc.c \ + ${RK_PLAT_SOC}/plat_sip_calls.c \ + ${RK_PLAT_SOC}/drivers/gpio/rk3399_gpio.c \ + ${RK_PLAT_SOC}/drivers/pmu/pmu.c \ + ${RK_PLAT_SOC}/drivers/pmu/pmu_fw.c \ + ${RK_PLAT_SOC}/drivers/pmu/m0_ctl.c \ + ${RK_PLAT_SOC}/drivers/pwm/pwm.c \ + ${RK_PLAT_SOC}/drivers/secure/secure.c \ + ${RK_PLAT_SOC}/drivers/soc/soc.c \ + ${RK_PLAT_SOC}/drivers/dram/dfs.c \ + ${RK_PLAT_SOC}/drivers/dram/dram.c \ + ${RK_PLAT_SOC}/drivers/dram/dram_spec_timing.c \ + ${RK_PLAT_SOC}/drivers/dram/suspend.c + +include lib/coreboot/coreboot.mk +include lib/libfdt/libfdt.mk + +$(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT)) + +# Enable workarounds for selected Cortex-A53 erratas. 
+ERRATA_A53_855873 := 1 + +# M0 source build +PLAT_M0 := ${PLAT}m0 +BUILD_M0 := ${BUILD_PLAT}/m0 + +RK3399M0FW=${BUILD_M0}/${PLAT_M0}.bin +$(eval $(call add_define_val,RK3399M0FW,\"$(RK3399M0FW)\")) + +RK3399M0PMUFW=${BUILD_M0}/${PLAT_M0}pmu.bin +$(eval $(call add_define_val,RK3399M0PMUFW,\"$(RK3399M0PMUFW)\")) + +ifdef PLAT_RK_DP_HDCP +BL31_SOURCES += ${RK_PLAT_SOC}/drivers/dp/cdn_dp.c + +HDCPFW=${RK_PLAT_SOC}/drivers/dp/hdcp.bin +$(eval $(call add_define_val,HDCPFW,\"$(HDCPFW)\")) + +${BUILD_PLAT}/bl31/cdn_dp.o: CCACHE_EXTRAFILES=$(HDCPFW) +${RK_PLAT_SOC}/drivers/dp/cdn_dp.c: $(HDCPFW) +endif + +# CCACHE_EXTRAFILES is needed because ccache doesn't handle .incbin +export CCACHE_EXTRAFILES +${BUILD_PLAT}/bl31/pmu_fw.o: CCACHE_EXTRAFILES=$(RK3399M0FW):$(RK3399M0PMUFW) +${RK_PLAT_SOC}/drivers/pmu/pmu_fw.c: $(RK3399M0FW) + +$(eval $(call MAKE_PREREQ_DIR,${BUILD_M0},${BUILD_PLAT})) +.PHONY: $(RK3399M0FW) +$(RK3399M0FW): | ${BUILD_M0} + $(MAKE) -C ${RK_PLAT_SOC}/drivers/m0 BUILD=$(abspath ${BUILD_PLAT}/m0) + +# Do not enable SVE +ENABLE_SVE_FOR_NS := 0 diff --git a/plat/rockchip/rk3399/rk3399_def.h b/plat/rockchip/rk3399/rk3399_def.h new file mode 100644 index 0000000..ba83242 --- /dev/null +++ b/plat/rockchip/rk3399/rk3399_def.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RK3399_DEF_H +#define RK3399_DEF_H + +#include <addressmap.h> + +#define RK3399_PRIMARY_CPU 0x0 + +/* Special value used to verify platform parameters from BL2 to BL3-1 */ +#define RK_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL + +/************************************************************************** + * UART related constants + **************************************************************************/ +#define RK3399_BAUDRATE 115200 +#define RK3399_UART_CLOCK 24000000 + +/****************************************************************************** + * System counter frequency related constants + ******************************************************************************/ +#define SYS_COUNTER_FREQ_IN_TICKS 24000000 + +/* Base rockchip_platform compatible GIC memory map */ +#define BASE_GICD_BASE (GIC500_BASE) +#define BASE_GICR_BASE (GIC500_BASE + SIZE_M(1)) + +/***************************************************************************** + * CCI-400 related constants + ******************************************************************************/ +#define PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX 0 +#define PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX 1 + +/****************************************************************************** + * sgi, ppi + ******************************************************************************/ +#define ARM_IRQ_SEC_PHY_TIMER 29 + +#define ARM_IRQ_SEC_SGI_0 8 +#define ARM_IRQ_SEC_SGI_1 9 +#define ARM_IRQ_SEC_SGI_2 10 +#define ARM_IRQ_SEC_SGI_3 11 +#define ARM_IRQ_SEC_SGI_4 12 +#define ARM_IRQ_SEC_SGI_5 13 +#define ARM_IRQ_SEC_SGI_6 14 +#define ARM_IRQ_SEC_SGI_7 15 + +/* + * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3 + * terminology. On a GICv2 system or mode, the lists will be merged and treated + * as Group 0 interrupts. + */ +#define PLAT_RK_GICV3_G1S_IRQS \ + INTR_PROP_DESC(ARM_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, \ + INTR_GROUP1S, GIC_INTR_CFG_LEVEL) + +#define PLAT_RK_GICV3_G0_IRQS \ + INTR_PROP_DESC(ARM_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, \ + INTR_GROUP0, GIC_INTR_CFG_LEVEL) + +#endif /* RK3399_DEF_H */ |
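
The SYS_REG_DEC_* macros in dram_regs.h unpack the per-channel DRAM geometry from the single packed sys_reg word laid out in the bitfield comment above them. Purely as a reading aid, and not part of the patch, a minimal decode helper could look like the following; the include path is assumed to match the -I flags platform.mk adds, and how the sys_reg word itself is obtained is left to the caller.

/*
 * Hypothetical helper, not part of the patch: recovers the geometry of one
 * DRAM channel from the packed sys_reg word using the decode macros above.
 */
#include <stdint.h>

#include <dram_regs.h>		/* assumed to be on the include path */

struct channel_geometry {
	uint32_t rank;		/* 1 or 2 chip selects */
	uint32_t col;		/* column address bits, 9..12 */
	uint32_t bank;		/* bank address bits, 2 or 3 */
	uint32_t cs0_row;	/* row address bits on CS0, 13..16 */
	uint32_t bw;		/* bus-width code, inverse of SYS_REG_ENC_BW */
	uint32_t ddrtype;	/* DRAM type field, bits [15:13] */
};

static void decode_channel(uint32_t sys_reg, int ch,
			   struct channel_geometry *g)
{
	g->rank = SYS_REG_DEC_RANK(sys_reg, ch);
	g->col = SYS_REG_DEC_COL(sys_reg, ch);
	g->bank = SYS_REG_DEC_BK(sys_reg, ch);
	g->cs0_row = SYS_REG_DEC_CS0_ROW(sys_reg, ch);
	g->bw = SYS_REG_DEC_BW(sys_reg, ch);
	g->ddrtype = SYS_REG_DEC_DDRTYPE(sys_reg);
}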
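
The PLL_MODE() and PLL_POWER_DOWN() macros in misc_regs.h follow the usual Rockchip CRU/GRF convention of a write-enable mask in bits [31:16]: each write carries both the mask and the shifted value, so one mmio_write_32() updates only the intended field with no read-modify-write. A hypothetical illustration, not code from the patch, assuming CRU_BASE comes from the SoC address map:

#include <stdint.h>

#include <lib/mmio.h>

#include <addressmap.h>		/* assumed to provide CRU_BASE */
#include <misc_regs.h>

/*
 * Switch the DPLL between slow and normal mode via CRU_DPLL_CON3: bits
 * [25:24] carry the write-enable mask, bits [9:8] the new mode value, so
 * the other fields of the register are left untouched.
 */
static void dpll_set_mode(uint32_t mode)
{
	mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_MODE(mode));
}

Called as dpll_set_mode(PLL_SLOW_MODE) before reprogramming the PLL and dpll_set_mode(PLL_NORMAL_MODE) afterwards, which is the usual pattern when retuning a Rockchip PLL.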
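
pmu_regs.h defines matching PMU_BUS_IDLE_REQ, PMU_BUS_IDLE_ST and PMU_BUS_IDLE_ACK registers, and pmu_bits.h gives each bus the same bit position in all three. The real sequencing lives in drivers/pmu/pmu.c, which this hunk does not show; the sketch below is only a guess at the shape of that request/acknowledge handshake, with PMU_BASE assumed to come from the address map and the poll loop left unbounded for brevity.

#include <stdbool.h>
#include <stdint.h>

#include <lib/mmio.h>

#include <addressmap.h>		/* assumed to provide PMU_BASE */
#include <pmu_regs.h>

/*
 * Sketch only: ask the PMU to idle (or un-idle) one bus, then wait until
 * both the status and acknowledge bits follow the request.
 */
static void bus_idle_request(uint32_t bus, bool idle)
{
	uint32_t mask = 1U << bus;
	uint32_t val = idle ? mask : 0U;

	mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, mask, val);

	while (((mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & mask) != val) ||
	       ((mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & mask) != val))
		;	/* a production version would time out here */
}

A caller would pass one of the pmu_bus_id values from pmu_bits.h, for example bus_idle_request(BUS_ID_GMAC, true) before powering down PD_GMAC.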
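
In plat_sip_calls.c, rockchip_plat_sip_handler forwards x1..x4 to ddr_smc_handler(arg0, arg1, id, arg2), so the DDR sub-command travels in x3 and the single return value comes back in x0. A rough non-secure caller for DRAM_GET_RATE, written against the Linux SMCCC helpers and not part of this patch, would look like:

#include <linux/arm-smccc.h>

#define RK_SIP_DDR_CFG	0x82000008
#define DRAM_GET_RATE	0x05

/*
 * Query BL31 for the current DRAM rate; whatever ddr_get_rate() returned
 * arrives in x0, i.e. res.a0.  Error handling is omitted for brevity.
 */
static unsigned long rk3399_ddr_get_rate(void)
{
	struct arm_smccc_res res;

	/* x1, x2 and x4 are unused by this sub-command; x3 selects it. */
	arm_smccc_smc(RK_SIP_DDR_CFG, 0, 0, DRAM_GET_RATE, 0, 0, 0, 0, &res);

	return res.a0;
}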