Diffstat (limited to 'plat/rockchip/rk3399/drivers')
-rw-r--r--  plat/rockchip/rk3399/drivers/dp/cdn_dp.c               |   70
-rw-r--r--  plat/rockchip/rk3399/drivers/dp/cdn_dp.h               |   49
-rw-r--r--  plat/rockchip/rk3399/drivers/dram/dfs.c                | 2114
-rw-r--r--  plat/rockchip/rk3399/drivers/dram/dfs.h                |   50
-rw-r--r--  plat/rockchip/rk3399/drivers/dram/dram.c               |   53
-rw-r--r--  plat/rockchip/rk3399/drivers/dram/dram.h               |  156
-rw-r--r--  plat/rockchip/rk3399/drivers/dram/dram_spec_timing.c   | 1324
-rw-r--r--  plat/rockchip/rk3399/drivers/dram/dram_spec_timing.h   |  507
-rw-r--r--  plat/rockchip/rk3399/drivers/dram/suspend.c            |  852
-rw-r--r--  plat/rockchip/rk3399/drivers/dram/suspend.h            |   28
-rw-r--r--  plat/rockchip/rk3399/drivers/gpio/rk3399_gpio.c        |  400
-rw-r--r--  plat/rockchip/rk3399/drivers/m0/Makefile               |  125
-rw-r--r--  plat/rockchip/rk3399/drivers/m0/include/addressmap.h   |   15
-rw-r--r--  plat/rockchip/rk3399/drivers/m0/include/rk3399_mcu.h   |   32
-rw-r--r--  plat/rockchip/rk3399/drivers/m0/src/dram.c             |   84
-rw-r--r--  plat/rockchip/rk3399/drivers/m0/src/rk3399m0.ld.S      |   26
-rw-r--r--  plat/rockchip/rk3399/drivers/m0/src/startup.c          |   92
-rw-r--r--  plat/rockchip/rk3399/drivers/m0/src/stopwatch.c        |   74
-rw-r--r--  plat/rockchip/rk3399/drivers/m0/src/suspend.c          |   62
-rw-r--r--  plat/rockchip/rk3399/drivers/pmu/m0_ctl.c              |  102
-rw-r--r--  plat/rockchip/rk3399/drivers/pmu/m0_ctl.h              |   29
-rw-r--r--  plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S     |  136
-rw-r--r--  plat/rockchip/rk3399/drivers/pmu/pmu.c                 | 1626
-rw-r--r--  plat/rockchip/rk3399/drivers/pmu/pmu.h                 |  141
-rw-r--r--  plat/rockchip/rk3399/drivers/pmu/pmu_fw.c              |   22
-rw-r--r--  plat/rockchip/rk3399/drivers/pwm/pwm.c                 |  123
-rw-r--r--  plat/rockchip/rk3399/drivers/pwm/pwm.h                 |   13
-rw-r--r--  plat/rockchip/rk3399/drivers/secure/secure.c           |  167
-rw-r--r--  plat/rockchip/rk3399/drivers/secure/secure.h           |  105
-rw-r--r--  plat/rockchip/rk3399/drivers/soc/soc.c                 |  362
-rw-r--r--  plat/rockchip/rk3399/drivers/soc/soc.h                 |  289
31 files changed, 9228 insertions, 0 deletions
diff --git a/plat/rockchip/rk3399/drivers/dp/cdn_dp.c b/plat/rockchip/rk3399/drivers/dp/cdn_dp.c
new file mode 100644
index 0000000..a8773f4
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dp/cdn_dp.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <cdefs.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <lib/smccc.h>
+
+#include <cdn_dp.h>
+
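+/*
+ * Embed the external HDCP firmware blob (path supplied through the HDCPFW
+ * build-time macro) directly into the image and expose its entry point as
+ * the C-callable symbol hdcp_handler(), which is invoked below once a full
+ * key has been received.
+ */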
+__asm__(
+ ".pushsection .text.hdcp_handler, \"ax\", %progbits\n"
+ ".global hdcp_handler\n"
+ ".balign 4\n"
+ "hdcp_handler:\n"
+ ".incbin \"" HDCPFW "\"\n"
+ ".type hdcp_handler, %function\n"
+ ".size hdcp_handler, .- hdcp_handler\n"
+ ".popsection\n"
+);
+
+static uint64_t *hdcp_key_pdata;
+static struct cdn_dp_hdcp_key_1x key;
+
+int hdcp_handler(struct cdn_dp_hdcp_key_1x *key);
+
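+/*
+ * Key transfer protocol: HDCP_KEY_DATA_START_TRANSFER resets the staging
+ * buffer, dp_hdcp_store_key() then fills it six 64-bit words (48 bytes) at a
+ * time, and HDCP_KEY_DATA_START_DECRYPT hands the buffer to the embedded
+ * firmware only once it has been filled completely.
+ */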
+uint64_t dp_hdcp_ctrl(uint64_t type)
+{
+ switch (type) {
+ case HDCP_KEY_DATA_START_TRANSFER:
+ memset(&key, 0x00, sizeof(key));
+ hdcp_key_pdata = (uint64_t *)&key;
+ return 0;
+ case HDCP_KEY_DATA_START_DECRYPT:
+ if (hdcp_key_pdata == (uint64_t *)(&key + 1))
+ return hdcp_handler(&key);
+ else
+ return PSCI_E_INVALID_PARAMS;
+ default:
+ return SMC_UNK;
+ }
+}
+
+uint64_t dp_hdcp_store_key(uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ uint64_t x5,
+ uint64_t x6)
+{
+ if (hdcp_key_pdata < (uint64_t *)&key ||
+ hdcp_key_pdata + 6 > (uint64_t *)(&key + 1))
+ return PSCI_E_INVALID_PARAMS;
+
+ hdcp_key_pdata[0] = x1;
+ hdcp_key_pdata[1] = x2;
+ hdcp_key_pdata[2] = x3;
+ hdcp_key_pdata[3] = x4;
+ hdcp_key_pdata[4] = x5;
+ hdcp_key_pdata[5] = x6;
+ hdcp_key_pdata += 6;
+
+ return 0;
+}
diff --git a/plat/rockchip/rk3399/drivers/dp/cdn_dp.h b/plat/rockchip/rk3399/drivers/dp/cdn_dp.h
new file mode 100644
index 0000000..c5cbae2
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dp/cdn_dp.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CDN_DP_H
+#define CDN_DP_H
+
+#include <plat_private.h>
+
+enum {
+ CDN_DP_HDCP_1X_KSV_LEN = 5,
+ CDN_DP_HDCP_KSV_LEN = 8,
+ CDN_DP_HDCP_RESERVED_LEN = 10,
+ CDN_DP_HDCP_UID_LEN = 16,
+ CDN_DP_HDCP_SHA_LEN = 20,
+ CDN_DP_HDCP_DPK_LEN = 280,
+ CDN_DP_HDCP_1X_KEYS_LEN = 285,
+ CDN_DP_HDCP_KEY_LEN = 326,
+};
+
+struct cdn_dp_hdcp_key_1x {
+ uint8_t ksv[CDN_DP_HDCP_KSV_LEN];
+ uint8_t device_key[CDN_DP_HDCP_DPK_LEN];
+ uint8_t sha1[CDN_DP_HDCP_SHA_LEN];
+ uint8_t uid[CDN_DP_HDCP_UID_LEN];
+ uint16_t seed;
+ uint8_t reserved[CDN_DP_HDCP_RESERVED_LEN];
+};
+
+#define HDCP_KEY_DATA_START_TRANSFER 0
+#define HDCP_KEY_DATA_START_DECRYPT 1
+#define HDCP_KEY_1X_STORE_DATA_ALIGN_SIZE ((6 * 64) / 8)
+
+/* Check that cdn_dp_hdcp_key_1x is a multiple of the 6 x 64-bit store unit */
+CASSERT(!(sizeof(struct cdn_dp_hdcp_key_1x) % HDCP_KEY_1X_STORE_DATA_ALIGN_SIZE), \
+	assert_hdcp_key_1x_store_data_align_size_mismatch);
+
+uint64_t dp_hdcp_ctrl(uint64_t type);
+
+uint64_t dp_hdcp_store_key(uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ uint64_t x5,
+ uint64_t x6);
+
+#endif /* CDN_DP_H */
diff --git a/plat/rockchip/rk3399/drivers/dram/dfs.c b/plat/rockchip/rk3399/drivers/dram/dfs.c
new file mode 100644
index 0000000..816372b
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dfs.c
@@ -0,0 +1,2114 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <m0_ctl.h>
+#include <plat_private.h>
+#include "dfs.h"
+#include "dram.h"
+#include "dram_spec_timing.h"
+#include "pmu.h"
+#include "soc.h"
+#include "string.h"
+
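+/*
+ * Frequency thresholds in MHz: per-CS training is only enabled above
+ * ENPER_CS_TRAINING_FREQ, write/read CS latencies are derived differently
+ * above TDFI_LAT_THRESHOLD_FREQ, and the PHY DLLs are bypassed at or below
+ * PHY_DLL_BYPASS_FREQ.
+ */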
+#define ENPER_CS_TRAINING_FREQ (666)
+#define TDFI_LAT_THRESHOLD_FREQ (928)
+#define PHY_DLL_BYPASS_FREQ (260)
+
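+/*
+ * DPLL settings for the supported DRAM rates. Assuming the usual 24 MHz
+ * oscillator input on RK3399, the resulting rate is
+ * 24 * fbdiv / (refdiv * postdiv1 * postdiv2) MHz, e.g. 24 * 116 / 3 = 928.
+ */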
+static const struct pll_div dpll_rates_table[] = {
+
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2 */
+ {.mhz = 928, .refdiv = 1, .fbdiv = 116, .postdiv1 = 3, .postdiv2 = 1},
+ {.mhz = 800, .refdiv = 1, .fbdiv = 100, .postdiv1 = 3, .postdiv2 = 1},
+ {.mhz = 732, .refdiv = 1, .fbdiv = 61, .postdiv1 = 2, .postdiv2 = 1},
+ {.mhz = 666, .refdiv = 1, .fbdiv = 111, .postdiv1 = 4, .postdiv2 = 1},
+ {.mhz = 600, .refdiv = 1, .fbdiv = 50, .postdiv1 = 2, .postdiv2 = 1},
+ {.mhz = 528, .refdiv = 1, .fbdiv = 66, .postdiv1 = 3, .postdiv2 = 1},
+ {.mhz = 400, .refdiv = 1, .fbdiv = 50, .postdiv1 = 3, .postdiv2 = 1},
+ {.mhz = 300, .refdiv = 1, .fbdiv = 50, .postdiv1 = 4, .postdiv2 = 1},
+ {.mhz = 200, .refdiv = 1, .fbdiv = 50, .postdiv1 = 3, .postdiv2 = 2},
+};
+
+struct rk3399_dram_status {
+ uint32_t current_index;
+ uint32_t index_freq[2];
+ uint32_t boot_freq;
+ uint32_t low_power_stat;
+ struct timing_related_config timing_config;
+ struct drv_odt_lp_config drv_odt_lp_cfg;
+};
+
+struct rk3399_saved_status {
+ uint32_t freq;
+ uint32_t low_power_stat;
+ uint32_t odt;
+};
+
+static struct rk3399_dram_status rk3399_dram_status;
+static struct rk3399_saved_status rk3399_suspend_status;
+static uint32_t wrdqs_delay_val[2][2][4];
+static uint32_t rddqs_delay_ps;
+
+static struct rk3399_sdram_default_config ddr3_default_config = {
+ .bl = 8,
+ .ap = 0,
+ .burst_ref_cnt = 1,
+ .zqcsi = 0
+};
+
+static struct rk3399_sdram_default_config lpddr3_default_config = {
+ .bl = 8,
+ .ap = 0,
+ .burst_ref_cnt = 1,
+ .zqcsi = 0
+};
+
+static struct rk3399_sdram_default_config lpddr4_default_config = {
+ .bl = 16,
+ .ap = 0,
+ .caodt = 240,
+ .burst_ref_cnt = 1,
+ .zqcsi = 0
+};
+
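+/*
+ * Derive the per-die capacity of one chip select from the channel's
+ * row/column/bank geometry and bus width; the row_3_4 flag scales the
+ * result for 3/4-row parts and the total is divided by the number of dies
+ * that make up the channel width.
+ */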
+static uint32_t get_cs_die_capability(struct rk3399_sdram_params *ram_config,
+ uint8_t channel, uint8_t cs)
+{
+ struct rk3399_sdram_channel *ch = &ram_config->ch[channel];
+ uint32_t bandwidth;
+ uint32_t die_bandwidth;
+ uint32_t die;
+ uint32_t cs_cap;
+ uint32_t row;
+
+ row = cs == 0 ? ch->cs0_row : ch->cs1_row;
+ bandwidth = 8 * (1 << ch->bw);
+ die_bandwidth = 8 * (1 << ch->dbw);
+ die = bandwidth / die_bandwidth;
+ cs_cap = (1 << (row + ((1 << ch->bk) / 4 + 1) + ch->col +
+ (bandwidth / 16)));
+ if (ch->row_3_4)
+ cs_cap = cs_cap * 3 / 4;
+
+ return (cs_cap / die);
+}
+
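+/*
+ * Decode the DRAM-side drive strength and DQ/CA ODT settings (in ohms) from
+ * the mode-register values latched in the controller; the combined LPDDR3
+ * drive codes such as 3448 presumably encode a pull-down/pull-up pair.
+ */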
+static void get_dram_drv_odt_val(uint32_t dram_type,
+ struct drv_odt_lp_config *drv_config)
+{
+ uint32_t tmp;
+ uint32_t mr1_val, mr3_val, mr11_val;
+
+ switch (dram_type) {
+ case DDR3:
+ mr1_val = (mmio_read_32(CTL_REG(0, 133)) >> 16) & 0xffff;
+ tmp = ((mr1_val >> 1) & 1) | ((mr1_val >> 4) & 1);
+ if (tmp)
+ drv_config->dram_side_drv = 34;
+ else
+ drv_config->dram_side_drv = 40;
+ tmp = ((mr1_val >> 2) & 1) | ((mr1_val >> 5) & 1) |
+ ((mr1_val >> 7) & 1);
+ if (tmp == 0)
+ drv_config->dram_side_dq_odt = 0;
+ else if (tmp == 1)
+ drv_config->dram_side_dq_odt = 60;
+ else if (tmp == 3)
+ drv_config->dram_side_dq_odt = 40;
+ else
+ drv_config->dram_side_dq_odt = 120;
+ break;
+ case LPDDR3:
+ mr3_val = mmio_read_32(CTL_REG(0, 138)) & 0xf;
+ mr11_val = (mmio_read_32(CTL_REG(0, 139)) >> 24) & 0x3;
+ if (mr3_val == 0xb)
+ drv_config->dram_side_drv = 3448;
+ else if (mr3_val == 0xa)
+ drv_config->dram_side_drv = 4048;
+ else if (mr3_val == 0x9)
+ drv_config->dram_side_drv = 3440;
+ else if (mr3_val == 0x4)
+ drv_config->dram_side_drv = 60;
+ else if (mr3_val == 0x3)
+ drv_config->dram_side_drv = 48;
+ else if (mr3_val == 0x2)
+ drv_config->dram_side_drv = 40;
+ else
+ drv_config->dram_side_drv = 34;
+
+ if (mr11_val == 1)
+ drv_config->dram_side_dq_odt = 60;
+ else if (mr11_val == 2)
+ drv_config->dram_side_dq_odt = 120;
+ else if (mr11_val == 0)
+ drv_config->dram_side_dq_odt = 0;
+ else
+ drv_config->dram_side_dq_odt = 240;
+ break;
+ case LPDDR4:
+ default:
+ mr3_val = (mmio_read_32(CTL_REG(0, 138)) >> 3) & 0x7;
+ mr11_val = (mmio_read_32(CTL_REG(0, 139)) >> 24) & 0xff;
+
+ if ((mr3_val == 0) || (mr3_val == 7))
+ drv_config->dram_side_drv = 40;
+ else
+ drv_config->dram_side_drv = 240 / mr3_val;
+
+ tmp = mr11_val & 0x7;
+ if ((tmp == 7) || (tmp == 0))
+ drv_config->dram_side_dq_odt = 0;
+ else
+ drv_config->dram_side_dq_odt = 240 / tmp;
+
+ tmp = (mr11_val >> 4) & 0x7;
+ if ((tmp == 7) || (tmp == 0))
+ drv_config->dram_side_ca_odt = 0;
+ else
+ drv_config->dram_side_ca_odt = 240 / tmp;
+ break;
+ }
+}
+
+static void sdram_timing_cfg_init(struct timing_related_config *ptiming_config,
+ struct rk3399_sdram_params *sdram_params,
+ struct drv_odt_lp_config *drv_config)
+{
+ uint32_t i, j;
+
+ for (i = 0; i < sdram_params->num_channels; i++) {
+ ptiming_config->dram_info[i].speed_rate = DDR3_DEFAULT;
+ ptiming_config->dram_info[i].cs_cnt = sdram_params->ch[i].rank;
+ for (j = 0; j < sdram_params->ch[i].rank; j++) {
+ ptiming_config->dram_info[i].per_die_capability[j] =
+ get_cs_die_capability(sdram_params, i, j);
+ }
+ }
+ ptiming_config->dram_type = sdram_params->dramtype;
+ ptiming_config->ch_cnt = sdram_params->num_channels;
+ switch (sdram_params->dramtype) {
+ case DDR3:
+ ptiming_config->bl = ddr3_default_config.bl;
+ ptiming_config->ap = ddr3_default_config.ap;
+ break;
+ case LPDDR3:
+ ptiming_config->bl = lpddr3_default_config.bl;
+ ptiming_config->ap = lpddr3_default_config.ap;
+ break;
+ case LPDDR4:
+ ptiming_config->bl = lpddr4_default_config.bl;
+ ptiming_config->ap = lpddr4_default_config.ap;
+ ptiming_config->rdbi = 0;
+ ptiming_config->wdbi = 0;
+ break;
+ default:
+ /* Do nothing in default case */
+ break;
+ }
+ ptiming_config->dramds = drv_config->dram_side_drv;
+ ptiming_config->dramodt = drv_config->dram_side_dq_odt;
+ ptiming_config->caodt = drv_config->dram_side_ca_odt;
+ ptiming_config->odt = (mmio_read_32(PHY_REG(0, 5)) >> 16) & 0x1;
+}
+
+struct lat_adj_pair {
+ uint32_t cl;
+ uint32_t rdlat_adj;
+ uint32_t cwl;
+ uint32_t wrlat_adj;
+};
+
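+/*
+ * Tables mapping CAS latency (cl) and CAS write latency (cwl) to the
+ * controller's read/write latency adjust values for each DRAM type; the
+ * get_rdlat_adj()/get_wrlat_adj() helpers below return 0xff on a miss.
+ */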
+const struct lat_adj_pair ddr3_lat_adj[] = {
+ {6, 5, 5, 4},
+ {8, 7, 6, 5},
+ {10, 9, 7, 6},
+ {11, 9, 8, 7},
+ {13, 0xb, 9, 8},
+ {14, 0xb, 0xa, 9}
+};
+
+const struct lat_adj_pair lpddr3_lat_adj[] = {
+ {3, 2, 1, 0},
+ {6, 5, 3, 2},
+ {8, 7, 4, 3},
+ {9, 8, 5, 4},
+ {10, 9, 6, 5},
+ {11, 9, 6, 5},
+ {12, 0xa, 6, 5},
+ {14, 0xc, 8, 7},
+ {16, 0xd, 8, 7}
+};
+
+const struct lat_adj_pair lpddr4_lat_adj[] = {
+ {6, 5, 4, 2},
+ {10, 9, 6, 4},
+ {14, 0xc, 8, 6},
+ {20, 0x11, 0xa, 8},
+ {24, 0x15, 0xc, 0xa},
+ {28, 0x18, 0xe, 0xc},
+ {32, 0x1b, 0x10, 0xe},
+ {36, 0x1e, 0x12, 0x10}
+};
+
+static uint32_t get_rdlat_adj(uint32_t dram_type, uint32_t cl)
+{
+ const struct lat_adj_pair *p;
+ uint32_t cnt;
+ uint32_t i;
+
+ if (dram_type == DDR3) {
+ p = ddr3_lat_adj;
+ cnt = ARRAY_SIZE(ddr3_lat_adj);
+ } else if (dram_type == LPDDR3) {
+ p = lpddr3_lat_adj;
+ cnt = ARRAY_SIZE(lpddr3_lat_adj);
+ } else {
+ p = lpddr4_lat_adj;
+ cnt = ARRAY_SIZE(lpddr4_lat_adj);
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (cl == p[i].cl)
+ return p[i].rdlat_adj;
+ }
+ /* fail */
+ return 0xff;
+}
+
+static uint32_t get_wrlat_adj(uint32_t dram_type, uint32_t cwl)
+{
+ const struct lat_adj_pair *p;
+ uint32_t cnt;
+ uint32_t i;
+
+ if (dram_type == DDR3) {
+ p = ddr3_lat_adj;
+ cnt = ARRAY_SIZE(ddr3_lat_adj);
+ } else if (dram_type == LPDDR3) {
+ p = lpddr3_lat_adj;
+ cnt = ARRAY_SIZE(lpddr3_lat_adj);
+ } else {
+ p = lpddr4_lat_adj;
+ cnt = ARRAY_SIZE(lpddr4_lat_adj);
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (cwl == p[i].cwl)
+ return p[i].wrlat_adj;
+ }
+ /* fail */
+ return 0xff;
+}
+
+#define PI_REGS_DIMM_SUPPORT (0)
+#define PI_ADD_LATENCY (0)
+#define PI_DOUBLEFREEK (1)
+
+#define PI_PAD_DELAY_PS_VALUE (1000)
+#define PI_IE_ENABLE_VALUE (3000)
+#define PI_TSEL_ENABLE_VALUE (700)
+
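+/*
+ * The pad, input-enable and tsel delays above are given in picoseconds.
+ * Throughout the helpers below, (1000000 / mhz) is the clock period in ps,
+ * and the "divide, then bump on a non-zero remainder" pattern converts a
+ * delay in ps into a cycle count rounded up: ceil(delay_ps / period_ps).
+ */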
+static uint32_t get_pi_rdlat_adj(struct dram_timing_t *pdram_timing)
+{
+ /*[DLLSUBTYPE2] == "STD_DENALI_HS" */
+ uint32_t rdlat, delay_adder, ie_enable, hs_offset, tsel_adder,
+ extra_adder, tsel_enable;
+
+ ie_enable = PI_IE_ENABLE_VALUE;
+ tsel_enable = PI_TSEL_ENABLE_VALUE;
+
+ rdlat = pdram_timing->cl + PI_ADD_LATENCY;
+ delay_adder = ie_enable / (1000000 / pdram_timing->mhz);
+ if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0)
+ delay_adder++;
+ hs_offset = 0;
+ tsel_adder = 0;
+ extra_adder = 0;
+ /* rdlat = rdlat - (PREAMBLE_SUPPORT & 0x1); */
+ tsel_adder = tsel_enable / (1000000 / pdram_timing->mhz);
+ if ((tsel_enable % (1000000 / pdram_timing->mhz)) != 0)
+ tsel_adder++;
+ delay_adder = delay_adder - 1;
+ if (tsel_adder > delay_adder)
+ extra_adder = tsel_adder - delay_adder;
+ else
+ extra_adder = 0;
+ if (PI_REGS_DIMM_SUPPORT && PI_DOUBLEFREEK)
+ hs_offset = 2;
+ else
+ hs_offset = 1;
+
+ if (delay_adder > (rdlat - 1 - hs_offset)) {
+ rdlat = rdlat - tsel_adder;
+ } else {
+ if ((rdlat - delay_adder) < 2)
+ rdlat = 2;
+ else
+ rdlat = rdlat - delay_adder - extra_adder;
+ }
+
+ return rdlat;
+}
+
+static uint32_t get_pi_wrlat(struct dram_timing_t *pdram_timing,
+ struct timing_related_config *timing_config)
+{
+ uint32_t tmp;
+
+ if (timing_config->dram_type == LPDDR3) {
+ tmp = pdram_timing->cl;
+ if (tmp >= 14)
+ tmp = 8;
+ else if (tmp >= 10)
+ tmp = 6;
+ else if (tmp == 9)
+ tmp = 5;
+ else if (tmp == 8)
+ tmp = 4;
+ else if (tmp == 6)
+ tmp = 3;
+ else
+ tmp = 1;
+ } else {
+ tmp = 1;
+ }
+
+ return tmp;
+}
+
+static uint32_t get_pi_wrlat_adj(struct dram_timing_t *pdram_timing,
+ struct timing_related_config *timing_config)
+{
+ return get_pi_wrlat(pdram_timing, timing_config) + PI_ADD_LATENCY - 1;
+}
+
+static uint32_t get_pi_tdfi_phy_rdlat(struct dram_timing_t *pdram_timing,
+ struct timing_related_config *timing_config)
+{
+ /* [DLLSUBTYPE2] == "STD_DENALI_HS" */
+ uint32_t cas_lat, delay_adder, ie_enable, hs_offset, ie_delay_adder;
+ uint32_t mem_delay_ps, round_trip_ps;
+ uint32_t phy_internal_delay, lpddr_adder, dfi_adder, rdlat_delay;
+
+ ie_enable = PI_IE_ENABLE_VALUE;
+
+ delay_adder = ie_enable / (1000000 / pdram_timing->mhz);
+ if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0)
+ delay_adder++;
+ delay_adder = delay_adder - 1;
+ if (PI_REGS_DIMM_SUPPORT && PI_DOUBLEFREEK)
+ hs_offset = 2;
+ else
+ hs_offset = 1;
+
+ cas_lat = pdram_timing->cl + PI_ADD_LATENCY;
+
+ if (delay_adder > (cas_lat - 1 - hs_offset)) {
+ ie_delay_adder = 0;
+ } else {
+ ie_delay_adder = ie_enable / (1000000 / pdram_timing->mhz);
+ if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0)
+ ie_delay_adder++;
+ }
+
+ if (timing_config->dram_type == DDR3) {
+ mem_delay_ps = 0;
+ } else if (timing_config->dram_type == LPDDR4) {
+ mem_delay_ps = 3600;
+ } else if (timing_config->dram_type == LPDDR3) {
+ mem_delay_ps = 5500;
+ } else {
+		NOTICE("get_pi_tdfi_phy_rdlat: unsupported DRAM type\n");
+ return 0;
+ }
+ round_trip_ps = 1100 + 500 + mem_delay_ps + 500 + 600;
+ delay_adder = round_trip_ps / (1000000 / pdram_timing->mhz);
+ if ((round_trip_ps % (1000000 / pdram_timing->mhz)) != 0)
+ delay_adder++;
+
+ phy_internal_delay = 5 + 2 + 4;
+ lpddr_adder = mem_delay_ps / (1000000 / pdram_timing->mhz);
+ if ((mem_delay_ps % (1000000 / pdram_timing->mhz)) != 0)
+ lpddr_adder++;
+ dfi_adder = 0;
+ phy_internal_delay = phy_internal_delay + 2;
+ rdlat_delay = delay_adder + phy_internal_delay +
+ ie_delay_adder + lpddr_adder + dfi_adder;
+
+ rdlat_delay = rdlat_delay + 2;
+ return rdlat_delay;
+}
+
+static uint32_t get_pi_todtoff_min(struct dram_timing_t *pdram_timing,
+ struct timing_related_config *timing_config)
+{
+ uint32_t tmp, todtoff_min_ps;
+
+ if (timing_config->dram_type == LPDDR3)
+ todtoff_min_ps = 2500;
+ else if (timing_config->dram_type == LPDDR4)
+ todtoff_min_ps = 1500;
+ else
+ todtoff_min_ps = 0;
+ /* todtoff_min */
+ tmp = todtoff_min_ps / (1000000 / pdram_timing->mhz);
+ if ((todtoff_min_ps % (1000000 / pdram_timing->mhz)) != 0)
+ tmp++;
+ return tmp;
+}
+
+static uint32_t get_pi_todtoff_max(struct dram_timing_t *pdram_timing,
+ struct timing_related_config *timing_config)
+{
+ uint32_t tmp, todtoff_max_ps;
+
+ if ((timing_config->dram_type == LPDDR4)
+ || (timing_config->dram_type == LPDDR3))
+ todtoff_max_ps = 3500;
+ else
+ todtoff_max_ps = 0;
+
+ /* todtoff_max */
+ tmp = todtoff_max_ps / (1000000 / pdram_timing->mhz);
+ if ((todtoff_max_ps % (1000000 / pdram_timing->mhz)) != 0)
+ tmp++;
+ return tmp;
+}
+
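+/*
+ * Program the DRAM controller (CTL) timing registers for frequency set 0
+ * from the computed timings; gen_rk3399_ctl_params_f1() below does the same
+ * for the second frequency set using the F1 copies of the registers.
+ */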
+static void gen_rk3399_ctl_params_f0(struct timing_related_config
+ *timing_config,
+ struct dram_timing_t *pdram_timing)
+{
+ uint32_t i;
+ uint32_t tmp, tmp1;
+
+ for (i = 0; i < timing_config->ch_cnt; i++) {
+ if (timing_config->dram_type == DDR3) {
+ tmp = ((700000 + 10) * timing_config->freq +
+ 999) / 1000;
+ tmp += pdram_timing->txsnr + (pdram_timing->tmrd * 3) +
+ pdram_timing->tmod + pdram_timing->tzqinit;
+ mmio_write_32(CTL_REG(i, 5), tmp);
+
+ mmio_clrsetbits_32(CTL_REG(i, 22), 0xffff,
+ pdram_timing->tdllk);
+
+ mmio_write_32(CTL_REG(i, 32),
+ (pdram_timing->tmod << 8) |
+ pdram_timing->tmrd);
+
+ mmio_clrsetbits_32(CTL_REG(i, 59), 0xffffu << 16,
+ (pdram_timing->txsr -
+ pdram_timing->trcd) << 16);
+ } else if (timing_config->dram_type == LPDDR4) {
+ mmio_write_32(CTL_REG(i, 5), pdram_timing->tinit1 +
+ pdram_timing->tinit3);
+ mmio_write_32(CTL_REG(i, 32),
+ (pdram_timing->tmrd << 8) |
+ pdram_timing->tmrd);
+ mmio_clrsetbits_32(CTL_REG(i, 59), 0xffffu << 16,
+ pdram_timing->txsr << 16);
+ } else {
+ mmio_write_32(CTL_REG(i, 5), pdram_timing->tinit1);
+ mmio_write_32(CTL_REG(i, 7), pdram_timing->tinit4);
+ mmio_write_32(CTL_REG(i, 32),
+ (pdram_timing->tmrd << 8) |
+ pdram_timing->tmrd);
+ mmio_clrsetbits_32(CTL_REG(i, 59), 0xffffu << 16,
+ pdram_timing->txsr << 16);
+ }
+ mmio_write_32(CTL_REG(i, 6), pdram_timing->tinit3);
+ mmio_write_32(CTL_REG(i, 8), pdram_timing->tinit5);
+ mmio_clrsetbits_32(CTL_REG(i, 23), (0x7f << 16),
+ ((pdram_timing->cl * 2) << 16));
+ mmio_clrsetbits_32(CTL_REG(i, 23), (0x1f << 24),
+ (pdram_timing->cwl << 24));
+ mmio_clrsetbits_32(CTL_REG(i, 24), 0x3f, pdram_timing->al);
+ mmio_clrsetbits_32(CTL_REG(i, 26), 0xffffu << 16,
+ (pdram_timing->trc << 24) |
+ (pdram_timing->trrd << 16));
+ mmio_write_32(CTL_REG(i, 27),
+ (pdram_timing->tfaw << 24) |
+ (pdram_timing->trppb << 16) |
+ (pdram_timing->twtr << 8) |
+ pdram_timing->tras_min);
+
+ mmio_clrsetbits_32(CTL_REG(i, 31), 0xffu << 24,
+ max(4, pdram_timing->trtp) << 24);
+ mmio_write_32(CTL_REG(i, 33), (pdram_timing->tcke << 24) |
+ pdram_timing->tras_max);
+ mmio_clrsetbits_32(CTL_REG(i, 34), 0xff,
+ max(1, pdram_timing->tckesr));
+ mmio_clrsetbits_32(CTL_REG(i, 39),
+ (0x3f << 16) | (0xff << 8),
+ (pdram_timing->twr << 16) |
+ (pdram_timing->trcd << 8));
+ mmio_clrsetbits_32(CTL_REG(i, 42), 0x1f << 16,
+ pdram_timing->tmrz << 16);
+ tmp = pdram_timing->tdal ? pdram_timing->tdal :
+ (pdram_timing->twr + pdram_timing->trp);
+ mmio_clrsetbits_32(CTL_REG(i, 44), 0xff, tmp);
+ mmio_clrsetbits_32(CTL_REG(i, 45), 0xff, pdram_timing->trp);
+ mmio_write_32(CTL_REG(i, 48),
+ ((pdram_timing->trefi - 8) << 16) |
+ pdram_timing->trfc);
+ mmio_clrsetbits_32(CTL_REG(i, 52), 0xffff, pdram_timing->txp);
+ mmio_clrsetbits_32(CTL_REG(i, 53), 0xffffu << 16,
+ pdram_timing->txpdll << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 55), 0xf << 24,
+ pdram_timing->tcscke << 24);
+ mmio_clrsetbits_32(CTL_REG(i, 55), 0xff, pdram_timing->tmrri);
+ mmio_write_32(CTL_REG(i, 56),
+ (pdram_timing->tzqcke << 24) |
+ (pdram_timing->tmrwckel << 16) |
+ (pdram_timing->tckehcs << 8) |
+ pdram_timing->tckelcs);
+ mmio_clrsetbits_32(CTL_REG(i, 60), 0xffff, pdram_timing->txsnr);
+ mmio_clrsetbits_32(CTL_REG(i, 62), 0xffffu << 16,
+ (pdram_timing->tckehcmd << 24) |
+ (pdram_timing->tckelcmd << 16));
+ mmio_write_32(CTL_REG(i, 63),
+ (pdram_timing->tckelpd << 24) |
+ (pdram_timing->tescke << 16) |
+ (pdram_timing->tsr << 8) |
+ pdram_timing->tckckel);
+ mmio_clrsetbits_32(CTL_REG(i, 64), 0xfff,
+ (pdram_timing->tcmdcke << 8) |
+ pdram_timing->tcsckeh);
+ mmio_clrsetbits_32(CTL_REG(i, 92), 0xffff << 8,
+ (pdram_timing->tcksrx << 16) |
+ (pdram_timing->tcksre << 8));
+ mmio_clrsetbits_32(CTL_REG(i, 108), 0x1 << 24,
+ (timing_config->dllbp << 24));
+ mmio_clrsetbits_32(CTL_REG(i, 122), 0x3ff << 16,
+ (pdram_timing->tvrcg_enable << 16));
+ mmio_write_32(CTL_REG(i, 123), (pdram_timing->tfc_long << 16) |
+ pdram_timing->tvrcg_disable);
+ mmio_write_32(CTL_REG(i, 124),
+ (pdram_timing->tvref_long << 16) |
+ (pdram_timing->tckfspx << 8) |
+ pdram_timing->tckfspe);
+ mmio_write_32(CTL_REG(i, 133), (pdram_timing->mr[1] << 16) |
+ pdram_timing->mr[0]);
+ mmio_clrsetbits_32(CTL_REG(i, 134), 0xffff,
+ pdram_timing->mr[2]);
+ mmio_clrsetbits_32(CTL_REG(i, 138), 0xffff,
+ pdram_timing->mr[3]);
+ mmio_clrsetbits_32(CTL_REG(i, 139), 0xffu << 24,
+ pdram_timing->mr11 << 24);
+ mmio_write_32(CTL_REG(i, 147),
+ (pdram_timing->mr[1] << 16) |
+ pdram_timing->mr[0]);
+ mmio_clrsetbits_32(CTL_REG(i, 148), 0xffff,
+ pdram_timing->mr[2]);
+ mmio_clrsetbits_32(CTL_REG(i, 152), 0xffff,
+ pdram_timing->mr[3]);
+ mmio_clrsetbits_32(CTL_REG(i, 153), 0xffu << 24,
+ pdram_timing->mr11 << 24);
+ if (timing_config->dram_type == LPDDR4) {
+ mmio_clrsetbits_32(CTL_REG(i, 140), 0xffffu << 16,
+ pdram_timing->mr12 << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 142), 0xffffu << 16,
+ pdram_timing->mr14 << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 145), 0xffffu << 16,
+ pdram_timing->mr22 << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 154), 0xffffu << 16,
+ pdram_timing->mr12 << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 156), 0xffffu << 16,
+ pdram_timing->mr14 << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 159), 0xffffu << 16,
+ pdram_timing->mr22 << 16);
+ }
+ mmio_clrsetbits_32(CTL_REG(i, 179), 0xfff << 8,
+ pdram_timing->tzqinit << 8);
+ mmio_write_32(CTL_REG(i, 180), (pdram_timing->tzqcs << 16) |
+ (pdram_timing->tzqinit / 2));
+ mmio_write_32(CTL_REG(i, 181), (pdram_timing->tzqlat << 16) |
+ pdram_timing->tzqcal);
+ mmio_clrsetbits_32(CTL_REG(i, 212), 0xff << 8,
+ pdram_timing->todton << 8);
+
+ if (timing_config->odt) {
+ mmio_setbits_32(CTL_REG(i, 213), 1 << 16);
+ if (timing_config->freq < 400)
+ tmp = 4 << 24;
+ else
+ tmp = 8 << 24;
+ } else {
+ mmio_clrbits_32(CTL_REG(i, 213), 1 << 16);
+ tmp = 2 << 24;
+ }
+
+ mmio_clrsetbits_32(CTL_REG(i, 216), 0x1f << 24, tmp);
+ mmio_clrsetbits_32(CTL_REG(i, 221), (0x3 << 16) | (0xf << 8),
+ (pdram_timing->tdqsck << 16) |
+ (pdram_timing->tdqsck_max << 8));
+ tmp =
+ (get_wrlat_adj(timing_config->dram_type, pdram_timing->cwl)
+ << 8) | get_rdlat_adj(timing_config->dram_type,
+ pdram_timing->cl);
+ mmio_clrsetbits_32(CTL_REG(i, 284), 0xffff, tmp);
+ mmio_clrsetbits_32(CTL_REG(i, 82), 0xffffu << 16,
+ (4 * pdram_timing->trefi) << 16);
+
+ mmio_clrsetbits_32(CTL_REG(i, 83), 0xffff,
+ (2 * pdram_timing->trefi) & 0xffff);
+
+ if ((timing_config->dram_type == LPDDR3) ||
+ (timing_config->dram_type == LPDDR4)) {
+ tmp = get_pi_wrlat(pdram_timing, timing_config);
+ tmp1 = get_pi_todtoff_max(pdram_timing, timing_config);
+ tmp = (tmp > tmp1) ? (tmp - tmp1) : 0;
+ } else {
+ tmp = 0;
+ }
+ mmio_clrsetbits_32(CTL_REG(i, 214), 0x3f << 16,
+ (tmp & 0x3f) << 16);
+
+ if ((timing_config->dram_type == LPDDR3) ||
+ (timing_config->dram_type == LPDDR4)) {
+ /* min_rl_preamble = cl+TDQSCK_MIN -1 */
+ tmp = pdram_timing->cl +
+ get_pi_todtoff_min(pdram_timing, timing_config) - 1;
+ /* todtoff_max */
+ tmp1 = get_pi_todtoff_max(pdram_timing, timing_config);
+ tmp = (tmp > tmp1) ? (tmp - tmp1) : 0;
+ } else {
+ tmp = pdram_timing->cl - pdram_timing->cwl;
+ }
+ mmio_clrsetbits_32(CTL_REG(i, 215), 0x3f << 8,
+ (tmp & 0x3f) << 8);
+
+ mmio_clrsetbits_32(CTL_REG(i, 275), 0xff << 16,
+ (get_pi_tdfi_phy_rdlat(pdram_timing,
+ timing_config) &
+ 0xff) << 16);
+
+ mmio_clrsetbits_32(CTL_REG(i, 277), 0xffff,
+ (2 * pdram_timing->trefi) & 0xffff);
+
+ mmio_clrsetbits_32(CTL_REG(i, 282), 0xffff,
+ (2 * pdram_timing->trefi) & 0xffff);
+
+ mmio_write_32(CTL_REG(i, 283), 20 * pdram_timing->trefi);
+
+ /* CTL_308 TDFI_CALVL_CAPTURE_F0:RW:16:10 */
+ tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1;
+ if ((20000 % (1000000 / pdram_timing->mhz)) != 0)
+ tmp1++;
+ tmp = (tmp1 >> 1) + (tmp1 % 2) + 5;
+ mmio_clrsetbits_32(CTL_REG(i, 308), 0x3ff << 16, tmp << 16);
+
+ /* CTL_308 TDFI_CALVL_CC_F0:RW:0:10 */
+ tmp = tmp + 18;
+ mmio_clrsetbits_32(CTL_REG(i, 308), 0x3ff, tmp);
+
+ /* CTL_314 TDFI_WRCSLAT_F0:RW:8:8 */
+ tmp1 = get_pi_wrlat_adj(pdram_timing, timing_config);
+ if (timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) {
+ if (tmp1 == 0)
+ tmp = 0;
+ else if (tmp1 < 5)
+ tmp = tmp1 - 1;
+ else
+ tmp = tmp1 - 5;
+ } else {
+ tmp = tmp1 - 2;
+ }
+ mmio_clrsetbits_32(CTL_REG(i, 314), 0xff << 8, tmp << 8);
+
+ /* CTL_314 TDFI_RDCSLAT_F0:RW:0:8 */
+ if ((timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) &&
+ (pdram_timing->cl >= 5))
+ tmp = pdram_timing->cl - 5;
+ else
+ tmp = pdram_timing->cl - 2;
+ mmio_clrsetbits_32(CTL_REG(i, 314), 0xff, tmp);
+ }
+}
+
+static void gen_rk3399_ctl_params_f1(struct timing_related_config
+ *timing_config,
+ struct dram_timing_t *pdram_timing)
+{
+ uint32_t i;
+ uint32_t tmp, tmp1;
+
+ for (i = 0; i < timing_config->ch_cnt; i++) {
+ if (timing_config->dram_type == DDR3) {
+ tmp =
+ ((700000 + 10) * timing_config->freq + 999) / 1000;
+ tmp += pdram_timing->txsnr + (pdram_timing->tmrd * 3) +
+ pdram_timing->tmod + pdram_timing->tzqinit;
+ mmio_write_32(CTL_REG(i, 9), tmp);
+ mmio_clrsetbits_32(CTL_REG(i, 22), 0xffffu << 16,
+ pdram_timing->tdllk << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 34), 0xffffff00,
+ (pdram_timing->tmod << 24) |
+ (pdram_timing->tmrd << 16) |
+ (pdram_timing->trtp << 8));
+ mmio_clrsetbits_32(CTL_REG(i, 60), 0xffffu << 16,
+ (pdram_timing->txsr -
+ pdram_timing->trcd) << 16);
+ } else if (timing_config->dram_type == LPDDR4) {
+ mmio_write_32(CTL_REG(i, 9), pdram_timing->tinit1 +
+ pdram_timing->tinit3);
+ mmio_clrsetbits_32(CTL_REG(i, 34), 0xffffff00,
+ (pdram_timing->tmrd << 24) |
+ (pdram_timing->tmrd << 16) |
+ (pdram_timing->trtp << 8));
+ mmio_clrsetbits_32(CTL_REG(i, 60), 0xffffu << 16,
+ pdram_timing->txsr << 16);
+ } else {
+ mmio_write_32(CTL_REG(i, 9), pdram_timing->tinit1);
+ mmio_write_32(CTL_REG(i, 11), pdram_timing->tinit4);
+ mmio_clrsetbits_32(CTL_REG(i, 34), 0xffffff00,
+ (pdram_timing->tmrd << 24) |
+ (pdram_timing->tmrd << 16) |
+ (pdram_timing->trtp << 8));
+ mmio_clrsetbits_32(CTL_REG(i, 60), 0xffffu << 16,
+ pdram_timing->txsr << 16);
+ }
+ mmio_write_32(CTL_REG(i, 10), pdram_timing->tinit3);
+ mmio_write_32(CTL_REG(i, 12), pdram_timing->tinit5);
+ mmio_clrsetbits_32(CTL_REG(i, 24), (0x7f << 8),
+ ((pdram_timing->cl * 2) << 8));
+ mmio_clrsetbits_32(CTL_REG(i, 24), (0x1f << 16),
+ (pdram_timing->cwl << 16));
+ mmio_clrsetbits_32(CTL_REG(i, 24), 0x3f << 24,
+ pdram_timing->al << 24);
+ mmio_clrsetbits_32(CTL_REG(i, 28), 0xffffff00,
+ (pdram_timing->tras_min << 24) |
+ (pdram_timing->trc << 16) |
+ (pdram_timing->trrd << 8));
+ mmio_clrsetbits_32(CTL_REG(i, 29), 0xffffff,
+ (pdram_timing->tfaw << 16) |
+ (pdram_timing->trppb << 8) |
+ pdram_timing->twtr);
+ mmio_write_32(CTL_REG(i, 35), (pdram_timing->tcke << 24) |
+ pdram_timing->tras_max);
+ mmio_clrsetbits_32(CTL_REG(i, 36), 0xff,
+ max(1, pdram_timing->tckesr));
+ mmio_clrsetbits_32(CTL_REG(i, 39), (0xffu << 24),
+ (pdram_timing->trcd << 24));
+ mmio_clrsetbits_32(CTL_REG(i, 40), 0x3f, pdram_timing->twr);
+ mmio_clrsetbits_32(CTL_REG(i, 42), 0x1f << 24,
+ pdram_timing->tmrz << 24);
+ tmp = pdram_timing->tdal ? pdram_timing->tdal :
+ (pdram_timing->twr + pdram_timing->trp);
+ mmio_clrsetbits_32(CTL_REG(i, 44), 0xff << 8, tmp << 8);
+ mmio_clrsetbits_32(CTL_REG(i, 45), 0xff << 8,
+ pdram_timing->trp << 8);
+ mmio_write_32(CTL_REG(i, 49),
+ ((pdram_timing->trefi - 8) << 16) |
+ pdram_timing->trfc);
+ mmio_clrsetbits_32(CTL_REG(i, 52), 0xffffu << 16,
+ pdram_timing->txp << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 54), 0xffff,
+ pdram_timing->txpdll);
+ mmio_clrsetbits_32(CTL_REG(i, 55), 0xff << 8,
+ pdram_timing->tmrri << 8);
+ mmio_write_32(CTL_REG(i, 57), (pdram_timing->tmrwckel << 24) |
+ (pdram_timing->tckehcs << 16) |
+ (pdram_timing->tckelcs << 8) |
+ pdram_timing->tcscke);
+ mmio_clrsetbits_32(CTL_REG(i, 58), 0xf, pdram_timing->tzqcke);
+ mmio_clrsetbits_32(CTL_REG(i, 61), 0xffff, pdram_timing->txsnr);
+ mmio_clrsetbits_32(CTL_REG(i, 64), 0xffffu << 16,
+ (pdram_timing->tckehcmd << 24) |
+ (pdram_timing->tckelcmd << 16));
+ mmio_write_32(CTL_REG(i, 65), (pdram_timing->tckelpd << 24) |
+ (pdram_timing->tescke << 16) |
+ (pdram_timing->tsr << 8) |
+ pdram_timing->tckckel);
+ mmio_clrsetbits_32(CTL_REG(i, 66), 0xfff,
+ (pdram_timing->tcmdcke << 8) |
+ pdram_timing->tcsckeh);
+ mmio_clrsetbits_32(CTL_REG(i, 92), (0xffu << 24),
+ (pdram_timing->tcksre << 24));
+ mmio_clrsetbits_32(CTL_REG(i, 93), 0xff,
+ pdram_timing->tcksrx);
+ mmio_clrsetbits_32(CTL_REG(i, 108), (0x1 << 25),
+ (timing_config->dllbp << 25));
+ mmio_write_32(CTL_REG(i, 125),
+ (pdram_timing->tvrcg_disable << 16) |
+ pdram_timing->tvrcg_enable);
+ mmio_write_32(CTL_REG(i, 126), (pdram_timing->tckfspx << 24) |
+ (pdram_timing->tckfspe << 16) |
+ pdram_timing->tfc_long);
+ mmio_clrsetbits_32(CTL_REG(i, 127), 0xffff,
+ pdram_timing->tvref_long);
+ mmio_clrsetbits_32(CTL_REG(i, 134), 0xffffu << 16,
+ pdram_timing->mr[0] << 16);
+ mmio_write_32(CTL_REG(i, 135), (pdram_timing->mr[2] << 16) |
+ pdram_timing->mr[1]);
+ mmio_clrsetbits_32(CTL_REG(i, 138), 0xffffu << 16,
+ pdram_timing->mr[3] << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 140), 0xff, pdram_timing->mr11);
+ mmio_clrsetbits_32(CTL_REG(i, 148), 0xffffu << 16,
+ pdram_timing->mr[0] << 16);
+ mmio_write_32(CTL_REG(i, 149), (pdram_timing->mr[2] << 16) |
+ pdram_timing->mr[1]);
+ mmio_clrsetbits_32(CTL_REG(i, 152), 0xffffu << 16,
+ pdram_timing->mr[3] << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 154), 0xff, pdram_timing->mr11);
+ if (timing_config->dram_type == LPDDR4) {
+ mmio_clrsetbits_32(CTL_REG(i, 141), 0xffff,
+ pdram_timing->mr12);
+ mmio_clrsetbits_32(CTL_REG(i, 143), 0xffff,
+ pdram_timing->mr14);
+ mmio_clrsetbits_32(CTL_REG(i, 146), 0xffff,
+ pdram_timing->mr22);
+ mmio_clrsetbits_32(CTL_REG(i, 155), 0xffff,
+ pdram_timing->mr12);
+ mmio_clrsetbits_32(CTL_REG(i, 157), 0xffff,
+ pdram_timing->mr14);
+ mmio_clrsetbits_32(CTL_REG(i, 160), 0xffff,
+ pdram_timing->mr22);
+ }
+ mmio_write_32(CTL_REG(i, 182),
+ ((pdram_timing->tzqinit / 2) << 16) |
+ pdram_timing->tzqinit);
+ mmio_write_32(CTL_REG(i, 183), (pdram_timing->tzqcal << 16) |
+ pdram_timing->tzqcs);
+ mmio_clrsetbits_32(CTL_REG(i, 184), 0x3f, pdram_timing->tzqlat);
+ mmio_clrsetbits_32(CTL_REG(i, 188), 0xfff,
+ pdram_timing->tzqreset);
+ mmio_clrsetbits_32(CTL_REG(i, 212), 0xff << 16,
+ pdram_timing->todton << 16);
+
+ if (timing_config->odt) {
+ mmio_setbits_32(CTL_REG(i, 213), (1 << 24));
+ if (timing_config->freq < 400)
+ tmp = 4 << 24;
+ else
+ tmp = 8 << 24;
+ } else {
+ mmio_clrbits_32(CTL_REG(i, 213), (1 << 24));
+ tmp = 2 << 24;
+ }
+ mmio_clrsetbits_32(CTL_REG(i, 217), 0x1f << 24, tmp);
+ mmio_clrsetbits_32(CTL_REG(i, 221), 0xf << 24,
+ (pdram_timing->tdqsck_max << 24));
+ mmio_clrsetbits_32(CTL_REG(i, 222), 0x3, pdram_timing->tdqsck);
+ mmio_clrsetbits_32(CTL_REG(i, 291), 0xffff,
+ (get_wrlat_adj(timing_config->dram_type,
+ pdram_timing->cwl) << 8) |
+ get_rdlat_adj(timing_config->dram_type,
+ pdram_timing->cl));
+
+ mmio_clrsetbits_32(CTL_REG(i, 84), 0xffff,
+ (4 * pdram_timing->trefi) & 0xffff);
+
+ mmio_clrsetbits_32(CTL_REG(i, 84), 0xffffu << 16,
+ ((2 * pdram_timing->trefi) & 0xffff) << 16);
+
+ if ((timing_config->dram_type == LPDDR3) ||
+ (timing_config->dram_type == LPDDR4)) {
+ tmp = get_pi_wrlat(pdram_timing, timing_config);
+ tmp1 = get_pi_todtoff_max(pdram_timing, timing_config);
+ tmp = (tmp > tmp1) ? (tmp - tmp1) : 0;
+ } else {
+ tmp = 0;
+ }
+ mmio_clrsetbits_32(CTL_REG(i, 214), 0x3f << 24,
+ (tmp & 0x3f) << 24);
+
+ if ((timing_config->dram_type == LPDDR3) ||
+ (timing_config->dram_type == LPDDR4)) {
+ /* min_rl_preamble = cl + TDQSCK_MIN - 1 */
+ tmp = pdram_timing->cl +
+ get_pi_todtoff_min(pdram_timing, timing_config);
+ tmp--;
+ /* todtoff_max */
+ tmp1 = get_pi_todtoff_max(pdram_timing, timing_config);
+ tmp = (tmp > tmp1) ? (tmp - tmp1) : 0;
+ } else {
+ tmp = pdram_timing->cl - pdram_timing->cwl;
+ }
+ mmio_clrsetbits_32(CTL_REG(i, 215), 0x3f << 16,
+ (tmp & 0x3f) << 16);
+
+ mmio_clrsetbits_32(CTL_REG(i, 275), 0xffu << 24,
+ (get_pi_tdfi_phy_rdlat(pdram_timing,
+ timing_config) &
+ 0xff) << 24);
+
+ mmio_clrsetbits_32(CTL_REG(i, 284), 0xffffu << 16,
+ ((2 * pdram_timing->trefi) & 0xffff) << 16);
+
+ mmio_clrsetbits_32(CTL_REG(i, 289), 0xffff,
+ (2 * pdram_timing->trefi) & 0xffff);
+
+ mmio_write_32(CTL_REG(i, 290), 20 * pdram_timing->trefi);
+
+ /* CTL_309 TDFI_CALVL_CAPTURE_F1:RW:16:10 */
+ tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1;
+ if ((20000 % (1000000 / pdram_timing->mhz)) != 0)
+ tmp1++;
+ tmp = (tmp1 >> 1) + (tmp1 % 2) + 5;
+ mmio_clrsetbits_32(CTL_REG(i, 309), 0x3ff << 16, tmp << 16);
+
+ /* CTL_309 TDFI_CALVL_CC_F1:RW:0:10 */
+ tmp = tmp + 18;
+ mmio_clrsetbits_32(CTL_REG(i, 309), 0x3ff, tmp);
+
+ /* CTL_314 TDFI_WRCSLAT_F1:RW:24:8 */
+ tmp1 = get_pi_wrlat_adj(pdram_timing, timing_config);
+ if (timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) {
+ if (tmp1 == 0)
+ tmp = 0;
+ else if (tmp1 < 5)
+ tmp = tmp1 - 1;
+ else
+ tmp = tmp1 - 5;
+ } else {
+ tmp = tmp1 - 2;
+ }
+
+ mmio_clrsetbits_32(CTL_REG(i, 314), 0xffu << 24, tmp << 24);
+
+ /* CTL_314 TDFI_RDCSLAT_F1:RW:16:8 */
+ if ((timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) &&
+ (pdram_timing->cl >= 5))
+ tmp = pdram_timing->cl - 5;
+ else
+ tmp = pdram_timing->cl - 2;
+ mmio_clrsetbits_32(CTL_REG(i, 314), 0xff << 16, tmp << 16);
+ }
+}
+
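+/*
+ * Toggle the controller's training-related enable bits on every channel;
+ * at or below the PHY DLL bypass frequency the first two enables are left
+ * cleared.
+ */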
+static void gen_rk3399_enable_training(uint32_t ch_cnt, uint32_t nmhz)
+{
+ uint32_t i, tmp;
+
+ if (nmhz <= PHY_DLL_BYPASS_FREQ)
+ tmp = 0;
+ else
+ tmp = 1;
+
+ for (i = 0; i < ch_cnt; i++) {
+ mmio_clrsetbits_32(CTL_REG(i, 305), 1 << 16, tmp << 16);
+ mmio_clrsetbits_32(CTL_REG(i, 71), 1, tmp);
+ mmio_clrsetbits_32(CTL_REG(i, 70), 1 << 8, 1 << 8);
+ }
+}
+
+static void gen_rk3399_disable_training(uint32_t ch_cnt)
+{
+ uint32_t i;
+
+ for (i = 0; i < ch_cnt; i++) {
+ mmio_clrbits_32(CTL_REG(i, 305), 1 << 16);
+ mmio_clrbits_32(CTL_REG(i, 71), 1);
+ mmio_clrbits_32(CTL_REG(i, 70), 1 << 8);
+ }
+}
+
+static void gen_rk3399_ctl_params(struct timing_related_config *timing_config,
+ struct dram_timing_t *pdram_timing,
+ uint32_t fn)
+{
+ if (fn == 0)
+ gen_rk3399_ctl_params_f0(timing_config, pdram_timing);
+ else
+ gen_rk3399_ctl_params_f1(timing_config, pdram_timing);
+}
+
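+/*
+ * Program the PI (PHY-independent) training-module registers for frequency
+ * set 0: DFI handshake timeouts, read/write latency adjusts, CA-levelling
+ * timings, refresh/ctrlupd intervals and the MR1/MR2 shadow values;
+ * gen_rk3399_pi_params_f1() below mirrors this for the second set.
+ */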
+static void gen_rk3399_pi_params_f0(struct timing_related_config *timing_config,
+ struct dram_timing_t *pdram_timing)
+{
+ uint32_t tmp, tmp1, tmp2;
+ uint32_t i;
+
+ for (i = 0; i < timing_config->ch_cnt; i++) {
+ /* PI_02 PI_TDFI_PHYMSTR_MAX_F0:RW:0:32 */
+ tmp = 4 * pdram_timing->trefi;
+ mmio_write_32(PI_REG(i, 2), tmp);
+ /* PI_03 PI_TDFI_PHYMSTR_RESP_F0:RW:0:16 */
+ tmp = 2 * pdram_timing->trefi;
+ mmio_clrsetbits_32(PI_REG(i, 3), 0xffff, tmp);
+ /* PI_07 PI_TDFI_PHYUPD_RESP_F0:RW:16:16 */
+ mmio_clrsetbits_32(PI_REG(i, 7), 0xffffu << 16, tmp << 16);
+
+ /* PI_42 PI_TDELAY_RDWR_2_BUS_IDLE_F0:RW:0:8 */
+ if (timing_config->dram_type == LPDDR4)
+ tmp = 2;
+ else
+ tmp = 0;
+ tmp = (pdram_timing->bl / 2) + 4 +
+ (get_pi_rdlat_adj(pdram_timing) - 2) + tmp +
+ get_pi_tdfi_phy_rdlat(pdram_timing, timing_config);
+ mmio_clrsetbits_32(PI_REG(i, 42), 0xff, tmp);
+ /* PI_43 PI_WRLAT_F0:RW:0:5 */
+ if (timing_config->dram_type == LPDDR3) {
+ tmp = get_pi_wrlat(pdram_timing, timing_config);
+ mmio_clrsetbits_32(PI_REG(i, 43), 0x1f, tmp);
+ }
+ /* PI_43 PI_ADDITIVE_LAT_F0:RW:8:6 */
+ mmio_clrsetbits_32(PI_REG(i, 43), 0x3f << 8,
+ PI_ADD_LATENCY << 8);
+
+ /* PI_43 PI_CASLAT_LIN_F0:RW:16:7 */
+ mmio_clrsetbits_32(PI_REG(i, 43), 0x7f << 16,
+ (pdram_timing->cl * 2) << 16);
+ /* PI_46 PI_TREF_F0:RW:16:16 */
+ mmio_clrsetbits_32(PI_REG(i, 46), 0xffffu << 16,
+ pdram_timing->trefi << 16);
+ /* PI_46 PI_TRFC_F0:RW:0:10 */
+ mmio_clrsetbits_32(PI_REG(i, 46), 0x3ff, pdram_timing->trfc);
+ /* PI_66 PI_TODTL_2CMD_F0:RW:24:8 */
+ if (timing_config->dram_type == LPDDR3) {
+ tmp = get_pi_todtoff_max(pdram_timing, timing_config);
+ mmio_clrsetbits_32(PI_REG(i, 66), 0xffu << 24,
+ tmp << 24);
+ }
+ /* PI_72 PI_WR_TO_ODTH_F0:RW:16:6 */
+ if ((timing_config->dram_type == LPDDR3) ||
+ (timing_config->dram_type == LPDDR4)) {
+ tmp1 = get_pi_wrlat(pdram_timing, timing_config);
+ tmp2 = get_pi_todtoff_max(pdram_timing, timing_config);
+ if (tmp1 > tmp2)
+ tmp = tmp1 - tmp2;
+ else
+ tmp = 0;
+ } else if (timing_config->dram_type == DDR3) {
+ tmp = 0;
+ }
+ mmio_clrsetbits_32(PI_REG(i, 72), 0x3f << 16, tmp << 16);
+ /* PI_73 PI_RD_TO_ODTH_F0:RW:8:6 */
+ if ((timing_config->dram_type == LPDDR3) ||
+ (timing_config->dram_type == LPDDR4)) {
+ /* min_rl_preamble = cl + TDQSCK_MIN - 1 */
+ tmp1 = pdram_timing->cl;
+ tmp1 += get_pi_todtoff_min(pdram_timing, timing_config);
+ tmp1--;
+ /* todtoff_max */
+ tmp2 = get_pi_todtoff_max(pdram_timing, timing_config);
+ if (tmp1 > tmp2)
+ tmp = tmp1 - tmp2;
+ else
+ tmp = 0;
+ } else if (timing_config->dram_type == DDR3) {
+ tmp = pdram_timing->cl - pdram_timing->cwl;
+ }
+ mmio_clrsetbits_32(PI_REG(i, 73), 0x3f << 8, tmp << 8);
+ /* PI_89 PI_RDLAT_ADJ_F0:RW:16:8 */
+ tmp = get_pi_rdlat_adj(pdram_timing);
+ mmio_clrsetbits_32(PI_REG(i, 89), 0xff << 16, tmp << 16);
+ /* PI_90 PI_WRLAT_ADJ_F0:RW:16:8 */
+ tmp = get_pi_wrlat_adj(pdram_timing, timing_config);
+ mmio_clrsetbits_32(PI_REG(i, 90), 0xff << 16, tmp << 16);
+ /* PI_91 PI_TDFI_WRCSLAT_F0:RW:16:8 */
+ tmp1 = tmp;
+ if (tmp1 == 0)
+ tmp = 0;
+ else if (tmp1 < 5)
+ tmp = tmp1 - 1;
+ else
+ tmp = tmp1 - 5;
+ mmio_clrsetbits_32(PI_REG(i, 91), 0xff << 16, tmp << 16);
+ /* PI_95 PI_TDFI_CALVL_CAPTURE_F0:RW:16:10 */
+ tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1;
+ if ((20000 % (1000000 / pdram_timing->mhz)) != 0)
+ tmp1++;
+ tmp = (tmp1 >> 1) + (tmp1 % 2) + 5;
+ mmio_clrsetbits_32(PI_REG(i, 95), 0x3ff << 16, tmp << 16);
+ /* PI_95 PI_TDFI_CALVL_CC_F0:RW:0:10 */
+ mmio_clrsetbits_32(PI_REG(i, 95), 0x3ff, tmp + 18);
+ /* PI_102 PI_TMRZ_F0:RW:8:5 */
+ mmio_clrsetbits_32(PI_REG(i, 102), 0x1f << 8,
+ pdram_timing->tmrz << 8);
+ /* PI_111 PI_TDFI_CALVL_STROBE_F0:RW:8:4 */
+ tmp1 = 2 * 1000 / (1000000 / pdram_timing->mhz);
+ if ((2 * 1000 % (1000000 / pdram_timing->mhz)) != 0)
+ tmp1++;
+ /* pi_tdfi_calvl_strobe=tds_train+5 */
+ tmp = tmp1 + 5;
+ mmio_clrsetbits_32(PI_REG(i, 111), 0xf << 8, tmp << 8);
+ /* PI_116 PI_TCKEHDQS_F0:RW:16:6 */
+ tmp = 10000 / (1000000 / pdram_timing->mhz);
+ if ((10000 % (1000000 / pdram_timing->mhz)) != 0)
+ tmp++;
+ if (pdram_timing->mhz <= 100)
+ tmp = tmp + 1;
+ else
+ tmp = tmp + 8;
+ mmio_clrsetbits_32(PI_REG(i, 116), 0x3f << 16, tmp << 16);
+ /* PI_125 PI_MR1_DATA_F0_0:RW+:8:16 */
+ mmio_clrsetbits_32(PI_REG(i, 125), 0xffff << 8,
+ pdram_timing->mr[1] << 8);
+ /* PI_133 PI_MR1_DATA_F0_1:RW+:0:16 */
+ mmio_clrsetbits_32(PI_REG(i, 133), 0xffff, pdram_timing->mr[1]);
+ /* PI_140 PI_MR1_DATA_F0_2:RW+:16:16 */
+ mmio_clrsetbits_32(PI_REG(i, 140), 0xffffu << 16,
+ pdram_timing->mr[1] << 16);
+ /* PI_148 PI_MR1_DATA_F0_3:RW+:0:16 */
+ mmio_clrsetbits_32(PI_REG(i, 148), 0xffff, pdram_timing->mr[1]);
+ /* PI_126 PI_MR2_DATA_F0_0:RW+:0:16 */
+ mmio_clrsetbits_32(PI_REG(i, 126), 0xffff, pdram_timing->mr[2]);
+ /* PI_133 PI_MR2_DATA_F0_1:RW+:16:16 */
+ mmio_clrsetbits_32(PI_REG(i, 133), 0xffffu << 16,
+ pdram_timing->mr[2] << 16);
+ /* PI_141 PI_MR2_DATA_F0_2:RW+:0:16 */
+ mmio_clrsetbits_32(PI_REG(i, 141), 0xffff, pdram_timing->mr[2]);
+ /* PI_148 PI_MR2_DATA_F0_3:RW+:16:16 */
+ mmio_clrsetbits_32(PI_REG(i, 148), 0xffffu << 16,
+ pdram_timing->mr[2] << 16);
+ /* PI_156 PI_TFC_F0:RW:0:10 */
+ mmio_clrsetbits_32(PI_REG(i, 156), 0x3ff,
+ pdram_timing->tfc_long);
+ /* PI_158 PI_TWR_F0:RW:24:6 */
+ mmio_clrsetbits_32(PI_REG(i, 158), 0x3f << 24,
+ pdram_timing->twr << 24);
+ /* PI_158 PI_TWTR_F0:RW:16:6 */
+ mmio_clrsetbits_32(PI_REG(i, 158), 0x3f << 16,
+ pdram_timing->twtr << 16);
+ /* PI_158 PI_TRCD_F0:RW:8:8 */
+ mmio_clrsetbits_32(PI_REG(i, 158), 0xff << 8,
+ pdram_timing->trcd << 8);
+ /* PI_158 PI_TRP_F0:RW:0:8 */
+ mmio_clrsetbits_32(PI_REG(i, 158), 0xff, pdram_timing->trp);
+ /* PI_157 PI_TRTP_F0:RW:24:8 */
+ mmio_clrsetbits_32(PI_REG(i, 157), 0xffu << 24,
+ pdram_timing->trtp << 24);
+ /* PI_159 PI_TRAS_MIN_F0:RW:24:8 */
+ mmio_clrsetbits_32(PI_REG(i, 159), 0xffu << 24,
+ pdram_timing->tras_min << 24);
+ /* PI_159 PI_TRAS_MAX_F0:RW:0:17 */
+ tmp = pdram_timing->tras_max * 99 / 100;
+ mmio_clrsetbits_32(PI_REG(i, 159), 0x1ffff, tmp);
+ /* PI_160 PI_TMRD_F0:RW:16:6 */
+ mmio_clrsetbits_32(PI_REG(i, 160), 0x3f << 16,
+ pdram_timing->tmrd << 16);
+ /*PI_160 PI_TDQSCK_MAX_F0:RW:0:4 */
+ mmio_clrsetbits_32(PI_REG(i, 160), 0xf,
+ pdram_timing->tdqsck_max);
+ /* PI_187 PI_TDFI_CTRLUPD_MAX_F0:RW:8:16 */
+ mmio_clrsetbits_32(PI_REG(i, 187), 0xffff << 8,
+ (2 * pdram_timing->trefi) << 8);
+ /* PI_188 PI_TDFI_CTRLUPD_INTERVAL_F0:RW:0:32 */
+ mmio_clrsetbits_32(PI_REG(i, 188), 0xffffffff,
+ 20 * pdram_timing->trefi);
+ }
+}
+
+static void gen_rk3399_pi_params_f1(struct timing_related_config *timing_config,
+ struct dram_timing_t *pdram_timing)
+{
+ uint32_t tmp, tmp1, tmp2;
+ uint32_t i;
+
+ for (i = 0; i < timing_config->ch_cnt; i++) {
+ /* PI_04 PI_TDFI_PHYMSTR_MAX_F1:RW:0:32 */
+ tmp = 4 * pdram_timing->trefi;
+ mmio_write_32(PI_REG(i, 4), tmp);
+ /* PI_05 PI_TDFI_PHYMSTR_RESP_F1:RW:0:16 */
+ tmp = 2 * pdram_timing->trefi;
+ mmio_clrsetbits_32(PI_REG(i, 5), 0xffff, tmp);
+ /* PI_12 PI_TDFI_PHYUPD_RESP_F1:RW:0:16 */
+ mmio_clrsetbits_32(PI_REG(i, 12), 0xffff, tmp);
+
+ /* PI_42 PI_TDELAY_RDWR_2_BUS_IDLE_F1:RW:8:8 */
+ if (timing_config->dram_type == LPDDR4)
+ tmp = 2;
+ else
+ tmp = 0;
+ tmp = (pdram_timing->bl / 2) + 4 +
+ (get_pi_rdlat_adj(pdram_timing) - 2) + tmp +
+ get_pi_tdfi_phy_rdlat(pdram_timing, timing_config);
+ mmio_clrsetbits_32(PI_REG(i, 42), 0xff << 8, tmp << 8);
+ /* PI_43 PI_WRLAT_F1:RW:24:5 */
+ if (timing_config->dram_type == LPDDR3) {
+ tmp = get_pi_wrlat(pdram_timing, timing_config);
+ mmio_clrsetbits_32(PI_REG(i, 43), 0x1f << 24,
+ tmp << 24);
+ }
+ /* PI_44 PI_ADDITIVE_LAT_F1:RW:0:6 */
+ mmio_clrsetbits_32(PI_REG(i, 44), 0x3f, PI_ADD_LATENCY);
+ /* PI_44 PI_CASLAT_LIN_F1:RW:8:7:=0x18 */
+ mmio_clrsetbits_32(PI_REG(i, 44), 0x7f << 8,
+ (pdram_timing->cl * 2) << 8);
+ /* PI_47 PI_TREF_F1:RW:16:16 */
+ mmio_clrsetbits_32(PI_REG(i, 47), 0xffffu << 16,
+ pdram_timing->trefi << 16);
+ /* PI_47 PI_TRFC_F1:RW:0:10 */
+ mmio_clrsetbits_32(PI_REG(i, 47), 0x3ff, pdram_timing->trfc);
+ /* PI_67 PI_TODTL_2CMD_F1:RW:8:8 */
+ if (timing_config->dram_type == LPDDR3) {
+ tmp = get_pi_todtoff_max(pdram_timing, timing_config);
+ mmio_clrsetbits_32(PI_REG(i, 67), 0xff << 8, tmp << 8);
+ }
+ /* PI_72 PI_WR_TO_ODTH_F1:RW:24:6 */
+ if ((timing_config->dram_type == LPDDR3) ||
+ (timing_config->dram_type == LPDDR4)) {
+ tmp1 = get_pi_wrlat(pdram_timing, timing_config);
+ tmp2 = get_pi_todtoff_max(pdram_timing, timing_config);
+ if (tmp1 > tmp2)
+ tmp = tmp1 - tmp2;
+ else
+ tmp = 0;
+ } else if (timing_config->dram_type == DDR3) {
+ tmp = 0;
+ }
+ mmio_clrsetbits_32(PI_REG(i, 72), 0x3f << 24, tmp << 24);
+ /* PI_73 PI_RD_TO_ODTH_F1:RW:16:6 */
+ if ((timing_config->dram_type == LPDDR3) ||
+ (timing_config->dram_type == LPDDR4)) {
+ /* min_rl_preamble = cl + TDQSCK_MIN - 1 */
+ tmp1 = pdram_timing->cl +
+ get_pi_todtoff_min(pdram_timing, timing_config);
+ tmp1--;
+ /* todtoff_max */
+ tmp2 = get_pi_todtoff_max(pdram_timing, timing_config);
+ if (tmp1 > tmp2)
+ tmp = tmp1 - tmp2;
+ else
+ tmp = 0;
+ } else if (timing_config->dram_type == DDR3)
+ tmp = pdram_timing->cl - pdram_timing->cwl;
+
+ mmio_clrsetbits_32(PI_REG(i, 73), 0x3f << 16, tmp << 16);
+		/* PI_89 PI_RDLAT_ADJ_F1:RW:24:8 */
+ tmp = get_pi_rdlat_adj(pdram_timing);
+ mmio_clrsetbits_32(PI_REG(i, 89), 0xffu << 24, tmp << 24);
+ /* PI_90 PI_WRLAT_ADJ_F1:RW:24:8 */
+ tmp = get_pi_wrlat_adj(pdram_timing, timing_config);
+ mmio_clrsetbits_32(PI_REG(i, 90), 0xffu << 24, tmp << 24);
+ /* PI_91 PI_TDFI_WRCSLAT_F1:RW:24:8 */
+ tmp1 = tmp;
+ if (tmp1 == 0)
+ tmp = 0;
+ else if (tmp1 < 5)
+ tmp = tmp1 - 1;
+ else
+ tmp = tmp1 - 5;
+ mmio_clrsetbits_32(PI_REG(i, 91), 0xffu << 24, tmp << 24);
+ /*PI_96 PI_TDFI_CALVL_CAPTURE_F1:RW:16:10 */
+ /* tadr=20ns */
+ tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1;
+ if ((20000 % (1000000 / pdram_timing->mhz)) != 0)
+ tmp1++;
+ tmp = (tmp1 >> 1) + (tmp1 % 2) + 5;
+ mmio_clrsetbits_32(PI_REG(i, 96), 0x3ff << 16, tmp << 16);
+ /* PI_96 PI_TDFI_CALVL_CC_F1:RW:0:10 */
+ tmp = tmp + 18;
+ mmio_clrsetbits_32(PI_REG(i, 96), 0x3ff, tmp);
+ /*PI_103 PI_TMRZ_F1:RW:0:5 */
+ mmio_clrsetbits_32(PI_REG(i, 103), 0x1f, pdram_timing->tmrz);
+ /*PI_111 PI_TDFI_CALVL_STROBE_F1:RW:16:4 */
+ /* tds_train=ceil(2/ns) */
+ tmp1 = 2 * 1000 / (1000000 / pdram_timing->mhz);
+ if ((2 * 1000 % (1000000 / pdram_timing->mhz)) != 0)
+ tmp1++;
+ /* pi_tdfi_calvl_strobe=tds_train+5 */
+ tmp = tmp1 + 5;
+ mmio_clrsetbits_32(PI_REG(i, 111), 0xf << 16,
+ tmp << 16);
+ /* PI_116 PI_TCKEHDQS_F1:RW:24:6 */
+ tmp = 10000 / (1000000 / pdram_timing->mhz);
+ if ((10000 % (1000000 / pdram_timing->mhz)) != 0)
+ tmp++;
+ if (pdram_timing->mhz <= 100)
+ tmp = tmp + 1;
+ else
+ tmp = tmp + 8;
+ mmio_clrsetbits_32(PI_REG(i, 116), 0x3f << 24,
+ tmp << 24);
+ /* PI_128 PI_MR1_DATA_F1_0:RW+:0:16 */
+ mmio_clrsetbits_32(PI_REG(i, 128), 0xffff, pdram_timing->mr[1]);
+ /* PI_135 PI_MR1_DATA_F1_1:RW+:8:16 */
+ mmio_clrsetbits_32(PI_REG(i, 135), 0xffff << 8,
+ pdram_timing->mr[1] << 8);
+ /* PI_143 PI_MR1_DATA_F1_2:RW+:0:16 */
+ mmio_clrsetbits_32(PI_REG(i, 143), 0xffff, pdram_timing->mr[1]);
+ /* PI_150 PI_MR1_DATA_F1_3:RW+:8:16 */
+ mmio_clrsetbits_32(PI_REG(i, 150), 0xffff << 8,
+ pdram_timing->mr[1] << 8);
+ /* PI_128 PI_MR2_DATA_F1_0:RW+:16:16 */
+ mmio_clrsetbits_32(PI_REG(i, 128), 0xffffu << 16,
+ pdram_timing->mr[2] << 16);
+ /* PI_136 PI_MR2_DATA_F1_1:RW+:0:16 */
+ mmio_clrsetbits_32(PI_REG(i, 136), 0xffff, pdram_timing->mr[2]);
+ /* PI_143 PI_MR2_DATA_F1_2:RW+:16:16 */
+ mmio_clrsetbits_32(PI_REG(i, 143), 0xffffu << 16,
+ pdram_timing->mr[2] << 16);
+ /* PI_151 PI_MR2_DATA_F1_3:RW+:0:16 */
+ mmio_clrsetbits_32(PI_REG(i, 151), 0xffff, pdram_timing->mr[2]);
+ /* PI_156 PI_TFC_F1:RW:16:10 */
+ mmio_clrsetbits_32(PI_REG(i, 156), 0x3ff << 16,
+ pdram_timing->tfc_long << 16);
+ /* PI_162 PI_TWR_F1:RW:8:6 */
+ mmio_clrsetbits_32(PI_REG(i, 162), 0x3f << 8,
+ pdram_timing->twr << 8);
+ /* PI_162 PI_TWTR_F1:RW:0:6 */
+ mmio_clrsetbits_32(PI_REG(i, 162), 0x3f, pdram_timing->twtr);
+ /* PI_161 PI_TRCD_F1:RW:24:8 */
+ mmio_clrsetbits_32(PI_REG(i, 161), 0xffu << 24,
+ pdram_timing->trcd << 24);
+ /* PI_161 PI_TRP_F1:RW:16:8 */
+ mmio_clrsetbits_32(PI_REG(i, 161), 0xff << 16,
+ pdram_timing->trp << 16);
+ /* PI_161 PI_TRTP_F1:RW:8:8 */
+ mmio_clrsetbits_32(PI_REG(i, 161), 0xff << 8,
+ pdram_timing->trtp << 8);
+ /* PI_163 PI_TRAS_MIN_F1:RW:24:8 */
+ mmio_clrsetbits_32(PI_REG(i, 163), 0xffu << 24,
+ pdram_timing->tras_min << 24);
+ /* PI_163 PI_TRAS_MAX_F1:RW:0:17 */
+ mmio_clrsetbits_32(PI_REG(i, 163), 0x1ffff,
+ pdram_timing->tras_max * 99 / 100);
+ /* PI_164 PI_TMRD_F1:RW:16:6 */
+ mmio_clrsetbits_32(PI_REG(i, 164), 0x3f << 16,
+ pdram_timing->tmrd << 16);
+ /* PI_164 PI_TDQSCK_MAX_F1:RW:0:4 */
+ mmio_clrsetbits_32(PI_REG(i, 164), 0xf,
+ pdram_timing->tdqsck_max);
+ /* PI_189 PI_TDFI_CTRLUPD_MAX_F1:RW:0:16 */
+ mmio_clrsetbits_32(PI_REG(i, 189), 0xffff,
+ 2 * pdram_timing->trefi);
+ /* PI_190 PI_TDFI_CTRLUPD_INTERVAL_F1:RW:0:32 */
+ mmio_clrsetbits_32(PI_REG(i, 190), 0xffffffff,
+ 20 * pdram_timing->trefi);
+ }
+}
+
+static void gen_rk3399_pi_params(struct timing_related_config *timing_config,
+ struct dram_timing_t *pdram_timing,
+ uint32_t fn)
+{
+ if (fn == 0)
+ gen_rk3399_pi_params_f0(timing_config, pdram_timing);
+ else
+ gen_rk3399_pi_params_f1(timing_config, pdram_timing);
+}
+
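+/*
+ * Set or clear the ODT enable bit in the per-data-slice PHY drive/ODT
+ * control registers of every channel (the two adjacent bits are always
+ * written as zero here).
+ */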
+static void gen_rk3399_set_odt(uint32_t odt_en)
+{
+ uint32_t drv_odt_val;
+ uint32_t i;
+
+ for (i = 0; i < rk3399_dram_status.timing_config.ch_cnt; i++) {
+ drv_odt_val = (odt_en | (0 << 1) | (0 << 2)) << 16;
+ mmio_clrsetbits_32(PHY_REG(i, 5), 0x7 << 16, drv_odt_val);
+ mmio_clrsetbits_32(PHY_REG(i, 133), 0x7 << 16, drv_odt_val);
+ mmio_clrsetbits_32(PHY_REG(i, 261), 0x7 << 16, drv_odt_val);
+ mmio_clrsetbits_32(PHY_REG(i, 389), 0x7 << 16, drv_odt_val);
+ drv_odt_val = (odt_en | (0 << 1) | (0 << 2)) << 24;
+ mmio_clrsetbits_32(PHY_REG(i, 6), 0x7 << 24, drv_odt_val);
+ mmio_clrsetbits_32(PHY_REG(i, 134), 0x7 << 24, drv_odt_val);
+ mmio_clrsetbits_32(PHY_REG(i, 262), 0x7 << 24, drv_odt_val);
+ mmio_clrsetbits_32(PHY_REG(i, 390), 0x7 << 24, drv_odt_val);
+ }
+}
+
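+/*
+ * At or below PHY_DLL_BYPASS_FREQ the PHY is run with its DLLs bypassed:
+ * the slave delay lines are switched to software master mode with fixed
+ * delays derived from the pad and memory round-trip times, and the write
+ * DQS slave delays are saved on entry and restored when leaving bypass.
+ */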
+static void gen_rk3399_phy_dll_bypass(uint32_t mhz, uint32_t ch,
+ uint32_t index, uint32_t dram_type)
+{
+ uint32_t sw_master_mode = 0;
+ uint32_t rddqs_gate_delay, rddqs_latency, total_delay;
+ uint32_t i;
+
+ if (dram_type == DDR3)
+ total_delay = PI_PAD_DELAY_PS_VALUE;
+ else if (dram_type == LPDDR3)
+ total_delay = PI_PAD_DELAY_PS_VALUE + 2500;
+ else
+ total_delay = PI_PAD_DELAY_PS_VALUE + 1500;
+ /* total_delay + 0.55tck */
+ total_delay += (55 * 10000)/mhz;
+ rddqs_latency = total_delay * mhz / 1000000;
+ total_delay -= rddqs_latency * 1000000 / mhz;
+ rddqs_gate_delay = total_delay * 0x200 * mhz / 1000000;
+ if (mhz <= PHY_DLL_BYPASS_FREQ) {
+ sw_master_mode = 0xc;
+ mmio_setbits_32(PHY_REG(ch, 514), 1);
+ mmio_setbits_32(PHY_REG(ch, 642), 1);
+ mmio_setbits_32(PHY_REG(ch, 770), 1);
+
+ /* setting bypass mode slave delay */
+ for (i = 0; i < 4; i++) {
+ /* wr dq delay = -180deg + (0x60 / 4) * 20ps */
+ mmio_clrsetbits_32(PHY_REG(ch, 1 + 128 * i), 0x7ff << 8,
+ 0x4a0 << 8);
+ /* rd dqs/dq delay = (0x60 / 4) * 20ps */
+ mmio_clrsetbits_32(PHY_REG(ch, 11 + 128 * i), 0x3ff,
+ 0xa0);
+ /* rd rddqs_gate delay */
+ mmio_clrsetbits_32(PHY_REG(ch, 2 + 128 * i), 0x3ff,
+ rddqs_gate_delay);
+ mmio_clrsetbits_32(PHY_REG(ch, 78 + 128 * i), 0xf,
+ rddqs_latency);
+ }
+ for (i = 0; i < 3; i++)
+ /* adr delay */
+ mmio_clrsetbits_32(PHY_REG(ch, 513 + 128 * i),
+ 0x7ff << 16, 0x80 << 16);
+
+ if ((mmio_read_32(PHY_REG(ch, 86)) & 0xc00) == 0) {
+			/*
+			 * The PHY was previously in normal mode, so save
+			 * the wrdqs slave delays before switching to bypass.
+			 */
+ for (i = 0; i < 4; i++) {
+ /* save and clear wr dqs slave delay */
+ wrdqs_delay_val[ch][index][i] = 0x3ff &
+ (mmio_read_32(PHY_REG(ch, 63 + i * 128))
+ >> 16);
+ mmio_clrsetbits_32(PHY_REG(ch, 63 + i * 128),
+ 0x03ff << 16, 0 << 16);
+				/*
+				 * In normal mode the command may be delayed
+				 * one cycle by write leveling, so in bypass
+				 * mode delay dqs by one cycle as well.
+				 */
+ mmio_clrsetbits_32(PHY_REG(ch, 78 + i * 128),
+ 0x07 << 8, 0x1 << 8);
+ }
+ }
+ } else if (mmio_read_32(PHY_REG(ch, 86)) & 0xc00) {
+		/* The PHY was previously in bypass mode; restore the write leveling delays */
+ for (i = 0; i < 4; i++) {
+ mmio_clrsetbits_32(PHY_REG(ch, 63 + i * 128),
+ 0x03ff << 16,
+ (wrdqs_delay_val[ch][index][i] &
+ 0x3ff) << 16);
+ /* resume phy_write_path_lat_add */
+ mmio_clrbits_32(PHY_REG(ch, 78 + i * 128), 0x07 << 8);
+ }
+ }
+
+ /* phy_sw_master_mode_X PHY_86/214/342/470 4bits offset_8 */
+ mmio_clrsetbits_32(PHY_REG(ch, 86), 0xf << 8, sw_master_mode << 8);
+ mmio_clrsetbits_32(PHY_REG(ch, 214), 0xf << 8, sw_master_mode << 8);
+ mmio_clrsetbits_32(PHY_REG(ch, 342), 0xf << 8, sw_master_mode << 8);
+ mmio_clrsetbits_32(PHY_REG(ch, 470), 0xf << 8, sw_master_mode << 8);
+
+ /* phy_adrctl_sw_master_mode PHY_547/675/803 4bits offset_16 */
+ mmio_clrsetbits_32(PHY_REG(ch, 547), 0xf << 16, sw_master_mode << 16);
+ mmio_clrsetbits_32(PHY_REG(ch, 675), 0xf << 16, sw_master_mode << 16);
+ mmio_clrsetbits_32(PHY_REG(ch, 803), 0xf << 16, sw_master_mode << 16);
+}
+
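+/*
+ * Program the per-channel PHY registers for frequency index fn: PLL control,
+ * read gate and latency-adjust delays, read-data-enable timing, per-CS
+ * training enable and, via gen_rk3399_phy_dll_bypass(), the low-frequency
+ * DLL bypass handling.
+ */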
+static void gen_rk3399_phy_params(struct timing_related_config *timing_config,
+ struct drv_odt_lp_config *drv_config,
+ struct dram_timing_t *pdram_timing,
+ uint32_t fn)
+{
+ uint32_t tmp, i, div, j;
+ uint32_t mem_delay_ps, pad_delay_ps, total_delay_ps, delay_frac_ps;
+ uint32_t trpre_min_ps, gate_delay_ps, gate_delay_frac_ps;
+ uint32_t ie_enable, tsel_enable, cas_lat, rddata_en_ie_dly, tsel_adder;
+ uint32_t extra_adder, delta, hs_offset;
+
+ for (i = 0; i < timing_config->ch_cnt; i++) {
+
+ pad_delay_ps = PI_PAD_DELAY_PS_VALUE;
+ ie_enable = PI_IE_ENABLE_VALUE;
+ tsel_enable = PI_TSEL_ENABLE_VALUE;
+
+ mmio_clrsetbits_32(PHY_REG(i, 896), (0x3 << 8) | 1, fn << 8);
+
+ /* PHY_LOW_FREQ_SEL */
+ /* DENALI_PHY_913 1bit offset_0 */
+ if (timing_config->freq > 400)
+ mmio_clrbits_32(PHY_REG(i, 913), 1);
+ else
+ mmio_setbits_32(PHY_REG(i, 913), 1);
+
+ /* PHY_RPTR_UPDATE_x */
+ /* DENALI_PHY_87/215/343/471 4bit offset_16 */
+ tmp = 2500 / (1000000 / pdram_timing->mhz) + 3;
+ if ((2500 % (1000000 / pdram_timing->mhz)) != 0)
+ tmp++;
+ mmio_clrsetbits_32(PHY_REG(i, 87), 0xf << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 215), 0xf << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 343), 0xf << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 471), 0xf << 16, tmp << 16);
+
+ /* PHY_PLL_CTRL */
+ /* DENALI_PHY_911 13bits offset_0 */
+ /* PHY_LP4_BOOT_PLL_CTRL */
+ /* DENALI_PHY_919 13bits offset_0 */
+ tmp = (1 << 12) | (2 << 7) | (1 << 1);
+ mmio_clrsetbits_32(PHY_REG(i, 911), 0x1fff, tmp);
+ mmio_clrsetbits_32(PHY_REG(i, 919), 0x1fff, tmp);
+
+ /* PHY_PLL_CTRL_CA */
+ /* DENALI_PHY_911 13bits offset_16 */
+ /* PHY_LP4_BOOT_PLL_CTRL_CA */
+ /* DENALI_PHY_919 13bits offset_16 */
+ tmp = (2 << 7) | (1 << 5) | (1 << 1);
+ mmio_clrsetbits_32(PHY_REG(i, 911), 0x1fff << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 919), 0x1fff << 16, tmp << 16);
+
+ /* PHY_TCKSRE_WAIT */
+ /* DENALI_PHY_922 4bits offset_24 */
+ if (pdram_timing->mhz <= 400)
+ tmp = 1;
+ else if (pdram_timing->mhz <= 800)
+ tmp = 3;
+ else if (pdram_timing->mhz <= 1000)
+ tmp = 4;
+ else
+ tmp = 5;
+ mmio_clrsetbits_32(PHY_REG(i, 922), 0xf << 24, tmp << 24);
+ /* PHY_CAL_CLK_SELECT_0:RW8:3 */
+ div = pdram_timing->mhz / (2 * 20);
+ for (j = 2, tmp = 1; j <= 128; j <<= 1, tmp++) {
+ if (div < j)
+ break;
+ }
+ mmio_clrsetbits_32(PHY_REG(i, 947), 0x7 << 8, tmp << 8);
+
+ if (timing_config->dram_type == DDR3) {
+ mem_delay_ps = 0;
+ trpre_min_ps = 1000;
+ } else if (timing_config->dram_type == LPDDR4) {
+ mem_delay_ps = 1500;
+ trpre_min_ps = 900;
+ } else if (timing_config->dram_type == LPDDR3) {
+ mem_delay_ps = 2500;
+ trpre_min_ps = 900;
+ } else {
+			ERROR("gen_rk3399_phy_params: unsupported DRAM type\n");
+ return;
+ }
+ total_delay_ps = mem_delay_ps + pad_delay_ps;
+ delay_frac_ps = 1000 * total_delay_ps /
+ (1000000 / pdram_timing->mhz);
+ gate_delay_ps = delay_frac_ps + 1000 - (trpre_min_ps / 2);
+ gate_delay_frac_ps = gate_delay_ps % 1000;
+ tmp = gate_delay_frac_ps * 0x200 / 1000;
+ /* PHY_RDDQS_GATE_SLAVE_DELAY */
+ /* DENALI_PHY_77/205/333/461 10bits offset_16 */
+ mmio_clrsetbits_32(PHY_REG(i, 77), 0x2ff << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 205), 0x2ff << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 333), 0x2ff << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 461), 0x2ff << 16, tmp << 16);
+
+ tmp = gate_delay_ps / 1000;
+ /* PHY_LP4_BOOT_RDDQS_LATENCY_ADJUST */
+ /* DENALI_PHY_10/138/266/394 4bit offset_0 */
+ mmio_clrsetbits_32(PHY_REG(i, 10), 0xf, tmp);
+ mmio_clrsetbits_32(PHY_REG(i, 138), 0xf, tmp);
+ mmio_clrsetbits_32(PHY_REG(i, 266), 0xf, tmp);
+ mmio_clrsetbits_32(PHY_REG(i, 394), 0xf, tmp);
+ /* PHY_GTLVL_LAT_ADJ_START */
+ /* DENALI_PHY_80/208/336/464 4bits offset_16 */
+ tmp = rddqs_delay_ps / (1000000 / pdram_timing->mhz) + 2;
+ mmio_clrsetbits_32(PHY_REG(i, 80), 0xf << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 208), 0xf << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 336), 0xf << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 464), 0xf << 16, tmp << 16);
+
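+		/*
+		 * Convert the IE and TSEL enable times from ps to clock cycles,
+		 * rounding up; the IE based delay is then reduced by one cycle.
+		 */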
+ cas_lat = pdram_timing->cl + PI_ADD_LATENCY;
+ rddata_en_ie_dly = ie_enable / (1000000 / pdram_timing->mhz);
+ if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0)
+ rddata_en_ie_dly++;
+ rddata_en_ie_dly = rddata_en_ie_dly - 1;
+ tsel_adder = tsel_enable / (1000000 / pdram_timing->mhz);
+ if ((tsel_enable % (1000000 / pdram_timing->mhz)) != 0)
+ tsel_adder++;
+ if (rddata_en_ie_dly > tsel_adder)
+ extra_adder = rddata_en_ie_dly - tsel_adder;
+ else
+ extra_adder = 0;
+ delta = cas_lat - rddata_en_ie_dly;
+ if (PI_REGS_DIMM_SUPPORT && PI_DOUBLEFREEK)
+ hs_offset = 2;
+ else
+ hs_offset = 1;
+ if (rddata_en_ie_dly > (cas_lat - 1 - hs_offset))
+ tmp = 0;
+ else if ((delta == 2) || (delta == 1))
+ tmp = rddata_en_ie_dly - 0 - extra_adder;
+ else
+ tmp = extra_adder;
+ /* PHY_LP4_BOOT_RDDATA_EN_TSEL_DLY */
+ /* DENALI_PHY_9/137/265/393 4bit offset_16 */
+ mmio_clrsetbits_32(PHY_REG(i, 9), 0xf << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 137), 0xf << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 265), 0xf << 16, tmp << 16);
+ mmio_clrsetbits_32(PHY_REG(i, 393), 0xf << 16, tmp << 16);
+ /* PHY_RDDATA_EN_TSEL_DLY */
+ /* DENALI_PHY_86/214/342/470 4bit offset_0 */
+ mmio_clrsetbits_32(PHY_REG(i, 86), 0xf, tmp);
+ mmio_clrsetbits_32(PHY_REG(i, 214), 0xf, tmp);
+ mmio_clrsetbits_32(PHY_REG(i, 342), 0xf, tmp);
+ mmio_clrsetbits_32(PHY_REG(i, 470), 0xf, tmp);
+
+ if (tsel_adder > rddata_en_ie_dly)
+ extra_adder = tsel_adder - rddata_en_ie_dly;
+ else
+ extra_adder = 0;
+ if (rddata_en_ie_dly > (cas_lat - 1 - hs_offset))
+ tmp = tsel_adder;
+ else
+ tmp = rddata_en_ie_dly - 0 + extra_adder;
+ /* PHY_LP4_BOOT_RDDATA_EN_DLY */
+ /* DENALI_PHY_9/137/265/393 4bit offset_8 */
+ mmio_clrsetbits_32(PHY_REG(i, 9), 0xf << 8, tmp << 8);
+ mmio_clrsetbits_32(PHY_REG(i, 137), 0xf << 8, tmp << 8);
+ mmio_clrsetbits_32(PHY_REG(i, 265), 0xf << 8, tmp << 8);
+ mmio_clrsetbits_32(PHY_REG(i, 393), 0xf << 8, tmp << 8);
+ /* PHY_RDDATA_EN_DLY */
+ /* DENALI_PHY_85/213/341/469 4bit offset_24 */
+ mmio_clrsetbits_32(PHY_REG(i, 85), 0xf << 24, tmp << 24);
+ mmio_clrsetbits_32(PHY_REG(i, 213), 0xf << 24, tmp << 24);
+ mmio_clrsetbits_32(PHY_REG(i, 341), 0xf << 24, tmp << 24);
+ mmio_clrsetbits_32(PHY_REG(i, 469), 0xf << 24, tmp << 24);
+
+ if (pdram_timing->mhz <= ENPER_CS_TRAINING_FREQ) {
+ /*
+			 * Note: Per-CS training is not supported at speeds
+			 * below 533 MHz. If the PHY is running at less than
+			 * 533 MHz, all phy_per_cs_training_en_X parameters
+			 * must be cleared to 0.
+ */
+
+ /*DENALI_PHY_84/212/340/468 1bit offset_16 */
+ mmio_clrbits_32(PHY_REG(i, 84), 0x1 << 16);
+ mmio_clrbits_32(PHY_REG(i, 212), 0x1 << 16);
+ mmio_clrbits_32(PHY_REG(i, 340), 0x1 << 16);
+ mmio_clrbits_32(PHY_REG(i, 468), 0x1 << 16);
+ } else {
+ mmio_setbits_32(PHY_REG(i, 84), 0x1 << 16);
+ mmio_setbits_32(PHY_REG(i, 212), 0x1 << 16);
+ mmio_setbits_32(PHY_REG(i, 340), 0x1 << 16);
+ mmio_setbits_32(PHY_REG(i, 468), 0x1 << 16);
+ }
+ gen_rk3399_phy_dll_bypass(pdram_timing->mhz, i, fn,
+ timing_config->dram_type);
+ }
+}
+
+static int to_get_clk_index(unsigned int mhz)
+{
+ int pll_cnt, i;
+
+ pll_cnt = ARRAY_SIZE(dpll_rates_table);
+
+	/* Assuming dpll_rates_table is in descending order */
+ for (i = 0; i < pll_cnt; i++) {
+ if (mhz >= dpll_rates_table[i].mhz)
+ break;
+ }
+
+	/* if mhz is lower than the lowest frequency in the table, use the lowest one */
+ if (i == pll_cnt)
+ i = pll_cnt - 1;
+
+ return i;
+}
+
+uint32_t ddr_get_rate(void)
+{
+ uint32_t refdiv, postdiv1, fbdiv, postdiv2;
+
+ refdiv = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1)) & 0x3f;
+ fbdiv = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 0)) & 0xfff;
+ postdiv1 =
+ (mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1)) >> 8) & 0x7;
+ postdiv2 =
+ (mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1)) >> 12) & 0x7;
+
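+	/*
+	 * DPLL rate = 24 MHz * fbdiv / (refdiv * postdiv1 * postdiv2),
+	 * evaluated with integer arithmetic and returned in Hz.
+	 */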
+ return (24 / refdiv * fbdiv / postdiv1 / postdiv2) * 1000 * 1000;
+}
+
+/*
+ * return: bit12: channel 1, external self-refresh
+ * bit11: channel 1, stdby_mode
+ * bit10: channel 1, self-refresh with controller and memory clock gate
+ * bit9: channel 1, self-refresh
+ * bit8: channel 1, power-down
+ *
+ *	   bit4: channel 0, external self-refresh
+ * bit3: channel 0, stdby_mode
+ * bit2: channel 0, self-refresh with controller and memory clock gate
+ * bit1: channel 0, self-refresh
+ * bit0: channel 0, power-down
+ */
+uint32_t exit_low_power(void)
+{
+ uint32_t low_power = 0;
+ uint32_t channel_mask;
+ uint32_t tmp, i;
+
+ channel_mask = (mmio_read_32(PMUGRF_BASE + PMUGRF_OSREG(2)) >> 28) &
+ 0x3;
+ for (i = 0; i < 2; i++) {
+ if (!(channel_mask & (1 << i)))
+ continue;
+
+ /* exit stdby mode */
+ mmio_write_32(CIC_BASE + CIC_CTRL1,
+ (1 << (i + 16)) | (0 << i));
+ /* exit external self-refresh */
+ tmp = i ? 12 : 8;
+ low_power |= ((mmio_read_32(PMU_BASE + PMU_SFT_CON) >> tmp) &
+ 0x1) << (4 + 8 * i);
+ mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, 1 << tmp);
+ while (!(mmio_read_32(PMU_BASE + PMU_DDR_SREF_ST) & (1 << i)))
+ ;
+ /* exit auto low-power */
+ mmio_clrbits_32(CTL_REG(i, 101), 0x7);
+ /* lp_cmd to exit */
+ if (((mmio_read_32(CTL_REG(i, 100)) >> 24) & 0x7f) !=
+ 0x40) {
+ while (mmio_read_32(CTL_REG(i, 200)) & 0x1)
+ ;
+ mmio_clrsetbits_32(CTL_REG(i, 93), 0xffu << 24,
+ 0x69 << 24);
+ while (((mmio_read_32(CTL_REG(i, 100)) >> 24) & 0x7f) !=
+ 0x40)
+ ;
+ }
+ }
+ return low_power;
+}
+
+void resume_low_power(uint32_t low_power)
+{
+ uint32_t channel_mask;
+ uint32_t tmp, i, val;
+
+ channel_mask = (mmio_read_32(PMUGRF_BASE + PMUGRF_OSREG(2)) >> 28) &
+ 0x3;
+ for (i = 0; i < 2; i++) {
+ if (!(channel_mask & (1 << i)))
+ continue;
+
+ /* resume external self-refresh */
+ tmp = i ? 12 : 8;
+ val = (low_power >> (4 + 8 * i)) & 0x1;
+ mmio_setbits_32(PMU_BASE + PMU_SFT_CON, val << tmp);
+ /* resume auto low-power */
+ val = (low_power >> (8 * i)) & 0x7;
+ mmio_setbits_32(CTL_REG(i, 101), val);
+ /* resume stdby mode */
+ val = (low_power >> (3 + 8 * i)) & 0x1;
+ mmio_write_32(CIC_BASE + CIC_CTRL1,
+ (1 << (i + 16)) | (val << i));
+ }
+}
+
+static void dram_low_power_config(void)
+{
+ uint32_t tmp, i;
+ uint32_t ch_cnt = rk3399_dram_status.timing_config.ch_cnt;
+ uint32_t dram_type = rk3399_dram_status.timing_config.dram_type;
+
+ if (dram_type == DDR3)
+ tmp = (2 << 16) | (0x7 << 8);
+ else
+ tmp = (3 << 16) | (0x7 << 8);
+
+ for (i = 0; i < ch_cnt; i++)
+ mmio_clrsetbits_32(CTL_REG(i, 101), 0x70f0f, tmp);
+
+ /* standby idle */
+ mmio_write_32(CIC_BASE + CIC_CG_WAIT_TH, 0x640008);
+
+ if (ch_cnt == 2) {
+ mmio_write_32(GRF_BASE + GRF_DDRC1_CON1,
+ (((0x1<<4) | (0x1<<5) | (0x1<<6) |
+ (0x1<<7)) << 16) |
+ ((0x1<<4) | (0x0<<5) | (0x1<<6) | (0x1<<7)));
+ mmio_write_32(CIC_BASE + CIC_CTRL1, 0x002a0028);
+ }
+
+ mmio_write_32(GRF_BASE + GRF_DDRC0_CON1,
+ (((0x1<<4) | (0x1<<5) | (0x1<<6) | (0x1<<7)) << 16) |
+ ((0x1<<4) | (0x0<<5) | (0x1<<6) | (0x1<<7)));
+ mmio_write_32(CIC_BASE + CIC_CTRL1, 0x00150014);
+}
+
+void dram_dfs_init(void)
+{
+ uint32_t trefi0, trefi1, boot_freq;
+ uint32_t rddqs_adjust, rddqs_slave;
+
+ /* get sdram config for os reg */
+ get_dram_drv_odt_val(sdram_config.dramtype,
+ &rk3399_dram_status.drv_odt_lp_cfg);
+ sdram_timing_cfg_init(&rk3399_dram_status.timing_config,
+ &sdram_config,
+ &rk3399_dram_status.drv_odt_lp_cfg);
+
+ trefi0 = ((mmio_read_32(CTL_REG(0, 48)) >> 16) & 0xffff) + 8;
+ trefi1 = ((mmio_read_32(CTL_REG(0, 49)) >> 16) & 0xffff) + 8;
+
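+	/*
+	 * Recover the frequency of each index from the programmed tREFI,
+	 * which counts cycles of a 3.9 us refresh interval (7.8 us for DDR3,
+	 * hence the divide by two below).
+	 */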
+ rk3399_dram_status.index_freq[0] = trefi0 * 10 / 39;
+ rk3399_dram_status.index_freq[1] = trefi1 * 10 / 39;
+ rk3399_dram_status.current_index =
+ (mmio_read_32(CTL_REG(0, 111)) >> 16) & 0x3;
+ if (rk3399_dram_status.timing_config.dram_type == DDR3) {
+ rk3399_dram_status.index_freq[0] /= 2;
+ rk3399_dram_status.index_freq[1] /= 2;
+ }
+ boot_freq =
+ rk3399_dram_status.index_freq[rk3399_dram_status.current_index];
+ boot_freq = dpll_rates_table[to_get_clk_index(boot_freq)].mhz;
+ rk3399_dram_status.boot_freq = boot_freq;
+ rk3399_dram_status.index_freq[rk3399_dram_status.current_index] =
+ boot_freq;
+ rk3399_dram_status.index_freq[(rk3399_dram_status.current_index + 1) &
+ 0x1] = 0;
+ rk3399_dram_status.low_power_stat = 0;
+ /*
+	 * The following registers decide whether the NOC stalls access
+	 * requests or returns an error while the NOC is being idled. When
+	 * doing DDR frequency scaling in the M0 or DCF, the NOC must stall
+	 * the requests; if it returned an error instead, the CPU could take
+	 * a data abort while the DDR frequency is changing. This only needs
+	 * to be set once, so it is initialized here in dram_dfs_init().
+ */
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(0), 0xffffffff);
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(1), 0xffffffff);
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(2), 0xffffffff);
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(3), 0xffffffff);
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(4), 0x70007000);
+
+ /* Disable multicast */
+ mmio_clrbits_32(PHY_REG(0, 896), 1);
+ mmio_clrbits_32(PHY_REG(1, 896), 1);
+ dram_low_power_config();
+
+ /*
+	 * If boot_freq is not in bypass mode, rddqs_delay_ps can be
+	 * taken from the gate-training result.
+ */
+ if (((mmio_read_32(PHY_REG(0, 86)) >> 8) & 0xf) != 0xc) {
+
+ /*
+		 * Select the PHY frequency set indicated by current_index
+		 * so that the gate-training result can be read back from
+		 * the registers.
+ */
+ mmio_clrsetbits_32(PHY_REG(0, 896), 0x3 << 8,
+ rk3399_dram_status.current_index << 8);
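+		/*
+		 * The slave delay is in 1/512ths of a cycle and the latency
+		 * adjust in whole cycles; convert both to ps and subtract
+		 * half a cycle.
+		 */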
+ rddqs_slave = (mmio_read_32(PHY_REG(0, 77)) >> 16) & 0x3ff;
+ rddqs_slave = rddqs_slave * 1000000 / boot_freq / 512;
+
+ rddqs_adjust = mmio_read_32(PHY_REG(0, 78)) & 0xf;
+ rddqs_adjust = rddqs_adjust * 1000000 / boot_freq;
+ rddqs_delay_ps = rddqs_slave + rddqs_adjust -
+ (1000000 / boot_freq / 2);
+ } else {
+ rddqs_delay_ps = 3500;
+ }
+}
+
+/*
+ * arg0: bit0-7: sr_idle; bit8-15: sr_mc_gate_idle; bit16-31: standby_idle
+ * arg1: bit0-11: pd_idle; bit16-27: srpd_lite_idle
+ * arg2: bit0: odt enable
+ */
+uint32_t dram_set_odt_pd(uint32_t arg0, uint32_t arg1, uint32_t arg2)
+{
+ struct drv_odt_lp_config *lp_cfg = &rk3399_dram_status.drv_odt_lp_cfg;
+ uint32_t *low_power = &rk3399_dram_status.low_power_stat;
+ uint32_t dram_type, ch_count, pd_tmp, sr_tmp, i;
+
+ dram_type = rk3399_dram_status.timing_config.dram_type;
+ ch_count = rk3399_dram_status.timing_config.ch_cnt;
+
+ lp_cfg->sr_idle = arg0 & 0xff;
+ lp_cfg->sr_mc_gate_idle = (arg0 >> 8) & 0xff;
+ lp_cfg->standby_idle = (arg0 >> 16) & 0xffff;
+ lp_cfg->pd_idle = arg1 & 0xfff;
+ lp_cfg->srpd_lite_idle = (arg1 >> 16) & 0xfff;
+
+ rk3399_dram_status.timing_config.odt = arg2 & 0x1;
+
+ exit_low_power();
+
+ *low_power = 0;
+
+ /* pd_idle en */
+ if (lp_cfg->pd_idle)
+ *low_power |= ((1 << 0) | (1 << 8));
+ /* sr_idle en srpd_lite_idle */
+ if (lp_cfg->sr_idle | lp_cfg->srpd_lite_idle)
+ *low_power |= ((1 << 1) | (1 << 9));
+ /* sr_mc_gate_idle */
+ if (lp_cfg->sr_mc_gate_idle)
+ *low_power |= ((1 << 2) | (1 << 10));
+ /* standbyidle */
+ if (lp_cfg->standby_idle) {
+ if (rk3399_dram_status.timing_config.ch_cnt == 2)
+ *low_power |= ((1 << 3) | (1 << 11));
+ else
+ *low_power |= (1 << 3);
+ }
+
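+	/*
+	 * Only LPDDR4 keeps the srpd_lite_idle field (bits 16-27); the other
+	 * DRAM types program pd_idle alone.
+	 */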
+ pd_tmp = arg1;
+ if (dram_type != LPDDR4)
+ pd_tmp = arg1 & 0xfff;
+ sr_tmp = arg0 & 0xffff;
+ for (i = 0; i < ch_count; i++) {
+ mmio_write_32(CTL_REG(i, 102), pd_tmp);
+ mmio_clrsetbits_32(CTL_REG(i, 103), 0xffff, sr_tmp);
+ }
+ mmio_write_32(CIC_BASE + CIC_IDLE_TH, (arg0 >> 16) & 0xffff);
+
+ return 0;
+}
+
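+/*
+ * Hand the target DPLL settings and the frequency index over to the M0
+ * firmware through its parameter area before the M0 is started.
+ */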
+static void m0_configure_ddr(struct pll_div pll_div, uint32_t ddr_index)
+{
+ mmio_write_32(M0_PARAM_ADDR + PARAM_DPLL_CON0, FBDIV(pll_div.fbdiv));
+ mmio_write_32(M0_PARAM_ADDR + PARAM_DPLL_CON1,
+ POSTDIV2(pll_div.postdiv2) | POSTDIV1(pll_div.postdiv1) |
+ REFDIV(pll_div.refdiv));
+
+ mmio_write_32(M0_PARAM_ADDR + PARAM_DRAM_FREQ, pll_div.mhz);
+
+ mmio_write_32(M0_PARAM_ADDR + PARAM_FREQ_SELECT, ddr_index << 4);
+ dmbst();
+ m0_configure_execute_addr(M0_BINCODE_BASE);
+}
+
+static uint32_t prepare_ddr_timing(uint32_t mhz)
+{
+ uint32_t index;
+ struct dram_timing_t dram_timing;
+
+ rk3399_dram_status.timing_config.freq = mhz;
+
+ if (mhz < 300)
+ rk3399_dram_status.timing_config.dllbp = 1;
+ else
+ rk3399_dram_status.timing_config.dllbp = 0;
+
+ if (rk3399_dram_status.timing_config.odt == 1)
+ gen_rk3399_set_odt(1);
+
+ index = (rk3399_dram_status.current_index + 1) & 0x1;
+
+ /*
+	 * Check whether a gate-training timing is available for the
+	 * target frequency.
+ */
+ dram_get_parameter(&rk3399_dram_status.timing_config, &dram_timing);
+ gen_rk3399_ctl_params(&rk3399_dram_status.timing_config,
+ &dram_timing, index);
+ gen_rk3399_pi_params(&rk3399_dram_status.timing_config,
+ &dram_timing, index);
+ gen_rk3399_phy_params(&rk3399_dram_status.timing_config,
+ &rk3399_dram_status.drv_odt_lp_cfg,
+ &dram_timing, index);
+ rk3399_dram_status.index_freq[index] = mhz;
+
+ return index;
+}
+
+uint32_t ddr_set_rate(uint32_t hz)
+{
+ uint32_t low_power, index, ddr_index;
+ uint32_t mhz = hz / (1000 * 1000);
+
+ if (mhz ==
+ rk3399_dram_status.index_freq[rk3399_dram_status.current_index])
+ return mhz;
+
+ index = to_get_clk_index(mhz);
+ mhz = dpll_rates_table[index].mhz;
+
+ ddr_index = prepare_ddr_timing(mhz);
+ gen_rk3399_enable_training(rk3399_dram_status.timing_config.ch_cnt,
+ mhz);
+ if (ddr_index > 1)
+ goto out;
+
+ /*
+ * Make sure the clock is enabled. The M0 clocks should be on all of the
+ * time during S0.
+ */
+ m0_configure_ddr(dpll_rates_table[index], ddr_index);
+ m0_start();
+ m0_wait_done();
+ m0_stop();
+
+ if (rk3399_dram_status.timing_config.odt == 0)
+ gen_rk3399_set_odt(0);
+
+ rk3399_dram_status.current_index = ddr_index;
+ low_power = rk3399_dram_status.low_power_stat;
+ resume_low_power(low_power);
+out:
+ gen_rk3399_disable_training(rk3399_dram_status.timing_config.ch_cnt);
+ return mhz;
+}
+
+uint32_t ddr_round_rate(uint32_t hz)
+{
+ int index;
+ uint32_t mhz = hz / (1000 * 1000);
+
+ index = to_get_clk_index(mhz);
+
+ return dpll_rates_table[index].mhz * 1000 * 1000;
+}
+
+void ddr_prepare_for_sys_suspend(void)
+{
+ uint32_t mhz =
+ rk3399_dram_status.index_freq[rk3399_dram_status.current_index];
+
+ /*
+ * If we're not currently at the boot (assumed highest) frequency, we
+	 * need to change frequencies to configure our current index.
+ */
+ rk3399_suspend_status.freq = mhz;
+ exit_low_power();
+ rk3399_suspend_status.low_power_stat =
+ rk3399_dram_status.low_power_stat;
+ rk3399_suspend_status.odt = rk3399_dram_status.timing_config.odt;
+ rk3399_dram_status.low_power_stat = 0;
+ rk3399_dram_status.timing_config.odt = 1;
+ if (mhz != rk3399_dram_status.boot_freq)
+ ddr_set_rate(rk3399_dram_status.boot_freq * 1000 * 1000);
+
+ /*
+ * This will configure the other index to be the same frequency as the
+ * current one. We retrain both indices on resume, so both have to be
+	 * set up for the same frequency.
+ */
+ prepare_ddr_timing(rk3399_dram_status.boot_freq);
+}
+
+void ddr_prepare_for_sys_resume(void)
+{
+ /* Disable multicast */
+ mmio_clrbits_32(PHY_REG(0, 896), 1);
+ mmio_clrbits_32(PHY_REG(1, 896), 1);
+
+ /* The suspend code changes the current index, so reset it now. */
+ rk3399_dram_status.current_index =
+ (mmio_read_32(CTL_REG(0, 111)) >> 16) & 0x3;
+ rk3399_dram_status.low_power_stat =
+ rk3399_suspend_status.low_power_stat;
+ rk3399_dram_status.timing_config.odt = rk3399_suspend_status.odt;
+
+ /*
+ * Set the saved frequency from suspend if it's different than the
+ * current frequency.
+ */
+ if (rk3399_suspend_status.freq !=
+ rk3399_dram_status.index_freq[rk3399_dram_status.current_index]) {
+ ddr_set_rate(rk3399_suspend_status.freq * 1000 * 1000);
+ return;
+ }
+
+ gen_rk3399_set_odt(rk3399_dram_status.timing_config.odt);
+ resume_low_power(rk3399_dram_status.low_power_stat);
+}
diff --git a/plat/rockchip/rk3399/drivers/dram/dfs.h b/plat/rockchip/rk3399/drivers/dram/dfs.h
new file mode 100644
index 0000000..172b2a7
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dfs.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DFS_H
+#define DFS_H
+
+#include <stdint.h>
+
+struct rk3399_sdram_default_config {
+ unsigned char bl;
+ /* 1:auto precharge, 0:never auto precharge */
+ unsigned char ap;
+ /* dram driver strength */
+ unsigned char dramds;
+	/* dram ODT; if odt = 0, this parameter is invalid */
+	unsigned char dramodt;
+	/* ca ODT; if odt = 0, this parameter is invalid.
+	 * Only used by LPDDR4.
+ */
+ unsigned char caodt;
+ unsigned char burst_ref_cnt;
+ /* zqcs period, unit(s) */
+ unsigned char zqcsi;
+};
+
+struct drv_odt_lp_config {
+ uint32_t pd_idle;
+ uint32_t sr_idle;
+ uint32_t sr_mc_gate_idle;
+ uint32_t srpd_lite_idle;
+ uint32_t standby_idle;
+ uint32_t odt_en;
+
+ uint32_t dram_side_drv;
+ uint32_t dram_side_dq_odt;
+ uint32_t dram_side_ca_odt;
+};
+
+uint32_t ddr_set_rate(uint32_t hz);
+uint32_t ddr_round_rate(uint32_t hz);
+uint32_t ddr_get_rate(void);
+uint32_t dram_set_odt_pd(uint32_t arg0, uint32_t arg1, uint32_t arg2);
+void dram_dfs_init(void);
+void ddr_prepare_for_sys_suspend(void);
+void ddr_prepare_for_sys_resume(void);
+
+#endif /* DFS_H */
diff --git a/plat/rockchip/rk3399/drivers/dram/dram.c b/plat/rockchip/rk3399/drivers/dram/dram.c
new file mode 100644
index 0000000..42b6294
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dram.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <dram.h>
+#include <plat_private.h>
+#include <rk3399_def.h>
+#include <secure.h>
+#include <soc.h>
+
+__pmusramdata struct rk3399_sdram_params sdram_config;
+
+void dram_init(void)
+{
+ uint32_t os_reg2_val, i;
+
+ os_reg2_val = mmio_read_32(PMUGRF_BASE + PMUGRF_OSREG(2));
+ sdram_config.dramtype = SYS_REG_DEC_DDRTYPE(os_reg2_val);
+ sdram_config.num_channels = SYS_REG_DEC_NUM_CH(os_reg2_val);
+ sdram_config.stride = (mmio_read_32(SGRF_BASE + SGRF_SOC_CON3_7(4)) >>
+ 10) & 0x1f;
+
+ for (i = 0; i < 2; i++) {
+ struct rk3399_sdram_channel *ch = &sdram_config.ch[i];
+ struct rk3399_msch_timings *noc = &ch->noc_timings;
+
+ if (!(SYS_REG_DEC_CHINFO(os_reg2_val, i)))
+ continue;
+
+ ch->rank = SYS_REG_DEC_RANK(os_reg2_val, i);
+ ch->col = SYS_REG_DEC_COL(os_reg2_val, i);
+ ch->bk = SYS_REG_DEC_BK(os_reg2_val, i);
+ ch->bw = SYS_REG_DEC_BW(os_reg2_val, i);
+ ch->dbw = SYS_REG_DEC_DBW(os_reg2_val, i);
+ ch->row_3_4 = SYS_REG_DEC_ROW_3_4(os_reg2_val, i);
+ ch->cs0_row = SYS_REG_DEC_CS0_ROW(os_reg2_val, i);
+ ch->cs1_row = SYS_REG_DEC_CS1_ROW(os_reg2_val, i);
+ ch->ddrconfig = mmio_read_32(MSCH_BASE(i) + MSCH_DEVICECONF);
+
+ noc->ddrtiminga0.d32 = mmio_read_32(MSCH_BASE(i) +
+ MSCH_DDRTIMINGA0);
+ noc->ddrtimingb0.d32 = mmio_read_32(MSCH_BASE(i) +
+ MSCH_DDRTIMINGB0);
+ noc->ddrtimingc0.d32 = mmio_read_32(MSCH_BASE(i) +
+ MSCH_DDRTIMINGC0);
+ noc->devtodev0.d32 = mmio_read_32(MSCH_BASE(i) +
+ MSCH_DEVTODEV0);
+ noc->ddrmode.d32 = mmio_read_32(MSCH_BASE(i) + MSCH_DDRMODE);
+ noc->agingx0 = mmio_read_32(MSCH_BASE(i) + MSCH_AGINGX0);
+ }
+}
diff --git a/plat/rockchip/rk3399/drivers/dram/dram.h b/plat/rockchip/rk3399/drivers/dram/dram.h
new file mode 100644
index 0000000..5572b16
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dram.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DRAM_H
+#define DRAM_H
+
+#include <stdint.h>
+
+#include <dram_regs.h>
+#include <plat_private.h>
+
+enum {
+ DDR3 = 3,
+ LPDDR2 = 5,
+ LPDDR3 = 6,
+ LPDDR4 = 7,
+ UNUSED = 0xff
+};
+
+struct rk3399_ddr_pctl_regs {
+ uint32_t denali_ctl[CTL_REG_NUM];
+};
+
+struct rk3399_ddr_publ_regs {
+ /*
+ * PHY registers from 0 to 90 for slice1.
+ * These are used to restore slice1-4 on resume.
+ */
+ uint32_t phy0[91];
+ /*
+ * PHY registers from 512 to 895.
+ * Only registers 0-37 of each 128 register range are used.
+ */
+ uint32_t phy512[3][38];
+ uint32_t phy896[63];
+};
+
+struct rk3399_ddr_pi_regs {
+ uint32_t denali_pi[PI_REG_NUM];
+};
+union noc_ddrtiminga0 {
+ uint32_t d32;
+ struct {
+ unsigned acttoact : 6;
+ unsigned reserved0 : 2;
+ unsigned rdtomiss : 6;
+ unsigned reserved1 : 2;
+ unsigned wrtomiss : 6;
+ unsigned reserved2 : 2;
+ unsigned readlatency : 8;
+ } b;
+};
+
+union noc_ddrtimingb0 {
+ uint32_t d32;
+ struct {
+ unsigned rdtowr : 5;
+ unsigned reserved0 : 3;
+ unsigned wrtord : 5;
+ unsigned reserved1 : 3;
+ unsigned rrd : 4;
+ unsigned reserved2 : 4;
+ unsigned faw : 6;
+ unsigned reserved3 : 2;
+ } b;
+};
+
+union noc_ddrtimingc0 {
+ uint32_t d32;
+ struct {
+ unsigned burstpenalty : 4;
+ unsigned reserved0 : 4;
+ unsigned wrtomwr : 6;
+ unsigned reserved1 : 18;
+ } b;
+};
+
+union noc_devtodev0 {
+ uint32_t d32;
+ struct {
+ unsigned busrdtord : 3;
+ unsigned reserved0 : 1;
+ unsigned busrdtowr : 3;
+ unsigned reserved1 : 1;
+ unsigned buswrtord : 3;
+ unsigned reserved2 : 1;
+ unsigned buswrtowr : 3;
+ unsigned reserved3 : 17;
+ } b;
+};
+
+union noc_ddrmode {
+ uint32_t d32;
+ struct {
+ unsigned autoprecharge : 1;
+ unsigned bypassfiltering : 1;
+ unsigned fawbank : 1;
+ unsigned burstsize : 2;
+ unsigned mwrsize : 2;
+ unsigned reserved2 : 1;
+ unsigned forceorder : 8;
+ unsigned forceorderstate : 8;
+ unsigned reserved3 : 8;
+ } b;
+};
+
+struct rk3399_msch_timings {
+ union noc_ddrtiminga0 ddrtiminga0;
+ union noc_ddrtimingb0 ddrtimingb0;
+ union noc_ddrtimingc0 ddrtimingc0;
+ union noc_devtodev0 devtodev0;
+ union noc_ddrmode ddrmode;
+ uint32_t agingx0;
+};
+
+struct rk3399_sdram_channel {
+ unsigned char rank;
+ /* col = 0, means this channel is invalid */
+ unsigned char col;
+ /* 3:8bank, 2:4bank */
+ unsigned char bk;
+ /* channel buswidth, 2:32bit, 1:16bit, 0:8bit */
+ unsigned char bw;
+ /* die buswidth, 2:32bit, 1:16bit, 0:8bit */
+ unsigned char dbw;
+ /* row_3_4 = 1: 6Gb or 12Gb die
+ * row_3_4 = 0: normal die, power of 2
+ */
+ unsigned char row_3_4;
+ unsigned char cs0_row;
+ unsigned char cs1_row;
+ uint32_t ddrconfig;
+ struct rk3399_msch_timings noc_timings;
+};
+
+struct rk3399_sdram_params {
+ struct rk3399_sdram_channel ch[2];
+ uint32_t ddr_freq;
+ unsigned char dramtype;
+ unsigned char num_channels;
+ unsigned char stride;
+ unsigned char odt;
+ struct rk3399_ddr_pctl_regs pctl_regs;
+ struct rk3399_ddr_pi_regs pi_regs;
+ struct rk3399_ddr_publ_regs phy_regs;
+ uint32_t rx_cal_dqs[2][4];
+};
+
+extern struct rk3399_sdram_params sdram_config;
+
+void dram_init(void);
+
+#endif /* DRAM_H */
diff --git a/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.c b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.c
new file mode 100644
index 0000000..3cdb7a2
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.c
@@ -0,0 +1,1324 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include <lib/utils.h>
+
+#include <dram.h>
+
+#include "dram_spec_timing.h"
+
+static const uint8_t ddr3_cl_cwl[][7] = {
+ /*
+ * speed 0~330 331 ~ 400 401 ~ 533 534~666 667~800 801~933 934~1066
+ * tCK>3 2.5~3 1.875~2.5 1.5~1.875 1.25~1.5 1.07~1.25 0.938~1.07
+ * cl<<4, cwl cl<<4, cwl cl<<4, cwl
+ */
+ /* DDR3_800D (5-5-5) */
+ {((5 << 4) | 5), ((5 << 4) | 5), 0, 0, 0, 0, 0},
+ /* DDR3_800E (6-6-6) */
+ {((5 << 4) | 5), ((6 << 4) | 5), 0, 0, 0, 0, 0},
+ /* DDR3_1066E (6-6-6) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), 0, 0, 0, 0},
+ /* DDR3_1066F (7-7-7) */
+ {((5 << 4) | 5), ((6 << 4) | 5), ((7 << 4) | 6), 0, 0, 0, 0},
+ /* DDR3_1066G (8-8-8) */
+ {((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), 0, 0, 0, 0},
+ /* DDR3_1333F (7-7-7) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((7 << 4) | 7),
+ 0, 0, 0},
+ /* DDR3_1333G (8-8-8) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((8 << 4) | 7),
+ 0, 0, 0},
+ /* DDR3_1333H (9-9-9) */
+ {((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((9 << 4) | 7),
+ 0, 0, 0},
+ /* DDR3_1333J (10-10-10) */
+ {((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7),
+ 0, 0, 0},
+ /* DDR3_1600G (8-8-8) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((7 << 4) | 7),
+ ((8 << 4) | 8), 0, 0},
+ /* DDR3_1600H (9-9-9) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((8 << 4) | 7),
+ ((9 << 4) | 8), 0, 0},
+ /* DDR3_1600J (10-10-10) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7),
+ ((10 << 4) | 8), 0, 0},
+ /* DDR3_1600K (11-11-11) */
+ {((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7),
+ ((11 << 4) | 8), 0, 0},
+ /* DDR3_1866J (10-10-10) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((8 << 4) | 7),
+ ((9 << 4) | 8), ((11 << 4) | 9), 0},
+ /* DDR3_1866K (11-11-11) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((8 << 4) | 7),
+ ((10 << 4) | 8), ((11 << 4) | 9), 0},
+ /* DDR3_1866L (12-12-12) */
+ {((6 << 4) | 5), ((6 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7),
+ ((11 << 4) | 8), ((12 << 4) | 9), 0},
+ /* DDR3_1866M (13-13-13) */
+ {((6 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7),
+ ((11 << 4) | 8), ((13 << 4) | 9), 0},
+ /* DDR3_2133K (11-11-11) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((7 << 4) | 7),
+ ((9 << 4) | 8), ((10 << 4) | 9), ((11 << 4) | 10)},
+ /* DDR3_2133L (12-12-12) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((8 << 4) | 7),
+ ((9 << 4) | 8), ((11 << 4) | 9), ((12 << 4) | 10)},
+ /* DDR3_2133M (13-13-13) */
+ {((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7),
+ ((10 << 4) | 8), ((12 << 4) | 9), ((13 << 4) | 10)},
+ /* DDR3_2133N (14-14-14) */
+ {((6 << 4) | 5), ((6 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7),
+ ((11 << 4) | 8), ((13 << 4) | 9), ((14 << 4) | 10)},
+ /* DDR3_DEFAULT */
+ {((6 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7),
+ ((11 << 4) | 8), ((13 << 4) | 9), ((14 << 4) | 10)}
+};
+
+static const uint16_t ddr3_trc_tfaw[] = {
+ /* tRC tFAW */
+ ((50 << 8) | 50), /* DDR3_800D (5-5-5) */
+ ((53 << 8) | 50), /* DDR3_800E (6-6-6) */
+
+ ((49 << 8) | 50), /* DDR3_1066E (6-6-6) */
+ ((51 << 8) | 50), /* DDR3_1066F (7-7-7) */
+ ((53 << 8) | 50), /* DDR3_1066G (8-8-8) */
+
+ ((47 << 8) | 45), /* DDR3_1333F (7-7-7) */
+ ((48 << 8) | 45), /* DDR3_1333G (8-8-8) */
+ ((50 << 8) | 45), /* DDR3_1333H (9-9-9) */
+ ((51 << 8) | 45), /* DDR3_1333J (10-10-10) */
+
+ ((45 << 8) | 40), /* DDR3_1600G (8-8-8) */
+ ((47 << 8) | 40), /* DDR3_1600H (9-9-9)*/
+ ((48 << 8) | 40), /* DDR3_1600J (10-10-10) */
+ ((49 << 8) | 40), /* DDR3_1600K (11-11-11) */
+
+ ((45 << 8) | 35), /* DDR3_1866J (10-10-10) */
+ ((46 << 8) | 35), /* DDR3_1866K (11-11-11) */
+ ((47 << 8) | 35), /* DDR3_1866L (12-12-12) */
+ ((48 << 8) | 35), /* DDR3_1866M (13-13-13) */
+
+ ((44 << 8) | 35), /* DDR3_2133K (11-11-11) */
+ ((45 << 8) | 35), /* DDR3_2133L (12-12-12) */
+ ((46 << 8) | 35), /* DDR3_2133M (13-13-13) */
+ ((47 << 8) | 35), /* DDR3_2133N (14-14-14) */
+
+ ((53 << 8) | 50) /* DDR3_DEFAULT */
+};
+
+static uint32_t get_max_speed_rate(struct timing_related_config *timing_config)
+{
+ if (timing_config->ch_cnt > 1)
+ return max(timing_config->dram_info[0].speed_rate,
+ timing_config->dram_info[1].speed_rate);
+ else
+ return timing_config->dram_info[0].speed_rate;
+}
+
+static uint32_t
+get_max_die_capability(struct timing_related_config *timing_config)
+{
+ uint32_t die_cap = 0;
+ uint32_t cs, ch;
+
+ for (ch = 0; ch < timing_config->ch_cnt; ch++) {
+ for (cs = 0; cs < timing_config->dram_info[ch].cs_cnt; cs++) {
+ die_cap = max(die_cap,
+ timing_config->
+ dram_info[ch].per_die_capability[cs]);
+ }
+ }
+ return die_cap;
+}
+
+/* tRSTL, 100ns */
+#define DDR3_TRSTL (100)
+/* trsth, 500us */
+#define DDR3_TRSTH (500000)
+/* trefi, 7.8us */
+#define DDR3_TREFI_7_8_US (7800)
+/* tWR, 15ns */
+#define DDR3_TWR (15)
+/* tRTP, max(4 tCK,7.5ns) */
+#define DDR3_TRTP (7)
+/* tRRD = max(4nCK, 10ns) */
+#define DDR3_TRRD (10)
+/* tCK */
+#define DDR3_TCCD (4)
+/*tWTR, max(4 tCK,7.5ns)*/
+#define DDR3_TWTR (7)
+/* tCK */
+#define DDR3_TRTW (0)
+/* tRAS, 37.5ns(400MHz) 37.5ns(533MHz) */
+#define DDR3_TRAS (37)
+/* ns */
+#define DDR3_TRFC_512MBIT (90)
+/* ns */
+#define DDR3_TRFC_1GBIT (110)
+/* ns */
+#define DDR3_TRFC_2GBIT (160)
+/* ns */
+#define DDR3_TRFC_4GBIT (300)
+/* ns */
+#define DDR3_TRFC_8GBIT (350)
+
+/*pd and sr*/
+#define DDR3_TXP (7) /* tXP, max(3 tCK, 7.5ns)( < 933MHz) */
+#define DDR3_TXPDLL (24) /* tXPDLL, max(10 tCK, 24ns) */
+#define DDR3_TDLLK (512) /* tXSR, tDLLK=512 tCK */
+#define DDR3_TCKE_400MHZ (7) /* tCKE, max(3 tCK,7.5ns)(400MHz) */
+#define DDR3_TCKE_533MHZ (6) /* tCKE, max(3 tCK,5.625ns)(533MHz) */
+#define DDR3_TCKSRE (10) /* tCKSRX, max(5 tCK, 10ns) */
+
+/*mode register timing*/
+#define DDR3_TMOD (15) /* tMOD, max(12 tCK,15ns) */
+#define DDR3_TMRD (4) /* tMRD, 4 tCK */
+
+/* ZQ */
+#define DDR3_TZQINIT (640) /* tZQinit, max(512 tCK, 640ns) */
+#define DDR3_TZQCS (80) /* tZQCS, max(64 tCK, 80ns) */
+#define DDR3_TZQOPER (320) /* tZQoper, max(256 tCK, 320ns) */
+
+/* Write leveling */
+#define DDR3_TWLMRD (40) /* tCK */
+#define DDR3_TWLO (9) /* max 7.5ns */
+#define DDR3_TWLDQSEN (25) /* tCK */
+
+/*
+ * Description: based on the input "timing_config", calculate all the
+ *              DDR3 spec timings and store them in "pdram_timing".
+ * parameters:
+ * input: timing_config
+ * output: pdram_timing
+ */
+static void ddr3_get_parameter(struct timing_related_config *timing_config,
+ struct dram_timing_t *pdram_timing)
+{
+ uint32_t nmhz = timing_config->freq;
+ uint32_t ddr_speed_bin = get_max_speed_rate(timing_config);
+ uint32_t ddr_capability_per_die = get_max_die_capability(timing_config);
+ uint32_t tmp;
+
+ zeromem((void *)pdram_timing, sizeof(struct dram_timing_t));
+ pdram_timing->mhz = nmhz;
+ pdram_timing->al = 0;
+ pdram_timing->bl = timing_config->bl;
+ if (nmhz <= 330)
+ tmp = 0;
+ else if (nmhz <= 400)
+ tmp = 1;
+ else if (nmhz <= 533)
+ tmp = 2;
+ else if (nmhz <= 666)
+ tmp = 3;
+ else if (nmhz <= 800)
+ tmp = 4;
+ else if (nmhz <= 933)
+ tmp = 5;
+ else
+ tmp = 6;
+
+	/* when the DLL is bypassed, cl = cwl = 6 */
+ if (nmhz < 300) {
+ pdram_timing->cl = 6;
+ pdram_timing->cwl = 6;
+ } else {
+ pdram_timing->cl = (ddr3_cl_cwl[ddr_speed_bin][tmp] >> 4) & 0xf;
+ pdram_timing->cwl = ddr3_cl_cwl[ddr_speed_bin][tmp] & 0xf;
+ }
+
+ switch (timing_config->dramds) {
+ case 40:
+ tmp = DDR3_DS_40;
+ break;
+ case 34:
+ default:
+ tmp = DDR3_DS_34;
+ break;
+ }
+
+ if (timing_config->odt)
+ switch (timing_config->dramodt) {
+ case 60:
+ pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_60;
+ break;
+ case 40:
+ pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_40;
+ break;
+ case 120:
+ pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_120;
+ break;
+ case 0:
+ default:
+ pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_DIS;
+ break;
+ }
+ else
+ pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_DIS;
+
+ pdram_timing->mr[2] = DDR3_MR2_CWL(pdram_timing->cwl);
+ pdram_timing->mr[3] = 0;
+
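+	/*
+	 * The (X * nmhz + 999) / 1000 pattern below converts a time in ns
+	 * into clock cycles, rounding up.
+	 */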
+ pdram_timing->trstl = ((DDR3_TRSTL * nmhz + 999) / 1000);
+ pdram_timing->trsth = ((DDR3_TRSTH * nmhz + 999) / 1000);
+ /* tREFI, average periodic refresh interval, 7.8us */
+ pdram_timing->trefi = ((DDR3_TREFI_7_8_US * nmhz + 999) / 1000);
+ /* base timing */
+ pdram_timing->trcd = pdram_timing->cl;
+ pdram_timing->trp = pdram_timing->cl;
+ pdram_timing->trppb = pdram_timing->cl;
+ tmp = ((DDR3_TWR * nmhz + 999) / 1000);
+ pdram_timing->twr = tmp;
+ pdram_timing->tdal = tmp + pdram_timing->trp;
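+	/*
+	 * Convert tWR cycles to the MR0 write recovery encoding: 5..8 map to
+	 * tWR - 4, larger values are rounded up to an even number and halved.
+	 */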
+ if (tmp < 9) {
+ tmp = tmp - 4;
+ } else {
+ tmp += (tmp & 0x1) ? 1 : 0;
+ tmp = tmp >> 1;
+ }
+ if (pdram_timing->bl == 4)
+ pdram_timing->mr[0] = DDR3_BC4
+ | DDR3_CL(pdram_timing->cl)
+ | DDR3_WR(tmp);
+ else
+ pdram_timing->mr[0] = DDR3_BL8
+ | DDR3_CL(pdram_timing->cl)
+ | DDR3_WR(tmp);
+ tmp = ((DDR3_TRTP * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->trtp = max(4, tmp);
+ pdram_timing->trc =
+ (((ddr3_trc_tfaw[ddr_speed_bin] >> 8) * nmhz + 999) / 1000);
+ tmp = ((DDR3_TRRD * nmhz + 999) / 1000);
+ pdram_timing->trrd = max(4, tmp);
+ pdram_timing->tccd = DDR3_TCCD;
+ tmp = ((DDR3_TWTR * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->twtr = max(4, tmp);
+ pdram_timing->trtw = DDR3_TRTW;
+ pdram_timing->tras_max = 9 * pdram_timing->trefi;
+ pdram_timing->tras_min = ((DDR3_TRAS * nmhz + (nmhz >> 1) + 999)
+ / 1000);
+ pdram_timing->tfaw =
+ (((ddr3_trc_tfaw[ddr_speed_bin] & 0x0ff) * nmhz + 999)
+ / 1000);
+ /* tRFC, 90ns(512Mb),110ns(1Gb),160ns(2Gb),300ns(4Gb),350ns(8Gb) */
+ if (ddr_capability_per_die <= 0x4000000)
+ tmp = DDR3_TRFC_512MBIT;
+ else if (ddr_capability_per_die <= 0x8000000)
+ tmp = DDR3_TRFC_1GBIT;
+ else if (ddr_capability_per_die <= 0x10000000)
+ tmp = DDR3_TRFC_2GBIT;
+ else if (ddr_capability_per_die <= 0x20000000)
+ tmp = DDR3_TRFC_4GBIT;
+ else
+ tmp = DDR3_TRFC_8GBIT;
+ pdram_timing->trfc = (tmp * nmhz + 999) / 1000;
+ pdram_timing->txsnr = max(5, (((tmp + 10) * nmhz + 999) / 1000));
+ pdram_timing->tdqsck_max = 0;
+ /*pd and sr*/
+ pdram_timing->txsr = DDR3_TDLLK;
+ tmp = ((DDR3_TXP * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->txp = max(3, tmp);
+ tmp = ((DDR3_TXPDLL * nmhz + 999) / 1000);
+ pdram_timing->txpdll = max(10, tmp);
+ pdram_timing->tdllk = DDR3_TDLLK;
+ if (nmhz >= 533)
+ tmp = ((DDR3_TCKE_533MHZ * nmhz + 999) / 1000);
+ else
+ tmp = ((DDR3_TCKE_400MHZ * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->tcke = max(3, tmp);
+ pdram_timing->tckesr = (pdram_timing->tcke + 1);
+ tmp = ((DDR3_TCKSRE * nmhz + 999) / 1000);
+ pdram_timing->tcksre = max(5, tmp);
+ pdram_timing->tcksrx = max(5, tmp);
+ /*mode register timing*/
+ tmp = ((DDR3_TMOD * nmhz + 999) / 1000);
+ pdram_timing->tmod = max(12, tmp);
+ pdram_timing->tmrd = DDR3_TMRD;
+ pdram_timing->tmrr = 0;
+ /*ODT*/
+ pdram_timing->todton = pdram_timing->cwl - 2;
+ /*ZQ*/
+ tmp = ((DDR3_TZQINIT * nmhz + 999) / 1000);
+ pdram_timing->tzqinit = max(512, tmp);
+ tmp = ((DDR3_TZQCS * nmhz + 999) / 1000);
+ pdram_timing->tzqcs = max(64, tmp);
+ tmp = ((DDR3_TZQOPER * nmhz + 999) / 1000);
+ pdram_timing->tzqoper = max(256, tmp);
+ /* write leveling */
+ pdram_timing->twlmrd = DDR3_TWLMRD;
+ pdram_timing->twldqsen = DDR3_TWLDQSEN;
+ pdram_timing->twlo = ((DDR3_TWLO * nmhz + (nmhz >> 1) + 999) / 1000);
+}
+
+#define LPDDR2_TINIT1 (100) /* ns */
+#define LPDDR2_TINIT2 (5) /* tCK */
+#define LPDDR2_TINIT3 (200000) /* 200us */
+#define LPDDR2_TINIT4 (1000) /* 1us */
+#define LPDDR2_TINIT5 (10000) /* 10us */
+#define LPDDR2_TRSTL (0) /* tCK */
+#define LPDDR2_TRSTH (500000) /* 500us */
+#define LPDDR2_TREFI_3_9_US (3900) /* 3.9us */
+#define LPDDR2_TREFI_7_8_US (7800) /* 7.8us */
+
+/* base timing */
+#define LPDDR2_TRCD (24) /* tRCD,15ns(Fast)18ns(Typ)24ns(Slow) */
+#define LPDDR2_TRP_PB (18) /* tRPpb,15ns(Fast)18ns(Typ)24ns(Slow) */
+#define LPDDR2_TRP_AB_8_BANK (21) /* tRPab,18ns(Fast)21ns(Typ)27ns(Slow) */
+#define LPDDR2_TWR (15) /* tWR, max(3tCK,15ns) */
+#define LPDDR2_TRTP (7) /* tRTP, max(2tCK, 7.5ns) */
+#define LPDDR2_TRRD (10) /* tRRD, max(2tCK,10ns) */
+#define LPDDR2_TCCD (2) /* tCK */
+#define LPDDR2_TWTR_GREAT_200MHZ (7) /* ns */
+#define LPDDR2_TWTR_LITTLE_200MHZ (10) /* ns */
+#define LPDDR2_TRTW (0) /* tCK */
+#define LPDDR2_TRAS_MAX (70000) /* 70us */
+#define LPDDR2_TRAS (42) /* tRAS, max(3tCK,42ns) */
+#define LPDDR2_TFAW_GREAT_200MHZ (50) /* max(8tCK,50ns) */
+#define LPDDR2_TFAW_LITTLE_200MHZ (60) /* max(8tCK,60ns) */
+#define LPDDR2_TRFC_8GBIT (210) /* ns */
+#define LPDDR2_TRFC_4GBIT (130) /* ns */
+#define LPDDR2_TDQSCK_MIN (2) /* tDQSCKmin, 2.5ns */
+#define LPDDR2_TDQSCK_MAX (5) /* tDQSCKmax, 5.5ns */
+
+/*pd and sr*/
+#define LPDDR2_TXP (7) /* tXP, max(2tCK,7.5ns) */
+#define LPDDR2_TXPDLL (0)
+#define LPDDR2_TDLLK (0) /* tCK */
+#define LPDDR2_TCKE (3) /* tCK */
+#define LPDDR2_TCKESR (15) /* tCKESR, max(3tCK,15ns) */
+#define LPDDR2_TCKSRE (1) /* tCK */
+#define LPDDR2_TCKSRX (2) /* tCK */
+
+/*mode register timing*/
+#define LPDDR2_TMOD (0)
+#define LPDDR2_TMRD (5) /* tMRD, (=tMRW), 5 tCK */
+#define LPDDR2_TMRR (2) /* tCK */
+
+/*ZQ*/
+#define LPDDR2_TZQINIT (1000) /* ns */
+#define LPDDR2_TZQCS (90) /* tZQCS, max(6tCK,90ns) */
+#define LPDDR2_TZQCL (360) /* tZQCL, max(6tCK,360ns) */
+#define LPDDR2_TZQRESET (50) /* ZQreset, max(3tCK,50ns) */
+
+/*
+ * Description: based on the input "timing_config", calculate all the
+ *              LPDDR2 spec timings and store them in "pdram_timing".
+ * parameters:
+ * input: timing_config
+ * output: pdram_timing
+ */
+static void lpddr2_get_parameter(struct timing_related_config *timing_config,
+ struct dram_timing_t *pdram_timing)
+{
+ uint32_t nmhz = timing_config->freq;
+ uint32_t ddr_capability_per_die = get_max_die_capability(timing_config);
+ uint32_t tmp, trp_tmp, trppb_tmp, tras_tmp, twr_tmp, bl_tmp;
+
+ zeromem((void *)pdram_timing, sizeof(struct dram_timing_t));
+ pdram_timing->mhz = nmhz;
+ pdram_timing->al = 0;
+ pdram_timing->bl = timing_config->bl;
+
+ /* 1066 933 800 667 533 400 333
+ * RL, 8 7 6 5 4 3 3
+ * WL, 4 4 3 2 2 1 1
+ */
+ if (nmhz <= 266) {
+ pdram_timing->cl = 4;
+ pdram_timing->cwl = 2;
+ pdram_timing->mr[2] = LPDDR2_RL4_WL2;
+ } else if (nmhz <= 333) {
+ pdram_timing->cl = 5;
+ pdram_timing->cwl = 2;
+ pdram_timing->mr[2] = LPDDR2_RL5_WL2;
+ } else if (nmhz <= 400) {
+ pdram_timing->cl = 6;
+ pdram_timing->cwl = 3;
+ pdram_timing->mr[2] = LPDDR2_RL6_WL3;
+ } else if (nmhz <= 466) {
+ pdram_timing->cl = 7;
+ pdram_timing->cwl = 4;
+ pdram_timing->mr[2] = LPDDR2_RL7_WL4;
+ } else {
+ pdram_timing->cl = 8;
+ pdram_timing->cwl = 4;
+ pdram_timing->mr[2] = LPDDR2_RL8_WL4;
+ }
+ switch (timing_config->dramds) {
+ case 120:
+ pdram_timing->mr[3] = LPDDR2_DS_120;
+ break;
+ case 80:
+ pdram_timing->mr[3] = LPDDR2_DS_80;
+ break;
+ case 60:
+ pdram_timing->mr[3] = LPDDR2_DS_60;
+ break;
+ case 48:
+ pdram_timing->mr[3] = LPDDR2_DS_48;
+ break;
+ case 40:
+ pdram_timing->mr[3] = LPDDR2_DS_40;
+ break;
+ case 34:
+ default:
+ pdram_timing->mr[3] = LPDDR2_DS_34;
+ break;
+ }
+ pdram_timing->mr[0] = 0;
+
+ pdram_timing->tinit1 = (LPDDR2_TINIT1 * nmhz + 999) / 1000;
+ pdram_timing->tinit2 = LPDDR2_TINIT2;
+ pdram_timing->tinit3 = (LPDDR2_TINIT3 * nmhz + 999) / 1000;
+ pdram_timing->tinit4 = (LPDDR2_TINIT4 * nmhz + 999) / 1000;
+ pdram_timing->tinit5 = (LPDDR2_TINIT5 * nmhz + 999) / 1000;
+ pdram_timing->trstl = LPDDR2_TRSTL;
+ pdram_timing->trsth = (LPDDR2_TRSTH * nmhz + 999) / 1000;
+ /*
+ * tREFI, average periodic refresh interval,
+ * 15.6us(<256Mb) 7.8us(256Mb-1Gb) 3.9us(2Gb-8Gb)
+ */
+ if (ddr_capability_per_die >= 0x10000000)
+ pdram_timing->trefi = (LPDDR2_TREFI_3_9_US * nmhz + 999)
+ / 1000;
+ else
+ pdram_timing->trefi = (LPDDR2_TREFI_7_8_US * nmhz + 999)
+ / 1000;
+ /* base timing */
+ tmp = ((LPDDR2_TRCD * nmhz + 999) / 1000);
+ pdram_timing->trcd = max(3, tmp);
+ /*
+ * tRPpb, max(3tCK, 15ns(Fast) 18ns(Typ) 24ns(Slow),
+ */
+ trppb_tmp = ((LPDDR2_TRP_PB * nmhz + 999) / 1000);
+ trppb_tmp = max(3, trppb_tmp);
+ pdram_timing->trppb = trppb_tmp;
+ /*
+ * tRPab, max(3tCK, 4-bank:15ns(Fast) 18ns(Typ) 24ns(Slow),
+ * 8-bank:18ns(Fast) 21ns(Typ) 27ns(Slow))
+ */
+ trp_tmp = ((LPDDR2_TRP_AB_8_BANK * nmhz + 999) / 1000);
+ trp_tmp = max(3, trp_tmp);
+ pdram_timing->trp = trp_tmp;
+ twr_tmp = ((LPDDR2_TWR * nmhz + 999) / 1000);
+ twr_tmp = max(3, twr_tmp);
+ pdram_timing->twr = twr_tmp;
+ bl_tmp = (pdram_timing->bl == 16) ? LPDDR2_BL16 :
+ ((pdram_timing->bl == 8) ? LPDDR2_BL8 : LPDDR2_BL4);
+ pdram_timing->mr[1] = bl_tmp | LPDDR2_N_WR(twr_tmp);
+ tmp = ((LPDDR2_TRTP * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->trtp = max(2, tmp);
+ tras_tmp = ((LPDDR2_TRAS * nmhz + 999) / 1000);
+ tras_tmp = max(3, tras_tmp);
+ pdram_timing->tras_min = tras_tmp;
+ pdram_timing->tras_max = ((LPDDR2_TRAS_MAX * nmhz + 999) / 1000);
+ pdram_timing->trc = (tras_tmp + trp_tmp);
+ tmp = ((LPDDR2_TRRD * nmhz + 999) / 1000);
+ pdram_timing->trrd = max(2, tmp);
+ pdram_timing->tccd = LPDDR2_TCCD;
+ /* tWTR, max(2tCK, 7.5ns(533-266MHz) 10ns(200-166MHz)) */
+ if (nmhz > 200)
+ tmp = ((LPDDR2_TWTR_GREAT_200MHZ * nmhz + (nmhz >> 1) +
+ 999) / 1000);
+ else
+ tmp = ((LPDDR2_TWTR_LITTLE_200MHZ * nmhz + 999) / 1000);
+ pdram_timing->twtr = max(2, tmp);
+ pdram_timing->trtw = LPDDR2_TRTW;
+ if (nmhz <= 200)
+ pdram_timing->tfaw = (LPDDR2_TFAW_LITTLE_200MHZ * nmhz + 999)
+ / 1000;
+ else
+ pdram_timing->tfaw = (LPDDR2_TFAW_GREAT_200MHZ * nmhz + 999)
+ / 1000;
+ /* tRFC, 90ns(<=512Mb) 130ns(1Gb-4Gb) 210ns(8Gb) */
+ if (ddr_capability_per_die >= 0x40000000) {
+ pdram_timing->trfc =
+ (LPDDR2_TRFC_8GBIT * nmhz + 999) / 1000;
+ tmp = (((LPDDR2_TRFC_8GBIT + 10) * nmhz + 999) / 1000);
+ } else {
+ pdram_timing->trfc =
+ (LPDDR2_TRFC_4GBIT * nmhz + 999) / 1000;
+ tmp = (((LPDDR2_TRFC_4GBIT + 10) * nmhz + 999) / 1000);
+ }
+ if (tmp < 2)
+ tmp = 2;
+ pdram_timing->txsr = tmp;
+ pdram_timing->txsnr = tmp;
+	/* tDQSCK is rounded down */
+ pdram_timing->tdqsck = ((LPDDR2_TDQSCK_MIN * nmhz + (nmhz >> 1))
+ / 1000);
+ pdram_timing->tdqsck_max =
+ ((LPDDR2_TDQSCK_MAX * nmhz + (nmhz >> 1) + 999)
+ / 1000);
+ /* pd and sr */
+ tmp = ((LPDDR2_TXP * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->txp = max(2, tmp);
+ pdram_timing->txpdll = LPDDR2_TXPDLL;
+ pdram_timing->tdllk = LPDDR2_TDLLK;
+ pdram_timing->tcke = LPDDR2_TCKE;
+ tmp = ((LPDDR2_TCKESR * nmhz + 999) / 1000);
+ pdram_timing->tckesr = max(3, tmp);
+ pdram_timing->tcksre = LPDDR2_TCKSRE;
+ pdram_timing->tcksrx = LPDDR2_TCKSRX;
+ /* mode register timing */
+ pdram_timing->tmod = LPDDR2_TMOD;
+ pdram_timing->tmrd = LPDDR2_TMRD;
+ pdram_timing->tmrr = LPDDR2_TMRR;
+ /* ZQ */
+ pdram_timing->tzqinit = (LPDDR2_TZQINIT * nmhz + 999) / 1000;
+ tmp = ((LPDDR2_TZQCS * nmhz + 999) / 1000);
+ pdram_timing->tzqcs = max(6, tmp);
+ tmp = ((LPDDR2_TZQCL * nmhz + 999) / 1000);
+ pdram_timing->tzqoper = max(6, tmp);
+ tmp = ((LPDDR2_TZQRESET * nmhz + 999) / 1000);
+ pdram_timing->tzqreset = max(3, tmp);
+}
+
+#define LPDDR3_TINIT1 (100) /* ns */
+#define LPDDR3_TINIT2 (5) /* tCK */
+#define LPDDR3_TINIT3 (200000) /* 200us */
+#define LPDDR3_TINIT4 (1000) /* 1us */
+#define LPDDR3_TINIT5 (10000) /* 10us */
+#define LPDDR3_TRSTL (0)
+#define LPDDR3_TRSTH (0) /* 500us */
+#define LPDDR3_TREFI_3_9_US (3900) /* 3.9us */
+
+/* base timing */
+#define LPDDR3_TRCD (18) /* tRCD,15ns(Fast)18ns(Typ)24ns(Slow) */
+#define LPDDR3_TRP_PB (18) /* tRPpb, 15ns(Fast) 18ns(Typ) 24ns(Slow) */
+#define LPDDR3_TRP_AB (21) /* tRPab, 18ns(Fast) 21ns(Typ) 27ns(Slow) */
+#define LPDDR3_TWR (15) /* tWR, max(4tCK,15ns) */
+#define LPDDR3_TRTP (7) /* tRTP, max(4tCK, 7.5ns) */
+#define LPDDR3_TRRD (10) /* tRRD, max(2tCK,10ns) */
+#define LPDDR3_TCCD (4) /* tCK */
+#define LPDDR3_TWTR (7) /* tWTR, max(4tCK, 7.5ns) */
+#define LPDDR3_TRTW (0) /* tCK register min valid value */
+#define LPDDR3_TRAS_MAX (70000) /* 70us */
+#define LPDDR3_TRAS (42) /* tRAS, max(3tCK,42ns) */
+#define LPDDR3_TFAW (50) /* tFAW,max(8tCK, 50ns) */
+#define LPDDR3_TRFC_8GBIT (210) /* tRFC, 130ns(4Gb) 210ns(>4Gb) */
+#define LPDDR3_TRFC_4GBIT (130) /* ns */
+#define LPDDR3_TDQSCK_MIN (2) /* tDQSCKmin,2.5ns */
+#define LPDDR3_TDQSCK_MAX (5) /* tDQSCKmax,5.5ns */
+
+/* pd and sr */
+#define LPDDR3_TXP (7) /* tXP, max(3tCK,7.5ns) */
+#define LPDDR3_TXPDLL (0)
+#define LPDDR3_TCKE (7) /* tCKE, (max 7.5ns,3 tCK) */
+#define LPDDR3_TCKESR (15) /* tCKESR, max(3tCK,15ns) */
+#define LPDDR3_TCKSRE (2) /* tCKSRE=tCPDED, 2 tCK */
+#define LPDDR3_TCKSRX (2) /* tCKSRX, 2 tCK */
+
+/* mode register timing */
+#define LPDDR3_TMOD (0)
+#define LPDDR3_TMRD (14) /* tMRD, (=tMRW), max(14ns, 10 tCK) */
+#define LPDDR3_TMRR (4) /* tMRR, 4 tCK */
+#define LPDDR3_TMRRI LPDDR3_TRCD
+
+/* ODT */
+#define LPDDR3_TODTON (3) /* 3.5ns */
+
+/* ZQ */
+#define LPDDR3_TZQINIT (1000) /* 1us */
+#define LPDDR3_TZQCS (90) /* tZQCS, 90ns */
+#define LPDDR3_TZQCL (360) /* 360ns */
+#define LPDDR3_TZQRESET (50) /* ZQreset, max(3tCK,50ns) */
+/* write leveling */
+#define LPDDR3_TWLMRD (40) /* ns */
+#define LPDDR3_TWLO (20) /* ns */
+#define LPDDR3_TWLDQSEN (25) /* ns */
+/* CA training */
+#define LPDDR3_TCACKEL (10) /* tCK */
+#define LPDDR3_TCAENT (10) /* tCK */
+#define LPDDR3_TCAMRD (20) /* tCK */
+#define LPDDR3_TCACKEH (10) /* tCK */
+#define LPDDR3_TCAEXT (10) /* tCK */
+#define LPDDR3_TADR (20) /* ns */
+#define LPDDR3_TMRZ (3) /* ns */
+
+/* FSP */
+#define LPDDR3_TFC_LONG (250) /* ns */
+
+/*
+ * Description: based on the input "timing_config", calculate all the
+ *              LPDDR3 spec timings and store them in "pdram_timing".
+ * parameters:
+ * input: timing_config
+ * output: pdram_timing
+ */
+static void lpddr3_get_parameter(struct timing_related_config *timing_config,
+ struct dram_timing_t *pdram_timing)
+{
+ uint32_t nmhz = timing_config->freq;
+ uint32_t ddr_capability_per_die = get_max_die_capability(timing_config);
+ uint32_t tmp, trp_tmp, trppb_tmp, tras_tmp, twr_tmp, bl_tmp;
+
+ zeromem((void *)pdram_timing, sizeof(struct dram_timing_t));
+ pdram_timing->mhz = nmhz;
+ pdram_timing->al = 0;
+ pdram_timing->bl = timing_config->bl;
+
+ /*
+ * Only support Write Latency Set A here
+ * 1066 933 800 733 667 600 533 400 166
+ * RL, 16 14 12 11 10 9 8 6 3
+ * WL, 8 8 6 6 6 5 4 3 1
+ */
+ if (nmhz <= 400) {
+ pdram_timing->cl = 6;
+ pdram_timing->cwl = 3;
+ pdram_timing->mr[2] = LPDDR3_RL6_WL3;
+ } else if (nmhz <= 533) {
+ pdram_timing->cl = 8;
+ pdram_timing->cwl = 4;
+ pdram_timing->mr[2] = LPDDR3_RL8_WL4;
+ } else if (nmhz <= 600) {
+ pdram_timing->cl = 9;
+ pdram_timing->cwl = 5;
+ pdram_timing->mr[2] = LPDDR3_RL9_WL5;
+ } else if (nmhz <= 667) {
+ pdram_timing->cl = 10;
+ pdram_timing->cwl = 6;
+ pdram_timing->mr[2] = LPDDR3_RL10_WL6;
+ } else if (nmhz <= 733) {
+ pdram_timing->cl = 11;
+ pdram_timing->cwl = 6;
+ pdram_timing->mr[2] = LPDDR3_RL11_WL6;
+ } else if (nmhz <= 800) {
+ pdram_timing->cl = 12;
+ pdram_timing->cwl = 6;
+ pdram_timing->mr[2] = LPDDR3_RL12_WL6;
+ } else if (nmhz <= 933) {
+ pdram_timing->cl = 14;
+ pdram_timing->cwl = 8;
+ pdram_timing->mr[2] = LPDDR3_RL14_WL8;
+ } else {
+ pdram_timing->cl = 16;
+ pdram_timing->cwl = 8;
+ pdram_timing->mr[2] = LPDDR3_RL16_WL8;
+ }
+ switch (timing_config->dramds) {
+ case 80:
+ pdram_timing->mr[3] = LPDDR3_DS_80;
+ break;
+ case 60:
+ pdram_timing->mr[3] = LPDDR3_DS_60;
+ break;
+ case 48:
+ pdram_timing->mr[3] = LPDDR3_DS_48;
+ break;
+ case 40:
+ pdram_timing->mr[3] = LPDDR3_DS_40;
+ break;
+ case 3440:
+ pdram_timing->mr[3] = LPDDR3_DS_34D_40U;
+ break;
+ case 4048:
+ pdram_timing->mr[3] = LPDDR3_DS_40D_48U;
+ break;
+ case 3448:
+ pdram_timing->mr[3] = LPDDR3_DS_34D_48U;
+ break;
+ case 34:
+ default:
+ pdram_timing->mr[3] = LPDDR3_DS_34;
+ break;
+ }
+ pdram_timing->mr[0] = 0;
+ if (timing_config->odt)
+ switch (timing_config->dramodt) {
+ case 60:
+ pdram_timing->mr11 = LPDDR3_ODT_60;
+ break;
+ case 120:
+ pdram_timing->mr11 = LPDDR3_ODT_120;
+ break;
+ case 240:
+ default:
+ pdram_timing->mr11 = LPDDR3_ODT_240;
+ break;
+ }
+ else
+ pdram_timing->mr11 = LPDDR3_ODT_DIS;
+
+ pdram_timing->tinit1 = (LPDDR3_TINIT1 * nmhz + 999) / 1000;
+ pdram_timing->tinit2 = LPDDR3_TINIT2;
+ pdram_timing->tinit3 = (LPDDR3_TINIT3 * nmhz + 999) / 1000;
+ pdram_timing->tinit4 = (LPDDR3_TINIT4 * nmhz + 999) / 1000;
+ pdram_timing->tinit5 = (LPDDR3_TINIT5 * nmhz + 999) / 1000;
+ pdram_timing->trstl = LPDDR3_TRSTL;
+ pdram_timing->trsth = (LPDDR3_TRSTH * nmhz + 999) / 1000;
+ /* tREFI, average periodic refresh interval, 3.9us(4Gb-16Gb) */
+ pdram_timing->trefi = (LPDDR3_TREFI_3_9_US * nmhz + 999) / 1000;
+ /* base timing */
+ tmp = ((LPDDR3_TRCD * nmhz + 999) / 1000);
+ pdram_timing->trcd = max(3, tmp);
+ trppb_tmp = ((LPDDR3_TRP_PB * nmhz + 999) / 1000);
+ trppb_tmp = max(3, trppb_tmp);
+ pdram_timing->trppb = trppb_tmp;
+ trp_tmp = ((LPDDR3_TRP_AB * nmhz + 999) / 1000);
+ trp_tmp = max(3, trp_tmp);
+ pdram_timing->trp = trp_tmp;
+ twr_tmp = ((LPDDR3_TWR * nmhz + 999) / 1000);
+ twr_tmp = max(4, twr_tmp);
+ pdram_timing->twr = twr_tmp;
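+	/*
+	 * Clamp tWR to an encodable nWR value for MR1; nWR greater than 9
+	 * additionally sets the enable bit in MR2.
+	 */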
+ if (twr_tmp <= 6)
+ twr_tmp = 6;
+ else if (twr_tmp <= 8)
+ twr_tmp = 8;
+ else if (twr_tmp <= 12)
+ twr_tmp = twr_tmp;
+ else if (twr_tmp <= 14)
+ twr_tmp = 14;
+ else
+ twr_tmp = 16;
+ if (twr_tmp > 9)
+ pdram_timing->mr[2] |= (1 << 4); /*enable nWR > 9*/
+ twr_tmp = (twr_tmp > 9) ? (twr_tmp - 10) : (twr_tmp - 2);
+ bl_tmp = LPDDR3_BL8;
+ pdram_timing->mr[1] = bl_tmp | LPDDR3_N_WR(twr_tmp);
+ tmp = ((LPDDR3_TRTP * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->trtp = max(4, tmp);
+ tras_tmp = ((LPDDR3_TRAS * nmhz + 999) / 1000);
+ tras_tmp = max(3, tras_tmp);
+ pdram_timing->tras_min = tras_tmp;
+ pdram_timing->trc = (tras_tmp + trp_tmp);
+ tmp = ((LPDDR3_TRRD * nmhz + 999) / 1000);
+ pdram_timing->trrd = max(2, tmp);
+ pdram_timing->tccd = LPDDR3_TCCD;
+ tmp = ((LPDDR3_TWTR * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->twtr = max(4, tmp);
+ pdram_timing->trtw = ((LPDDR3_TRTW * nmhz + 999) / 1000);
+ pdram_timing->tras_max = ((LPDDR3_TRAS_MAX * nmhz + 999) / 1000);
+ tmp = (LPDDR3_TFAW * nmhz + 999) / 1000;
+ pdram_timing->tfaw = max(8, tmp);
+ if (ddr_capability_per_die > 0x20000000) {
+ pdram_timing->trfc =
+ (LPDDR3_TRFC_8GBIT * nmhz + 999) / 1000;
+ tmp = (((LPDDR3_TRFC_8GBIT + 10) * nmhz + 999) / 1000);
+ } else {
+ pdram_timing->trfc =
+ (LPDDR3_TRFC_4GBIT * nmhz + 999) / 1000;
+ tmp = (((LPDDR3_TRFC_4GBIT + 10) * nmhz + 999) / 1000);
+ }
+ pdram_timing->txsr = max(2, tmp);
+ pdram_timing->txsnr = max(2, tmp);
+	/* tDQSCK is rounded down */
+ pdram_timing->tdqsck =
+ ((LPDDR3_TDQSCK_MIN * nmhz + (nmhz >> 1))
+ / 1000);
+ pdram_timing->tdqsck_max =
+ ((LPDDR3_TDQSCK_MAX * nmhz + (nmhz >> 1) + 999)
+ / 1000);
+ /*pd and sr*/
+ tmp = ((LPDDR3_TXP * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->txp = max(3, tmp);
+ pdram_timing->txpdll = LPDDR3_TXPDLL;
+ tmp = ((LPDDR3_TCKE * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->tcke = max(3, tmp);
+ tmp = ((LPDDR3_TCKESR * nmhz + 999) / 1000);
+ pdram_timing->tckesr = max(3, tmp);
+ pdram_timing->tcksre = LPDDR3_TCKSRE;
+ pdram_timing->tcksrx = LPDDR3_TCKSRX;
+ /*mode register timing*/
+ pdram_timing->tmod = LPDDR3_TMOD;
+ tmp = ((LPDDR3_TMRD * nmhz + 999) / 1000);
+ pdram_timing->tmrd = max(10, tmp);
+ pdram_timing->tmrr = LPDDR3_TMRR;
+ tmp = ((LPDDR3_TRCD * nmhz + 999) / 1000);
+ pdram_timing->tmrri = max(3, tmp);
+ /* ODT */
+ pdram_timing->todton = (LPDDR3_TODTON * nmhz + (nmhz >> 1) + 999)
+ / 1000;
+ /* ZQ */
+ pdram_timing->tzqinit = (LPDDR3_TZQINIT * nmhz + 999) / 1000;
+ pdram_timing->tzqcs =
+ ((LPDDR3_TZQCS * nmhz + 999) / 1000);
+ pdram_timing->tzqoper =
+ ((LPDDR3_TZQCL * nmhz + 999) / 1000);
+ tmp = ((LPDDR3_TZQRESET * nmhz + 999) / 1000);
+ pdram_timing->tzqreset = max(3, tmp);
+ /* write leveling */
+ pdram_timing->twlmrd = (LPDDR3_TWLMRD * nmhz + 999) / 1000;
+ pdram_timing->twlo = (LPDDR3_TWLO * nmhz + 999) / 1000;
+ pdram_timing->twldqsen = (LPDDR3_TWLDQSEN * nmhz + 999) / 1000;
+ /* CA training */
+ pdram_timing->tcackel = LPDDR3_TCACKEL;
+ pdram_timing->tcaent = LPDDR3_TCAENT;
+ pdram_timing->tcamrd = LPDDR3_TCAMRD;
+ pdram_timing->tcackeh = LPDDR3_TCACKEH;
+ pdram_timing->tcaext = LPDDR3_TCAEXT;
+ pdram_timing->tadr = (LPDDR3_TADR * nmhz + 999) / 1000;
+ pdram_timing->tmrz = (LPDDR3_TMRZ * nmhz + 999) / 1000;
+ pdram_timing->tcacd = pdram_timing->tadr + 2;
+
+ /* FSP */
+ pdram_timing->tfc_long = (LPDDR3_TFC_LONG * nmhz + 999) / 1000;
+}
+
+#define LPDDR4_TINIT1 (200000) /* 200us */
+#define LPDDR4_TINIT2 (10) /* 10ns */
+#define LPDDR4_TINIT3 (2000000) /* 2ms */
+#define LPDDR4_TINIT4 (5) /* tCK */
+#define LPDDR4_TINIT5 (2000) /* 2us */
+#define LPDDR4_TRSTL LPDDR4_TINIT1
+#define LPDDR4_TRSTH LPDDR4_TINIT3
+#define LPDDR4_TREFI_3_9_US (3900) /* 3.9us */
+
+/* base timing */
+#define LPDDR4_TRCD (18) /* tRCD, max(18ns,4tCK) */
+#define LPDDR4_TRP_PB (18) /* tRPpb, max(18ns, 4tCK) */
+#define LPDDR4_TRP_AB (21) /* tRPab, max(21ns, 4tCK) */
+#define LPDDR4_TRRD (10) /* tRRD, max(4tCK,10ns) */
+#define LPDDR4_TCCD_BL16 (8) /* tCK */
+#define LPDDR4_TCCD_BL32 (16) /* tCK */
+#define LPDDR4_TWTR (10) /* tWTR, max(8tCK, 10ns) */
+#define LPDDR4_TRTW (0) /* tCK register min valid value */
+#define LPDDR4_TRAS_MAX (70000) /* 70us */
+#define LPDDR4_TRAS (42) /* tRAS, max(3tCK,42ns) */
+#define LPDDR4_TFAW (40) /* tFAW,min 40ns) */
+#define LPDDR4_TRFC_12GBIT (280) /* tRFC, 280ns(>=12Gb) */
+#define LPDDR4_TRFC_6GBIT (180) /* 6Gb/8Gb 180ns */
+#define LPDDR4_TRFC_4GBIT (130) /* 4Gb 130ns */
+#define LPDDR4_TDQSCK_MIN (1) /* tDQSCKmin,1.5ns */
+#define LPDDR4_TDQSCK_MAX (3) /* tDQSCKmax,3.5ns */
+#define LPDDR4_TPPD (4) /* tCK */
+
+/* pd and sr */
+#define LPDDR4_TXP (7) /* tXP, max(5tCK,7.5ns) */
+#define LPDDR4_TCKE (7) /* tCKE, max(7.5ns,4 tCK) */
+#define LPDDR4_TESCKE (1) /* tESCKE, max(1.75ns, 3tCK) */
+#define LPDDR4_TSR (15) /* tSR, max(15ns, 3tCK) */
+#define LPDDR4_TCMDCKE (1) /* max(1.75ns, 3tCK) */
+#define LPDDR4_TCSCKE (1) /* 1.75ns */
+#define LPDDR4_TCKELCS (5) /* max(5ns, 5tCK) */
+#define LPDDR4_TCSCKEH (1) /* 1.75ns */
+#define LPDDR4_TCKEHCS (7) /* max(7.5ns, 5tCK) */
+#define LPDDR4_TMRWCKEL (14) /* max(14ns, 10tCK) */
+#define LPDDR4_TCKELCMD (7) /* max(7.5ns, 3tCK) */
+#define LPDDR4_TCKEHCMD (7) /* max(7.5ns, 3tCK) */
+#define LPDDR4_TCKELPD (7) /* max(7.5ns, 3tCK) */
+#define LPDDR4_TCKCKEL (7) /* max(7.5ns, 3tCK) */
+
+/* mode register timing */
+#define LPDDR4_TMRD (14) /* tMRD, (=tMRW), max(14ns, 10 tCK) */
+#define LPDDR4_TMRR (8) /* tMRR, 8 tCK */
+
+/* ODT */
+#define LPDDR4_TODTON (3) /* 3.5ns */
+
+/* ZQ */
+#define LPDDR4_TZQCAL (1000) /* 1us */
+#define LPDDR4_TZQLAT (30) /* tZQLAT, max(30ns,8tCK) */
+#define LPDDR4_TZQRESET (50) /* ZQreset, max(3tCK,50ns) */
+#define LPDDR4_TZQCKE (1) /* tZQCKE, max(1.75ns, 3tCK) */
+
+/* write leveling */
+#define LPDDR4_TWLMRD (40) /* tCK */
+#define LPDDR4_TWLO (20) /* ns */
+#define LPDDR4_TWLDQSEN (20) /* tCK */
+
+/* CA training */
+#define LPDDR4_TCAENT (250) /* ns */
+#define LPDDR4_TADR (20) /* ns */
+#define LPDDR4_TMRZ (1) /* 1.5ns */
+#define LPDDR4_TVREF_LONG (250) /* ns */
+#define LPDDR4_TVREF_SHORT (100) /* ns */
+
+/* VRCG */
+#define LPDDR4_TVRCG_ENABLE (200) /* ns */
+#define LPDDR4_TVRCG_DISABLE (100) /* ns */
+
+/* FSP */
+#define LPDDR4_TFC_LONG (250) /* ns */
+#define LPDDR4_TCKFSPE (7) /* max(7.5ns, 4tCK) */
+#define LPDDR4_TCKFSPX (7) /* max(7.5ns, 4tCK) */
+
+/*
+ * Description: based on the input "timing_config", calculate all the
+ *              LPDDR4 spec timings and store them in "pdram_timing".
+ * parameters:
+ * input: timing_config
+ * output: pdram_timing
+ */
+static void lpddr4_get_parameter(struct timing_related_config *timing_config,
+ struct dram_timing_t *pdram_timing)
+{
+ uint32_t nmhz = timing_config->freq;
+ uint32_t ddr_capability_per_die = get_max_die_capability(timing_config);
+ uint32_t tmp, trp_tmp, trppb_tmp, tras_tmp;
+
+ zeromem((void *)pdram_timing, sizeof(struct dram_timing_t));
+ pdram_timing->mhz = nmhz;
+ pdram_timing->al = 0;
+ pdram_timing->bl = timing_config->bl;
+
+ /*
+ * Only support Write Latency Set A here
+ * 2133 1866 1600 1333 1066 800 533 266
+ * RL, 36 32 28 24 20 14 10 6
+ * WL, 18 16 14 12 10 8 6 4
+ * nWR, 40 34 30 24 20 16 10 6
+ * nRTP,16 14 12 10 8 8 8 8
+ */
+ tmp = (timing_config->bl == 32) ? 1 : 0;
+
+ /*
+ * we always use WR preamble = 2tCK
+ * RD preamble = Static
+ */
+ tmp |= (1 << 2);
+ if (nmhz <= 266) {
+ pdram_timing->cl = 6;
+ pdram_timing->cwl = 4;
+ pdram_timing->twr = 6;
+ pdram_timing->trtp = 8;
+ pdram_timing->mr[2] = LPDDR4_RL6_NRTP8 | LPDDR4_A_WL4;
+ } else if (nmhz <= 533) {
+ if (timing_config->rdbi) {
+ pdram_timing->cl = 12;
+ pdram_timing->mr[2] = LPDDR4_RL12_NRTP8 | LPDDR4_A_WL6;
+ } else {
+ pdram_timing->cl = 10;
+ pdram_timing->mr[2] = LPDDR4_RL10_NRTP8 | LPDDR4_A_WL6;
+ }
+ pdram_timing->cwl = 6;
+ pdram_timing->twr = 10;
+ pdram_timing->trtp = 8;
+ tmp |= (1 << 4);
+ } else if (nmhz <= 800) {
+ if (timing_config->rdbi) {
+ pdram_timing->cl = 16;
+ pdram_timing->mr[2] = LPDDR4_RL16_NRTP8 | LPDDR4_A_WL8;
+ } else {
+ pdram_timing->cl = 14;
+ pdram_timing->mr[2] = LPDDR4_RL14_NRTP8 | LPDDR4_A_WL8;
+ }
+ pdram_timing->cwl = 8;
+ pdram_timing->twr = 16;
+ pdram_timing->trtp = 8;
+ tmp |= (2 << 4);
+ } else if (nmhz <= 1066) {
+ if (timing_config->rdbi) {
+ pdram_timing->cl = 22;
+ pdram_timing->mr[2] = LPDDR4_RL22_NRTP8 | LPDDR4_A_WL10;
+ } else {
+ pdram_timing->cl = 20;
+ pdram_timing->mr[2] = LPDDR4_RL20_NRTP8 | LPDDR4_A_WL10;
+ }
+ pdram_timing->cwl = 10;
+ pdram_timing->twr = 20;
+ pdram_timing->trtp = 8;
+ tmp |= (3 << 4);
+ } else if (nmhz <= 1333) {
+ if (timing_config->rdbi) {
+ pdram_timing->cl = 28;
+ pdram_timing->mr[2] = LPDDR4_RL28_NRTP10 |
+ LPDDR4_A_WL12;
+ } else {
+ pdram_timing->cl = 24;
+ pdram_timing->mr[2] = LPDDR4_RL24_NRTP10 |
+ LPDDR4_A_WL12;
+ }
+ pdram_timing->cwl = 12;
+ pdram_timing->twr = 24;
+ pdram_timing->trtp = 10;
+ tmp |= (4 << 4);
+ } else if (nmhz <= 1600) {
+ if (timing_config->rdbi) {
+ pdram_timing->cl = 32;
+ pdram_timing->mr[2] = LPDDR4_RL32_NRTP12 |
+ LPDDR4_A_WL14;
+ } else {
+ pdram_timing->cl = 28;
+ pdram_timing->mr[2] = LPDDR4_RL28_NRTP12 |
+ LPDDR4_A_WL14;
+ }
+ pdram_timing->cwl = 14;
+ pdram_timing->twr = 30;
+ pdram_timing->trtp = 12;
+ tmp |= (5 << 4);
+ } else if (nmhz <= 1866) {
+ if (timing_config->rdbi) {
+ pdram_timing->cl = 36;
+ pdram_timing->mr[2] = LPDDR4_RL36_NRTP14 |
+ LPDDR4_A_WL16;
+ } else {
+ pdram_timing->cl = 32;
+ pdram_timing->mr[2] = LPDDR4_RL32_NRTP14 |
+ LPDDR4_A_WL16;
+ }
+ pdram_timing->cwl = 16;
+ pdram_timing->twr = 34;
+ pdram_timing->trtp = 14;
+ tmp |= (6 << 4);
+ } else {
+ if (timing_config->rdbi) {
+ pdram_timing->cl = 40;
+ pdram_timing->mr[2] = LPDDR4_RL40_NRTP16 |
+ LPDDR4_A_WL18;
+ } else {
+ pdram_timing->cl = 36;
+ pdram_timing->mr[2] = LPDDR4_RL36_NRTP16 |
+ LPDDR4_A_WL18;
+ }
+ pdram_timing->cwl = 18;
+ pdram_timing->twr = 40;
+ pdram_timing->trtp = 16;
+ tmp |= (7 << 4);
+ }
+ pdram_timing->mr[1] = tmp;
+ tmp = (timing_config->rdbi ? LPDDR4_DBI_RD_EN : 0) |
+ (timing_config->wdbi ? LPDDR4_DBI_WR_EN : 0);
+ switch (timing_config->dramds) {
+ case 240:
+ pdram_timing->mr[3] = LPDDR4_PDDS_240 | tmp;
+ break;
+ case 120:
+ pdram_timing->mr[3] = LPDDR4_PDDS_120 | tmp;
+ break;
+ case 80:
+ pdram_timing->mr[3] = LPDDR4_PDDS_80 | tmp;
+ break;
+ case 60:
+ pdram_timing->mr[3] = LPDDR4_PDDS_60 | tmp;
+ break;
+ case 48:
+ pdram_timing->mr[3] = LPDDR4_PDDS_48 | tmp;
+ break;
+ case 40:
+ default:
+ pdram_timing->mr[3] = LPDDR4_PDDS_40 | tmp;
+ break;
+ }
+ pdram_timing->mr[0] = 0;
+ if (timing_config->odt) {
+ switch (timing_config->dramodt) {
+ case 240:
+ tmp = LPDDR4_DQODT_240;
+ break;
+ case 120:
+ tmp = LPDDR4_DQODT_120;
+ break;
+ case 80:
+ tmp = LPDDR4_DQODT_80;
+ break;
+ case 60:
+ tmp = LPDDR4_DQODT_60;
+ break;
+ case 48:
+ tmp = LPDDR4_DQODT_48;
+ break;
+ case 40:
+ default:
+ tmp = LPDDR4_DQODT_40;
+ break;
+ }
+
+ switch (timing_config->caodt) {
+ case 240:
+ pdram_timing->mr11 = LPDDR4_CAODT_240 | tmp;
+ break;
+ case 120:
+ pdram_timing->mr11 = LPDDR4_CAODT_120 | tmp;
+ break;
+ case 80:
+ pdram_timing->mr11 = LPDDR4_CAODT_80 | tmp;
+ break;
+ case 60:
+ pdram_timing->mr11 = LPDDR4_CAODT_60 | tmp;
+ break;
+ case 48:
+ pdram_timing->mr11 = LPDDR4_CAODT_48 | tmp;
+ break;
+ case 40:
+ default:
+ pdram_timing->mr11 = LPDDR4_CAODT_40 | tmp;
+ break;
+ }
+ } else {
+ pdram_timing->mr11 = LPDDR4_CAODT_DIS | tmp;
+ }
+
+ pdram_timing->tinit1 = (LPDDR4_TINIT1 * nmhz + 999) / 1000;
+ pdram_timing->tinit2 = (LPDDR4_TINIT2 * nmhz + 999) / 1000;
+ pdram_timing->tinit3 = (LPDDR4_TINIT3 * nmhz + 999) / 1000;
+ pdram_timing->tinit4 = (LPDDR4_TINIT4 * nmhz + 999) / 1000;
+ pdram_timing->tinit5 = (LPDDR4_TINIT5 * nmhz + 999) / 1000;
+ pdram_timing->trstl = (LPDDR4_TRSTL * nmhz + 999) / 1000;
+ pdram_timing->trsth = (LPDDR4_TRSTH * nmhz + 999) / 1000;
+ /* tREFI, average periodic refresh interval, 3.9us(4Gb-16Gb) */
+ pdram_timing->trefi = (LPDDR4_TREFI_3_9_US * nmhz + 999) / 1000;
+ /* base timing */
+ tmp = ((LPDDR4_TRCD * nmhz + 999) / 1000);
+ pdram_timing->trcd = max(4, tmp);
+ trppb_tmp = ((LPDDR4_TRP_PB * nmhz + 999) / 1000);
+ trppb_tmp = max(4, trppb_tmp);
+ pdram_timing->trppb = trppb_tmp;
+ trp_tmp = ((LPDDR4_TRP_AB * nmhz + 999) / 1000);
+ trp_tmp = max(4, trp_tmp);
+ pdram_timing->trp = trp_tmp;
+ tras_tmp = ((LPDDR4_TRAS * nmhz + 999) / 1000);
+ tras_tmp = max(3, tras_tmp);
+ pdram_timing->tras_min = tras_tmp;
+ pdram_timing->trc = (tras_tmp + trp_tmp);
+ tmp = ((LPDDR4_TRRD * nmhz + 999) / 1000);
+ pdram_timing->trrd = max(4, tmp);
+ if (timing_config->bl == 32)
+ pdram_timing->tccd = LPDDR4_TCCD_BL16;
+ else
+ pdram_timing->tccd = LPDDR4_TCCD_BL32;
+ pdram_timing->tccdmw = 4 * pdram_timing->tccd;
+ tmp = ((LPDDR4_TWTR * nmhz + 999) / 1000);
+ pdram_timing->twtr = max(8, tmp);
+ pdram_timing->trtw = ((LPDDR4_TRTW * nmhz + 999) / 1000);
+ pdram_timing->tras_max = ((LPDDR4_TRAS_MAX * nmhz + 999) / 1000);
+ pdram_timing->tfaw = (LPDDR4_TFAW * nmhz + 999) / 1000;
+ if (ddr_capability_per_die > 0x60000000) {
+ /* >= 12Gb */
+ pdram_timing->trfc =
+ (LPDDR4_TRFC_12GBIT * nmhz + 999) / 1000;
+ tmp = (((LPDDR4_TRFC_12GBIT + 7) * nmhz + (nmhz >> 1) +
+ 999) / 1000);
+ } else if (ddr_capability_per_die > 0x30000000) {
+ pdram_timing->trfc =
+ (LPDDR4_TRFC_6GBIT * nmhz + 999) / 1000;
+ tmp = (((LPDDR4_TRFC_6GBIT + 7) * nmhz + (nmhz >> 1) +
+ 999) / 1000);
+ } else {
+ pdram_timing->trfc =
+ (LPDDR4_TRFC_4GBIT * nmhz + 999) / 1000;
+ tmp = (((LPDDR4_TRFC_4GBIT + 7) * nmhz + (nmhz >> 1) +
+ 999) / 1000);
+ }
+ pdram_timing->txsr = max(2, tmp);
+ pdram_timing->txsnr = max(2, tmp);
+ /* tdqsck use rounded down */
+ pdram_timing->tdqsck = ((LPDDR4_TDQSCK_MIN * nmhz +
+ (nmhz >> 1)) / 1000);
+ pdram_timing->tdqsck_max = ((LPDDR4_TDQSCK_MAX * nmhz +
+ (nmhz >> 1) + 999) / 1000);
+ pdram_timing->tppd = LPDDR4_TPPD;
+ /* pd and sr */
+ tmp = ((LPDDR4_TXP * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->txp = max(5, tmp);
+ tmp = ((LPDDR4_TCKE * nmhz + (nmhz >> 1) + 999) / 1000);
+ pdram_timing->tcke = max(4, tmp);
+ tmp = ((LPDDR4_TESCKE * nmhz +
+ ((nmhz * 3) / 4) +
+ 999) / 1000);
+ pdram_timing->tescke = max(3, tmp);
+ tmp = ((LPDDR4_TSR * nmhz + 999) / 1000);
+ pdram_timing->tsr = max(3, tmp);
+ tmp = ((LPDDR4_TCMDCKE * nmhz +
+ ((nmhz * 3) / 4) +
+ 999) / 1000);
+ pdram_timing->tcmdcke = max(3, tmp);
+ pdram_timing->tcscke = ((LPDDR4_TCSCKE * nmhz +
+ ((nmhz * 3) / 4) +
+ 999) / 1000);
+ tmp = ((LPDDR4_TCKELCS * nmhz + 999) / 1000);
+ pdram_timing->tckelcs = max(5, tmp);
+ pdram_timing->tcsckeh = ((LPDDR4_TCSCKEH * nmhz +
+ ((nmhz * 3) / 4) +
+ 999) / 1000);
+ tmp = ((LPDDR4_TCKEHCS * nmhz +
+ (nmhz >> 1) + 999) / 1000);
+ pdram_timing->tckehcs = max(5, tmp);
+ tmp = ((LPDDR4_TMRWCKEL * nmhz + 999) / 1000);
+ pdram_timing->tmrwckel = max(10, tmp);
+ tmp = ((LPDDR4_TCKELCMD * nmhz + (nmhz >> 1) +
+ 999) / 1000);
+ pdram_timing->tckelcmd = max(3, tmp);
+ tmp = ((LPDDR4_TCKEHCMD * nmhz + (nmhz >> 1) +
+ 999) / 1000);
+ pdram_timing->tckehcmd = max(3, tmp);
+ tmp = ((LPDDR4_TCKELPD * nmhz + (nmhz >> 1) +
+ 999) / 1000);
+ pdram_timing->tckelpd = max(3, tmp);
+ tmp = ((LPDDR4_TCKCKEL * nmhz + (nmhz >> 1) +
+ 999) / 1000);
+ pdram_timing->tckckel = max(3, tmp);
+ /* mode register timing */
+ tmp = ((LPDDR4_TMRD * nmhz + 999) / 1000);
+ pdram_timing->tmrd = max(10, tmp);
+ pdram_timing->tmrr = LPDDR4_TMRR;
+ pdram_timing->tmrri = pdram_timing->trcd + 3;
+ /* ODT */
+ pdram_timing->todton = (LPDDR4_TODTON * nmhz + (nmhz >> 1) + 999)
+ / 1000;
+ /* ZQ */
+ pdram_timing->tzqcal = (LPDDR4_TZQCAL * nmhz + 999) / 1000;
+ tmp = ((LPDDR4_TZQLAT * nmhz + 999) / 1000);
+ pdram_timing->tzqlat = max(8, tmp);
+ tmp = ((LPDDR4_TZQRESET * nmhz + 999) / 1000);
+ pdram_timing->tzqreset = max(3, tmp);
+ tmp = ((LPDDR4_TZQCKE * nmhz +
+ ((nmhz * 3) / 4) +
+ 999) / 1000);
+ pdram_timing->tzqcke = max(3, tmp);
+ /* write leveling */
+ pdram_timing->twlmrd = LPDDR4_TWLMRD;
+ pdram_timing->twlo = (LPDDR4_TWLO * nmhz + 999) / 1000;
+ pdram_timing->twldqsen = LPDDR4_TWLDQSEN;
+ /* CA training */
+ pdram_timing->tcaent = (LPDDR4_TCAENT * nmhz + 999) / 1000;
+ pdram_timing->tadr = (LPDDR4_TADR * nmhz + 999) / 1000;
+ pdram_timing->tmrz = (LPDDR4_TMRZ * nmhz + (nmhz >> 1) + 999) / 1000;
+ pdram_timing->tvref_long = (LPDDR4_TVREF_LONG * nmhz + 999) / 1000;
+ pdram_timing->tvref_short = (LPDDR4_TVREF_SHORT * nmhz + 999) / 1000;
+ /* VRCG */
+ pdram_timing->tvrcg_enable = (LPDDR4_TVRCG_ENABLE * nmhz +
+ 999) / 1000;
+ pdram_timing->tvrcg_disable = (LPDDR4_TVRCG_DISABLE * nmhz +
+ 999) / 1000;
+ /* FSP */
+ pdram_timing->tfc_long = (LPDDR4_TFC_LONG * nmhz + 999) / 1000;
+ tmp = (LPDDR4_TCKFSPE * nmhz + (nmhz >> 1) + 999) / 1000;
+ pdram_timing->tckfspe = max(4, tmp);
+ tmp = (LPDDR4_TCKFSPX * nmhz + (nmhz >> 1) + 999) / 1000;
+ pdram_timing->tckfspx = max(4, tmp);
+}
+
+/*
+ * Description: calculate the spec timings for the "dram_type" selected in
+ * the input "timing_config" and write them to "pdram_timing"
+ * parameters:
+ *   input: timing_config
+ *   output: pdram_timing
+ * NOTE: MR ODT is set here and needs to be disabled by the controller
+ */
+void dram_get_parameter(struct timing_related_config *timing_config,
+ struct dram_timing_t *pdram_timing)
+{
+ switch (timing_config->dram_type) {
+ case DDR3:
+ ddr3_get_parameter(timing_config, pdram_timing);
+ break;
+ case LPDDR2:
+ lpddr2_get_parameter(timing_config, pdram_timing);
+ break;
+ case LPDDR3:
+ lpddr3_get_parameter(timing_config, pdram_timing);
+ break;
+ case LPDDR4:
+ lpddr4_get_parameter(timing_config, pdram_timing);
+ break;
+ default:
+ /* Do nothing in default case */
+ break;
+ }
+}
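
Note: the conversions in lpddr4_get_parameter() above all round nanosecond
values up to whole clock cycles with the idiom (t_ns * nmhz + 999) / 1000,
folding in (nmhz >> 1) or ((nmhz * 3) / 4) when the spec value carries an
extra 0.5 ns or 0.75 ns, and then apply the tCK floor from the JEDEC note
with max(). A minimal standalone sketch of that arithmetic, using the
LPDDR4_TZQLAT value from this file (the helper name is ours, not part of
the driver):

#include <stdint.h>

#define LPDDR4_TZQLAT	(30)	/* tZQLAT, max(30ns, 8tCK) */

/* Round a delay given in whole ns up to clock cycles at nmhz MHz. */
static uint32_t ns_to_cycles(uint32_t ns, uint32_t nmhz)
{
	return (ns * nmhz + 999) / 1000;
}

int main(void)
{
	/* 30 ns at 800 MHz: (30 * 800 + 999) / 1000 = 24 cycles. */
	uint32_t fast = ns_to_cycles(LPDDR4_TZQLAT, 800);
	/* 30 ns at 50 MHz rounds up to 2; the 8 tCK floor then applies. */
	uint32_t slow = ns_to_cycles(LPDDR4_TZQLAT, 50);

	return (fast == 24 && slow == 2) ? 0 : 1;
}
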
diff --git a/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.h b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.h
new file mode 100644
index 0000000..9cda22c
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.h
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DRAM_SPEC_TIMING_H
+#define DRAM_SPEC_TIMING_H
+
+#include <stdint.h>
+
+enum ddr3_speed_rate {
+ /* 5-5-5 */
+ DDR3_800D = 0,
+ /* 6-6-6 */
+ DDR3_800E = 1,
+ /* 6-6-6 */
+ DDR3_1066E = 2,
+ /* 7-7-7 */
+ DDR3_1066F = 3,
+ /* 8-8-8 */
+ DDR3_1066G = 4,
+ /* 7-7-7 */
+ DDR3_1333F = 5,
+ /* 8-8-8 */
+ DDR3_1333G = 6,
+ /* 9-9-9 */
+ DDR3_1333H = 7,
+ /* 10-10-10 */
+ DDR3_1333J = 8,
+ /* 8-8-8 */
+ DDR3_1600G = 9,
+ /* 9-9-9 */
+ DDR3_1600H = 10,
+ /* 10-10-10 */
+ DDR3_1600J = 11,
+ /* 11-11-11 */
+ DDR3_1600K = 12,
+ /* 10-10-10 */
+ DDR3_1866J = 13,
+ /* 11-11-11 */
+ DDR3_1866K = 14,
+ /* 12-12-12 */
+ DDR3_1866L = 15,
+ /* 13-13-13 */
+ DDR3_1866M = 16,
+ /* 11-11-11 */
+ DDR3_2133K = 17,
+ /* 12-12-12 */
+ DDR3_2133L = 18,
+ /* 13-13-13 */
+ DDR3_2133M = 19,
+ /* 14-14-14 */
+ DDR3_2133N = 20,
+ DDR3_DEFAULT = 21,
+};
+
+#define max(a, b) (((a) > (b)) ? (a) : (b))
+#define range(mi, val, ma) (((ma) > (val)) ? (max(mi, val)) : (ma))
+
+struct dram_timing_t {
+ /* unit MHz */
+ uint32_t mhz;
+ /* some timing unit is us */
+ uint32_t tinit1;
+ uint32_t tinit2;
+ uint32_t tinit3;
+ uint32_t tinit4;
+ uint32_t tinit5;
+ /* reset low, DDR3:200us */
+ uint32_t trstl;
+ /* reset high to CKE high, DDR3:500us */
+ uint32_t trsth;
+ uint32_t trefi;
+ /* base */
+ uint32_t trcd;
+ /* trp per bank */
+ uint32_t trppb;
+ /* trp all bank */
+ uint32_t trp;
+ uint32_t twr;
+ uint32_t tdal;
+ uint32_t trtp;
+ uint32_t trc;
+ uint32_t trrd;
+ uint32_t tccd;
+ uint32_t twtr;
+ uint32_t trtw;
+ uint32_t tras_max;
+ uint32_t tras_min;
+ uint32_t tfaw;
+ uint32_t trfc;
+ uint32_t tdqsck;
+ uint32_t tdqsck_max;
+ /* pd or sr */
+ uint32_t txsr;
+ uint32_t txsnr;
+ uint32_t txp;
+ uint32_t txpdll;
+ uint32_t tdllk;
+ uint32_t tcke;
+ uint32_t tckesr;
+ uint32_t tcksre;
+ uint32_t tcksrx;
+ uint32_t tdpd;
+	/* mode register timing */
+ uint32_t tmod;
+ uint32_t tmrd;
+ uint32_t tmrr;
+ uint32_t tmrri;
+ /* ODT */
+ uint32_t todton;
+ /* ZQ */
+ uint32_t tzqinit;
+ uint32_t tzqcs;
+ uint32_t tzqoper;
+ uint32_t tzqreset;
+ /* Write Leveling */
+ uint32_t twlmrd;
+ uint32_t twlo;
+ uint32_t twldqsen;
+ /* CA Training */
+ uint32_t tcackel;
+ uint32_t tcaent;
+ uint32_t tcamrd;
+ uint32_t tcackeh;
+ uint32_t tcaext;
+ uint32_t tadr;
+ uint32_t tmrz;
+ uint32_t tcacd;
+ /* mode register */
+ uint32_t mr[4];
+ uint32_t mr11;
+ /* lpddr4 spec */
+ uint32_t mr12;
+ uint32_t mr13;
+ uint32_t mr14;
+ uint32_t mr16;
+ uint32_t mr17;
+ uint32_t mr20;
+ uint32_t mr22;
+ uint32_t tccdmw;
+ uint32_t tppd;
+ uint32_t tescke;
+ uint32_t tsr;
+ uint32_t tcmdcke;
+ uint32_t tcscke;
+ uint32_t tckelcs;
+ uint32_t tcsckeh;
+ uint32_t tckehcs;
+ uint32_t tmrwckel;
+ uint32_t tzqcal;
+ uint32_t tzqlat;
+ uint32_t tzqcke;
+ uint32_t tvref_long;
+ uint32_t tvref_short;
+ uint32_t tvrcg_enable;
+ uint32_t tvrcg_disable;
+ uint32_t tfc_long;
+ uint32_t tckfspe;
+ uint32_t tckfspx;
+ uint32_t tckehcmd;
+ uint32_t tckelcmd;
+ uint32_t tckelpd;
+ uint32_t tckckel;
+ /* other */
+ uint32_t al;
+ uint32_t cl;
+ uint32_t cwl;
+ uint32_t bl;
+};
+
+struct dram_info_t {
+	/* speed_rate is only used for DDR3 */
+ enum ddr3_speed_rate speed_rate;
+ /* 1: use CS0, 2: use CS0 and CS1 */
+ uint32_t cs_cnt;
+ /* give the max per-die capability on each rank/cs */
+ uint32_t per_die_capability[2];
+};
+
+struct timing_related_config {
+ struct dram_info_t dram_info[2];
+ uint32_t dram_type;
+ /* MHz */
+ uint32_t freq;
+ uint32_t ch_cnt;
+ uint32_t bl;
+ /* 1:auto precharge, 0:never auto precharge */
+ uint32_t ap;
+ /*
+ * 1:dll bypass, 0:dll normal
+ * dram and controller dll bypass at the same time
+ */
+ uint32_t dllbp;
+ /* 1:odt enable, 0:odt disable */
+ uint32_t odt;
+	/* 1:enable, 0:disable */
+ uint32_t rdbi;
+ uint32_t wdbi;
+ /* dram driver strength */
+ uint32_t dramds;
+	/* dram ODT; invalid if odt=0 */
+	uint32_t dramodt;
+	/*
+	 * ca ODT; invalid if odt=0
+	 * only used by LPDDR4
+	 */
+ uint32_t caodt;
+};
+
+/* mr0 for ddr3 */
+#define DDR3_BL8 (0)
+#define DDR3_BC4_8 (1)
+#define DDR3_BC4 (2)
+#define DDR3_CL(n) (((((n) - 4) & 0x7) << 4)\
+ | ((((n) - 4) & 0x8) >> 1))
+#define DDR3_WR(n) (((n) & 0x7) << 9)
+#define DDR3_DLL_RESET (1 << 8)
+#define DDR3_DLL_DERESET (0 << 8)
+
+/* mr1 for ddr3 */
+#define DDR3_DLL_ENABLE (0)
+#define DDR3_DLL_DISABLE (1)
+#define DDR3_MR1_AL(n) (((n) & 0x3) << 3)
+
+#define DDR3_DS_40 (0)
+#define DDR3_DS_34 (1 << 1)
+#define DDR3_RTT_NOM_DIS (0)
+#define DDR3_RTT_NOM_60 (1 << 2)
+#define DDR3_RTT_NOM_120 (1 << 6)
+#define DDR3_RTT_NOM_40 ((1 << 2) | (1 << 6))
+#define DDR3_TDQS (1 << 11)
+
+/* mr2 for ddr3 */
+#define DDR3_MR2_CWL(n) ((((n) - 5) & 0x7) << 3)
+#define DDR3_RTT_WR_DIS (0)
+#define DDR3_RTT_WR_60 (1 << 9)
+#define DDR3_RTT_WR_120 (2 << 9)
+
+/*
+ * MR0 (Device Information)
+ * 0:DAI complete, 1:DAI still in progress
+ */
+#define LPDDR2_DAI (0x1)
+/* 0:S2 or S4 SDRAM, 1:NVM */
+#define LPDDR2_DI (0x1 << 1)
+/* 0:DNV not supported, 1:DNV supported */
+#define LPDDR2_DNVI (0x1 << 2)
+#define LPDDR2_RZQI (0x3 << 3)
+
+/*
+ * 00:RZQ self test not supported,
+ * 01:ZQ-pin may connect to VDDCA or float
+ * 10:ZQ-pin may short to GND.
+ * 11:ZQ-pin self test completed, no error condition detected.
+ */
+
+/* MR1 (Device Feature) */
+#define LPDDR2_BL4 (0x2)
+#define LPDDR2_BL8 (0x3)
+#define LPDDR2_BL16 (0x4)
+#define LPDDR2_N_WR(n) (((n) - 2) << 5)
+
+/* MR2 (Device Feature 2) */
+#define LPDDR2_RL3_WL1 (0x1)
+#define LPDDR2_RL4_WL2 (0x2)
+#define LPDDR2_RL5_WL2 (0x3)
+#define LPDDR2_RL6_WL3 (0x4)
+#define LPDDR2_RL7_WL4 (0x5)
+#define LPDDR2_RL8_WL4 (0x6)
+
+/* MR3 (IO Configuration 1) */
+#define LPDDR2_DS_34 (0x1)
+#define LPDDR2_DS_40 (0x2)
+#define LPDDR2_DS_48 (0x3)
+#define LPDDR2_DS_60 (0x4)
+#define LPDDR2_DS_80 (0x6)
+/* optional */
+#define LPDDR2_DS_120 (0x7)
+
+/* MR4 (Device Temperature) */
+#define LPDDR2_TREF_MASK (0x7)
+#define LPDDR2_4_TREF (0x1)
+#define LPDDR2_2_TREF (0x2)
+#define LPDDR2_1_TREF (0x3)
+#define LPDDR2_025_TREF (0x5)
+#define LPDDR2_025_TREF_DERATE (0x6)
+
+#define LPDDR2_TUF (0x1 << 7)
+
+/* MR8 (Basic configuration 4) */
+#define LPDDR2_S4 (0x0)
+#define LPDDR2_S2 (0x1)
+#define LPDDR2_N (0x2)
+/* Unit:MB */
+#define LPDDR2_DENSITY(mr8) (8 << (((mr8) >> 2) & 0xf))
+#define LPDDR2_IO_WIDTH(mr8) (32 >> (((mr8) >> 6) & 0x3))
+
+/* MR10 (Calibration) */
+#define LPDDR2_ZQINIT (0xff)
+#define LPDDR2_ZQCL (0xab)
+#define LPDDR2_ZQCS (0x56)
+#define LPDDR2_ZQRESET (0xc3)
+
+/* MR16 (PASR Bank Mask), S2 SDRAM Only */
+#define LPDDR2_PASR_FULL (0x0)
+#define LPDDR2_PASR_1_2 (0x1)
+#define LPDDR2_PASR_1_4 (0x2)
+#define LPDDR2_PASR_1_8 (0x3)
+
+/*
+ * MR0 (Device Information)
+ * 0:DAI complete,
+ * 1:DAI still in progress
+ */
+#define LPDDR3_DAI (0x1)
+/*
+ * 00:RZQ self test not supported,
+ * 01:ZQ-pin may connect to VDDCA or float
+ * 10:ZQ-pin may short to GND.
+ * 11:ZQ-pin self test completed, no error condition detected.
+ */
+#define LPDDR3_RZQI (0x3 << 3)
+/*
+ * 0:DRAM does not support WL(Set B),
+ * 1:DRAM supports WL(Set B)
+ */
+#define LPDDR3_WL_SUPOT (1 << 6)
+/*
+ * 0:DRAM does not support RL=3,nWR=3,WL=1;
+ * 1:DRAM supports RL=3,nWR=3,WL=1 for frequencies <=166
+ */
+#define LPDDR3_RL3_SUPOT (1 << 7)
+
+/* MR1 (Device Feature) */
+#define LPDDR3_BL8 (0x3)
+#define LPDDR3_N_WR(n) ((n) << 5)
+
+/* MR2 (Device Feature 2), WL Set A,default */
+/* <=166MHz,optional*/
+#define LPDDR3_RL3_WL1 (0x1)
+/* <=400MHz*/
+#define LPDDR3_RL6_WL3 (0x4)
+/* <=533MHz*/
+#define LPDDR3_RL8_WL4 (0x6)
+/* <=600MHz*/
+#define LPDDR3_RL9_WL5 (0x7)
+/* <=667MHz,default*/
+#define LPDDR3_RL10_WL6 (0x8)
+/* <=733MHz*/
+#define LPDDR3_RL11_WL6 (0x9)
+/* <=800MHz*/
+#define LPDDR3_RL12_WL6 (0xa)
+/* <=933MHz*/
+#define LPDDR3_RL14_WL8 (0xc)
+/* <=1066MHz*/
+#define LPDDR3_RL16_WL8 (0xe)
+
+/* WL Set B, optional */
+/* <=667MHz,default*/
+#define LPDDR3_RL10_WL8 (0x8)
+/* <=733MHz*/
+#define LPDDR3_RL11_WL9 (0x9)
+/* <=800MHz*/
+#define LPDDR3_RL12_WL9 (0xa)
+/* <=933MHz*/
+#define LPDDR3_RL14_WL11 (0xc)
+/* <=1066MHz*/
+#define LPDDR3_RL16_WL13 (0xe)
+
+/* 1:enable nWR programming > 9(default)*/
+#define LPDDR3_N_WRE (1 << 4)
+/* 1:Select WL Set B*/
+#define LPDDR3_WL_S (1 << 6)
+/* 1:enable*/
+#define LPDDR3_WR_LEVEL (1 << 7)
+
+/* MR3 (IO Configuration 1) */
+#define LPDDR3_DS_34 (0x1)
+#define LPDDR3_DS_40 (0x2)
+#define LPDDR3_DS_48 (0x3)
+#define LPDDR3_DS_60 (0x4)
+#define LPDDR3_DS_80 (0x6)
+#define LPDDR3_DS_34D_40U (0x9)
+#define LPDDR3_DS_40D_48U (0xa)
+#define LPDDR3_DS_34D_48U (0xb)
+
+/* MR4 (Device Temperature) */
+#define LPDDR3_TREF_MASK (0x7)
+/* SDRAM Low temperature operating limit exceeded */
+#define LPDDR3_LT_EXED (0x0)
+#define LPDDR3_4_TREF (0x1)
+#define LPDDR3_2_TREF (0x2)
+#define LPDDR3_1_TREF (0x3)
+#define LPDDR3_05_TREF (0x4)
+#define LPDDR3_025_TREF (0x5)
+#define LPDDR3_025_TREF_DERATE (0x6)
+/* SDRAM High temperature operating limit exceeded */
+#define LPDDR3_HT_EXED (0x7)
+
+/* 1:value has changed since last read of MR4 */
+#define LPDDR3_TUF (0x1 << 7)
+
+/* MR8 (Basic configuration 4) */
+#define LPDDR3_S8 (0x3)
+#define LPDDR3_DENSITY(mr8) (8 << (((mr8) >> 2) & 0xf))
+#define LPDDR3_IO_WIDTH(mr8) (32 >> (((mr8) >> 6) & 0x3))
+
+/* MR10 (Calibration) */
+#define LPDDR3_ZQINIT (0xff)
+#define LPDDR3_ZQCL (0xab)
+#define LPDDR3_ZQCS (0x56)
+#define LPDDR3_ZQRESET (0xc3)
+
+/* MR11 (ODT Control) */
+#define LPDDR3_ODT_60 (1)
+#define LPDDR3_ODT_120 (2)
+#define LPDDR3_ODT_240 (3)
+#define LPDDR3_ODT_DIS (0)
+
+/* MR2 (Device Feature 2) */
+/* RL & nRTP for DBI-RD Disabled */
+#define LPDDR4_RL6_NRTP8 (0x0)
+#define LPDDR4_RL10_NRTP8 (0x1)
+#define LPDDR4_RL14_NRTP8 (0x2)
+#define LPDDR4_RL20_NRTP8 (0x3)
+#define LPDDR4_RL24_NRTP10 (0x4)
+#define LPDDR4_RL28_NRTP12 (0x5)
+#define LPDDR4_RL32_NRTP14 (0x6)
+#define LPDDR4_RL36_NRTP16 (0x7)
+/* RL & nRTP for DBI-RD Enabled */
+#define LPDDR4_RL12_NRTP8 (0x1)
+#define LPDDR4_RL16_NRTP8 (0x2)
+#define LPDDR4_RL22_NRTP8 (0x3)
+#define LPDDR4_RL28_NRTP10 (0x4)
+#define LPDDR4_RL32_NRTP12 (0x5)
+#define LPDDR4_RL36_NRTP14 (0x6)
+#define LPDDR4_RL40_NRTP16 (0x7)
+/* WL Set A,default */
+#define LPDDR4_A_WL4 (0x0)
+#define LPDDR4_A_WL6 (0x1)
+#define LPDDR4_A_WL8 (0x2)
+#define LPDDR4_A_WL10 (0x3)
+#define LPDDR4_A_WL12 (0x4)
+#define LPDDR4_A_WL14 (0x5)
+#define LPDDR4_A_WL16 (0x6)
+#define LPDDR4_A_WL18 (0x7)
+/* WL Set B, optional */
+#define LPDDR4_B_WL4 (0x0 << 3)
+#define LPDDR4_B_WL8 (0x1 << 3)
+#define LPDDR4_B_WL12 (0x2 << 3)
+#define LPDDR4_B_WL18 (0x3 << 3)
+#define LPDDR4_B_WL22 (0x4 << 3)
+#define LPDDR4_B_WL26 (0x5 << 3)
+#define LPDDR4_B_WL30 (0x6 << 3)
+#define LPDDR4_B_WL34 (0x7 << 3)
+/* 1:Select WL Set B*/
+#define LPDDR4_WL_B (1 << 6)
+/* 1:enable*/
+#define LPDDR4_WR_LEVEL (1 << 7)
+
+/* MR3 */
+#define LPDDR4_VDDQ_2_5 (0)
+#define LPDDR4_VDDQ_3 (1)
+#define LPDDR4_WRPST_0_5_TCK (0 << 1)
+#define LPDDR4_WRPST_1_5_TCK (1 << 1)
+#define LPDDR4_PPR_EN (1 << 2)
+/* PDDS */
+#define LPDDR4_PDDS_240 (0x1 << 3)
+#define LPDDR4_PDDS_120 (0x2 << 3)
+#define LPDDR4_PDDS_80 (0x3 << 3)
+#define LPDDR4_PDDS_60 (0x4 << 3)
+#define LPDDR4_PDDS_48 (0x5 << 3)
+#define LPDDR4_PDDS_40 (0x6 << 3)
+#define LPDDR4_DBI_RD_EN (1 << 6)
+#define LPDDR4_DBI_WR_EN (1 << 7)
+
+/* MR11 (ODT Control) */
+#define LPDDR4_DQODT_240 (1)
+#define LPDDR4_DQODT_120 (2)
+#define LPDDR4_DQODT_80 (3)
+#define LPDDR4_DQODT_60 (4)
+#define LPDDR4_DQODT_48 (5)
+#define LPDDR4_DQODT_40 (6)
+#define LPDDR4_DQODT_DIS (0)
+#define LPDDR4_CAODT_240 (1 << 4)
+#define LPDDR4_CAODT_120 (2 << 4)
+#define LPDDR4_CAODT_80 (3 << 4)
+#define LPDDR4_CAODT_60 (4 << 4)
+#define LPDDR4_CAODT_48 (5 << 4)
+#define LPDDR4_CAODT_40 (6 << 4)
+#define LPDDR4_CAODT_DIS (0 << 4)
+
+/*
+ * Description: calculate the spec timings for the "dram_type" selected in
+ * the input "timing_config" and write them to "pdram_timing"
+ * parameters:
+ *   input: timing_config
+ *   output: pdram_timing
+ * NOTE: MR ODT is set here and needs to be disabled by the controller
+ */
+void dram_get_parameter(struct timing_related_config *timing_config,
+ struct dram_timing_t *pdram_timing);
+
+#endif /* DRAM_SPEC_TIMING_H */
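
Note: a minimal sketch of how a caller might fill struct timing_related_config
and request the spec timings via dram_get_parameter(); the field values below
are purely illustrative, and the LPDDR4 dram_type constant is assumed to come
from the platform's dram.h rather than this header:

#include <string.h>

#include <dram.h>			/* assumed to provide LPDDR4 */
#include <dram_spec_timing.h>

static struct dram_timing_t timing;

void example_request_lpddr4_timings(void)
{
	struct timing_related_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.dram_type = LPDDR4;			/* from dram.h */
	cfg.freq = 800;				/* MHz */
	cfg.ch_cnt = 2;
	cfg.bl = 16;
	cfg.odt = 1;
	cfg.dramds = 40;			/* ohm */
	cfg.dramodt = 60;			/* ohm */
	cfg.caodt = 240;			/* ohm */
	cfg.dram_info[0].cs_cnt = 1;
	cfg.dram_info[0].per_die_capability[0] = 0x40000000; /* 8Gbit in bytes */

	dram_get_parameter(&cfg, &timing);
	/* timing.cl, timing.cwl, timing.trcd, timing.mr[2], ... are now set. */
}
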
diff --git a/plat/rockchip/rk3399/drivers/dram/suspend.c b/plat/rockchip/rk3399/drivers/dram/suspend.c
new file mode 100644
index 0000000..a8b1c32
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/suspend.c
@@ -0,0 +1,852 @@
+/*
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+
+#include <dram.h>
+#include <plat_private.h>
+#include <pmu.h>
+#include <pmu_bits.h>
+#include <pmu_regs.h>
+#include <rk3399_def.h>
+#include <secure.h>
+#include <soc.h>
+#include <suspend.h>
+
+#define PMUGRF_OS_REG0 0x300
+#define PMUGRF_OS_REG1 0x304
+#define PMUGRF_OS_REG2 0x308
+#define PMUGRF_OS_REG3 0x30c
+
+#define CRU_SFTRST_DDR_CTRL(ch, n) ((0x1 << (8 + 16 + (ch) * 4)) | \
+ ((n) << (8 + (ch) * 4)))
+#define CRU_SFTRST_DDR_PHY(ch, n) ((0x1 << (9 + 16 + (ch) * 4)) | \
+ ((n) << (9 + (ch) * 4)))
+
+#define FBDIV_ENC(n) ((n) << 16)
+#define FBDIV_DEC(n) (((n) >> 16) & 0xfff)
+#define POSTDIV2_ENC(n) ((n) << 12)
+#define POSTDIV2_DEC(n) (((n) >> 12) & 0x7)
+#define POSTDIV1_ENC(n) ((n) << 8)
+#define POSTDIV1_DEC(n) (((n) >> 8) & 0x7)
+#define REFDIV_ENC(n) (n)
+#define REFDIV_DEC(n) ((n) & 0x3f)
+
+/* PMU CRU */
+#define PMUCRU_RSTNHOLD_CON0 0x120
+#define PMUCRU_RSTNHOLD_CON1 0x124
+
+#define PRESET_GPIO0_HOLD(n) (((n) << 7) | WMSK_BIT(7))
+#define PRESET_GPIO1_HOLD(n) (((n) << 8) | WMSK_BIT(8))
+
+#define SYS_COUNTER_FREQ_IN_MHZ (SYS_COUNTER_FREQ_IN_TICKS / 1000000)
+
+__pmusramdata uint32_t dpll_data[PLL_CON_COUNT];
+__pmusramdata uint32_t cru_clksel_con6;
+__pmusramdata uint8_t pmu_enable_watchdog0;
+
+/*
+ * Copy @num registers from @src to @dst
+ */
+static __pmusramfunc void sram_regcpy(uintptr_t dst, uintptr_t src,
+ uint32_t num)
+{
+ while (num--) {
+ mmio_write_32(dst, mmio_read_32(src));
+ dst += sizeof(uint32_t);
+ src += sizeof(uint32_t);
+ }
+}
+
+/*
+ * Copy @num registers from @src to @dst
+ * This is intentionally a copy of the sram_regcpy function. PMUSRAM functions
+ * cannot be called from code running in DRAM.
+ */
+static void dram_regcpy(uintptr_t dst, uintptr_t src, uint32_t num)
+{
+ while (num--) {
+ mmio_write_32(dst, mmio_read_32(src));
+ dst += sizeof(uint32_t);
+ src += sizeof(uint32_t);
+ }
+}
+
+static __pmusramfunc uint32_t sram_get_timer_value(void)
+{
+ /*
+ * Generic delay timer implementation expects the timer to be a down
+ * counter. We apply bitwise NOT operator to the tick values returned
+ * by read_cntpct_el0() to simulate the down counter.
+ */
+ return (uint32_t)(~read_cntpct_el0());
+}
+
+static __pmusramfunc void sram_udelay(uint32_t usec)
+{
+ uint32_t start, cnt, delta, total_ticks;
+
+ /* counter is decreasing */
+ start = sram_get_timer_value();
+ total_ticks = usec * SYS_COUNTER_FREQ_IN_MHZ;
+ do {
+ cnt = sram_get_timer_value();
+ if (cnt > start) {
+ delta = UINT32_MAX - cnt;
+ delta += start;
+ } else
+ delta = start - cnt;
+ } while (delta <= total_ticks);
+}
+
+static __pmusramfunc void configure_sgrf(void)
+{
+ /*
+ * SGRF_DDR_RGN_DPLL_CLK and SGRF_DDR_RGN_RTC_CLK:
+ * IC ECO bug, need to set this register.
+ *
+ * SGRF_DDR_RGN_BYPS:
+ * After the PD_CENTER suspend/resume, the DDR region
+ * related registers in the SGRF will be reset, we
+ * need to re-initialize them.
+ */
+ mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
+ SGRF_DDR_RGN_DPLL_CLK |
+ SGRF_DDR_RGN_RTC_CLK |
+ SGRF_DDR_RGN_BYPS);
+}
+
+static __pmusramfunc void rkclk_ddr_reset(uint32_t channel, uint32_t ctl,
+ uint32_t phy)
+{
+ channel &= 0x1;
+ ctl &= 0x1;
+ phy &= 0x1;
+ mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(4),
+ CRU_SFTRST_DDR_CTRL(channel, ctl) |
+ CRU_SFTRST_DDR_PHY(channel, phy));
+}
+
+static __pmusramfunc void phy_pctrl_reset(uint32_t ch)
+{
+ rkclk_ddr_reset(ch, 1, 1);
+ sram_udelay(10);
+ rkclk_ddr_reset(ch, 1, 0);
+ sram_udelay(10);
+ rkclk_ddr_reset(ch, 0, 0);
+ sram_udelay(10);
+}
+
+static __pmusramfunc void set_cs_training_index(uint32_t ch, uint32_t rank)
+{
+ uint32_t byte;
+
+ /* PHY_8/136/264/392 phy_per_cs_training_index_X 1bit offset_24 */
+ for (byte = 0; byte < 4; byte++)
+ mmio_clrsetbits_32(PHY_REG(ch, 8 + (128 * byte)), 0x1 << 24,
+ rank << 24);
+}
+
+static __pmusramfunc void select_per_cs_training_index(uint32_t ch,
+ uint32_t rank)
+{
+ /* PHY_84 PHY_PER_CS_TRAINING_EN_0 1bit offset_16 */
+ if ((mmio_read_32(PHY_REG(ch, 84)) >> 16) & 1)
+ set_cs_training_index(ch, rank);
+}
+
+static __pmusramfunc void override_write_leveling_value(uint32_t ch)
+{
+ uint32_t byte;
+
+ for (byte = 0; byte < 4; byte++) {
+ /*
+ * PHY_8/136/264/392
+ * phy_per_cs_training_multicast_en_X 1bit offset_16
+ */
+ mmio_clrsetbits_32(PHY_REG(ch, 8 + (128 * byte)), 0x1 << 16,
+ 1 << 16);
+ mmio_clrsetbits_32(PHY_REG(ch, 63 + (128 * byte)),
+ 0xffffu << 16,
+ 0x200 << 16);
+ }
+
+ /* CTL_200 ctrlupd_req 1bit offset_8 */
+ mmio_clrsetbits_32(CTL_REG(ch, 200), 0x1 << 8, 0x1 << 8);
+}
+
+static __pmusramfunc int data_training(uint32_t ch,
+ struct rk3399_sdram_params *sdram_params,
+ uint32_t training_flag)
+{
+ uint32_t obs_0, obs_1, obs_2, obs_3, obs_err = 0;
+ uint32_t rank = sdram_params->ch[ch].rank;
+ uint32_t rank_mask;
+ uint32_t i, tmp;
+
+ if (sdram_params->dramtype == LPDDR4)
+ rank_mask = (rank == 1) ? 0x5 : 0xf;
+ else
+ rank_mask = (rank == 1) ? 0x1 : 0x3;
+
+ /* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */
+ mmio_setbits_32(PHY_REG(ch, 927), (1 << 22));
+
+ if (training_flag == PI_FULL_TRAINING) {
+ if (sdram_params->dramtype == LPDDR4) {
+ training_flag = PI_WRITE_LEVELING |
+ PI_READ_GATE_TRAINING |
+ PI_READ_LEVELING |
+ PI_WDQ_LEVELING;
+ } else if (sdram_params->dramtype == LPDDR3) {
+ training_flag = PI_CA_TRAINING | PI_WRITE_LEVELING |
+ PI_READ_GATE_TRAINING;
+ } else if (sdram_params->dramtype == DDR3) {
+ training_flag = PI_WRITE_LEVELING |
+ PI_READ_GATE_TRAINING |
+ PI_READ_LEVELING;
+ }
+ }
+
+ /* ca training(LPDDR4,LPDDR3 support) */
+ if ((training_flag & PI_CA_TRAINING) == PI_CA_TRAINING) {
+ for (i = 0; i < 4; i++) {
+ if (!(rank_mask & (1 << i)))
+ continue;
+
+ select_per_cs_training_index(ch, i);
+ /* PI_100 PI_CALVL_EN:RW:8:2 */
+ mmio_clrsetbits_32(PI_REG(ch, 100), 0x3 << 8, 0x2 << 8);
+
+ /* PI_92 PI_CALVL_REQ:WR:16:1,PI_CALVL_CS:RW:24:2 */
+ mmio_clrsetbits_32(PI_REG(ch, 92),
+ (0x1 << 16) | (0x3 << 24),
+ (0x1 << 16) | (i << 24));
+ while (1) {
+ /* PI_174 PI_INT_STATUS:RD:8:18 */
+ tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
+
+ /*
+ * check status obs
+ * PHY_532/660/788 phy_adr_calvl_obs1_:0:32
+ */
+ obs_0 = mmio_read_32(PHY_REG(ch, 532));
+ obs_1 = mmio_read_32(PHY_REG(ch, 660));
+ obs_2 = mmio_read_32(PHY_REG(ch, 788));
+ if (((obs_0 >> 30) & 0x3) ||
+ ((obs_1 >> 30) & 0x3) ||
+ ((obs_2 >> 30) & 0x3))
+ obs_err = 1;
+ if ((((tmp >> 11) & 0x1) == 0x1) &&
+ (((tmp >> 13) & 0x1) == 0x1) &&
+ (((tmp >> 5) & 0x1) == 0x0) &&
+ (obs_err == 0))
+ break;
+ else if ((((tmp >> 5) & 0x1) == 0x1) ||
+ (obs_err == 1))
+ return -1;
+ }
+ /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
+ mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
+ }
+ mmio_clrbits_32(PI_REG(ch, 100), 0x3 << 8);
+ }
+
+ /* write leveling(LPDDR4,LPDDR3,DDR3 support) */
+ if ((training_flag & PI_WRITE_LEVELING) == PI_WRITE_LEVELING) {
+ for (i = 0; i < rank; i++) {
+ select_per_cs_training_index(ch, i);
+ /* PI_60 PI_WRLVL_EN:RW:8:2 */
+ mmio_clrsetbits_32(PI_REG(ch, 60), 0x3 << 8, 0x2 << 8);
+ /* PI_59 PI_WRLVL_REQ:WR:8:1,PI_WRLVL_CS:RW:16:2 */
+ mmio_clrsetbits_32(PI_REG(ch, 59),
+ (0x1 << 8) | (0x3 << 16),
+ (0x1 << 8) | (i << 16));
+
+ while (1) {
+ /* PI_174 PI_INT_STATUS:RD:8:18 */
+ tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
+
+ /*
+				 * check status obs; if it reports an error,
+				 * leveling may not complete
+				 * PHY_40/168/296/424
+ * phy_wrlvl_status_obs_X:0:13
+ */
+ obs_0 = mmio_read_32(PHY_REG(ch, 40));
+ obs_1 = mmio_read_32(PHY_REG(ch, 168));
+ obs_2 = mmio_read_32(PHY_REG(ch, 296));
+ obs_3 = mmio_read_32(PHY_REG(ch, 424));
+ if (((obs_0 >> 12) & 0x1) ||
+ ((obs_1 >> 12) & 0x1) ||
+ ((obs_2 >> 12) & 0x1) ||
+ ((obs_3 >> 12) & 0x1))
+ obs_err = 1;
+ if ((((tmp >> 10) & 0x1) == 0x1) &&
+ (((tmp >> 13) & 0x1) == 0x1) &&
+ (((tmp >> 4) & 0x1) == 0x0) &&
+ (obs_err == 0))
+ break;
+ else if ((((tmp >> 4) & 0x1) == 0x1) ||
+ (obs_err == 1))
+ return -1;
+ }
+
+ /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
+ mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
+ }
+ override_write_leveling_value(ch);
+ mmio_clrbits_32(PI_REG(ch, 60), 0x3 << 8);
+ }
+
+ /* read gate training(LPDDR4,LPDDR3,DDR3 support) */
+ if ((training_flag & PI_READ_GATE_TRAINING) == PI_READ_GATE_TRAINING) {
+ for (i = 0; i < rank; i++) {
+ select_per_cs_training_index(ch, i);
+ /* PI_80 PI_RDLVL_GATE_EN:RW:24:2 */
+ mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 24,
+ 0x2 << 24);
+ /*
+ * PI_74 PI_RDLVL_GATE_REQ:WR:16:1
+ * PI_RDLVL_CS:RW:24:2
+ */
+ mmio_clrsetbits_32(PI_REG(ch, 74),
+ (0x1 << 16) | (0x3 << 24),
+ (0x1 << 16) | (i << 24));
+
+ while (1) {
+ /* PI_174 PI_INT_STATUS:RD:8:18 */
+ tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
+
+ /*
+ * check status obs
+ * PHY_43/171/299/427
+ * PHY_GTLVL_STATUS_OBS_x:16:8
+ */
+ obs_0 = mmio_read_32(PHY_REG(ch, 43));
+ obs_1 = mmio_read_32(PHY_REG(ch, 171));
+ obs_2 = mmio_read_32(PHY_REG(ch, 299));
+ obs_3 = mmio_read_32(PHY_REG(ch, 427));
+ if (((obs_0 >> (16 + 6)) & 0x3) ||
+ ((obs_1 >> (16 + 6)) & 0x3) ||
+ ((obs_2 >> (16 + 6)) & 0x3) ||
+ ((obs_3 >> (16 + 6)) & 0x3))
+ obs_err = 1;
+ if ((((tmp >> 9) & 0x1) == 0x1) &&
+ (((tmp >> 13) & 0x1) == 0x1) &&
+ (((tmp >> 3) & 0x1) == 0x0) &&
+ (obs_err == 0))
+ break;
+ else if ((((tmp >> 3) & 0x1) == 0x1) ||
+ (obs_err == 1))
+ return -1;
+ }
+ /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
+ mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
+ }
+ mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 24);
+ }
+
+ /* read leveling(LPDDR4,LPDDR3,DDR3 support) */
+ if ((training_flag & PI_READ_LEVELING) == PI_READ_LEVELING) {
+ for (i = 0; i < rank; i++) {
+ select_per_cs_training_index(ch, i);
+ /* PI_80 PI_RDLVL_EN:RW:16:2 */
+ mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 16,
+ 0x2 << 16);
+ /* PI_74 PI_RDLVL_REQ:WR:8:1,PI_RDLVL_CS:RW:24:2 */
+ mmio_clrsetbits_32(PI_REG(ch, 74),
+ (0x1 << 8) | (0x3 << 24),
+ (0x1 << 8) | (i << 24));
+ while (1) {
+ /* PI_174 PI_INT_STATUS:RD:8:18 */
+ tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
+
+ /*
+				 * make sure status obs reports no error bit
+ * PHY_46/174/302/430
+ * phy_rdlvl_status_obs_X:16:8
+ */
+ if ((((tmp >> 8) & 0x1) == 0x1) &&
+ (((tmp >> 13) & 0x1) == 0x1) &&
+ (((tmp >> 2) & 0x1) == 0x0))
+ break;
+ else if (((tmp >> 2) & 0x1) == 0x1)
+ return -1;
+ }
+ /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
+ mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
+ }
+ mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 16);
+ }
+
+ /* wdq leveling(LPDDR4 support) */
+ if ((training_flag & PI_WDQ_LEVELING) == PI_WDQ_LEVELING) {
+ for (i = 0; i < 4; i++) {
+ if (!(rank_mask & (1 << i)))
+ continue;
+
+ select_per_cs_training_index(ch, i);
+ /*
+ * disable PI_WDQLVL_VREF_EN before wdq leveling?
+ * PI_181 PI_WDQLVL_VREF_EN:RW:8:1
+ */
+ mmio_clrbits_32(PI_REG(ch, 181), 0x1 << 8);
+ /* PI_124 PI_WDQLVL_EN:RW:16:2 */
+ mmio_clrsetbits_32(PI_REG(ch, 124), 0x3 << 16,
+ 0x2 << 16);
+ /* PI_121 PI_WDQLVL_REQ:WR:8:1,PI_WDQLVL_CS:RW:16:2 */
+ mmio_clrsetbits_32(PI_REG(ch, 121),
+ (0x1 << 8) | (0x3 << 16),
+ (0x1 << 8) | (i << 16));
+ while (1) {
+ /* PI_174 PI_INT_STATUS:RD:8:18 */
+ tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
+ if ((((tmp >> 12) & 0x1) == 0x1) &&
+ (((tmp >> 13) & 0x1) == 0x1) &&
+ (((tmp >> 6) & 0x1) == 0x0))
+ break;
+ else if (((tmp >> 6) & 0x1) == 0x1)
+ return -1;
+ }
+ /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
+ mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
+ }
+ mmio_clrbits_32(PI_REG(ch, 124), 0x3 << 16);
+ }
+
+ /* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */
+ mmio_clrbits_32(PHY_REG(ch, 927), (1 << 22));
+
+ return 0;
+}
+
+static __pmusramfunc void set_ddrconfig(
+ struct rk3399_sdram_params *sdram_params,
+ unsigned char channel, uint32_t ddrconfig)
+{
+ /* only need to set ddrconfig */
+ struct rk3399_sdram_channel *ch = &sdram_params->ch[channel];
+ unsigned int cs0_cap = 0;
+ unsigned int cs1_cap = 0;
+
+ cs0_cap = (1 << (ch->cs0_row + ch->col + ch->bk + ch->bw - 20));
+ if (ch->rank > 1)
+ cs1_cap = cs0_cap >> (ch->cs0_row - ch->cs1_row);
+ if (ch->row_3_4) {
+ cs0_cap = cs0_cap * 3 / 4;
+ cs1_cap = cs1_cap * 3 / 4;
+ }
+
+ mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICECONF,
+ ddrconfig | (ddrconfig << 6));
+ mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICESIZE,
+ ((cs0_cap / 32) & 0xff) | (((cs1_cap / 32) & 0xff) << 8));
+}
+
+static __pmusramfunc void dram_all_config(
+ struct rk3399_sdram_params *sdram_params)
+{
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ struct rk3399_sdram_channel *info = &sdram_params->ch[i];
+ struct rk3399_msch_timings *noc = &info->noc_timings;
+
+ if (sdram_params->ch[i].col == 0)
+ continue;
+
+ mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGA0,
+ noc->ddrtiminga0.d32);
+ mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGB0,
+ noc->ddrtimingb0.d32);
+ mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGC0,
+ noc->ddrtimingc0.d32);
+ mmio_write_32(MSCH_BASE(i) + MSCH_DEVTODEV0,
+ noc->devtodev0.d32);
+ mmio_write_32(MSCH_BASE(i) + MSCH_DDRMODE, noc->ddrmode.d32);
+
+ /* rank 1 memory clock disable (dfi_dram_clk_disable = 1) */
+ if (sdram_params->ch[i].rank == 1)
+ mmio_setbits_32(CTL_REG(i, 276), 1 << 17);
+ }
+
+ DDR_STRIDE(sdram_params->stride);
+
+ /* reboot hold register set */
+ mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
+ CRU_PMU_SGRF_RST_RLS |
+ PRESET_GPIO0_HOLD(1) |
+ PRESET_GPIO1_HOLD(1));
+ mmio_clrsetbits_32(CRU_BASE + CRU_GLB_RST_CON, 0x3, 0x3);
+}
+
+static __pmusramfunc void pctl_cfg(uint32_t ch,
+ struct rk3399_sdram_params *sdram_params)
+{
+ const uint32_t *params_ctl = sdram_params->pctl_regs.denali_ctl;
+ const uint32_t *params_pi = sdram_params->pi_regs.denali_pi;
+ const struct rk3399_ddr_publ_regs *phy_regs = &sdram_params->phy_regs;
+ uint32_t tmp, tmp1, tmp2, i;
+
+ /*
+ * Workaround controller bug:
+ * Do not program DRAM_CLASS until NO_PHY_IND_TRAIN_INT is programmed
+ */
+ sram_regcpy(CTL_REG(ch, 1), (uintptr_t)&params_ctl[1],
+ CTL_REG_NUM - 1);
+ mmio_write_32(CTL_REG(ch, 0), params_ctl[0]);
+ sram_regcpy(PI_REG(ch, 0), (uintptr_t)&params_pi[0],
+ PI_REG_NUM);
+
+ sram_regcpy(PHY_REG(ch, 910), (uintptr_t)&phy_regs->phy896[910 - 896],
+ 3);
+
+ mmio_clrsetbits_32(CTL_REG(ch, 68), PWRUP_SREFRESH_EXIT,
+ PWRUP_SREFRESH_EXIT);
+
+ /* PHY_DLL_RST_EN */
+ mmio_clrsetbits_32(PHY_REG(ch, 957), 0x3 << 24, 1 << 24);
+ dmbst();
+
+ mmio_setbits_32(PI_REG(ch, 0), START);
+ mmio_setbits_32(CTL_REG(ch, 0), START);
+
+ /* wait lock */
+ while (1) {
+ tmp = mmio_read_32(PHY_REG(ch, 920));
+ tmp1 = mmio_read_32(PHY_REG(ch, 921));
+ tmp2 = mmio_read_32(PHY_REG(ch, 922));
+ if ((((tmp >> 16) & 0x1) == 0x1) &&
+ (((tmp1 >> 16) & 0x1) == 0x1) &&
+ (((tmp1 >> 0) & 0x1) == 0x1) &&
+ (((tmp2 >> 0) & 0x1) == 0x1))
+ break;
+		/* if PLL bypass, no need to wait for lock */
+ if (mmio_read_32(PHY_REG(ch, 911)) & 0x1)
+ break;
+ }
+
+ sram_regcpy(PHY_REG(ch, 896), (uintptr_t)&phy_regs->phy896[0], 63);
+
+ for (i = 0; i < 4; i++)
+ sram_regcpy(PHY_REG(ch, 128 * i),
+ (uintptr_t)&phy_regs->phy0[0], 91);
+
+ for (i = 0; i < 3; i++)
+ sram_regcpy(PHY_REG(ch, 512 + 128 * i),
+ (uintptr_t)&phy_regs->phy512[i][0], 38);
+}
+
+static __pmusramfunc int dram_switch_to_next_index(
+ struct rk3399_sdram_params *sdram_params)
+{
+ uint32_t ch, ch_count;
+ uint32_t fn = ((mmio_read_32(CTL_REG(0, 111)) >> 16) + 1) & 0x1;
+
+ mmio_write_32(CIC_BASE + CIC_CTRL0,
+ (((0x3 << 4) | (1 << 2) | 1) << 16) |
+ (fn << 4) | (1 << 2) | 1);
+ while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 2)))
+ ;
+
+ mmio_write_32(CIC_BASE + CIC_CTRL0, 0x20002);
+ while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 0)))
+ ;
+
+ ch_count = sdram_params->num_channels;
+
+	/* LPDDR4 f2 can't do training, all training will fail */
+ for (ch = 0; ch < ch_count; ch++) {
+ /*
+		 * Skip this write for LPDDR4; otherwise we end up writing 0's
+		 * in place of real data in an interesting pattern.
+ */
+ if (sdram_params->dramtype != LPDDR4) {
+ mmio_clrsetbits_32(PHY_REG(ch, 896), (0x3 << 8) | 1,
+ fn << 8);
+ }
+
+ /* data_training failed */
+ if (data_training(ch, sdram_params, PI_FULL_TRAINING))
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Needs to be done for both channels at once in case of a shared reset signal
+ * between channels.
+ */
+static __pmusramfunc int pctl_start(uint32_t channel_mask,
+ struct rk3399_sdram_params *sdram_params)
+{
+ uint32_t count;
+ uint32_t byte;
+
+ mmio_setbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);
+ mmio_setbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);
+
+ /* need de-access IO retention before controller START */
+ if (channel_mask & (1 << 0))
+ mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 19));
+ if (channel_mask & (1 << 1))
+ mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 23));
+
+ /* PHY_DLL_RST_EN */
+ if (channel_mask & (1 << 0))
+ mmio_clrsetbits_32(PHY_REG(0, 957), 0x3 << 24,
+ 0x2 << 24);
+ if (channel_mask & (1 << 1))
+ mmio_clrsetbits_32(PHY_REG(1, 957), 0x3 << 24,
+ 0x2 << 24);
+
+ /* check ERROR bit */
+ if (channel_mask & (1 << 0)) {
+ count = 0;
+ while (!(mmio_read_32(CTL_REG(0, 203)) & (1 << 3))) {
+ /* CKE is low, loop 10ms */
+ if (count > 100)
+ return -1;
+
+ sram_udelay(100);
+ count++;
+ }
+
+ mmio_clrbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);
+
+ /* Restore the PHY_RX_CAL_DQS value */
+ for (byte = 0; byte < 4; byte++)
+ mmio_clrsetbits_32(PHY_REG(0, 57 + 128 * byte),
+ 0xfff << 16,
+ sdram_params->rx_cal_dqs[0][byte]);
+ }
+ if (channel_mask & (1 << 1)) {
+ count = 0;
+ while (!(mmio_read_32(CTL_REG(1, 203)) & (1 << 3))) {
+ /* CKE is low, loop 10ms */
+ if (count > 100)
+ return -1;
+
+ sram_udelay(100);
+ count++;
+ }
+
+ mmio_clrbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);
+
+ /* Restore the PHY_RX_CAL_DQS value */
+ for (byte = 0; byte < 4; byte++)
+ mmio_clrsetbits_32(PHY_REG(1, 57 + 128 * byte),
+ 0xfff << 16,
+ sdram_params->rx_cal_dqs[1][byte]);
+ }
+
+ return 0;
+}
+
+__pmusramfunc static void pmusram_restore_pll(int pll_id, uint32_t *src)
+{
+ mmio_write_32((CRU_BASE + CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE);
+
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 0), src[0] | REG_SOC_WMSK);
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 1), src[1] | REG_SOC_WMSK);
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 2), src[2]);
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 4), src[4] | REG_SOC_WMSK);
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 5), src[5] | REG_SOC_WMSK);
+
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), src[3] | REG_SOC_WMSK);
+
+ while ((mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 2)) &
+ (1U << 31)) == 0x0)
+ ;
+}
+
+__pmusramfunc static void pmusram_enable_watchdog(void)
+{
+ /* Make the watchdog use the first global reset. */
+ mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, 1 << 1);
+
+ /*
+ * This gives the system ~8 seconds before reset. The pclk for the
+ * watchdog is 4MHz on reset. The value of 0x9 in WDT_TORR means that
+ * the watchdog will wait for 0x1ffffff cycles before resetting.
+ */
+ mmio_write_32(WDT0_BASE + 4, 0x9);
+
+ /* Enable the watchdog */
+ mmio_setbits_32(WDT0_BASE, 0x1);
+
+	/* Kick the watchdog by writing the restart magic value to WDT_CRR. */
+ mmio_write_32(WDT0_BASE + 0xc, 0x76);
+
+ secure_watchdog_ungate();
+
+ /* The watchdog is in PD_ALIVE, so deidle it. */
+ mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, PMU_CLR_ALIVE);
+}
+
+void dmc_suspend(void)
+{
+ struct rk3399_sdram_params *sdram_params = &sdram_config;
+ struct rk3399_ddr_publ_regs *phy_regs;
+ uint32_t *params_ctl;
+ uint32_t *params_pi;
+ uint32_t refdiv, postdiv2, postdiv1, fbdiv;
+ uint32_t ch, byte, i;
+
+ phy_regs = &sdram_params->phy_regs;
+ params_ctl = sdram_params->pctl_regs.denali_ctl;
+ params_pi = sdram_params->pi_regs.denali_pi;
+
+ /* save dpll register and ddr clock register value to pmusram */
+ cru_clksel_con6 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON6);
+ for (i = 0; i < PLL_CON_COUNT; i++)
+ dpll_data[i] = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, i));
+
+ fbdiv = dpll_data[0] & 0xfff;
+ postdiv2 = POSTDIV2_DEC(dpll_data[1]);
+ postdiv1 = POSTDIV1_DEC(dpll_data[1]);
+ refdiv = REFDIV_DEC(dpll_data[1]);
+
+ sdram_params->ddr_freq = ((fbdiv * 24) /
+ (refdiv * postdiv1 * postdiv2)) * MHz;
+
+ INFO("sdram_params->ddr_freq = %d\n", sdram_params->ddr_freq);
+ sdram_params->odt = (((mmio_read_32(PHY_REG(0, 5)) >> 16) &
+ 0x7) != 0) ? 1 : 0;
+
+	/* copy the CTL, PI and PHY registers */
+ dram_regcpy((uintptr_t)&params_ctl[0], CTL_REG(0, 0), CTL_REG_NUM);
+
+ /* mask DENALI_CTL_00_DATA.START, only copy here, will trigger later */
+ params_ctl[0] &= ~(0x1 << 0);
+
+ dram_regcpy((uintptr_t)&params_pi[0], PI_REG(0, 0),
+ PI_REG_NUM);
+
+	/* mask DENALI_PI_00_DATA.START, only copy here, will trigger later */
+ params_pi[0] &= ~(0x1 << 0);
+
+ dram_regcpy((uintptr_t)&phy_regs->phy0[0],
+ PHY_REG(0, 0), 91);
+
+ for (i = 0; i < 3; i++)
+ dram_regcpy((uintptr_t)&phy_regs->phy512[i][0],
+ PHY_REG(0, 512 + 128 * i), 38);
+
+ dram_regcpy((uintptr_t)&phy_regs->phy896[0], PHY_REG(0, 896), 63);
+
+ for (ch = 0; ch < sdram_params->num_channels; ch++) {
+ for (byte = 0; byte < 4; byte++)
+ sdram_params->rx_cal_dqs[ch][byte] = (0xfff << 16) &
+ mmio_read_32(PHY_REG(ch, 57 + byte * 128));
+ }
+
+ /* set DENALI_PHY_957_DATA.PHY_DLL_RST_EN = 0x1 */
+ phy_regs->phy896[957 - 896] &= ~(0x3 << 24);
+ phy_regs->phy896[957 - 896] |= 1 << 24;
+ phy_regs->phy896[0] |= 1;
+ phy_regs->phy896[0] &= ~(0x3 << 8);
+}
+
+__pmusramfunc void phy_dll_bypass_set(uint32_t ch, uint32_t freq)
+{
+ if (freq <= (125 * 1000 * 1000)) {
+ /* Set master mode to SW for slices*/
+ mmio_setbits_32(PHY_REG(ch, 86), 3 << 10);
+ mmio_setbits_32(PHY_REG(ch, 214), 3 << 10);
+ mmio_setbits_32(PHY_REG(ch, 342), 3 << 10);
+ mmio_setbits_32(PHY_REG(ch, 470), 3 << 10);
+ /* Set master mode to SW for address slices*/
+ mmio_setbits_32(PHY_REG(ch, 547), 3 << 18);
+ mmio_setbits_32(PHY_REG(ch, 675), 3 << 18);
+ mmio_setbits_32(PHY_REG(ch, 803), 3 << 18);
+ } else {
+ /* Clear SW master mode for slices*/
+ mmio_clrbits_32(PHY_REG(ch, 86), 3 << 10);
+ mmio_clrbits_32(PHY_REG(ch, 214), 3 << 10);
+ mmio_clrbits_32(PHY_REG(ch, 342), 3 << 10);
+ mmio_clrbits_32(PHY_REG(ch, 470), 3 << 10);
+ /* Clear SW master mode for address slices*/
+ mmio_clrbits_32(PHY_REG(ch, 547), 3 << 18);
+ mmio_clrbits_32(PHY_REG(ch, 675), 3 << 18);
+ mmio_clrbits_32(PHY_REG(ch, 803), 3 << 18);
+ }
+}
+
+__pmusramfunc void dmc_resume(void)
+{
+ struct rk3399_sdram_params *sdram_params = &sdram_config;
+ uint32_t channel_mask = 0;
+ uint32_t channel;
+
+ /*
+ * We can't turn off the watchdog, so if we have not turned it on before
+ * we should not turn it on here.
+ */
+ if ((pmu_enable_watchdog0 & 0x1) == 0x1) {
+ pmusram_enable_watchdog();
+ }
+ pmu_sgrf_rst_hld_release();
+ restore_pmu_rsthold();
+ sram_secure_timer_init();
+
+ /*
+	 * we switched the ddr clock to abpll on suspend,
+	 * set it back to dpll here
+ */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON6,
+ cru_clksel_con6 | REG_SOC_WMSK);
+ pmusram_restore_pll(DPLL_ID, dpll_data);
+
+ configure_sgrf();
+
+retry:
+ for (channel = 0; channel < sdram_params->num_channels; channel++) {
+ phy_pctrl_reset(channel);
+ /*
+ * Without this, LPDDR4 will write 0's in place of real data
+ * in a strange pattern.
+ */
+ if (sdram_params->dramtype == LPDDR4) {
+ phy_dll_bypass_set(channel, sdram_params->ddr_freq);
+ }
+ pctl_cfg(channel, sdram_params);
+ }
+
+ for (channel = 0; channel < 2; channel++) {
+ if (sdram_params->ch[channel].col)
+ channel_mask |= 1 << channel;
+ }
+
+ if (pctl_start(channel_mask, sdram_params) < 0)
+ goto retry;
+
+ for (channel = 0; channel < sdram_params->num_channels; channel++) {
+		/* LPDDR2/LPDDR3 needs to wait for DAI to complete, max 10us */
+ if (sdram_params->dramtype == LPDDR3)
+ sram_udelay(10);
+
+ /*
+		 * Training here will always fail for LPDDR4, so skip it.
+		 * If training fails, retry it.
+ */
+ if (sdram_params->dramtype != LPDDR4 &&
+ data_training(channel, sdram_params, PI_FULL_TRAINING))
+ goto retry;
+
+ set_ddrconfig(sdram_params, channel,
+ sdram_params->ch[channel].ddrconfig);
+ }
+
+ dram_all_config(sdram_params);
+
+ /* Switch to index 1 and prepare for DDR frequency switch. */
+ dram_switch_to_next_index(sdram_params);
+}
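
Note: dmc_suspend() above recovers the DDR clock rate from the saved DPLL
dividers as (24 MHz * fbdiv) / (refdiv * postdiv1 * postdiv2). A small sketch
of that calculation with illustrative divider values (not taken from any
particular board):

#include <stdint.h>

#define MHz	(1000 * 1000)

/* 24 MHz crystal reference, integer feedback divider only. */
static uint32_t dpll_to_ddr_freq(uint32_t fbdiv, uint32_t refdiv,
				 uint32_t postdiv1, uint32_t postdiv2)
{
	return ((fbdiv * 24) / (refdiv * postdiv1 * postdiv2)) * MHz;
}

/* Example: fbdiv = 50, refdiv = 1, postdiv1 = 3, postdiv2 = 1 -> 400 MHz. */
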
diff --git a/plat/rockchip/rk3399/drivers/dram/suspend.h b/plat/rockchip/rk3399/drivers/dram/suspend.h
new file mode 100644
index 0000000..1389944
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/suspend.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SUSPEND_H
+#define SUSPEND_H
+
+#include <stdint.h>
+#include <dram.h>
+
+#define KHz (1000)
+#define MHz (1000 * KHz)
+#define GHz (1000 * MHz)
+
+#define PI_CA_TRAINING (1 << 0)
+#define PI_WRITE_LEVELING (1 << 1)
+#define PI_READ_GATE_TRAINING (1 << 2)
+#define PI_READ_LEVELING (1 << 3)
+#define PI_WDQ_LEVELING (1 << 4)
+#define PI_FULL_TRAINING (0xff)
+
+void dmc_suspend(void);
+__pmusramfunc void dmc_resume(void);
+extern __pmusramdata uint8_t pmu_enable_watchdog0;
+
+#endif /* SUSPEND_H */
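
Note: the PI_* training constants form a bitmask; PI_FULL_TRAINING is the
wildcard that data_training() in suspend.c expands into the per-DRAM-type
subset. A small sketch of how the flags combine, mirroring the DDR3 and
LPDDR4 expansions in suspend.c (the helper itself is illustrative only):

#include <stdint.h>

/* Flag values as defined in suspend.h. */
#define PI_WRITE_LEVELING	(1 << 1)
#define PI_READ_GATE_TRAINING	(1 << 2)
#define PI_READ_LEVELING	(1 << 3)
#define PI_WDQ_LEVELING		(1 << 4)

static uint32_t pick_training_stages(int is_lpddr4)
{
	uint32_t flags = PI_WRITE_LEVELING | PI_READ_GATE_TRAINING |
			 PI_READ_LEVELING;

	/* Only LPDDR4 additionally runs write DQ leveling. */
	if (is_lpddr4)
		flags |= PI_WDQ_LEVELING;

	return flags;
}
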
diff --git a/plat/rockchip/rk3399/drivers/gpio/rk3399_gpio.c b/plat/rockchip/rk3399/drivers/gpio/rk3399_gpio.c
new file mode 100644
index 0000000..724968f
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/gpio/rk3399_gpio.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <platform_def.h>
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <drivers/gpio.h>
+#include <lib/mmio.h>
+#include <plat/common/platform.h>
+
+#include <plat_private.h>
+#include <soc.h>
+
+struct gpio_save {
+ uint32_t swporta_dr;
+ uint32_t swporta_ddr;
+ uint32_t inten;
+ uint32_t intmask;
+ uint32_t inttype_level;
+ uint32_t int_polarity;
+ uint32_t debounce;
+ uint32_t ls_sync;
+} store_gpio[3];
+
+static uint32_t store_grf_gpio[(GRF_GPIO2D_HE - GRF_GPIO2A_IOMUX) / 4 + 1];
+
+#define SWPORTA_DR 0x00
+#define SWPORTA_DDR 0x04
+#define INTEN 0x30
+#define INTMASK 0x34
+#define INTTYPE_LEVEL 0x38
+#define INT_POLARITY 0x3c
+#define DEBOUNCE 0x48
+#define LS_SYNC 0x60
+
+#define EXT_PORTA 0x50
+#define PMU_GPIO_PORT0 0
+#define PMU_GPIO_PORT1 1
+#define GPIO_PORT2 2
+#define GPIO_PORT3 3
+#define GPIO_PORT4 4
+
+#define PMU_GRF_GPIO0A_P 0x40
+#define GRF_GPIO2A_P 0xe040
+#define GPIO_P_MASK 0x03
+
+#define GET_GPIO_PORT(pin) (pin / 32)
+#define GET_GPIO_NUM(pin) (pin % 32)
+#define GET_GPIO_BANK(pin) ((pin % 32) / 8)
+#define GET_GPIO_ID(pin) ((pin % 32) % 8)
+
+enum {
+ ENC_ZDZU,
+ ENC_ZUDR,
+ ENC_ZUDZ,
+ NUM_ENC
+};
+
+static const struct port_info {
+ uint32_t clkgate_reg;
+ uint32_t pull_base;
+ uint32_t port_base;
+ /*
+ * Selects the pull mode encoding per bank,
+ * first index for pull_type_{hw2sw,sw2hw}
+ */
+ uint8_t pull_enc[4];
+ uint8_t clkgate_bit;
+ uint8_t max_bank;
+} port_info[] = {
+ {
+ .clkgate_reg = PMUCRU_BASE + CRU_PMU_CLKGATE_CON(1),
+ .pull_base = PMUGRF_BASE + PMUGRF_GPIO0A_P,
+ .port_base = GPIO0_BASE,
+ .pull_enc = {ENC_ZDZU, ENC_ZDZU},
+ .clkgate_bit = PCLK_GPIO0_GATE_SHIFT,
+ .max_bank = 1,
+ }, {
+ .clkgate_reg = PMUCRU_BASE + CRU_PMU_CLKGATE_CON(1),
+ .pull_base = PMUGRF_BASE + PMUGRF_GPIO1A_P,
+ .port_base = GPIO1_BASE,
+ .pull_enc = {ENC_ZUDR, ENC_ZUDR, ENC_ZUDR, ENC_ZUDR},
+ .clkgate_bit = PCLK_GPIO1_GATE_SHIFT,
+ .max_bank = 3,
+ }, {
+ .clkgate_reg = CRU_BASE + CRU_CLKGATE_CON(31),
+ .pull_base = GRF_BASE + GRF_GPIO2A_P,
+ .port_base = GPIO2_BASE,
+ .pull_enc = {ENC_ZUDR, ENC_ZUDR, ENC_ZDZU, ENC_ZDZU},
+ .clkgate_bit = PCLK_GPIO2_GATE_SHIFT,
+ .max_bank = 3,
+ }, {
+ .clkgate_reg = CRU_BASE + CRU_CLKGATE_CON(31),
+ .pull_base = GRF_BASE + GRF_GPIO3A_P,
+ .port_base = GPIO3_BASE,
+ .pull_enc = {ENC_ZUDR, ENC_ZUDR, ENC_ZUDR, ENC_ZUDR},
+ .clkgate_bit = PCLK_GPIO3_GATE_SHIFT,
+ .max_bank = 3,
+ }, {
+ .clkgate_reg = CRU_BASE + CRU_CLKGATE_CON(31),
+ .pull_base = GRF_BASE + GRF_GPIO4A_P,
+ .port_base = GPIO4_BASE,
+ .pull_enc = {ENC_ZUDR, ENC_ZUDR, ENC_ZUDR, ENC_ZUDR},
+ .clkgate_bit = PCLK_GPIO4_GATE_SHIFT,
+ .max_bank = 3,
+ }
+};
+
+/*
+ * Mappings between TF-A constants and hardware encodings:
+ * there are 3 different encoding schemes that may differ between
+ * banks of the same port: the corresponding value of the pull_enc array
+ * in port_info is used as the first index
+ */
+static const uint8_t pull_type_hw2sw[NUM_ENC][4] = {
+ [ENC_ZDZU] = {GPIO_PULL_NONE, GPIO_PULL_DOWN, GPIO_PULL_NONE, GPIO_PULL_UP},
+ [ENC_ZUDR] = {GPIO_PULL_NONE, GPIO_PULL_UP, GPIO_PULL_DOWN, GPIO_PULL_REPEATER},
+ [ENC_ZUDZ] = {GPIO_PULL_NONE, GPIO_PULL_UP, GPIO_PULL_DOWN, GPIO_PULL_NONE}
+};
+static const uint8_t pull_type_sw2hw[NUM_ENC][4] = {
+ [ENC_ZDZU] = {
+ [GPIO_PULL_NONE] = 0,
+ [GPIO_PULL_DOWN] = 1,
+ [GPIO_PULL_UP] = 3,
+ [GPIO_PULL_REPEATER] = -1
+ },
+ [ENC_ZUDR] = {
+ [GPIO_PULL_NONE] = 0,
+ [GPIO_PULL_DOWN] = 2,
+ [GPIO_PULL_UP] = 1,
+ [GPIO_PULL_REPEATER] = 3
+ },
+ [ENC_ZUDZ] = {
+ [GPIO_PULL_NONE] = 0,
+ [GPIO_PULL_DOWN] = 2,
+ [GPIO_PULL_UP] = 1,
+ [GPIO_PULL_REPEATER] = -1
+ }
+};
+
+/* Enable the GPIO clock so registers can be accessed; return its old state */
+static int gpio_get_clock(uint32_t gpio_number)
+{
+ uint32_t port = GET_GPIO_PORT(gpio_number);
+ assert(port < 5U);
+
+ const struct port_info *info = &port_info[port];
+
+ if ((mmio_read_32(info->clkgate_reg) & (1U << info->clkgate_bit)) == 0U) {
+ return 0;
+ }
+ mmio_write_32(
+ info->clkgate_reg,
+ BITS_WITH_WMASK(0, 1, info->clkgate_bit)
+ );
+ return 1;
+}
+
+/* Restore old state of gpio clock, assuming it is running now */
+void gpio_put_clock(uint32_t gpio_number, uint32_t clock_state)
+{
+ if (clock_state == 0) {
+ return;
+ }
+ uint32_t port = GET_GPIO_PORT(gpio_number);
+ const struct port_info *info = &port_info[port];
+
+ mmio_write_32(info->clkgate_reg, BITS_WITH_WMASK(1, 1, info->clkgate_bit));
+}
+
+static int get_pull(int gpio)
+{
+ uint32_t port = GET_GPIO_PORT(gpio);
+ uint32_t bank = GET_GPIO_BANK(gpio);
+ uint32_t id = GET_GPIO_ID(gpio);
+ uint32_t val, clock_state;
+
+ assert(port < 5U);
+ const struct port_info *info = &port_info[port];
+
+ assert(bank <= info->max_bank);
+
+ clock_state = gpio_get_clock(gpio);
+ val = (mmio_read_32(info->pull_base + 4 * bank) >> (id * 2)) & GPIO_P_MASK;
+ gpio_put_clock(gpio, clock_state);
+
+ return pull_type_hw2sw[info->pull_enc[bank]][val];
+}
+
+static void set_pull(int gpio, int pull)
+{
+ uint32_t port = GET_GPIO_PORT(gpio);
+ uint32_t bank = GET_GPIO_BANK(gpio);
+ uint32_t id = GET_GPIO_ID(gpio);
+ uint32_t clock_state;
+
+ assert(port < 5U);
+ const struct port_info *info = &port_info[port];
+
+ assert(bank <= info->max_bank);
+
+ uint8_t val = pull_type_sw2hw[info->pull_enc[bank]][pull];
+
+ assert(val != (uint8_t)-1);
+
+ clock_state = gpio_get_clock(gpio);
+ mmio_write_32(
+ info->pull_base + 4 * bank,
+ BITS_WITH_WMASK(val, GPIO_P_MASK, id * 2)
+ );
+ gpio_put_clock(gpio, clock_state);
+}
+
+static void set_direction(int gpio, int direction)
+{
+ uint32_t port = GET_GPIO_PORT(gpio);
+ uint32_t num = GET_GPIO_NUM(gpio);
+ uint32_t clock_state;
+
+ assert((port < 5) && (num < 32));
+
+ clock_state = gpio_get_clock(gpio);
+
+ /*
+ * in gpio.h
+ * #define GPIO_DIR_OUT 0
+ * #define GPIO_DIR_IN 1
+	 * but the rk3399 gpio direction is 1: output, 0: input,
+	 * so we need to invert the direction value
+ */
+ mmio_setbits_32(
+ port_info[port].port_base + SWPORTA_DDR,
+ ((direction == 0) ? 1 : 0) << num
+ );
+ gpio_put_clock(gpio, clock_state);
+}
+
+static int get_direction(int gpio)
+{
+ uint32_t port = GET_GPIO_PORT(gpio);
+ uint32_t num = GET_GPIO_NUM(gpio);
+ int direction, clock_state;
+
+ assert((port < 5U) && (num < 32U));
+
+ clock_state = gpio_get_clock(gpio);
+
+ /*
+ * in gpio.h
+ * #define GPIO_DIR_OUT 0
+ * #define GPIO_DIR_IN 1
+	 * but the rk3399 gpio direction is 1: output, 0: input,
+	 * so we need to invert the direction value
+ */
+ direction = (((mmio_read_32(
+ port_info[port].port_base + SWPORTA_DDR
+ ) >> num) & 1U) == 0) ? 1 : 0;
+ gpio_put_clock(gpio, clock_state);
+
+ return direction;
+}
+
+static int get_value(int gpio)
+{
+ uint32_t port = GET_GPIO_PORT(gpio);
+ uint32_t num = GET_GPIO_NUM(gpio);
+ int value, clock_state;
+
+ assert((port < 5) && (num < 32));
+
+ clock_state = gpio_get_clock(gpio);
+ value = (mmio_read_32(port_info[port].port_base + EXT_PORTA) >> num) &
+ 0x1U;
+ gpio_put_clock(gpio, clock_state);
+
+ return value;
+}
+
+static void set_value(int gpio, int value)
+{
+ uint32_t port = GET_GPIO_PORT(gpio);
+ uint32_t num = GET_GPIO_NUM(gpio);
+ uint32_t clock_state;
+
+ assert((port < 5U) && (num < 32U));
+
+ clock_state = gpio_get_clock(gpio);
+ mmio_clrsetbits_32(
+ port_info[port].port_base + SWPORTA_DR,
+ 1 << num,
+ ((value == 0) ? 0 : 1) << num
+ );
+ gpio_put_clock(gpio, clock_state);
+}
+
+void plat_rockchip_save_gpio(void)
+{
+ unsigned int i;
+ uint32_t cru_gate_save;
+
+ cru_gate_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31));
+
+ /*
+	 * When the logic power domain is shut down we need to save the
+	 * gpio2 ~ gpio4 registers, so enable their clocks here since they
+	 * may be gated. We do not care about the gpio0 and gpio1 clock
+	 * gates, since we never gate them.
+ */
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+ BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));
+
+ /*
+	 * Since gpio0 and gpio1 are PMU GPIOs, they keep their values when
+	 * the logic power rail is shut down, so only the gpio2 ~ gpio4
+	 * register values need to be saved.
+ */
+ for (i = 2; i < 5; i++) {
+ uint32_t base = port_info[i].port_base;
+
+ store_gpio[i - 2] = (struct gpio_save) {
+ .swporta_dr = mmio_read_32(base + SWPORTA_DR),
+ .swporta_ddr = mmio_read_32(base + SWPORTA_DDR),
+ .inten = mmio_read_32(base + INTEN),
+ .intmask = mmio_read_32(base + INTMASK),
+ .inttype_level = mmio_read_32(base + INTTYPE_LEVEL),
+ .int_polarity = mmio_read_32(base + INT_POLARITY),
+ .debounce = mmio_read_32(base + DEBOUNCE),
+ .ls_sync = mmio_read_32(base + LS_SYNC),
+ };
+ }
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+ cru_gate_save | REG_SOC_WMSK);
+
+ /*
+	 * gpio0 and gpio1 use the PMU iomux and keep their values when the
+	 * logic power rail is shut down, so only the gpio2 ~ gpio4 iomux
+	 * register values need to be saved.
+ */
+ for (i = 0; i < ARRAY_SIZE(store_grf_gpio); i++)
+ store_grf_gpio[i] =
+ mmio_read_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4);
+}
+
+void plat_rockchip_restore_gpio(void)
+{
+ int i;
+ uint32_t cru_gate_save;
+
+ for (i = 0; i < ARRAY_SIZE(store_grf_gpio); i++)
+ mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
+ REG_SOC_WMSK | store_grf_gpio[i]);
+
+ cru_gate_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31));
+
+ /*
+	 * The gpio2 ~ gpio4 registers are restored here, so enable their
+	 * clocks first since they may be gated. We do not care about the
+	 * gpio0 and gpio1 clock gates, since we never gate them.
+ */
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+ BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));
+
+ for (i = 2; i < 5; i++) {
+ uint32_t base = port_info[i].port_base;
+ const struct gpio_save *save = &store_gpio[i - 2];
+
+ mmio_write_32(base + SWPORTA_DR, save->swporta_dr);
+ mmio_write_32(base + SWPORTA_DDR, save->swporta_ddr);
+ mmio_write_32(base + INTEN, save->inten);
+ mmio_write_32(base + INTMASK, save->intmask);
+		mmio_write_32(base + INTTYPE_LEVEL, save->inttype_level);
+ mmio_write_32(base + INT_POLARITY, save->int_polarity);
+ mmio_write_32(base + DEBOUNCE, save->debounce);
+ mmio_write_32(base + LS_SYNC, save->ls_sync);
+ }
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+ cru_gate_save | REG_SOC_WMSK);
+}
+
+const gpio_ops_t rk3399_gpio_ops = {
+ .get_direction = get_direction,
+ .set_direction = set_direction,
+ .get_value = get_value,
+ .set_value = set_value,
+ .set_pull = set_pull,
+ .get_pull = get_pull,
+};
+
+void plat_rockchip_gpio_init(void)
+{
+ gpio_init(&rk3399_gpio_ops);
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/Makefile b/plat/rockchip/rk3399/drivers/m0/Makefile
new file mode 100644
index 0000000..79e09f0
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/Makefile
@@ -0,0 +1,125 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Cross Compile
+M0_CROSS_COMPILE ?= arm-none-eabi-
+
+# Build architecture
+ARCH := cortex-m0
+
+# Build platform
+PLAT_M0 ?= rk3399m0
+PLAT_M0_PMU ?= rk3399m0pmu
+
+ifeq (${V},0)
+ Q=@
+else
+ Q=
+endif
+export Q
+
+.SUFFIXES:
+
+INCLUDES += -Iinclude/ \
+ -I../../include/shared/
+
+# NOTE: Add C source files here
+C_SOURCES_COMMON := src/startup.c
+C_SOURCES := src/dram.c \
+ src/stopwatch.c
+C_SOURCES_PMU := src/suspend.c
+
+# Flags definition
+COMMON_FLAGS := -g -mcpu=$(ARCH) -mthumb -Wall -O3 -nostdlib -mfloat-abi=soft
+CFLAGS := -ffunction-sections -fdata-sections -fomit-frame-pointer -fno-common
+ASFLAGS := -Wa,--gdwarf-2
+LDFLAGS := -Wl,--gc-sections -Wl,--build-id=none
+
+# Cross tool
+CC := ${M0_CROSS_COMPILE}gcc
+CPP := ${M0_CROSS_COMPILE}cpp
+AR := ${M0_CROSS_COMPILE}ar
+OC := ${M0_CROSS_COMPILE}objcopy
+OD := ${M0_CROSS_COMPILE}objdump
+NM := ${M0_CROSS_COMPILE}nm
+
+# NOTE: The line continuation '\' is required in the next define otherwise we
+# end up with a line-feed character at the end of the last c filename.
+# Also bear this issue in mind if extending the list of supported filetypes.
+define SOURCES_TO_OBJS
+ $(notdir $(patsubst %.c,%.o,$(filter %.c,$(1)))) \
+ $(notdir $(patsubst %.S,%.o,$(filter %.S,$(1))))
+endef
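+# For example, "src/dram.c src/stopwatch.c" maps to "dram.o stopwatch.o".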
+
+SOURCES_COMMON := $(C_SOURCES_COMMON)
+SOURCES := $(C_SOURCES)
+SOURCES_PMU := $(C_SOURCES_PMU)
+OBJS_COMMON := $(addprefix $(BUILD)/,$(call SOURCES_TO_OBJS,$(SOURCES_COMMON)))
+OBJS := $(addprefix $(BUILD)/,$(call SOURCES_TO_OBJS,$(SOURCES)))
+OBJS_PMU := $(addprefix $(BUILD)/,$(call SOURCES_TO_OBJS,$(SOURCES_PMU)))
+LINKERFILE := $(BUILD)/$(PLAT_M0).ld
+MAPFILE := $(BUILD)/$(PLAT_M0).map
+MAPFILE_PMU := $(BUILD)/$(PLAT_M0_PMU).map
+ELF := $(BUILD)/$(PLAT_M0).elf
+ELF_PMU := $(BUILD)/$(PLAT_M0_PMU).elf
+BIN := $(BUILD)/$(PLAT_M0).bin
+BIN_PMU := $(BUILD)/$(PLAT_M0_PMU).bin
+LINKERFILE_SRC := src/$(PLAT_M0).ld.S
+
+# Function definition related compilation
+define MAKE_C
+$(eval OBJ := $(1)/$(patsubst %.c,%.o,$(notdir $(2))))
+-include $(patsubst %.o,%.d,$(OBJ))
+
+$(OBJ) : $(2)
+ @echo " CC $$<"
+ $$(Q)$$(CC) $$(COMMON_FLAGS) $$(CFLAGS) $$(INCLUDES) -MMD -MT $$@ -c $$< -o $$@
+endef
+
+define MAKE_S
+$(eval OBJ := $(1)/$(patsubst %.S,%.o,$(notdir $(2))))
+
+$(OBJ) : $(2)
+ @echo " AS $$<"
+ $$(Q)$$(CC) -x assembler-with-cpp $$(COMMON_FLAGS) $$(ASFLAGS) -c $$< -o $$@
+endef
+
+define MAKE_OBJS
+ $(eval C_OBJS := $(filter %.c,$(2)))
+ $(eval REMAIN := $(filter-out %.c,$(2)))
+ $(eval $(foreach obj,$(C_OBJS),$(call MAKE_C,$(1),$(obj),$(3))))
+
+ $(eval S_OBJS := $(filter %.S,$(REMAIN)))
+ $(eval REMAIN := $(filter-out %.S,$(REMAIN)))
+ $(eval $(foreach obj,$(S_OBJS),$(call MAKE_S,$(1),$(obj),$(3))))
+
+ $(and $(REMAIN),$(error Unexpected source files present: $(REMAIN)))
+endef
+
+.PHONY: all
+all: $(BIN) $(BIN_PMU)
+
+.DEFAULT_GOAL := all
+
+$(LINKERFILE): $(LINKERFILE_SRC)
+ $(CC) $(COMMON_FLAGS) $(INCLUDES) -P -E -D__LINKER__ -MMD -MF $@.d -MT $@ -o $@ $<
+-include $(LINKERFILE).d
+
+$(ELF) : $(OBJS) $(OBJS_COMMON) $(LINKERFILE)
+ @echo " LD $@"
+ $(Q)$(CC) -o $@ $(COMMON_FLAGS) $(LDFLAGS) -Wl,-Map=$(MAPFILE) -Wl,-T$(LINKERFILE) $(OBJS) $(OBJS_COMMON)
+
+%.bin : %.elf
+ @echo " BIN $@"
+ $(Q)$(OC) -O binary $< $@
+
+$(ELF_PMU) : $(OBJS_COMMON) $(OBJS_PMU) $(LINKERFILE)
+ @echo " LD $@"
+ $(Q)$(CC) -o $@ $(COMMON_FLAGS) $(LDFLAGS) -Wl,-Map=$(MAPFILE_PMU) -Wl,-T$(LINKERFILE) $(OBJS_PMU) $(OBJS_COMMON)
+
+$(eval $(call MAKE_OBJS,$(BUILD),$(SOURCES_COMMON),$(1)))
+$(eval $(call MAKE_OBJS,$(BUILD),$(SOURCES),$(1)))
+$(eval $(call MAKE_OBJS,$(BUILD),$(SOURCES_PMU),$(1)))
diff --git a/plat/rockchip/rk3399/drivers/m0/include/addressmap.h b/plat/rockchip/rk3399/drivers/m0/include/addressmap.h
new file mode 100644
index 0000000..d431437
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/include/addressmap.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ADDRESSMAP_H
+#define ADDRESSMAP_H
+
+#include <addressmap_shared.h>
+
+/* Registers base address for M0 */
+#define MMIO_BASE 0x40000000
+
+#endif /* ADDRESSMAP_H */
diff --git a/plat/rockchip/rk3399/drivers/m0/include/rk3399_mcu.h b/plat/rockchip/rk3399/drivers/m0/include/rk3399_mcu.h
new file mode 100644
index 0000000..2e90694
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/include/rk3399_mcu.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RK3399_MCU_H
+#define RK3399_MCU_H
+
+#include <addressmap.h>
+
+typedef unsigned int uint32_t;
+
+#define mmio_read_32(c) ({unsigned int __v = \
+ (*(volatile unsigned int *)(c)); __v; })
+#define mmio_write_32(c, v) ((*(volatile unsigned int *)(c)) = (v))
+
+#define mmio_clrbits_32(addr, clear) \
+ mmio_write_32(addr, (mmio_read_32(addr) & ~(clear)))
+#define mmio_setbits_32(addr, set) \
+ mmio_write_32(addr, (mmio_read_32(addr)) | (set))
+#define mmio_clrsetbits_32(addr, clear, set) \
+ mmio_write_32(addr, (mmio_read_32(addr) & ~(clear)) | (set))
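+/*
+ * Example: mmio_clrsetbits_32(addr, 0x3 << 4, 0x1 << 4) performs a
+ * read-modify-write that replaces bits [5:4] of the register with 0b01.
+ */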
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+void stopwatch_init_usecs_expire(unsigned int usecs);
+int stopwatch_expired(void);
+void stopwatch_reset(void);
+
+#endif /* RK3399_MCU_H */
diff --git a/plat/rockchip/rk3399/drivers/m0/src/dram.c b/plat/rockchip/rk3399/drivers/m0/src/dram.c
new file mode 100644
index 0000000..84e8884
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/dram.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <dram_regs.h>
+#include <m0_param.h>
+#include <pmu_bits.h>
+#include <pmu_regs.h>
+#include "misc_regs.h"
+#include "rk3399_mcu.h"
+
+static uint32_t gatedis_con0;
+
+static void idle_port(void)
+{
+ gatedis_con0 = mmio_read_32(PMUCRU_BASE + PMU_CRU_GATEDIS_CON0);
+ mmio_write_32(PMUCRU_BASE + PMU_CRU_GATEDIS_CON0, 0x3fffffff);
+
+ mmio_setbits_32(PMU_BASE + PMU_BUS_IDLE_REQ,
+ (1 << PMU_IDLE_REQ_MSCH0) | (1 << PMU_IDLE_REQ_MSCH1));
+ while ((mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) &
+ ((1 << PMU_IDLE_ST_MSCH1) | (1 << PMU_IDLE_ST_MSCH0))) !=
+ ((1 << PMU_IDLE_ST_MSCH1) | (1 << PMU_IDLE_ST_MSCH0)))
+ continue;
+}
+
+static void deidle_port(void)
+{
+ mmio_clrbits_32(PMU_BASE + PMU_BUS_IDLE_REQ,
+ (1 << PMU_IDLE_REQ_MSCH0) | (1 << PMU_IDLE_REQ_MSCH1));
+ while (mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) &
+ ((1 << PMU_IDLE_ST_MSCH1) | (1 << PMU_IDLE_ST_MSCH0)))
+ continue;
+
+	/* The documentation is wrong: PMU_CRU_GATEDIS_CON0 does not need its mask bits set. */
+ mmio_write_32(PMUCRU_BASE + PMU_CRU_GATEDIS_CON0, gatedis_con0);
+}
+
+static void ddr_set_pll(void)
+{
+ mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_MODE(PLL_SLOW_MODE));
+
+ mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_POWER_DOWN(1));
+ mmio_write_32(CRU_BASE + CRU_DPLL_CON0,
+ mmio_read_32(PARAM_ADDR + PARAM_DPLL_CON0));
+ mmio_write_32(CRU_BASE + CRU_DPLL_CON1,
+ mmio_read_32(PARAM_ADDR + PARAM_DPLL_CON1));
+ mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_POWER_DOWN(0));
+
+ while ((mmio_read_32(CRU_BASE + CRU_DPLL_CON2) & (1u << 31)) == 0)
+ continue;
+
+ mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_MODE(PLL_NORMAL_MODE));
+}
+
+__attribute__((noreturn)) void m0_main(void)
+{
+ mmio_setbits_32(PHY_REG(0, 927), (1 << 22));
+ mmio_setbits_32(PHY_REG(1, 927), (1 << 22));
+ idle_port();
+
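+	/*
+	 * Program CIC_CTRL0 to start the frequency-change handshake. The
+	 * upper 16 bits of the value form the write-enable mask for the
+	 * fields written in the lower half (the "(... << 16)" part),
+	 * following the masked-register convention used elsewhere.
+	 */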
+ mmio_write_32(CIC_BASE + CIC_CTRL0,
+ (((0x3 << 4) | (1 << 2) | 1) << 16) |
+ (1 << 2) | 1 |
+ mmio_read_32(PARAM_ADDR + PARAM_FREQ_SELECT));
+ while ((mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 2)) == 0)
+ continue;
+
+ ddr_set_pll();
+ mmio_write_32(CIC_BASE + CIC_CTRL0, 0x20002);
+ while ((mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 0)) == 0)
+ continue;
+
+ deidle_port();
+ mmio_clrbits_32(PHY_REG(0, 927), (1 << 22));
+ mmio_clrbits_32(PHY_REG(1, 927), (1 << 22));
+
+ mmio_write_32(PARAM_ADDR + PARAM_M0_DONE, M0_DONE_FLAG);
+
+ for (;;)
+ __asm__ volatile ("wfi");
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/src/rk3399m0.ld.S b/plat/rockchip/rk3399/drivers/m0/src/rk3399m0.ld.S
new file mode 100644
index 0000000..bfe054e
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/rk3399m0.ld.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <m0_param.h>
+
+OUTPUT_FORMAT("elf32-littlearm")
+
+SECTIONS {
+ .m0_bin 0 : {
+ KEEP(*(.isr_vector))
+ ASSERT(. == 0xc0, "ISR vector has the wrong size.");
+ ASSERT(. == PARAM_ADDR, "M0 params should go right behind ISR table.");
+ . += PARAM_M0_SIZE;
+ *(.text*)
+ *(.rodata*)
+ *(.data*)
+ *(.bss*)
+ . = ALIGN(8);
+ *(.co_stack*)
+ }
+
+ /DISCARD/ : { *(.comment) *(.note*) }
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/src/startup.c b/plat/rockchip/rk3399/drivers/m0/src/startup.c
new file mode 100644
index 0000000..dfd8af2
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/startup.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "rk3399_mcu.h"
+
+/* Stack configuration */
+#define STACK_SIZE 0x00000040
+__attribute__ ((section(".co_stack")))
+unsigned long pstack[STACK_SIZE];
+
+/* Macro definition */
+#define WEAK __attribute__ ((weak))
+
+/* System exception vector handler */
+__attribute__ ((used))
+void WEAK reset_handler(void);
+void WEAK nmi_handler(void);
+void WEAK hardware_fault_handler(void);
+void WEAK svc_handler(void);
+void WEAK pend_sv_handler(void);
+void WEAK systick_handler(void);
+
+extern int m0_main(void);
+
+/* Function prototypes */
+static void default_reset_handler(void);
+static void default_handler(void);
+
+/*
+ * The minimal vector table for a Cortex-M0. Note that the proper section
+ * attributes must be applied so that it ends up at physical address
+ * 0x00000000.
+ */
+__attribute__ ((used, section(".isr_vector")))
+void (* const g_pfnVectors[])(void) = {
+ /* core Exceptions */
+ (void *)&pstack[STACK_SIZE], /* the initial stack pointer */
+ reset_handler,
+ nmi_handler,
+ hardware_fault_handler,
+ 0, 0, 0, 0, 0, 0, 0,
+ svc_handler,
+ 0, 0,
+ pend_sv_handler,
+ systick_handler,
+
+ /* external exceptions */
+ 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0
+};
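+
+/*
+ * The linker script (rk3399m0.ld.S) places .isr_vector at address 0 and
+ * asserts that it occupies exactly 0xc0 bytes (48 word-sized entries), with
+ * the M0 parameter block located immediately behind it.
+ */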
+
+/**
+ * This is the code that gets called when the processor first
+ * starts execution following a reset event. Only the absolutely
+ * necessary setup is performed, after which the application
+ * supplied m0_main() routine is called.
+ */
+static void default_reset_handler(void)
+{
+ /* call the application's entry point */
+ m0_main();
+}
+
+/**
+ * Provide weak aliases for each exception handler to default_handler().
+ * As they are weak aliases, any function with the same name will override
+ * this definition.
+ */
+#pragma weak reset_handler = default_reset_handler
+#pragma weak nmi_handler = default_handler
+#pragma weak hardware_fault_handler = default_handler
+#pragma weak svc_handler = default_handler
+#pragma weak pend_sv_handler = default_handler
+#pragma weak systick_handler = default_handler
+
+/**
+ * This is the code that gets called when the processor receives
+ * an unexpected interrupt. This simply enters an infinite loop,
+ * preserving the system state for examination by a debugger.
+ */
+static void default_handler(void)
+{
+ /* go into an infinite loop. */
+ while (1)
+ ;
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/src/stopwatch.c b/plat/rockchip/rk3399/drivers/m0/src/stopwatch.c
new file mode 100644
index 0000000..5af8caa
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/stopwatch.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <m0_param.h>
+#include "rk3399_mcu.h"
+
+/* use 24MHz SysTick */
+#define US_TO_CYCLE(US) (US * 24)
+
+#define SYST_CST 0xe000e010
+/* enable counter */
+#define ENABLE (1 << 0)
+/* when set, counting down to 0 pends the SysTick exception */
+#define TICKINT (1 << 1)
+/* core clock used for SysTick */
+#define CLKSOURCE (1 << 2)
+
+#define COUNTFLAG (1 << 16)
+#define SYST_RVR 0xe000e014
+#define MAX_VALUE 0xffffff
+#define MAX_USECS (MAX_VALUE / US_TO_CYCLE(1))
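+/*
+ * With a 24MHz tick, MAX_VALUE (0xffffff = 16777215 cycles) corresponds to
+ * roughly 699050 us per reload; longer waits are split across several
+ * reloads by stopwatch_set_usecs().
+ */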
+#define SYST_CVR 0xe000e018
+#define SYST_CALIB 0xe000e01c
+
+unsigned int remaining_usecs;
+
+static inline void stopwatch_set_usecs(void)
+{
+ unsigned int cycle;
+ unsigned int usecs = MIN(MAX_USECS, remaining_usecs);
+
+ remaining_usecs -= usecs;
+ cycle = US_TO_CYCLE(usecs);
+ mmio_write_32(SYST_RVR, cycle);
+ mmio_write_32(SYST_CVR, 0);
+
+ mmio_write_32(SYST_CST, ENABLE | TICKINT | CLKSOURCE);
+}
+
+void stopwatch_init_usecs_expire(unsigned int usecs)
+{
+ /*
+	 * Enter an infinite loop if the stopwatch is in use. This will allow the
+ * state to be analyzed with a debugger.
+ */
+ if (mmio_read_32(SYST_CST) & ENABLE)
+ while (1)
+ ;
+
+ remaining_usecs = usecs;
+ stopwatch_set_usecs();
+}
+
+int stopwatch_expired(void)
+{
+ int val = mmio_read_32(SYST_CST);
+ if ((val & COUNTFLAG) || !(val & ENABLE)) {
+ if (!remaining_usecs)
+ return 1;
+
+ stopwatch_set_usecs();
+ }
+
+ return 0;
+}
+
+void stopwatch_reset(void)
+{
+ mmio_clrbits_32(SYST_CST, ENABLE);
+ remaining_usecs = 0;
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/src/suspend.c b/plat/rockchip/rk3399/drivers/m0/src/suspend.c
new file mode 100644
index 0000000..9ad2fa2
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/suspend.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <pmu_regs.h>
+#include "rk3399_mcu.h"
+
+#define M0_SCR 0xe000ed10 /* System Control Register (SCR) */
+
+#define SCR_SLEEPDEEP_SHIFT (1 << 2)
+
+__attribute__((noreturn)) void m0_main(void)
+{
+ unsigned int status_value;
+
+	/*
+	 * The PMU sometimes doesn't clear the power mode bit as it is
+	 * supposed to, due to a hardware bug. Have the M0 clear it manually
+	 * to be sure, otherwise in some cases with concurrent wake
+	 * interrupts we stay asleep forever.
+	 */
+ while (1) {
+ status_value = mmio_read_32(PMU_BASE + PMU_POWER_ST);
+ if (status_value) {
+ mmio_clrbits_32(PMU_BASE + PMU_PWRMODE_CON, 0x01);
+ break;
+ }
+ }
+
+	/*
+	 * The FSM power sequence is .. -> ST_INPUT_CLAMP (step 17) -> .. ->
+	 * ST_WAKEUP_RESET -> ST_EXT_PWRUP -> ST_RELEASE_CLAMP ->
+	 * ST_24M_OSC_EN -> .. -> ST_WAKEUP_RESET_CLR (step 26) -> ...
+	 * INPUT_CLAMP and WAKEUP_RESET keep the SoC protected from power or
+	 * other single glitches, but WAKEUP_RESET needs the 24MHz clock, so
+	 * between RELEASE_CLAMP and 24M_OSC_EN there is a window in which a
+	 * glitch could reach the SoC and corrupt its state. Therefore we
+	 * assert a software clamp between ST_INPUT_CLAMP and
+	 * ST_WAKEUP_RESET_CLR to prevent this.
+	 */
+ while (1) {
+ status_value = mmio_read_32(PMU_BASE + PMU_POWER_ST);
+ if (status_value >= 17) {
+ mmio_setbits_32(PMU_BASE + PMU_SFT_CON, 0x02);
+ break;
+ }
+
+ }
+
+ while (1) {
+ status_value = mmio_read_32(PMU_BASE + PMU_POWER_ST);
+ if (status_value >= 26) {
+ mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, 0x02);
+ break;
+ }
+ }
+
+ for (;;)
+ __asm__ volatile ("wfi");
+}
diff --git a/plat/rockchip/rk3399/drivers/pmu/m0_ctl.c b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.c
new file mode 100644
index 0000000..cad76ac
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <m0_ctl.h>
+#include <plat_private.h>
+#include <rk3399_def.h>
+#include <secure.h>
+#include <soc.h>
+
+void m0_init(void)
+{
+ /* secure config for M0 */
+ mmio_write_32(SGRF_BASE + SGRF_PMU_CON(0), WMSK_BIT(7));
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6), WMSK_BIT(12));
+
+	/* The documentation is wrong: PMU_CRU_GATEDIS_CON0 does not need its mask bits set. */
+ mmio_setbits_32(PMUCRU_BASE + PMUCRU_GATEDIS_CON0, 0x02);
+
+	/*
+	 * Switch the parent to xin24M with div == 1.
+	 *
+	 * We need to shut down most of the PLLs and clocks except the 24MHz
+	 * OSC during suspend, and that is enough to supply the ddrfreq code.
+	 * To keep things simple, we just keep the fixed 24MHz clock to
+	 * supply both the suspend and ddrfreq paths directly.
+	 */
+ mmio_write_32(PMUCRU_BASE + PMUCRU_CLKSEL_CON0,
+ BIT_WITH_WMSK(15) | BITS_WITH_WMASK(0x0, 0x1f, 8));
+
+ mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2, WMSK_BIT(5));
+}
+
+void m0_configure_execute_addr(uintptr_t addr)
+{
+ /* set the execute address for M0 */
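+	/*
+	 * The address is 4KB aligned: bits [27:12] go into SGRF_PMU_CON(3)
+	 * and bits [31:28] into SGRF_PMU_CON(7), matching the shifts and
+	 * masks below.
+	 */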
+ mmio_write_32(SGRF_BASE + SGRF_PMU_CON(3),
+ BITS_WITH_WMASK((addr >> 12) & 0xffff,
+ 0xffffu, 0));
+ mmio_write_32(SGRF_BASE + SGRF_PMU_CON(7),
+ BITS_WITH_WMASK((addr >> 28) & 0xf,
+ 0xfu, 0));
+}
+
+void m0_start(void)
+{
+ /* enable clocks for M0 */
+ mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2,
+ BITS_WITH_WMASK(0x0, 0xf, 0));
+
+	/* clear the PARAM_M0_DONE flag, meaning the M0 is about to start working */
+ mmio_write_32(M0_PARAM_ADDR + PARAM_M0_DONE, 0);
+ dmbst();
+
+ mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0,
+ BITS_WITH_WMASK(0x0, 0x4, 0));
+
+ udelay(5);
+ /* start M0 */
+ mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0,
+ BITS_WITH_WMASK(0x0, 0x20, 0));
+ dmbst();
+}
+
+void m0_stop(void)
+{
+ /* stop M0 */
+ mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0,
+ BITS_WITH_WMASK(0x24, 0x24, 0));
+
+ /* disable clocks for M0 */
+ mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2,
+ BITS_WITH_WMASK(0xf, 0xf, 0));
+}
+
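+/*
+ * The M0 firmware writes M0_DONE_FLAG to PARAM_M0_DONE (see m0/src/dram.c)
+ * when it has finished its work; m0_wait_done() polls that mailbox from the
+ * AP side before the M0 is eventually stopped.
+ */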
+void m0_wait_done(void)
+{
+ do {
+ /*
+ * Don't starve the M0 for access to SRAM, so delay before
+ * reading the PARAM_M0_DONE value again.
+ */
+ udelay(5);
+ dsb();
+ } while (mmio_read_32(M0_PARAM_ADDR + PARAM_M0_DONE) != M0_DONE_FLAG);
+
+ /*
+	 * Let the M0 settle into WFI before we leave, so that we don't later
+	 * reset the M0 at a bad point and cause problems.
+ */
+ udelay(10);
+ dsb();
+}
diff --git a/plat/rockchip/rk3399/drivers/pmu/m0_ctl.h b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.h
new file mode 100644
index 0000000..7542e22
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef M0_CTL_H
+#define M0_CTL_H
+
+#include <m0_param.h>
+
+#define M0_BINCODE_BASE ((uintptr_t)rk3399m0_bin)
+#define M0_PARAM_ADDR (M0_BINCODE_BASE + PARAM_ADDR)
+#define M0PMU_BINCODE_BASE ((uintptr_t)rk3399m0pmu_bin)
+
+/* pmu_fw.c */
+extern char rk3399m0_bin[];
+extern char rk3399m0_bin_end[];
+
+extern char rk3399m0pmu_bin[];
+extern char rk3399m0pmu_bin_end[];
+
+extern void m0_init(void);
+extern void m0_start(void);
+extern void m0_stop(void);
+extern void m0_wait_done(void);
+extern void m0_configure_execute_addr(uintptr_t addr);
+
+#endif /* M0_CTL_H */
diff --git a/plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S b/plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S
new file mode 100644
index 0000000..546c09a
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <pmu_regs.h>
+
+ .globl clst_warmboot_data
+
+ .macro sram_func _name
+ .cfi_sections .debug_frame
+ .section .sram.text, "ax"
+ .type \_name, %function
+ .cfi_startproc
+ \_name:
+ .endm
+
+#define CRU_CLKSEL_CON6 0x118
+
+#define DDRCTL0_C_SYSREQ_CFG 0x0100
+#define DDRCTL1_C_SYSREQ_CFG 0x1000
+
+#define DDRC0_SREF_DONE_EXT 0x01
+#define DDRC1_SREF_DONE_EXT 0x04
+
+#define PLL_MODE_SHIFT (0x8)
+#define PLL_NORMAL_MODE ((0x3 << (PLL_MODE_SHIFT + 16)) | \
+ (0x1 << PLL_MODE_SHIFT))
+#define MPIDR_CLST_L_BITS 0x0
+	/*
+	 * For different SoCs, if we want to speed up warm boot, we need to
+	 * configure some registers here.
+	 * If the SCU was suspended, we must first switch the related clock
+	 * back from slow (24M) mode to normal mode.
+	 * X0: MPIDR_EL1 & MPIDR_CLUSTER_MASK
+	 */
+.macro func_rockchip_clst_warmboot
+ adr x4, clst_warmboot_data
+ lsr x5, x0, #6
+ ldr w3, [x4, x5]
+ str wzr, [x4, x5]
+ cmp w3, #PMU_CLST_RET
+ b.ne clst_warmboot_end
+ ldr w6, =(PLL_NORMAL_MODE)
+ /*
+ * core_l offset is CRU_BASE + 0xc,
+ * core_b offset is CRU_BASE + 0x2c
+ */
+ ldr x7, =(CRU_BASE + 0xc)
+ lsr x2, x0, #3
+ str w6, [x7, x2]
+clst_warmboot_end:
+.endm
+
+.macro rockchip_clst_warmboot_data
+clst_warmboot_data:
+ .rept PLATFORM_CLUSTER_COUNT
+ .word 0
+ .endr
+.endm
+
+ /* -----------------------------------------------
+ * void sram_func_set_ddrctl_pll(uint32_t pll_src)
+ * Function to switch the PLL source for ddrctrl
+ * In: x0 - The PLL of the clk_ddrc clock source
+ * out: None
+ * Clobber list : x0 - x3, x5, x8 - x10
+ * -----------------------------------------------
+ */
+
+ .globl sram_func_set_ddrctl_pll
+
+sram_func sram_func_set_ddrctl_pll
+ /* backup parameter */
+ mov x8, x0
+
+ /* disable the MMU at EL3 */
+ mrs x9, sctlr_el3
+ bic x10, x9, #(SCTLR_M_BIT)
+ msr sctlr_el3, x10
+ isb
+ dsb sy
+
+ /* enable ddrctl0_1 idle request */
+ mov x5, PMU_BASE
+ ldr w0, [x5, #PMU_SFT_CON]
+ orr w0, w0, #DDRCTL0_C_SYSREQ_CFG
+ orr w0, w0, #DDRCTL1_C_SYSREQ_CFG
+ str w0, [x5, #PMU_SFT_CON]
+
+check_ddrc0_1_sref_enter:
+ ldr w1, [x5, #PMU_DDR_SREF_ST]
+ and w2, w1, #DDRC0_SREF_DONE_EXT
+ and w3, w1, #DDRC1_SREF_DONE_EXT
+ orr w2, w2, w3
+ cmp w2, #(DDRC0_SREF_DONE_EXT | DDRC1_SREF_DONE_EXT)
+ b.eq check_ddrc0_1_sref_enter
+
+ /*
+ * select a PLL for ddrctrl:
+ * x0 = 0: ALPLL
+ * x0 = 1: ABPLL
+ * x0 = 2: DPLL
+	 * x0 = 3: GPLL
+ */
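+	/*
+	 * CRU_CLKSEL_CON6[5:4] selects the clk_ddrc PLL source and
+	 * 0x00300000 is the matching write-enable mask in the upper
+	 * half-word (Rockchip masked-register convention).
+	 */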
+ mov x5, CRU_BASE
+ lsl w0, w8, #4
+ orr w0, w0, #0x00300000
+ str w0, [x5, #CRU_CLKSEL_CON6]
+
+ /* disable ddrctl0_1 idle request */
+ mov x5, PMU_BASE
+ ldr w0, [x5, #PMU_SFT_CON]
+ bic w0, w0, #DDRCTL0_C_SYSREQ_CFG
+ bic w0, w0, #DDRCTL1_C_SYSREQ_CFG
+ str w0, [x5, #PMU_SFT_CON]
+
+check_ddrc0_1_sref_exit:
+ ldr w1, [x5, #PMU_DDR_SREF_ST]
+ and w2, w1, #DDRC0_SREF_DONE_EXT
+ and w3, w1, #DDRC1_SREF_DONE_EXT
+ orr w2, w2, w3
+ cmp w2, #0x0
+ b.eq check_ddrc0_1_sref_exit
+
+ /* reenable the MMU at EL3 */
+ msr sctlr_el3, x9
+ isb
+ dsb sy
+
+ ret
+endfunc sram_func_set_ddrctl_pll
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.c b/plat/rockchip/rk3399/drivers/pmu/pmu.c
new file mode 100644
index 0000000..3084c4f
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu.c
@@ -0,0 +1,1626 @@
+/*
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <drivers/arm/gicv3.h>
+#include <drivers/delay_timer.h>
+#include <drivers/gpio.h>
+#include <lib/bakery_lock.h>
+#include <lib/mmio.h>
+#include <plat/common/platform.h>
+
+#include <dfs.h>
+#include <m0_ctl.h>
+#include <plat_params.h>
+#include <plat_private.h>
+#include <pmu.h>
+#include <pmu_com.h>
+#include <pwm.h>
+#include <rk3399_def.h>
+#include <secure.h>
+#include <soc.h>
+#include <suspend.h>
+
+DEFINE_BAKERY_LOCK(rockchip_pd_lock);
+
+static uint32_t cpu_warm_boot_addr;
+static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT];
+static uint32_t store_cru[CRU_SDIO0_CON1 / 4 + 1];
+static uint32_t store_usbphy0[7];
+static uint32_t store_usbphy1[7];
+static uint32_t store_grf_io_vsel;
+static uint32_t store_grf_soc_con0;
+static uint32_t store_grf_soc_con1;
+static uint32_t store_grf_soc_con2;
+static uint32_t store_grf_soc_con3;
+static uint32_t store_grf_soc_con4;
+static uint32_t store_grf_soc_con7;
+static uint32_t store_grf_ddrc_con[4];
+static uint32_t store_wdt0[2];
+static uint32_t store_wdt1[2];
+static gicv3_dist_ctx_t dist_ctx;
+static gicv3_redist_ctx_t rdist_ctx;
+
+/*
+ * There are two ways to power a core on or off:
+ * 1) Switch its power domain on or off directly via the PMU_PWRDN_CON
+ *    register; this is core_pwr_pd mode.
+ * 2) Enable core power management in the PMU_CORE_PM_CON register; the
+ *    power domain is then powered off automatically once the core enters
+ *    wfi. This is core_pwr_wfi or core_pwr_wfi_int mode.
+ * core_pm_cfg_info records which method is currently in use.
+ */
+
+static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
+#if USE_COHERENT_MEM
+__attribute__ ((section("tzfw_coherent_mem")))
+#endif
+;/* coherent */
+
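+/*
+ * Request (state != 0) or release (state == 0) bus idle for a single bus:
+ * write the request bit in PMU_BUS_IDLE_REQ, then poll PMU_BUS_IDLE_ST and
+ * PMU_BUS_IDLE_ACK until both reflect the requested state, bounded by
+ * MAX_WAIT_COUNT.
+ */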
+static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
+{
+ uint32_t bus_id = BIT(bus);
+ uint32_t bus_req;
+ uint32_t wait_cnt = 0;
+ uint32_t bus_state, bus_ack;
+
+ if (state)
+ bus_req = BIT(bus);
+ else
+ bus_req = 0;
+
+ mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req);
+
+ do {
+ bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
+ bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
+ if (bus_state == bus_req && bus_ack == bus_req)
+ break;
+
+ wait_cnt++;
+ udelay(1);
+ } while (wait_cnt < MAX_WAIT_COUNT);
+
+ if (bus_state != bus_req || bus_ack != bus_req) {
+ INFO("%s:st=%x(%x)\n", __func__,
+ mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST),
+ bus_state);
+ INFO("%s:st=%x(%x)\n", __func__,
+ mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK),
+ bus_ack);
+ }
+}
+
+struct pmu_slpdata_s pmu_slpdata;
+
+static void qos_restore(void)
+{
+ if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
+ RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
+ if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
+ RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
+ RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
+ }
+ if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
+ RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
+ RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
+ }
+ if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
+ RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
+ RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
+ RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
+ }
+ if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
+ RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP);
+ if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
+ RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC);
+ if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
+ RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
+ RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
+ }
+ if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
+ RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
+ if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
+ RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC);
+ if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
+ RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO);
+ if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
+ RESTORE_QOS(pmu_slpdata.gic_qos, GIC);
+ if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
+ RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
+ RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
+ }
+ if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
+ RESTORE_QOS(pmu_slpdata.iep_qos, IEP);
+ if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
+ RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
+ RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
+ }
+ if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
+ RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
+ RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
+ RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
+ }
+ if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
+ RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
+ RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
+ RESTORE_QOS(pmu_slpdata.dcf_qos, DCF);
+ RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
+ RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
+ RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
+ RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
+ RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
+ }
+ if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
+ RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
+ if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
+ RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
+ RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
+ }
+}
+
+static void qos_save(void)
+{
+ if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
+ SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
+ if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
+ SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
+ SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
+ }
+ if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
+ SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
+ SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
+ }
+ if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
+ SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
+ SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
+ SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
+ }
+ if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
+ SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP);
+ if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
+ SAVE_QOS(pmu_slpdata.gmac_qos, GMAC);
+ if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
+ SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
+ SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
+ }
+ if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
+ SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
+ if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
+ SAVE_QOS(pmu_slpdata.emmc_qos, EMMC);
+ if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
+ SAVE_QOS(pmu_slpdata.sdio_qos, SDIO);
+ if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
+ SAVE_QOS(pmu_slpdata.gic_qos, GIC);
+ if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
+ SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
+ SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
+ }
+ if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
+ SAVE_QOS(pmu_slpdata.iep_qos, IEP);
+ if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
+ SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
+ SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
+ }
+ if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
+ SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
+ SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
+ SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
+ }
+ if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
+ SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
+ SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
+ SAVE_QOS(pmu_slpdata.dcf_qos, DCF);
+ SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
+ SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
+ SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
+ SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
+ SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
+ }
+ if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
+ SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
+ if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
+ SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
+ SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
+ }
+}
+
+static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
+{
+ uint32_t state;
+
+ if (pmu_power_domain_st(pd_id) == pd_state)
+ goto out;
+
+ if (pd_state == pmu_pd_on)
+ pmu_power_domain_ctr(pd_id, pd_state);
+
+ state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE;
+
+ switch (pd_id) {
+ case PD_GPU:
+ pmu_bus_idle_req(BUS_ID_GPU, state);
+ break;
+ case PD_VIO:
+ pmu_bus_idle_req(BUS_ID_VIO, state);
+ break;
+ case PD_ISP0:
+ pmu_bus_idle_req(BUS_ID_ISP0, state);
+ break;
+ case PD_ISP1:
+ pmu_bus_idle_req(BUS_ID_ISP1, state);
+ break;
+ case PD_VO:
+ pmu_bus_idle_req(BUS_ID_VOPB, state);
+ pmu_bus_idle_req(BUS_ID_VOPL, state);
+ break;
+ case PD_HDCP:
+ pmu_bus_idle_req(BUS_ID_HDCP, state);
+ break;
+ case PD_TCPD0:
+ break;
+ case PD_TCPD1:
+ break;
+ case PD_GMAC:
+ pmu_bus_idle_req(BUS_ID_GMAC, state);
+ break;
+ case PD_CCI:
+ pmu_bus_idle_req(BUS_ID_CCIM0, state);
+ pmu_bus_idle_req(BUS_ID_CCIM1, state);
+ break;
+ case PD_SD:
+ pmu_bus_idle_req(BUS_ID_SD, state);
+ break;
+ case PD_EMMC:
+ pmu_bus_idle_req(BUS_ID_EMMC, state);
+ break;
+ case PD_EDP:
+ pmu_bus_idle_req(BUS_ID_EDP, state);
+ break;
+ case PD_SDIOAUDIO:
+ pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state);
+ break;
+ case PD_GIC:
+ pmu_bus_idle_req(BUS_ID_GIC, state);
+ break;
+ case PD_RGA:
+ pmu_bus_idle_req(BUS_ID_RGA, state);
+ break;
+ case PD_VCODEC:
+ pmu_bus_idle_req(BUS_ID_VCODEC, state);
+ break;
+ case PD_VDU:
+ pmu_bus_idle_req(BUS_ID_VDU, state);
+ break;
+ case PD_IEP:
+ pmu_bus_idle_req(BUS_ID_IEP, state);
+ break;
+ case PD_USB3:
+ pmu_bus_idle_req(BUS_ID_USB3, state);
+ break;
+ case PD_PERIHP:
+ pmu_bus_idle_req(BUS_ID_PERIHP, state);
+ break;
+ default:
+ /* Do nothing in default case */
+ break;
+ }
+
+ if (pd_state == pmu_pd_off)
+ pmu_power_domain_ctr(pd_id, pd_state);
+
+out:
+ return 0;
+}
+
+static uint32_t pmu_powerdomain_state;
+
+static void pmu_power_domains_suspend(void)
+{
+ clk_gate_con_save();
+ clk_gate_con_disable();
+ qos_save();
+ pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
+ pmu_set_power_domain(PD_GPU, pmu_pd_off);
+ pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
+ pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
+ pmu_set_power_domain(PD_VO, pmu_pd_off);
+ pmu_set_power_domain(PD_ISP0, pmu_pd_off);
+ pmu_set_power_domain(PD_ISP1, pmu_pd_off);
+ pmu_set_power_domain(PD_HDCP, pmu_pd_off);
+ pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
+ pmu_set_power_domain(PD_GMAC, pmu_pd_off);
+ pmu_set_power_domain(PD_EDP, pmu_pd_off);
+ pmu_set_power_domain(PD_IEP, pmu_pd_off);
+ pmu_set_power_domain(PD_RGA, pmu_pd_off);
+ pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
+ pmu_set_power_domain(PD_VDU, pmu_pd_off);
+ pmu_set_power_domain(PD_USB3, pmu_pd_off);
+ pmu_set_power_domain(PD_EMMC, pmu_pd_off);
+ pmu_set_power_domain(PD_VIO, pmu_pd_off);
+ pmu_set_power_domain(PD_SD, pmu_pd_off);
+ pmu_set_power_domain(PD_PERIHP, pmu_pd_off);
+ clk_gate_con_restore();
+}
+
+static void pmu_power_domains_resume(void)
+{
+ clk_gate_con_save();
+ clk_gate_con_disable();
+ if (!(pmu_powerdomain_state & BIT(PD_VDU)))
+ pmu_set_power_domain(PD_VDU, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_VCODEC)))
+ pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_RGA)))
+ pmu_set_power_domain(PD_RGA, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_IEP)))
+ pmu_set_power_domain(PD_IEP, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_EDP)))
+ pmu_set_power_domain(PD_EDP, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
+ pmu_set_power_domain(PD_GMAC, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO)))
+ pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_HDCP)))
+ pmu_set_power_domain(PD_HDCP, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_ISP1)))
+ pmu_set_power_domain(PD_ISP1, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_ISP0)))
+ pmu_set_power_domain(PD_ISP0, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_VO)))
+ pmu_set_power_domain(PD_VO, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_TCPD1)))
+ pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_TCPD0)))
+ pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_GPU)))
+ pmu_set_power_domain(PD_GPU, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_USB3)))
+ pmu_set_power_domain(PD_USB3, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_EMMC)))
+ pmu_set_power_domain(PD_EMMC, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_VIO)))
+ pmu_set_power_domain(PD_VIO, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_SD)))
+ pmu_set_power_domain(PD_SD, pmu_pd_on);
+ if (!(pmu_powerdomain_state & BIT(PD_PERIHP)))
+ pmu_set_power_domain(PD_PERIHP, pmu_pd_on);
+ qos_restore();
+ clk_gate_con_restore();
+}
+
+void pmu_power_domains_on(void)
+{
+ clk_gate_con_disable();
+ pmu_set_power_domain(PD_VDU, pmu_pd_on);
+ pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
+ pmu_set_power_domain(PD_RGA, pmu_pd_on);
+ pmu_set_power_domain(PD_IEP, pmu_pd_on);
+ pmu_set_power_domain(PD_EDP, pmu_pd_on);
+ pmu_set_power_domain(PD_GMAC, pmu_pd_on);
+ pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
+ pmu_set_power_domain(PD_HDCP, pmu_pd_on);
+ pmu_set_power_domain(PD_ISP1, pmu_pd_on);
+ pmu_set_power_domain(PD_ISP0, pmu_pd_on);
+ pmu_set_power_domain(PD_VO, pmu_pd_on);
+ pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
+ pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
+ pmu_set_power_domain(PD_GPU, pmu_pd_on);
+}
+
+void rk3399_flush_l2_b(void)
+{
+ uint32_t wait_cnt = 0;
+
+ mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
+ dsb();
+
+ /*
+	 * Flushing the big cluster's L2 cache takes ~4ms by default; allow
+	 * 10ms to give enough margin.
+ */
+ while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
+ BIT(L2_FLUSHDONE_CLUSTER_B))) {
+ wait_cnt++;
+ udelay(10);
+ if (wait_cnt == 10000 / 10)
+ WARN("L2 cache flush on suspend took longer than 10ms\n");
+ }
+
+ mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
+}
+
+static void pmu_scu_b_pwrdn(void)
+{
+ uint32_t wait_cnt = 0;
+
+ if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
+ (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
+ (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
+		ERROR("%s: not all cpus are off\n", __func__);
+ return;
+ }
+
+ rk3399_flush_l2_b();
+
+ mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
+
+ while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
+ BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
+ wait_cnt++;
+ udelay(1);
+ if (wait_cnt >= MAX_WAIT_COUNT)
+ ERROR("%s:wait cluster-b l2(%x)\n", __func__,
+ mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
+ }
+}
+
+static void pmu_scu_b_pwrup(void)
+{
+ mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
+}
+
+static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
+{
+ assert(cpu_id < PLATFORM_CORE_COUNT);
+ return core_pm_cfg_info[cpu_id];
+}
+
+static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
+{
+ assert(cpu_id < PLATFORM_CORE_COUNT);
+ core_pm_cfg_info[cpu_id] = value;
+#if !USE_COHERENT_MEM
+ flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
+ sizeof(uint32_t));
+#endif
+}
+
+static int cpus_power_domain_on(uint32_t cpu_id)
+{
+ uint32_t cfg_info;
+ uint32_t cpu_pd = PD_CPUL0 + cpu_id;
+	/*
+	 * There are two ways to power a core on or off:
+	 * 1) Switch its power domain on or off directly via the
+	 *    PMU_PWRDN_CON register.
+	 * 2) Enable core power management in the PMU_CORE_PM_CON register;
+	 *    the power domain is then powered off automatically once the
+	 *    core enters wfi.
+	 */
+
+ cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);
+
+ if (cfg_info == core_pwr_pd) {
+ /* disable core_pm cfg */
+ mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
+ CORES_PM_DISABLE);
+		/* if the core is already on, power it off first */
+ if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
+ mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
+ pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
+ }
+
+ pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
+ } else {
+ if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
+			WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
+ return -EINVAL;
+ }
+
+ mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
+ BIT(core_pm_sft_wakeup_en));
+ dsb();
+ }
+
+ return 0;
+}
+
+static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
+{
+ uint32_t cpu_pd;
+ uint32_t core_pm_value;
+
+ cpu_pd = PD_CPUL0 + cpu_id;
+ if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
+ return 0;
+
+ if (pd_cfg == core_pwr_pd) {
+ if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
+ return -EINVAL;
+
+ /* disable core_pm cfg */
+ mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
+ CORES_PM_DISABLE);
+
+ set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
+ pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
+ } else {
+ set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
+
+ core_pm_value = BIT(core_pm_en);
+ if (pd_cfg == core_pwr_wfi_int)
+ core_pm_value |= BIT(core_pm_int_wakeup_en);
+ mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
+ core_pm_value);
+ dsb();
+ }
+
+ return 0;
+}
+
+static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
+{
+ uint32_t cpu_id = plat_my_core_pos();
+ uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;
+
+ assert(cpu_id < PLATFORM_CORE_COUNT);
+
+ if (lvl_state == PLAT_MAX_OFF_STATE) {
+ if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
+ pll_id = ALPLL_ID;
+ clst_st_msk = CLST_L_CPUS_MSK;
+ } else {
+ pll_id = ABPLL_ID;
+ clst_st_msk = CLST_B_CPUS_MSK <<
+ PLATFORM_CLUSTER0_CORE_COUNT;
+ }
+
+ clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));
+
+ pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
+
+ pmu_st &= clst_st_msk;
+
+ if (pmu_st == clst_st_chk_msk) {
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
+ PLL_SLOW_MODE);
+
+ clst_warmboot_data[pll_id] = PMU_CLST_RET;
+
+ pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
+ pmu_st &= clst_st_msk;
+ if (pmu_st == clst_st_chk_msk)
+ return;
+			/*
+			 * This means another cpu has come back up; we must
+			 * restore the configuration at once.
+			 */
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
+ PLL_NOMAL_MODE);
+ clst_warmboot_data[pll_id] = 0;
+ }
+ }
+}
+
+static int clst_pwr_domain_resume(plat_local_state_t lvl_state)
+{
+ uint32_t cpu_id = plat_my_core_pos();
+ uint32_t pll_id, pll_st;
+
+ assert(cpu_id < PLATFORM_CORE_COUNT);
+
+ if (lvl_state == PLAT_MAX_OFF_STATE) {
+ if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT)
+ pll_id = ALPLL_ID;
+ else
+ pll_id = ABPLL_ID;
+
+ pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >>
+ PLL_MODE_SHIFT;
+
+ if (pll_st != NORMAL_MODE) {
+ WARN("%s: clst (%d) is in error mode (%d)\n",
+ __func__, pll_id, pll_st);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void nonboot_cpus_off(void)
+{
+ uint32_t boot_cpu, cpu;
+
+ boot_cpu = plat_my_core_pos();
+
+	/* turn off non-boot cpus */
+ for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
+ if (cpu == boot_cpu)
+ continue;
+ cpus_power_domain_off(cpu, core_pwr_pd);
+ }
+}
+
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
+{
+ uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
+
+ assert(cpu_id < PLATFORM_CORE_COUNT);
+ assert(cpuson_flags[cpu_id] == 0);
+ cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
+ cpuson_entry_point[cpu_id] = entrypoint;
+ dsb();
+
+ cpus_power_domain_on(cpu_id);
+
+ return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_off(void)
+{
+ uint32_t cpu_id = plat_my_core_pos();
+
+ cpus_power_domain_off(cpu_id, core_pwr_wfi);
+
+ return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl,
+ plat_local_state_t lvl_state)
+{
+ if (lvl == MPIDR_AFFLVL1) {
+ clst_pwr_domain_suspend(lvl_state);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_suspend(void)
+{
+ uint32_t cpu_id = plat_my_core_pos();
+
+ assert(cpu_id < PLATFORM_CORE_COUNT);
+ assert(cpuson_flags[cpu_id] == 0);
+ cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
+ cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
+ dsb();
+
+ cpus_power_domain_off(cpu_id, core_pwr_wfi_int);
+
+ return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state)
+{
+ if (lvl == MPIDR_AFFLVL1) {
+ clst_pwr_domain_suspend(lvl_state);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_on_finish(void)
+{
+ uint32_t cpu_id = plat_my_core_pos();
+
+ mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
+ CORES_PM_DISABLE);
+ return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl,
+ plat_local_state_t lvl_state)
+{
+ if (lvl == MPIDR_AFFLVL1) {
+ clst_pwr_domain_resume(lvl_state);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_resume(void)
+{
+ uint32_t cpu_id = plat_my_core_pos();
+
+ /* Disable core_pm */
+ mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);
+
+ return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state)
+{
+ if (lvl == MPIDR_AFFLVL1) {
+ clst_pwr_domain_resume(lvl_state);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+/**
+ * init_pmu_counts - Init timing counts in the PMU register area
+ *
+ * At various points when we power up or down parts of the system we need
+ * a delay to wait for power / clocks to become stable. The PMU has counters
+ * to help software do the delay properly. Basically, it works like this:
+ * - Software sets up counter values
+ * - When software turns on something in the PMU, the counter kicks off
+ * - The hardware sets a bit automatically when the counter has finished and
+ * software knows that the initialization is done.
+ *
+ * It's software's job to set up these counters. The hardware power-on default
+ * for these settings is conservative, setting everything to 0x5dc0
+ * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts).
+ *
+ * Note that some of these counters are only really used at suspend/resume
+ * time (for instance, that's the only time we turn off/on the oscillator) and
+ * others are used during normal runtime (like turning on/off a CPU or GPU) but
+ * it doesn't hurt to init everything at boot.
+ *
+ * Also note that these counters can run off the 32 kHz clock or the 24 MHz
+ * clock. While the 24 MHz clock can give us more precision, it's not always
+ * available (like when we turn the oscillator off at sleep time). The
+ * pmu_use_lf (lf: low freq) is available in power mode. Current understanding
+ * is that counts work like this:
+ * IF (pmu_use_lf == 0) || (power_mode_en == 0)
+ * use the 24M OSC for counts
+ * ELSE
+ * use the 32K OSC for counts
+ *
+ * Notes:
+ * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the moment
+ *   we always keep that 0. This apparently chooses between using the PLL as
+ * the source for the PMU vs. the 24M clock. If we ever set it to 1 we
+ * should consider how it affects these counts (if at all).
+ * - The power_mode_en bit is documented to clear automatically when we leave
+ * "power mode". That's why most clocks are on 24M. Only timings used when
+ * in "power mode" are 32k.
+ * - In some cases the kernel may override these counts.
+ *
+ * The PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are important CNTs
+ * in power mode, we need to ensure that they are available.
+ */
+static void init_pmu_counts(void)
+{
+ /* COUNTS FOR INSIDE POWER MODE */
+
+ /*
+ * From limited testing, need PMU stable >= 2ms, but go overkill
+ * and choose 30 ms to match testing on past SoCs. Also let
+ * OSC have 30 ms for stabilization.
+ */
+ mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30));
+ mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30));
+
+ /* Unclear what these should be; try 3 ms */
+ mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3));
+
+ /* Unclear what this should be, but set the default explicitly */
+ mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0);
+
+ /* COUNTS FOR OUTSIDE POWER MODE */
+
+ /* Put something sorta conservative here until we know better */
+ mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
+ mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1));
+ mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1));
+ mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1));
+
+	/*
+	 * When we enable PMU_CLR_PERILP it shuts down the SRAM, but the M0
+	 * code runs from SRAM and we need it to check whether the cpu has
+	 * entered the FSM, so we must wait for the M0 to finish its code and
+	 * enter WFI before the SRAM is shut down. Given the FSM order
+	 * ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN,
+	 * we can add a delay on the ST_SCU_L_PWRDN step to guarantee the M0
+	 * has seen the FSM status and entered WFI before PMU_CLR_PERILP
+	 * takes effect.
+	 */
+ mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5));
+ mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1));
+
+ /*
+ * Set CPU/GPU to 1 us.
+ *
+	 * NOTE: Even though ATF doesn't configure the GPU we'll still set up
+ * counts here. After all ATF controls all these other bits and also
+ * chooses which clock these counters use.
+ */
+ mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1));
+ mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1));
+ mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1));
+ mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1));
+}
+
+static uint32_t clk_ddrc_save;
+
+static void sys_slp_config(void)
+{
+ uint32_t slp_mode_cfg = 0;
+
+ /* keep enabling clk_ddrc_bpll_src_en gate for DDRC */
+ clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3));
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1));
+
+ prepare_abpll_for_ddrctrl();
+ sram_func_set_ddrctl_pll(ABPLL_ID);
+
+ mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
+ mmio_write_32(PMU_BASE + PMU_CCI500_CON,
+ BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
+ BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
+ BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));
+
+ mmio_write_32(PMU_BASE + PMU_ADB400_CON,
+ BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
+ BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
+ BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));
+
+ slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
+ BIT(PMU_WKUP_RST_EN) |
+ BIT(PMU_INPUT_CLAMP_EN) |
+ BIT(PMU_POWER_OFF_REQ_CFG) |
+ BIT(PMU_CPU0_PD_EN) |
+ BIT(PMU_L2_FLUSH_EN) |
+ BIT(PMU_L2_IDLE_EN) |
+ BIT(PMU_SCU_PD_EN) |
+ BIT(PMU_CCI_PD_EN) |
+ BIT(PMU_CLK_CORE_SRC_GATE_EN) |
+ BIT(PMU_ALIVE_USE_LF) |
+ BIT(PMU_SREF0_ENTER_EN) |
+ BIT(PMU_SREF1_ENTER_EN) |
+ BIT(PMU_DDRC0_GATING_EN) |
+ BIT(PMU_DDRC1_GATING_EN) |
+ BIT(PMU_DDRIO0_RET_EN) |
+ BIT(PMU_DDRIO0_RET_DE_REQ) |
+ BIT(PMU_DDRIO1_RET_EN) |
+ BIT(PMU_DDRIO1_RET_DE_REQ) |
+ BIT(PMU_CENTER_PD_EN) |
+ BIT(PMU_PERILP_PD_EN) |
+ BIT(PMU_CLK_PERILP_SRC_GATE_EN) |
+ BIT(PMU_PLL_PD_EN) |
+ BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
+ BIT(PMU_OSC_DIS) |
+ BIT(PMU_PMU_USE_LF);
+
+ mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
+ mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);
+
+ mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
+ mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
+ mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
+}
+
+static void set_hw_idle(uint32_t hw_idle)
+{
+ mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
+}
+
+static void clr_hw_idle(uint32_t hw_idle)
+{
+ mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
+}
+
+static uint32_t iomux_status[12];
+static uint32_t pull_mode_status[12];
+static uint32_t gpio_direction[3];
+static uint32_t gpio_2_4_clk_gate;
+
+static void suspend_apio(void)
+{
+ struct bl_aux_rk_apio_info *suspend_apio;
+ int i;
+
+ suspend_apio = plat_get_rockchip_suspend_apio();
+
+ if (!suspend_apio)
+ return;
+
+ /* save gpio2 ~ gpio4 iomux and pull mode */
+ for (i = 0; i < 12; i++) {
+ iomux_status[i] = mmio_read_32(GRF_BASE +
+ GRF_GPIO2A_IOMUX + i * 4);
+ pull_mode_status[i] = mmio_read_32(GRF_BASE +
+ GRF_GPIO2A_P + i * 4);
+ }
+
+ /* store gpio2 ~ gpio4 clock gate state */
+ gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >>
+ PCLK_GPIO2_GATE_SHIFT) & 0x07;
+
+ /* enable gpio2 ~ gpio4 clock gate */
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+ BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));
+
+ /* save gpio2 ~ gpio4 direction */
+ gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04);
+ gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04);
+ gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04);
+
+	/* apio1 covers gpio3a0 ~ gpio3c7 */
+ if (suspend_apio->apio1) {
+
+ /* set gpio3a0 ~ gpio3c7 iomux to gpio */
+ mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+ mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+ mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+
+ /* set gpio3a0 ~ gpio3c7 pull mode to pull none */
+ mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0);
+ mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0);
+ mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0);
+
+ /* set gpio3a0 ~ gpio3c7 to input */
+ mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff);
+ }
+
+	/* apio2 covers gpio2a0 ~ gpio2b4 */
+ if (suspend_apio->apio2) {
+
+ /* set gpio2a0 ~ gpio2b4 iomux to gpio */
+ mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+ mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+
+ /* set gpio2a0 ~ gpio2b4 pull mode to pull none */
+ mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0);
+ mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0);
+
+ /* set gpio2a0 ~ gpio2b4 to input */
+ mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff);
+ }
+
+	/* apio3 covers gpio2c0 ~ gpio2d4 */
+ if (suspend_apio->apio3) {
+
+		/* set gpio2c0 ~ gpio2d4 iomux to gpio */
+ mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+ mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+
+ /* set gpio2c0 ~ gpio2d4 pull mode to pull none */
+ mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0);
+ mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0);
+
+ /* set gpio2c0 ~ gpio2d4 to input */
+ mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000);
+ }
+
+	/* apio4 covers gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */
+ if (suspend_apio->apio4) {
+
+ /* set gpio4c0 ~ gpio4d6 iomux to gpio */
+ mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+ mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+
+ /* set gpio4c0 ~ gpio4d6 pull mode to pull none */
+ mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0);
+ mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0);
+
+ /* set gpio4c0 ~ gpio4d6 to input */
+ mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000);
+ }
+
+	/* apio5 covers gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7 */
+ if (suspend_apio->apio5) {
+ /* set gpio3d0 ~ gpio4a7 iomux to gpio */
+ mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+ mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX,
+ REG_SOC_WMSK | GRF_IOMUX_GPIO);
+
+ /* set gpio3d0 ~ gpio4a7 pull mode to pull none */
+ mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0);
+ mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0);
+
+		/* set gpio3d0 ~ gpio4a7 to input */
+ mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000);
+ mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff);
+ }
+}
+
+static void resume_apio(void)
+{
+ struct bl_aux_rk_apio_info *suspend_apio;
+ int i;
+
+ suspend_apio = plat_get_rockchip_suspend_apio();
+
+ if (!suspend_apio)
+ return;
+
+ for (i = 0; i < 12; i++) {
+ mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4,
+ REG_SOC_WMSK | pull_mode_status[i]);
+ mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
+ REG_SOC_WMSK | iomux_status[i]);
+ }
+
+	/* set gpio2 ~ gpio4 direction back to the stored values */
+ mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]);
+ mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]);
+ mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]);
+
+	/* set gpio2 ~ gpio4 clock gate back to the stored value */
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+ BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07,
+ PCLK_GPIO2_GATE_SHIFT));
+}
+
+static void suspend_gpio(void)
+{
+ struct bl_aux_gpio_info *suspend_gpio;
+ uint32_t count;
+ int i;
+
+ suspend_gpio = plat_get_rockchip_suspend_gpio(&count);
+
+ for (i = 0; i < count; i++) {
+ gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity);
+ gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
+ udelay(1);
+ }
+}
+
+static void resume_gpio(void)
+{
+ struct bl_aux_gpio_info *suspend_gpio;
+ uint32_t count;
+ int i;
+
+ suspend_gpio = plat_get_rockchip_suspend_gpio(&count);
+
+ for (i = count - 1; i >= 0; i--) {
+ gpio_set_value(suspend_gpio[i].index,
+ !suspend_gpio[i].polarity);
+ gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
+ udelay(1);
+ }
+}
+
+void sram_save(void)
+{
+ size_t text_size = (char *)&__bl31_sram_text_real_end -
+ (char *)&__bl31_sram_text_start;
+ size_t data_size = (char *)&__bl31_sram_data_real_end -
+ (char *)&__bl31_sram_data_start;
+ size_t incbin_size = (char *)&__sram_incbin_real_end -
+ (char *)&__sram_incbin_start;
+
+ memcpy(&store_sram[0], &__bl31_sram_text_start, text_size);
+ memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size);
+ memcpy(&store_sram[text_size + data_size], &__sram_incbin_start,
+ incbin_size);
+}
+
+void sram_restore(void)
+{
+ size_t text_size = (char *)&__bl31_sram_text_real_end -
+ (char *)&__bl31_sram_text_start;
+ size_t data_size = (char *)&__bl31_sram_data_real_end -
+ (char *)&__bl31_sram_data_start;
+ size_t incbin_size = (char *)&__sram_incbin_real_end -
+ (char *)&__sram_incbin_start;
+
+ memcpy(&__bl31_sram_text_start, &store_sram[0], text_size);
+ memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size);
+ memcpy(&__sram_incbin_start, &store_sram[text_size + data_size],
+ incbin_size);
+}
+
+struct uart_debug {
+ uint32_t uart_dll;
+ uint32_t uart_dlh;
+ uint32_t uart_ier;
+ uint32_t uart_fcr;
+ uint32_t uart_mcr;
+ uint32_t uart_lcr;
+};
+
+#define UART_DLL 0x00
+#define UART_DLH 0x04
+#define UART_IER 0x04
+#define UART_FCR 0x08
+#define UART_LCR 0x0c
+#define UART_MCR 0x10
+#define UARTSRR 0x88
+
+#define UART_RESET BIT(0)
+#define UARTFCR_FIFOEN BIT(0)
+#define RCVR_FIFO_RESET BIT(1)
+#define XMIT_FIFO_RESET BIT(2)
+#define DIAGNOSTIC_MODE BIT(4)
+#define UARTLCR_DLAB BIT(7)
+
+static struct uart_debug uart_save;
+
+void suspend_uart(void)
+{
+ uint32_t uart_base = rockchip_get_uart_base();
+
+ if (uart_base == 0)
+ return;
+
+ uart_save.uart_lcr = mmio_read_32(uart_base + UART_LCR);
+ uart_save.uart_ier = mmio_read_32(uart_base + UART_IER);
+ uart_save.uart_mcr = mmio_read_32(uart_base + UART_MCR);
+ mmio_write_32(uart_base + UART_LCR,
+ uart_save.uart_lcr | UARTLCR_DLAB);
+ uart_save.uart_dll = mmio_read_32(uart_base + UART_DLL);
+ uart_save.uart_dlh = mmio_read_32(uart_base + UART_DLH);
+ mmio_write_32(uart_base + UART_LCR, uart_save.uart_lcr);
+}
+
+void resume_uart(void)
+{
+ uint32_t uart_base = rockchip_get_uart_base();
+ uint32_t uart_lcr;
+
+ if (uart_base == 0)
+ return;
+
+ mmio_write_32(uart_base + UARTSRR,
+ XMIT_FIFO_RESET | RCVR_FIFO_RESET | UART_RESET);
+
+ uart_lcr = mmio_read_32(uart_base + UART_LCR);
+ mmio_write_32(uart_base + UART_MCR, DIAGNOSTIC_MODE);
+ mmio_write_32(uart_base + UART_LCR, uart_lcr | UARTLCR_DLAB);
+ mmio_write_32(uart_base + UART_DLL, uart_save.uart_dll);
+ mmio_write_32(uart_base + UART_DLH, uart_save.uart_dlh);
+ mmio_write_32(uart_base + UART_LCR, uart_save.uart_lcr);
+ mmio_write_32(uart_base + UART_IER, uart_save.uart_ier);
+ mmio_write_32(uart_base + UART_FCR, UARTFCR_FIFOEN);
+ mmio_write_32(uart_base + UART_MCR, uart_save.uart_mcr);
+}
+
+void save_usbphy(void)
+{
+ store_usbphy0[0] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL0);
+ store_usbphy0[1] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL2);
+ store_usbphy0[2] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL3);
+ store_usbphy0[3] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL12);
+ store_usbphy0[4] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL13);
+ store_usbphy0[5] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL15);
+ store_usbphy0[6] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL16);
+
+ store_usbphy1[0] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL0);
+ store_usbphy1[1] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL2);
+ store_usbphy1[2] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL3);
+ store_usbphy1[3] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL12);
+ store_usbphy1[4] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL13);
+ store_usbphy1[5] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL15);
+ store_usbphy1[6] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL16);
+}
+
+void restore_usbphy(void)
+{
+ mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL0,
+ REG_SOC_WMSK | store_usbphy0[0]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL2,
+ REG_SOC_WMSK | store_usbphy0[1]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL3,
+ REG_SOC_WMSK | store_usbphy0[2]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL12,
+ REG_SOC_WMSK | store_usbphy0[3]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL13,
+ REG_SOC_WMSK | store_usbphy0[4]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL15,
+ REG_SOC_WMSK | store_usbphy0[5]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL16,
+ REG_SOC_WMSK | store_usbphy0[6]);
+
+ mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL0,
+ REG_SOC_WMSK | store_usbphy1[0]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL2,
+ REG_SOC_WMSK | store_usbphy1[1]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL3,
+ REG_SOC_WMSK | store_usbphy1[2]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL12,
+ REG_SOC_WMSK | store_usbphy1[3]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL13,
+ REG_SOC_WMSK | store_usbphy1[4]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL15,
+ REG_SOC_WMSK | store_usbphy1[5]);
+ mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL16,
+ REG_SOC_WMSK | store_usbphy1[6]);
+}
+
+void grf_register_save(void)
+{
+ int i;
+
+ store_grf_soc_con0 = mmio_read_32(GRF_BASE + GRF_SOC_CON(0));
+ store_grf_soc_con1 = mmio_read_32(GRF_BASE + GRF_SOC_CON(1));
+ store_grf_soc_con2 = mmio_read_32(GRF_BASE + GRF_SOC_CON(2));
+ store_grf_soc_con3 = mmio_read_32(GRF_BASE + GRF_SOC_CON(3));
+ store_grf_soc_con4 = mmio_read_32(GRF_BASE + GRF_SOC_CON(4));
+ store_grf_soc_con7 = mmio_read_32(GRF_BASE + GRF_SOC_CON(7));
+
+ for (i = 0; i < 4; i++)
+ store_grf_ddrc_con[i] =
+ mmio_read_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4);
+
+ store_grf_io_vsel = mmio_read_32(GRF_BASE + GRF_IO_VSEL);
+}
+
+void grf_register_restore(void)
+{
+ int i;
+
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(0),
+ REG_SOC_WMSK | store_grf_soc_con0);
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(1),
+ REG_SOC_WMSK | store_grf_soc_con1);
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(2),
+ REG_SOC_WMSK | store_grf_soc_con2);
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(3),
+ REG_SOC_WMSK | store_grf_soc_con3);
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(4),
+ REG_SOC_WMSK | store_grf_soc_con4);
+ mmio_write_32(GRF_BASE + GRF_SOC_CON(7),
+ REG_SOC_WMSK | store_grf_soc_con7);
+
+ for (i = 0; i < 4; i++)
+ mmio_write_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4,
+ REG_SOC_WMSK | store_grf_ddrc_con[i]);
+
+ mmio_write_32(GRF_BASE + GRF_IO_VSEL, REG_SOC_WMSK | store_grf_io_vsel);
+}
+
+void cru_register_save(void)
+{
+ int i;
+
+ for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4)
+ store_cru[i / 4] = mmio_read_32(CRU_BASE + i);
+}
+
+void cru_register_restore(void)
+{
+ int i;
+
+ for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) {
+
+		/*
+		 * Since DPLL and CRU_CLKSEL_CON6 have already been restored
+		 * in dmc_resume, and ABPLL will be restored later, skip them.
+		 */
+ if ((i == CRU_CLKSEL_CON6) ||
+ (i >= CRU_PLL_CON(ABPLL_ID, 0) &&
+ i <= CRU_PLL_CON(DPLL_ID, 5)))
+ continue;
+
+ if ((i == CRU_PLL_CON(ALPLL_ID, 2)) ||
+ (i == CRU_PLL_CON(CPLL_ID, 2)) ||
+ (i == CRU_PLL_CON(GPLL_ID, 2)) ||
+ (i == CRU_PLL_CON(NPLL_ID, 2)) ||
+ (i == CRU_PLL_CON(VPLL_ID, 2)))
+ mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
+		/*
+		 * CRU_GLB_CNT_TH and CRU_CLKSEL_CON97~CRU_CLKSEL_CON107
+		 * do not need the high 16-bit write mask.
+		 */
+ else if ((i > 0x27c && i < 0x2b0) || (i == 0x508))
+ mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
+ else
+ mmio_write_32(CRU_BASE + i,
+ REG_SOC_WMSK | store_cru[i / 4]);
+ }
+}
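+
+/*
+ * Note on the write-mask idiom used throughout this file (illustrative, not
+ * part of the original driver): most CRU/GRF registers on this SoC carry a
+ * write-enable mask in their upper 16 bits, so restoring a saved value is
+ * done as
+ *
+ *	mmio_write_32(CRU_BASE + reg, REG_SOC_WMSK | saved_value);
+ *
+ * which asserts all write-enable bits and rewrites the whole low half-word.
+ * Registers without such a mask (e.g. CRU_GLB_CNT_TH, as noted above) are
+ * simply written back verbatim.
+ */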
+
+void wdt_register_save(void)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ store_wdt0[i] = mmio_read_32(WDT0_BASE + i * 4);
+ store_wdt1[i] = mmio_read_32(WDT1_BASE + i * 4);
+ }
+ pmu_enable_watchdog0 = (uint8_t) store_wdt0[0] & 0x1;
+}
+
+void wdt_register_restore(void)
+{
+ int i;
+
+ for (i = 1; i >= 0; i--) {
+ mmio_write_32(WDT0_BASE + i * 4, store_wdt0[i]);
+ mmio_write_32(WDT1_BASE + i * 4, store_wdt1[i]);
+ }
+
+ /* write 0x76 to cnt_restart to keep watchdog alive */
+ mmio_write_32(WDT0_BASE + 0x0c, 0x76);
+ mmio_write_32(WDT1_BASE + 0x0c, 0x76);
+}
+
+int rockchip_soc_sys_pwr_dm_suspend(void)
+{
+ uint32_t wait_cnt = 0;
+ uint32_t status = 0;
+
+ ddr_prepare_for_sys_suspend();
+ dmc_suspend();
+ pmu_scu_b_pwrdn();
+
+ gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
+ gicv3_distif_save(&dist_ctx);
+
+	/* the usbphy state must be saved before the PERIHP PD is shut down */
+ save_usbphy();
+
+ pmu_power_domains_suspend();
+ set_hw_idle(BIT(PMU_CLR_CENTER1) |
+ BIT(PMU_CLR_ALIVE) |
+ BIT(PMU_CLR_MSCH0) |
+ BIT(PMU_CLR_MSCH1) |
+ BIT(PMU_CLR_CCIM0) |
+ BIT(PMU_CLR_CCIM1) |
+ BIT(PMU_CLR_CENTER) |
+ BIT(PMU_CLR_PERILP) |
+ BIT(PMU_CLR_PERILPM0) |
+ BIT(PMU_CLR_GIC));
+ set_pmu_rsthold();
+ sys_slp_config();
+
+ m0_configure_execute_addr(M0PMU_BINCODE_BASE);
+ m0_start();
+
+ pmu_sgrf_rst_hld();
+
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+ ((uintptr_t)&pmu_cpuson_entrypoint >>
+ CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
+
+ mmio_write_32(PMU_BASE + PMU_ADB400_CON,
+ BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
+ BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
+ BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
+ dsb();
+ status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
+ BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
+ BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
+ while ((mmio_read_32(PMU_BASE +
+ PMU_ADB400_ST) & status) != status) {
+ wait_cnt++;
+ if (wait_cnt >= MAX_WAIT_COUNT) {
+ ERROR("%s:wait cluster-b l2(%x)\n", __func__,
+ mmio_read_32(PMU_BASE + PMU_ADB400_ST));
+ panic();
+ }
+ udelay(1);
+ }
+ mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));
+
+ wdt_register_save();
+ secure_watchdog_gate();
+
+ /*
+	 * Disabling the PLLs/PWMs/DVFS is one of the last steps in suspend,
+	 * just before WFI.
+ */
+ disable_dvfs_plls();
+ disable_pwms();
+ disable_nodvfs_plls();
+
+ suspend_apio();
+ suspend_gpio();
+ suspend_uart();
+ grf_register_save();
+ cru_register_save();
+ sram_save();
+ plat_rockchip_save_gpio();
+
+ return 0;
+}
+
+int rockchip_soc_sys_pwr_dm_resume(void)
+{
+ uint32_t wait_cnt = 0;
+ uint32_t status = 0;
+
+ plat_rockchip_restore_gpio();
+ cru_register_restore();
+ grf_register_restore();
+ wdt_register_restore();
+ resume_uart();
+ resume_apio();
+ resume_gpio();
+ enable_nodvfs_plls();
+ enable_pwms();
+ /* PWM regulators take time to come up; give 300us to be safe. */
+ udelay(300);
+ enable_dvfs_plls();
+
+ secure_sgrf_init();
+ secure_sgrf_ddr_rgn_init();
+
+ /* restore clk_ddrc_bpll_src_en gate */
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3),
+ BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0));
+
+ /*
+	 * The wakeup status does not clear itself, so we need to clear it
+	 * manually. Otherwise a stale interrupt would be reported on the
+	 * next query.
+ *
+ * NOTE: If the kernel needs to query this, we might want to stash it
+ * somewhere.
+ */
+ mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);
+ mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);
+
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+ (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
+ CPU_BOOT_ADDR_WMASK);
+
+ mmio_write_32(PMU_BASE + PMU_CCI500_CON,
+ WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
+ WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
+ WMSK_BIT(PMU_QGATING_CCI500_CFG));
+ dsb();
+ mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
+ BIT(PMU_SCU_B_PWRDWN_EN));
+
+ mmio_write_32(PMU_BASE + PMU_ADB400_CON,
+ WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
+ WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
+ WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
+ WMSK_BIT(PMU_CLR_CORE_L_HW) |
+ WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
+ WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));
+
+ status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
+ BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
+ BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
+
+ while ((mmio_read_32(PMU_BASE +
+ PMU_ADB400_ST) & status)) {
+ wait_cnt++;
+ if (wait_cnt >= MAX_WAIT_COUNT) {
+ ERROR("%s:wait cluster-b l2(%x)\n", __func__,
+ mmio_read_32(PMU_BASE + PMU_ADB400_ST));
+ panic();
+ }
+ udelay(1);
+ }
+
+ pmu_scu_b_pwrup();
+ pmu_power_domains_resume();
+
+ restore_abpll();
+ clr_hw_idle(BIT(PMU_CLR_CENTER1) |
+ BIT(PMU_CLR_ALIVE) |
+ BIT(PMU_CLR_MSCH0) |
+ BIT(PMU_CLR_MSCH1) |
+ BIT(PMU_CLR_CCIM0) |
+ BIT(PMU_CLR_CCIM1) |
+ BIT(PMU_CLR_CENTER) |
+ BIT(PMU_CLR_PERILP) |
+ BIT(PMU_CLR_PERILPM0) |
+ BIT(PMU_CLR_GIC));
+
+ gicv3_distif_init_restore(&dist_ctx);
+ gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
+ plat_rockchip_gic_cpuif_enable();
+ m0_stop();
+
+ restore_usbphy();
+
+ ddr_prepare_for_sys_resume();
+
+ return 0;
+}
+
+void __dead2 rockchip_soc_soft_reset(void)
+{
+ struct bl_aux_gpio_info *rst_gpio;
+
+ rst_gpio = plat_get_rockchip_gpio_reset();
+
+ if (rst_gpio) {
+ gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
+ gpio_set_value(rst_gpio->index, rst_gpio->polarity);
+ } else {
+ soc_global_soft_reset();
+ }
+
+ while (1)
+ ;
+}
+
+void __dead2 rockchip_soc_system_off(void)
+{
+ struct bl_aux_gpio_info *poweroff_gpio;
+
+ poweroff_gpio = plat_get_rockchip_gpio_poweroff();
+
+ if (poweroff_gpio) {
+		/*
+		 * If the TSADC over-temperature pin (GPIO1A6) is used as the
+		 * shutdown gpio, its iomux must first be set back to the
+		 * gpio function.
+		 */
+ if (poweroff_gpio->index == TSADC_INT_PIN) {
+ mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
+ GPIO1A6_IOMUX);
+ }
+ gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
+ gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
+ } else {
+ WARN("Do nothing when system off\n");
+ }
+
+ while (1)
+ ;
+}
+
+void rockchip_plat_mmu_el3(void)
+{
+ size_t sram_size;
+
+ /* sram.text size */
+ sram_size = (char *)&__bl31_sram_text_end -
+ (char *)&__bl31_sram_text_start;
+ mmap_add_region((unsigned long)&__bl31_sram_text_start,
+ (unsigned long)&__bl31_sram_text_start,
+ sram_size, MT_MEMORY | MT_RO | MT_SECURE);
+
+ /* sram.data size */
+ sram_size = (char *)&__bl31_sram_data_end -
+ (char *)&__bl31_sram_data_start;
+ mmap_add_region((unsigned long)&__bl31_sram_data_start,
+ (unsigned long)&__bl31_sram_data_start,
+ sram_size, MT_MEMORY | MT_RW | MT_SECURE);
+
+ sram_size = (char *)&__bl31_sram_stack_end -
+ (char *)&__bl31_sram_stack_start;
+ mmap_add_region((unsigned long)&__bl31_sram_stack_start,
+ (unsigned long)&__bl31_sram_stack_start,
+ sram_size, MT_MEMORY | MT_RW | MT_SECURE);
+
+ sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start;
+ mmap_add_region((unsigned long)&__sram_incbin_start,
+ (unsigned long)&__sram_incbin_start,
+ sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE);
+}
+
+void plat_rockchip_pmu_init(void)
+{
+ uint32_t cpu;
+
+ rockchip_pd_lock_init();
+
+	/* the boot address register only holds 32 bits, so keep the warm boot entry within 32 bits */
+ cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
+
+ for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
+ cpuson_flags[cpu] = 0;
+
+ for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
+ clst_warmboot_data[cpu] = 0;
+
+	/* configure the cpus' warm boot address */
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+ (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
+ CPU_BOOT_ADDR_WMASK);
+ mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);
+
+ /*
+ * Enable Schmitt trigger for better 32 kHz input signal, which is
+ * important for suspend/resume reliability among other things.
+ */
+ mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);
+
+ init_pmu_counts();
+
+ nonboot_cpus_off();
+
+ INFO("%s(%d): pd status %x\n", __func__, __LINE__,
+ mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
+}
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.h b/plat/rockchip/rk3399/drivers/pmu/pmu.h
new file mode 100644
index 0000000..bb7de50
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PMU_H
+#define PMU_H
+
+#include <pmu_bits.h>
+#include <pmu_regs.h>
+#include <soc.h>
+
+/* Allocate the sp region in pmusram */
+#define PSRAM_SP_SIZE 0x80
+#define PSRAM_SP_BOTTOM (PSRAM_SP_TOP - PSRAM_SP_SIZE)
+
+/*****************************************************************************
+ * Common definitions for the per-SoC pmu.h
+ *****************************************************************************/
+/* The ways of controlling the core power domains */
+enum cores_pm_ctr_mode {
+ core_pwr_pd = 0,
+ core_pwr_wfi = 1,
+ core_pwr_wfi_int = 2
+};
+
+/*****************************************************************************
+ * pmu con,reg
+ *****************************************************************************/
+#define PMU_WKUP_CFG(n) ((n) * 4)
+
+#define PMU_CORE_PM_CON(cpu) (0xc0 + (cpu * 4))
+
+/* bit shifts for the core status fields */
+enum pmu_core_pwrst_shift {
+ clstl_cpu_wfe = 2,
+ clstl_cpu_wfi = 6,
+ clstb_cpu_wfe = 12,
+ clstb_cpu_wfi = 16
+};
+
+#define CKECK_WFE_MSK 0x1
+#define CKECK_WFI_MSK 0x10
+#define CKECK_WFEI_MSK 0x11
+
+/* Specific features required */
+#define AP_PWROFF 0x0a
+
+#define GPIO0A0_SMT_ENABLE BITS_WITH_WMASK(1, 3, 0)
+#define GPIO1A6_IOMUX BITS_WITH_WMASK(0, 3, 12)
+
+#define TSADC_INT_PIN 38
+#define CORES_PM_DISABLE 0x0
+
+#define PD_CTR_LOOP 10000
+#define CHK_CPU_LOOP 500
+#define MAX_WAIT_COUNT 1000
+
+#define GRF_SOC_CON4 0x0e210
+
+#define PMUGRF_GPIO0A_SMT 0x0120
+#define PMUGRF_SOC_CON0 0x0180
+
+#define CCI_FORCE_WAKEUP WMSK_BIT(8)
+#define EXTERNAL_32K WMSK_BIT(0)
+
+#define PLL_PD_HW 0xff
+#define IOMUX_CLK_32K 0x00030002
+#define NOC_AUTO_ENABLE 0x3fffffff
+
+#define SAVE_QOS(array, NAME) \
+ RK3399_CPU_AXI_SAVE_QOS(array, CPU_AXI_##NAME##_QOS_BASE)
+#define RESTORE_QOS(array, NAME) \
+ RK3399_CPU_AXI_RESTORE_QOS(array, CPU_AXI_##NAME##_QOS_BASE)
+
+#define RK3399_CPU_AXI_SAVE_QOS(array, base) do { \
+ array[0] = mmio_read_32(base + CPU_AXI_QOS_ID_COREID); \
+ array[1] = mmio_read_32(base + CPU_AXI_QOS_REVISIONID); \
+ array[2] = mmio_read_32(base + CPU_AXI_QOS_PRIORITY); \
+ array[3] = mmio_read_32(base + CPU_AXI_QOS_MODE); \
+ array[4] = mmio_read_32(base + CPU_AXI_QOS_BANDWIDTH); \
+ array[5] = mmio_read_32(base + CPU_AXI_QOS_SATURATION); \
+ array[6] = mmio_read_32(base + CPU_AXI_QOS_EXTCONTROL); \
+} while (0)
+
+#define RK3399_CPU_AXI_RESTORE_QOS(array, base) do { \
+ mmio_write_32(base + CPU_AXI_QOS_ID_COREID, array[0]); \
+ mmio_write_32(base + CPU_AXI_QOS_REVISIONID, array[1]); \
+ mmio_write_32(base + CPU_AXI_QOS_PRIORITY, array[2]); \
+ mmio_write_32(base + CPU_AXI_QOS_MODE, array[3]); \
+ mmio_write_32(base + CPU_AXI_QOS_BANDWIDTH, array[4]); \
+ mmio_write_32(base + CPU_AXI_QOS_SATURATION, array[5]); \
+ mmio_write_32(base + CPU_AXI_QOS_EXTCONTROL, array[6]); \
+} while (0)
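+
+/*
+ * Minimal usage sketch (illustrative only, not taken from the driver):
+ * save one master's QoS registers into a scratch array before its power
+ * domain goes down and restore them afterwards. CPU_AXI_GPU_QOS_BASE is
+ * assumed to be provided by the platform headers, as the gpu_qos entry in
+ * pmu_slpdata_s below suggests.
+ *
+ *	uint32_t gpu_qos[CPU_AXI_QOS_NUM_REGS];
+ *
+ *	SAVE_QOS(gpu_qos, GPU);
+ *	... power the domain down and back up ...
+ *	RESTORE_QOS(gpu_qos, GPU);
+ */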
+
+struct pmu_slpdata_s {
+ uint32_t cci_m0_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t cci_m1_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t dmac0_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t dmac1_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t dcf_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t crypto0_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t crypto1_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t pmu_cm0_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t peri_cm1_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t gic_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t sdmmc_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t gmac_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t emmc_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t usb_otg0_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t usb_otg1_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t usb_host0_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t usb_host1_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t gpu_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t video_m0_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t video_m1_r_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t video_m1_w_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t rga_r_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t rga_w_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t vop_big_r[CPU_AXI_QOS_NUM_REGS];
+ uint32_t vop_big_w[CPU_AXI_QOS_NUM_REGS];
+ uint32_t vop_little[CPU_AXI_QOS_NUM_REGS];
+ uint32_t iep_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t isp1_m0_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t isp1_m1_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t isp0_m0_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t isp0_m1_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t hdcp_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t perihp_nsp_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t perilp_nsp_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t perilpslv_nsp_qos[CPU_AXI_QOS_NUM_REGS];
+ uint32_t sdio_qos[CPU_AXI_QOS_NUM_REGS];
+};
+
+extern uint32_t clst_warmboot_data[PLATFORM_CLUSTER_COUNT];
+
+extern void sram_func_set_ddrctl_pll(uint32_t pll_src);
+void pmu_power_domains_on(void);
+
+#endif /* PMU_H */
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu_fw.c b/plat/rockchip/rk3399/drivers/pmu/pmu_fw.c
new file mode 100644
index 0000000..25596b1
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu_fw.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* convoluted way to make sure that the define is pasted just the right way */
+#define INCBIN(file, sym, sec) \
+ __asm__( \
+ ".section " sec "\n" \
+ ".global " sym "\n" \
+ ".type " sym ", %object\n" \
+ ".align 4\n" \
+ sym ":\n" \
+ ".incbin \"" file "\"\n" \
+ ".size " sym ", .-" sym "\n" \
+ ".global " sym "_end\n" \
+ sym "_end:\n" \
+ )
+
+INCBIN(RK3399M0FW, "rk3399m0_bin", ".sram.incbin");
+INCBIN(RK3399M0PMUFW, "rk3399m0pmu_bin", ".pmusram.incbin");
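+
+/*
+ * For reference only (hand-written illustration of what the first invocation
+ * above pastes together, not compiler output): the M0 firmware blob ends up
+ * in the .sram.incbin section, bracketed by rk3399m0_bin/rk3399m0_bin_end:
+ *
+ *	__asm__(
+ *		".section .sram.incbin\n"
+ *		".global rk3399m0_bin\n"
+ *		".type rk3399m0_bin, %object\n"
+ *		".align 4\n"
+ *		"rk3399m0_bin:\n"
+ *		".incbin \"<path supplied as RK3399M0FW>\"\n"
+ *		".size rk3399m0_bin, .-rk3399m0_bin\n"
+ *		".global rk3399m0_bin_end\n"
+ *		"rk3399m0_bin_end:\n"
+ *	);
+ */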
diff --git a/plat/rockchip/rk3399/drivers/pwm/pwm.c b/plat/rockchip/rk3399/drivers/pwm/pwm.c
new file mode 100644
index 0000000..11c1565
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pwm/pwm.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat_private.h>
+#include <pmu.h>
+#include <pwm.h>
+#include <soc.h>
+
+#define PWM0_IOMUX_PWM_EN (1 << 0)
+#define PWM1_IOMUX_PWM_EN (1 << 1)
+#define PWM2_IOMUX_PWM_EN (1 << 2)
+#define PWM3_IOMUX_PWM_EN (1 << 3)
+
+struct pwm_data_s {
+ uint32_t iomux_bitmask;
+ uint32_t enable_bitmask;
+};
+
+static struct pwm_data_s pwm_data;
+
+/*
+ * Disable the PWMs.
+ */
+void disable_pwms(void)
+{
+ uint32_t i, val;
+
+ pwm_data.iomux_bitmask = 0;
+
+ /* Save PWMs pinmux and change PWMs pinmux to GPIOs */
+ val = mmio_read_32(GRF_BASE + GRF_GPIO4C_IOMUX);
+ if (((val >> GRF_GPIO4C2_IOMUX_SHIFT) &
+ GRF_IOMUX_2BIT_MASK) == GRF_GPIO4C2_IOMUX_PWM) {
+ pwm_data.iomux_bitmask |= PWM0_IOMUX_PWM_EN;
+ val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK,
+ GRF_GPIO4C2_IOMUX_SHIFT);
+ mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val);
+ }
+
+ val = mmio_read_32(GRF_BASE + GRF_GPIO4C_IOMUX);
+ if (((val >> GRF_GPIO4C6_IOMUX_SHIFT) &
+ GRF_IOMUX_2BIT_MASK) == GRF_GPIO4C6_IOMUX_PWM) {
+ pwm_data.iomux_bitmask |= PWM1_IOMUX_PWM_EN;
+ val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK,
+ GRF_GPIO4C6_IOMUX_SHIFT);
+ mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val);
+ }
+
+ val = mmio_read_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX);
+ if (((val >> PMUGRF_GPIO1C3_IOMUX_SHIFT) &
+ GRF_IOMUX_2BIT_MASK) == PMUGRF_GPIO1C3_IOMUX_PWM) {
+ pwm_data.iomux_bitmask |= PWM2_IOMUX_PWM_EN;
+ val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK,
+ PMUGRF_GPIO1C3_IOMUX_SHIFT);
+ mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX, val);
+ }
+
+ val = mmio_read_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX);
+ if (((val >> PMUGRF_GPIO0A6_IOMUX_SHIFT) &
+ GRF_IOMUX_2BIT_MASK) == PMUGRF_GPIO0A6_IOMUX_PWM) {
+ pwm_data.iomux_bitmask |= PWM3_IOMUX_PWM_EN;
+ val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK,
+ PMUGRF_GPIO0A6_IOMUX_SHIFT);
+ mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX, val);
+ }
+
+ /* Disable the pwm channel */
+ pwm_data.enable_bitmask = 0;
+ for (i = 0; i < 4; i++) {
+ val = mmio_read_32(PWM_BASE + PWM_CTRL(i));
+ if ((val & PWM_ENABLE) != PWM_ENABLE)
+ continue;
+ pwm_data.enable_bitmask |= (1 << i);
+ mmio_write_32(PWM_BASE + PWM_CTRL(i), val & ~PWM_ENABLE);
+ }
+}
+
+/*
+ * Enable the PWMs.
+ */
+void enable_pwms(void)
+{
+ uint32_t i, val;
+
+ for (i = 0; i < 4; i++) {
+ val = mmio_read_32(PWM_BASE + PWM_CTRL(i));
+ if (!(pwm_data.enable_bitmask & (1 << i)))
+ continue;
+ mmio_write_32(PWM_BASE + PWM_CTRL(i), val | PWM_ENABLE);
+ }
+
+ /* Restore all IOMUXes */
+ if (pwm_data.iomux_bitmask & PWM3_IOMUX_PWM_EN) {
+ val = BITS_WITH_WMASK(PMUGRF_GPIO0A6_IOMUX_PWM,
+ GRF_IOMUX_2BIT_MASK,
+ PMUGRF_GPIO0A6_IOMUX_SHIFT);
+ mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX, val);
+ }
+
+ if (pwm_data.iomux_bitmask & PWM2_IOMUX_PWM_EN) {
+ val = BITS_WITH_WMASK(PMUGRF_GPIO1C3_IOMUX_PWM,
+ GRF_IOMUX_2BIT_MASK,
+ PMUGRF_GPIO1C3_IOMUX_SHIFT);
+ mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX, val);
+ }
+
+ if (pwm_data.iomux_bitmask & PWM1_IOMUX_PWM_EN) {
+ val = BITS_WITH_WMASK(GRF_GPIO4C6_IOMUX_PWM,
+ GRF_IOMUX_2BIT_MASK,
+ GRF_GPIO4C6_IOMUX_SHIFT);
+ mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val);
+ }
+
+ if (pwm_data.iomux_bitmask & PWM0_IOMUX_PWM_EN) {
+ val = BITS_WITH_WMASK(GRF_GPIO4C2_IOMUX_PWM,
+ GRF_IOMUX_2BIT_MASK,
+ GRF_GPIO4C2_IOMUX_SHIFT);
+ mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val);
+ }
+}
diff --git a/plat/rockchip/rk3399/drivers/pwm/pwm.h b/plat/rockchip/rk3399/drivers/pwm/pwm.h
new file mode 100644
index 0000000..d665392
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pwm/pwm.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PWM_H
+#define PWM_H
+
+void disable_pwms(void);
+void enable_pwms(void);
+
+#endif /* PWM_H */
diff --git a/plat/rockchip/rk3399/drivers/secure/secure.c b/plat/rockchip/rk3399/drivers/secure/secure.c
new file mode 100644
index 0000000..13c83ca
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/secure/secure.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+
+#include <plat_private.h>
+#include <secure.h>
+#include <soc.h>
+
+static void sgrf_ddr_rgn_global_bypass(uint32_t bypass)
+{
+ if (bypass)
+		/* enable bypass: all ddr regions are treated as non-secure */
+ mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
+ SGRF_DDR_RGN_BYPS);
+ else
+		/* disable bypass for all ddr regions */
+ mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
+ SGRF_DDR_RGN_NO_BYPS);
+}
+
+/**
+ * There are 8 + 1 regions for DDR secure control:
+ * DDR_RGN_0 ~ DDR_RGN_7: each DDR_RGN covers memory at a 1MB granularity
+ * DDR_RGN_X: the memory not covered by DDR_RGN_0 ~ DDR_RGN_7
+ *
+ * DDR_RGN_0 - start address of RGN0
+ * DDR_RGN_8 - end address of RGN0
+ * DDR_RGN_1 - start address of RGN1
+ * DDR_RGN_9 - end address of RGN1
+ * ...
+ * DDR_RGN_7 - start address of RGN7
+ * DDR_RGN_15 - end address of RGN7
+ * DDR_RGN_16 - bits 0 ~ 7 are the secure enable bitmap for RGN0~7,
+ *		0: disable, 1: enable
+ *		bit 8 is the setting for RGNx, i.e. the rest of the memory
+ *		excluding RGN0~7, 0: disable, 1: enable
+ *		bit 9 is the global secure configuration via bypass,
+ *		0: disable bypass, 1: enable bypass
+ *
+ * @rgn - the DDR region 0 ~ 7 to configure
+ * @st - start address to set as secure
+ * @sz - length of the area to set as secure
+ * Internally, st_mb and ed_mb hold the start and end addresses in megabytes.
+ * When st_mb == 0 and ed_mb == 0, the address range 0x0 ~ 0xfffff is secure.
+ *
+ * For example, to set the range [0, 32MB) as secure via DDR_RGN0,
+ * use rgn == 0, st_mb == 0, ed_mb == 31.
+ */
+static void sgrf_ddr_rgn_config(uint32_t rgn,
+ uintptr_t st, size_t sz)
+{
+ uintptr_t ed = st + sz;
+ uintptr_t st_mb, ed_mb;
+
+ assert(rgn <= 7);
+ assert(st < ed);
+
+ /* check aligned 1MB */
+ assert(st % SIZE_M(1) == 0);
+ assert(ed % SIZE_M(1) == 0);
+
+ st_mb = st / SIZE_M(1);
+ ed_mb = ed / SIZE_M(1);
+
+ /* set ddr region addr start */
+ mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(rgn),
+ BITS_WITH_WMASK(st_mb, SGRF_DDR_RGN_0_16_WMSK, 0));
+
+ /* set ddr region addr end */
+ mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(rgn + 8),
+ BITS_WITH_WMASK((ed_mb - 1), SGRF_DDR_RGN_0_16_WMSK, 0));
+
+ mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
+ BIT_WITH_WMSK(rgn));
+}
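+
+/*
+ * Worked example (illustrative, values derived from the code above): securing
+ * a hypothetical 2MB window at 0x100000 through region 1, i.e.
+ *
+ *	sgrf_ddr_rgn_config(1, 0x100000, 0x200000);
+ *
+ * gives st_mb = 1 and ed_mb = 3, so DDRRGN_CON1 is written with 1 (start MB),
+ * DDRRGN_CON9 with 2 (last MB of the region) and bit 1 of DDRRGN_CON16 is set
+ * to enable secure checking for RGN1.
+ */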
+
+void secure_watchdog_gate(void)
+{
+ /**
+ * Disable CA53 and CM0 wdt pclk
+ * BIT[8]: ca53 wdt pclk, 0: enable 1: disable
+ * BIT[10]: cm0 wdt pclk, 0: enable 1: disable
+ */
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3),
+ BIT_WITH_WMSK(PCLK_WDT_CA53_GATE_SHIFT) |
+ BIT_WITH_WMSK(PCLK_WDT_CM0_GATE_SHIFT));
+}
+
+__pmusramfunc void secure_watchdog_ungate(void)
+{
+ /**
+ * Enable CA53 and CM0 wdt pclk
+ * BIT[8]: ca53 wdt pclk, 0: enable 1: disable
+ * BIT[10]: cm0 wdt pclk, 0: enable 1: disable
+ */
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3),
+ WMSK_BIT(PCLK_WDT_CA53_GATE_SHIFT) |
+ WMSK_BIT(PCLK_WDT_CM0_GATE_SHIFT));
+}
+
+__pmusramfunc void sram_secure_timer_init(void)
+{
+ mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT0, 0xffffffff);
+ mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT1, 0xffffffff);
+
+ mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0);
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT1, 0x0);
+
+ /* auto reload & enable the timer */
+ mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_CONTROL_REG,
+ TIMER_EN | TIMER_FMODE);
+}
+
+void secure_timer_init(void)
+{
+ mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT0, 0xffffffff);
+ mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT1, 0xffffffff);
+
+ mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0);
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT1, 0x0);
+
+ /* auto reload & enable the timer */
+ mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_CONTROL_REG,
+ TIMER_EN | TIMER_FMODE);
+}
+
+void secure_sgrf_init(void)
+{
+ /* security config for master */
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(5),
+ REG_SOC_WMSK | SGRF_SOC_ALLMST_NS);
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6),
+ REG_SOC_WMSK | SGRF_SOC_ALLMST_NS);
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(7),
+ REG_SOC_WMSK | SGRF_SOC_ALLMST_NS);
+
+ /* security config for slave */
+ mmio_write_32(SGRF_BASE + SGRF_PMU_SLV_CON0_1(0),
+ SGRF_PMU_SLV_S_CFGED |
+ SGRF_PMU_SLV_CRYPTO1_NS);
+ mmio_write_32(SGRF_BASE + SGRF_PMU_SLV_CON0_1(1),
+ SGRF_SLV_S_WMSK | SGRF_PMUSRAM_S);
+ mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(0),
+ SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
+ mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(1),
+ SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
+ mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(2),
+ SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
+ mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(3),
+ SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
+ mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(4),
+ SGRF_SLV_S_WMSK | SGRF_INTSRAM_S);
+}
+
+void secure_sgrf_ddr_rgn_init(void)
+{
+ sgrf_ddr_rgn_config(0, TZRAM_BASE, TZRAM_SIZE);
+ sgrf_ddr_rgn_global_bypass(0);
+}
diff --git a/plat/rockchip/rk3399/drivers/secure/secure.h b/plat/rockchip/rk3399/drivers/secure/secure.h
new file mode 100644
index 0000000..e31c999
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/secure/secure.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SECURE_H
+#define SECURE_H
+
+/**************************************************
+ * sgrf reg, offset
+ **************************************************/
+#define SGRF_SOC_CON0_1(n) (0xc000 + (n) * 4)
+#define SGRF_SOC_CON3_7(n) (0xe00c + ((n) - 3) * 4)
+#define SGRF_SOC_CON8_15(n) (0x8020 + ((n) - 8) * 4)
+#define SGRF_SOC_CON(n) (n < 3 ? SGRF_SOC_CON0_1(n) :\
+ (n < 8 ? SGRF_SOC_CON3_7(n) :\
+ SGRF_SOC_CON8_15(n)))
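+
+/*
+ * A few example offsets produced by SGRF_SOC_CON(n) (derived from the
+ * definitions above, listed for convenience):
+ *	SGRF_SOC_CON(0) -> 0xc000
+ *	SGRF_SOC_CON(3) -> 0xe00c
+ *	SGRF_SOC_CON(7) -> 0xe01c
+ *	SGRF_SOC_CON(8) -> 0x8020
+ */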
+
+#define SGRF_PMU_SLV_CON0_1(n) (0xc240 + ((n) - 0) * 4)
+#define SGRF_SLV_SECURE_CON0_4(n) (0xe3c0 + ((n) - 0) * 4)
+#define SGRF_DDRRGN_CON0_16(n) ((n) * 4)
+#define SGRF_DDRRGN_CON20_34(n) (0x50 + ((n) - 20) * 4)
+
+/* All masters are non-secure */
+#define SGRF_SOC_ALLMST_NS 0xffff
+
+/* security config for slave */
+#define SGRF_SLV_S_WMSK 0xffff0000
+#define SGRF_SLV_S_ALL_NS 0x0
+
+/* security config for the pmu slave ip */
+/* All slaves are non-secure */
+#define SGRF_PMU_SLV_S_NS BIT_WITH_WMSK(0)
+/* the slaves' secure attributes are configured */
+#define SGRF_PMU_SLV_S_CFGED WMSK_BIT(0)
+#define SGRF_PMU_SLV_CRYPTO1_NS WMSK_BIT(1)
+
+#define SGRF_PMUSRAM_S BIT(8)
+
+#define SGRF_INTSRAM_S BIT(13)
+
+/* ddr region */
+#define SGRF_DDR_RGN_0_16_WMSK 0x0fff /* DDR RGN 0~16 size mask */
+
+#define SGRF_DDR_RGN_DPLL_CLK BIT_WITH_WMSK(15) /* DDR PLL output clock */
+#define SGRF_DDR_RGN_RTC_CLK BIT_WITH_WMSK(14) /* 32K clock for DDR PLL */
+
+/* Security checks for all DDR RGNs are bypassed */
+#define SGRF_DDR_RGN_BYPS BIT_WITH_WMSK(9)
+/* Security checks for all DDR RGNs are not bypassed */
+#define SGRF_DDR_RGN_NO_BYPS WMSK_BIT(9)
+
+/* The master accesses ddr rgn n with the secure attribute */
+#define SGRF_L_MST_S_DDR_RGN(n) BIT_WITH_WMSK((n))
+/* bits[16:8]*/
+#define SGRF_H_MST_S_DDR_RGN(n) BIT_WITH_WMSK((n) + 8)
+
+#define SGRF_PMU_CON0 0x0c100
+#define SGRF_PMU_CON(n) (SGRF_PMU_CON0 + (n) * 4)
+
+/**************************************************
+ * secure timer
+ **************************************************/
+/* channels 0~5 */
+#define STIMER0_CHN_BASE(n) (STIME_BASE + 0x20 * (n))
+/* channels 6~11 */
+#define STIMER1_CHN_BASE(n) (STIME_BASE + 0x8000 + 0x20 * (n))
+
+ /* low 32 bits */
+#define TIMER_END_COUNT0 0x00
+ /* high 32 bits */
+#define TIMER_END_COUNT1 0x04
+
+#define TIMER_CURRENT_VALUE0 0x08
+#define TIMER_CURRENT_VALUE1 0x0C
+
+ /* low 32 bits */
+#define TIMER_INIT_COUNT0 0x10
+ /* high 32 bits */
+#define TIMER_INIT_COUNT1 0x14
+
+#define TIMER_INTSTATUS 0x18
+#define TIMER_CONTROL_REG 0x1c
+
+#define TIMER_EN 0x1
+
+#define TIMER_FMODE (0x0 << 1)
+#define TIMER_RMODE (0x1 << 1)
+
+/**************************************************
+ * secure WDT
+ **************************************************/
+#define PCLK_WDT_CA53_GATE_SHIFT 8
+#define PCLK_WDT_CM0_GATE_SHIFT 10
+
+/* export secure operating APIs */
+void secure_watchdog_gate(void);
+__pmusramfunc void secure_watchdog_ungate(void);
+void secure_timer_init(void);
+void secure_sgrf_init(void);
+void secure_sgrf_ddr_rgn_init(void);
+__pmusramfunc void sram_secure_timer_init(void);
+
+#endif /* SECURE_H */
diff --git a/plat/rockchip/rk3399/drivers/soc/soc.c b/plat/rockchip/rk3399/drivers/soc/soc.c
new file mode 100644
index 0000000..98b5ad6
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/soc/soc.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <dfs.h>
+#include <dram.h>
+#include <m0_ctl.h>
+#include <plat_private.h>
+#include <pmu.h>
+#include <rk3399_def.h>
+#include <secure.h>
+#include <soc.h>
+
+/* Table of regions to map using the MMU. */
+const mmap_region_t plat_rk_mmap[] = {
+ MAP_REGION_FLAT(DEV_RNG0_BASE, DEV_RNG0_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE,
+ MT_MEMORY | MT_RW | MT_SECURE),
+
+ { 0 }
+};
+
+/* The RockChip power domain tree descriptor */
+const unsigned char rockchip_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ PLATFORM_SYSTEM_COUNT,
+ /* No of children for the root node */
+ PLATFORM_CLUSTER_COUNT,
+ /* No of children for the first cluster node */
+ PLATFORM_CLUSTER0_CORE_COUNT,
+ /* No of children for the second cluster node */
+ PLATFORM_CLUSTER1_CORE_COUNT
+};
+
+/* sleep data for pll suspend */
+static struct deepsleep_data_s slp_data;
+
+/* sleep data that needs to be accessed from pmusram */
+__pmusramdata struct pmu_sleep_data pmu_slp_data;
+
+static void set_pll_slow_mode(uint32_t pll_id)
+{
+ if (pll_id == PPLL_ID)
+ mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3), PLL_SLOW_MODE);
+ else
+ mmio_write_32((CRU_BASE +
+ CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE);
+}
+
+static void set_pll_normal_mode(uint32_t pll_id)
+{
+ if (pll_id == PPLL_ID)
+ mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3), PLL_NOMAL_MODE);
+ else
+ mmio_write_32(CRU_BASE +
+ CRU_PLL_CON(pll_id, 3), PLL_NOMAL_MODE);
+}
+
+static void set_pll_bypass(uint32_t pll_id)
+{
+ if (pll_id == PPLL_ID)
+ mmio_write_32(PMUCRU_BASE +
+ PMUCRU_PPLL_CON(3), PLL_BYPASS_MODE);
+ else
+ mmio_write_32(CRU_BASE +
+ CRU_PLL_CON(pll_id, 3), PLL_BYPASS_MODE);
+}
+
+static void _pll_suspend(uint32_t pll_id)
+{
+ set_pll_slow_mode(pll_id);
+ set_pll_bypass(pll_id);
+}
+
+/**
+ * disable_dvfs_plls - Suspend the DVFS-related PLLs
+ *
+ * When the center logic is powered down, the DPLL goes down with it, so
+ * the ABPLL must be kept running and switched in to clock the DDR during
+ * suspend. The ABPLL is therefore not closed here and ABPLL_ID is excluded.
+ */
+void disable_dvfs_plls(void)
+{
+ _pll_suspend(CPLL_ID);
+ _pll_suspend(NPLL_ID);
+ _pll_suspend(VPLL_ID);
+ _pll_suspend(GPLL_ID);
+ _pll_suspend(ALPLL_ID);
+}
+
+/**
+ * disable_nodvfs_plls - Suspend the non-DVFS PLL (PPLL)
+ */
+void disable_nodvfs_plls(void)
+{
+ _pll_suspend(PPLL_ID);
+}
+
+/**
+ * restore_pll - Copy PLL settings from memory to a PLL.
+ *
+ * This will copy PLL settings from an array in memory to the memory mapped
+ * registers for a PLL.
+ *
+ * Note: this applies to every PLL except the PPLL.
+ *
+ * pll_id: One of the values from enum plls_id
+ * src: Pointer to the array of values to restore from
+ */
+static void restore_pll(int pll_id, uint32_t *src)
+{
+ /* Nice to have PLL off while configuring */
+ mmio_write_32((CRU_BASE + CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE);
+
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 0), src[0] | REG_SOC_WMSK);
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 1), src[1] | REG_SOC_WMSK);
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 2), src[2]);
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 4), src[4] | REG_SOC_WMSK);
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 5), src[5] | REG_SOC_WMSK);
+
+ /* Do PLL_CON3 since that will enable things */
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), src[3] | REG_SOC_WMSK);
+
+ /* Wait for PLL lock done */
+ while ((mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 2)) &
+ 0x80000000) == 0x0)
+ ;
+}
+
+/**
+ * save_pll - Copy PLL settings from a PLL to memory
+ *
+ * This will copy PLL settings from the memory mapped registers for a PLL to
+ * an array in memory.
+ *
+ * Note: this applies to every PLL except the PPLL.
+ *
+ * pll_id: One of the values from enum plls_id
+ * dst: Pointer to the array of values to save to.
+ */
+static void save_pll(uint32_t *dst, int pll_id)
+{
+ int i;
+
+ for (i = 0; i < PLL_CON_COUNT; i++)
+ dst[i] = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, i));
+}
+
+/**
+ * prepare_abpll_for_ddrctrl - Copy DPLL settings to ABPLL
+ *
+ * This saves the current ABPLL and DPLL settings, then programs the ABPLL
+ * with the DPLL settings so that it can take over as the DDR clock source.
+ */
+void prepare_abpll_for_ddrctrl(void)
+{
+ save_pll(slp_data.plls_con[ABPLL_ID], ABPLL_ID);
+ save_pll(slp_data.plls_con[DPLL_ID], DPLL_ID);
+
+ restore_pll(ABPLL_ID, slp_data.plls_con[DPLL_ID]);
+}
+
+void restore_abpll(void)
+{
+ restore_pll(ABPLL_ID, slp_data.plls_con[ABPLL_ID]);
+}
+
+void clk_gate_con_save(void)
+{
+ uint32_t i = 0;
+
+ for (i = 0; i < PMUCRU_GATE_COUNT; i++)
+ slp_data.pmucru_gate_con[i] =
+ mmio_read_32(PMUCRU_BASE + PMUCRU_GATE_CON(i));
+
+ for (i = 0; i < CRU_GATE_COUNT; i++)
+ slp_data.cru_gate_con[i] =
+ mmio_read_32(CRU_BASE + CRU_GATE_CON(i));
+}
+
+void clk_gate_con_disable(void)
+{
+ uint32_t i;
+
+ for (i = 0; i < PMUCRU_GATE_COUNT; i++)
+ mmio_write_32(PMUCRU_BASE + PMUCRU_GATE_CON(i), REG_SOC_WMSK);
+
+ for (i = 0; i < CRU_GATE_COUNT; i++)
+ mmio_write_32(CRU_BASE + CRU_GATE_CON(i), REG_SOC_WMSK);
+}
+
+void clk_gate_con_restore(void)
+{
+ uint32_t i;
+
+ for (i = 0; i < PMUCRU_GATE_COUNT; i++)
+ mmio_write_32(PMUCRU_BASE + PMUCRU_GATE_CON(i),
+ REG_SOC_WMSK | slp_data.pmucru_gate_con[i]);
+
+ for (i = 0; i < CRU_GATE_COUNT; i++)
+ mmio_write_32(CRU_BASE + CRU_GATE_CON(i),
+ REG_SOC_WMSK | slp_data.cru_gate_con[i]);
+}
+
+static void set_plls_nobypass(uint32_t pll_id)
+{
+ if (pll_id == PPLL_ID)
+ mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3),
+ PLL_NO_BYPASS_MODE);
+ else
+ mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
+ PLL_NO_BYPASS_MODE);
+}
+
+static void _pll_resume(uint32_t pll_id)
+{
+ set_plls_nobypass(pll_id);
+ set_pll_normal_mode(pll_id);
+}
+
+void set_pmu_rsthold(void)
+{
+ uint32_t rstnhold_cofig0;
+ uint32_t rstnhold_cofig1;
+
+ pmu_slp_data.pmucru_rstnhold_con0 = mmio_read_32(PMUCRU_BASE +
+ PMUCRU_RSTNHOLD_CON0);
+ pmu_slp_data.pmucru_rstnhold_con1 = mmio_read_32(PMUCRU_BASE +
+ PMUCRU_RSTNHOLD_CON1);
+ rstnhold_cofig0 = BIT_WITH_WMSK(PRESETN_NOC_PMU_HOLD) |
+ BIT_WITH_WMSK(PRESETN_INTMEM_PMU_HOLD) |
+ BIT_WITH_WMSK(HRESETN_CM0S_PMU_HOLD) |
+ BIT_WITH_WMSK(HRESETN_CM0S_NOC_PMU_HOLD) |
+ BIT_WITH_WMSK(DRESETN_CM0S_PMU_HOLD) |
+ BIT_WITH_WMSK(POESETN_CM0S_PMU_HOLD) |
+ BIT_WITH_WMSK(PRESETN_TIMER_PMU_0_1_HOLD) |
+ BIT_WITH_WMSK(RESETN_TIMER_PMU_0_HOLD) |
+ BIT_WITH_WMSK(RESETN_TIMER_PMU_1_HOLD) |
+ BIT_WITH_WMSK(PRESETN_UART_M0_PMU_HOLD) |
+ BIT_WITH_WMSK(RESETN_UART_M0_PMU_HOLD) |
+ BIT_WITH_WMSK(PRESETN_WDT_PMU_HOLD);
+ rstnhold_cofig1 = BIT_WITH_WMSK(PRESETN_RKPWM_PMU_HOLD) |
+ BIT_WITH_WMSK(PRESETN_PMUGRF_HOLD) |
+ BIT_WITH_WMSK(PRESETN_SGRF_HOLD) |
+ BIT_WITH_WMSK(PRESETN_GPIO0_HOLD) |
+ BIT_WITH_WMSK(PRESETN_GPIO1_HOLD) |
+ BIT_WITH_WMSK(PRESETN_CRU_PMU_HOLD) |
+ BIT_WITH_WMSK(PRESETN_PVTM_PMU_HOLD);
+
+ mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON0, rstnhold_cofig0);
+ mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON1, rstnhold_cofig1);
+}
+
+void pmu_sgrf_rst_hld(void)
+{
+ mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
+ CRU_PMU_SGRF_RST_HOLD);
+}
+
+/*
+ * When the system is reset while running (a system reboot), the cpus should
+ * boot from the maskrom, so the pmusgrf reset-hold bits need to be released.
+ * When the system wakes from deep suspend, parts of the soc are reset on
+ * wakeup and the boot cpu should boot from pmusram, so the pmusgrf
+ * reset-hold bits need to be held.
+ */
+__pmusramfunc void pmu_sgrf_rst_hld_release(void)
+{
+ mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
+ CRU_PMU_SGRF_RST_RLS);
+}
+
+__pmusramfunc void restore_pmu_rsthold(void)
+{
+ mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON0,
+ pmu_slp_data.pmucru_rstnhold_con0 | REG_SOC_WMSK);
+ mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON1,
+ pmu_slp_data.pmucru_rstnhold_con1 | REG_SOC_WMSK);
+}
+
+/**
+ * enable_dvfs_plls - Resume the DVFS-related PLLs
+ *
+ * See the comment at disable_dvfs_plls(): the ABPLL is not suspended,
+ * so it does not need to be resumed here either.
+ */
+void enable_dvfs_plls(void)
+{
+ _pll_resume(ALPLL_ID);
+ _pll_resume(GPLL_ID);
+ _pll_resume(VPLL_ID);
+ _pll_resume(NPLL_ID);
+ _pll_resume(CPLL_ID);
+}
+
+/**
+ * enable_nodvfs_plls - Resume the non-DVFS PLL (PPLL)
+ */
+void enable_nodvfs_plls(void)
+{
+ _pll_resume(PPLL_ID);
+}
+
+void soc_global_soft_reset_init(void)
+{
+ mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
+ CRU_PMU_SGRF_RST_RLS);
+
+ mmio_clrbits_32(CRU_BASE + CRU_GLB_RST_CON,
+ CRU_PMU_WDTRST_MSK | CRU_PMU_FIRST_SFTRST_MSK);
+}
+
+void __dead2 soc_global_soft_reset(void)
+{
+ pmu_power_domains_on();
+ set_pll_slow_mode(VPLL_ID);
+ set_pll_slow_mode(NPLL_ID);
+ set_pll_slow_mode(GPLL_ID);
+ set_pll_slow_mode(CPLL_ID);
+ set_pll_slow_mode(PPLL_ID);
+ set_pll_slow_mode(ABPLL_ID);
+ set_pll_slow_mode(ALPLL_ID);
+
+ dsb();
+
+ mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, GLB_SRST_FST_CFG_VAL);
+
+ /*
+	 * The hardware may need some time to reset the system, so make sure
+	 * the core does not go on and execute further code in the meantime.
+ */
+ while (1)
+ ;
+}
+
+void plat_rockchip_soc_init(void)
+{
+ secure_timer_init();
+ secure_sgrf_init();
+ secure_sgrf_ddr_rgn_init();
+ soc_global_soft_reset_init();
+ plat_rockchip_gpio_init();
+ m0_init();
+ dram_init();
+ dram_dfs_init();
+}
diff --git a/plat/rockchip/rk3399/drivers/soc/soc.h b/plat/rockchip/rk3399/drivers/soc/soc.h
new file mode 100644
index 0000000..8daa5bb
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/soc/soc.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOC_H
+#define SOC_H
+
+#include <lib/utils.h>
+
+#define GLB_SRST_FST_CFG_VAL 0xfdb9
+#define GLB_SRST_SND_CFG_VAL 0xeca8
+
+#define PMUCRU_PPLL_CON(n) ((n) * 4)
+#define CRU_PLL_CON(pll_id, n) ((pll_id) * 0x20 + (n) * 4)
+#define PLL_MODE_MSK 0x03
+#define PLL_MODE_SHIFT 0x08
+#define PLL_BYPASS_MSK 0x01
+#define PLL_BYPASS_SHIFT 0x01
+#define PLL_PWRDN_MSK 0x01
+#define PLL_PWRDN_SHIFT 0x0
+#define PLL_BYPASS BIT(1)
+#define PLL_PWRDN BIT(0)
+
+#define NO_PLL_BYPASS (0x00)
+#define NO_PLL_PWRDN (0x00)
+
+#define FBDIV(n) ((0xfff << 16) | n)
+#define POSTDIV2(n) ((0x7 << (12 + 16)) | (n << 12))
+#define POSTDIV1(n) ((0x7 << (8 + 16)) | (n << 8))
+#define REFDIV(n) ((0x3F << 16) | n)
+#define PLL_LOCK(n) ((n >> 31) & 0x1)
+
+#define PLL_SLOW_MODE BITS_WITH_WMASK(SLOW_MODE,\
+ PLL_MODE_MSK, PLL_MODE_SHIFT)
+
+#define PLL_NOMAL_MODE BITS_WITH_WMASK(NORMAL_MODE,\
+ PLL_MODE_MSK, PLL_MODE_SHIFT)
+
+#define PLL_BYPASS_MODE BIT_WITH_WMSK(PLL_BYPASS_SHIFT)
+#define PLL_NO_BYPASS_MODE WMSK_BIT(PLL_BYPASS_SHIFT)
+
+#define PLL_CON_COUNT 0x06
+#define CRU_CLKSEL_COUNT 108
+#define CRU_CLKSEL_CON(n) (0x100 + (n) * 4)
+
+#define PMUCRU_CLKSEL_CONUT 0x06
+#define PMUCRU_CLKSEL_OFFSET 0x080
+#define REG_SIZE 0x04
+#define REG_SOC_WMSK 0xffff0000
+#define CLK_GATE_MASK 0x01
+
+#define PMUCRU_GATE_COUNT 0x03
+#define CRU_GATE_COUNT 0x23
+#define PMUCRU_GATE_CON(n) (0x100 + (n) * 4)
+#define CRU_GATE_CON(n) (0x300 + (n) * 4)
+
+#define PMUCRU_RSTNHOLD_CON0 0x120
+enum {
+ PRESETN_NOC_PMU_HOLD = 1,
+ PRESETN_INTMEM_PMU_HOLD,
+ HRESETN_CM0S_PMU_HOLD,
+ HRESETN_CM0S_NOC_PMU_HOLD,
+ DRESETN_CM0S_PMU_HOLD,
+ POESETN_CM0S_PMU_HOLD,
+ PRESETN_SPI3_HOLD,
+ RESETN_SPI3_HOLD,
+ PRESETN_TIMER_PMU_0_1_HOLD,
+ RESETN_TIMER_PMU_0_HOLD,
+ RESETN_TIMER_PMU_1_HOLD,
+ PRESETN_UART_M0_PMU_HOLD,
+ RESETN_UART_M0_PMU_HOLD,
+ PRESETN_WDT_PMU_HOLD
+};
+
+#define PMUCRU_RSTNHOLD_CON1 0x124
+enum {
+ PRESETN_I2C0_HOLD,
+ PRESETN_I2C4_HOLD,
+ PRESETN_I2C8_HOLD,
+ PRESETN_MAILBOX_PMU_HOLD,
+ PRESETN_RKPWM_PMU_HOLD,
+ PRESETN_PMUGRF_HOLD,
+ PRESETN_SGRF_HOLD,
+ PRESETN_GPIO0_HOLD,
+ PRESETN_GPIO1_HOLD,
+ PRESETN_CRU_PMU_HOLD,
+ PRESETN_INTR_ARB_HOLD,
+ PRESETN_PVTM_PMU_HOLD,
+ RESETN_I2C0_HOLD,
+ RESETN_I2C4_HOLD,
+ RESETN_I2C8_HOLD
+};
+
+enum plls_id {
+ ALPLL_ID = 0,
+ ABPLL_ID,
+ DPLL_ID,
+ CPLL_ID,
+ GPLL_ID,
+ NPLL_ID,
+ VPLL_ID,
+ PPLL_ID,
+ END_PLL_ID,
+};
+
+#define CLST_L_CPUS_MSK (0xf)
+#define CLST_B_CPUS_MSK (0x3)
+
+enum pll_work_mode {
+ SLOW_MODE = 0x00,
+ NORMAL_MODE = 0x01,
+ DEEP_SLOW_MODE = 0x02,
+};
+
+enum glb_sft_reset {
+ PMU_RST_BY_FIRST_SFT,
+ PMU_RST_BY_SECOND_SFT = BIT(2),
+ PMU_RST_NOT_BY_SFT = BIT(3),
+};
+
+struct pll_div {
+ uint32_t mhz;
+ uint32_t refdiv;
+ uint32_t fbdiv;
+ uint32_t postdiv1;
+ uint32_t postdiv2;
+ uint32_t frac;
+ uint32_t freq;
+};
+
+struct deepsleep_data_s {
+ uint32_t plls_con[END_PLL_ID][PLL_CON_COUNT];
+ uint32_t cru_gate_con[CRU_GATE_COUNT];
+ uint32_t pmucru_gate_con[PMUCRU_GATE_COUNT];
+};
+
+struct pmu_sleep_data {
+ uint32_t pmucru_rstnhold_con0;
+ uint32_t pmucru_rstnhold_con1;
+};
+
+/**************************************************
+ * pmugrf reg, offset
+ **************************************************/
+#define PMUGRF_OSREG(n) (0x300 + (n) * 4)
+#define PMUGRF_GPIO0A_P 0x040
+#define PMUGRF_GPIO1A_P 0x050
+
+/**************************************************
+ * DCF reg, offset
+ **************************************************/
+#define DCF_DCF_CTRL 0x0
+#define DCF_DCF_ADDR 0x8
+#define DCF_DCF_ISR 0xc
+#define DCF_DCF_TOSET 0x14
+#define DCF_DCF_TOCMD 0x18
+#define DCF_DCF_CMD_CFG 0x1c
+
+/* DCF_DCF_ISR */
+#define DCF_TIMEOUT (1 << 2)
+#define DCF_ERR (1 << 1)
+#define DCF_DONE (1 << 0)
+
+/* DCF_DCF_CTRL */
+#define DCF_VOP_HW_EN (1 << 2)
+#define DCF_STOP (1 << 1)
+#define DCF_START (1 << 0)
+
+#define CYCL_24M_CNT_US(us) (24 * us)
+#define CYCL_24M_CNT_MS(ms) (ms * CYCL_24M_CNT_US(1000))
+#define CYCL_32K_CNT_MS(ms) (ms * 32)
+
+/**************************************************
+ * cru reg, offset
+ **************************************************/
+#define CRU_SOFTRST_CON(n) (0x400 + (n) * 4)
+
+#define CRU_DMAC0_RST BIT_WITH_WMSK(3)
+	/* reset release */
+#define CRU_DMAC0_RST_RLS WMSK_BIT(3)
+
+#define CRU_DMAC1_RST BIT_WITH_WMSK(4)
+	/* reset release */
+#define CRU_DMAC1_RST_RLS WMSK_BIT(4)
+
+#define CRU_GLB_RST_CON 0x0510
+#define CRU_GLB_SRST_FST 0x0500
+#define CRU_GLB_SRST_SND 0x0504
+
+#define CRU_CLKGATE_CON(n) (0x300 + n * 4)
+#define PCLK_GPIO2_GATE_SHIFT 3
+#define PCLK_GPIO3_GATE_SHIFT 4
+#define PCLK_GPIO4_GATE_SHIFT 5
+
+/**************************************************
+ * pmu cru reg, offset
+ **************************************************/
+#define CRU_PMU_RSTHOLD_CON(n) (0x120 + n * 4)
+/* reset hold */
+#define CRU_PMU_SGRF_RST_HOLD BIT_WITH_WMSK(6)
+/* reset hold release */
+#define CRU_PMU_SGRF_RST_RLS WMSK_BIT(6)
+
+#define CRU_PMU_WDTRST_MSK (0x1 << 4)
+#define CRU_PMU_WDTRST_EN 0x0
+
+#define CRU_PMU_FIRST_SFTRST_MSK (0x3 << 2)
+#define CRU_PMU_FIRST_SFTRST_EN 0x0
+
+#define CRU_PMU_CLKGATE_CON(n) (0x100 + n * 4)
+#define PCLK_GPIO0_GATE_SHIFT 3
+#define PCLK_GPIO1_GATE_SHIFT 4
+
+#define CPU_BOOT_ADDR_WMASK 0xffff0000
+#define CPU_BOOT_ADDR_ALIGN 16
+
+#define GRF_IOMUX_2BIT_MASK 0x3
+#define GRF_IOMUX_GPIO 0x0
+
+#define GRF_GPIO4C2_IOMUX_SHIFT 4
+#define GRF_GPIO4C2_IOMUX_PWM 0x1
+#define GRF_GPIO4C6_IOMUX_SHIFT 12
+#define GRF_GPIO4C6_IOMUX_PWM 0x1
+
+#define PWM_CNT(n) (0x0000 + 0x10 * (n))
+#define PWM_PERIOD_HPR(n) (0x0004 + 0x10 * (n))
+#define PWM_DUTY_LPR(n) (0x0008 + 0x10 * (n))
+#define PWM_CTRL(n) (0x000c + 0x10 * (n))
+
+#define PWM_DISABLE (0 << 0)
+#define PWM_ENABLE (1 << 0)
+
+/* grf reg offset */
+#define GRF_USBPHY0_CTRL0 0x4480
+#define GRF_USBPHY0_CTRL2 0x4488
+#define GRF_USBPHY0_CTRL3 0x448c
+#define GRF_USBPHY0_CTRL12 0x44b0
+#define GRF_USBPHY0_CTRL13 0x44b4
+#define GRF_USBPHY0_CTRL15 0x44bc
+#define GRF_USBPHY0_CTRL16 0x44c0
+
+#define GRF_USBPHY1_CTRL0 0x4500
+#define GRF_USBPHY1_CTRL2 0x4508
+#define GRF_USBPHY1_CTRL3 0x450c
+#define GRF_USBPHY1_CTRL12 0x4530
+#define GRF_USBPHY1_CTRL13 0x4534
+#define GRF_USBPHY1_CTRL15 0x453c
+#define GRF_USBPHY1_CTRL16 0x4540
+
+#define GRF_GPIO2A_IOMUX 0xe000
+#define GRF_GPIO2A_P 0xe040
+#define GRF_GPIO3A_P 0xe050
+#define GRF_GPIO4A_P 0xe060
+#define GRF_GPIO2D_HE 0xe18c
+#define GRF_DDRC0_CON0 0xe380
+#define GRF_DDRC0_CON1 0xe384
+#define GRF_DDRC1_CON0 0xe388
+#define GRF_DDRC1_CON1 0xe38c
+#define GRF_SOC_CON_BASE 0xe200
+#define GRF_SOC_CON(n) (GRF_SOC_CON_BASE + (n) * 4)
+#define GRF_IO_VSEL 0xe640
+
+#define CRU_CLKSEL_CON0 0x0100
+#define CRU_CLKSEL_CON6 0x0118
+#define CRU_SDIO0_CON1 0x058c
+#define PMUCRU_CLKSEL_CON0 0x0080
+#define PMUCRU_CLKGATE_CON2 0x0108
+#define PMUCRU_SOFTRST_CON0 0x0110
+#define PMUCRU_GATEDIS_CON0 0x0130
+#define PMUCRU_SOFTRST_CON(n) (PMUCRU_SOFTRST_CON0 + (n) * 4)
+
+/* export related and operating SoC APIs */
+void __dead2 soc_global_soft_reset(void);
+void disable_dvfs_plls(void);
+void disable_nodvfs_plls(void);
+void enable_dvfs_plls(void);
+void enable_nodvfs_plls(void);
+void prepare_abpll_for_ddrctrl(void);
+void restore_abpll(void);
+void clk_gate_con_save(void);
+void clk_gate_con_disable(void);
+void clk_gate_con_restore(void);
+void set_pmu_rsthold(void);
+void pmu_sgrf_rst_hld(void);
+__pmusramfunc void pmu_sgrf_rst_hld_release(void);
+__pmusramfunc void restore_pmu_rsthold(void);
+#endif /* SOC_H */