author     Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-21 17:43:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-21 17:43:51 +0000
commit     be58c81aff4cd4c0ccf43dbd7998da4a6a08c03b
tree       779c248fb61c83f65d1f0dc867f2053d76b4e03a /drivers/marvell
parent     Initial commit.
Adding upstream version 2.10.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
30 files changed, 10089 insertions, 0 deletions
diff --git a/drivers/marvell/amb_adec.c b/drivers/marvell/amb_adec.c new file mode 100644 index 0000000..d78fa25 --- /dev/null +++ b/drivers/marvell/amb_adec.c @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* AXI to M-Bridge decoding unit driver for Marvell Armada 8K and 8K+ SoCs */ + +#include <inttypes.h> +#include <stdint.h> + +#include <common/debug.h> +#include <lib/mmio.h> + +#include <armada_common.h> +#include <mvebu.h> +#include <mvebu_def.h> + +#if LOG_LEVEL >= LOG_LEVEL_INFO +#define DEBUG_ADDR_MAP +#endif + +/* common defines */ +#define WIN_ENABLE_BIT (0x1) + +#define MVEBU_AMB_ADEC_OFFSET (0x70ff00) + +#define AMB_WIN_CR_OFFSET(win) (amb_base + 0x0 + (0x8 * win)) +#define AMB_ATTR_OFFSET 8 +#define AMB_ATTR_MASK 0xFF +#define AMB_SIZE_OFFSET 16 +#define AMB_SIZE_MASK 0xFF + +#define AMB_WIN_BASE_OFFSET(win) (amb_base + 0x4 + (0x8 * win)) +#define AMB_BASE_OFFSET 16 +#define AMB_BASE_ADDR_MASK ((1 << (32 - AMB_BASE_OFFSET)) - 1) + +#define AMB_WIN_ALIGNMENT_64K (0x10000) +#define AMB_WIN_ALIGNMENT_1M (0x100000) + +uintptr_t amb_base; + +static void amb_check_win(struct addr_map_win *win, uint32_t win_num) +{ + uint32_t base_addr; + + /* make sure the base address is in 16-bit range */ + if (win->base_addr > AMB_BASE_ADDR_MASK) { + WARN("Window %d: base address is too big 0x%" PRIx64 "\n", + win_num, win->base_addr); + win->base_addr = AMB_BASE_ADDR_MASK; + WARN("Set the base address to 0x%" PRIx64 "\n", win->base_addr); + } + + base_addr = win->base_addr << AMB_BASE_OFFSET; + /* for AMB The base is always 1M aligned */ + /* check if address is aligned to 1M */ + if (IS_NOT_ALIGN(base_addr, AMB_WIN_ALIGNMENT_1M)) { + win->base_addr = ALIGN_UP(base_addr, AMB_WIN_ALIGNMENT_1M); + WARN("Window %d: base address unaligned to 0x%x\n", + win_num, AMB_WIN_ALIGNMENT_1M); + WARN("Align up the base address to 0x%" PRIx64 "\n", win->base_addr); + } + + /* size parameter validity check */ + if (!IS_POWER_OF_2(win->win_size)) { + WARN("Window %d: window size is not power of 2 (0x%" PRIx64 ")\n", + win_num, win->win_size); + win->win_size = ROUND_UP_TO_POW_OF_2(win->win_size); + WARN("Rounding size to 0x%" PRIx64 "\n", win->win_size); + } +} + +static void amb_enable_win(struct addr_map_win *win, uint32_t win_num) +{ + uint32_t ctrl, base, size; + + /* + * size is 64KB granularity. + * The number of ones specifies the size of the + * window in 64 KB granularity. 
0 is 64KB + */ + size = (win->win_size / AMB_WIN_ALIGNMENT_64K) - 1; + ctrl = (size << AMB_SIZE_OFFSET) | (win->target_id << AMB_ATTR_OFFSET); + base = win->base_addr << AMB_BASE_OFFSET; + + mmio_write_32(AMB_WIN_BASE_OFFSET(win_num), base); + mmio_write_32(AMB_WIN_CR_OFFSET(win_num), ctrl); + + /* enable window after configuring window size (and attributes) */ + ctrl |= WIN_ENABLE_BIT; + mmio_write_32(AMB_WIN_CR_OFFSET(win_num), ctrl); +} + +#ifdef DEBUG_ADDR_MAP +static void dump_amb_adec(void) +{ + uint32_t ctrl, base, win_id, attr; + uint32_t size, size_count; + + /* Dump all AMB windows */ + printf("bank attribute base size\n"); + printf("--------------------------------------------\n"); + for (win_id = 0; win_id < AMB_MAX_WIN_ID; win_id++) { + ctrl = mmio_read_32(AMB_WIN_CR_OFFSET(win_id)); + if (ctrl & WIN_ENABLE_BIT) { + base = mmio_read_32(AMB_WIN_BASE_OFFSET(win_id)); + attr = (ctrl >> AMB_ATTR_OFFSET) & AMB_ATTR_MASK; + size_count = (ctrl >> AMB_SIZE_OFFSET) & AMB_SIZE_MASK; + size = (size_count + 1) * AMB_WIN_ALIGNMENT_64K; + printf("amb 0x%04x 0x%08x 0x%08x\n", + attr, base, size); + } + } +} +#endif + +int init_amb_adec(uintptr_t base) +{ + struct addr_map_win *win; + uint32_t win_id, win_reg; + uint32_t win_count; + + INFO("Initializing AXI to MBus Bridge Address decoding\n"); + + /* Get the base address of the AMB address decoding */ + amb_base = base + MVEBU_AMB_ADEC_OFFSET; + + /* Get the array of the windows and its size */ + marvell_get_amb_memory_map(&win, &win_count, base); + if (win_count <= 0) + INFO("no windows configurations found\n"); + + if (win_count > AMB_MAX_WIN_ID) { + INFO("number of windows is bigger than %d\n", AMB_MAX_WIN_ID); + return 0; + } + + /* disable all AMB windows */ + for (win_id = 0; win_id < AMB_MAX_WIN_ID; win_id++) { + win_reg = mmio_read_32(AMB_WIN_CR_OFFSET(win_id)); + win_reg &= ~WIN_ENABLE_BIT; + mmio_write_32(AMB_WIN_CR_OFFSET(win_id), win_reg); + } + + /* enable relevant windows */ + for (win_id = 0; win_id < win_count; win_id++, win++) { + amb_check_win(win, win_id); + amb_enable_win(win, win_id); + } + +#ifdef DEBUG_ADDR_MAP + dump_amb_adec(); +#endif + + INFO("Done AXI to MBus Bridge Address decoding Initializing\n"); + + return 0; +} diff --git a/drivers/marvell/ap807_clocks_init.c b/drivers/marvell/ap807_clocks_init.c new file mode 100644 index 0000000..c1f8619 --- /dev/null +++ b/drivers/marvell/ap807_clocks_init.c @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#include <drivers/delay_timer.h> +#include <drivers/marvell/aro.h> +#include <lib/mmio.h> + +#include <a8k_plat_def.h> + +/* Notify bootloader on DRAM setup */ +#define AP807_CPU_ARO_CTRL(cluster) \ + (MVEBU_RFU_BASE + 0x82A8 + (0xA58 * (cluster))) + +/* 0 - ARO clock is enabled, 1 - ARO clock is disabled */ +#define AP807_CPU_ARO_CLK_EN_OFFSET 0 +#define AP807_CPU_ARO_CLK_EN_MASK (0x1 << AP807_CPU_ARO_CLK_EN_OFFSET) + +/* 0 - ARO is the clock source, 1 - PLL is the clock source */ +#define AP807_CPU_ARO_SEL_PLL_OFFSET 5 +#define AP807_CPU_ARO_SEL_PLL_MASK (0x1 << AP807_CPU_ARO_SEL_PLL_OFFSET) + +/* AP807 clusters count */ +#define AP807_CLUSTER_NUM 2 + +/* PLL frequency values */ +#define PLL_FREQ_1200 0x2AE5F002 /* 1200 */ +#define PLL_FREQ_2000 0x2FC9F002 /* 2000 */ +#define PLL_FREQ_2200 0x2AC57001 /* 2200 */ +#define PLL_FREQ_2400 0x2AE5F001 /* 2400 */ + +/* CPU PLL control registers */ +#define AP807_CPU_PLL_CTRL(cluster) \ + (MVEBU_RFU_BASE + 0x82E0 + (0x8 * (cluster))) + +#define AP807_CPU_PLL_PARAM(cluster) AP807_CPU_PLL_CTRL(cluster) +#define AP807_CPU_PLL_CFG(cluster) (AP807_CPU_PLL_CTRL(cluster) + 0x4) +#define AP807_CPU_PLL_CFG_BYPASS_MODE (0x1) +#define AP807_CPU_PLL_FRC_DSCHG (0x2) +#define AP807_CPU_PLL_CFG_USE_REG_FILE (0x1 << 9) + +static void pll_set_freq(unsigned int freq_val) +{ + int i; + + if (freq_val != PLL_FREQ_2200) + return; + + for (i = 0 ; i < AP807_CLUSTER_NUM ; i++) { + /* Set parameter of cluster i PLL to 2.2GHz */ + mmio_write_32(AP807_CPU_PLL_PARAM(i), freq_val); + /* Set apll_lpf_frc_dschg - Control + * voltage of internal VCO is discharged + */ + mmio_write_32(AP807_CPU_PLL_CFG(i), + AP807_CPU_PLL_FRC_DSCHG); + /* Set use_rf_conf load PLL parameter from register */ + mmio_write_32(AP807_CPU_PLL_CFG(i), + AP807_CPU_PLL_FRC_DSCHG | + AP807_CPU_PLL_CFG_USE_REG_FILE); + /* Un-set apll_lpf_frc_dschg */ + mmio_write_32(AP807_CPU_PLL_CFG(i), + AP807_CPU_PLL_CFG_USE_REG_FILE); + } +} + +/* Switch to ARO from PLL in ap807 */ +static void aro_to_pll(void) +{ + unsigned int reg; + int i; + + for (i = 0 ; i < AP807_CLUSTER_NUM ; i++) { + /* switch from ARO to PLL */ + reg = mmio_read_32(AP807_CPU_ARO_CTRL(i)); + reg |= AP807_CPU_ARO_SEL_PLL_MASK; + mmio_write_32(AP807_CPU_ARO_CTRL(i), reg); + + mdelay(100); + + /* disable ARO clk driver */ + reg = mmio_read_32(AP807_CPU_ARO_CTRL(i)); + reg |= (AP807_CPU_ARO_CLK_EN_MASK); + mmio_write_32(AP807_CPU_ARO_CTRL(i), reg); + } +} + +/* switch from ARO to PLL + * in case of default frequency option, configure PLL registers + * to be aligned with new default frequency. + */ +void ap807_clocks_init(unsigned int freq_option) +{ + /* Modifications in frequency table: + * 0x0: 764x: change to 2000 MHz. + * 0x2: 744x change to 1800 MHz, 764x change to 2200/2400. + * 0x3: 3900/744x/764x change to 1200 MHz. + */ + + if (freq_option == CPU_2200_DDR_1200_RCLK_1200) + pll_set_freq(PLL_FREQ_2200); + + /* Switch from ARO to PLL */ + aro_to_pll(); + +} diff --git a/drivers/marvell/cache_llc.c b/drivers/marvell/cache_llc.c new file mode 100644 index 0000000..4b06b47 --- /dev/null +++ b/drivers/marvell/cache_llc.c @@ -0,0 +1,189 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* LLC driver is the Last Level Cache (L3C) driver + * for Marvell SoCs in AP806, AP807, and AP810 + */ + +#include <assert.h> + +#include <arch_helpers.h> +#include <drivers/marvell/cache_llc.h> +#include <drivers/marvell/ccu.h> +#include <lib/mmio.h> + +#include <mvebu_def.h> + +#define CCU_HTC_CR(ap_index) (MVEBU_CCU_BASE(ap_index) + 0x200) +#define CCU_SET_POC_OFFSET 5 + +extern void ca72_l2_enable_unique_clean(void); + +void llc_cache_sync(int ap_index) +{ + mmio_write_32(LLC_SYNC(ap_index), 0); + /* Atomic write, no need to wait */ +} + +void llc_flush_all(int ap_index) +{ + mmio_write_32(LLC_CLEAN_INV_WAY(ap_index), LLC_ALL_WAYS_MASK); + llc_cache_sync(ap_index); +} + +void llc_clean_all(int ap_index) +{ + mmio_write_32(LLC_CLEAN_WAY(ap_index), LLC_ALL_WAYS_MASK); + llc_cache_sync(ap_index); +} + +void llc_inv_all(int ap_index) +{ + mmio_write_32(LLC_INV_WAY(ap_index), LLC_ALL_WAYS_MASK); + llc_cache_sync(ap_index); +} + +void llc_disable(int ap_index) +{ + llc_flush_all(ap_index); + mmio_write_32(LLC_CTRL(ap_index), 0); + dsbishst(); +} + +void llc_enable(int ap_index, int excl_mode) +{ + uint32_t val; + + dsbsy(); + llc_inv_all(ap_index); + dsbsy(); + + val = LLC_CTRL_EN; + if (excl_mode) + val |= LLC_EXCLUSIVE_EN; + + mmio_write_32(LLC_CTRL(ap_index), val); + dsbsy(); +} + +int llc_is_exclusive(int ap_index) +{ + uint32_t reg; + + reg = mmio_read_32(LLC_CTRL(ap_index)); + + if ((reg & (LLC_CTRL_EN | LLC_EXCLUSIVE_EN)) == + (LLC_CTRL_EN | LLC_EXCLUSIVE_EN)) + return 1; + + return 0; +} + +void llc_runtime_enable(int ap_index) +{ + uint32_t reg; + + reg = mmio_read_32(LLC_CTRL(ap_index)); + if (reg & LLC_CTRL_EN) + return; + + INFO("Enabling LLC\n"); + + /* + * Enable L2 UniqueClean evictions with data + * Note: this configuration assumes that LLC is configured + * in exclusive mode. + * Later on in the code this assumption will be validated + */ + ca72_l2_enable_unique_clean(); + llc_enable(ap_index, 1); + + /* Set point of coherency to DDR. 
+ * This is required by units which have SW cache coherency + */ + reg = mmio_read_32(CCU_HTC_CR(ap_index)); + reg |= (0x1 << CCU_SET_POC_OFFSET); + mmio_write_32(CCU_HTC_CR(ap_index), reg); +} + +#if LLC_SRAM +int llc_sram_enable(int ap_index, int size) +{ + uint32_t tc, way, ways_to_allocate; + uint32_t way_addr; + + if ((size <= 0) || (size > LLC_SIZE) || (size % LLC_WAY_SIZE)) + return -1; + + llc_enable(ap_index, 1); + llc_inv_all(ap_index); + + ways_to_allocate = size / LLC_WAY_SIZE; + + /* Lockdown all available ways for all traffic classes */ + for (tc = 0; tc < LLC_TC_NUM; tc++) + mmio_write_32(LLC_TCN_LOCK(ap_index, tc), LLC_ALL_WAYS_MASK); + + /* Clear the high bits of SRAM address */ + mmio_write_32(LLC_BANKED_MNT_AHR(ap_index), 0); + + way_addr = PLAT_MARVELL_TRUSTED_RAM_BASE; + for (way = 0; way < ways_to_allocate; way++) { + /* Trigger allocation block command */ + mmio_write_32(LLC_BLK_ALOC(ap_index), + LLC_BLK_ALOC_BASE_ADDR(way_addr) | + LLC_BLK_ALOC_WAY_DATA_SET | + LLC_BLK_ALOC_WAY_ID(way)); + way_addr += LLC_WAY_SIZE; + } + return 0; +} + +void llc_sram_disable(int ap_index) +{ + uint32_t tc; + + /* Disable the line lockings */ + for (tc = 0; tc < LLC_TC_NUM; tc++) + mmio_write_32(LLC_TCN_LOCK(ap_index, tc), 0); + + /* Invalidate all ways */ + llc_inv_all(ap_index); +} + +int llc_sram_test(int ap_index, int size, char *msg) +{ + uintptr_t addr, end_addr; + uint32_t data = 0; + + if ((size <= 0) || (size > LLC_SIZE)) + return -1; + + INFO("=== LLC SRAM WRITE test %s\n", msg); + for (addr = PLAT_MARVELL_TRUSTED_RAM_BASE, + end_addr = PLAT_MARVELL_TRUSTED_RAM_BASE + size; + addr < end_addr; addr += 4) { + mmio_write_32(addr, addr); + } + INFO("=== LLC SRAM WRITE test %s PASSED\n", msg); + INFO("=== LLC SRAM READ test %s\n", msg); + for (addr = PLAT_MARVELL_TRUSTED_RAM_BASE, + end_addr = PLAT_MARVELL_TRUSTED_RAM_BASE + size; + addr < end_addr; addr += 4) { + data = mmio_read_32(addr); + if (data != addr) { + INFO("=== LLC SRAM READ test %s FAILED @ 0x%08lx)\n", + msg, addr); + return -1; + } + } + INFO("=== LLC SRAM READ test %s PASSED (last read = 0x%08x)\n", + msg, data); + return 0; +} + +#endif /* LLC_SRAM */ diff --git a/drivers/marvell/ccu.c b/drivers/marvell/ccu.c new file mode 100644 index 0000000..c206f11 --- /dev/null +++ b/drivers/marvell/ccu.c @@ -0,0 +1,417 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* CCU unit device driver for Marvell AP807, AP807 and AP810 SoCs */ + +#include <inttypes.h> +#include <stdint.h> + +#include <common/debug.h> +#include <drivers/marvell/ccu.h> +#include <lib/mmio.h> + +#include <armada_common.h> +#include <mvebu.h> +#include <mvebu_def.h> + +#if LOG_LEVEL >= LOG_LEVEL_INFO +#define DEBUG_ADDR_MAP +#endif + +/* common defines */ +#define WIN_ENABLE_BIT (0x1) +/* Physical address of the base of the window = {AddrLow[19:0],20'h0} */ +#define ADDRESS_SHIFT (20 - 4) +#define ADDRESS_MASK (0xFFFFFFF0) +#define CCU_WIN_ALIGNMENT (0x100000) + +/* + * Physical address of the highest address of window bits[31:19] = 0x6FF + * Physical address of the lowest address of window bits[18:6] = 0x6E0 + * Unit Id bits [5:2] = 2 + * RGF Window Enable bit[0] = 1 + * 0x37f9b809 - 11011111111 0011011100000 0010 0 1 + */ +#define ERRATA_WA_CCU_WIN4 0x37f9b809U + +/* + * Physical address of the highest address of window bits[31:19] = 0xFFF + * Physical address of the lowest address of window bits[18:6] = 0x800 + * Unit Id bits [5:2] = 2 + * RGF Window Enable bit[0] = 1 + * 0x7ffa0009 - 111111111111 0100000000000 0010 0 1 + */ +#define ERRATA_WA_CCU_WIN5 0x7ffa0009U + +/* + * Physical address of the highest address of window bits[31:19] = 0x1FFF + * Physical address of the lowest address of window bits[18:6] = 0x1000 + * Unit Id bits [5:2] = 2 + * RGF Window Enable bit[0] = 1 + * 0xfffc000d - 1111111111111 1000000000000 0011 0 1 + */ +#define ERRATA_WA_CCU_WIN6 0xfffc000dU + +#define IS_DRAM_TARGET(tgt) ((((tgt) == DRAM_0_TID) || \ + ((tgt) == DRAM_1_TID) || \ + ((tgt) == RAR_TID)) ? 1 : 0) + +#define CCU_RGF(win) (MVEBU_CCU_BASE(MVEBU_AP0) + \ + 0x90 + 4 * (win)) + +/* For storage of CR, SCR, ALR, AHR abd GCR */ +static uint32_t ccu_regs_save[MVEBU_CCU_MAX_WINS * 4 + 1]; + +#ifdef DEBUG_ADDR_MAP +static void dump_ccu(int ap_index) +{ + uint32_t win_id, win_cr, alr, ahr; + uint8_t target_id; + uint64_t start, end; + + /* Dump all AP windows */ + printf("\tbank target start end\n"); + printf("\t----------------------------------------------------\n"); + for (win_id = 0; win_id < MVEBU_CCU_MAX_WINS; win_id++) { + win_cr = mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id)); + if (win_cr & WIN_ENABLE_BIT) { + target_id = (win_cr >> CCU_TARGET_ID_OFFSET) & + CCU_TARGET_ID_MASK; + alr = mmio_read_32(CCU_WIN_ALR_OFFSET(ap_index, + win_id)); + ahr = mmio_read_32(CCU_WIN_AHR_OFFSET(ap_index, + win_id)); + start = ((uint64_t)alr << ADDRESS_SHIFT); + end = (((uint64_t)ahr + 0x10) << ADDRESS_SHIFT); + printf("\tccu%d %02x 0x%016" PRIx64 " 0x%016" PRIx64 "\n", + win_id, target_id, start, end); + } + } + win_cr = mmio_read_32(CCU_WIN_GCR_OFFSET(ap_index)); + target_id = (win_cr >> CCU_GCR_TARGET_OFFSET) & CCU_GCR_TARGET_MASK; + printf("\tccu GCR %d - all other transactions\n", target_id); +} +#endif + +void ccu_win_check(struct addr_map_win *win) +{ + /* check if address is aligned to 1M */ + if (IS_NOT_ALIGN(win->base_addr, CCU_WIN_ALIGNMENT)) { + win->base_addr = ALIGN_UP(win->base_addr, CCU_WIN_ALIGNMENT); + NOTICE("%s: Align up the base address to 0x%" PRIx64 "\n", + __func__, win->base_addr); + } + + /* size parameter validity check */ + if (IS_NOT_ALIGN(win->win_size, CCU_WIN_ALIGNMENT)) { + win->win_size = ALIGN_UP(win->win_size, CCU_WIN_ALIGNMENT); + NOTICE("%s: Aligning size to 0x%" PRIx64 "\n", + __func__, win->win_size); + } +} + +int ccu_is_win_enabled(int ap_index, uint32_t win_id) +{ + return 
mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id)) & + WIN_ENABLE_BIT; +} + +void ccu_enable_win(int ap_index, struct addr_map_win *win, uint32_t win_id) +{ + uint32_t ccu_win_reg; + uint32_t alr, ahr; + uint64_t end_addr; + + if ((win_id == 0) || (win_id > MVEBU_CCU_MAX_WINS)) { + ERROR("Enabling wrong CCU window %d!\n", win_id); + return; + } + + end_addr = (win->base_addr + win->win_size - 1); + alr = (uint32_t)((win->base_addr >> ADDRESS_SHIFT) & ADDRESS_MASK); + ahr = (uint32_t)((end_addr >> ADDRESS_SHIFT) & ADDRESS_MASK); + + mmio_write_32(CCU_WIN_ALR_OFFSET(ap_index, win_id), alr); + mmio_write_32(CCU_WIN_AHR_OFFSET(ap_index, win_id), ahr); + + ccu_win_reg = WIN_ENABLE_BIT; + ccu_win_reg |= (win->target_id & CCU_TARGET_ID_MASK) + << CCU_TARGET_ID_OFFSET; + mmio_write_32(CCU_WIN_CR_OFFSET(ap_index, win_id), ccu_win_reg); +} + +static void ccu_disable_win(int ap_index, uint32_t win_id) +{ + uint32_t win_reg; + + if ((win_id == 0) || (win_id > MVEBU_CCU_MAX_WINS)) { + ERROR("Disabling wrong CCU window %d!\n", win_id); + return; + } + + win_reg = mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id)); + win_reg &= ~WIN_ENABLE_BIT; + mmio_write_32(CCU_WIN_CR_OFFSET(ap_index, win_id), win_reg); +} + +/* Insert/Remove temporary window for using the out-of reset default + * CPx base address to access the CP configuration space prior to + * the further base address update in accordance with address mapping + * design. + * + * NOTE: Use the same window array for insertion and removal of + * temporary windows. + */ +void ccu_temp_win_insert(int ap_index, struct addr_map_win *win, int size) +{ + uint32_t win_id; + + for (int i = 0; i < size; i++) { + win_id = MVEBU_CCU_MAX_WINS - 1 - i; + ccu_win_check(win); + ccu_enable_win(ap_index, win, win_id); + win++; + } +} + +/* + * NOTE: Use the same window array for insertion and removal of + * temporary windows. + */ +void ccu_temp_win_remove(int ap_index, struct addr_map_win *win, int size) +{ + uint32_t win_id; + + for (int i = 0; i < size; i++) { + uint64_t base; + uint32_t target; + + win_id = MVEBU_CCU_MAX_WINS - 1 - i; + + target = mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id)); + target >>= CCU_TARGET_ID_OFFSET; + target &= CCU_TARGET_ID_MASK; + + base = mmio_read_32(CCU_WIN_ALR_OFFSET(ap_index, win_id)); + base <<= ADDRESS_SHIFT; + + if ((win->target_id != target) || (win->base_addr != base)) { + ERROR("%s: Trying to remove bad window-%d!\n", + __func__, win_id); + continue; + } + ccu_disable_win(ap_index, win_id); + win++; + } +} + +/* Returns current DRAM window target (DRAM_0_TID, DRAM_1_TID, RAR_TID) + * NOTE: Call only once for each AP. + * The AP0 DRAM window is located at index 2 only at the BL31 execution start. + * Then it relocated to index 1 for matching the rest of APs DRAM settings. + * Calling this function after relocation will produce wrong results on AP0 + */ +static uint32_t ccu_dram_target_get(int ap_index) +{ + /* On BLE stage the AP0 DRAM window is opened by the BootROM at index 2. + * All the rest of detected APs will use window at index 1. + * The AP0 DRAM window is moved from index 2 to 1 during + * init_ccu() execution. + */ + const uint32_t win_id = (ap_index == 0) ? 2 : 1; + uint32_t target; + + target = mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id)); + target >>= CCU_TARGET_ID_OFFSET; + target &= CCU_TARGET_ID_MASK; + + return target; +} + +void ccu_dram_target_set(int ap_index, uint32_t target) +{ + /* On BLE stage the AP0 DRAM window is opened by the BootROM at index 2. 
+ * All the rest of detected APs will use window at index 1. + * The AP0 DRAM window is moved from index 2 to 1 + * during init_ccu() execution. + */ + const uint32_t win_id = (ap_index == 0) ? 2 : 1; + uint32_t dram_cr; + + dram_cr = mmio_read_32(CCU_WIN_CR_OFFSET(ap_index, win_id)); + dram_cr &= ~(CCU_TARGET_ID_MASK << CCU_TARGET_ID_OFFSET); + dram_cr |= (target & CCU_TARGET_ID_MASK) << CCU_TARGET_ID_OFFSET; + mmio_write_32(CCU_WIN_CR_OFFSET(ap_index, win_id), dram_cr); +} + +/* Setup CCU DRAM window and enable it */ +void ccu_dram_win_config(int ap_index, struct addr_map_win *win) +{ +#if IMAGE_BLE /* BLE */ + /* On BLE stage the AP0 DRAM window is opened by the BootROM at index 2. + * Since the BootROM is not accessing DRAM at BLE stage, + * the DRAM window can be temporarely disabled. + */ + const uint32_t win_id = (ap_index == 0) ? 2 : 1; +#else /* end of BLE */ + /* At the ccu_init() execution stage, DRAM windows of all APs + * are arranged at index 1. + * The AP0 still has the old window BootROM DRAM at index 2, so + * the window-1 can be safely disabled without breaking the DRAM access. + */ + const uint32_t win_id = 1; +#endif + + ccu_disable_win(ap_index, win_id); + /* enable write secure (and clear read secure) */ + mmio_write_32(CCU_WIN_SCR_OFFSET(ap_index, win_id), + CCU_WIN_ENA_WRITE_SECURE); + ccu_win_check(win); + ccu_enable_win(ap_index, win, win_id); +} + +/* Save content of CCU window + GCR */ +static void ccu_save_win_range(int ap_id, int win_first, + int win_last, uint32_t *buffer) +{ + int win_id, idx; + /* Save CCU */ + for (idx = 0, win_id = win_first; win_id <= win_last; win_id++) { + buffer[idx++] = mmio_read_32(CCU_WIN_CR_OFFSET(ap_id, win_id)); + buffer[idx++] = mmio_read_32(CCU_WIN_SCR_OFFSET(ap_id, win_id)); + buffer[idx++] = mmio_read_32(CCU_WIN_ALR_OFFSET(ap_id, win_id)); + buffer[idx++] = mmio_read_32(CCU_WIN_AHR_OFFSET(ap_id, win_id)); + } + buffer[idx] = mmio_read_32(CCU_WIN_GCR_OFFSET(ap_id)); +} + +/* Restore content of CCU window + GCR */ +static void ccu_restore_win_range(int ap_id, int win_first, + int win_last, uint32_t *buffer) +{ + int win_id, idx; + /* Restore CCU */ + for (idx = 0, win_id = win_first; win_id <= win_last; win_id++) { + mmio_write_32(CCU_WIN_CR_OFFSET(ap_id, win_id), buffer[idx++]); + mmio_write_32(CCU_WIN_SCR_OFFSET(ap_id, win_id), buffer[idx++]); + mmio_write_32(CCU_WIN_ALR_OFFSET(ap_id, win_id), buffer[idx++]); + mmio_write_32(CCU_WIN_AHR_OFFSET(ap_id, win_id), buffer[idx++]); + } + mmio_write_32(CCU_WIN_GCR_OFFSET(ap_id), buffer[idx]); +} + +void ccu_save_win_all(int ap_id) +{ + ccu_save_win_range(ap_id, 0, MVEBU_CCU_MAX_WINS - 1, ccu_regs_save); +} + +void ccu_restore_win_all(int ap_id) +{ + ccu_restore_win_range(ap_id, 0, MVEBU_CCU_MAX_WINS - 1, ccu_regs_save); +} + +int init_ccu(int ap_index) +{ + struct addr_map_win *win, *dram_win; + uint32_t win_id, win_reg; + uint32_t win_count, array_id; + uint32_t dram_target; +#if IMAGE_BLE + /* In BootROM context CCU Window-1 + * has SRAM_TID target and should not be disabled + */ + const uint32_t win_start = 2; +#else + const uint32_t win_start = 1; +#endif + + INFO("Initializing CCU Address decoding\n"); + + /* Get the array of the windows and fill the map data */ + marvell_get_ccu_memory_map(ap_index, &win, &win_count); + if (win_count <= 0) { + INFO("No windows configurations found\n"); + } else if (win_count > (MVEBU_CCU_MAX_WINS - 1)) { + ERROR("CCU mem map array > than max available windows (%d)\n", + MVEBU_CCU_MAX_WINS); + win_count = MVEBU_CCU_MAX_WINS; + } + + /* Need to 
set GCR to DRAM before all CCU windows are disabled for + * securing the normal access to DRAM location, which the ATF is running + * from. Once all CCU windows are set, which have to include the + * dedicated DRAM window as well, the GCR can be switched to the target + * defined by the platform configuration. + */ + dram_target = ccu_dram_target_get(ap_index); + win_reg = (dram_target & CCU_GCR_TARGET_MASK) << CCU_GCR_TARGET_OFFSET; + mmio_write_32(CCU_WIN_GCR_OFFSET(ap_index), win_reg); + + /* If the DRAM window was already configured at the BLE stage, + * only the window target considered valid, the address range should be + * updated according to the platform configuration. + */ + for (dram_win = win, array_id = 0; array_id < win_count; + array_id++, dram_win++) { + if (IS_DRAM_TARGET(dram_win->target_id)) { + dram_win->target_id = dram_target; + break; + } + } + + /* Disable all AP CCU windows + * Window-0 is always bypassed since it already contains + * data allowing the internal configuration space access + */ + for (win_id = win_start; win_id < MVEBU_CCU_MAX_WINS; win_id++) { + ccu_disable_win(ap_index, win_id); + /* enable write secure (and clear read secure) */ + mmio_write_32(CCU_WIN_SCR_OFFSET(ap_index, win_id), + CCU_WIN_ENA_WRITE_SECURE); + } + + /* win_id is the index of the current ccu window + * array_id is the index of the current memory map window entry + */ + for (win_id = win_start, array_id = 0; + ((win_id < MVEBU_CCU_MAX_WINS) && (array_id < win_count)); + win_id++) { + ccu_win_check(win); + ccu_enable_win(ap_index, win, win_id); + win++; + array_id++; + } + + /* Get & set the default target according to board topology */ + win_reg = (marvell_get_ccu_gcr_target(ap_index) & CCU_GCR_TARGET_MASK) + << CCU_GCR_TARGET_OFFSET; + mmio_write_32(CCU_WIN_GCR_OFFSET(ap_index), win_reg); + +#ifdef DEBUG_ADDR_MAP + dump_ccu(ap_index); +#endif + + INFO("Done CCU Address decoding Initializing\n"); + + return 0; +} + +void errata_wa_init(void) +{ + /* + * EERATA ID: RES-3033912 - Internal Address Space Init state causes + * a hang upon accesses to [0xf070_0000, 0xf07f_ffff] + * Workaround: Boot Firmware (ATF) should configure CCU_RGF_WIN(4) to + * split [0x6e_0000, 0x1ff_ffff] to values [0x6e_0000, 0x6f_ffff] and + * [0x80_0000, 0xff_ffff] and [0x100_0000, 0x1ff_ffff],that cause + * accesses to the segment of [0xf070_0000, 0xf1ff_ffff] + * to act as RAZWI. + */ + mmio_write_32(CCU_RGF(4), ERRATA_WA_CCU_WIN4); + mmio_write_32(CCU_RGF(5), ERRATA_WA_CCU_WIN5); + mmio_write_32(CCU_RGF(6), ERRATA_WA_CCU_WIN6); +} diff --git a/drivers/marvell/comphy.h b/drivers/marvell/comphy.h new file mode 100644 index 0000000..fab564e --- /dev/null +++ b/drivers/marvell/comphy.h @@ -0,0 +1,472 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* Driver for COMPHY unit that is part or Marvell A8K SoCs */ + +#ifndef COMPHY_H +#define COMPHY_H + +/* COMPHY registers */ +#define COMMON_PHY_CFG1_REG 0x0 +#define COMMON_PHY_CFG1_PWR_UP_OFFSET 1 +#define COMMON_PHY_CFG1_PWR_UP_MASK \ + (0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET) +#define COMMON_PHY_CFG1_PIPE_SELECT_OFFSET 2 +#define COMMON_PHY_CFG1_PIPE_SELECT_MASK \ + (0x1 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET) +#define COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET 13 +#define COMMON_PHY_CFG1_PWR_ON_RESET_MASK \ + (0x1 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET) +#define COMMON_PHY_CFG1_CORE_RSTN_OFFSET 14 +#define COMMON_PHY_CFG1_CORE_RSTN_MASK \ + (0x1 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET) +#define COMMON_PHY_PHY_MODE_OFFSET 15 +#define COMMON_PHY_PHY_MODE_MASK \ + (0x1 << COMMON_PHY_PHY_MODE_OFFSET) + +#define COMMON_SELECTOR_PHY_OFFSET 0x140 +#define COMMON_SELECTOR_PIPE_OFFSET 0x144 + +#define COMMON_PHY_SD_CTRL1 0x148 +#define COMMON_PHY_SD_CTRL1_COMPHY_0_4_PORT_OFFSET 0 +#define COMMON_PHY_SD_CTRL1_COMPHY_0_4_PORT_MASK 0xFFFF +#define COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET 24 +#define COMMON_PHY_SD_CTRL1_PCIE_X4_EN_MASK \ + (0x1 << COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET) +#define COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET 25 +#define COMMON_PHY_SD_CTRL1_PCIE_X2_EN_MASK \ + (0x1 << COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET) + +#define DFX_DEV_GEN_CTRL12 0x80 +#define DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET 7 +#define DFX_DEV_GEN_PCIE_CLK_SRC_MASK \ + (0x3 << DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET) + +/* HPIPE register */ +#define HPIPE_PWR_PLL_REG 0x4 +#define HPIPE_PWR_PLL_REF_FREQ_OFFSET 0 +#define HPIPE_PWR_PLL_REF_FREQ_MASK \ + (0x1f << HPIPE_PWR_PLL_REF_FREQ_OFFSET) +#define HPIPE_PWR_PLL_PHY_MODE_OFFSET 5 +#define HPIPE_PWR_PLL_PHY_MODE_MASK \ + (0x7 << HPIPE_PWR_PLL_PHY_MODE_OFFSET) + +#define HPIPE_DFE_REG0 0x01C +#define HPIPE_DFE_RES_FORCE_OFFSET 15 +#define HPIPE_DFE_RES_FORCE_MASK \ + (0x1 << HPIPE_DFE_RES_FORCE_OFFSET) + +#define HPIPE_G2_SET_1_REG 0x040 +#define HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET 0 +#define HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK \ + (0x7 << HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET) +#define HPIPE_G2_SET_1_G2_RX_SELMUPP_OFFSET 3 +#define HPIPE_G2_SET_1_G2_RX_SELMUPP_MASK \ + (0x7 << HPIPE_G2_SET_1_G2_RX_SELMUPP_OFFSET) +#define HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET 6 +#define HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK \ + (0x3 << HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET) + +#define HPIPE_G3_SETTINGS_1_REG 0x048 +#define HPIPE_G3_RX_SELMUPI_OFFSET 0 +#define HPIPE_G3_RX_SELMUPI_MASK \ + (0x7 << HPIPE_G3_RX_SELMUPI_OFFSET) +#define HPIPE_G3_RX_SELMUPF_OFFSET 3 +#define HPIPE_G3_RX_SELMUPF_MASK \ + (0x7 << HPIPE_G3_RX_SELMUPF_OFFSET) +#define HPIPE_G3_SETTING_BIT_OFFSET 13 +#define HPIPE_G3_SETTING_BIT_MASK \ + (0x1 << HPIPE_G3_SETTING_BIT_OFFSET) + +#define HPIPE_INTERFACE_REG 0x94 +#define HPIPE_INTERFACE_GEN_MAX_OFFSET 10 +#define HPIPE_INTERFACE_GEN_MAX_MASK \ + (0x3 << HPIPE_INTERFACE_GEN_MAX_OFFSET) +#define HPIPE_INTERFACE_DET_BYPASS_OFFSET 12 +#define HPIPE_INTERFACE_DET_BYPASS_MASK \ + (0x1 << HPIPE_INTERFACE_DET_BYPASS_OFFSET) +#define HPIPE_INTERFACE_LINK_TRAIN_OFFSET 14 +#define HPIPE_INTERFACE_LINK_TRAIN_MASK \ + (0x1 << HPIPE_INTERFACE_LINK_TRAIN_OFFSET) + +#define HPIPE_VDD_CAL_CTRL_REG 0x114 +#define HPIPE_EXT_SELLV_RXSAMPL_OFFSET 5 +#define HPIPE_EXT_SELLV_RXSAMPL_MASK \ + (0x1f << HPIPE_EXT_SELLV_RXSAMPL_OFFSET) + +#define HPIPE_PCIE_REG0 0x120 +#define HPIPE_PCIE_IDLE_SYNC_OFFSET 12 +#define HPIPE_PCIE_IDLE_SYNC_MASK \ 
+ (0x1 << HPIPE_PCIE_IDLE_SYNC_OFFSET) +#define HPIPE_PCIE_SEL_BITS_OFFSET 13 +#define HPIPE_PCIE_SEL_BITS_MASK \ + (0x3 << HPIPE_PCIE_SEL_BITS_OFFSET) + +#define HPIPE_LANE_ALIGN_REG 0x124 +#define HPIPE_LANE_ALIGN_OFF_OFFSET 12 +#define HPIPE_LANE_ALIGN_OFF_MASK \ + (0x1 << HPIPE_LANE_ALIGN_OFF_OFFSET) + +#define HPIPE_MISC_REG 0x13C +#define HPIPE_MISC_CLK100M_125M_OFFSET 4 +#define HPIPE_MISC_CLK100M_125M_MASK \ + (0x1 << HPIPE_MISC_CLK100M_125M_OFFSET) +#define HPIPE_MISC_ICP_FORCE_OFFSET 5 +#define HPIPE_MISC_ICP_FORCE_MASK \ + (0x1 << HPIPE_MISC_ICP_FORCE_OFFSET) +#define HPIPE_MISC_TXDCLK_2X_OFFSET 6 +#define HPIPE_MISC_TXDCLK_2X_MASK \ + (0x1 << HPIPE_MISC_TXDCLK_2X_OFFSET) +#define HPIPE_MISC_CLK500_EN_OFFSET 7 +#define HPIPE_MISC_CLK500_EN_MASK \ + (0x1 << HPIPE_MISC_CLK500_EN_OFFSET) +#define HPIPE_MISC_REFCLK_SEL_OFFSET 10 +#define HPIPE_MISC_REFCLK_SEL_MASK \ + (0x1 << HPIPE_MISC_REFCLK_SEL_OFFSET) + +#define HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG 0x16C +#define HPIPE_SMAPLER_OFFSET 12 +#define HPIPE_SMAPLER_MASK (0x1 << HPIPE_SMAPLER_OFFSET) + +#define HPIPE_PWR_CTR_DTL_REG 0x184 +#define HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET 2 +#define HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK \ + (0x1 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET) + +#define HPIPE_FRAME_DET_CONTROL_REG 0x220 +#define HPIPE_FRAME_DET_LOCK_LOST_TO_OFFSET 12 +#define HPIPE_FRAME_DET_LOCK_LOST_TO_MASK \ + (0x1 << HPIPE_FRAME_DET_LOCK_LOST_TO_OFFSET) + +#define HPIPE_TX_TRAIN_CTRL_0_REG 0x268 +#define HPIPE_TX_TRAIN_P2P_HOLD_OFFSET 15 +#define HPIPE_TX_TRAIN_P2P_HOLD_MASK \ + (0x1 << HPIPE_TX_TRAIN_P2P_HOLD_OFFSET) + +#define HPIPE_TX_TRAIN_CTRL_REG 0x26C +#define HPIPE_TX_TRAIN_CTRL_G1_OFFSET 0 +#define HPIPE_TX_TRAIN_CTRL_G1_MASK \ + (0x1 << HPIPE_TX_TRAIN_CTRL_G1_OFFSET) +#define HPIPE_TX_TRAIN_CTRL_GN1_OFFSET 1 +#define HPIPE_TX_TRAIN_CTRL_GN1_MASK \ + (0x1 << HPIPE_TX_TRAIN_CTRL_GN1_OFFSET) +#define HPIPE_TX_TRAIN_CTRL_G0_OFFSET 2 +#define HPIPE_TX_TRAIN_CTRL_G0_MASK \ + (0x1 << HPIPE_TX_TRAIN_CTRL_G0_OFFSET) + +#define HPIPE_TX_TRAIN_CTRL_4_REG 0x278 +#define HPIPE_TRX_TRAIN_TIMER_OFFSET 0 +#define HPIPE_TRX_TRAIN_TIMER_MASK \ + (0x3FF << HPIPE_TRX_TRAIN_TIMER_OFFSET) + +#define HPIPE_TX_TRAIN_CTRL_5_REG 0x2A4 +#define HPIPE_TX_TRAIN_START_SQ_EN_OFFSET 11 +#define HPIPE_TX_TRAIN_START_SQ_EN_MASK \ + (0x1 << HPIPE_TX_TRAIN_START_SQ_EN_OFFSET) +#define HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET 12 +#define HPIPE_TX_TRAIN_START_FRM_DET_EN_MASK \ + (0x1 << HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET) +#define HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET 13 +#define HPIPE_TX_TRAIN_START_FRM_LOCK_EN_MASK \ + (0x1 << HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET) +#define HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET 14 +#define HPIPE_TX_TRAIN_WAIT_TIME_EN_MASK \ + (0x1 << HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET) + +#define HPIPE_TX_TRAIN_REG 0x31C +#define HPIPE_TX_TRAIN_CHK_INIT_OFFSET 4 +#define HPIPE_TX_TRAIN_CHK_INIT_MASK \ + (0x1 << HPIPE_TX_TRAIN_CHK_INIT_OFFSET) +#define HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET 7 +#define HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_MASK \ + (0x1 << HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET) + +#define HPIPE_CDR_CONTROL_REG 0x418 +#define HPIPE_CDR_RX_MAX_DFE_ADAPT_0_OFFSET 14 +#define HPIPE_CDR_RX_MAX_DFE_ADAPT_0_MASK \ + (0x3 << HPIPE_CDR_RX_MAX_DFE_ADAPT_0_OFFSET) +#define HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET 12 +#define HPIPE_CDR_RX_MAX_DFE_ADAPT_1_MASK \ + (0x3 << HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET) +#define HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET 9 +#define HPIPE_CDR_MAX_DFE_ADAPT_0_MASK \ + (0x7 << HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET) +#define 
HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET 6 +#define HPIPE_CDR_MAX_DFE_ADAPT_1_MASK \ + (0x7 << HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET) + +#define HPIPE_TX_TRAIN_CTRL_11_REG 0x438 +#define HPIPE_TX_STATUS_CHECK_MODE_OFFSET 6 +#define HPIPE_TX_TX_STATUS_CHECK_MODE_MASK \ + (0x1 << HPIPE_TX_STATUS_CHECK_MODE_OFFSET) +#define HPIPE_TX_NUM_OF_PRESET_OFFSET 10 +#define HPIPE_TX_NUM_OF_PRESET_MASK \ + (0x7 << HPIPE_TX_NUM_OF_PRESET_OFFSET) +#define HPIPE_TX_SWEEP_PRESET_EN_OFFSET 15 +#define HPIPE_TX_SWEEP_PRESET_EN_MASK \ + (0x1 << HPIPE_TX_SWEEP_PRESET_EN_OFFSET) +#define HPIPE_G2_SETTINGS_4_REG 0x44C +#define HPIPE_G2_DFE_RES_OFFSET 8 +#define HPIPE_G2_DFE_RES_MASK (0x3 << HPIPE_G2_DFE_RES_OFFSET) + +#define HPIPE_G3_SETTING_3_REG 0x450 +#define HPIPE_G3_FFE_CAP_SEL_OFFSET 0 +#define HPIPE_G3_FFE_CAP_SEL_MASK \ + (0xf << HPIPE_G3_FFE_CAP_SEL_OFFSET) +#define HPIPE_G3_FFE_RES_SEL_OFFSET 4 +#define HPIPE_G3_FFE_RES_SEL_MASK \ + (0x7 << HPIPE_G3_FFE_RES_SEL_OFFSET) +#define HPIPE_G3_FFE_SETTING_FORCE_OFFSET 7 +#define HPIPE_G3_FFE_SETTING_FORCE_MASK \ + (0x1 << HPIPE_G3_FFE_SETTING_FORCE_OFFSET) +#define HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET 12 +#define HPIPE_G3_FFE_DEG_RES_LEVEL_MASK \ + (0x3 << HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET) +#define HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET 14 +#define HPIPE_G3_FFE_LOAD_RES_LEVEL_MASK \ + (0x3 << HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET) + +#define HPIPE_G3_SETTING_4_REG 0x454 +#define HPIPE_G3_DFE_RES_OFFSET 8 +#define HPIPE_G3_DFE_RES_MASK (0x3 << HPIPE_G3_DFE_RES_OFFSET) + +#define HPIPE_DFE_CONTROL_REG 0x470 +#define HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET 14 +#define HPIPE_DFE_TX_MAX_DFE_ADAPT_MASK \ + (0x3 << HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET) + +#define HPIPE_DFE_CTRL_28_REG 0x49C +#define HPIPE_DFE_CTRL_28_PIPE4_OFFSET 7 +#define HPIPE_DFE_CTRL_28_PIPE4_MASK \ + (0x1 << HPIPE_DFE_CTRL_28_PIPE4_OFFSET) + +#define HPIPE_G3_SETTING_5_REG 0x548 +#define HPIPE_G3_SETTING_5_G3_ICP_OFFSET 0 +#define HPIPE_G3_SETTING_5_G3_ICP_MASK \ + (0xf << HPIPE_G3_SETTING_5_G3_ICP_OFFSET) + +#define HPIPE_LANE_STATUS1_REG 0x60C +#define HPIPE_LANE_STATUS1_PCLK_EN_OFFSET 0 +#define HPIPE_LANE_STATUS1_PCLK_EN_MASK \ + (0x1 << HPIPE_LANE_STATUS1_PCLK_EN_OFFSET) + +#define HPIPE_LANE_CFG4_REG 0x620 +#define HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET 3 +#define HPIPE_LANE_CFG4_DFE_EN_SEL_MASK \ + (0x1 << HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET) + +#define HPIPE_LANE_EQU_CONFIG_0_REG 0x69C +#define HPIPE_CFG_EQ_FS_OFFSET 0 +#define HPIPE_CFG_EQ_FS_MASK (0x3f << HPIPE_CFG_EQ_FS_OFFSET) +#define HPIPE_CFG_EQ_LF_OFFSET 6 +#define HPIPE_CFG_EQ_LF_MASK (0x3f << HPIPE_CFG_EQ_LF_OFFSET) +#define HPIPE_CFG_PHY_RC_EP_OFFSET 12 +#define HPIPE_CFG_PHY_RC_EP_MASK \ + (0x1 << HPIPE_CFG_PHY_RC_EP_OFFSET) + +#define HPIPE_LANE_EQ_CFG1_REG 0x6a0 +#define HPIPE_CFG_UPDATE_POLARITY_OFFSET 12 +#define HPIPE_CFG_UPDATE_POLARITY_MASK \ + (0x1 << HPIPE_CFG_UPDATE_POLARITY_OFFSET) + +#define HPIPE_LANE_EQ_CFG2_REG 0x6a4 +#define HPIPE_CFG_EQ_BUNDLE_DIS_OFFSET 14 +#define HPIPE_CFG_EQ_BUNDLE_DIS_MASK \ + (0x1 << HPIPE_CFG_EQ_BUNDLE_DIS_OFFSET) + +#define HPIPE_LANE_PRESET_CFG0_REG 0x6a8 +#define HPIPE_CFG_CURSOR_PRESET0_OFFSET 0 +#define HPIPE_CFG_CURSOR_PRESET0_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET0_OFFSET) +#define HPIPE_CFG_CURSOR_PRESET1_OFFSET 6 +#define HPIPE_CFG_CURSOR_PRESET1_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET1_OFFSET) + +#define HPIPE_LANE_PRESET_CFG1_REG 0x6ac +#define HPIPE_CFG_CURSOR_PRESET2_OFFSET 0 +#define HPIPE_CFG_CURSOR_PRESET2_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET2_OFFSET) +#define HPIPE_CFG_CURSOR_PRESET3_OFFSET 6 
+#define HPIPE_CFG_CURSOR_PRESET3_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET3_OFFSET) + +#define HPIPE_LANE_PRESET_CFG2_REG 0x6b0 +#define HPIPE_CFG_CURSOR_PRESET4_OFFSET 0 +#define HPIPE_CFG_CURSOR_PRESET4_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET4_OFFSET) +#define HPIPE_CFG_CURSOR_PRESET5_OFFSET 6 +#define HPIPE_CFG_CURSOR_PRESET5_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET5_OFFSET) + +#define HPIPE_LANE_PRESET_CFG3_REG 0x6b4 +#define HPIPE_CFG_CURSOR_PRESET6_OFFSET 0 +#define HPIPE_CFG_CURSOR_PRESET6_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET6_OFFSET) +#define HPIPE_CFG_CURSOR_PRESET7_OFFSET 6 +#define HPIPE_CFG_CURSOR_PRESET7_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET7_OFFSET) + +#define HPIPE_LANE_PRESET_CFG4_REG 0x6b8 +#define HPIPE_CFG_CURSOR_PRESET8_OFFSET 0 +#define HPIPE_CFG_CURSOR_PRESET8_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET8_OFFSET) +#define HPIPE_CFG_CURSOR_PRESET9_OFFSET 6 +#define HPIPE_CFG_CURSOR_PRESET9_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET9_OFFSET) + +#define HPIPE_LANE_PRESET_CFG5_REG 0x6bc +#define HPIPE_CFG_CURSOR_PRESET10_OFFSET 0 +#define HPIPE_CFG_CURSOR_PRESET10_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET10_OFFSET) +#define HPIPE_CFG_CURSOR_PRESET11_OFFSET 6 +#define HPIPE_CFG_CURSOR_PRESET11_MASK \ + (0x3f << HPIPE_CFG_CURSOR_PRESET11_OFFSET) + +#define HPIPE_LANE_PRESET_CFG6_REG 0x6c0 +#define HPIPE_CFG_PRE_CURSOR_PRESET0_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET0_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET0_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET0_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET0_MASK \ + (0x3f << HPIPE_CFG_POST_CURSOR_PRESET0_OFFSET) + +#define HPIPE_LANE_PRESET_CFG7_REG 0x6c4 +#define HPIPE_CFG_PRE_CURSOR_PRESET1_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET1_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET1_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET1_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET1_MASK \ + (0x3f << HPIPE_CFG_POST_CURSOR_PRESET1_OFFSET) + +#define HPIPE_LANE_PRESET_CFG8_REG 0x6c8 +#define HPIPE_CFG_PRE_CURSOR_PRESET2_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET2_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET2_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET2_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET2_MASK \ + (0x3f << HPIPE_CFG_POST_CURSOR_PRESET2_OFFSET) + +#define HPIPE_LANE_PRESET_CFG9_REG 0x6cc +#define HPIPE_CFG_PRE_CURSOR_PRESET3_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET3_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET3_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET3_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET3_MASK \ + (0x3f << HPIPE_CFG_POST_CURSOR_PRESET3_OFFSET) + +#define HPIPE_LANE_PRESET_CFG10_REG 0x6d0 +#define HPIPE_CFG_PRE_CURSOR_PRESET4_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET4_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET4_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET4_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET4_MASK \ + (0x3f << HPIPE_CFG_POST_CURSOR_PRESET4_OFFSET) + +#define HPIPE_LANE_PRESET_CFG11_REG 0x6d4 +#define HPIPE_CFG_PRE_CURSOR_PRESET5_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET5_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET5_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET5_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET5_MASK \ + (0x3f << HPIPE_CFG_POST_CURSOR_PRESET5_OFFSET) + +#define HPIPE_LANE_PRESET_CFG12_REG 0x6d8 +#define HPIPE_CFG_PRE_CURSOR_PRESET6_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET6_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET6_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET6_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET6_MASK \ + (0x3f << 
HPIPE_CFG_POST_CURSOR_PRESET6_OFFSET) + +#define HPIPE_LANE_PRESET_CFG13_REG 0x6dc +#define HPIPE_CFG_PRE_CURSOR_PRESET7_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET7_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET7_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET7_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET7_MASK \ + (0x3f << HPIPE_CFG_POST_CURSOR_PRESET7_OFFSET) + +#define HPIPE_LANE_PRESET_CFG14_REG 0x6e0 +#define HPIPE_CFG_PRE_CURSOR_PRESET8_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET8_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET8_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET8_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET8_MASK \ + (0x3f << HPIPE_CFG_POST_CURSOR_PRESET8_OFFSET) + +#define HPIPE_LANE_PRESET_CFG15_REG 0x6e4 +#define HPIPE_CFG_PRE_CURSOR_PRESET9_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET9_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET9_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET9_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET9_MASK \ + (0x3f << HPIPE_CFG_POST_CURSOR_PRESET9_OFFSET) + +#define HPIPE_LANE_PRESET_CFG16_REG 0x6e8 +#define HPIPE_CFG_PRE_CURSOR_PRESET10_OFFSET 0 +#define HPIPE_CFG_PRE_CURSOR_PRESET10_MASK \ + (0x3f << HPIPE_CFG_PRE_CURSOR_PRESET10_OFFSET) +#define HPIPE_CFG_POST_CURSOR_PRESET10_OFFSET 6 +#define HPIPE_CFG_POST_CURSOR_PRESET10_MASK \ + (0x3f << HPIPE_CFG_POST_CURSOR_PRESET10_OFFSET) + +#define HPIPE_LANE_EQ_REMOTE_SETTING_REG 0x6f8 +#define HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET 0 +#define HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_MASK \ + (0x1 << HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET) +#define HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET 1 +#define HPIPE_LANE_CFG_FOM_ONLY_MODE_MASK \ + (0x1 << HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET) +#define HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET 2 +#define HPIPE_LANE_CFG_FOM_PRESET_VECTOR_MASK \ + (0xf << HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET) + +#define HPIPE_RST_CLK_CTRL_REG 0x704 +#define HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET 0 +#define HPIPE_RST_CLK_CTRL_PIPE_RST_MASK \ + (0x1 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET) +#define HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET 2 +#define HPIPE_RST_CLK_CTRL_FIXED_PCLK_MASK \ + (0x1 << HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET) +#define HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET 3 +#define HPIPE_RST_CLK_CTRL_PIPE_WIDTH_MASK \ + (0x1 << HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET) +#define HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET 9 +#define HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_MASK \ + (0x1 << HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET) + +#define HPIPE_CLK_SRC_LO_REG 0x70c +#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET 1 +#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_MASK \ + (0x1 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET) +#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET 2 +#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_MASK \ + (0x3 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET) +#define HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET 5 +#define HPIPE_CLK_SRC_LO_PLL_RDY_DL_MASK \ + (0x7 << HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET) + +#define HPIPE_CLK_SRC_HI_REG 0x710 +#define HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET 0 +#define HPIPE_CLK_SRC_HI_LANE_STRT_MASK \ + (0x1 << HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET) +#define HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET 1 +#define HPIPE_CLK_SRC_HI_LANE_BREAK_MASK \ + (0x1 << HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET) +#define HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET 2 +#define HPIPE_CLK_SRC_HI_LANE_MASTER_MASK \ + (0x1 << HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET) +#define HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET 7 +#define HPIPE_CLK_SRC_HI_MODE_PIPE_MASK \ + (0x1 << HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET) + +#define 
HPIPE_GLOBAL_PM_CTRL 0x740 +#define HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET 0 +#define HPIPE_GLOBAL_PM_RXDLOZ_WAIT_MASK \ + (0xFF << HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET) + +#endif /* COMPHY_H */ diff --git a/drivers/marvell/comphy/comphy-cp110.h b/drivers/marvell/comphy/comphy-cp110.h new file mode 100644 index 0000000..af5c715 --- /dev/null +++ b/drivers/marvell/comphy/comphy-cp110.h @@ -0,0 +1,914 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* Marvell CP110 SoC COMPHY unit driver */ + +#ifndef COMPHY_CP110_H +#define COMPHY_CP110_H + +#define SD_ADDR(base, lane) (base + 0x1000 * lane) +#define HPIPE_ADDR(base, lane) (SD_ADDR(base, lane) + 0x800) +#define COMPHY_ADDR(base, lane) (base + 0x28 * lane) + +#define MAX_NUM_OF_FFE 8 +#define RX_TRAINING_TIMEOUT 500 + +/* Comphy registers */ +#define COMMON_PHY_CFG1_REG 0x0 +#define COMMON_PHY_CFG1_PWR_UP_OFFSET 1 +#define COMMON_PHY_CFG1_PWR_UP_MASK \ + (0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET) +#define COMMON_PHY_CFG1_PIPE_SELECT_OFFSET 2 +#define COMMON_PHY_CFG1_PIPE_SELECT_MASK \ + (0x1 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET) +#define COMMON_PHY_CFG1_CORE_RSTN_OFFSET 13 +#define COMMON_PHY_CFG1_CORE_RSTN_MASK \ + (0x1 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET) +#define COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET 14 +#define COMMON_PHY_CFG1_PWR_ON_RESET_MASK \ + (0x1 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET) +#define COMMON_PHY_PHY_MODE_OFFSET 15 +#define COMMON_PHY_PHY_MODE_MASK \ + (0x1 << COMMON_PHY_PHY_MODE_OFFSET) + +#define COMMON_PHY_CFG6_REG 0x14 +#define COMMON_PHY_CFG6_IF_40_SEL_OFFSET 18 +#define COMMON_PHY_CFG6_IF_40_SEL_MASK \ + (0x1 << COMMON_PHY_CFG6_IF_40_SEL_OFFSET) + +#define COMMON_PHY_CFG6_REG 0x14 +#define COMMON_PHY_CFG6_IF_40_SEL_OFFSET 18 +#define COMMON_PHY_CFG6_IF_40_SEL_MASK \ + (0x1 << COMMON_PHY_CFG6_IF_40_SEL_OFFSET) + +#define COMMON_SELECTOR_PHY_REG_OFFSET 0x140 +#define COMMON_SELECTOR_PIPE_REG_OFFSET 0x144 +#define COMMON_SELECTOR_COMPHY_MASK 0xf +#define COMMON_SELECTOR_COMPHYN_FIELD_WIDTH 4 +#define COMMON_SELECTOR_COMPHYN_SATA 0x4 +#define COMMON_SELECTOR_PIPE_COMPHY_PCIE 0x4 +#define COMMON_SELECTOR_PIPE_COMPHY_USBH 0x1 +#define COMMON_SELECTOR_PIPE_COMPHY_USBD 0x2 + +/* SGMII/Base-X/SFI/RXAUI */ +#define COMMON_SELECTOR_COMPHY0_1_2_NETWORK 0x1 +#define COMMON_SELECTOR_COMPHY3_RXAUI 0x1 +#define COMMON_SELECTOR_COMPHY3_SGMII 0x2 +#define COMMON_SELECTOR_COMPHY4_PORT1 0x1 +#define COMMON_SELECTOR_COMPHY4_ALL_OTHERS 0x2 +#define COMMON_SELECTOR_COMPHY5_RXAUI 0x2 +#define COMMON_SELECTOR_COMPHY5_SGMII 0x1 + +#define COMMON_PHY_SD_CTRL1 0x148 +#define COMMON_PHY_SD_CTRL1_COMPHY_0_PORT_OFFSET 0 +#define COMMON_PHY_SD_CTRL1_COMPHY_1_PORT_OFFSET 4 +#define COMMON_PHY_SD_CTRL1_COMPHY_2_PORT_OFFSET 8 +#define COMMON_PHY_SD_CTRL1_COMPHY_3_PORT_OFFSET 12 +#define COMMON_PHY_SD_CTRL1_COMPHY_0_3_PORT_MASK 0xFFFF +#define COMMON_PHY_SD_CTRL1_COMPHY_0_1_PORT_MASK 0xFF +#define COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET 24 +#define COMMON_PHY_SD_CTRL1_PCIE_X4_EN_MASK \ + (0x1 << COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET) +#define COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET 25 +#define COMMON_PHY_SD_CTRL1_PCIE_X2_EN_MASK \ + (0x1 << COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET) +#define COMMON_PHY_SD_CTRL1_RXAUI1_OFFSET 26 +#define COMMON_PHY_SD_CTRL1_RXAUI1_MASK \ + (0x1 << COMMON_PHY_SD_CTRL1_RXAUI1_OFFSET) +#define COMMON_PHY_SD_CTRL1_RXAUI0_OFFSET 27 +#define COMMON_PHY_SD_CTRL1_RXAUI0_MASK \ + (0x1 << COMMON_PHY_SD_CTRL1_RXAUI0_OFFSET) + +/* DFX register */ +#define 
DFX_BASE (0x400000) +#define DFX_DEV_GEN_CTRL12_REG (0x280) +#define DFX_DEV_GEN_PCIE_CLK_SRC_MUX (0x3) +#define DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET 7 +#define DFX_DEV_GEN_PCIE_CLK_SRC_MASK \ + (0x3 << DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET) + +/* SerDes IP registers */ +#define SD_EXTERNAL_CONFIG0_REG 0 +#define SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET 1 +#define SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK \ + (1 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET) +#define SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET 3 +#define SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_MASK \ + (0xf << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET) +#define SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET 7 +#define SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_MASK \ + (0xf << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET) +#define SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET 11 +#define SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK \ + (1 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET) +#define SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET 12 +#define SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK \ + (1 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET) +#define SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET 14 +#define SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_MASK \ + (1 << SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET) +#define SD_EXTERNAL_CONFIG0_MEDIA_MODE_OFFSET 15 +#define SD_EXTERNAL_CONFIG0_MEDIA_MODE_MASK \ + (0x1 << SD_EXTERNAL_CONFIG0_MEDIA_MODE_OFFSET) + +#define SD_EXTERNAL_CONFIG1_REG 0x4 +#define SD_EXTERNAL_CONFIG1_TX_IDLE_OFFSET 2 +#define SD_EXTERNAL_CONFIG1_TX_IDLE_MASK \ + (0x1 << SD_EXTERNAL_CONFIG1_TX_IDLE_OFFSET) +#define SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET 3 +#define SD_EXTERNAL_CONFIG1_RESET_IN_MASK \ + (0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET) +#define SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET 4 +#define SD_EXTERNAL_CONFIG1_RX_INIT_MASK \ + (0x1 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET) +#define SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET 5 +#define SD_EXTERNAL_CONFIG1_RESET_CORE_MASK \ + (0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET) +#define SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET 6 +#define SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK \ + (0x1 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET) + +#define SD_EXTERNAL_CONFIG2_REG 0x8 +#define SD_EXTERNAL_CONFIG2_PIN_DFE_EN_OFFSET 4 +#define SD_EXTERNAL_CONFIG2_PIN_DFE_EN_MASK \ + (0x1 << SD_EXTERNAL_CONFIG2_PIN_DFE_EN_OFFSET) +#define SD_EXTERNAL_CONFIG2_SSC_ENABLE_OFFSET 7 +#define SD_EXTERNAL_CONFIG2_SSC_ENABLE_MASK \ + (0x1 << SD_EXTERNAL_CONFIG2_SSC_ENABLE_OFFSET) + +#define SD_EXTERNAL_STATUS_REG 0xc +#define SD_EXTERNAL_STATUS_START_RX_TRAINING_OFFSET 7 +#define SD_EXTERNAL_STATUS_START_RX_TRAINING_MASK \ + (1 << SD_EXTERNAL_STATUS_START_RX_TRAINING_OFFSET) + +#define SD_EXTERNAL_STATUS0_REG 0x18 +#define SD_EXTERNAL_STATUS0_PLL_TX_OFFSET 2 +#define SD_EXTERNAL_STATUS0_PLL_TX_MASK \ + (0x1 << SD_EXTERNAL_STATUS0_PLL_TX_OFFSET) +#define SD_EXTERNAL_STATUS0_PLL_RX_OFFSET 3 +#define SD_EXTERNAL_STATUS0_PLL_RX_MASK \ + (0x1 << SD_EXTERNAL_STATUS0_PLL_RX_OFFSET) +#define SD_EXTERNAL_STATUS0_RX_INIT_OFFSET 4 +#define SD_EXTERNAL_STATUS0_RX_INIT_MASK \ + (0x1 << SD_EXTERNAL_STATUS0_RX_INIT_OFFSET) + +#define SD_EXTERNAL_STATAUS1_REG 0x1c +#define SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_COMP_OFFSET 0 +#define SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_COMP_MASK \ + (1 << SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_COMP_OFFSET) +#define SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_FAILED_OFFSET 1 +#define SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_FAILED_MASK \ + (1 << SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_FAILED_OFFSET) + +/* HPIPE registers */ +#define HPIPE_PWR_PLL_REG 0x4 +#define HPIPE_PWR_PLL_REF_FREQ_OFFSET 0 +#define HPIPE_PWR_PLL_REF_FREQ_MASK \ + 
(0x1f << HPIPE_PWR_PLL_REF_FREQ_OFFSET) +#define HPIPE_PWR_PLL_PHY_MODE_OFFSET 5 +#define HPIPE_PWR_PLL_PHY_MODE_MASK \ + (0x7 << HPIPE_PWR_PLL_PHY_MODE_OFFSET) + +#define HPIPE_CAL_REG1_REG 0xc +#define HPIPE_CAL_REG_1_EXT_TXIMP_OFFSET 10 +#define HPIPE_CAL_REG_1_EXT_TXIMP_MASK \ + (0x1f << HPIPE_CAL_REG_1_EXT_TXIMP_OFFSET) +#define HPIPE_CAL_REG_1_EXT_TXIMP_EN_OFFSET 15 +#define HPIPE_CAL_REG_1_EXT_TXIMP_EN_MASK \ + (0x1 << HPIPE_CAL_REG_1_EXT_TXIMP_EN_OFFSET) + +#define HPIPE_SQUELCH_FFE_SETTING_REG 0x18 +#define HPIPE_SQUELCH_THRESH_IN_OFFSET 8 +#define HPIPE_SQUELCH_THRESH_IN_MASK \ + (0xf << HPIPE_SQUELCH_THRESH_IN_OFFSET) +#define HPIPE_SQUELCH_DETECTED_OFFSET 14 +#define HPIPE_SQUELCH_DETECTED_MASK \ + (0x1 << HPIPE_SQUELCH_DETECTED_OFFSET) + +#define HPIPE_DFE_REG0 0x1c +#define HPIPE_DFE_RES_FORCE_OFFSET 15 +#define HPIPE_DFE_RES_FORCE_MASK \ + (0x1 << HPIPE_DFE_RES_FORCE_OFFSET) + +#define HPIPE_DFE_F3_F5_REG 0x28 +#define HPIPE_DFE_F3_F5_DFE_EN_OFFSET 14 +#define HPIPE_DFE_F3_F5_DFE_EN_MASK \ + (0x1 << HPIPE_DFE_F3_F5_DFE_EN_OFFSET) +#define HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET 15 +#define HPIPE_DFE_F3_F5_DFE_CTRL_MASK \ + (0x1 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET) + +#define HPIPE_ADAPTED_DFE_COEFFICIENT_1_REG 0x30 +#define HPIPE_ADAPTED_DFE_RES_OFFSET 13 +#define HPIPE_ADAPTED_DFE_RES_MASK \ + (0x3 << HPIPE_ADAPTED_DFE_RES_OFFSET) + +#define HPIPE_G1_SET_0_REG 0x34 +#define HPIPE_G1_SET_0_G1_TX_AMP_OFFSET 1 +#define HPIPE_G1_SET_0_G1_TX_AMP_MASK \ + (0x1f << HPIPE_G1_SET_0_G1_TX_AMP_OFFSET) +#define HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET 6 +#define HPIPE_G1_SET_0_G1_TX_AMP_ADJ_MASK \ + (0x1 << HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET) +#define HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET 7 +#define HPIPE_G1_SET_0_G1_TX_EMPH1_MASK \ + (0xf << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET) +#define HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET 11 +#define HPIPE_G1_SET_0_G1_TX_EMPH1_EN_MASK \ + (0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET) + +#define HPIPE_G1_SET_1_REG 0x38 +#define HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET 0 +#define HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK \ + (0x7 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET) +#define HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET 3 +#define HPIPE_G1_SET_1_G1_RX_SELMUPF_MASK \ + (0x7 << HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET) +#define HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET 6 +#define HPIPE_G1_SET_1_G1_RX_SELMUFI_MASK \ + (0x3 << HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET) +#define HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET 8 +#define HPIPE_G1_SET_1_G1_RX_SELMUFF_MASK \ + (0x3 << HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET) +#define HPIPE_G1_SET_1_G1_RX_DFE_EN_OFFSET 10 +#define HPIPE_G1_SET_1_G1_RX_DFE_EN_MASK \ + (0x1 << HPIPE_G1_SET_1_G1_RX_DFE_EN_OFFSET) +#define HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET 11 +#define HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_MASK \ + (0x3 << HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET) + +#define HPIPE_G2_SET_0_REG 0x3c +#define HPIPE_G2_SET_0_G2_TX_AMP_OFFSET 1 +#define HPIPE_G2_SET_0_G2_TX_AMP_MASK \ + (0x1f << HPIPE_G2_SET_0_G2_TX_AMP_OFFSET) +#define HPIPE_G2_SET_0_G2_TX_AMP_ADJ_OFFSET 6 +#define HPIPE_G2_SET_0_G2_TX_AMP_ADJ_MASK \ + (0x1 << HPIPE_G2_SET_0_G2_TX_AMP_ADJ_OFFSET) +#define HPIPE_G2_SET_0_G2_TX_EMPH1_OFFSET 7 +#define HPIPE_G2_SET_0_G2_TX_EMPH1_MASK \ + (0xf << HPIPE_G2_SET_0_G2_TX_EMPH1_OFFSET) +#define HPIPE_G2_SET_0_G2_TX_EMPH1_EN_OFFSET 11 +#define HPIPE_G2_SET_0_G2_TX_EMPH1_EN_MASK \ + (0x1 << HPIPE_G2_SET_0_G2_TX_EMPH1_EN_OFFSET) + +#define HPIPE_G2_SET_1_REG 0x40 +#define HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET 0 +#define HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK \ + (0x7 << 
HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET) +#define HPIPE_G2_SET_1_G2_RX_SELMUPF_OFFSET 3 +#define HPIPE_G2_SET_1_G2_RX_SELMUPF_MASK \ + (0x7 << HPIPE_G2_SET_1_G2_RX_SELMUPF_OFFSET) +#define HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET 6 +#define HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK \ + (0x3 << HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET) +#define HPIPE_G2_SET_1_G2_RX_SELMUFF_OFFSET 8 +#define HPIPE_G2_SET_1_G2_RX_SELMUFF_MASK \ + (0x3 << HPIPE_G2_SET_1_G2_RX_SELMUFF_OFFSET) +#define HPIPE_G2_SET_1_G2_RX_DFE_EN_OFFSET 10 +#define HPIPE_G2_SET_1_G2_RX_DFE_EN_MASK \ + (0x1 << HPIPE_G2_SET_1_G2_RX_DFE_EN_OFFSET) +#define HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_OFFSET 11 +#define HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_MASK \ + (0x3 << HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_OFFSET) + +#define HPIPE_G3_SET_0_REG 0x44 +#define HPIPE_G3_SET_0_G3_TX_AMP_OFFSET 1 +#define HPIPE_G3_SET_0_G3_TX_AMP_MASK \ + (0x1f << HPIPE_G3_SET_0_G3_TX_AMP_OFFSET) +#define HPIPE_G3_SET_0_G3_TX_AMP_ADJ_OFFSET 6 +#define HPIPE_G3_SET_0_G3_TX_AMP_ADJ_MASK \ + (0x1 << HPIPE_G3_SET_0_G3_TX_AMP_ADJ_OFFSET) +#define HPIPE_G3_SET_0_G3_TX_EMPH1_OFFSET 7 +#define HPIPE_G3_SET_0_G3_TX_EMPH1_MASK \ + (0xf << HPIPE_G3_SET_0_G3_TX_EMPH1_OFFSET) +#define HPIPE_G3_SET_0_G3_TX_EMPH1_EN_OFFSET 11 +#define HPIPE_G3_SET_0_G3_TX_EMPH1_EN_MASK \ + (0x1 << HPIPE_G3_SET_0_G3_TX_EMPH1_EN_OFFSET) +#define HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_OFFSET 12 +#define HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_MASK \ + (0x7 << HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_OFFSET) +#define HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_OFFSET 15 +#define HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_MASK \ + (0x1 << HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_OFFSET) + +#define HPIPE_G3_SET_1_REG 0x48 +#define HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET 0 +#define HPIPE_G3_SET_1_G3_RX_SELMUPI_MASK \ + (0x7 << HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET) +#define HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET 3 +#define HPIPE_G3_SET_1_G3_RX_SELMUPF_MASK \ + (0x7 << HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET) +#define HPIPE_G3_SET_1_G3_RX_SELMUFI_OFFSET 6 +#define HPIPE_G3_SET_1_G3_RX_SELMUFI_MASK \ + (0x3 << HPIPE_G3_SET_1_G3_RX_SELMUFI_OFFSET) +#define HPIPE_G3_SET_1_G3_RX_SELMUFF_OFFSET 8 +#define HPIPE_G3_SET_1_G3_RX_SELMUFF_MASK \ + (0x3 << HPIPE_G3_SET_1_G3_RX_SELMUFF_OFFSET) +#define HPIPE_G3_SET_1_G3_RX_DFE_EN_OFFSET 10 +#define HPIPE_G3_SET_1_G3_RX_DFE_EN_MASK \ + (0x1 << HPIPE_G3_SET_1_G3_RX_DFE_EN_OFFSET) +#define HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_OFFSET 11 +#define HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_MASK \ + (0x3 << HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_OFFSET) +#define HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_OFFSET 13 +#define HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_MASK \ + (0x1 << HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_OFFSET) + +#define HPIPE_PHY_TEST_CONTROL_REG 0x54 +#define HPIPE_PHY_TEST_PATTERN_SEL_OFFSET 4 +#define HPIPE_PHY_TEST_PATTERN_SEL_MASK \ + (0xf << HPIPE_PHY_TEST_PATTERN_SEL_OFFSET) +#define HPIPE_PHY_TEST_RESET_OFFSET 14 +#define HPIPE_PHY_TEST_RESET_MASK \ + (0x1 << HPIPE_PHY_TEST_RESET_OFFSET) +#define HPIPE_PHY_TEST_EN_OFFSET 15 +#define HPIPE_PHY_TEST_EN_MASK \ + (0x1 << HPIPE_PHY_TEST_EN_OFFSET) + +#define HPIPE_PHY_TEST_DATA_REG 0x6c +#define HPIPE_PHY_TEST_DATA_OFFSET 0 +#define HPIPE_PHY_TEST_DATA_MASK \ + (0xffff << HPIPE_PHY_TEST_DATA_OFFSET) + +#define HPIPE_PHY_TEST_PRBS_ERROR_COUNTER_1_REG 0x80 + +#define HPIPE_PHY_TEST_OOB_0_REGISTER 0x84 +#define HPIPE_PHY_PT_OOB_EN_OFFSET 14 +#define HPIPE_PHY_PT_OOB_EN_MASK \ + (0x1 << HPIPE_PHY_PT_OOB_EN_OFFSET) +#define HPIPE_PHY_TEST_PT_TESTMODE_OFFSET 12 +#define HPIPE_PHY_TEST_PT_TESTMODE_MASK \ + (0x3 << 
HPIPE_PHY_TEST_PT_TESTMODE_OFFSET) + +#define HPIPE_LOOPBACK_REG 0x8c +#define HPIPE_LOOPBACK_SEL_OFFSET 1 +#define HPIPE_LOOPBACK_SEL_MASK \ + (0x7 << HPIPE_LOOPBACK_SEL_OFFSET) +#define HPIPE_CDR_LOCK_OFFSET 7 +#define HPIPE_CDR_LOCK_MASK \ + (0x1 << HPIPE_CDR_LOCK_OFFSET) +#define HPIPE_CDR_LOCK_DET_EN_OFFSET 8 +#define HPIPE_CDR_LOCK_DET_EN_MASK \ + (0x1 << HPIPE_CDR_LOCK_DET_EN_OFFSET) + +#define HPIPE_SYNC_PATTERN_REG 0x090 +#define HPIPE_SYNC_PATTERN_TXD_INV_OFFSET 10 +#define HPIPE_SYNC_PATTERN_TXD_INV_MASK \ + (0x1 << HPIPE_SYNC_PATTERN_TXD_INV_OFFSET) +#define HPIPE_SYNC_PATTERN_RXD_INV_OFFSET 11 +#define HPIPE_SYNC_PATTERN_RXD_INV_MASK \ + (0x1 << HPIPE_SYNC_PATTERN_RXD_INV_OFFSET) + +#define HPIPE_INTERFACE_REG 0x94 +#define HPIPE_INTERFACE_GEN_MAX_OFFSET 10 +#define HPIPE_INTERFACE_GEN_MAX_MASK \ + (0x3 << HPIPE_INTERFACE_GEN_MAX_OFFSET) +#define HPIPE_INTERFACE_DET_BYPASS_OFFSET 12 +#define HPIPE_INTERFACE_DET_BYPASS_MASK \ + (0x1 << HPIPE_INTERFACE_DET_BYPASS_OFFSET) +#define HPIPE_INTERFACE_LINK_TRAIN_OFFSET 14 +#define HPIPE_INTERFACE_LINK_TRAIN_MASK \ + (0x1 << HPIPE_INTERFACE_LINK_TRAIN_OFFSET) + +#define HPIPE_G1_SET_2_REG 0xf4 +#define HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET 0 +#define HPIPE_G1_SET_2_G1_TX_EMPH0_MASK \ + (0xf << HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET) +#define HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET 4 +#define HPIPE_G1_SET_2_G1_TX_EMPH0_EN_MASK \ + (0x1 << HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET) + +#define HPIPE_G2_SET_2_REG 0xf8 +#define HPIPE_G2_SET_2_G2_TX_EMPH0_OFFSET 0 +#define HPIPE_G2_SET_2_G2_TX_EMPH0_MASK \ + (0xf << HPIPE_G2_SET_2_G2_TX_EMPH0_OFFSET) +#define HPIPE_G2_SET_2_G2_TX_EMPH0_EN_OFFSET 4 +#define HPIPE_G2_SET_2_G2_TX_EMPH0_EN_MASK \ + (0x1 << HPIPE_G2_SET_2_G2_TX_EMPH0_EN_OFFSET) +#define HPIPE_G2_TX_SSC_AMP_OFFSET 9 +#define HPIPE_G2_TX_SSC_AMP_MASK \ + (0x7f << HPIPE_G2_TX_SSC_AMP_OFFSET) + +#define HPIPE_G3_SET_2_REG 0xfc +#define HPIPE_G3_SET_2_G3_TX_EMPH0_OFFSET 0 +#define HPIPE_G3_SET_2_G3_TX_EMPH0_MASK \ + (0xf << HPIPE_G3_SET_2_G3_TX_EMPH0_OFFSET) +#define HPIPE_G3_SET_2_G3_TX_EMPH0_EN_OFFSET 4 +#define HPIPE_G3_SET_2_G3_TX_EMPH0_EN_MASK \ + (0x1 << HPIPE_G3_SET_2_G3_TX_EMPH0_EN_OFFSET) +#define HPIPE_G3_TX_SSC_AMP_OFFSET 9 +#define HPIPE_G3_TX_SSC_AMP_MASK \ + (0x7f << HPIPE_G3_TX_SSC_AMP_OFFSET) + +#define HPIPE_VDD_CAL_0_REG 0x108 +#define HPIPE_CAL_VDD_CONT_MODE_OFFSET 15 +#define HPIPE_CAL_VDD_CONT_MODE_MASK \ + (0x1 << HPIPE_CAL_VDD_CONT_MODE_OFFSET) + +#define HPIPE_VDD_CAL_CTRL_REG 0x114 +#define HPIPE_EXT_SELLV_RXSAMPL_OFFSET 5 +#define HPIPE_EXT_SELLV_RXSAMPL_MASK \ + (0x1f << HPIPE_EXT_SELLV_RXSAMPL_OFFSET) + +#define HPIPE_PCIE_REG0 0x120 +#define HPIPE_PCIE_IDLE_SYNC_OFFSET 12 +#define HPIPE_PCIE_IDLE_SYNC_MASK \ + (0x1 << HPIPE_PCIE_IDLE_SYNC_OFFSET) +#define HPIPE_PCIE_SEL_BITS_OFFSET 13 +#define HPIPE_PCIE_SEL_BITS_MASK \ + (0x3 << HPIPE_PCIE_SEL_BITS_OFFSET) + +#define HPIPE_LANE_ALIGN_REG 0x124 +#define HPIPE_LANE_ALIGN_OFF_OFFSET 12 +#define HPIPE_LANE_ALIGN_OFF_MASK \ + (0x1 << HPIPE_LANE_ALIGN_OFF_OFFSET) + +#define HPIPE_MISC_REG 0x13C +#define HPIPE_MISC_CLK100M_125M_OFFSET 4 +#define HPIPE_MISC_CLK100M_125M_MASK \ + (0x1 << HPIPE_MISC_CLK100M_125M_OFFSET) +#define HPIPE_MISC_ICP_FORCE_OFFSET 5 +#define HPIPE_MISC_ICP_FORCE_MASK \ + (0x1 << HPIPE_MISC_ICP_FORCE_OFFSET) +#define HPIPE_MISC_TXDCLK_2X_OFFSET 6 +#define HPIPE_MISC_TXDCLK_2X_MASK \ + (0x1 << HPIPE_MISC_TXDCLK_2X_OFFSET) +#define HPIPE_MISC_CLK500_EN_OFFSET 7 +#define HPIPE_MISC_CLK500_EN_MASK \ + (0x1 << HPIPE_MISC_CLK500_EN_OFFSET) +#define 
HPIPE_MISC_REFCLK_SEL_OFFSET 10 +#define HPIPE_MISC_REFCLK_SEL_MASK \ + (0x1 << HPIPE_MISC_REFCLK_SEL_OFFSET) + +#define HPIPE_RX_CONTROL_1_REG 0x140 +#define HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET 11 +#define HPIPE_RX_CONTROL_1_RXCLK2X_SEL_MASK \ + (0x1 << HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET) +#define HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET 12 +#define HPIPE_RX_CONTROL_1_CLK8T_EN_MASK \ + (0x1 << HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET) + +#define HPIPE_PWR_CTR_REG 0x148 +#define HPIPE_PWR_CTR_RST_DFE_OFFSET 0 +#define HPIPE_PWR_CTR_RST_DFE_MASK \ + (0x1 << HPIPE_PWR_CTR_RST_DFE_OFFSET) +#define HPIPE_PWR_CTR_SFT_RST_OFFSET 10 +#define HPIPE_PWR_CTR_SFT_RST_MASK \ + (0x1 << HPIPE_PWR_CTR_SFT_RST_OFFSET) + +#define HPIPE_SPD_DIV_FORCE_REG 0x154 +#define HPIPE_TXDIGCK_DIV_FORCE_OFFSET 7 +#define HPIPE_TXDIGCK_DIV_FORCE_MASK \ + (0x1 << HPIPE_TXDIGCK_DIV_FORCE_OFFSET) +#define HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_OFFSET 8 +#define HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_MASK \ + (0x3 << HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_OFFSET) +#define HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_OFFSET 10 +#define HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_MASK \ + (0x1 << HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_OFFSET) +#define HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_OFFSET 13 +#define HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_MASK \ + (0x3 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_OFFSET) +#define HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_OFFSET 15 +#define HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_MASK \ + (0x1 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_OFFSET) + +/* HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIBRATION_CTRL_REG */ +#define HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIB_CTRL_REG 0x168 +#define HPIPE_CAL_RXCLKALIGN_90_EXT_EN_OFFSET 15 +#define HPIPE_CAL_RXCLKALIGN_90_EXT_EN_MASK \ + (0x1 << HPIPE_CAL_RXCLKALIGN_90_EXT_EN_OFFSET) +#define HPIPE_CAL_OS_PH_EXT_OFFSET 8 +#define HPIPE_CAL_OS_PH_EXT_MASK \ + (0x7f << HPIPE_CAL_OS_PH_EXT_OFFSET) + +#define HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG 0x16C +#define HPIPE_RX_SAMPLER_OS_GAIN_OFFSET 6 +#define HPIPE_RX_SAMPLER_OS_GAIN_MASK \ + (0x3 << HPIPE_RX_SAMPLER_OS_GAIN_OFFSET) +#define HPIPE_SMAPLER_OFFSET 12 +#define HPIPE_SMAPLER_MASK \ + (0x1 << HPIPE_SMAPLER_OFFSET) + +#define HPIPE_TX_REG1_REG 0x174 +#define HPIPE_TX_REG1_TX_EMPH_RES_OFFSET 5 +#define HPIPE_TX_REG1_TX_EMPH_RES_MASK \ + (0x3 << HPIPE_TX_REG1_TX_EMPH_RES_OFFSET) +#define HPIPE_TX_REG1_SLC_EN_OFFSET 10 +#define HPIPE_TX_REG1_SLC_EN_MASK \ + (0x3f << HPIPE_TX_REG1_SLC_EN_OFFSET) + +#define HPIPE_PWR_CTR_DTL_REG 0x184 +#define HPIPE_PWR_CTR_DTL_SQ_DET_EN_OFFSET 0 +#define HPIPE_PWR_CTR_DTL_SQ_DET_EN_MASK \ + (0x1 << HPIPE_PWR_CTR_DTL_SQ_DET_EN_OFFSET) +#define HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_OFFSET 1 +#define HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_MASK \ + (0x1 << HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_OFFSET) +#define HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET 2 +#define HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK \ + (0x1 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET) +#define HPIPE_PWR_CTR_DTL_CLAMPING_SEL_OFFSET 4 +#define HPIPE_PWR_CTR_DTL_CLAMPING_SEL_MASK \ + (0x7 << HPIPE_PWR_CTR_DTL_CLAMPING_SEL_OFFSET) +#define HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_OFFSET 10 +#define HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_MASK \ + (0x1 << HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_OFFSET) +#define HPIPE_PWR_CTR_DTL_CLK_MODE_OFFSET 12 +#define HPIPE_PWR_CTR_DTL_CLK_MODE_MASK \ + (0x3 << HPIPE_PWR_CTR_DTL_CLK_MODE_OFFSET) +#define HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_OFFSET 14 +#define HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_MASK \ + (1 << HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_OFFSET) + +#define HPIPE_PHASE_CONTROL_REG 0x188 +#define HPIPE_OS_PH_OFFSET_OFFSET 0 
+#define HPIPE_OS_PH_OFFSET_MASK \ + (0x7f << HPIPE_OS_PH_OFFSET_OFFSET) +#define HPIPE_OS_PH_OFFSET_FORCE_OFFSET 7 +#define HPIPE_OS_PH_OFFSET_FORCE_MASK \ + (0x1 << HPIPE_OS_PH_OFFSET_FORCE_OFFSET) +#define HPIPE_OS_PH_VALID_OFFSET 8 +#define HPIPE_OS_PH_VALID_MASK \ + (0x1 << HPIPE_OS_PH_VALID_OFFSET) + +#define HPIPE_DATA_PHASE_OFF_CTRL_REG 0x1A0 +#define HPIPE_DATA_PHASE_ADAPTED_OS_PH_OFFSET 9 +#define HPIPE_DATA_PHASE_ADAPTED_OS_PH_MASK \ + (0x7f << HPIPE_DATA_PHASE_ADAPTED_OS_PH_OFFSET) + +#define HPIPE_ADAPTED_FFE_CAPACITOR_COUNTER_CTRL_REG 0x1A4 +#define HPIPE_ADAPTED_FFE_ADAPTED_FFE_RES_OFFSET 12 +#define HPIPE_ADAPTED_FFE_ADAPTED_FFE_RES_MASK \ + (0x3 << HPIPE_ADAPTED_FFE_ADAPTED_FFE_RES_OFFSET) +#define HPIPE_ADAPTED_FFE_ADAPTED_FFE_CAP_OFFSET 8 +#define HPIPE_ADAPTED_FFE_ADAPTED_FFE_CAP_MASK \ + (0xf << HPIPE_ADAPTED_FFE_ADAPTED_FFE_CAP_OFFSET) + +#define HPIPE_SQ_GLITCH_FILTER_CTRL 0x1c8 +#define HPIPE_SQ_DEGLITCH_WIDTH_P_OFFSET 0 +#define HPIPE_SQ_DEGLITCH_WIDTH_P_MASK \ + (0xf << HPIPE_SQ_DEGLITCH_WIDTH_P_OFFSET) +#define HPIPE_SQ_DEGLITCH_WIDTH_N_OFFSET 4 +#define HPIPE_SQ_DEGLITCH_WIDTH_N_MASK \ + (0xf << HPIPE_SQ_DEGLITCH_WIDTH_N_OFFSET) +#define HPIPE_SQ_DEGLITCH_EN_OFFSET 8 +#define HPIPE_SQ_DEGLITCH_EN_MASK \ + (0x1 << HPIPE_SQ_DEGLITCH_EN_OFFSET) + +#define HPIPE_FRAME_DETECT_CTRL_0_REG 0x214 +#define HPIPE_TRAIN_PAT_NUM_OFFSET 0x7 +#define HPIPE_TRAIN_PAT_NUM_MASK \ + (0x1FF << HPIPE_TRAIN_PAT_NUM_OFFSET) + +#define HPIPE_FRAME_DETECT_CTRL_3_REG 0x220 +#define HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_OFFSET 12 +#define HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_MASK \ + (0x1 << HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_OFFSET) + +#define HPIPE_DME_REG 0x228 +#define HPIPE_DME_ETHERNET_MODE_OFFSET 7 +#define HPIPE_DME_ETHERNET_MODE_MASK \ + (0x1 << HPIPE_DME_ETHERNET_MODE_OFFSET) + +#define HPIPE_TRX_TRAIN_CTRL_0_REG 0x22c +#define HPIPE_TRX_TX_F0T_EO_BASED_OFFSET 14 +#define HPIPE_TRX_TX_F0T_EO_BASED_MASK \ + (1 << HPIPE_TRX_TX_F0T_EO_BASED_OFFSET) +#define HPIPE_TRX_UPDATE_THEN_HOLD_OFFSET 6 +#define HPIPE_TRX_UPDATE_THEN_HOLD_MASK \ + (1 << HPIPE_TRX_UPDATE_THEN_HOLD_OFFSET) +#define HPIPE_TRX_TX_CTRL_CLK_EN_OFFSET 5 +#define HPIPE_TRX_TX_CTRL_CLK_EN_MASK \ + (1 << HPIPE_TRX_TX_CTRL_CLK_EN_OFFSET) +#define HPIPE_TRX_RX_ANA_IF_CLK_ENE_OFFSET 4 +#define HPIPE_TRX_RX_ANA_IF_CLK_ENE_MASK \ + (1 << HPIPE_TRX_RX_ANA_IF_CLK_ENE_OFFSET) +#define HPIPE_TRX_TX_TRAIN_EN_OFFSET 1 +#define HPIPE_TRX_TX_TRAIN_EN_MASK \ + (1 << HPIPE_TRX_TX_TRAIN_EN_OFFSET) +#define HPIPE_TRX_RX_TRAIN_EN_OFFSET 0 +#define HPIPE_TRX_RX_TRAIN_EN_MASK \ + (1 << HPIPE_TRX_RX_TRAIN_EN_OFFSET) + +#define HPIPE_TX_TRAIN_CTRL_0_REG 0x268 +#define HPIPE_TX_TRAIN_P2P_HOLD_OFFSET 15 +#define HPIPE_TX_TRAIN_P2P_HOLD_MASK \ + (0x1 << HPIPE_TX_TRAIN_P2P_HOLD_OFFSET) + +#define HPIPE_TX_TRAIN_CTRL_REG 0x26C +#define HPIPE_TX_TRAIN_CTRL_G1_OFFSET 0 +#define HPIPE_TX_TRAIN_CTRL_G1_MASK \ + (0x1 << HPIPE_TX_TRAIN_CTRL_G1_OFFSET) +#define HPIPE_TX_TRAIN_CTRL_GN1_OFFSET 1 +#define HPIPE_TX_TRAIN_CTRL_GN1_MASK \ + (0x1 << HPIPE_TX_TRAIN_CTRL_GN1_OFFSET) +#define HPIPE_TX_TRAIN_CTRL_G0_OFFSET 2 +#define HPIPE_TX_TRAIN_CTRL_G0_MASK \ + (0x1 << HPIPE_TX_TRAIN_CTRL_G0_OFFSET) + +#define HPIPE_TX_TRAIN_CTRL_4_REG 0x278 +#define HPIPE_TRX_TRAIN_TIMER_OFFSET 0 +#define HPIPE_TRX_TRAIN_TIMER_MASK \ + (0x3FF << HPIPE_TRX_TRAIN_TIMER_OFFSET) + +#define HPIPE_TX_TRAIN_CTRL_5_REG 0x2A4 +#define HPIPE_RX_TRAIN_TIMER_OFFSET 0 +#define HPIPE_RX_TRAIN_TIMER_MASK \ + (0x3ff << HPIPE_RX_TRAIN_TIMER_OFFSET) +#define HPIPE_TX_TRAIN_START_SQ_EN_OFFSET 11 
+#define HPIPE_TX_TRAIN_START_SQ_EN_MASK \ + (0x1 << HPIPE_TX_TRAIN_START_SQ_EN_OFFSET) +#define HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET 12 +#define HPIPE_TX_TRAIN_START_FRM_DET_EN_MASK \ + (0x1 << HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET) +#define HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET 13 +#define HPIPE_TX_TRAIN_START_FRM_LOCK_EN_MASK \ + (0x1 << HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET) +#define HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET 14 +#define HPIPE_TX_TRAIN_WAIT_TIME_EN_MASK \ + (0x1 << HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET) + +#define HPIPE_INTERRUPT_1_REGISTER 0x2AC +#define HPIPE_TRX_TRAIN_FAILED_OFFSET 6 +#define HPIPE_TRX_TRAIN_FAILED_MASK \ + (1 << HPIPE_TRX_TRAIN_FAILED_OFFSET) +#define HPIPE_TRX_TRAIN_TIME_OUT_INT_OFFSET 5 +#define HPIPE_TRX_TRAIN_TIME_OUT_INT_MASK \ + (1 << HPIPE_TRX_TRAIN_TIME_OUT_INT_OFFSET) +#define HPIPE_INTERRUPT_TRX_TRAIN_DONE_OFFSET 4 +#define HPIPE_INTERRUPT_TRX_TRAIN_DONE_MASK \ + (1 << HPIPE_INTERRUPT_TRX_TRAIN_DONE_OFFSET) +#define HPIPE_INTERRUPT_DFE_DONE_INT_OFFSET 3 +#define HPIPE_INTERRUPT_DFE_DONE_INT_MASK \ + (1 << HPIPE_INTERRUPT_DFE_DONE_INT_OFFSET) +#define HPIPE_INTERRUPT_RX_TRAIN_COMPLETE_INT_OFFSET 1 +#define HPIPE_INTERRUPT_RX_TRAIN_COMPLETE_INT_MASK \ + (1 << HPIPE_INTERRUPT_RX_TRAIN_COMPLETE_INT_OFFSET) + +#define HPIPE_TX_TRAIN_REG 0x31C +#define HPIPE_TX_TRAIN_CHK_INIT_OFFSET 4 +#define HPIPE_TX_TRAIN_CHK_INIT_MASK \ + (0x1 << HPIPE_TX_TRAIN_CHK_INIT_OFFSET) +#define HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET 7 +#define HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_MASK \ + (0x1 << HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET) +#define HPIPE_TX_TRAIN_16BIT_AUTO_EN_OFFSET 8 +#define HPIPE_TX_TRAIN_16BIT_AUTO_EN_MASK \ + (0x1 << HPIPE_TX_TRAIN_16BIT_AUTO_EN_OFFSET) +#define HPIPE_TX_TRAIN_PAT_SEL_OFFSET 9 +#define HPIPE_TX_TRAIN_PAT_SEL_MASK \ + (0x1 << HPIPE_TX_TRAIN_PAT_SEL_OFFSET) + +#define HPIPE_SAVED_DFE_VALUES_REG 0x328 +#define HPIPE_SAVED_DFE_VALUES_SAV_F0D_OFFSET 10 +#define HPIPE_SAVED_DFE_VALUES_SAV_F0D_MASK \ + (0x3f << HPIPE_SAVED_DFE_VALUES_SAV_F0D_OFFSET) + +#define HPIPE_CDR_CONTROL_REG 0x418 +#define HPIPE_CRD_MIDPOINT_PHASE_OS_OFFSET 0 +#define HPIPE_CRD_MIDPOINT_PHASE_OS_MASK \ + (0x3f << HPIPE_CRD_MIDPOINT_PHASE_OS_OFFSET) +#define HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET 6 +#define HPIPE_CDR_MAX_DFE_ADAPT_1_MASK \ + (0x7 << HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET) +#define HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET 9 +#define HPIPE_CDR_MAX_DFE_ADAPT_0_MASK \ + (0x7 << HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET) +#define HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET 12 +#define HPIPE_CDR_RX_MAX_DFE_ADAPT_1_MASK \ + (0x3 << HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET) +#define HPIPE_CDR_RX_MAX_DFE_ADAPT_0_OFFSET 14 +#define HPIPE_CDR_RX_MAX_DFE_ADAPT_0_MASK \ + (0x3 << HPIPE_CDR_RX_MAX_DFE_ADAPT_0_OFFSET) + + +#define HPIPE_CDR_CONTROL1_REG 0x41c +#define HPIPE_CRD2_CRD_MIDPOINT_SMALL_THRES_K_OFF 12 +#define HPIPE_CRD2_CRD_MIDPOINT_SMALL_THRES_K_MASK \ + (0xf << HPIPE_CRD2_CRD_MIDPOINT_SMALL_THRES_K_OFF) + +#define HPIPE_CDR_CONTROL2_REG 0x420 +#define HPIPE_CRD2_CRD_MIDPOINT_LARGE_THRES_K_OFF 12 +#define HPIPE_CRD2_CRD_MIDPOINT_LARGE_THRES_K_MASK \ + (0xf << HPIPE_CRD2_CRD_MIDPOINT_LARGE_THRES_K_OFF) + +#define HPIPE_TX_TRAIN_CTRL_11_REG 0x438 +#define HPIPE_TX_STATUS_CHECK_MODE_OFFSET 6 +#define HPIPE_TX_TX_STATUS_CHECK_MODE_MASK \ + (0x1 << HPIPE_TX_STATUS_CHECK_MODE_OFFSET) +#define HPIPE_TX_NUM_OF_PRESET_OFFSET 10 +#define HPIPE_TX_NUM_OF_PRESET_MASK \ + (0x7 << HPIPE_TX_NUM_OF_PRESET_OFFSET) +#define HPIPE_TX_SWEEP_PRESET_EN_OFFSET 15 +#define HPIPE_TX_SWEEP_PRESET_EN_MASK \ + (0x1 << 
HPIPE_TX_SWEEP_PRESET_EN_OFFSET) + +#define HPIPE_G1_SETTINGS_3_REG 0x440 +#define HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET 0 +#define HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK \ + (0xf << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET) +#define HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET 4 +#define HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK \ + (0x7 << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET) +#define HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET 7 +#define HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK \ + (0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET) +#define HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_OFFSET 9 +#define HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_MASK \ + (0x1 << HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_OFFSET) +#define HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_OFFSET 12 +#define HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_MASK \ + (0x3 << HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_OFFSET) +#define HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_OFFSET 14 +#define HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_MASK \ + (0x3 << HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_OFFSET) + +#define HPIPE_G1_SETTINGS_4_REG 0x444 +#define HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET 8 +#define HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK \ + (0x3 << HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET) + +#define HPIPE_G2_SETTINGS_4_REG 0x44c +#define HPIPE_G2_DFE_RES_OFFSET 8 +#define HPIPE_G2_DFE_RES_MASK \ + (0x3 << HPIPE_G2_DFE_RES_OFFSET) + +#define HPIPE_G3_SETTING_3_REG 0x450 +#define HPIPE_G3_FFE_CAP_SEL_OFFSET 0 +#define HPIPE_G3_FFE_CAP_SEL_MASK \ + (0xf << HPIPE_G3_FFE_CAP_SEL_OFFSET) +#define HPIPE_G3_FFE_RES_SEL_OFFSET 4 +#define HPIPE_G3_FFE_RES_SEL_MASK \ + (0x7 << HPIPE_G3_FFE_RES_SEL_OFFSET) +#define HPIPE_G3_FFE_SETTING_FORCE_OFFSET 7 +#define HPIPE_G3_FFE_SETTING_FORCE_MASK \ + (0x1 << HPIPE_G3_FFE_SETTING_FORCE_OFFSET) +#define HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET 12 +#define HPIPE_G3_FFE_DEG_RES_LEVEL_MASK \ + (0x3 << HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET) +#define HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET 14 +#define HPIPE_G3_FFE_LOAD_RES_LEVEL_MASK \ + (0x3 << HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET) + +#define HPIPE_G3_SETTING_4_REG 0x454 +#define HPIPE_G3_DFE_RES_OFFSET 8 +#define HPIPE_G3_DFE_RES_MASK (0x3 << HPIPE_G3_DFE_RES_OFFSET) + +#define HPIPE_TX_PRESET_INDEX_REG 0x468 +#define HPIPE_TX_PRESET_INDEX_OFFSET 0 +#define HPIPE_TX_PRESET_INDEX_MASK \ + (0xf << HPIPE_TX_PRESET_INDEX_OFFSET) + +#define HPIPE_DFE_CONTROL_REG 0x470 +#define HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET 14 +#define HPIPE_DFE_TX_MAX_DFE_ADAPT_MASK \ + (0x3 << HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET) + +#define HPIPE_DFE_CTRL_28_REG 0x49C +#define HPIPE_DFE_CTRL_28_PIPE4_OFFSET 7 +#define HPIPE_DFE_CTRL_28_PIPE4_MASK \ + (0x1 << HPIPE_DFE_CTRL_28_PIPE4_OFFSET) + +#define HPIPE_TRX0_REG 0x4cc /*in doc 0x133*4*/ +#define HPIPE_TRX0_GAIN_TRAIN_WITH_SAMPLER_OFF 2 +#define HPIPE_TRX0_GAIN_TRAIN_WITH_SAMPLER_MASK \ + (0x1 << HPIPE_TRX0_GAIN_TRAIN_WITH_SAMPLER_OFF) +#define HPIPE_TRX0_GAIN_TRAIN_WITH_C_OFF 0 +#define HPIPE_TRX0_GAIN_TRAIN_WITH_C_MASK \ + (0x1 << HPIPE_TRX0_GAIN_TRAIN_WITH_C_OFF) + +#define HPIPE_TRX_REG1 0x4d0 /*in doc 0x134*4*/ +#define HPIPE_TRX_REG1_MIN_BOOST_MODE_OFF 3 +#define HPIPE_TRX_REG1_MIN_BOOST_MODE_MASK \ + (0x1 << HPIPE_TRX_REG1_MIN_BOOST_MODE_OFF) +#define HPIPE_TRX_REG1_SUMFTAP_EN_OFF 10 +#define HPIPE_TRX_REG1_SUMFTAP_EN_MASK \ + (0x3f << HPIPE_TRX_REG1_SUMFTAP_EN_OFF) + +#define HPIPE_TRX_REG2 0x4d8 /*in doc 0x136*4*/ +#define HPIPE_TRX_REG2_SUMF_BOOST_TARGET_C_OFF 11 +#define HPIPE_TRX_REG2_SUMF_BOOST_TARGET_C_MASK \ + (0x1f << 
HPIPE_TRX_REG2_SUMF_BOOST_TARGET_C_OFF) +#define HPIPE_TRX_REG2_SUMF_BOOST_TARGET_K_OFF 7 +#define HPIPE_TRX_REG2_SUMF_BOOST_TARGET_K_MASK \ + (0xf << HPIPE_TRX_REG2_SUMF_BOOST_TARGET_K_OFF) + +#define HPIPE_G1_SETTING_5_REG 0x538 +#define HPIPE_G1_SETTING_5_G1_ICP_OFFSET 0 +#define HPIPE_G1_SETTING_5_G1_ICP_MASK \ + (0xf << HPIPE_G1_SETTING_5_G1_ICP_OFFSET) + +#define HPIPE_G3_SETTING_5_REG 0x548 +#define HPIPE_G3_SETTING_5_G3_ICP_OFFSET 0 +#define HPIPE_G3_SETTING_5_G3_ICP_MASK \ + (0xf << HPIPE_G3_SETTING_5_G3_ICP_OFFSET) + +#define HPIPE_LANE_CONFIG0_REG 0x600 +#define HPIPE_LANE_CONFIG0_TXDEEMPH0_OFFSET 0 +#define HPIPE_LANE_CONFIG0_TXDEEMPH0_MASK \ + (0x1 << HPIPE_LANE_CONFIG0_TXDEEMPH0_OFFSET) + +#define HPIPE_LANE_STATUS1_REG 0x60C +#define HPIPE_LANE_STATUS1_PCLK_EN_OFFSET 0 +#define HPIPE_LANE_STATUS1_PCLK_EN_MASK \ + (0x1 << HPIPE_LANE_STATUS1_PCLK_EN_OFFSET) + +#define HPIPE_LANE_CFG4_REG 0x620 +#define HPIPE_LANE_CFG4_DFE_CTRL_OFFSET 0 +#define HPIPE_LANE_CFG4_DFE_CTRL_MASK \ + (0x7 << HPIPE_LANE_CFG4_DFE_CTRL_OFFSET) +#define HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET 3 +#define HPIPE_LANE_CFG4_DFE_EN_SEL_MASK \ + (0x1 << HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET) +#define HPIPE_LANE_CFG4_DFE_OVER_OFFSET 6 +#define HPIPE_LANE_CFG4_DFE_OVER_MASK \ + (0x1 << HPIPE_LANE_CFG4_DFE_OVER_OFFSET) +#define HPIPE_LANE_CFG4_SSC_CTRL_OFFSET 7 +#define HPIPE_LANE_CFG4_SSC_CTRL_MASK \ + (0x1 << HPIPE_LANE_CFG4_SSC_CTRL_OFFSET) + +#define HPIPE_LANE_EQ_REMOTE_SETTING_REG 0x6f8 +#define HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET 0 +#define HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_MASK \ + (0x1 << HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET) +#define HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET 1 +#define HPIPE_LANE_CFG_FOM_ONLY_MODE_MASK \ + (0x1 << HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET) +#define HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET 2 +#define HPIPE_LANE_CFG_FOM_PRESET_VECTOR_MASK \ + (0xf << HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET) + +#define HPIPE_LANE_EQU_CONFIG_0_REG 0x69C +#define HPIPE_CFG_PHY_RC_EP_OFFSET 12 +#define HPIPE_CFG_PHY_RC_EP_MASK \ + (0x1 << HPIPE_CFG_PHY_RC_EP_OFFSET) + +#define HPIPE_LANE_EQ_CFG1_REG 0x6a0 +#define HPIPE_CFG_UPDATE_POLARITY_OFFSET 12 +#define HPIPE_CFG_UPDATE_POLARITY_MASK \ + (0x1 << HPIPE_CFG_UPDATE_POLARITY_OFFSET) + +#define HPIPE_LANE_EQ_CFG2_REG 0x6a4 +#define HPIPE_CFG_EQ_BUNDLE_DIS_OFFSET 14 +#define HPIPE_CFG_EQ_BUNDLE_DIS_MASK \ + (0x1 << HPIPE_CFG_EQ_BUNDLE_DIS_OFFSET) + +#define HPIPE_RST_CLK_CTRL_REG 0x704 +#define HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET 0 +#define HPIPE_RST_CLK_CTRL_PIPE_RST_MASK \ + (0x1 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET) +#define HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET 2 +#define HPIPE_RST_CLK_CTRL_FIXED_PCLK_MASK \ + (0x1 << HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET) +#define HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET 3 +#define HPIPE_RST_CLK_CTRL_PIPE_WIDTH_MASK \ + (0x1 << HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET) +#define HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET 9 +#define HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_MASK \ + (0x1 << HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET) + +#define HPIPE_TST_MODE_CTRL_REG 0x708 +#define HPIPE_TST_MODE_CTRL_MODE_MARGIN_OFFSET 2 +#define HPIPE_TST_MODE_CTRL_MODE_MARGIN_MASK \ + (0x1 << HPIPE_TST_MODE_CTRL_MODE_MARGIN_OFFSET) + +#define HPIPE_CLK_SRC_LO_REG 0x70c +#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET 1 +#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_MASK \ + (0x1 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET) +#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET 2 +#define HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_MASK \ + (0x3 << 
HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET) +#define HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET 5 +#define HPIPE_CLK_SRC_LO_PLL_RDY_DL_MASK \ + (0x7 << HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET) + +#define HPIPE_CLK_SRC_HI_REG 0x710 +#define HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET 0 +#define HPIPE_CLK_SRC_HI_LANE_STRT_MASK \ + (0x1 << HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET) +#define HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET 1 +#define HPIPE_CLK_SRC_HI_LANE_BREAK_MASK \ + (0x1 << HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET) +#define HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET 2 +#define HPIPE_CLK_SRC_HI_LANE_MASTER_MASK \ + (0x1 << HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET) +#define HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET 7 +#define HPIPE_CLK_SRC_HI_MODE_PIPE_MASK \ + (0x1 << HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET) + +#define HPIPE_GLOBAL_MISC_CTRL 0x718 +#define HPIPE_GLOBAL_PM_CTRL 0x740 +#define HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET 0 +#define HPIPE_GLOBAL_PM_RXDLOZ_WAIT_MASK \ + (0xFF << HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET) + +/* General defines */ +#define PLL_LOCK_TIMEOUT 15000 + +#endif /* COMPHY_CP110_H */ diff --git a/drivers/marvell/comphy/phy-comphy-3700.c b/drivers/marvell/comphy/phy-comphy-3700.c new file mode 100644 index 0000000..1a97753 --- /dev/null +++ b/drivers/marvell/comphy/phy-comphy-3700.c @@ -0,0 +1,1065 @@ +/* + * Copyright (C) 2018-2021 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#include <errno.h> + +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <lib/mmio.h> +#include <lib/spinlock.h> + +#include <mvebu.h> +#include <mvebu_def.h> +#include <plat_marvell.h> + +#include "phy-comphy-3700.h" +#include "phy-comphy-common.h" + +/* + * COMPHY_INDIRECT_REG points to ahci address space but the ahci region used in + * Linux is up to 0x178 so none will access it from Linux in runtime + * concurrently. 
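+ * Lane 2 (SATA/USB3) registers are reached through the indirect
+ * address/data register pair located in this range; see
+ * comphy_set_indirect() below.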
+ */ +#define COMPHY_INDIRECT_REG (MVEBU_REGS_BASE + 0xE0178) + +/* The USB3_GBE1_PHY range is above USB3 registers used in dts */ +#define USB3_GBE1_PHY (MVEBU_REGS_BASE + 0x5C000) +#define COMPHY_SD_ADDR (MVEBU_REGS_BASE + 0x1F000) + +struct sgmii_phy_init_data_fix { + uint16_t addr; + uint16_t value; +}; + +/* Changes to 40M1G25 mode data required for running 40M3G125 init mode */ +static struct sgmii_phy_init_data_fix sgmii_phy_init_fix[] = { + {0x005, 0x07CC}, {0x015, 0x0000}, {0x01B, 0x0000}, {0x01D, 0x0000}, + {0x01E, 0x0000}, {0x01F, 0x0000}, {0x020, 0x0000}, {0x021, 0x0030}, + {0x026, 0x0888}, {0x04D, 0x0152}, {0x04F, 0xA020}, {0x050, 0x07CC}, + {0x053, 0xE9CA}, {0x055, 0xBD97}, {0x071, 0x3015}, {0x076, 0x03AA}, + {0x07C, 0x0FDF}, {0x0C2, 0x3030}, {0x0C3, 0x8000}, {0x0E2, 0x5550}, + {0x0E3, 0x12A4}, {0x0E4, 0x7D00}, {0x0E6, 0x0C83}, {0x101, 0xFCC0}, + {0x104, 0x0C10} +}; + +/* 40M1G25 mode init data */ +static uint16_t sgmii_phy_init[512] = { + /* 0 1 2 3 4 5 6 7 */ + /*-----------------------------------------------------------*/ + /* 8 9 A B C D E F */ + 0x3110, 0xFD83, 0x6430, 0x412F, 0x82C0, 0x06FA, 0x4500, 0x6D26, /* 00 */ + 0xAFC0, 0x8000, 0xC000, 0x0000, 0x2000, 0x49CC, 0x0BC9, 0x2A52, /* 08 */ + 0x0BD2, 0x0CDE, 0x13D2, 0x0CE8, 0x1149, 0x10E0, 0x0000, 0x0000, /* 10 */ + 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x4134, 0x0D2D, 0xFFFF, /* 18 */ + 0xFFE0, 0x4030, 0x1016, 0x0030, 0x0000, 0x0800, 0x0866, 0x0000, /* 20 */ + 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, /* 28 */ + 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 30 */ + 0x0000, 0x0000, 0x000F, 0x6A62, 0x1988, 0x3100, 0x3100, 0x3100, /* 38 */ + 0x3100, 0xA708, 0x2430, 0x0830, 0x1030, 0x4610, 0xFF00, 0xFF00, /* 40 */ + 0x0060, 0x1000, 0x0400, 0x0040, 0x00F0, 0x0155, 0x1100, 0xA02A, /* 48 */ + 0x06FA, 0x0080, 0xB008, 0xE3ED, 0x5002, 0xB592, 0x7A80, 0x0001, /* 50 */ + 0x020A, 0x8820, 0x6014, 0x8054, 0xACAA, 0xFC88, 0x2A02, 0x45CF, /* 58 */ + 0x000F, 0x1817, 0x2860, 0x064F, 0x0000, 0x0204, 0x1800, 0x6000, /* 60 */ + 0x810F, 0x4F23, 0x4000, 0x4498, 0x0850, 0x0000, 0x000E, 0x1002, /* 68 */ + 0x9D3A, 0x3009, 0xD066, 0x0491, 0x0001, 0x6AB0, 0x0399, 0x3780, /* 70 */ + 0x0040, 0x5AC0, 0x4A80, 0x0000, 0x01DF, 0x0000, 0x0007, 0x0000, /* 78 */ + 0x2D54, 0x00A1, 0x4000, 0x0100, 0xA20A, 0x0000, 0x0000, 0x0000, /* 80 */ + 0x0000, 0x0000, 0x0000, 0x7400, 0x0E81, 0x1000, 0x1242, 0x0210, /* 88 */ + 0x80DF, 0x0F1F, 0x2F3F, 0x4F5F, 0x6F7F, 0x0F1F, 0x2F3F, 0x4F5F, /* 90 */ + 0x6F7F, 0x4BAD, 0x0000, 0x0000, 0x0800, 0x0000, 0x2400, 0xB651, /* 98 */ + 0xC9E0, 0x4247, 0x0A24, 0x0000, 0xAF19, 0x1004, 0x0000, 0x0000, /* A0 */ + 0x0000, 0x0013, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* A8 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* B0 */ + 0x0000, 0x0000, 0x0000, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, /* B8 */ + 0x0000, 0x0000, 0x3010, 0xFA00, 0x0000, 0x0000, 0x0000, 0x0003, /* C0 */ + 0x1618, 0x8200, 0x8000, 0x0400, 0x050F, 0x0000, 0x0000, 0x0000, /* C8 */ + 0x4C93, 0x0000, 0x1000, 0x1120, 0x0010, 0x1242, 0x1242, 0x1E00, /* D0 */ + 0x0000, 0x0000, 0x0000, 0x00F8, 0x0000, 0x0041, 0x0800, 0x0000, /* D8 */ + 0x82A0, 0x572E, 0x2490, 0x14A9, 0x4E00, 0x0000, 0x0803, 0x0541, /* E0 */ + 0x0C15, 0x0000, 0x0000, 0x0400, 0x2626, 0x0000, 0x0000, 0x4200, /* E8 */ + 0x0000, 0xAA55, 0x1020, 0x0000, 0x0000, 0x5010, 0x0000, 0x0000, /* F0 */ + 0x0000, 0x0000, 0x5000, 0x0000, 0x0000, 0x0000, 0x02F2, 0x0000, /* F8 */ + 0x101F, 0xFDC0, 0x4000, 0x8010, 0x0110, 0x0006, 0x0000, 0x0000, /*100 */ 
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*108 */ + 0x04CF, 0x0000, 0x04CF, 0x0000, 0x04CF, 0x0000, 0x04C6, 0x0000, /*110 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*118 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*120 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*128 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*130 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*138 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*140 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*148 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*150 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*158 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*160 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*168 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*170 */ + 0x0000, 0x0000, 0x0000, 0x00F0, 0x08A2, 0x3112, 0x0A14, 0x0000, /*178 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*180 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*188 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*190 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*198 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1A0 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1A8 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1B0 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1B8 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1C0 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1C8 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1D0 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1D8 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1E0 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1E8 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1F0 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 /*1F8 */ +}; + +/* PHY selector configures with corresponding modes */ +static int mvebu_a3700_comphy_set_phy_selector(uint8_t comphy_index, + uint32_t comphy_mode) +{ + uint32_t reg; + int mode = COMPHY_GET_MODE(comphy_mode); + + reg = mmio_read_32(MVEBU_COMPHY_REG_BASE + COMPHY_SELECTOR_PHY_REG); + switch (mode) { + case (COMPHY_SATA_MODE): + /* SATA must be in Lane2 */ + if (comphy_index == COMPHY_LANE2) + reg &= ~COMPHY_SELECTOR_USB3_PHY_SEL_BIT; + else + goto error; + break; + + case (COMPHY_SGMII_MODE): + case (COMPHY_2500BASEX_MODE): + if (comphy_index == COMPHY_LANE0) + reg &= ~COMPHY_SELECTOR_USB3_GBE1_SEL_BIT; + else if (comphy_index == COMPHY_LANE1) + reg &= ~COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT; + else + goto error; + break; + + case (COMPHY_USB3H_MODE): + case (COMPHY_USB3D_MODE): + case (COMPHY_USB3_MODE): + if (comphy_index == COMPHY_LANE2) + reg |= COMPHY_SELECTOR_USB3_PHY_SEL_BIT; + else if (comphy_index == COMPHY_LANE0) + reg |= COMPHY_SELECTOR_USB3_GBE1_SEL_BIT; + else + goto error; + break; + + case (COMPHY_PCIE_MODE): + /* PCIE must be in Lane1 */ + if (comphy_index == COMPHY_LANE1) + reg |= COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT; + else + goto error; + break; + + default: + goto error; + } + + 
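+	/* Commit the updated lane selection to the PHY selector register */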
mmio_write_32(MVEBU_COMPHY_REG_BASE + COMPHY_SELECTOR_PHY_REG, reg); + return 0; +error: + ERROR("COMPHY[%d] mode[%d] is invalid\n", comphy_index, mode); + return -EINVAL; +} + +/* + * This is something like the inverse of the previous function: for given + * lane it returns COMPHY_*_MODE. + * + * It is useful when powering the phy off. + * + * This function returns COMPHY_USB3_MODE even if the phy was configured + * with COMPHY_USB3D_MODE or COMPHY_USB3H_MODE. (The usb3 phy initialization + * code does not differentiate between these modes.) + * Also it returns COMPHY_SGMII_MODE even if the phy was configures with + * COMPHY_2500BASEX_MODE. (The sgmii phy initialization code does differentiate + * between these modes, but it is irrelevant when powering the phy off.) + */ +static int mvebu_a3700_comphy_get_mode(uint8_t comphy_index) +{ + uint32_t reg; + + reg = mmio_read_32(MVEBU_COMPHY_REG_BASE + COMPHY_SELECTOR_PHY_REG); + switch (comphy_index) { + case COMPHY_LANE0: + if ((reg & COMPHY_SELECTOR_USB3_GBE1_SEL_BIT) != 0) + return COMPHY_USB3_MODE; + else + return COMPHY_SGMII_MODE; + case COMPHY_LANE1: + if ((reg & COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT) != 0) + return COMPHY_PCIE_MODE; + else + return COMPHY_SGMII_MODE; + case COMPHY_LANE2: + if ((reg & COMPHY_SELECTOR_USB3_PHY_SEL_BIT) != 0) + return COMPHY_USB3_MODE; + else + return COMPHY_SATA_MODE; + } + + return COMPHY_UNUSED; +} + +/* It is only used for SATA and USB3 on comphy lane2. */ +static void comphy_set_indirect(uintptr_t addr, uint32_t offset, uint16_t data, + uint16_t mask, bool is_sata) +{ + /* + * When Lane 2 PHY is for USB3, access the PHY registers + * through indirect Address and Data registers: + * INDIR_ACC_PHY_ADDR (RD00E0178h [31:0]), + * INDIR_ACC_PHY_DATA (RD00E017Ch [31:0]), + * within the SATA Host Controller registers, Lane 2 base register + * offset is 0x200 + */ + if (is_sata) { + mmio_write_32(addr + COMPHY_LANE2_INDIR_ADDR_OFFSET, offset); + } else { + mmio_write_32(addr + COMPHY_LANE2_INDIR_ADDR_OFFSET, + offset + USB3PHY_LANE2_REG_BASE_OFFSET); + } + + reg_set(addr + COMPHY_LANE2_INDIR_DATA_OFFSET, data, mask); +} + +/* It is only used for SATA on comphy lane2. */ +static void comphy_sata_set_indirect(uintptr_t addr, uint32_t reg_offset, + uint16_t data, uint16_t mask) +{ + comphy_set_indirect(addr, reg_offset, data, mask, true); +} + +/* It is only used for USB3 indirect access on comphy lane2. */ +static void comphy_usb3_set_indirect(uintptr_t addr, uint32_t reg_offset, + uint16_t data, uint16_t mask) +{ + comphy_set_indirect(addr, reg_offset, data, mask, false); +} + +/* It is only used for USB3 direct access not on comphy lane2. */ +static void comphy_usb3_set_direct(uintptr_t addr, uint32_t reg_offset, + uint16_t data, uint16_t mask) +{ + reg_set16((reg_offset * PHY_SHFT(USB3) + addr), data, mask); +} + +static void comphy_sgmii_phy_init(uintptr_t sd_ip_addr, bool is_1gbps) +{ + const int fix_arr_sz = ARRAY_SIZE(sgmii_phy_init_fix); + int addr, fix_idx; + uint16_t val; + + fix_idx = 0; + for (addr = 0; addr < 512; addr++) { + /* + * All PHY register values are defined in full for 3.125Gbps + * SERDES speed. The values required for 1.25 Gbps are almost + * the same and only few registers should be "fixed" in + * comparison to 3.125 Gbps values. These register values are + * stored in "sgmii_phy_init_fix" array. 
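+		 * The loop below walks all 512 PHY registers and, for the
+		 * 3.125 Gbps case, substitutes the value from that array
+		 * whenever the current address matches its next entry.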
+ */ + if (!is_1gbps && sgmii_phy_init_fix[fix_idx].addr == addr) { + /* Use new value */ + val = sgmii_phy_init_fix[fix_idx].value; + if (fix_idx < fix_arr_sz) + fix_idx++; + } else { + val = sgmii_phy_init[addr]; + } + + reg_set16(SGMIIPHY_ADDR(addr, sd_ip_addr), val, 0xFFFF); + } +} + +static int mvebu_a3700_comphy_sata_power_on(uint8_t comphy_index, + uint32_t comphy_mode) +{ + int ret; + uint32_t offset, data = 0, ref_clk; + uintptr_t comphy_indir_regs = COMPHY_INDIRECT_REG; + int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode); + + debug_enter(); + + /* Configure phy selector for SATA */ + ret = mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode); + if (ret) { + return ret; + } + + /* Clear phy isolation mode to make it work in normal mode */ + offset = COMPHY_ISOLATION_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET; + comphy_sata_set_indirect(comphy_indir_regs, offset, 0, PHY_ISOLATE_MODE); + + /* 0. Check the Polarity invert bits */ + if (invert & COMPHY_POLARITY_TXD_INVERT) + data |= TXD_INVERT_BIT; + if (invert & COMPHY_POLARITY_RXD_INVERT) + data |= RXD_INVERT_BIT; + + offset = COMPHY_SYNC_PATTERN + SATAPHY_LANE2_REG_BASE_OFFSET; + comphy_sata_set_indirect(comphy_indir_regs, offset, data, TXD_INVERT_BIT | + RXD_INVERT_BIT); + + /* 1. Select 40-bit data width width */ + offset = COMPHY_DIG_LOOPBACK_EN + SATAPHY_LANE2_REG_BASE_OFFSET; + comphy_sata_set_indirect(comphy_indir_regs, offset, DATA_WIDTH_40BIT, + SEL_DATA_WIDTH_MASK); + + /* 2. Select reference clock(25M) and PHY mode (SATA) */ + offset = COMPHY_POWER_PLL_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET; + if (get_ref_clk() == 40) + ref_clk = REF_FREF_SEL_SERDES_40MHZ; + else + ref_clk = REF_FREF_SEL_SERDES_25MHZ; + + comphy_sata_set_indirect(comphy_indir_regs, offset, ref_clk | PHY_MODE_SATA, + REF_FREF_SEL_MASK | PHY_MODE_MASK); + + /* 3. Use maximum PLL rate (no power save) */ + offset = COMPHY_KVCO_CAL_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET; + comphy_sata_set_indirect(comphy_indir_regs, offset, USE_MAX_PLL_RATE_BIT, + USE_MAX_PLL_RATE_BIT); + + /* 4. Reset reserved bit */ + comphy_sata_set_indirect(comphy_indir_regs, COMPHY_RESERVED_REG, 0, + PHYCTRL_FRM_PIN_BIT); + + /* 5. Set vendor-specific configuration (It is done in sata driver) */ + /* XXX: in U-Boot below sequence was executed in this place, in Linux + * not. Now it is done only in U-Boot before this comphy + * initialization - tests shows that it works ok, but in case of any + * future problem it is left for reference. 
+ * reg_set(MVEBU_REGS_BASE + 0xe00a0, 0, 0xffffffff); + * reg_set(MVEBU_REGS_BASE + 0xe00a4, BIT(6), BIT(6)); + */ + + /* Wait for > 55 us to allow PLL be enabled */ + udelay(PLL_SET_DELAY_US); + + /* Polling status */ + mmio_write_32(comphy_indir_regs + COMPHY_LANE2_INDIR_ADDR_OFFSET, + COMPHY_DIG_LOOPBACK_EN + SATAPHY_LANE2_REG_BASE_OFFSET); + + ret = polling_with_timeout(comphy_indir_regs + + COMPHY_LANE2_INDIR_DATA_OFFSET, + PLL_READY_TX_BIT, PLL_READY_TX_BIT, + COMPHY_PLL_TIMEOUT, REG_32BIT); + if (ret) { + return -ETIMEDOUT; + } + + debug_exit(); + + return 0; +} + +static int mvebu_a3700_comphy_sgmii_power_on(uint8_t comphy_index, + uint32_t comphy_mode) +{ + int ret; + uint32_t mask, data; + uintptr_t offset; + uintptr_t sd_ip_addr; + int mode = COMPHY_GET_MODE(comphy_mode); + int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode); + + debug_enter(); + + /* Set selector */ + ret = mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode); + if (ret) { + return ret; + } + + /* Serdes IP Base address + * COMPHY Lane0 -- USB3/GBE1 + * COMPHY Lane1 -- PCIe/GBE0 + */ + if (comphy_index == COMPHY_LANE0) { + /* Get usb3 and gbe */ + sd_ip_addr = USB3_GBE1_PHY; + } else + sd_ip_addr = COMPHY_SD_ADDR; + + /* + * 1. Reset PHY by setting PHY input port PIN_RESET=1. + * 2. Set PHY input port PIN_TX_IDLE=1, PIN_PU_IVREF=1 to keep + * PHY TXP/TXN output to idle state during PHY initialization + * 3. Set PHY input port PIN_PU_PLL=0, PIN_PU_RX=0, PIN_PU_TX=0. + */ + data = PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT | PIN_RESET_COMPHY_BIT; + mask = data | PIN_RESET_CORE_BIT | PIN_PU_PLL_BIT | PIN_PU_RX_BIT | + PIN_PU_TX_BIT; + offset = MVEBU_COMPHY_REG_BASE + COMPHY_PHY_CFG1_OFFSET(comphy_index); + reg_set(offset, data, mask); + + /* 4. Release reset to the PHY by setting PIN_RESET=0. */ + data = 0; + mask = PIN_RESET_COMPHY_BIT; + reg_set(offset, data, mask); + + /* + * 5. Set PIN_PHY_GEN_TX[3:0] and PIN_PHY_GEN_RX[3:0] to decide COMPHY + * bit rate + */ + if (mode == COMPHY_SGMII_MODE) { + /* SGMII 1G, SerDes speed 1.25G */ + data |= SD_SPEED_1_25_G << GEN_RX_SEL_OFFSET; + data |= SD_SPEED_1_25_G << GEN_TX_SEL_OFFSET; + } else if (mode == COMPHY_2500BASEX_MODE) { + /* 2500Base-X, SerDes speed 3.125G */ + data |= SD_SPEED_3_125_G << GEN_RX_SEL_OFFSET; + data |= SD_SPEED_3_125_G << GEN_TX_SEL_OFFSET; + } else { + /* Other rates are not supported */ + ERROR("unsupported SGMII speed on comphy lane%d\n", + comphy_index); + return -EINVAL; + } + mask = GEN_RX_SEL_MASK | GEN_TX_SEL_MASK; + reg_set(offset, data, mask); + + /* + * 6. Wait 10mS for bandgap and reference clocks to stabilize; then + * start SW programming. + */ + mdelay(10); + + /* 7. Program COMPHY register PHY_MODE */ + data = PHY_MODE_SGMII; + mask = PHY_MODE_MASK; + reg_set16(SGMIIPHY_ADDR(COMPHY_POWER_PLL_CTRL, sd_ip_addr), data, mask); + + /* + * 8. Set COMPHY register REFCLK_SEL to select the correct REFCLK + * source + */ + data = 0; + mask = PHY_REF_CLK_SEL; + reg_set16(SGMIIPHY_ADDR(COMPHY_MISC_CTRL0, sd_ip_addr), data, mask); + + /* + * 9. Set correct reference clock frequency in COMPHY register + * REF_FREF_SEL. + */ + if (get_ref_clk() == 40) + data = REF_FREF_SEL_SERDES_50MHZ; + else + data = REF_FREF_SEL_SERDES_25MHZ; + + mask = REF_FREF_SEL_MASK; + reg_set16(SGMIIPHY_ADDR(COMPHY_POWER_PLL_CTRL, sd_ip_addr), data, mask); + + /* 10. Program COMPHY register PHY_GEN_MAX[1:0] + * This step is mentioned in the flow received from verification team. 
+ * However the PHY_GEN_MAX value is only meaningful for other interfaces + * (not SGMII). For instance, it selects SATA speed 1.5/3/6 Gbps or PCIe + * speed 2.5/5 Gbps + */ + + /* + * 11. Program COMPHY register SEL_BITS to set correct parallel data + * bus width + */ + data = DATA_WIDTH_10BIT; + mask = SEL_DATA_WIDTH_MASK; + reg_set16(SGMIIPHY_ADDR(COMPHY_DIG_LOOPBACK_EN, sd_ip_addr), + data, mask); + + /* + * 12. As long as DFE function needs to be enabled in any mode, + * COMPHY register DFE_UPDATE_EN[5:0] shall be programmed to 0x3F + * for real chip during COMPHY power on. + * The step 14 exists (and empty) in the original initialization flow + * obtained from the verification team. According to the functional + * specification DFE_UPDATE_EN already has the default value 0x3F + */ + + /* + * 13. Program COMPHY GEN registers. + * These registers should be programmed based on the lab testing result + * to achieve optimal performance. Please contact the CEA group to get + * the related GEN table during real chip bring-up. We only required to + * run though the entire registers programming flow defined by + * "comphy_sgmii_phy_init" when the REF clock is 40 MHz. For REF clock + * 25 MHz the default values stored in PHY registers are OK. + */ + debug("Running C-DPI phy init %s mode\n", + mode == COMPHY_2500BASEX_MODE ? "2G5" : "1G"); + if (get_ref_clk() == 40) + comphy_sgmii_phy_init(sd_ip_addr, mode != COMPHY_2500BASEX_MODE); + + /* + * 14. [Simulation Only] should not be used for real chip. + * By pass power up calibration by programming EXT_FORCE_CAL_DONE + * (R02h[9]) to 1 to shorten COMPHY simulation time. + */ + + /* + * 15. [Simulation Only: should not be used for real chip] + * Program COMPHY register FAST_DFE_TIMER_EN=1 to shorten RX training + * simulation time. + */ + + /* + * 16. Check the PHY Polarity invert bit + */ + data = 0x0; + if (invert & COMPHY_POLARITY_TXD_INVERT) + data |= TXD_INVERT_BIT; + if (invert & COMPHY_POLARITY_RXD_INVERT) + data |= RXD_INVERT_BIT; + mask = TXD_INVERT_BIT | RXD_INVERT_BIT; + reg_set16(SGMIIPHY_ADDR(COMPHY_SYNC_PATTERN, sd_ip_addr), data, mask); + + /* + * 17. Set PHY input ports PIN_PU_PLL, PIN_PU_TX and PIN_PU_RX to 1 to + * start PHY power up sequence. All the PHY register programming should + * be done before PIN_PU_PLL=1. There should be no register programming + * for normal PHY operation from this point. + */ + reg_set(MVEBU_COMPHY_REG_BASE + COMPHY_PHY_CFG1_OFFSET(comphy_index), + PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT, + PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT); + + /* + * 18. Wait for PHY power up sequence to finish by checking output ports + * PIN_PLL_READY_TX=1 and PIN_PLL_READY_RX=1. + */ + ret = polling_with_timeout(MVEBU_COMPHY_REG_BASE + + COMPHY_PHY_STATUS_OFFSET(comphy_index), + PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT, + PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT, + COMPHY_PLL_TIMEOUT, REG_32BIT); + if (ret) { + ERROR("Failed to lock PLL for SGMII PHY %d\n", comphy_index); + return -ETIMEDOUT; + } + + /* + * 19. Set COMPHY input port PIN_TX_IDLE=0 + */ + reg_set(MVEBU_COMPHY_REG_BASE + COMPHY_PHY_CFG1_OFFSET(comphy_index), + 0x0, PIN_TX_IDLE_BIT); + + /* + * 20. After valid data appear on PIN_RXDATA bus, set PIN_RX_INIT=1. To + * start RX initialization. PIN_RX_INIT_DONE will be cleared to 0 by the + * PHY After RX initialization is done, PIN_RX_INIT_DONE will be set to + * 1 by COMPHY Set PIN_RX_INIT=0 after PIN_RX_INIT_DONE= 1. Please + * refer to RX initialization part for details. 
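+	 * The register write and polling loops below implement that
+	 * handshake: assert PHY_RX_INIT, wait for TX/RX PLL ready, then
+	 * wait for PIN_RX_INIT_DONE.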
+ */ + reg_set(MVEBU_COMPHY_REG_BASE + COMPHY_PHY_CFG1_OFFSET(comphy_index), + PHY_RX_INIT_BIT, PHY_RX_INIT_BIT); + + ret = polling_with_timeout(MVEBU_COMPHY_REG_BASE + + COMPHY_PHY_STATUS_OFFSET(comphy_index), + PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT, + PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT, + COMPHY_PLL_TIMEOUT, REG_32BIT); + if (ret) { + ERROR("Failed to lock PLL for SGMII PHY %d\n", comphy_index); + return -ETIMEDOUT; + } + + ret = polling_with_timeout(MVEBU_COMPHY_REG_BASE + + COMPHY_PHY_STATUS_OFFSET(comphy_index), + PHY_RX_INIT_DONE_BIT, PHY_RX_INIT_DONE_BIT, + COMPHY_PLL_TIMEOUT, REG_32BIT); + if (ret) { + ERROR("Failed to init RX of SGMII PHY %d\n", comphy_index); + return -ETIMEDOUT; + } + + debug_exit(); + + return 0; +} + +static int mvebu_a3700_comphy_sgmii_power_off(uint8_t comphy_index) +{ + uintptr_t offset; + uint32_t mask, data; + + debug_enter(); + + data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT; + mask = data; + offset = MVEBU_COMPHY_REG_BASE + COMPHY_PHY_CFG1_OFFSET(comphy_index); + reg_set(offset, data, mask); + + debug_exit(); + + return 0; +} + +static int mvebu_a3700_comphy_usb3_power_on(uint8_t comphy_index, + uint32_t comphy_mode) +{ + int ret; + uintptr_t reg_base = 0; + uintptr_t addr; + uint32_t mask, data, cfg, ref_clk; + void (*usb3_reg_set)(uintptr_t addr, uint32_t reg_offset, uint16_t data, + uint16_t mask); + int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode); + + debug_enter(); + + /* Set phy seclector */ + ret = mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode); + if (ret) { + return ret; + } + + /* Set usb3 reg access func, Lane2 is indirect access */ + if (comphy_index == COMPHY_LANE2) { + usb3_reg_set = &comphy_usb3_set_indirect; + reg_base = COMPHY_INDIRECT_REG; + } else { + /* Get the direct access register resource and map */ + usb3_reg_set = &comphy_usb3_set_direct; + reg_base = USB3_GBE1_PHY; + } + + /* + * 0. Set PHY OTG Control(0x5d034), bit 4, Power up OTG module The + * register belong to UTMI module, so it is set in UTMI phy driver. + */ + + /* + * 1. Set PRD_TXDEEMPH (3.5db de-emph) + */ + mask = PRD_TXDEEMPH0_MASK | PRD_TXMARGIN_MASK | PRD_TXSWING_MASK | + CFG_TX_ALIGN_POS_MASK; + usb3_reg_set(reg_base, COMPHY_LANE_CFG0, PRD_TXDEEMPH0_MASK, mask); + + /* + * 2. Set BIT0: enable transmitter in high impedance mode + * Set BIT[3:4]: delay 2 clock cycles for HiZ off latency + * Set BIT6: Tx detect Rx at HiZ mode + * Unset BIT15: set to 0 to set USB3 De-emphasize level to -3.5db + * together with bit 0 of COMPHY_LANE_CFG0 register + */ + mask = PRD_TXDEEMPH1_MASK | TX_DET_RX_MODE | GEN2_TX_DATA_DLY_MASK | + TX_ELEC_IDLE_MODE_EN; + data = TX_DET_RX_MODE | GEN2_TX_DATA_DLY_DEFT | TX_ELEC_IDLE_MODE_EN; + usb3_reg_set(reg_base, COMPHY_LANE_CFG1, data, mask); + + /* + * 3. Set Spread Spectrum Clock Enabled + */ + usb3_reg_set(reg_base, COMPHY_LANE_CFG4, + SPREAD_SPECTRUM_CLK_EN, SPREAD_SPECTRUM_CLK_EN); + + /* + * 4. Set Override Margining Controls From the MAC: + * Use margining signals from lane configuration + */ + usb3_reg_set(reg_base, COMPHY_TEST_MODE_CTRL, + MODE_MARGIN_OVERRIDE, REG_16_BIT_MASK); + + /* + * 5. Set Lane-to-Lane Bundle Clock Sampling Period = per PCLK cycles + * set Mode Clock Source = PCLK is generated from REFCLK + */ + usb3_reg_set(reg_base, COMPHY_CLK_SRC_LO, 0x0, + (MODE_CLK_SRC | BUNDLE_PERIOD_SEL | + BUNDLE_PERIOD_SCALE_MASK | BUNDLE_SAMPLE_CTRL | + PLL_READY_DLY_MASK)); + + /* + * 6. 
Set G2 Spread Spectrum Clock Amplitude at 4K + */ + usb3_reg_set(reg_base, COMPHY_GEN2_SET2, + GS2_TX_SSC_AMP_VALUE_20, GS2_TX_SSC_AMP_MASK); + + /* + * 7. Unset G3 Spread Spectrum Clock Amplitude + * set G3 TX and RX Register Master Current Select + */ + mask = GS2_TX_SSC_AMP_MASK | GS2_VREG_RXTX_MAS_ISET_MASK | + GS2_RSVD_6_0_MASK; + usb3_reg_set(reg_base, COMPHY_GEN3_SET2, + GS2_VREG_RXTX_MAS_ISET_60U, mask); + + /* + * 8. Check crystal jumper setting and program the Power and PLL Control + * accordingly Change RX wait + */ + if (get_ref_clk() == 40) { + ref_clk = REF_FREF_SEL_PCIE_USB3_40MHZ; + cfg = CFG_PM_RXDLOZ_WAIT_12_UNIT; + + } else { + /* 25 MHz */ + ref_clk = REF_FREF_SEL_PCIE_USB3_25MHZ; + cfg = CFG_PM_RXDLOZ_WAIT_7_UNIT; + } + + mask = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | + PU_TX_INTP_BIT | PU_DFE_BIT | PLL_LOCK_BIT | PHY_MODE_MASK | + REF_FREF_SEL_MASK; + data = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | + PU_TX_INTP_BIT | PU_DFE_BIT | PHY_MODE_USB3 | ref_clk; + usb3_reg_set(reg_base, COMPHY_POWER_PLL_CTRL, data, mask); + + mask = CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK | + CFG_PM_RXDLOZ_WAIT_MASK; + data = CFG_PM_RXDEN_WAIT_1_UNIT | cfg; + usb3_reg_set(reg_base, COMPHY_PWR_MGM_TIM1, data, mask); + + /* + * 9. Enable idle sync + */ + data = IDLE_SYNC_EN_DEFAULT_VALUE | IDLE_SYNC_EN; + usb3_reg_set(reg_base, COMPHY_IDLE_SYNC_EN, data, REG_16_BIT_MASK); + + /* + * 10. Enable the output of 500M clock + */ + data = MISC_CTRL0_DEFAULT_VALUE | CLK500M_EN; + usb3_reg_set(reg_base, COMPHY_MISC_CTRL0, data, REG_16_BIT_MASK); + + /* + * 11. Set 20-bit data width + */ + usb3_reg_set(reg_base, COMPHY_DIG_LOOPBACK_EN, DATA_WIDTH_20BIT, + REG_16_BIT_MASK); + + /* + * 12. Override Speed_PLL value and use MAC PLL + */ + usb3_reg_set(reg_base, COMPHY_KVCO_CAL_CTRL, + (SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT), + REG_16_BIT_MASK); + + /* + * 13. Check the Polarity invert bit + */ + data = 0U; + if (invert & COMPHY_POLARITY_TXD_INVERT) { + data |= TXD_INVERT_BIT; + } + if (invert & COMPHY_POLARITY_RXD_INVERT) { + data |= RXD_INVERT_BIT; + } + mask = TXD_INVERT_BIT | RXD_INVERT_BIT; + usb3_reg_set(reg_base, COMPHY_SYNC_PATTERN, data, mask); + + /* + * 14. Set max speed generation to USB3.0 5Gbps + */ + usb3_reg_set(reg_base, COMPHY_SYNC_MASK_GEN, PHY_GEN_MAX_USB3_5G, + PHY_GEN_MAX_MASK); + + /* + * 15. Set capacitor value for FFE gain peaking to 0xF + */ + usb3_reg_set(reg_base, COMPHY_GEN2_SET3, + GS3_FFE_CAP_SEL_VALUE, GS3_FFE_CAP_SEL_MASK); + + /* + * 16. 
Release SW reset + */ + data = MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32 | MODE_REFDIV_BY_4; + usb3_reg_set(reg_base, COMPHY_RST_CLK_CTRL, data, REG_16_BIT_MASK); + + /* Wait for > 55 us to allow PCLK be enabled */ + udelay(PLL_SET_DELAY_US); + + if (comphy_index == COMPHY_LANE2) { + data = COMPHY_LANE_STAT1 + USB3PHY_LANE2_REG_BASE_OFFSET; + mmio_write_32(reg_base + COMPHY_LANE2_INDIR_ADDR_OFFSET, + data); + + addr = reg_base + COMPHY_LANE2_INDIR_DATA_OFFSET; + ret = polling_with_timeout(addr, TXDCLK_PCLK_EN, TXDCLK_PCLK_EN, + COMPHY_PLL_TIMEOUT, REG_32BIT); + } else { + ret = polling_with_timeout(LANE_STAT1_ADDR(USB3) + reg_base, + TXDCLK_PCLK_EN, TXDCLK_PCLK_EN, + COMPHY_PLL_TIMEOUT, REG_16BIT); + } + if (ret) { + ERROR("Failed to lock USB3 PLL\n"); + return -ETIMEDOUT; + } + + debug_exit(); + + return 0; +} + +static int mvebu_a3700_comphy_pcie_power_on(uint8_t comphy_index, + uint32_t comphy_mode) +{ + int ret; + uint32_t ref_clk; + uint32_t mask, data; + int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode); + + debug_enter(); + + /* Configure phy selector for PCIe */ + ret = mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode); + if (ret) { + return ret; + } + + /* 1. Enable max PLL. */ + reg_set16(LANE_CFG1_ADDR(PCIE) + COMPHY_SD_ADDR, + USE_MAX_PLL_RATE_EN, USE_MAX_PLL_RATE_EN); + + /* 2. Select 20 bit SERDES interface. */ + reg_set16(CLK_SRC_LO_ADDR(PCIE) + COMPHY_SD_ADDR, + CFG_SEL_20B, CFG_SEL_20B); + + /* 3. Force to use reg setting for PCIe mode */ + reg_set16(MISC_CTRL1_ADDR(PCIE) + COMPHY_SD_ADDR, + SEL_BITS_PCIE_FORCE, SEL_BITS_PCIE_FORCE); + + /* 4. Change RX wait */ + reg_set16(PWR_MGM_TIM1_ADDR(PCIE) + COMPHY_SD_ADDR, + CFG_PM_RXDEN_WAIT_1_UNIT | CFG_PM_RXDLOZ_WAIT_12_UNIT, + (CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK | + CFG_PM_RXDLOZ_WAIT_MASK)); + + /* 5. Enable idle sync */ + reg_set16(IDLE_SYNC_EN_ADDR(PCIE) + COMPHY_SD_ADDR, + IDLE_SYNC_EN_DEFAULT_VALUE | IDLE_SYNC_EN, REG_16_BIT_MASK); + + /* 6. Enable the output of 100M/125M/500M clock */ + reg_set16(MISC_CTRL0_ADDR(PCIE) + COMPHY_SD_ADDR, + MISC_CTRL0_DEFAULT_VALUE | CLK500M_EN | TXDCLK_2X_SEL | CLK100M_125M_EN, + REG_16_BIT_MASK); + + /* + * 7. Enable TX, PCIE global register, 0xd0074814, it is done in + * PCI-E driver + */ + + /* + * 8. Check crystal jumper setting and program the Power and PLL + * Control accordingly + */ + + if (get_ref_clk() == 40) + ref_clk = REF_FREF_SEL_PCIE_USB3_40MHZ; + else + ref_clk = REF_FREF_SEL_PCIE_USB3_25MHZ; + + reg_set16(PWR_PLL_CTRL_ADDR(PCIE) + COMPHY_SD_ADDR, + (PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | + PU_TX_INTP_BIT | PU_DFE_BIT | ref_clk | PHY_MODE_PCIE), + REG_16_BIT_MASK); + + /* 9. Override Speed_PLL value and use MAC PLL */ + reg_set16(KVCO_CAL_CTRL_ADDR(PCIE) + COMPHY_SD_ADDR, + SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT, REG_16_BIT_MASK); + + /* 10. Check the Polarity invert bit */ + data = 0U; + if (invert & COMPHY_POLARITY_TXD_INVERT) { + data |= TXD_INVERT_BIT; + } + if (invert & COMPHY_POLARITY_RXD_INVERT) { + data |= RXD_INVERT_BIT; + } + mask = TXD_INVERT_BIT | RXD_INVERT_BIT; + reg_set16(SYNC_PATTERN_ADDR(PCIE) + COMPHY_SD_ADDR, data, mask); + + /* 11. 
Release SW reset */ + data = MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32; + mask = data | SOFT_RESET | MODE_REFDIV_MASK; + reg_set16(RST_CLK_CTRL_ADDR(PCIE) + COMPHY_SD_ADDR, data, mask); + + /* Wait for > 55 us to allow PCLK be enabled */ + udelay(PLL_SET_DELAY_US); + + ret = polling_with_timeout(LANE_STAT1_ADDR(PCIE) + COMPHY_SD_ADDR, + TXDCLK_PCLK_EN, TXDCLK_PCLK_EN, + COMPHY_PLL_TIMEOUT, REG_16BIT); + if (ret) { + ERROR("Failed to lock PCIE PLL\n"); + return -ETIMEDOUT; + } + + debug_exit(); + + return 0; +} + +int mvebu_3700_comphy_power_on(uint8_t comphy_index, uint32_t comphy_mode) +{ + int mode = COMPHY_GET_MODE(comphy_mode); + int ret = 0; + + debug_enter(); + + switch (mode) { + case(COMPHY_SATA_MODE): + ret = mvebu_a3700_comphy_sata_power_on(comphy_index, + comphy_mode); + break; + case(COMPHY_SGMII_MODE): + case(COMPHY_2500BASEX_MODE): + ret = mvebu_a3700_comphy_sgmii_power_on(comphy_index, + comphy_mode); + break; + case (COMPHY_USB3_MODE): + case (COMPHY_USB3H_MODE): + ret = mvebu_a3700_comphy_usb3_power_on(comphy_index, + comphy_mode); + break; + case (COMPHY_PCIE_MODE): + ret = mvebu_a3700_comphy_pcie_power_on(comphy_index, + comphy_mode); + break; + default: + ERROR("comphy%d: unsupported comphy mode\n", comphy_index); + ret = -EINVAL; + break; + } + + debug_exit(); + + return ret; +} + +static int mvebu_a3700_comphy_usb3_power_off(void) +{ + /* + * Currently the USB3 MAC will control the USB3 PHY to set it to low + * state, thus do not need to power off USB3 PHY again. + */ + debug_enter(); + debug_exit(); + + return 0; +} + +static int mvebu_a3700_comphy_sata_power_off(void) +{ + uintptr_t comphy_indir_regs = COMPHY_INDIRECT_REG; + uint32_t offset; + + debug_enter(); + + /* Set phy isolation mode */ + offset = COMPHY_ISOLATION_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET; + comphy_sata_set_indirect(comphy_indir_regs, offset, PHY_ISOLATE_MODE, + PHY_ISOLATE_MODE); + + /* Power off PLL, Tx, Rx */ + offset = COMPHY_POWER_PLL_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET; + comphy_sata_set_indirect(comphy_indir_regs, offset, 0, + PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT); + + debug_exit(); + + return 0; +} + +int mvebu_3700_comphy_power_off(uint8_t comphy_index, uint32_t comphy_mode) +{ + int mode = COMPHY_GET_MODE(comphy_mode); + int err = 0; + + debug_enter(); + + if (!mode) { + /* + * The user did not specify which mode should be powered off. + * In this case we can identify this by reading the phy selector + * register. 
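+		 * mvebu_a3700_comphy_get_mode() below maps the selector bits
+		 * back to a COMPHY_*_MODE value for this lane.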
+ */ + mode = mvebu_a3700_comphy_get_mode(comphy_index); + } + + switch (mode) { + case(COMPHY_SGMII_MODE): + case(COMPHY_2500BASEX_MODE): + err = mvebu_a3700_comphy_sgmii_power_off(comphy_index); + break; + case (COMPHY_USB3_MODE): + case (COMPHY_USB3H_MODE): + err = mvebu_a3700_comphy_usb3_power_off(); + break; + case (COMPHY_SATA_MODE): + err = mvebu_a3700_comphy_sata_power_off(); + break; + + default: + debug("comphy%d: power off is not implemented for mode %d\n", + comphy_index, mode); + break; + } + + debug_exit(); + + return err; +} + +static int mvebu_a3700_comphy_sata_is_pll_locked(void) +{ + uint32_t data, addr; + uintptr_t comphy_indir_regs = COMPHY_INDIRECT_REG; + int ret = 0; + + debug_enter(); + + /* Polling status */ + mmio_write_32(comphy_indir_regs + COMPHY_LANE2_INDIR_ADDR_OFFSET, + COMPHY_DIG_LOOPBACK_EN + SATAPHY_LANE2_REG_BASE_OFFSET); + addr = comphy_indir_regs + COMPHY_LANE2_INDIR_DATA_OFFSET; + data = polling_with_timeout(addr, PLL_READY_TX_BIT, PLL_READY_TX_BIT, + COMPHY_PLL_TIMEOUT, REG_32BIT); + + if (data != 0) { + ERROR("TX PLL is not locked\n"); + ret = -ETIMEDOUT; + } + + debug_exit(); + + return ret; +} + +int mvebu_3700_comphy_is_pll_locked(uint8_t comphy_index, uint32_t comphy_mode) +{ + int mode = COMPHY_GET_MODE(comphy_mode); + int ret = 0; + + debug_enter(); + + switch (mode) { + case(COMPHY_SATA_MODE): + ret = mvebu_a3700_comphy_sata_is_pll_locked(); + break; + + default: + ERROR("comphy[%d] mode[%d] doesn't support PLL lock check\n", + comphy_index, mode); + ret = -EINVAL; + break; + } + + debug_exit(); + + return ret; +} diff --git a/drivers/marvell/comphy/phy-comphy-3700.h b/drivers/marvell/comphy/phy-comphy-3700.h new file mode 100644 index 0000000..ed07624 --- /dev/null +++ b/drivers/marvell/comphy/phy-comphy-3700.h @@ -0,0 +1,249 @@ +/* + * Copyright (C) 2018-2021 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#ifndef PHY_COMPHY_3700_H +#define PHY_COMPHY_3700_H + +#define PLL_SET_DELAY_US 600 +#define COMPHY_PLL_TIMEOUT 1000 +#define REG_16_BIT_MASK 0xFFFF + +#define COMPHY_SELECTOR_PHY_REG 0xFC +/* bit0: 0: Lane1 is GbE0; 1: Lane1 is PCIE */ +#define COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT BIT(0) +/* bit4: 0: Lane0 is GbE1; 1: Lane0 is USB3 */ +#define COMPHY_SELECTOR_USB3_GBE1_SEL_BIT BIT(4) +/* bit8: 0: Lane0 is USB3 instead of GbE1, Lane2 is SATA; 1: Lane2 is USB3 */ +#define COMPHY_SELECTOR_USB3_PHY_SEL_BIT BIT(8) + +/* SATA PHY register offset */ +#define SATAPHY_LANE2_REG_BASE_OFFSET 0x200 + +/* USB3 PHY offset compared to SATA PHY */ +#define USB3PHY_LANE2_REG_BASE_OFFSET 0x200 + +/* Comphy lane2 indirect access register offset */ +#define COMPHY_LANE2_INDIR_ADDR_OFFSET 0x0 +#define COMPHY_LANE2_INDIR_DATA_OFFSET 0x4 + +/* PHY shift to get related register address */ +enum { + PCIE = 1, + USB3, +}; + +#define PCIEPHY_SHFT 2 +#define USB3PHY_SHFT 2 +#define PHY_SHFT(unit) ((unit == PCIE) ? 
PCIEPHY_SHFT : USB3PHY_SHFT) + +/* PHY register */ +#define COMPHY_POWER_PLL_CTRL 0x01 +#define PWR_PLL_CTRL_ADDR(unit) (COMPHY_POWER_PLL_CTRL * PHY_SHFT(unit)) +#define PU_IVREF_BIT BIT(15) +#define PU_PLL_BIT BIT(14) +#define PU_RX_BIT BIT(13) +#define PU_TX_BIT BIT(12) +#define PU_TX_INTP_BIT BIT(11) +#define PU_DFE_BIT BIT(10) +#define RESET_DTL_RX_BIT BIT(9) +#define PLL_LOCK_BIT BIT(8) +#define REF_FREF_SEL_OFFSET 0 +#define REF_FREF_SEL_MASK (0x1F << REF_FREF_SEL_OFFSET) +#define REF_FREF_SEL_SERDES_25MHZ (0x1 << REF_FREF_SEL_OFFSET) +#define REF_FREF_SEL_SERDES_40MHZ (0x3 << REF_FREF_SEL_OFFSET) +#define REF_FREF_SEL_SERDES_50MHZ (0x4 << REF_FREF_SEL_OFFSET) +#define REF_FREF_SEL_PCIE_USB3_25MHZ (0x2 << REF_FREF_SEL_OFFSET) +#define REF_FREF_SEL_PCIE_USB3_40MHZ (0x3 << REF_FREF_SEL_OFFSET) +#define PHY_MODE_OFFSET 5 +#define PHY_MODE_MASK (7 << PHY_MODE_OFFSET) +#define PHY_MODE_SATA (0x0 << PHY_MODE_OFFSET) +#define PHY_MODE_PCIE (0x3 << PHY_MODE_OFFSET) +#define PHY_MODE_SGMII (0x4 << PHY_MODE_OFFSET) +#define PHY_MODE_USB3 (0x5 << PHY_MODE_OFFSET) + +#define COMPHY_KVCO_CAL_CTRL 0x02 +#define KVCO_CAL_CTRL_ADDR(unit) (COMPHY_KVCO_CAL_CTRL * PHY_SHFT(unit)) +#define USE_MAX_PLL_RATE_BIT BIT(12) +#define SPEED_PLL_OFFSET 2 +#define SPEED_PLL_MASK (0x3F << SPEED_PLL_OFFSET) +#define SPEED_PLL_VALUE_16 (0x10 << SPEED_PLL_OFFSET) + +#define COMPHY_DIG_LOOPBACK_EN 0x23 +#define DIG_LOOPBACK_EN_ADDR(unit) (COMPHY_DIG_LOOPBACK_EN * \ + PHY_SHFT(unit)) +#define SEL_DATA_WIDTH_OFFSET 10 +#define SEL_DATA_WIDTH_MASK (0x3 << SEL_DATA_WIDTH_OFFSET) +#define DATA_WIDTH_10BIT (0x0 << SEL_DATA_WIDTH_OFFSET) +#define DATA_WIDTH_20BIT (0x1 << SEL_DATA_WIDTH_OFFSET) +#define DATA_WIDTH_40BIT (0x2 << SEL_DATA_WIDTH_OFFSET) +#define PLL_READY_TX_BIT BIT(4) + +#define COMPHY_SYNC_PATTERN 0x24 +#define SYNC_PATTERN_ADDR(unit) (COMPHY_SYNC_PATTERN * PHY_SHFT(unit)) +#define TXD_INVERT_BIT BIT(10) +#define RXD_INVERT_BIT BIT(11) + +#define COMPHY_SYNC_MASK_GEN 0x25 +#define PHY_GEN_MAX_OFFSET 10 +#define PHY_GEN_MAX_MASK (3 << PHY_GEN_MAX_OFFSET) +#define PHY_GEN_MAX_USB3_5G (1 << PHY_GEN_MAX_OFFSET) + +#define COMPHY_ISOLATION_CTRL 0x26 +#define ISOLATION_CTRL_ADDR(unit) (COMPHY_ISOLATION_REG * PHY_SHFT(unit)) +#define PHY_ISOLATE_MODE BIT(15) + +#define COMPHY_GEN2_SET2 0x3e +#define GEN2_SET2_ADDR(unit) (COMPHY_GEN2_SET2 * PHY_SHFT(unit)) +#define GS2_TX_SSC_AMP_VALUE_20 BIT(14) +#define GS2_TX_SSC_AMP_OFF 9 +#define GS2_TX_SSC_AMP_LEN 7 +#define GS2_TX_SSC_AMP_MASK (((1 << GS2_TX_SSC_AMP_LEN) - 1) << \ + GS2_TX_SSC_AMP_OFF) +#define GS2_VREG_RXTX_MAS_ISET_OFF 7 +#define GS2_VREG_RXTX_MAS_ISET_60U (0 << GS2_VREG_RXTX_MAS_ISET_OFF) +#define GS2_VREG_RXTX_MAS_ISET_80U (1 << GS2_VREG_RXTX_MAS_ISET_OFF) +#define GS2_VREG_RXTX_MAS_ISET_100U (2 << GS2_VREG_RXTX_MAS_ISET_OFF) +#define GS2_VREG_RXTX_MAS_ISET_120U (3 << GS2_VREG_RXTX_MAS_ISET_OFF) +#define GS2_VREG_RXTX_MAS_ISET_MASK (BIT(7) | BIT(8)) +#define GS2_RSVD_6_0_OFF 0 +#define GS2_RSVD_6_0_LEN 7 +#define GS2_RSVD_6_0_MASK (((1 << GS2_RSVD_6_0_LEN) - 1) << \ + GS2_RSVD_6_0_OFF) + +#define COMPHY_GEN3_SET2 0x3f +#define GEN3_SET2_ADDR(unit) (COMPHY_GEN3_SET2 * PHY_SHFT(unit)) + +#define COMPHY_IDLE_SYNC_EN 0x48 +#define IDLE_SYNC_EN_ADDR(unit) (COMPHY_IDLE_SYNC_EN * PHY_SHFT(unit)) +#define IDLE_SYNC_EN BIT(12) +#define IDLE_SYNC_EN_DEFAULT_VALUE 0x60 + +#define COMPHY_MISC_CTRL0 0x4F +#define MISC_CTRL0_ADDR(unit) (COMPHY_MISC_CTRL0 * PHY_SHFT(unit)) +#define CLK100M_125M_EN BIT(4) +#define TXDCLK_2X_SEL BIT(6) +#define CLK500M_EN BIT(7) +#define 
PHY_REF_CLK_SEL BIT(10) +#define MISC_CTRL0_DEFAULT_VALUE 0xA00D + +#define COMPHY_MISC_CTRL1 0x73 +#define MISC_CTRL1_ADDR(unit) (COMPHY_MISC_CTRL1 * PHY_SHFT(unit)) +#define SEL_BITS_PCIE_FORCE BIT(15) + +#define COMPHY_GEN2_SET3 0x112 +#define GS3_FFE_CAP_SEL_MASK 0xF +#define GS3_FFE_CAP_SEL_VALUE 0xF + +#define COMPHY_LANE_CFG0 0x180 +#define LANE_CFG0_ADDR(unit) (COMPHY_LANE_CFG0 * PHY_SHFT(unit)) +#define PRD_TXDEEMPH0_MASK BIT(0) +#define PRD_TXMARGIN_MASK (BIT(1) | BIT(2) | BIT(3)) +#define PRD_TXSWING_MASK BIT(4) +#define CFG_TX_ALIGN_POS_MASK (BIT(5) | BIT(6) | BIT(7) | BIT(8)) + +#define COMPHY_LANE_CFG1 0x181 +#define LANE_CFG1_ADDR(unit) (COMPHY_LANE_CFG1 * PHY_SHFT(unit)) +#define PRD_TXDEEMPH1_MASK BIT(15) +#define USE_MAX_PLL_RATE_EN BIT(9) +#define TX_DET_RX_MODE BIT(6) +#define GEN2_TX_DATA_DLY_MASK (BIT(3) | BIT(4)) +#define GEN2_TX_DATA_DLY_DEFT (2 << 3) +#define TX_ELEC_IDLE_MODE_EN BIT(0) + +#define COMPHY_LANE_STAT1 0x183 +#define LANE_STAT1_ADDR(unit) (COMPHY_LANE_STAT1 * PHY_SHFT(unit)) +#define TXDCLK_PCLK_EN BIT(0) + +#define COMPHY_LANE_CFG4 0x188 +#define LANE_CFG4_ADDR(unit) (COMPHY_LANE_CFG4 * PHY_SHFT(unit)) +#define SPREAD_SPECTRUM_CLK_EN BIT(7) + +#define COMPHY_RST_CLK_CTRL 0x1C1 +#define RST_CLK_CTRL_ADDR(unit) (COMPHY_RST_CLK_CTRL * PHY_SHFT(unit)) +#define SOFT_RESET BIT(0) +#define MODE_CORE_CLK_FREQ_SEL BIT(9) +#define MODE_PIPE_WIDTH_32 BIT(3) +#define MODE_REFDIV_OFFSET 4 +#define MODE_REFDIV_LEN 2 +#define MODE_REFDIV_MASK (0x3 << MODE_REFDIV_OFFSET) +#define MODE_REFDIV_BY_4 (0x2 << MODE_REFDIV_OFFSET) + +#define COMPHY_TEST_MODE_CTRL 0x1C2 +#define TEST_MODE_CTRL_ADDR(unit) (COMPHY_TEST_MODE_CTRL * PHY_SHFT(unit)) +#define MODE_MARGIN_OVERRIDE BIT(2) + +#define COMPHY_CLK_SRC_LO 0x1C3 +#define CLK_SRC_LO_ADDR(unit) (COMPHY_CLK_SRC_LO * PHY_SHFT(unit)) +#define MODE_CLK_SRC BIT(0) +#define BUNDLE_PERIOD_SEL BIT(1) +#define BUNDLE_PERIOD_SCALE_MASK (BIT(2) | BIT(3)) +#define BUNDLE_SAMPLE_CTRL BIT(4) +#define PLL_READY_DLY_MASK (BIT(5) | BIT(6) | BIT(7)) +#define CFG_SEL_20B BIT(15) + +#define COMPHY_PWR_MGM_TIM1 0x1D0 +#define PWR_MGM_TIM1_ADDR(unit) (COMPHY_PWR_MGM_TIM1 * PHY_SHFT(unit)) +#define CFG_PM_OSCCLK_WAIT_OFF 12 +#define CFG_PM_OSCCLK_WAIT_LEN 4 +#define CFG_PM_OSCCLK_WAIT_MASK (((1 << CFG_PM_OSCCLK_WAIT_LEN) - 1) \ + << CFG_PM_OSCCLK_WAIT_OFF) +#define CFG_PM_RXDEN_WAIT_OFF 8 +#define CFG_PM_RXDEN_WAIT_LEN 4 +#define CFG_PM_RXDEN_WAIT_MASK (((1 << CFG_PM_RXDEN_WAIT_LEN) - 1) \ + << CFG_PM_RXDEN_WAIT_OFF) +#define CFG_PM_RXDEN_WAIT_1_UNIT (1 << CFG_PM_RXDEN_WAIT_OFF) +#define CFG_PM_RXDLOZ_WAIT_OFF 0 +#define CFG_PM_RXDLOZ_WAIT_LEN 8 +#define CFG_PM_RXDLOZ_WAIT_MASK (((1 << CFG_PM_RXDLOZ_WAIT_LEN) - 1) \ + << CFG_PM_RXDLOZ_WAIT_OFF) +#define CFG_PM_RXDLOZ_WAIT_7_UNIT (7 << CFG_PM_RXDLOZ_WAIT_OFF) +#define CFG_PM_RXDLOZ_WAIT_12_UNIT (0xC << CFG_PM_RXDLOZ_WAIT_OFF) + +/* + * This register is not from PHY lane register space. It only exists in the + * indirect register space, before the actual PHY lane 2 registers. So the + * offset is absolute, not relative to SATAPHY_LANE2_REG_BASE_OFFSET. + * It is used only for SATA PHY initialization. 
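+ * Illustrative access sketch (the real sequence is in the SATA init
+ * path of phy-comphy-3700.c): write the bare offset to the indirect
+ * address register and then read/modify the data register, e.g.
+ *   mmio_write_32(COMPHY_INDIRECT_REG + COMPHY_LANE2_INDIR_ADDR_OFFSET,
+ *                 COMPHY_RESERVED_REG);
+ *   ...access COMPHY_INDIRECT_REG + COMPHY_LANE2_INDIR_DATA_OFFSET...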
+ */ +#define COMPHY_RESERVED_REG 0x0E +#define PHYCTRL_FRM_PIN_BIT BIT(13) + +/* SGMII */ +#define COMPHY_PHY_CFG1_OFFSET(lane) ((1 - (lane)) * 0x28) +#define PIN_PU_IVREF_BIT BIT(1) +#define PIN_RESET_CORE_BIT BIT(11) +#define PIN_RESET_COMPHY_BIT BIT(12) +#define PIN_PU_PLL_BIT BIT(16) +#define PIN_PU_RX_BIT BIT(17) +#define PIN_PU_TX_BIT BIT(18) +#define PIN_TX_IDLE_BIT BIT(19) +#define GEN_RX_SEL_OFFSET 22 +#define GEN_RX_SEL_MASK (0xF << GEN_RX_SEL_OFFSET) +#define GEN_TX_SEL_OFFSET 26 +#define GEN_TX_SEL_MASK (0xF << GEN_TX_SEL_OFFSET) +#define PHY_RX_INIT_BIT BIT(30) +#define SD_SPEED_1_25_G 0x6 +#define SD_SPEED_3_125_G 0x8 + +/* COMPHY status reg: + * lane0: USB3/GbE1 PHY Status 1 + * lane1: PCIe/GbE0 PHY Status 1 + */ +#define COMPHY_PHY_STATUS_OFFSET(lane) (0x18 + (1 - (lane)) * 0x28) +#define PHY_RX_INIT_DONE_BIT BIT(0) +#define PHY_PLL_READY_RX_BIT BIT(2) +#define PHY_PLL_READY_TX_BIT BIT(3) + +#define SGMIIPHY_ADDR(off, base) ((((off) & 0x00007FF) * 2) + (base)) + +#define MAX_LANE_NR 3 + +/* comphy API */ +int mvebu_3700_comphy_is_pll_locked(uint8_t comphy_index, uint32_t comphy_mode); +int mvebu_3700_comphy_power_off(uint8_t comphy_index, uint32_t comphy_mode); +int mvebu_3700_comphy_power_on(uint8_t comphy_index, uint32_t comphy_mode); +#endif /* PHY_COMPHY_3700_H */ diff --git a/drivers/marvell/comphy/phy-comphy-common.h b/drivers/marvell/comphy/phy-comphy-common.h new file mode 100644 index 0000000..ba5d255 --- /dev/null +++ b/drivers/marvell/comphy/phy-comphy-common.h @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2018-2021 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* Marvell CP110 ana A3700 common */ + +#ifndef PHY_COMPHY_COMMON_H +#define PHY_COMPHY_COMMON_H + +/* #define DEBUG_COMPHY */ +#ifdef DEBUG_COMPHY +#define debug(format...) printf(format) +#else +#define debug(format, arg...) +#endif + +/* A lane is described by 4 fields: + * - bit 1~0 represent comphy polarity invert + * - bit 7~2 represent comphy speed + * - bit 11~8 represent unit index + * - bit 16~12 represent mode + * - bit 17 represent comphy indication of clock source + * - bit 20~18 represents pcie width (in case of pcie comphy config.) + * - bit 21 represents the source of the request (Linux/Bootloader), + * (reguired only for PCIe!) 
+ * - bit 31~22 reserved + */ + +#define COMPHY_INVERT_OFFSET 0 +#define COMPHY_INVERT_LEN 2 +#define COMPHY_INVERT_MASK COMPHY_MASK(COMPHY_INVERT_OFFSET, \ + COMPHY_INVERT_LEN) +#define COMPHY_SPEED_OFFSET (COMPHY_INVERT_OFFSET + COMPHY_INVERT_LEN) +#define COMPHY_SPEED_LEN 6 +#define COMPHY_SPEED_MASK COMPHY_MASK(COMPHY_SPEED_OFFSET, \ + COMPHY_SPEED_LEN) +#define COMPHY_UNIT_ID_OFFSET (COMPHY_SPEED_OFFSET + COMPHY_SPEED_LEN) +#define COMPHY_UNIT_ID_LEN 4 +#define COMPHY_UNIT_ID_MASK COMPHY_MASK(COMPHY_UNIT_ID_OFFSET, \ + COMPHY_UNIT_ID_LEN) +#define COMPHY_MODE_OFFSET (COMPHY_UNIT_ID_OFFSET + COMPHY_UNIT_ID_LEN) +#define COMPHY_MODE_LEN 5 +#define COMPHY_MODE_MASK COMPHY_MASK(COMPHY_MODE_OFFSET, COMPHY_MODE_LEN) +#define COMPHY_CLK_SRC_OFFSET (COMPHY_MODE_OFFSET + COMPHY_MODE_LEN) +#define COMPHY_CLK_SRC_LEN 1 +#define COMPHY_CLK_SRC_MASK COMPHY_MASK(COMPHY_CLK_SRC_OFFSET, \ + COMPHY_CLK_SRC_LEN) +#define COMPHY_PCI_WIDTH_OFFSET (COMPHY_CLK_SRC_OFFSET + COMPHY_CLK_SRC_LEN) +#define COMPHY_PCI_WIDTH_LEN 3 +#define COMPHY_PCI_WIDTH_MASK COMPHY_MASK(COMPHY_PCI_WIDTH_OFFSET, \ + COMPHY_PCI_WIDTH_LEN) +#define COMPHY_PCI_CALLER_OFFSET \ + (COMPHY_PCI_WIDTH_OFFSET + COMPHY_PCI_WIDTH_LEN) +#define COMPHY_PCI_CALLER_LEN 1 +#define COMPHY_PCI_CALLER_MASK COMPHY_MASK(COMPHY_PCI_CALLER_OFFSET, \ + COMPHY_PCI_CALLER_LEN) + +#define COMPHY_MASK(offset, len) (((1 << (len)) - 1) << (offset)) + +/* Macro which extracts mode from lane description */ +#define COMPHY_GET_MODE(x) (((x) & COMPHY_MODE_MASK) >> \ + COMPHY_MODE_OFFSET) +/* Macro which extracts unit index from lane description */ +#define COMPHY_GET_ID(x) (((x) & COMPHY_UNIT_ID_MASK) >> \ + COMPHY_UNIT_ID_OFFSET) +/* Macro which extracts speed from lane description */ +#define COMPHY_GET_SPEED(x) (((x) & COMPHY_SPEED_MASK) >> \ + COMPHY_SPEED_OFFSET) +/* Macro which extracts clock source indication from lane description */ +#define COMPHY_GET_CLK_SRC(x) (((x) & COMPHY_CLK_SRC_MASK) >> \ + COMPHY_CLK_SRC_OFFSET) +/* Macro which extracts pcie width indication from lane description */ +#define COMPHY_GET_PCIE_WIDTH(x) (((x) & COMPHY_PCI_WIDTH_MASK) >> \ + COMPHY_PCI_WIDTH_OFFSET) + +/* Macro which extracts the caller for pcie power on from lane description */ +#define COMPHY_GET_CALLER(x) (((x) & COMPHY_PCI_CALLER_MASK) >> \ + COMPHY_PCI_CALLER_OFFSET) + +/* Macro which extracts the polarity invert from lane description */ +#define COMPHY_GET_POLARITY_INVERT(x) (((x) & COMPHY_INVERT_MASK) >> \ + COMPHY_INVERT_OFFSET) + + +#define COMPHY_SATA_MODE 0x1 +#define COMPHY_SGMII_MODE 0x2 /* SGMII 1G */ +#define COMPHY_2500BASEX_MODE 0x3 /* 2500Base-X */ +#define COMPHY_USB3H_MODE 0x4 +#define COMPHY_USB3D_MODE 0x5 +#define COMPHY_PCIE_MODE 0x6 +#define COMPHY_RXAUI_MODE 0x7 +#define COMPHY_XFI_MODE 0x8 +#define COMPHY_SFI_MODE 0x9 +#define COMPHY_USB3_MODE 0xa +#define COMPHY_AP_MODE 0xb + +#define COMPHY_UNUSED 0xFFFFFFFF + +/* Polarity invert macro */ +#define COMPHY_POLARITY_NO_INVERT 0 +#define COMPHY_POLARITY_TXD_INVERT 1 +#define COMPHY_POLARITY_RXD_INVERT 2 +#define COMPHY_POLARITY_ALL_INVERT (COMPHY_POLARITY_TXD_INVERT | \ + COMPHY_POLARITY_RXD_INVERT) + +enum reg_width_type { + REG_16BIT = 0, + REG_32BIT, +}; + +enum { + COMPHY_LANE0 = 0, + COMPHY_LANE1, + COMPHY_LANE2, + COMPHY_LANE3, + COMPHY_LANE4, + COMPHY_LANE5, + COMPHY_LANE_MAX, +}; + +static inline uint32_t polling_with_timeout(uintptr_t addr, uint32_t val, + uint32_t mask, + uint32_t usec_timeout, + enum reg_width_type type) +{ + uint32_t data; + + do { + udelay(1); + if (type == 
REG_16BIT) + data = mmio_read_16(addr) & mask; + else + data = mmio_read_32(addr) & mask; + } while (data != val && --usec_timeout > 0); + + if (usec_timeout == 0) + return data; + + return 0; +} + +static inline void reg_set(uintptr_t addr, uint32_t data, uint32_t mask) +{ + debug("<atf>: WR to addr = 0x%lx, data = 0x%x (mask = 0x%x) - ", + addr, data, mask); + debug("old value = 0x%x ==> ", mmio_read_32(addr)); + mmio_clrsetbits_32(addr, mask, data & mask); + + debug("new val 0x%x\n", mmio_read_32(addr)); +} + +static inline void __unused reg_set16(uintptr_t addr, uint16_t data, + uint16_t mask) +{ + + debug("<atf>: WR to addr = 0x%lx, data = 0x%x (mask = 0x%x) - ", + addr, data, mask); + debug("old value = 0x%x ==> ", mmio_read_16(addr)); + mmio_clrsetbits_16(addr, mask, data & mask); + + debug("new val 0x%x\n", mmio_read_16(addr)); +} + +#endif /* PHY_COMPHY_COMMON_H */ diff --git a/drivers/marvell/comphy/phy-comphy-cp110.c b/drivers/marvell/comphy/phy-comphy-cp110.c new file mode 100644 index 0000000..e256fa7 --- /dev/null +++ b/drivers/marvell/comphy/phy-comphy-cp110.c @@ -0,0 +1,2528 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* Marvell CP110 SoC COMPHY unit driver */ + +#include <errno.h> +#include <inttypes.h> +#include <stdint.h> + +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <mg_conf_cm3/mg_conf_cm3.h> +#include <lib/mmio.h> +#include <lib/spinlock.h> + +#include <mvebu_def.h> +#include "mvebu.h" +#include "comphy-cp110.h" +#include "phy-comphy-cp110.h" +#include "phy-comphy-common.h" + +#if __has_include("phy-porting-layer.h") +#include "phy-porting-layer.h" +#else +#include "phy-default-porting-layer.h" +#endif + +/* COMPHY speed macro */ +#define COMPHY_SPEED_1_25G 0 /* SGMII 1G */ +#define COMPHY_SPEED_2_5G 1 +#define COMPHY_SPEED_3_125G 2 /* 2500Base-X */ +#define COMPHY_SPEED_5G 3 +#define COMPHY_SPEED_5_15625G 4 /* XFI 5G */ +#define COMPHY_SPEED_6G 5 +#define COMPHY_SPEED_10_3125G 6 /* XFI 10G */ +#define COMPHY_SPEED_MAX 0x3F +/* The default speed for IO with fixed known speed */ +#define COMPHY_SPEED_DEFAULT COMPHY_SPEED_MAX + +/* Commands for comphy driver */ +#define COMPHY_COMMAND_DIGITAL_PWR_OFF 0x00000001 +#define COMPHY_COMMAND_DIGITAL_PWR_ON 0x00000002 + +#define COMPHY_PIPE_FROM_COMPHY_ADDR(x) ((x & ~0xffffff) + 0x120000) + +/* System controller registers */ +#define PCIE_MAC_RESET_MASK_PORT0 BIT(13) +#define PCIE_MAC_RESET_MASK_PORT1 BIT(11) +#define PCIE_MAC_RESET_MASK_PORT2 BIT(12) +#define SYS_CTRL_UINIT_SOFT_RESET_REG 0x268 +#define SYS_CTRL_FROM_COMPHY_ADDR(x) ((x & ~0xffffff) + 0x440000) + +/* DFX register spaces */ +#define SAR_RST_PCIE0_CLOCK_CONFIG_CP0_OFFSET (30) +#define SAR_RST_PCIE0_CLOCK_CONFIG_CP0_MASK (0x1UL << \ + SAR_RST_PCIE0_CLOCK_CONFIG_CP0_OFFSET) +#define SAR_RST_PCIE1_CLOCK_CONFIG_CP0_OFFSET (31) +#define SAR_RST_PCIE1_CLOCK_CONFIG_CP0_MASK (0x1UL << \ + SAR_RST_PCIE1_CLOCK_CONFIG_CP0_OFFSET) +#define SAR_STATUS_0_REG 0x40600 +#define DFX_FROM_COMPHY_ADDR(x) ((x & ~0xffffff) + DFX_BASE) +/* Common Phy training */ +#define COMPHY_TRX_TRAIN_COMPHY_OFFS 0x1000 +#define COMPHY_TRX_TRAIN_RX_TRAIN_ENABLE 0x1 +#define COMPHY_TRX_RELATIVE_ADDR(comphy_index) (comphy_train_base + \ + (comphy_index) * COMPHY_TRX_TRAIN_COMPHY_OFFS) + +/* The same Units Soft Reset Config register are accessed in all PCIe ports + * initialization, so a spin lock is defined in case when more than 1 CPUs + * resets PCIe MAC and need to access the 
register in the same time. The spin + * lock is shared by all CP110 units. + */ +spinlock_t cp110_mac_reset_lock; + +/* These values come from the PCI Express Spec */ +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +_Bool rx_trainng_done[AP_NUM][CP_NUM][MAX_LANE_NR] = {0}; + +static void mvebu_cp110_get_ap_and_cp_nr(uint8_t *ap_nr, uint8_t *cp_nr, + uint64_t comphy_base) +{ +#if (AP_NUM == 1) + *ap_nr = 0; +#else + *ap_nr = (((comphy_base & ~0xffffff) - MVEBU_AP_IO_BASE(0)) / + AP_IO_OFFSET); +#endif + + *cp_nr = (((comphy_base & ~0xffffff) - MVEBU_AP_IO_BASE(*ap_nr)) / + MVEBU_CP_OFFSET); + + debug("cp_base 0x%" PRIx64 ", ap_io_base 0x%lx, cp_offset 0x%lx\n", + comphy_base, (unsigned long)MVEBU_AP_IO_BASE(*ap_nr), + (unsigned long)MVEBU_CP_OFFSET); +} + +/* Clear PIPE selector - avoid collision with previous configuration */ +static void mvebu_cp110_comphy_clr_pipe_selector(uint64_t comphy_base, + uint8_t comphy_index) +{ + uint32_t reg, mask, field; + uint32_t comphy_offset = + COMMON_SELECTOR_COMPHYN_FIELD_WIDTH * comphy_index; + + mask = COMMON_SELECTOR_COMPHY_MASK << comphy_offset; + reg = mmio_read_32(comphy_base + COMMON_SELECTOR_PIPE_REG_OFFSET); + field = reg & mask; + + if (field) { + reg &= ~mask; + mmio_write_32(comphy_base + COMMON_SELECTOR_PIPE_REG_OFFSET, + reg); + } +} + +/* Clear PHY selector - avoid collision with previous configuration */ +static void mvebu_cp110_comphy_clr_phy_selector(uint64_t comphy_base, + uint8_t comphy_index) +{ + uint32_t reg, mask, field; + uint32_t comphy_offset = + COMMON_SELECTOR_COMPHYN_FIELD_WIDTH * comphy_index; + + mask = COMMON_SELECTOR_COMPHY_MASK << comphy_offset; + reg = mmio_read_32(comphy_base + COMMON_SELECTOR_PHY_REG_OFFSET); + field = reg & mask; + + /* Clear comphy selector - if it was already configured. + * (might be that this comphy was configured as PCIe/USB, + * in such case, no need to clear comphy selector because PCIe/USB + * are controlled by hpipe selector). + */ + if (field) { + reg &= ~mask; + mmio_write_32(comphy_base + COMMON_SELECTOR_PHY_REG_OFFSET, + reg); + } +} + +/* PHY selector configures SATA and Network modes */ +static void mvebu_cp110_comphy_set_phy_selector(uint64_t comphy_base, + uint8_t comphy_index, uint32_t comphy_mode) +{ + uint32_t reg, mask; + uint32_t comphy_offset = + COMMON_SELECTOR_COMPHYN_FIELD_WIDTH * comphy_index; + int mode; + + /* If phy selector is used the pipe selector should be marked as + * unconnected. + */ + mvebu_cp110_comphy_clr_pipe_selector(comphy_base, comphy_index); + + /* Comphy mode (compound of the IO mode and id). Here, only the IO mode + * is required to distinguish between SATA and network modes. + */ + mode = COMPHY_GET_MODE(comphy_mode); + + mask = COMMON_SELECTOR_COMPHY_MASK << comphy_offset; + reg = mmio_read_32(comphy_base + COMMON_SELECTOR_PHY_REG_OFFSET); + reg &= ~mask; + + /* SATA port 0/1 require the same configuration */ + if (mode == COMPHY_SATA_MODE) { + /* SATA selector values is always 4 */ + reg |= COMMON_SELECTOR_COMPHYN_SATA << comphy_offset; + } else { + switch (comphy_index) { + case(0): + case(1): + case(2): + /* For comphy 0,1, and 2: + * Network selector value is always 1. 
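+ * Unlike lanes 3-5 handled below, no per-mode distinction is
+ * needed for these lanes.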
+ */ + reg |= COMMON_SELECTOR_COMPHY0_1_2_NETWORK << + comphy_offset; + break; + case(3): + /* For comphy 3: + * 0x1 = RXAUI_Lane1 + * 0x2 = SGMII/Base-X Port1 + */ + if (mode == COMPHY_RXAUI_MODE) + reg |= COMMON_SELECTOR_COMPHY3_RXAUI << + comphy_offset; + else + reg |= COMMON_SELECTOR_COMPHY3_SGMII << + comphy_offset; + break; + case(4): + /* For comphy 4: + * 0x1 = SGMII/Base-X Port1, XFI1/SFI1 + * 0x2 = SGMII/Base-X Port0: XFI0/SFI0, RXAUI_Lane0 + * + * We want to check if SGMII1 is the + * requested mode in order to determine which value + * should be set (all other modes use the same value) + * so we need to strip the mode, and check the ID + * because we might handle SGMII0 too. + */ + /* TODO: need to distinguish between CP110 and CP115 + * as SFI1/XFI1 available only for CP115. + */ + if ((mode == COMPHY_SGMII_MODE || + mode == COMPHY_2500BASEX_MODE || + mode == COMPHY_SFI_MODE || + mode == COMPHY_XFI_MODE || + mode == COMPHY_AP_MODE) + && COMPHY_GET_ID(comphy_mode) == 1) + reg |= COMMON_SELECTOR_COMPHY4_PORT1 << + comphy_offset; + else + reg |= COMMON_SELECTOR_COMPHY4_ALL_OTHERS << + comphy_offset; + break; + case(5): + /* For comphy 5: + * 0x1 = SGMII/Base-X Port2 + * 0x2 = RXAUI Lane1 + */ + if (mode == COMPHY_RXAUI_MODE) + reg |= COMMON_SELECTOR_COMPHY5_RXAUI << + comphy_offset; + else + reg |= COMMON_SELECTOR_COMPHY5_SGMII << + comphy_offset; + break; + } + } + + mmio_write_32(comphy_base + COMMON_SELECTOR_PHY_REG_OFFSET, reg); +} + +/* PIPE selector configures for PCIe, USB 3.0 Host, and USB 3.0 Device mode */ +static void mvebu_cp110_comphy_set_pipe_selector(uint64_t comphy_base, + uint8_t comphy_index, uint32_t comphy_mode) +{ + uint32_t reg; + uint32_t shift = COMMON_SELECTOR_COMPHYN_FIELD_WIDTH * comphy_index; + int mode = COMPHY_GET_MODE(comphy_mode); + uint32_t mask = COMMON_SELECTOR_COMPHY_MASK << shift; + uint32_t pipe_sel = 0x0; + + /* If pipe selector is used the phy selector should be marked as + * unconnected. 
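+ * (This mirrors mvebu_cp110_comphy_set_phy_selector() above, which
+ * clears the PIPE selector before programming the PHY selector for
+ * SATA/network modes.)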
+ */ + mvebu_cp110_comphy_clr_phy_selector(comphy_base, comphy_index); + + reg = mmio_read_32(comphy_base + COMMON_SELECTOR_PIPE_REG_OFFSET); + reg &= ~mask; + + switch (mode) { + case (COMPHY_PCIE_MODE): + /* For lanes support PCIE, selector value are all same */ + pipe_sel = COMMON_SELECTOR_PIPE_COMPHY_PCIE; + break; + + case (COMPHY_USB3H_MODE): + /* Only lane 1-4 support USB host, selector value is same */ + if (comphy_index == COMPHY_LANE0 || + comphy_index == COMPHY_LANE5) + ERROR("COMPHY[%d] mode[%d] is invalid\n", + comphy_index, mode); + else + pipe_sel = COMMON_SELECTOR_PIPE_COMPHY_USBH; + break; + + case (COMPHY_USB3D_MODE): + /* Lane 1 and 4 support USB device, selector value is same */ + if (comphy_index == COMPHY_LANE1 || + comphy_index == COMPHY_LANE4) + pipe_sel = COMMON_SELECTOR_PIPE_COMPHY_USBD; + else + ERROR("COMPHY[%d] mode[%d] is invalid\n", comphy_index, + mode); + break; + + default: + ERROR("COMPHY[%d] mode[%d] is invalid\n", comphy_index, mode); + break; + } + + mmio_write_32(comphy_base + COMMON_SELECTOR_PIPE_REG_OFFSET, reg | + (pipe_sel << shift)); +} + +int mvebu_cp110_comphy_is_pll_locked(uint64_t comphy_base, uint8_t comphy_index) +{ + uintptr_t sd_ip_addr, addr; + uint32_t mask, data; + int ret = 0; + + debug_enter(); + + sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + + addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG; + data = SD_EXTERNAL_STATUS0_PLL_TX_MASK & + SD_EXTERNAL_STATUS0_PLL_RX_MASK; + mask = data; + data = polling_with_timeout(addr, data, mask, + PLL_LOCK_TIMEOUT, REG_32BIT); + if (data != 0) { + if (data & SD_EXTERNAL_STATUS0_PLL_RX_MASK) + ERROR("RX PLL is not locked\n"); + if (data & SD_EXTERNAL_STATUS0_PLL_TX_MASK) + ERROR("TX PLL is not locked\n"); + + ret = -ETIMEDOUT; + } + + debug_exit(); + + return ret; +} + +static void mvebu_cp110_polarity_invert(uintptr_t addr, uint8_t phy_polarity_invert) +{ + uint32_t mask, data; + + /* Set RX / TX polarity */ + data = mask = 0x0U; + if ((phy_polarity_invert & COMPHY_POLARITY_TXD_INVERT) != 0) { + data |= (1 << HPIPE_SYNC_PATTERN_TXD_INV_OFFSET); + mask |= HPIPE_SYNC_PATTERN_TXD_INV_MASK; + debug("%s: inverting TX polarity\n", __func__); + } + + if ((phy_polarity_invert & COMPHY_POLARITY_RXD_INVERT) != 0) { + data |= (1 << HPIPE_SYNC_PATTERN_RXD_INV_OFFSET); + mask |= HPIPE_SYNC_PATTERN_RXD_INV_MASK; + debug("%s: inverting RX polarity\n", __func__); + } + + reg_set(addr, data, mask); +} + +static int mvebu_cp110_comphy_sata_power_on(uint64_t comphy_base, + uint8_t comphy_index, uint32_t comphy_mode) +{ + uintptr_t hpipe_addr, sd_ip_addr, comphy_addr; + uint32_t mask, data; + uint8_t ap_nr, cp_nr, phy_polarity_invert; + int ret = 0; + + debug_enter(); + + mvebu_cp110_get_ap_and_cp_nr(&ap_nr, &cp_nr, comphy_base); + + const struct sata_params *sata_static_values = + &sata_static_values_tab[ap_nr][cp_nr][comphy_index]; + + phy_polarity_invert = sata_static_values->polarity_invert; + + /* configure phy selector for SATA */ + mvebu_cp110_comphy_set_phy_selector(comphy_base, + comphy_index, comphy_mode); + + hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + comphy_addr = COMPHY_ADDR(comphy_base, comphy_index); + + debug(" add hpipe 0x%lx, sd 0x%lx, comphy 0x%lx\n", + hpipe_addr, sd_ip_addr, comphy_addr); + debug("stage: RFU configurations - hard reset comphy\n"); + /* RFU configurations - hard reset comphy */ + mask = COMMON_PHY_CFG1_PWR_UP_MASK; + data = 
0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET; + mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK; + data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET; + mask |= COMMON_PHY_CFG1_PWR_ON_RESET_MASK; + data |= 0x0 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET; + mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK; + data |= 0x0 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); + + /* Set select data width 40Bit - SATA mode only */ + reg_set(comphy_addr + COMMON_PHY_CFG6_REG, + 0x1 << COMMON_PHY_CFG6_IF_40_SEL_OFFSET, + COMMON_PHY_CFG6_IF_40_SEL_MASK); + + /* release from hard reset in SD external */ + mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + /* Wait 1ms - until band gap and ref clock ready */ + mdelay(1); + + debug("stage: Comphy configuration\n"); + /* Start comphy Configuration */ + /* Set reference clock to comes from group 1 - choose 25Mhz */ + reg_set(hpipe_addr + HPIPE_MISC_REG, + 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET, + HPIPE_MISC_REFCLK_SEL_MASK); + /* Reference frequency select set 1 (for SATA = 25Mhz) */ + mask = HPIPE_PWR_PLL_REF_FREQ_MASK; + data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; + /* PHY mode select (set SATA = 0x0 */ + mask |= HPIPE_PWR_PLL_PHY_MODE_MASK; + data |= 0x0 << HPIPE_PWR_PLL_PHY_MODE_OFFSET; + reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask); + /* Set max PHY generation setting - 6Gbps */ + reg_set(hpipe_addr + HPIPE_INTERFACE_REG, + 0x2 << HPIPE_INTERFACE_GEN_MAX_OFFSET, + HPIPE_INTERFACE_GEN_MAX_MASK); + /* Set select data width 40Bit (SEL_BITS[2:0]) */ + reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, + 0x2 << HPIPE_LOOPBACK_SEL_OFFSET, HPIPE_LOOPBACK_SEL_MASK); + + debug("stage: Analog parameters from ETP(HW)\n"); + /* G1 settings */ + mask = HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK; + data = sata_static_values->g1_rx_selmupi << + HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_SELMUPF_MASK; + data |= sata_static_values->g1_rx_selmupf << + HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_SELMUFI_MASK; + data |= sata_static_values->g1_rx_selmufi << + HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_SELMUFF_MASK; + data |= sata_static_values->g1_rx_selmuff << + HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_MASK; + data |= 0x1 << HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SET_1_REG, data, mask); + + mask = HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK; + data = 0xf << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET; + mask |= HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK; + data |= 0x2 << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET; + mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK; + data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET; + mask |= HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_MASK; + data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_OFFSET; + mask |= HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_MASK; + data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask); + + /* G2 settings */ + mask = HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK; + data = sata_static_values->g2_rx_selmupi << + HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET; + mask |= HPIPE_G2_SET_1_G2_RX_SELMUPF_MASK; + data |= sata_static_values->g2_rx_selmupf << + HPIPE_G2_SET_1_G2_RX_SELMUPF_OFFSET; + mask |= 
HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK; + data |= sata_static_values->g2_rx_selmufi << + HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET; + mask |= HPIPE_G2_SET_1_G2_RX_SELMUFF_MASK; + data |= sata_static_values->g2_rx_selmuff << + HPIPE_G2_SET_1_G2_RX_SELMUFF_OFFSET; + mask |= HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_MASK; + data |= 0x1 << HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_OFFSET; + reg_set(hpipe_addr + HPIPE_G2_SET_1_REG, data, mask); + + /* G3 settings */ + mask = HPIPE_G3_SET_1_G3_RX_SELMUPI_MASK; + data = sata_static_values->g3_rx_selmupi << + HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET; + mask |= HPIPE_G3_SET_1_G3_RX_SELMUPF_MASK; + data |= sata_static_values->g3_rx_selmupf << + HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET; + mask |= HPIPE_G3_SET_1_G3_RX_SELMUFI_MASK; + data |= sata_static_values->g3_rx_selmufi << + HPIPE_G3_SET_1_G3_RX_SELMUFI_OFFSET; + mask |= HPIPE_G3_SET_1_G3_RX_SELMUFF_MASK; + data |= sata_static_values->g3_rx_selmuff << + HPIPE_G3_SET_1_G3_RX_SELMUFF_OFFSET; + mask |= HPIPE_G3_SET_1_G3_RX_DFE_EN_MASK; + data |= 0x1 << HPIPE_G3_SET_1_G3_RX_DFE_EN_OFFSET; + mask |= HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_MASK; + data |= 0x2 << HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_OFFSET; + mask |= HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_MASK; + data |= 0x0 << HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_G3_SET_1_REG, data, mask); + + /* DTL Control */ + mask = HPIPE_PWR_CTR_DTL_SQ_DET_EN_MASK; + data = 0x1 << HPIPE_PWR_CTR_DTL_SQ_DET_EN_OFFSET; + mask |= HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_MASK; + data |= 0x1 << HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_OFFSET; + mask |= HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK; + data |= 0x1 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET; + mask |= HPIPE_PWR_CTR_DTL_CLAMPING_SEL_MASK; + data |= 0x1 << HPIPE_PWR_CTR_DTL_CLAMPING_SEL_OFFSET; + mask |= HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_MASK; + data |= 0x1 << HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_OFFSET; + mask |= HPIPE_PWR_CTR_DTL_CLK_MODE_MASK; + data |= 0x1 << HPIPE_PWR_CTR_DTL_CLK_MODE_OFFSET; + mask |= HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_MASK; + data |= 0x1 << HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask); + + /* Trigger sampler enable pulse */ + mask = HPIPE_SMAPLER_MASK; + data = 0x1 << HPIPE_SMAPLER_OFFSET; + reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask); + mask = HPIPE_SMAPLER_MASK; + data = 0x0 << HPIPE_SMAPLER_OFFSET; + reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask); + + /* VDD Calibration Control 3 */ + mask = HPIPE_EXT_SELLV_RXSAMPL_MASK; + data = 0x10 << HPIPE_EXT_SELLV_RXSAMPL_OFFSET; + reg_set(hpipe_addr + HPIPE_VDD_CAL_CTRL_REG, data, mask); + + /* DFE Resolution Control */ + mask = HPIPE_DFE_RES_FORCE_MASK; + data = 0x1 << HPIPE_DFE_RES_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask); + + /* DFE F3-F5 Coefficient Control */ + mask = HPIPE_DFE_F3_F5_DFE_EN_MASK; + data = 0x0 << HPIPE_DFE_F3_F5_DFE_EN_OFFSET; + mask |= HPIPE_DFE_F3_F5_DFE_CTRL_MASK; + data = 0x0 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET; + reg_set(hpipe_addr + HPIPE_DFE_F3_F5_REG, data, mask); + + /* G3 Setting 3 */ + mask = HPIPE_G3_FFE_CAP_SEL_MASK; + data = sata_static_values->g3_ffe_cap_sel << + HPIPE_G3_FFE_CAP_SEL_OFFSET; + mask |= HPIPE_G3_FFE_RES_SEL_MASK; + data |= sata_static_values->g3_ffe_res_sel << + HPIPE_G3_FFE_RES_SEL_OFFSET; + mask |= HPIPE_G3_FFE_SETTING_FORCE_MASK; + data |= 0x1 << HPIPE_G3_FFE_SETTING_FORCE_OFFSET; + mask |= HPIPE_G3_FFE_DEG_RES_LEVEL_MASK; + data |= 0x1 << HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET; + mask |= HPIPE_G3_FFE_LOAD_RES_LEVEL_MASK; + data |= 
0x3 << HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET; + reg_set(hpipe_addr + HPIPE_G3_SETTING_3_REG, data, mask); + + /* G3 Setting 4 */ + mask = HPIPE_G3_DFE_RES_MASK; + data = sata_static_values->g3_dfe_res << HPIPE_G3_DFE_RES_OFFSET; + reg_set(hpipe_addr + HPIPE_G3_SETTING_4_REG, data, mask); + + /* Offset Phase Control */ + mask = HPIPE_OS_PH_OFFSET_MASK; + data = sata_static_values->align90 << HPIPE_OS_PH_OFFSET_OFFSET; + mask |= HPIPE_OS_PH_OFFSET_FORCE_MASK; + data |= 0x1 << HPIPE_OS_PH_OFFSET_FORCE_OFFSET; + mask |= HPIPE_OS_PH_VALID_MASK; + data |= 0x0 << HPIPE_OS_PH_VALID_OFFSET; + reg_set(hpipe_addr + HPIPE_PHASE_CONTROL_REG, data, mask); + mask = HPIPE_OS_PH_VALID_MASK; + data = 0x1 << HPIPE_OS_PH_VALID_OFFSET; + reg_set(hpipe_addr + HPIPE_PHASE_CONTROL_REG, data, mask); + mask = HPIPE_OS_PH_VALID_MASK; + data = 0x0 << HPIPE_OS_PH_VALID_OFFSET; + reg_set(hpipe_addr + HPIPE_PHASE_CONTROL_REG, data, mask); + + /* Set G1 TX amplitude and TX post emphasis value */ + mask = HPIPE_G1_SET_0_G1_TX_AMP_MASK; + data = sata_static_values->g1_amp << HPIPE_G1_SET_0_G1_TX_AMP_OFFSET; + mask |= HPIPE_G1_SET_0_G1_TX_AMP_ADJ_MASK; + data |= sata_static_values->g1_tx_amp_adj << + HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET; + mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_MASK; + data |= sata_static_values->g1_emph << + HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET; + mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_EN_MASK; + data |= sata_static_values->g1_emph_en << + HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, data, mask); + + /* Set G1 emph */ + mask = HPIPE_G1_SET_2_G1_TX_EMPH0_EN_MASK; + data = sata_static_values->g1_tx_emph_en << + HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET; + mask |= HPIPE_G1_SET_2_G1_TX_EMPH0_MASK; + data |= sata_static_values->g1_tx_emph << + HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SET_2_REG, data, mask); + + /* Set G2 TX amplitude and TX post emphasis value */ + mask = HPIPE_G2_SET_0_G2_TX_AMP_MASK; + data = sata_static_values->g2_amp << HPIPE_G2_SET_0_G2_TX_AMP_OFFSET; + mask |= HPIPE_G2_SET_0_G2_TX_AMP_ADJ_MASK; + data |= sata_static_values->g2_tx_amp_adj << + HPIPE_G2_SET_0_G2_TX_AMP_ADJ_OFFSET; + mask |= HPIPE_G2_SET_0_G2_TX_EMPH1_MASK; + data |= sata_static_values->g2_emph << + HPIPE_G2_SET_0_G2_TX_EMPH1_OFFSET; + mask |= HPIPE_G2_SET_0_G2_TX_EMPH1_EN_MASK; + data |= sata_static_values->g2_emph_en << + HPIPE_G2_SET_0_G2_TX_EMPH1_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_G2_SET_0_REG, data, mask); + + /* Set G2 emph */ + mask = HPIPE_G2_SET_2_G2_TX_EMPH0_EN_MASK; + data = sata_static_values->g2_tx_emph_en << + HPIPE_G2_SET_2_G2_TX_EMPH0_EN_OFFSET; + mask |= HPIPE_G2_SET_2_G2_TX_EMPH0_MASK; + data |= sata_static_values->g2_tx_emph << + HPIPE_G2_SET_2_G2_TX_EMPH0_OFFSET; + reg_set(hpipe_addr + HPIPE_G2_SET_2_REG, data, mask); + + /* Set G3 TX amplitude and TX post emphasis value */ + mask = HPIPE_G3_SET_0_G3_TX_AMP_MASK; + data = sata_static_values->g3_amp << HPIPE_G3_SET_0_G3_TX_AMP_OFFSET; + mask |= HPIPE_G3_SET_0_G3_TX_AMP_ADJ_MASK; + data |= sata_static_values->g3_tx_amp_adj << + HPIPE_G3_SET_0_G3_TX_AMP_ADJ_OFFSET; + mask |= HPIPE_G3_SET_0_G3_TX_EMPH1_MASK; + data |= sata_static_values->g3_emph << + HPIPE_G3_SET_0_G3_TX_EMPH1_OFFSET; + mask |= HPIPE_G3_SET_0_G3_TX_EMPH1_EN_MASK; + data |= sata_static_values->g3_emph_en << + HPIPE_G3_SET_0_G3_TX_EMPH1_EN_OFFSET; + mask |= HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_MASK; + data |= 0x4 << HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_OFFSET; + mask |= HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_MASK; + data |= 0x0 << 
HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_G3_SET_0_REG, data, mask); + + /* Set G3 emph */ + mask = HPIPE_G3_SET_2_G3_TX_EMPH0_EN_MASK; + data = sata_static_values->g3_tx_emph_en << + HPIPE_G3_SET_2_G3_TX_EMPH0_EN_OFFSET; + mask |= HPIPE_G3_SET_2_G3_TX_EMPH0_MASK; + data |= sata_static_values->g3_tx_emph << + HPIPE_G3_SET_2_G3_TX_EMPH0_OFFSET; + reg_set(hpipe_addr + HPIPE_G3_SET_2_REG, data, mask); + + /* SERDES External Configuration 2 register */ + mask = SD_EXTERNAL_CONFIG2_SSC_ENABLE_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG2_SSC_ENABLE_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG2_REG, data, mask); + + /* DFE reset sequence */ + reg_set(hpipe_addr + HPIPE_PWR_CTR_REG, + 0x1 << HPIPE_PWR_CTR_RST_DFE_OFFSET, + HPIPE_PWR_CTR_RST_DFE_MASK); + reg_set(hpipe_addr + HPIPE_PWR_CTR_REG, + 0x0 << HPIPE_PWR_CTR_RST_DFE_OFFSET, + HPIPE_PWR_CTR_RST_DFE_MASK); + + if (phy_polarity_invert != 0) + mvebu_cp110_polarity_invert(hpipe_addr + HPIPE_SYNC_PATTERN_REG, + phy_polarity_invert); + + /* SW reset for interrupt logic */ + reg_set(hpipe_addr + HPIPE_PWR_CTR_REG, + 0x1 << HPIPE_PWR_CTR_SFT_RST_OFFSET, + HPIPE_PWR_CTR_SFT_RST_MASK); + reg_set(hpipe_addr + HPIPE_PWR_CTR_REG, + 0x0 << HPIPE_PWR_CTR_SFT_RST_OFFSET, + HPIPE_PWR_CTR_SFT_RST_MASK); + + debug_exit(); + + return ret; +} + +static int mvebu_cp110_comphy_sgmii_power_on(uint64_t comphy_base, + uint8_t comphy_index, uint32_t comphy_mode) +{ + uintptr_t hpipe_addr, sd_ip_addr, comphy_addr, addr; + uint32_t mask, data, sgmii_speed = COMPHY_GET_SPEED(comphy_mode); + int ret = 0; + + debug_enter(); + + hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + comphy_addr = COMPHY_ADDR(comphy_base, comphy_index); + + /* configure phy selector for SGMII */ + mvebu_cp110_comphy_set_phy_selector(comphy_base, comphy_index, + comphy_mode); + + /* Confiugre the lane */ + debug("stage: RFU configurations - hard reset comphy\n"); + /* RFU configurations - hard reset comphy */ + mask = COMMON_PHY_CFG1_PWR_UP_MASK; + data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET; + mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK; + data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); + + /* Select Baud Rate of Comphy And PD_PLL/Tx/Rx */ + mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK; + data = 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_MASK; + mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_MASK; + + if (sgmii_speed == COMPHY_SPEED_1_25G) { + /* SGMII 1G, SerDes speed 1.25G */ + data |= 0x6 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET; + data |= 0x6 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET; + } else if (sgmii_speed == COMPHY_SPEED_3_125G) { + /* 2500Base-X, SerDes speed 3.125G */ + data |= 0x8 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET; + data |= 0x8 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET; + } else { + /* Other rates are not supported */ + ERROR("unsupported SGMII speed on comphy%d\n", comphy_index); + return -EINVAL; + } + + mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK; + data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK; + data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_MASK; + data |= 1 << SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask); + + /* Set hard reset */ + mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; 
+ data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + /* Release hard reset */ + mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + /* Wait 1ms - until band gap and ref clock ready */ + mdelay(1); + + /* Make sure that 40 data bits is disabled + * This bit is not cleared by reset + */ + mask = COMMON_PHY_CFG6_IF_40_SEL_MASK; + data = 0 << COMMON_PHY_CFG6_IF_40_SEL_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG6_REG, data, mask); + + /* Start comphy Configuration */ + debug("stage: Comphy configuration\n"); + /* set reference clock */ + mask = HPIPE_MISC_REFCLK_SEL_MASK; + data = 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET; + reg_set(hpipe_addr + HPIPE_MISC_REG, data, mask); + /* Power and PLL Control */ + mask = HPIPE_PWR_PLL_REF_FREQ_MASK; + data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; + mask |= HPIPE_PWR_PLL_PHY_MODE_MASK; + data |= 0x4 << HPIPE_PWR_PLL_PHY_MODE_OFFSET; + reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask); + /* Loopback register */ + mask = HPIPE_LOOPBACK_SEL_MASK; + data = 0x1 << HPIPE_LOOPBACK_SEL_OFFSET; + reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, data, mask); + /* rx control 1 */ + mask = HPIPE_RX_CONTROL_1_RXCLK2X_SEL_MASK; + data = 0x1 << HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET; + mask |= HPIPE_RX_CONTROL_1_CLK8T_EN_MASK; + data |= 0x0 << HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_RX_CONTROL_1_REG, data, mask); + /* DTL Control */ + mask = HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK; + data = 0x0 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask); + + /* Set analog parameters from ETP(HW) - for now use the default data */ + debug("stage: Analog parameters from ETP(HW)\n"); + + reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, + 0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET, + HPIPE_G1_SET_0_G1_TX_EMPH1_MASK); + + debug("stage: RFU configurations- Power Up PLL,Tx,Rx\n"); + /* SERDES External Configuration */ + mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask); + + ret = mvebu_cp110_comphy_is_pll_locked(comphy_base, comphy_index); + if (ret) + return ret; + + /* RX init */ + mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + /* check that RX init done */ + addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG; + data = SD_EXTERNAL_STATUS0_RX_INIT_MASK; + mask = data; + data = polling_with_timeout(addr, data, mask, 100, REG_32BIT); + if (data != 0) { + ERROR("RX init failed\n"); + ret = -ETIMEDOUT; + } + + debug("stage: RF Reset\n"); + /* RF Reset */ + mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK; + data = 0x0 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; + reg_set(sd_ip_addr + 
SD_EXTERNAL_CONFIG1_REG, data, mask); + + debug_exit(); + + return ret; +} + +static int mvebu_cp110_comphy_xfi_power_on(uint64_t comphy_base, + uint8_t comphy_index, + uint32_t comphy_mode, + uint64_t comphy_train_base) +{ + uintptr_t hpipe_addr, sd_ip_addr, comphy_addr, addr; + uint32_t mask, data, speed = COMPHY_GET_SPEED(comphy_mode); + int ret = 0; + uint8_t ap_nr, cp_nr; + + debug_enter(); + mvebu_cp110_get_ap_and_cp_nr(&ap_nr, &cp_nr, comphy_base); + + if (rx_trainng_done[ap_nr][cp_nr][comphy_index]) { + debug("Skip %s for comphy[%d][%d][%d], due to rx training\n", + __func__, ap_nr, cp_nr, comphy_index); + return 0; + } + + const struct xfi_params *xfi_static_values = + &xfi_static_values_tab[ap_nr][cp_nr][comphy_index]; + + debug("%s: the ap_nr = %d, cp_nr = %d, comphy_index %d\n", + __func__, ap_nr, cp_nr, comphy_index); + + debug("g1_ffe_cap_sel= 0x%x, g1_ffe_res_sel= 0x%x, g1_dfe_res= 0x%x\n", + xfi_static_values->g1_ffe_cap_sel, + xfi_static_values->g1_ffe_res_sel, + xfi_static_values->g1_dfe_res); + + if (!xfi_static_values->valid) { + ERROR("[ap%d][cp[%d][comphy:%d]: Has no valid static params\n", + ap_nr, cp_nr, comphy_index); + ERROR("[ap%d][cp[%d][comphy:%d]: porting layer needs update\n", + ap_nr, cp_nr, comphy_index); + return -EINVAL; + } + + if ((speed != COMPHY_SPEED_5_15625G) && + (speed != COMPHY_SPEED_10_3125G) && + (speed != COMPHY_SPEED_DEFAULT)) { + ERROR("comphy:%d: unsupported sfi/xfi speed\n", comphy_index); + return -EINVAL; + } + + hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + comphy_addr = COMPHY_ADDR(comphy_base, comphy_index); + + /* configure phy selector for XFI/SFI */ + mvebu_cp110_comphy_set_phy_selector(comphy_base, comphy_index, + comphy_mode); + + debug("stage: RFU configurations - hard reset comphy\n"); + /* RFU configurations - hard reset comphy */ + mask = COMMON_PHY_CFG1_PWR_UP_MASK; + data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET; + mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK; + data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); + + /* Make sure that 40 data bits is disabled + * This bit is not cleared by reset + */ + mask = COMMON_PHY_CFG6_IF_40_SEL_MASK; + data = 0 << COMMON_PHY_CFG6_IF_40_SEL_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG6_REG, data, mask); + + /* Select Baud Rate of Comphy And PD_PLL/Tx/Rx */ + mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK; + data = 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_MASK; + data |= 0xE << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_MASK; + data |= 0xE << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK; + data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK; + data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_MASK; + data |= 0 << SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask); + + /* release from hard reset */ + mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; + data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, 
mask); + + mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_TX_IDLE_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG1_TX_IDLE_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + /* Wait 1ms - until band gap and ref clock ready */ + mdelay(1); + + /* + * Erratum IPCE_COMPHY-1353: toggle TX_IDLE bit in + * addition to the PHY reset + */ + mask = SD_EXTERNAL_CONFIG1_TX_IDLE_MASK; + data = 0x0U; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + /* Start comphy Configuration */ + debug("stage: Comphy configuration\n"); + /* set reference clock */ + mask = HPIPE_MISC_ICP_FORCE_MASK; + data = (speed == COMPHY_SPEED_5_15625G) ? + (0x0 << HPIPE_MISC_ICP_FORCE_OFFSET) : + (0x1 << HPIPE_MISC_ICP_FORCE_OFFSET); + mask |= HPIPE_MISC_REFCLK_SEL_MASK; + data |= 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET; + reg_set(hpipe_addr + HPIPE_MISC_REG, data, mask); + /* Power and PLL Control */ + mask = HPIPE_PWR_PLL_REF_FREQ_MASK; + data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; + mask |= HPIPE_PWR_PLL_PHY_MODE_MASK; + data |= 0x4 << HPIPE_PWR_PLL_PHY_MODE_OFFSET; + reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask); + /* Loopback register */ + mask = HPIPE_LOOPBACK_SEL_MASK; + data = 0x1 << HPIPE_LOOPBACK_SEL_OFFSET; + reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, data, mask); + /* rx control 1 */ + mask = HPIPE_RX_CONTROL_1_RXCLK2X_SEL_MASK; + data = 0x1 << HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET; + mask |= HPIPE_RX_CONTROL_1_CLK8T_EN_MASK; + data |= 0x1 << HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_RX_CONTROL_1_REG, data, mask); + /* DTL Control */ + mask = HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK; + data = 0x1 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask); + + /* Transmitter/Receiver Speed Divider Force */ + if (speed == COMPHY_SPEED_5_15625G) { + mask = HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_MASK; + data = 1 << HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_OFFSET; + mask |= HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_MASK; + data |= 1 << HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_OFFSET; + mask |= HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_MASK; + data |= 1 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_OFFSET; + mask |= HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_MASK; + data |= 1 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_OFFSET; + } else { + mask = HPIPE_TXDIGCK_DIV_FORCE_MASK; + data = 0x1 << HPIPE_TXDIGCK_DIV_FORCE_OFFSET; + } + reg_set(hpipe_addr + HPIPE_SPD_DIV_FORCE_REG, data, mask); + + /* Set analog parameters from ETP(HW) */ + debug("stage: Analog parameters from ETP(HW)\n"); + /* SERDES External Configuration 2 */ + mask = SD_EXTERNAL_CONFIG2_PIN_DFE_EN_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG2_PIN_DFE_EN_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG2_REG, data, mask); + /* 0x7-DFE Resolution control */ + mask = HPIPE_DFE_RES_FORCE_MASK; + data = 0x1 << HPIPE_DFE_RES_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask); + /* 0xd-G1_Setting_0 */ + if (speed == COMPHY_SPEED_5_15625G) { + mask = HPIPE_G1_SET_0_G1_TX_EMPH1_MASK; + data = 0x6 << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET; + } else { + mask = HPIPE_G1_SET_0_G1_TX_AMP_MASK; + data = xfi_static_values->g1_amp << + HPIPE_G1_SET_0_G1_TX_AMP_OFFSET; + mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_MASK; + data |= xfi_static_values->g1_emph << + HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET; + + mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_EN_MASK; + data |= 
xfi_static_values->g1_emph_en << + HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET; + mask |= HPIPE_G1_SET_0_G1_TX_AMP_ADJ_MASK; + data |= xfi_static_values->g1_tx_amp_adj << + HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET; + } + reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, data, mask); + /* Genration 1 setting 2 (G1_Setting_2) */ + mask = HPIPE_G1_SET_2_G1_TX_EMPH0_MASK; + data = xfi_static_values->g1_tx_emph << + HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET; + mask |= HPIPE_G1_SET_2_G1_TX_EMPH0_EN_MASK; + data |= xfi_static_values->g1_tx_emph_en << + HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SET_2_REG, data, mask); + /* Transmitter Slew Rate Control register (tx_reg1) */ + mask = HPIPE_TX_REG1_TX_EMPH_RES_MASK; + data = 0x3 << HPIPE_TX_REG1_TX_EMPH_RES_OFFSET; + mask |= HPIPE_TX_REG1_SLC_EN_MASK; + data |= 0x3f << HPIPE_TX_REG1_SLC_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_REG1_REG, data, mask); + /* Impedance Calibration Control register (cal_reg1) */ + mask = HPIPE_CAL_REG_1_EXT_TXIMP_MASK; + data = 0xe << HPIPE_CAL_REG_1_EXT_TXIMP_OFFSET; + mask |= HPIPE_CAL_REG_1_EXT_TXIMP_EN_MASK; + data |= 0x1 << HPIPE_CAL_REG_1_EXT_TXIMP_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_CAL_REG1_REG, data, mask); + /* Generation 1 Setting 5 (g1_setting_5) */ + mask = HPIPE_G1_SETTING_5_G1_ICP_MASK; + data = 0 << HPIPE_CAL_REG_1_EXT_TXIMP_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTING_5_REG, data, mask); + + /* 0xE-G1_Setting_1 */ + mask = HPIPE_G1_SET_1_G1_RX_DFE_EN_MASK; + data = 0x1 << HPIPE_G1_SET_1_G1_RX_DFE_EN_OFFSET; + if (speed == COMPHY_SPEED_5_15625G) { + mask |= HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK; + data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_SELMUPF_MASK; + data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET; + } else { + mask |= HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK; + data |= xfi_static_values->g1_rx_selmupi << + HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_SELMUPF_MASK; + data |= xfi_static_values->g1_rx_selmupf << + HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_SELMUFI_MASK; + data |= xfi_static_values->g1_rx_selmufi << + HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_SELMUFF_MASK; + data |= xfi_static_values->g1_rx_selmuff << + HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_MASK; + data |= 0x3 << HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET; + } + reg_set(hpipe_addr + HPIPE_G1_SET_1_REG, data, mask); + + /* 0xA-DFE_Reg3 */ + mask = HPIPE_DFE_F3_F5_DFE_EN_MASK; + data = 0x0 << HPIPE_DFE_F3_F5_DFE_EN_OFFSET; + mask |= HPIPE_DFE_F3_F5_DFE_CTRL_MASK; + data |= 0x0 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET; + reg_set(hpipe_addr + HPIPE_DFE_F3_F5_REG, data, mask); + + /* 0x111-G1_Setting_4 */ + mask = HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK; + data = 0x1 << HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_4_REG, data, mask); + /* Genration 1 setting 3 (G1_Setting_3) */ + mask = HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_MASK; + data = 0x1 << HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_OFFSET; + if (speed == COMPHY_SPEED_5_15625G) { + /* Force FFE (Feed Forward Equalization) to 5G */ + mask |= HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK; + data |= 0xf << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET; + mask |= HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK; + data |= 0x4 << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET; + mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK; + data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask); + } 
else { + mask |= HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK; + data |= xfi_static_values->g1_ffe_cap_sel << + HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET; + mask |= HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK; + data |= xfi_static_values->g1_ffe_res_sel << + HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET; + mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK; + data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask); + + /* Use the value from CAL_OS_PH_EXT */ + mask = HPIPE_CAL_RXCLKALIGN_90_EXT_EN_MASK; + data = 1 << HPIPE_CAL_RXCLKALIGN_90_EXT_EN_OFFSET; + reg_set(hpipe_addr + + HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIB_CTRL_REG, + data, mask); + + /* Update align90 */ + mask = HPIPE_CAL_OS_PH_EXT_MASK; + data = xfi_static_values->align90 << HPIPE_CAL_OS_PH_EXT_OFFSET; + reg_set(hpipe_addr + + HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIB_CTRL_REG, + data, mask); + + /* Force DFE resolution (use gen table value) */ + mask = HPIPE_DFE_RES_FORCE_MASK; + data = 0x0 << HPIPE_DFE_RES_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask); + + /* 0x111-G1 DFE_Setting_4 */ + mask = HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK; + data = xfi_static_values->g1_dfe_res << + HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_4_REG, data, mask); + } + + /* Connfigure RX training timer */ + mask = HPIPE_RX_TRAIN_TIMER_MASK; + data = 0x13 << HPIPE_RX_TRAIN_TIMER_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_5_REG, data, mask); + + /* Enable TX train peak to peak hold */ + mask = HPIPE_TX_TRAIN_P2P_HOLD_MASK; + data = 0x1 << HPIPE_TX_TRAIN_P2P_HOLD_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_0_REG, data, mask); + + /* Configure TX preset index */ + mask = HPIPE_TX_PRESET_INDEX_MASK; + data = 0x2 << HPIPE_TX_PRESET_INDEX_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_PRESET_INDEX_REG, data, mask); + + /* Disable pattern lock lost timeout */ + mask = HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_MASK; + data = 0x0 << HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_FRAME_DETECT_CTRL_3_REG, data, mask); + + /* Configure TX training pattern and TX training 16bit auto */ + mask = HPIPE_TX_TRAIN_16BIT_AUTO_EN_MASK; + data = 0x1 << HPIPE_TX_TRAIN_16BIT_AUTO_EN_OFFSET; + mask |= HPIPE_TX_TRAIN_PAT_SEL_MASK; + data |= 0x1 << HPIPE_TX_TRAIN_PAT_SEL_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_TRAIN_REG, data, mask); + + /* Configure Training patten number */ + mask = HPIPE_TRAIN_PAT_NUM_MASK; + data = 0x88 << HPIPE_TRAIN_PAT_NUM_OFFSET; + reg_set(hpipe_addr + HPIPE_FRAME_DETECT_CTRL_0_REG, data, mask); + + /* Configure differencial manchester encoter to ethernet mode */ + mask = HPIPE_DME_ETHERNET_MODE_MASK; + data = 0x1 << HPIPE_DME_ETHERNET_MODE_OFFSET; + reg_set(hpipe_addr + HPIPE_DME_REG, data, mask); + + /* Configure VDD Continuous Calibration */ + mask = HPIPE_CAL_VDD_CONT_MODE_MASK; + data = 0x1 << HPIPE_CAL_VDD_CONT_MODE_OFFSET; + reg_set(hpipe_addr + HPIPE_VDD_CAL_0_REG, data, mask); + + /* Trigger sampler enable pulse (by toggleing the bit) */ + mask = HPIPE_RX_SAMPLER_OS_GAIN_MASK; + data = 0x3 << HPIPE_RX_SAMPLER_OS_GAIN_OFFSET; + mask |= HPIPE_SMAPLER_MASK; + data |= 0x1 << HPIPE_SMAPLER_OFFSET; + reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask); + mask = HPIPE_SMAPLER_MASK; + data = 0x0 << HPIPE_SMAPLER_OFFSET; + reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask); + + /* Set External RX Regulator Control */ + mask = HPIPE_EXT_SELLV_RXSAMPL_MASK; + data = 0x1A << 
HPIPE_EXT_SELLV_RXSAMPL_OFFSET; + reg_set(hpipe_addr + HPIPE_VDD_CAL_CTRL_REG, data, mask); + + debug("stage: RFU configurations- Power Up PLL,Tx,Rx\n"); + /* SERDES External Configuration */ + mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask); + + /* check PLL rx & tx ready */ + addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG; + data = SD_EXTERNAL_STATUS0_PLL_RX_MASK | + SD_EXTERNAL_STATUS0_PLL_TX_MASK; + mask = data; + data = polling_with_timeout(addr, data, mask, + PLL_LOCK_TIMEOUT, REG_32BIT); + if (data != 0) { + if (data & SD_EXTERNAL_STATUS0_PLL_RX_MASK) + ERROR("RX PLL is not locked\n"); + if (data & SD_EXTERNAL_STATUS0_PLL_TX_MASK) + ERROR("TX PLL is not locked\n"); + + ret = -ETIMEDOUT; + } + + /* RX init */ + mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + /* check that RX init done */ + addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG; + data = SD_EXTERNAL_STATUS0_RX_INIT_MASK; + mask = data; + data = polling_with_timeout(addr, data, mask, 100, REG_32BIT); + if (data != 0) { + ERROR("RX init failed\n"); + ret = -ETIMEDOUT; + } + + debug("stage: RF Reset\n"); + /* RF Reset */ + mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK; + data = 0x0 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + /* Force rx training on 10G port */ + data = mmio_read_32(COMPHY_TRX_RELATIVE_ADDR(comphy_index)); + data |= COMPHY_TRX_TRAIN_RX_TRAIN_ENABLE; + mmio_write_32(COMPHY_TRX_RELATIVE_ADDR(comphy_index), data); + mdelay(200); + data &= ~COMPHY_TRX_TRAIN_RX_TRAIN_ENABLE; + mmio_write_32(COMPHY_TRX_RELATIVE_ADDR(comphy_index), data); + + debug_exit(); + + return ret; +} + +static int mvebu_cp110_comphy_pcie_power_on(uint64_t comphy_base, + uint8_t comphy_index, uint32_t comphy_mode) +{ + int ret = 0; + uint32_t reg, mask, data, pcie_width; + uint32_t clk_dir; + uintptr_t hpipe_addr, comphy_addr, addr; + _Bool clk_src = COMPHY_GET_CLK_SRC(comphy_mode); + _Bool called_from_uboot = COMPHY_GET_CALLER(comphy_mode); + + /* In Armada 8K DB boards, PCIe initialization can be executed + * only once (PCIe reset performed during chip power on and + * it cannot be executed via GPIO later). + * This means that power on can be executed only once, so let's + * mark if the caller is bootloader or Linux. + * If bootloader -> run power on. + * If Linux -> exit. + * + * TODO: In MacciatoBIN, PCIe reset is connected via GPIO, + * so after GPIO reset is added to Linux Kernel, it can be + * powered-on by Linux. 
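+ * The caller is identified through COMPHY_GET_CALLER() and the
+ * non-bootloader case is handled by the early return below.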
+ */ + if (!called_from_uboot) + return ret; + + hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + comphy_addr = COMPHY_ADDR(comphy_base, comphy_index); + pcie_width = COMPHY_GET_PCIE_WIDTH(comphy_mode); + + debug_enter(); + + spin_lock(&cp110_mac_reset_lock); + + reg = mmio_read_32(SYS_CTRL_FROM_COMPHY_ADDR(comphy_base) + + SYS_CTRL_UINIT_SOFT_RESET_REG); + switch (comphy_index) { + case COMPHY_LANE0: + reg |= PCIE_MAC_RESET_MASK_PORT0; + break; + case COMPHY_LANE4: + reg |= PCIE_MAC_RESET_MASK_PORT1; + break; + case COMPHY_LANE5: + reg |= PCIE_MAC_RESET_MASK_PORT2; + break; + } + + mmio_write_32(SYS_CTRL_FROM_COMPHY_ADDR(comphy_base) + + SYS_CTRL_UINIT_SOFT_RESET_REG, reg); + spin_unlock(&cp110_mac_reset_lock); + + /* Configure PIPE selector for PCIE */ + mvebu_cp110_comphy_set_pipe_selector(comphy_base, comphy_index, + comphy_mode); + + /* + * Read SAR (Sample-At-Reset) configuration for the PCIe clock + * direction. + * + * SerDes Lane 4/5 got the PCIe ref-clock #1, + * and SerDes Lane 0 got PCIe ref-clock #0 + */ + reg = mmio_read_32(DFX_FROM_COMPHY_ADDR(comphy_base) + + SAR_STATUS_0_REG); + if (comphy_index == COMPHY_LANE4 || comphy_index == COMPHY_LANE5) + clk_dir = (reg & SAR_RST_PCIE1_CLOCK_CONFIG_CP0_MASK) >> + SAR_RST_PCIE1_CLOCK_CONFIG_CP0_OFFSET; + else + clk_dir = (reg & SAR_RST_PCIE0_CLOCK_CONFIG_CP0_MASK) >> + SAR_RST_PCIE0_CLOCK_CONFIG_CP0_OFFSET; + + debug("On lane %d\n", comphy_index); + debug("PCIe clock direction = %x\n", clk_dir); + debug("PCIe Width = %d\n", pcie_width); + + /* enable PCIe X4 and X2 */ + if (comphy_index == COMPHY_LANE0) { + if (pcie_width == PCIE_LNK_X4) { + data = 0x1 << COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET; + mask = COMMON_PHY_SD_CTRL1_PCIE_X4_EN_MASK; + reg_set(comphy_base + COMMON_PHY_SD_CTRL1, + data, mask); + } else if (pcie_width == PCIE_LNK_X2) { + data = 0x1 << COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET; + mask = COMMON_PHY_SD_CTRL1_PCIE_X2_EN_MASK; + reg_set(comphy_base + COMMON_PHY_SD_CTRL1, data, mask); + } + } + + /* If PCIe clock is output and clock source from SerDes lane 5, + * need to configure the clock-source MUX. 
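+ * (the MUX is the DFX_DEV_GEN_PCIE_CLK_SRC field of
+ * DFX_DEV_GEN_CTRL12_REG in the DFX block).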
+ * By default, the clock source is from lane 4 + */ + if (clk_dir && clk_src && (comphy_index == COMPHY_LANE5)) { + data = DFX_DEV_GEN_PCIE_CLK_SRC_MUX << + DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET; + mask = DFX_DEV_GEN_PCIE_CLK_SRC_MASK; + reg_set(DFX_FROM_COMPHY_ADDR(comphy_base) + + DFX_DEV_GEN_CTRL12_REG, data, mask); + } + + debug("stage: RFU configurations - hard reset comphy\n"); + /* RFU configurations - hard reset comphy */ + mask = COMMON_PHY_CFG1_PWR_UP_MASK; + data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET; + mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK; + data |= 0x1 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET; + mask |= COMMON_PHY_CFG1_PWR_ON_RESET_MASK; + data |= 0x0 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET; + mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK; + data |= 0x0 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET; + mask |= COMMON_PHY_PHY_MODE_MASK; + data |= 0x0 << COMMON_PHY_PHY_MODE_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); + + /* release from hard reset */ + mask = COMMON_PHY_CFG1_PWR_ON_RESET_MASK; + data = 0x1 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET; + mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK; + data |= 0x1 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); + + /* Wait 1ms - until band gap and ref clock ready */ + mdelay(1); + /* Start comphy Configuration */ + debug("stage: Comphy configuration\n"); + /* Set PIPE soft reset */ + mask = HPIPE_RST_CLK_CTRL_PIPE_RST_MASK; + data = 0x1 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET; + /* Set PHY datapath width mode for V0 */ + mask |= HPIPE_RST_CLK_CTRL_FIXED_PCLK_MASK; + data |= 0x1 << HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET; + /* Set Data bus width USB mode for V0 */ + mask |= HPIPE_RST_CLK_CTRL_PIPE_WIDTH_MASK; + data |= 0x0 << HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET; + /* Set CORE_CLK output frequency for 250Mhz */ + mask |= HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_MASK; + data |= 0x0 << HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET; + reg_set(hpipe_addr + HPIPE_RST_CLK_CTRL_REG, data, mask); + /* Set PLL ready delay for 0x2 */ + data = 0x2 << HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET; + mask = HPIPE_CLK_SRC_LO_PLL_RDY_DL_MASK; + if (pcie_width != PCIE_LNK_X1) { + data |= 0x1 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET; + mask |= HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_MASK; + data |= 0x1 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET; + mask |= HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_MASK; + } + reg_set(hpipe_addr + HPIPE_CLK_SRC_LO_REG, data, mask); + + /* Set PIPE mode interface to PCIe3 - 0x1 & set lane order */ + data = 0x1 << HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET; + mask = HPIPE_CLK_SRC_HI_MODE_PIPE_MASK; + if (pcie_width != PCIE_LNK_X1) { + mask |= HPIPE_CLK_SRC_HI_LANE_STRT_MASK; + mask |= HPIPE_CLK_SRC_HI_LANE_MASTER_MASK; + mask |= HPIPE_CLK_SRC_HI_LANE_BREAK_MASK; + if (comphy_index == 0) { + data |= 0x1 << HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET; + data |= 0x1 << HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET; + } else if (comphy_index == (pcie_width - 1)) { + data |= 0x1 << HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET; + } + } + reg_set(hpipe_addr + HPIPE_CLK_SRC_HI_REG, data, mask); + /* Config update polarity equalization */ + data = 0x1 << HPIPE_CFG_UPDATE_POLARITY_OFFSET; + mask = HPIPE_CFG_UPDATE_POLARITY_MASK; + reg_set(hpipe_addr + HPIPE_LANE_EQ_CFG1_REG, data, mask); + /* Set PIPE version 4 to mode enable */ + data = 0x1 << HPIPE_DFE_CTRL_28_PIPE4_OFFSET; + mask = HPIPE_DFE_CTRL_28_PIPE4_MASK; + reg_set(hpipe_addr + HPIPE_DFE_CTRL_28_REG, data, mask); + /* TODO: check if pcie clock is output/input - for bringup use input*/ + /* Enable PIN 
clock 100M_125M */ + mask = 0; + data = 0; + /* Only if clock is output, configure the clock-source mux */ + if (clk_dir) { + mask |= HPIPE_MISC_CLK100M_125M_MASK; + data |= 0x1 << HPIPE_MISC_CLK100M_125M_OFFSET; + } + /* Set PIN_TXDCLK_2X Clock Freq. Selection for outputs 500MHz clock */ + mask |= HPIPE_MISC_TXDCLK_2X_MASK; + data |= 0x0 << HPIPE_MISC_TXDCLK_2X_OFFSET; + /* Enable 500MHz Clock */ + mask |= HPIPE_MISC_CLK500_EN_MASK; + data |= 0x1 << HPIPE_MISC_CLK500_EN_OFFSET; + if (clk_dir) { /* output */ + /* Set reference clock comes from group 1 */ + mask |= HPIPE_MISC_REFCLK_SEL_MASK; + data |= 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET; + } else { + /* Set reference clock comes from group 2 */ + mask |= HPIPE_MISC_REFCLK_SEL_MASK; + data |= 0x1 << HPIPE_MISC_REFCLK_SEL_OFFSET; + } + mask |= HPIPE_MISC_ICP_FORCE_MASK; + data |= 0x1 << HPIPE_MISC_ICP_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_MISC_REG, data, mask); + if (clk_dir) { /* output */ + /* Set reference frequcency select - 0x2 for 25MHz*/ + mask = HPIPE_PWR_PLL_REF_FREQ_MASK; + data = 0x2 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; + } else { + /* Set reference frequcency select - 0x0 for 100MHz*/ + mask = HPIPE_PWR_PLL_REF_FREQ_MASK; + data = 0x0 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; + } + /* Set PHY mode to PCIe */ + mask |= HPIPE_PWR_PLL_PHY_MODE_MASK; + data |= 0x3 << HPIPE_PWR_PLL_PHY_MODE_OFFSET; + reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask); + + /* ref clock alignment */ + if (pcie_width != PCIE_LNK_X1) { + mask = HPIPE_LANE_ALIGN_OFF_MASK; + data = 0x0 << HPIPE_LANE_ALIGN_OFF_OFFSET; + reg_set(hpipe_addr + HPIPE_LANE_ALIGN_REG, data, mask); + } + + /* Set the amount of time spent in the LoZ state - set for 0x7 only if + * the PCIe clock is output + */ + if (clk_dir) + reg_set(hpipe_addr + HPIPE_GLOBAL_PM_CTRL, + 0x7 << HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET, + HPIPE_GLOBAL_PM_RXDLOZ_WAIT_MASK); + + /* Set Maximal PHY Generation Setting(8Gbps) */ + mask = HPIPE_INTERFACE_GEN_MAX_MASK; + data = 0x2 << HPIPE_INTERFACE_GEN_MAX_OFFSET; + /* Bypass frame detection and sync detection for RX DATA */ + mask |= HPIPE_INTERFACE_DET_BYPASS_MASK; + data |= 0x1 << HPIPE_INTERFACE_DET_BYPASS_OFFSET; + /* Set Link Train Mode (Tx training control pins are used) */ + mask |= HPIPE_INTERFACE_LINK_TRAIN_MASK; + data |= 0x1 << HPIPE_INTERFACE_LINK_TRAIN_OFFSET; + reg_set(hpipe_addr + HPIPE_INTERFACE_REG, data, mask); + + /* Set Idle_sync enable */ + mask = HPIPE_PCIE_IDLE_SYNC_MASK; + data = 0x1 << HPIPE_PCIE_IDLE_SYNC_OFFSET; + /* Select bits for PCIE Gen3(32bit) */ + mask |= HPIPE_PCIE_SEL_BITS_MASK; + data |= 0x2 << HPIPE_PCIE_SEL_BITS_OFFSET; + reg_set(hpipe_addr + HPIPE_PCIE_REG0, data, mask); + + /* Enable Tx_adapt_g1 */ + mask = HPIPE_TX_TRAIN_CTRL_G1_MASK; + data = 0x1 << HPIPE_TX_TRAIN_CTRL_G1_OFFSET; + /* Enable Tx_adapt_gn1 */ + mask |= HPIPE_TX_TRAIN_CTRL_GN1_MASK; + data |= 0x1 << HPIPE_TX_TRAIN_CTRL_GN1_OFFSET; + /* Disable Tx_adapt_g0 */ + mask |= HPIPE_TX_TRAIN_CTRL_G0_MASK; + data |= 0x0 << HPIPE_TX_TRAIN_CTRL_G0_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_REG, data, mask); + + /* Set reg_tx_train_chk_init */ + mask = HPIPE_TX_TRAIN_CHK_INIT_MASK; + data = 0x0 << HPIPE_TX_TRAIN_CHK_INIT_OFFSET; + /* Enable TX_COE_FM_PIN_PCIE3_EN */ + mask |= HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_MASK; + data |= 0x1 << HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_TRAIN_REG, data, mask); + + debug("stage: TRx training parameters\n"); + /* Set Preset sweep configurations */ + mask = HPIPE_TX_TX_STATUS_CHECK_MODE_MASK; + data = 
0x1 << HPIPE_TX_STATUS_CHECK_MODE_OFFSET; + mask |= HPIPE_TX_NUM_OF_PRESET_MASK; + data |= 0x7 << HPIPE_TX_NUM_OF_PRESET_OFFSET; + mask |= HPIPE_TX_SWEEP_PRESET_EN_MASK; + data |= 0x1 << HPIPE_TX_SWEEP_PRESET_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_11_REG, data, mask); + + /* Tx train start configuration */ + mask = HPIPE_TX_TRAIN_START_SQ_EN_MASK; + data = 0x1 << HPIPE_TX_TRAIN_START_SQ_EN_OFFSET; + mask |= HPIPE_TX_TRAIN_START_FRM_DET_EN_MASK; + data |= 0x0 << HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET; + mask |= HPIPE_TX_TRAIN_START_FRM_LOCK_EN_MASK; + data |= 0x0 << HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET; + mask |= HPIPE_TX_TRAIN_WAIT_TIME_EN_MASK; + data |= 0x1 << HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_5_REG, data, mask); + + /* Enable Tx train P2P */ + mask = HPIPE_TX_TRAIN_P2P_HOLD_MASK; + data = 0x1 << HPIPE_TX_TRAIN_P2P_HOLD_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_0_REG, data, mask); + + /* Configure Tx train timeout */ + mask = HPIPE_TRX_TRAIN_TIMER_MASK; + data = 0x17 << HPIPE_TRX_TRAIN_TIMER_OFFSET; + reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_4_REG, data, mask); + + /* Disable G0/G1/GN1 adaptation */ + mask = HPIPE_TX_TRAIN_CTRL_G1_MASK | HPIPE_TX_TRAIN_CTRL_GN1_MASK + | HPIPE_TX_TRAIN_CTRL_G0_OFFSET; + data = 0; + reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_REG, data, mask); + + /* Disable DTL frequency loop */ + mask = HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK; + data = 0x0 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask); + + /* Configure G3 DFE */ + mask = HPIPE_G3_DFE_RES_MASK; + data = 0x3 << HPIPE_G3_DFE_RES_OFFSET; + reg_set(hpipe_addr + HPIPE_G3_SETTING_4_REG, data, mask); + + /* Use TX/RX training result for DFE */ + mask = HPIPE_DFE_RES_FORCE_MASK; + data = 0x0 << HPIPE_DFE_RES_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask); + + /* Configure initial and final coefficient value for receiver */ + mask = HPIPE_G3_SET_1_G3_RX_SELMUPI_MASK; + data = 0x1 << HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET; + + mask |= HPIPE_G3_SET_1_G3_RX_SELMUPF_MASK; + data |= 0x1 << HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET; + + mask |= HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_MASK; + data |= 0x0 << HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_G3_SET_1_REG, data, mask); + + /* Trigger sampler enable pulse */ + mask = HPIPE_SMAPLER_MASK; + data = 0x1 << HPIPE_SMAPLER_OFFSET; + reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask); + udelay(5); + reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, 0, mask); + + /* FFE resistor tuning for different bandwidth */ + mask = HPIPE_G3_FFE_DEG_RES_LEVEL_MASK; + data = 0x1 << HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET; + mask |= HPIPE_G3_FFE_LOAD_RES_LEVEL_MASK; + data |= 0x3 << HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET; + reg_set(hpipe_addr + HPIPE_G3_SETTING_3_REG, data, mask); + + /* Pattern lock lost timeout disable */ + mask = HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_MASK; + data = 0x0 << HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_FRAME_DETECT_CTRL_3_REG, data, mask); + + /* Configure DFE adaptations */ + mask = HPIPE_CDR_RX_MAX_DFE_ADAPT_0_MASK; + data = 0x0 << HPIPE_CDR_RX_MAX_DFE_ADAPT_0_OFFSET; + mask |= HPIPE_CDR_RX_MAX_DFE_ADAPT_1_MASK; + data |= 0x0 << HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET; + mask |= HPIPE_CDR_MAX_DFE_ADAPT_0_MASK; + data |= 0x0 << HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET; + mask |= HPIPE_CDR_MAX_DFE_ADAPT_1_MASK; + data |= 0x1 << HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET; + 
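+ /*
+  * Write back the accumulated CDR DFE-adaptation settings; reg_set()
+  * is assumed to perform a read-modify-write that only changes the
+  * bits covered by 'mask'.
+  */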
reg_set(hpipe_addr + HPIPE_CDR_CONTROL_REG, data, mask); + + mask = HPIPE_DFE_TX_MAX_DFE_ADAPT_MASK; + data = 0x0 << HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET; + reg_set(hpipe_addr + HPIPE_DFE_CONTROL_REG, data, mask); + + /* Genration 2 setting 1*/ + mask = HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK; + data = 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET; + mask |= HPIPE_G2_SET_1_G2_RX_SELMUPF_MASK; + data |= 0x1 << HPIPE_G2_SET_1_G2_RX_SELMUPF_OFFSET; + mask |= HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK; + data |= 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET; + reg_set(hpipe_addr + HPIPE_G2_SET_1_REG, data, mask); + + /* DFE enable */ + mask = HPIPE_G2_DFE_RES_MASK; + data = 0x3 << HPIPE_G2_DFE_RES_OFFSET; + reg_set(hpipe_addr + HPIPE_G2_SETTINGS_4_REG, data, mask); + + /* Configure DFE Resolution */ + mask = HPIPE_LANE_CFG4_DFE_EN_SEL_MASK; + data = 0x1 << HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET; + reg_set(hpipe_addr + HPIPE_LANE_CFG4_REG, data, mask); + + /* VDD calibration control */ + mask = HPIPE_EXT_SELLV_RXSAMPL_MASK; + data = 0x16 << HPIPE_EXT_SELLV_RXSAMPL_OFFSET; + reg_set(hpipe_addr + HPIPE_VDD_CAL_CTRL_REG, data, mask); + + /* Set PLL Charge-pump Current Control */ + mask = HPIPE_G3_SETTING_5_G3_ICP_MASK; + data = 0x4 << HPIPE_G3_SETTING_5_G3_ICP_OFFSET; + reg_set(hpipe_addr + HPIPE_G3_SETTING_5_REG, data, mask); + + /* Set lane rqualization remote setting */ + mask = HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_MASK; + data = 0x1 << HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET; + mask |= HPIPE_LANE_CFG_FOM_ONLY_MODE_MASK; + data |= 0x1 << HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET; + mask |= HPIPE_LANE_CFG_FOM_PRESET_VECTOR_MASK; + data |= 0x6 << HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET; + reg_set(hpipe_addr + HPIPE_LANE_EQ_REMOTE_SETTING_REG, data, mask); + + mask = HPIPE_CFG_EQ_BUNDLE_DIS_MASK; + data = 0x1 << HPIPE_CFG_EQ_BUNDLE_DIS_OFFSET; + reg_set(hpipe_addr + HPIPE_LANE_EQ_CFG2_REG, data, mask); + + debug("stage: Comphy power up\n"); + + /* For PCIe X4 or X2: + * release from reset only after finish to configure all lanes + */ + if ((pcie_width == PCIE_LNK_X1) || (comphy_index == (pcie_width - 1))) { + uint32_t i, start_lane, end_lane; + + if (pcie_width != PCIE_LNK_X1) { + /* allows writing to all lanes in one write */ + data = 0x0; + if (pcie_width == PCIE_LNK_X2) + mask = COMMON_PHY_SD_CTRL1_COMPHY_0_1_PORT_MASK; + else if (pcie_width == PCIE_LNK_X4) + mask = COMMON_PHY_SD_CTRL1_COMPHY_0_3_PORT_MASK; + reg_set(comphy_base + COMMON_PHY_SD_CTRL1, data, mask); + start_lane = 0; + end_lane = pcie_width; + + /* Release from PIPE soft reset + * For PCIe by4 or by2: + * release from soft reset all lanes - can't use + * read modify write + */ + reg_set(HPIPE_ADDR( + COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), 0) + + HPIPE_RST_CLK_CTRL_REG, 0x24, 0xffffffff); + } else { + start_lane = comphy_index; + end_lane = comphy_index + 1; + + /* Release from PIPE soft reset + * for PCIe by4 or by2: + * release from soft reset all lanes + */ + reg_set(hpipe_addr + HPIPE_RST_CLK_CTRL_REG, + 0x0 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET, + HPIPE_RST_CLK_CTRL_PIPE_RST_MASK); + } + + if (pcie_width != PCIE_LNK_X1) { + /* disable writing to all lanes with one write */ + if (pcie_width == PCIE_LNK_X2) { + data = (COMPHY_LANE0 << + COMMON_PHY_SD_CTRL1_COMPHY_0_PORT_OFFSET) | + (COMPHY_LANE1 << + COMMON_PHY_SD_CTRL1_COMPHY_1_PORT_OFFSET); + mask = COMMON_PHY_SD_CTRL1_COMPHY_0_1_PORT_MASK; + } else if (pcie_width == PCIE_LNK_X4) { + data = (COMPHY_LANE0 << + COMMON_PHY_SD_CTRL1_COMPHY_0_PORT_OFFSET) | + (COMPHY_LANE1 << + 
COMMON_PHY_SD_CTRL1_COMPHY_1_PORT_OFFSET) | + (COMPHY_LANE2 << + COMMON_PHY_SD_CTRL1_COMPHY_2_PORT_OFFSET) | + (COMPHY_LANE3 << + COMMON_PHY_SD_CTRL1_COMPHY_3_PORT_OFFSET); + mask = COMMON_PHY_SD_CTRL1_COMPHY_0_3_PORT_MASK; + } + reg_set(comphy_base + COMMON_PHY_SD_CTRL1, + data, mask); + } + + debug("stage: Check PLL\n"); + /* Read lane status */ + for (i = start_lane; i < end_lane; i++) { + addr = HPIPE_ADDR( + COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), i) + + HPIPE_LANE_STATUS1_REG; + data = HPIPE_LANE_STATUS1_PCLK_EN_MASK; + mask = data; + data = polling_with_timeout(addr, data, mask, + PLL_LOCK_TIMEOUT, + REG_32BIT); + if (data) { + ERROR("Failed to lock PCIE PLL\n"); + ret = -ETIMEDOUT; + } + } + } + + debug_exit(); + + return ret; +} + +static int mvebu_cp110_comphy_rxaui_power_on(uint64_t comphy_base, + uint8_t comphy_index, uint32_t comphy_mode) +{ + uintptr_t hpipe_addr, sd_ip_addr, comphy_addr, addr; + uint32_t mask, data; + int ret = 0; + + debug_enter(); + + hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + comphy_addr = COMPHY_ADDR(comphy_base, comphy_index); + sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + + /* configure phy selector for RXAUI */ + mvebu_cp110_comphy_set_phy_selector(comphy_base, comphy_index, + comphy_mode); + + /* RFU configurations - hard reset comphy */ + mask = COMMON_PHY_CFG1_PWR_UP_MASK; + data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET; + mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK; + data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); + + if (comphy_index == 2) { + reg_set(comphy_base + COMMON_PHY_SD_CTRL1, + 0x1 << COMMON_PHY_SD_CTRL1_RXAUI0_OFFSET, + COMMON_PHY_SD_CTRL1_RXAUI0_MASK); + } + if (comphy_index == 4) { + reg_set(comphy_base + COMMON_PHY_SD_CTRL1, + 0x1 << COMMON_PHY_SD_CTRL1_RXAUI1_OFFSET, + COMMON_PHY_SD_CTRL1_RXAUI1_MASK); + } + + /* Select Baud Rate of Comphy And PD_PLL/Tx/Rx */ + mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK; + data = 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_MASK; + data |= 0xB << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_MASK; + data |= 0xB << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_MEDIA_MODE_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG0_MEDIA_MODE_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask); + + /* release from hard reset */ + mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; + data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + /* Wait 1ms - until band gap and ref clock ready */ + mdelay(1); + + /* Start comphy Configuration */ + 
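+ /*
+  * The block below selects the reference clock, programs the PLL and
+  * PHY mode, and loads the ETP (HW) analog defaults before powering
+  * up PLL/Tx/Rx.
+  */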
debug("stage: Comphy configuration\n"); + /* set reference clock */ + reg_set(hpipe_addr + HPIPE_MISC_REG, + 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET, + HPIPE_MISC_REFCLK_SEL_MASK); + /* Power and PLL Control */ + mask = HPIPE_PWR_PLL_REF_FREQ_MASK; + data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; + mask |= HPIPE_PWR_PLL_PHY_MODE_MASK; + data |= 0x4 << HPIPE_PWR_PLL_PHY_MODE_OFFSET; + reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask); + /* Loopback register */ + reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, + 0x1 << HPIPE_LOOPBACK_SEL_OFFSET, HPIPE_LOOPBACK_SEL_MASK); + /* rx control 1 */ + mask = HPIPE_RX_CONTROL_1_RXCLK2X_SEL_MASK; + data = 0x1 << HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET; + mask |= HPIPE_RX_CONTROL_1_CLK8T_EN_MASK; + data |= 0x1 << HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_RX_CONTROL_1_REG, data, mask); + /* DTL Control */ + reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, + 0x0 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET, + HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK); + + /* Set analog parameters from ETP(HW) */ + debug("stage: Analog parameters from ETP(HW)\n"); + /* SERDES External Configuration 2 */ + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG2_REG, + 0x1 << SD_EXTERNAL_CONFIG2_PIN_DFE_EN_OFFSET, + SD_EXTERNAL_CONFIG2_PIN_DFE_EN_MASK); + /* 0x7-DFE Resolution control */ + reg_set(hpipe_addr + HPIPE_DFE_REG0, 0x1 << HPIPE_DFE_RES_FORCE_OFFSET, + HPIPE_DFE_RES_FORCE_MASK); + /* 0xd-G1_Setting_0 */ + reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, + 0xd << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET, + HPIPE_G1_SET_0_G1_TX_EMPH1_MASK); + /* 0xE-G1_Setting_1 */ + mask = HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK; + data = 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_SELMUPF_MASK; + data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET; + mask |= HPIPE_G1_SET_1_G1_RX_DFE_EN_MASK; + data |= 0x1 << HPIPE_G1_SET_1_G1_RX_DFE_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SET_1_REG, data, mask); + /* 0xA-DFE_Reg3 */ + mask = HPIPE_DFE_F3_F5_DFE_EN_MASK; + data = 0x0 << HPIPE_DFE_F3_F5_DFE_EN_OFFSET; + mask |= HPIPE_DFE_F3_F5_DFE_CTRL_MASK; + data |= 0x0 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET; + reg_set(hpipe_addr + HPIPE_DFE_F3_F5_REG, data, mask); + + /* 0x111-G1_Setting_4 */ + mask = HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK; + data = 0x1 << HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_4_REG, data, mask); + + debug("stage: RFU configurations- Power Up PLL,Tx,Rx\n"); + /* SERDES External Configuration */ + mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK; + data = 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET; + mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask); + + + /* check PLL rx & tx ready */ + addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG; + data = SD_EXTERNAL_STATUS0_PLL_RX_MASK | + SD_EXTERNAL_STATUS0_PLL_TX_MASK; + mask = data; + data = polling_with_timeout(addr, data, mask, 15000, REG_32BIT); + if (data != 0) { + debug("Read from reg = %lx - value = 0x%x\n", + sd_ip_addr + SD_EXTERNAL_STATUS0_REG, data); + ERROR("SD_EXTERNAL_STATUS0_PLL_RX is %d, -\"-_PLL_TX is %d\n", + (data & SD_EXTERNAL_STATUS0_PLL_RX_MASK), + (data & SD_EXTERNAL_STATUS0_PLL_TX_MASK)); + ret = -ETIMEDOUT; + } + + /* RX init */ + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, + 0x1 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET, + SD_EXTERNAL_CONFIG1_RX_INIT_MASK); + + /* check that RX init done */ + addr = 
sd_ip_addr + SD_EXTERNAL_STATUS0_REG; + data = SD_EXTERNAL_STATUS0_RX_INIT_MASK; + mask = data; + data = polling_with_timeout(addr, data, mask, 100, REG_32BIT); + if (data != 0) { + debug("Read from reg = %lx - value = 0x%x\n", + sd_ip_addr + SD_EXTERNAL_STATUS0_REG, data); + ERROR("SD_EXTERNAL_STATUS0_RX_INIT is 0\n"); + ret = -ETIMEDOUT; + } + + debug("stage: RF Reset\n"); + /* RF Reset */ + mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK; + data = 0x0 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; + data |= 0x1 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + debug_exit(); + + return ret; +} + +static int mvebu_cp110_comphy_usb3_power_on(uint64_t comphy_base, + uint8_t comphy_index, uint32_t comphy_mode) +{ + uintptr_t hpipe_addr, comphy_addr, addr; + uint32_t mask, data; + uint8_t ap_nr, cp_nr, phy_polarity_invert; + int ret = 0; + + debug_enter(); + + /* Configure PIPE selector for USB3 */ + mvebu_cp110_comphy_set_pipe_selector(comphy_base, comphy_index, + comphy_mode); + + mvebu_cp110_get_ap_and_cp_nr(&ap_nr, &cp_nr, comphy_base); + + const struct usb_params *usb_static_values = + &usb_static_values_tab[ap_nr][cp_nr][comphy_index]; + + phy_polarity_invert = usb_static_values->polarity_invert; + + hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + comphy_addr = COMPHY_ADDR(comphy_base, comphy_index); + + debug("stage: RFU configurations - hard reset comphy\n"); + /* RFU configurations - hard reset comphy */ + mask = COMMON_PHY_CFG1_PWR_UP_MASK; + data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET; + mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK; + data |= 0x1 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET; + mask |= COMMON_PHY_CFG1_PWR_ON_RESET_MASK; + data |= 0x0 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET; + mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK; + data |= 0x0 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET; + mask |= COMMON_PHY_PHY_MODE_MASK; + data |= 0x1 << COMMON_PHY_PHY_MODE_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); + + /* release from hard reset */ + mask = COMMON_PHY_CFG1_PWR_ON_RESET_MASK; + data = 0x1 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET; + mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK; + data |= 0x1 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); + + /* Wait 1ms - until band gap and ref clock ready */ + mdelay(1); + + /* Start comphy Configuration */ + debug("stage: Comphy configuration\n"); + /* Set PIPE soft reset */ + mask = HPIPE_RST_CLK_CTRL_PIPE_RST_MASK; + data = 0x1 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET; + /* Set PHY datapath width mode for V0 */ + mask |= HPIPE_RST_CLK_CTRL_FIXED_PCLK_MASK; + data |= 0x0 << HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET; + /* Set Data bus width USB mode for V0 */ + mask |= HPIPE_RST_CLK_CTRL_PIPE_WIDTH_MASK; + data |= 0x0 << HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET; + /* Set CORE_CLK output frequency for 250Mhz */ + mask |= HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_MASK; + data |= 0x0 << HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET; + reg_set(hpipe_addr + HPIPE_RST_CLK_CTRL_REG, data, mask); + /* Set PLL ready delay for 0x2 */ + reg_set(hpipe_addr + HPIPE_CLK_SRC_LO_REG, + 0x2 << HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET, + HPIPE_CLK_SRC_LO_PLL_RDY_DL_MASK); + /* Set reference clock to come from group 1 - 25Mhz */ + reg_set(hpipe_addr + HPIPE_MISC_REG, + 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET, + HPIPE_MISC_REFCLK_SEL_MASK); + /* Set reference frequcency select - 0x2 */ + mask = HPIPE_PWR_PLL_REF_FREQ_MASK; + data = 
0x2 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; + /* Set PHY mode to USB - 0x5 */ + mask |= HPIPE_PWR_PLL_PHY_MODE_MASK; + data |= 0x5 << HPIPE_PWR_PLL_PHY_MODE_OFFSET; + reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask); + /* Set the amount of time spent in the LoZ state - set for 0x7 */ + reg_set(hpipe_addr + HPIPE_GLOBAL_PM_CTRL, + 0x7 << HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET, + HPIPE_GLOBAL_PM_RXDLOZ_WAIT_MASK); + /* Set max PHY generation setting - 5Gbps */ + reg_set(hpipe_addr + HPIPE_INTERFACE_REG, + 0x1 << HPIPE_INTERFACE_GEN_MAX_OFFSET, + HPIPE_INTERFACE_GEN_MAX_MASK); + /* Set select data width 20Bit (SEL_BITS[2:0]) */ + reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, + 0x1 << HPIPE_LOOPBACK_SEL_OFFSET, + HPIPE_LOOPBACK_SEL_MASK); + /* select de-emphasize 3.5db */ + reg_set(hpipe_addr + HPIPE_LANE_CONFIG0_REG, + 0x1 << HPIPE_LANE_CONFIG0_TXDEEMPH0_OFFSET, + HPIPE_LANE_CONFIG0_TXDEEMPH0_MASK); + /* override tx margining from the MAC */ + reg_set(hpipe_addr + HPIPE_TST_MODE_CTRL_REG, + 0x1 << HPIPE_TST_MODE_CTRL_MODE_MARGIN_OFFSET, + HPIPE_TST_MODE_CTRL_MODE_MARGIN_MASK); + + /* The polarity inversion for USB was not tested due to lack of hw + * design which requires it. Support is added for customer needs. + */ + if (phy_polarity_invert) + mvebu_cp110_polarity_invert(hpipe_addr + HPIPE_SYNC_PATTERN_REG, + phy_polarity_invert); + + /* Start analog parameters from ETP(HW) */ + debug("stage: Analog parameters from ETP(HW)\n"); + /* Set Pin DFE_PAT_DIS -> Bit[1]: PIN_DFE_PAT_DIS = 0x0 */ + mask = HPIPE_LANE_CFG4_DFE_CTRL_MASK; + data = 0x1 << HPIPE_LANE_CFG4_DFE_CTRL_OFFSET; + /* Set Override PHY DFE control pins for 0x1 */ + mask |= HPIPE_LANE_CFG4_DFE_OVER_MASK; + data |= 0x1 << HPIPE_LANE_CFG4_DFE_OVER_OFFSET; + /* Set Spread Spectrum Clock Enable fot 0x1 */ + mask |= HPIPE_LANE_CFG4_SSC_CTRL_MASK; + data |= 0x1 << HPIPE_LANE_CFG4_SSC_CTRL_OFFSET; + reg_set(hpipe_addr + HPIPE_LANE_CFG4_REG, data, mask); + /* Configure SSC amplitude */ + mask = HPIPE_G2_TX_SSC_AMP_MASK; + data = 0x1f << HPIPE_G2_TX_SSC_AMP_OFFSET; + reg_set(hpipe_addr + HPIPE_G2_SET_2_REG, data, mask); + /* End of analog parameters */ + + debug("stage: Comphy power up\n"); + /* Release from PIPE soft reset */ + reg_set(hpipe_addr + HPIPE_RST_CLK_CTRL_REG, + 0x0 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET, + HPIPE_RST_CLK_CTRL_PIPE_RST_MASK); + + /* wait 15ms - for comphy calibration done */ + debug("stage: Check PLL\n"); + /* Read lane status */ + addr = hpipe_addr + HPIPE_LANE_STATUS1_REG; + data = HPIPE_LANE_STATUS1_PCLK_EN_MASK; + mask = data; + data = polling_with_timeout(addr, data, mask, 15000, REG_32BIT); + if (data != 0) { + debug("Read from reg = %lx - value = 0x%x\n", + hpipe_addr + HPIPE_LANE_STATUS1_REG, data); + ERROR("HPIPE_LANE_STATUS1_PCLK_EN_MASK is 0\n"); + ret = -ETIMEDOUT; + } + + debug_exit(); + + return ret; +} + +static void rx_pre_train(uint64_t comphy_base, uint8_t comphy_index) +{ + uintptr_t hpipe_addr; + uint32_t mask, data; + + hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + + debug("rx_training preparation\n\n"); + + mask = HPIPE_TRX0_GAIN_TRAIN_WITH_C_MASK; + data = (0x1 << HPIPE_TRX0_GAIN_TRAIN_WITH_C_OFF); + mask |= HPIPE_TRX0_GAIN_TRAIN_WITH_SAMPLER_MASK; + data |= (0x0 << HPIPE_TRX0_GAIN_TRAIN_WITH_SAMPLER_OFF); + reg_set(hpipe_addr + HPIPE_TRX0_REG, data, mask); + + + mask = HPIPE_TRX_REG2_SUMF_BOOST_TARGET_C_MASK; + data = (0x1e << HPIPE_TRX_REG2_SUMF_BOOST_TARGET_C_OFF); + mask |= HPIPE_TRX_REG2_SUMF_BOOST_TARGET_K_MASK; + data |= (0x0 << 
HPIPE_TRX_REG2_SUMF_BOOST_TARGET_K_OFF); + reg_set(hpipe_addr + HPIPE_TRX_REG2, data, mask); + + mask = HPIPE_TRX_REG1_MIN_BOOST_MODE_MASK; + data = (0x1 << HPIPE_TRX_REG1_MIN_BOOST_MODE_OFF); + reg_set(hpipe_addr + HPIPE_TRX_REG1, data, mask); + + mask = HPIPE_CRD2_CRD_MIDPOINT_SMALL_THRES_K_MASK; + data = (0x8 << HPIPE_CRD2_CRD_MIDPOINT_SMALL_THRES_K_OFF); + reg_set(hpipe_addr + HPIPE_CDR_CONTROL1_REG, data, mask); + + mask = HPIPE_CRD2_CRD_MIDPOINT_LARGE_THRES_K_MASK; + data = (0x8 << HPIPE_CRD2_CRD_MIDPOINT_LARGE_THRES_K_OFF); + reg_set(hpipe_addr + HPIPE_CDR_CONTROL2_REG, data, mask); + + mask = HPIPE_CRD_MIDPOINT_PHASE_OS_MASK; + data = (0x0 << HPIPE_CRD_MIDPOINT_PHASE_OS_OFFSET); + reg_set(hpipe_addr + HPIPE_CDR_CONTROL_REG, data, mask); + + mask = HPIPE_TRX_REG1_SUMFTAP_EN_MASK; + data = (0x38 << HPIPE_TRX_REG1_SUMFTAP_EN_OFF); + mask |= HPIPE_TRX_REG2_SUMF_BOOST_TARGET_C_MASK; + data |= (0x1e << HPIPE_TRX_REG2_SUMF_BOOST_TARGET_C_OFF); + reg_set(hpipe_addr + HPIPE_TRX_REG1, data, mask); +} + +int mvebu_cp110_comphy_xfi_rx_training(uint64_t comphy_base, + uint8_t comphy_index) +{ + uint32_t mask, data, timeout; + uint32_t g1_ffe_cap_sel, g1_ffe_res_sel, align90, g1_dfe_res; + uintptr_t hpipe_addr; + + uint8_t ap_nr, cp_nr; + + mvebu_cp110_get_ap_and_cp_nr(&ap_nr, &cp_nr, comphy_base); + + hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + + debug_enter(); + + rx_pre_train(comphy_base, comphy_index); + + debug("Preparation for rx_training\n\n"); + + /* Use the FFE table */ + mask = HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK; + data = 0 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask); + + /* Use auto-calibration value */ + mask = HPIPE_CAL_RXCLKALIGN_90_EXT_EN_MASK; + data = 0 << HPIPE_CAL_RXCLKALIGN_90_EXT_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIB_CTRL_REG, + data, mask); + + /* Use Tx/Rx training results */ + mask = HPIPE_DFE_RES_FORCE_MASK; + data = 0 << HPIPE_DFE_RES_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask); + + debug("Enable RX training\n\n"); + + mask = HPIPE_TRX_RX_TRAIN_EN_MASK; + data = 0x1 << HPIPE_TRX_RX_TRAIN_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_TRX_TRAIN_CTRL_0_REG, data, mask); + + /* Check the result of RX training */ + timeout = RX_TRAINING_TIMEOUT; + mask = HPIPE_INTERRUPT_TRX_TRAIN_DONE_OFFSET | + HPIPE_INTERRUPT_DFE_DONE_INT_OFFSET | + HPIPE_INTERRUPT_RX_TRAIN_COMPLETE_INT_MASK; + while (timeout) { + data = mmio_read_32(hpipe_addr + HPIPE_INTERRUPT_1_REGISTER); + if (data & mask) + break; + mdelay(1); + timeout--; + } + + debug("RX training result: interrupt reg 0x%lx = 0x%x\n\n", + hpipe_addr + HPIPE_INTERRUPT_1_REGISTER, data); + + if (timeout == 0 || data & HPIPE_TRX_TRAIN_TIME_OUT_INT_MASK) { + ERROR("Rx training timeout...\n"); + return -ETIMEDOUT; + } + + if (data & HPIPE_TRX_TRAIN_FAILED_MASK) { + ERROR("Rx training failed...\n"); + return -EINVAL; + } + + mask = HPIPE_TRX_RX_TRAIN_EN_MASK; + data = 0x0 << HPIPE_TRX_RX_TRAIN_EN_OFFSET; + reg_set(hpipe_addr + HPIPE_TRX_TRAIN_CTRL_0_REG, data, mask); + + debug("Training done, reading results...\n\n"); + + mask = HPIPE_ADAPTED_FFE_ADAPTED_FFE_RES_MASK; + g1_ffe_res_sel = ((mmio_read_32(hpipe_addr + + HPIPE_ADAPTED_FFE_CAPACITOR_COUNTER_CTRL_REG) + & mask) >> HPIPE_ADAPTED_FFE_ADAPTED_FFE_RES_OFFSET); + + mask = HPIPE_ADAPTED_FFE_ADAPTED_FFE_CAP_MASK; + g1_ffe_cap_sel = ((mmio_read_32(hpipe_addr + + HPIPE_ADAPTED_FFE_CAPACITOR_COUNTER_CTRL_REG) + & mask) 
>> HPIPE_ADAPTED_FFE_ADAPTED_FFE_CAP_OFFSET); + + mask = HPIPE_DATA_PHASE_ADAPTED_OS_PH_MASK; + align90 = ((mmio_read_32(hpipe_addr + HPIPE_DATA_PHASE_OFF_CTRL_REG) + & mask) >> HPIPE_DATA_PHASE_ADAPTED_OS_PH_OFFSET); + + mask = HPIPE_ADAPTED_DFE_RES_MASK; + g1_dfe_res = ((mmio_read_32(hpipe_addr + + HPIPE_ADAPTED_DFE_COEFFICIENT_1_REG) + & mask) >> HPIPE_ADAPTED_DFE_RES_OFFSET); + + debug("================================================\n"); + debug("Switching to static configuration:\n"); + debug("FFE_RES = 0x%x FFE_CAP = 0x%x align90 = 0x%x g1_dfe_res 0x%x\n", + g1_ffe_res_sel, g1_ffe_cap_sel, align90, g1_dfe_res); + debug("Result after training: 0x%lx= 0x%x, 0x%lx= 0x%x, 0x%lx = 0x%x\n", + (hpipe_addr + HPIPE_ADAPTED_FFE_CAPACITOR_COUNTER_CTRL_REG), + mmio_read_32(hpipe_addr + + HPIPE_ADAPTED_FFE_CAPACITOR_COUNTER_CTRL_REG), + (hpipe_addr + HPIPE_DATA_PHASE_OFF_CTRL_REG), + mmio_read_32(hpipe_addr + HPIPE_DATA_PHASE_OFF_CTRL_REG), + (hpipe_addr + HPIPE_ADAPTED_DFE_COEFFICIENT_1_REG), + mmio_read_32(hpipe_addr + HPIPE_ADAPTED_DFE_COEFFICIENT_1_REG)); + debug("================================================\n"); + + /* Update FFE_RES */ + mask = HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK; + data = g1_ffe_res_sel << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask); + + /* Update FFE_CAP */ + mask = HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK; + data = g1_ffe_cap_sel << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask); + + /* Bypass the FFE table settings and use the FFE settings directly from + * registers FFE_RES_SEL and FFE_CAP_SEL + */ + mask = HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK; + data = 1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask); + + /* Force DFE resolution (use gen table value) */ + mask = HPIPE_DFE_RES_FORCE_MASK; + data = 0x1 << HPIPE_DFE_RES_FORCE_OFFSET; + reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask); + + /* 0x111-G1 DFE_Setting_4 */ + mask = HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK; + data = g1_dfe_res << HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET; + reg_set(hpipe_addr + HPIPE_G1_SETTINGS_4_REG, data, mask); + + printf("########################################################\n"); + printf("# To use trained values update the ATF sources:\n"); + printf("# plat/marvell/armada/a8k/<board_type>/board/phy-porting-layer.h "); + printf("file\n# with new values as below (for appropriate AP nr %d", + ap_nr); + printf("and CP nr: %d comphy_index %d\n\n", + cp_nr, comphy_index); + printf("static struct xfi_params xfi_static_values_tab[AP_NUM]"); + printf("[CP_NUM][MAX_LANE_NR] = {\n"); + printf("\t...\n"); + printf("\t.g1_ffe_res_sel = 0x%x,\n", g1_ffe_res_sel); + printf("\t.g1_ffe_cap_sel = 0x%x,\n", g1_ffe_cap_sel); + printf("\t.align90 = 0x%x,\n", align90); + printf("\t.g1_dfe_res = 0x%x\n", g1_dfe_res); + printf("\t...\n"); + printf("};\n\n"); + printf("########################################################\n"); + + rx_trainng_done[ap_nr][cp_nr][comphy_index] = 1; + + return 0; +} + +/* During AP the proper mode is auto-negotiated and the mac, pcs and serdes + * configuration are done by the firmware loaded to the MG's CM3 for appropriate + * negotiated mode. Therefore there is no need to configure the mac, pcs and + * serdes from u-boot. The only thing that need to be setup is powering up + * the comphy, which is done through Common PHY<n> Configuration 1 Register + * (CP0: 0xF2441000, CP1: 0xF4441000). 
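+ * In practice this means setting the PHY power-up bit and clearing the
+ * PIPE select bit in that register for the lane, as done in
+ * mvebu_cp110_comphy_ap_power_on() below.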
This step can't be done by MG's CM3, + * since it doesn't have an access to this register-set (but it has access to + * the network registers like: MG, AP, MAC, PCS, Serdes etc.) + */ +static int mvebu_cp110_comphy_ap_power_on(uint64_t comphy_base, + uint8_t comphy_index, + uint32_t comphy_mode) +{ + uint32_t mask, data; + uintptr_t comphy_addr = comphy_addr = + COMPHY_ADDR(comphy_base, comphy_index); + + /* configure phy selector for XFI/SFI */ + mvebu_cp110_comphy_set_phy_selector(comphy_base, comphy_index, + comphy_mode); + debug_enter(); + debug("stage: RFU configurations - hard reset comphy\n"); + /* RFU configurations - hard reset comphy */ + mask = COMMON_PHY_CFG1_PWR_UP_MASK; + data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET; + mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK; + data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET; + reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); + debug_exit(); + +#if MSS_SUPPORT + do { + uint8_t ap_nr, cp_nr; + + /* start ap fw */ + mvebu_cp110_get_ap_and_cp_nr(&ap_nr, &cp_nr, comphy_base); + mg_start_ap_fw(cp_nr, comphy_index); + + } while (0); +#endif + return 0; +} + +/* + * This function allows to reset the digital synchronizers between + * the MAC and the PHY, it is required when the MAC changes its state. + */ +int mvebu_cp110_comphy_digital_reset(uint64_t comphy_base, + uint8_t comphy_index, + uint32_t comphy_mode, uint32_t command) +{ + int mode = COMPHY_GET_MODE(comphy_mode); + uintptr_t sd_ip_addr; + uint32_t mask, data; + + sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + + switch (mode) { + case (COMPHY_SGMII_MODE): + case (COMPHY_2500BASEX_MODE): + case (COMPHY_XFI_MODE): + case (COMPHY_SFI_MODE): + case (COMPHY_RXAUI_MODE): + mask = SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; + data = ((command == COMPHY_COMMAND_DIGITAL_PWR_OFF) ? 
+ 0x0 : 0x1) << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + break; + default: + ERROR("comphy%d: Digital PWR ON/OFF is not supported\n", + comphy_index); + return -EINVAL; + } + + return 0; +} + +int mvebu_cp110_comphy_power_on(uint64_t comphy_base, + uint8_t comphy_index, + uint64_t comphy_mode, + uint64_t comphy_train_base) +{ + int mode = COMPHY_GET_MODE(comphy_mode); + int err = 0; + + debug_enter(); + + switch (mode) { + case(COMPHY_SATA_MODE): + err = mvebu_cp110_comphy_sata_power_on(comphy_base, + comphy_index, + comphy_mode); + break; + case(COMPHY_SGMII_MODE): + case(COMPHY_2500BASEX_MODE): + err = mvebu_cp110_comphy_sgmii_power_on(comphy_base, + comphy_index, + comphy_mode); + break; + /* From comphy perspective, XFI and SFI are the same */ + case (COMPHY_XFI_MODE): + case (COMPHY_SFI_MODE): + err = mvebu_cp110_comphy_xfi_power_on(comphy_base, + comphy_index, + comphy_mode, + comphy_train_base); + break; + case (COMPHY_PCIE_MODE): + err = mvebu_cp110_comphy_pcie_power_on(comphy_base, + comphy_index, + comphy_mode); + break; + case (COMPHY_RXAUI_MODE): + err = mvebu_cp110_comphy_rxaui_power_on(comphy_base, + comphy_index, + comphy_mode); + break; + case (COMPHY_USB3H_MODE): + case (COMPHY_USB3D_MODE): + err = mvebu_cp110_comphy_usb3_power_on(comphy_base, + comphy_index, + comphy_mode); + break; + case (COMPHY_AP_MODE): + err = mvebu_cp110_comphy_ap_power_on(comphy_base, comphy_index, + comphy_mode); + break; + default: + ERROR("comphy%d: unsupported comphy mode\n", comphy_index); + err = -EINVAL; + break; + } + + debug_exit(); + + return err; +} + +int mvebu_cp110_comphy_power_off(uint64_t comphy_base, uint8_t comphy_index, + uint64_t comphy_mode) +{ + uintptr_t sd_ip_addr, comphy_ip_addr; + uint32_t mask, data; + uint8_t ap_nr, cp_nr; + _Bool called_from_uboot = COMPHY_GET_CALLER(comphy_mode); + + debug_enter(); + + /* Power-off might happen because of 2 things: + * 1. Bootloader turns off unconnected lanes + * 2. Linux turns off all lanes during boot + * (and then reconfigure it). + * + * For PCIe, there's a problem: + * In Armada 8K DB boards, PCIe initialization can be executed + * only once (PCIe reset performed during chip power on and + * it cannot be executed via GPIO later) so a lane configured to + * PCIe should not be powered off by Linux. + * + * So, check 2 things: + * 1. Is Linux called for power-off? + * 2. Is the comphy configured to PCIe? + * If the answer is YES for both 1 and 2, skip the power-off. + * + * TODO: In MacciatoBIN, PCIe reset is connected via GPIO, + * so after GPIO reset is added to Linux Kernel, it can be + * powered-off. 
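+ * The PCIe case is detected below by reading this lane's field of the
+ * PIPE selector register and comparing it with
+ * COMMON_SELECTOR_PIPE_COMPHY_PCIE.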
+ */ + if (!called_from_uboot) { + data = mmio_read_32(comphy_base + + COMMON_SELECTOR_PIPE_REG_OFFSET); + data >>= (COMMON_SELECTOR_COMPHYN_FIELD_WIDTH * comphy_index); + data &= COMMON_SELECTOR_COMPHY_MASK; + if (data == COMMON_SELECTOR_PIPE_COMPHY_PCIE) + return 0; + } + + mvebu_cp110_get_ap_and_cp_nr(&ap_nr, &cp_nr, comphy_base); + + if (rx_trainng_done[ap_nr][cp_nr][comphy_index]) { + debug("Skip %s for comphy[%d][%d][%d], due to rx training\n", + __func__, ap_nr, cp_nr, comphy_index); + return 0; + } + + sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base), + comphy_index); + comphy_ip_addr = COMPHY_ADDR(comphy_base, comphy_index); + + /* Hard reset the comphy, for Ethernet modes and Sata */ + mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; + data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; + mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; + data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; + reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + + /* PCIe reset */ + spin_lock(&cp110_mac_reset_lock); + + /* The mvebu_cp110_comphy_power_off will be called only from Linux (to + * override settings done by bootloader) and it will be relevant only + * to PCIe (called before check if to skip pcie power off or not). + */ + data = mmio_read_32(SYS_CTRL_FROM_COMPHY_ADDR(comphy_base) + + SYS_CTRL_UINIT_SOFT_RESET_REG); + switch (comphy_index) { + case COMPHY_LANE0: + data &= ~PCIE_MAC_RESET_MASK_PORT0; + break; + case COMPHY_LANE4: + data &= ~PCIE_MAC_RESET_MASK_PORT1; + break; + case COMPHY_LANE5: + data &= ~PCIE_MAC_RESET_MASK_PORT2; + break; + } + + mmio_write_32(SYS_CTRL_FROM_COMPHY_ADDR(comphy_base) + + SYS_CTRL_UINIT_SOFT_RESET_REG, data); + spin_unlock(&cp110_mac_reset_lock); + + /* Hard reset the comphy, for PCIe and usb3 */ + mask = COMMON_PHY_CFG1_PWR_ON_RESET_MASK; + data = 0x0 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET; + mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK; + data |= 0x0 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET; + reg_set(comphy_ip_addr + COMMON_PHY_CFG1_REG, data, mask); + + /* Clear comphy PHY and PIPE selector, can't rely on previous config. */ + mvebu_cp110_comphy_clr_phy_selector(comphy_base, comphy_index); + mvebu_cp110_comphy_clr_pipe_selector(comphy_base, comphy_index); + + debug_exit(); + + return 0; +} diff --git a/drivers/marvell/comphy/phy-comphy-cp110.h b/drivers/marvell/comphy/phy-comphy-cp110.h new file mode 100644 index 0000000..0be6c26 --- /dev/null +++ b/drivers/marvell/comphy/phy-comphy-cp110.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* Those are parameters for xfi mode, which need to be tune for each board type. 
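+ * A successful run of mvebu_cp110_comphy_xfi_rx_training() prints the
+ * trained values in a form that can be copied into the porting-layer
+ * header referenced below.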
+ * For known DB boards the parameters was already calibrated and placed under + * the plat/marvell/armada/a8k/<board_type>/board/phy-porting-layer.h + */ +struct xfi_params { + uint8_t g1_ffe_res_sel; + uint8_t g1_ffe_cap_sel; + uint8_t align90; + uint8_t g1_dfe_res; + uint8_t g1_amp; + uint8_t g1_emph; + uint8_t g1_emph_en; + uint8_t g1_tx_amp_adj; + uint8_t g1_tx_emph_en; + uint8_t g1_tx_emph; + uint8_t g1_rx_selmuff; + uint8_t g1_rx_selmufi; + uint8_t g1_rx_selmupf; + uint8_t g1_rx_selmupi; + _Bool valid; +}; + +struct sata_params { + uint8_t g1_amp; + uint8_t g2_amp; + uint8_t g3_amp; + + uint8_t g1_emph; + uint8_t g2_emph; + uint8_t g3_emph; + + uint8_t g1_emph_en; + uint8_t g2_emph_en; + uint8_t g3_emph_en; + + uint8_t g1_tx_amp_adj; + uint8_t g2_tx_amp_adj; + uint8_t g3_tx_amp_adj; + + uint8_t g1_tx_emph_en; + uint8_t g2_tx_emph_en; + uint8_t g3_tx_emph_en; + + uint8_t g1_tx_emph; + uint8_t g2_tx_emph; + uint8_t g3_tx_emph; + + uint8_t g3_dfe_res; + + uint8_t g3_ffe_res_sel; + + uint8_t g3_ffe_cap_sel; + + uint8_t align90; + + uint8_t g1_rx_selmuff; + uint8_t g2_rx_selmuff; + uint8_t g3_rx_selmuff; + + uint8_t g1_rx_selmufi; + uint8_t g2_rx_selmufi; + uint8_t g3_rx_selmufi; + + uint8_t g1_rx_selmupf; + uint8_t g2_rx_selmupf; + uint8_t g3_rx_selmupf; + + uint8_t g1_rx_selmupi; + uint8_t g2_rx_selmupi; + uint8_t g3_rx_selmupi; + + uint8_t polarity_invert; + + _Bool valid; +}; + +struct usb_params { + uint8_t polarity_invert; +}; + +int mvebu_cp110_comphy_is_pll_locked(uint64_t comphy_base, + uint8_t comphy_index); +int mvebu_cp110_comphy_power_off(uint64_t comphy_base, + uint8_t comphy_index, uint64_t comphy_mode); +int mvebu_cp110_comphy_power_on(uint64_t comphy_base, uint8_t comphy_index, + uint64_t comphy_mode, + uint64_t comphy_train_base); +int mvebu_cp110_comphy_xfi_rx_training(uint64_t comphy_base, + uint8_t comphy_index); +int mvebu_cp110_comphy_digital_reset(uint64_t comphy_base, uint8_t comphy_index, + uint32_t comphy_mode, uint32_t command); + +#define COMPHY_POLARITY_NO_INVERT 0 +#define COMPHY_POLARITY_TXD_INVERT 1 +#define COMPHY_POLARITY_RXD_INVERT 2 diff --git a/drivers/marvell/comphy/phy-default-porting-layer.h b/drivers/marvell/comphy/phy-default-porting-layer.h new file mode 100644 index 0000000..3c63c64 --- /dev/null +++ b/drivers/marvell/comphy/phy-default-porting-layer.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#ifndef PHY_DEFAULT_PORTING_LAYER_H +#define PHY_DEFAULT_PORTING_LAYER_H + + +#define MAX_LANE_NR 6 + +#warning "Using default comphy params - you may need to suit them to your board" + +static const struct xfi_params + xfi_static_values_tab[AP_NUM][CP_NUM][MAX_LANE_NR] = { + [0 ... AP_NUM-1][0 ... CP_NUM-1][0 ... MAX_LANE_NR-1] = { + .g1_ffe_res_sel = 0x3, .g1_ffe_cap_sel = 0xf, .align90 = 0x5f, + .g1_dfe_res = 0x2, .g1_amp = 0x1c, .g1_emph = 0xe, + .g1_emph_en = 0x1, .g1_tx_amp_adj = 0x1, .g1_tx_emph_en = 0x1, + .g1_tx_emph = 0x0, .g1_rx_selmuff = 0x1, .g1_rx_selmufi = 0x0, + .g1_rx_selmupf = 0x2, .g1_rx_selmupi = 0x2, .valid = 1 + } +}; + +static const struct sata_params + sata_static_values_tab[AP_NUM][CP_NUM][MAX_LANE_NR] = { + [0 ... AP_NUM-1][0 ... CP_NUM-1][0 ... 
MAX_LANE_NR-1] = { + .g1_amp = 0x8, .g2_amp = 0xa, .g3_amp = 0x1e, + .g1_emph = 0x1, .g2_emph = 0x2, .g3_emph = 0xe, + .g1_emph_en = 0x1, .g2_emph_en = 0x1, .g3_emph_en = 0x1, + .g1_tx_amp_adj = 0x1, .g2_tx_amp_adj = 0x1, + .g3_tx_amp_adj = 0x1, + .g1_tx_emph_en = 0x0, .g2_tx_emph_en = 0x0, + .g3_tx_emph_en = 0x0, + .g1_tx_emph = 0x1, .g2_tx_emph = 0x1, .g3_tx_emph = 0x1, + .g3_dfe_res = 0x1, .g3_ffe_res_sel = 0x4, .g3_ffe_cap_sel = 0xf, + .align90 = 0x61, + .g1_rx_selmuff = 0x3, .g2_rx_selmuff = 0x3, + .g3_rx_selmuff = 0x3, + .g1_rx_selmufi = 0x0, .g2_rx_selmufi = 0x0, + .g3_rx_selmufi = 0x3, + .g1_rx_selmupf = 0x1, .g2_rx_selmupf = 0x1, + .g3_rx_selmupf = 0x2, + .g1_rx_selmupi = 0x0, .g2_rx_selmupi = 0x0, + .g3_rx_selmupi = 0x2, + .polarity_invert = COMPHY_POLARITY_NO_INVERT, + .valid = 0x1 + }, +}; + +static const struct usb_params + usb_static_values_tab[AP_NUM][CP_NUM][MAX_LANE_NR] = { + [0 ... AP_NUM-1][0 ... CP_NUM-1][0 ... MAX_LANE_NR-1] = { + .polarity_invert = COMPHY_POLARITY_NO_INVERT + }, +}; +#endif /* PHY_DEFAULT_PORTING_LAYER_H */ diff --git a/drivers/marvell/ddr_phy_access.c b/drivers/marvell/ddr_phy_access.c new file mode 100644 index 0000000..352d1ef --- /dev/null +++ b/drivers/marvell/ddr_phy_access.c @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2021 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#include "ddr_phy_access.h" +#include <lib/mmio.h> +#include <drivers/marvell/ccu.h> +#include <errno.h> + +#define DDR_PHY_END_ADDRESS 0x100000 + +#ifdef DDR_PHY_DEBUG +#define debug_printf(...) printf(__VA_ARGS__) +#else +#define debug_printf(...) +#endif + + +/* + * This routine writes 'data' to specified 'address' offset, + * with optional debug print support + */ +int snps_fw_write(uintptr_t offset, uint16_t data) +{ + debug_printf("In %s\n", __func__); + + if (offset < DDR_PHY_END_ADDRESS) { + mmio_write_16(DDR_PHY_BASE_ADDR + (2 * offset), data); + return 0; + } + debug_printf("%s: illegal offset value: 0x%x\n", __func__, offset); + return -EINVAL; +} + +int snps_fw_read(uintptr_t offset, uint16_t *read) +{ + debug_printf("In %s\n", __func__); + + if (offset < DDR_PHY_END_ADDRESS) { + *read = mmio_read_16(DDR_PHY_BASE_ADDR + (2 * offset)); + return 0; + } + debug_printf("%s: illegal offset value: 0x%x\n", __func__, offset); + return -EINVAL; +} + +int mvebu_ddr_phy_write(uintptr_t offset, uint16_t data) +{ + return snps_fw_write(offset, data); +} + +int mvebu_ddr_phy_read(uintptr_t offset, uint16_t *read) +{ + return snps_fw_read(offset, read); +} diff --git a/drivers/marvell/ddr_phy_access.h b/drivers/marvell/ddr_phy_access.h new file mode 100644 index 0000000..5f9a668 --- /dev/null +++ b/drivers/marvell/ddr_phy_access.h @@ -0,0 +1,15 @@ +/* + * Copyright (C) 2021 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#include <plat_marvell.h> + +#define DEVICE_BASE 0xF0000000 +#define DDR_PHY_OFFSET 0x1000000 +#define DDR_PHY_BASE_ADDR (DEVICE_BASE + DDR_PHY_OFFSET) + +int mvebu_ddr_phy_write(uintptr_t offset, uint16_t data); +int mvebu_ddr_phy_read(uintptr_t offset, uint16_t *read); diff --git a/drivers/marvell/gwin.c b/drivers/marvell/gwin.c new file mode 100644 index 0000000..40f8c93 --- /dev/null +++ b/drivers/marvell/gwin.c @@ -0,0 +1,231 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* GWIN unit device driver for Marvell AP810 SoC */ + +#include <inttypes.h> +#include <stdint.h> + +#include <common/debug.h> +#include <drivers/marvell/gwin.h> +#include <lib/mmio.h> + +#include <armada_common.h> +#include <mvebu.h> +#include <mvebu_def.h> + +#if LOG_LEVEL >= LOG_LEVEL_INFO +#define DEBUG_ADDR_MAP +#endif + +/* common defines */ +#define WIN_ENABLE_BIT (0x1) +#define WIN_TARGET_MASK (0xF) +#define WIN_TARGET_SHIFT (0x8) +#define WIN_TARGET(tgt) (((tgt) & WIN_TARGET_MASK) \ + << WIN_TARGET_SHIFT) + +/* Bits[43:26] of the physical address are the window base, + * which is aligned to 64MB + */ +#define ADDRESS_RSHIFT (26) +#define ADDRESS_LSHIFT (10) +#define GWIN_ALIGNMENT_64M (0x4000000) + +/* AP registers */ +#define GWIN_CR_OFFSET(ap, win) (MVEBU_GWIN_BASE(ap) + 0x0 + \ + (0x10 * (win))) +#define GWIN_ALR_OFFSET(ap, win) (MVEBU_GWIN_BASE(ap) + 0x8 + \ + (0x10 * (win))) +#define GWIN_AHR_OFFSET(ap, win) (MVEBU_GWIN_BASE(ap) + 0xc + \ + (0x10 * (win))) + +#define CCU_GRU_CR_OFFSET(ap) (MVEBU_CCU_GRU_BASE(ap)) +#define CCR_GRU_CR_GWIN_MBYPASS (1 << 1) + +static void gwin_check(struct addr_map_win *win) +{ + /* The base is always 64M aligned */ + if (IS_NOT_ALIGN(win->base_addr, GWIN_ALIGNMENT_64M)) { + win->base_addr &= ~(GWIN_ALIGNMENT_64M - 1); + NOTICE("%s: Align the base address to 0x%" PRIx64 "\n", + __func__, win->base_addr); + } + + /* size parameter validity check */ + if (IS_NOT_ALIGN(win->win_size, GWIN_ALIGNMENT_64M)) { + win->win_size = ALIGN_UP(win->win_size, GWIN_ALIGNMENT_64M); + NOTICE("%s: Aligning window size to 0x%" PRIx64 "\n", + __func__, win->win_size); + } +} + +static void gwin_enable_window(int ap_index, struct addr_map_win *win, + uint32_t win_num) +{ + uint32_t alr, ahr; + uint64_t end_addr; + + if ((win->target_id & WIN_TARGET_MASK) != win->target_id) { + ERROR("target ID = %d, is invalid\n", win->target_id); + return; + } + + /* calculate 64bit end-address */ + end_addr = (win->base_addr + win->win_size - 1); + + alr = (uint32_t)((win->base_addr >> ADDRESS_RSHIFT) << ADDRESS_LSHIFT); + ahr = (uint32_t)((end_addr >> ADDRESS_RSHIFT) << ADDRESS_LSHIFT); + + /* write start address and end address for GWIN */ + mmio_write_32(GWIN_ALR_OFFSET(ap_index, win_num), alr); + mmio_write_32(GWIN_AHR_OFFSET(ap_index, win_num), ahr); + + /* write the target ID and enable the window */ + mmio_write_32(GWIN_CR_OFFSET(ap_index, win_num), + WIN_TARGET(win->target_id) | WIN_ENABLE_BIT); +} + +static void gwin_disable_window(int ap_index, uint32_t win_num) +{ + uint32_t win_reg; + + win_reg = mmio_read_32(GWIN_CR_OFFSET(ap_index, win_num)); + win_reg &= ~WIN_ENABLE_BIT; + mmio_write_32(GWIN_CR_OFFSET(ap_index, win_num), win_reg); +} + +/* Insert/Remove temporary window for using the out-of reset default + * CPx base address to access the CP configuration space prior to + * the further base address update in accordance with address mapping + * design. + * + * NOTE: Use the same window array for insertion and removal of + * temporary windows. + */ +void gwin_temp_win_insert(int ap_index, struct addr_map_win *win, int size) +{ + uint32_t win_id; + + for (int i = 0; i < size; i++) { + win_id = MVEBU_GWIN_MAX_WINS - i - 1; + gwin_check(win); + gwin_enable_window(ap_index, win, win_id); + win++; + } +} + +/* + * NOTE: Use the same window array for insertion and removal of + * temporary windows. 
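+ * Temporary windows are taken from the top of the GWIN range
+ * (win_id = MVEBU_GWIN_MAX_WINS - i - 1), so removal walks the window
+ * array in the same order as gwin_temp_win_insert().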
+ */ +void gwin_temp_win_remove(int ap_index, struct addr_map_win *win, int size) +{ + uint32_t win_id; + + for (int i = 0; i < size; i++) { + uint64_t base; + uint32_t target; + + win_id = MVEBU_GWIN_MAX_WINS - i - 1; + + target = mmio_read_32(GWIN_CR_OFFSET(ap_index, win_id)); + target >>= WIN_TARGET_SHIFT; + target &= WIN_TARGET_MASK; + + base = mmio_read_32(GWIN_ALR_OFFSET(ap_index, win_id)); + base >>= ADDRESS_LSHIFT; + base <<= ADDRESS_RSHIFT; + + if (win->target_id != target) { + ERROR("%s: Trying to remove bad window-%d!\n", + __func__, win_id); + continue; + } + gwin_disable_window(ap_index, win_id); + win++; + } +} + +#ifdef DEBUG_ADDR_MAP +static void dump_gwin(int ap_index) +{ + uint32_t win_num; + + /* Dump all GWIN windows */ + printf("\tbank target start end\n"); + printf("\t----------------------------------------------------\n"); + for (win_num = 0; win_num < MVEBU_GWIN_MAX_WINS; win_num++) { + uint32_t cr; + uint64_t alr, ahr; + + cr = mmio_read_32(GWIN_CR_OFFSET(ap_index, win_num)); + /* Window enabled */ + if (cr & WIN_ENABLE_BIT) { + alr = mmio_read_32(GWIN_ALR_OFFSET(ap_index, win_num)); + alr = (alr >> ADDRESS_LSHIFT) << ADDRESS_RSHIFT; + ahr = mmio_read_32(GWIN_AHR_OFFSET(ap_index, win_num)); + ahr = (ahr >> ADDRESS_LSHIFT) << ADDRESS_RSHIFT; + printf("\tgwin %d 0x%016" PRIx64 " 0x%016" PRIx64 "\n", + (cr >> 8) & 0xF, alr, ahr); + } + } +} +#endif + +int init_gwin(int ap_index) +{ + struct addr_map_win *win; + uint32_t win_id; + uint32_t win_count; + uint32_t win_reg; + + INFO("Initializing GWIN Address decoding\n"); + + /* Get the array of the windows and its size */ + marvell_get_gwin_memory_map(ap_index, &win, &win_count); + if (win_count <= 0) { + INFO("no windows configurations found\n"); + return 0; + } + + if (win_count > MVEBU_GWIN_MAX_WINS) { + ERROR("number of windows is bigger than %d\n", + MVEBU_GWIN_MAX_WINS); + return 0; + } + + /* disable all windows */ + for (win_id = 0; win_id < MVEBU_GWIN_MAX_WINS; win_id++) + gwin_disable_window(ap_index, win_id); + + /* enable relevant windows */ + for (win_id = 0; win_id < win_count; win_id++, win++) { + gwin_check(win); + gwin_enable_window(ap_index, win, win_id); + } + + /* GWIN Miss feature has not verified, therefore any access towards + * remote AP should be accompanied with proper configuration to + * GWIN registers group and therefore the GWIN Miss feature + * should be set into Bypass mode, need to make sure all GWIN regions + * are defined correctly that will assure no GWIN miss occurrence + * JIRA-AURORA2-1630 + */ + INFO("Update GWIN miss bypass\n"); + win_reg = mmio_read_32(CCU_GRU_CR_OFFSET(ap_index)); + win_reg |= CCR_GRU_CR_GWIN_MBYPASS; + mmio_write_32(CCU_GRU_CR_OFFSET(ap_index), win_reg); + +#ifdef DEBUG_ADDR_MAP + dump_gwin(ap_index); +#endif + + INFO("Done GWIN Address decoding Initializing\n"); + + return 0; +} diff --git a/drivers/marvell/io_win.c b/drivers/marvell/io_win.c new file mode 100644 index 0000000..124382a --- /dev/null +++ b/drivers/marvell/io_win.c @@ -0,0 +1,271 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* IO Window unit device driver for Marvell AP807, AP807 and AP810 SoCs */ + +#include <inttypes.h> +#include <stdint.h> + +#include <common/debug.h> +#include <drivers/marvell/io_win.h> +#include <lib/mmio.h> + +#include <armada_common.h> +#include <mvebu.h> +#include <mvebu_def.h> + +#if LOG_LEVEL >= LOG_LEVEL_INFO +#define DEBUG_ADDR_MAP +#endif + +/* common defines */ +#define WIN_ENABLE_BIT (0x1) +/* Physical address of the base of the window = {Addr[19:0],20`h0} */ +#define ADDRESS_SHIFT (20 - 4) +#define ADDRESS_MASK (0xFFFFFFF0) +#define IO_WIN_ALIGNMENT_1M (0x100000) +#define IO_WIN_ALIGNMENT_64K (0x10000) + +/* AP registers */ +#define IO_WIN_ALR_OFFSET(ap, win) (MVEBU_IO_WIN_BASE(ap) + 0x0 + \ + (0x10 * win)) +#define IO_WIN_AHR_OFFSET(ap, win) (MVEBU_IO_WIN_BASE(ap) + 0x8 + \ + (0x10 * win)) +#define IO_WIN_CR_OFFSET(ap, win) (MVEBU_IO_WIN_BASE(ap) + 0xC + \ + (0x10 * win)) + +/* For storage of CR, ALR, AHR abd GCR */ +static uint32_t io_win_regs_save[MVEBU_IO_WIN_MAX_WINS * 3 + 1]; + +static void io_win_check(struct addr_map_win *win) +{ + /* for IO The base is always 1M aligned */ + /* check if address is aligned to 1M */ + if (IS_NOT_ALIGN(win->base_addr, IO_WIN_ALIGNMENT_1M)) { + win->base_addr = ALIGN_UP(win->base_addr, IO_WIN_ALIGNMENT_1M); + NOTICE("%s: Align up the base address to 0x%" PRIx64 "\n", + __func__, win->base_addr); + } + + /* size parameter validity check */ + if (IS_NOT_ALIGN(win->win_size, IO_WIN_ALIGNMENT_1M)) { + win->win_size = ALIGN_UP(win->win_size, IO_WIN_ALIGNMENT_1M); + NOTICE("%s: Aligning size to 0x%" PRIx64 "\n", + __func__, win->win_size); + } +} + +static void io_win_enable_window(int ap_index, struct addr_map_win *win, + uint32_t win_num) +{ + uint32_t alr, ahr; + uint64_t end_addr; + + if (win->target_id < 0 || win->target_id >= MVEBU_IO_WIN_MAX_WINS) { + ERROR("target ID = %d, is invalid\n", win->target_id); + return; + } + + if ((win_num == 0) || (win_num > MVEBU_IO_WIN_MAX_WINS)) { + ERROR("Enabling wrong IOW window %d!\n", win_num); + return; + } + + /* calculate the end-address */ + end_addr = (win->base_addr + win->win_size - 1); + + alr = (uint32_t)((win->base_addr >> ADDRESS_SHIFT) & ADDRESS_MASK); + alr |= WIN_ENABLE_BIT; + ahr = (uint32_t)((end_addr >> ADDRESS_SHIFT) & ADDRESS_MASK); + + /* write start address and end address for IO window */ + mmio_write_32(IO_WIN_ALR_OFFSET(ap_index, win_num), alr); + mmio_write_32(IO_WIN_AHR_OFFSET(ap_index, win_num), ahr); + + /* write window target */ + mmio_write_32(IO_WIN_CR_OFFSET(ap_index, win_num), win->target_id); +} + +static void io_win_disable_window(int ap_index, uint32_t win_num) +{ + uint32_t win_reg; + + if ((win_num == 0) || (win_num > MVEBU_IO_WIN_MAX_WINS)) { + ERROR("Disabling wrong IOW window %d!\n", win_num); + return; + } + + win_reg = mmio_read_32(IO_WIN_ALR_OFFSET(ap_index, win_num)); + win_reg &= ~WIN_ENABLE_BIT; + mmio_write_32(IO_WIN_ALR_OFFSET(ap_index, win_num), win_reg); +} + +/* Insert/Remove temporary window for using the out-of reset default + * CPx base address to access the CP configuration space prior to + * the further base address update in accordance with address mapping + * design. + * + * NOTE: Use the same window array for insertion and removal of + * temporary windows. 
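+ *
+ * For reference, io_win_enable_window() encodes the window limits with
+ * ADDRESS_SHIFT/ADDRESS_MASK. As a worked example (illustrative values
+ * only), a 32MB window based at 0xf2000000 is programmed as:
+ *   alr = ((0xf2000000 >> 16) & 0xFFFFFFF0) | WIN_ENABLE_BIT = 0xf201
+ *   ahr =  (0xf3ffffff >> 16) & 0xFFFFFFF0                   = 0xf3f0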
+ */ +void iow_temp_win_insert(int ap_index, struct addr_map_win *win, int size) +{ + uint32_t win_id; + + for (int i = 0; i < size; i++) { + win_id = MVEBU_IO_WIN_MAX_WINS - i - 1; + io_win_check(win); + io_win_enable_window(ap_index, win, win_id); + win++; + } +} + +/* + * NOTE: Use the same window array for insertion and removal of + * temporary windows. + */ +void iow_temp_win_remove(int ap_index, struct addr_map_win *win, int size) +{ + uint32_t win_id; + + /* Start from the last window and do not touch Win0 */ + for (int i = 0; i < size; i++) { + uint64_t base; + uint32_t target; + + win_id = MVEBU_IO_WIN_MAX_WINS - i - 1; + + target = mmio_read_32(IO_WIN_CR_OFFSET(ap_index, win_id)); + base = mmio_read_32(IO_WIN_ALR_OFFSET(ap_index, win_id)); + base &= ~WIN_ENABLE_BIT; + base <<= ADDRESS_SHIFT; + + if ((win->target_id != target) || (win->base_addr != base)) { + ERROR("%s: Trying to remove bad window-%d!\n", + __func__, win_id); + continue; + } + io_win_disable_window(ap_index, win_id); + win++; + } +} + +#ifdef DEBUG_ADDR_MAP +static void dump_io_win(int ap_index) +{ + uint32_t trgt_id, win_id; + uint32_t alr, ahr; + uint64_t start, end; + + /* Dump all IO windows */ + printf("\tbank target start end\n"); + printf("\t----------------------------------------------------\n"); + for (win_id = 0; win_id < MVEBU_IO_WIN_MAX_WINS; win_id++) { + alr = mmio_read_32(IO_WIN_ALR_OFFSET(ap_index, win_id)); + if (alr & WIN_ENABLE_BIT) { + alr &= ~WIN_ENABLE_BIT; + ahr = mmio_read_32(IO_WIN_AHR_OFFSET(ap_index, win_id)); + trgt_id = mmio_read_32(IO_WIN_CR_OFFSET(ap_index, + win_id)); + start = ((uint64_t)alr << ADDRESS_SHIFT); + end = (((uint64_t)ahr + 0x10) << ADDRESS_SHIFT); + printf("\tio-win %d 0x%016" PRIx64 " 0x%016" PRIx64 "\n", + trgt_id, start, end); + } + } + printf("\tio-win gcr is %x\n", + mmio_read_32(MVEBU_IO_WIN_BASE(ap_index) + + MVEBU_IO_WIN_GCR_OFFSET)); +} +#endif + +static void iow_save_win_range(int ap_id, int win_first, int win_last, + uint32_t *buffer) +{ + int win_id, idx; + + /* Save IOW */ + for (idx = 0, win_id = win_first; win_id <= win_last; win_id++) { + buffer[idx++] = mmio_read_32(IO_WIN_CR_OFFSET(ap_id, win_id)); + buffer[idx++] = mmio_read_32(IO_WIN_ALR_OFFSET(ap_id, win_id)); + buffer[idx++] = mmio_read_32(IO_WIN_AHR_OFFSET(ap_id, win_id)); + } + buffer[idx] = mmio_read_32(MVEBU_IO_WIN_BASE(ap_id) + + MVEBU_IO_WIN_GCR_OFFSET); +} + +static void iow_restore_win_range(int ap_id, int win_first, int win_last, + uint32_t *buffer) +{ + int win_id, idx; + + /* Restore IOW */ + for (idx = 0, win_id = win_first; win_id <= win_last; win_id++) { + mmio_write_32(IO_WIN_CR_OFFSET(ap_id, win_id), buffer[idx++]); + mmio_write_32(IO_WIN_ALR_OFFSET(ap_id, win_id), buffer[idx++]); + mmio_write_32(IO_WIN_AHR_OFFSET(ap_id, win_id), buffer[idx++]); + } + mmio_write_32(MVEBU_IO_WIN_BASE(ap_id) + MVEBU_IO_WIN_GCR_OFFSET, + buffer[idx++]); +} + +void iow_save_win_all(int ap_id) +{ + iow_save_win_range(ap_id, 0, MVEBU_IO_WIN_MAX_WINS - 1, + io_win_regs_save); +} + +void iow_restore_win_all(int ap_id) +{ + iow_restore_win_range(ap_id, 0, MVEBU_IO_WIN_MAX_WINS - 1, + io_win_regs_save); +} + +int init_io_win(int ap_index) +{ + struct addr_map_win *win; + uint32_t win_id, win_reg; + uint32_t win_count; + + INFO("Initializing IO WIN Address decoding\n"); + + /* Get the array of the windows and its size */ + marvell_get_io_win_memory_map(ap_index, &win, &win_count); + if (win_count <= 0) + INFO("no windows configurations found\n"); + + if (win_count > MVEBU_IO_WIN_MAX_WINS) { + INFO("number of 
windows is bigger than %d\n", + MVEBU_IO_WIN_MAX_WINS); + return 0; + } + + /* Get the default target id to set the GCR */ + win_reg = marvell_get_io_win_gcr_target(ap_index); + mmio_write_32(MVEBU_IO_WIN_BASE(ap_index) + MVEBU_IO_WIN_GCR_OFFSET, + win_reg); + + /* disable all IO windows */ + for (win_id = 1; win_id < MVEBU_IO_WIN_MAX_WINS; win_id++) + io_win_disable_window(ap_index, win_id); + + /* enable relevant windows, starting from win_id = 1 because + * index 0 dedicated for BootROM + */ + for (win_id = 1; win_id <= win_count; win_id++, win++) { + io_win_check(win); + io_win_enable_window(ap_index, win, win_id); + } + +#ifdef DEBUG_ADDR_MAP + dump_io_win(ap_index); +#endif + + INFO("Done IO WIN Address decoding Initializing\n"); + + return 0; +} diff --git a/drivers/marvell/iob.c b/drivers/marvell/iob.c new file mode 100644 index 0000000..1f39395 --- /dev/null +++ b/drivers/marvell/iob.c @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2016 - 2018 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* IOW unit device driver for Marvell CP110 and CP115 SoCs */ + +#include <inttypes.h> +#include <stdint.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/marvell/iob.h> +#include <lib/mmio.h> + +#include <armada_common.h> +#include <mvebu.h> +#include <mvebu_def.h> + +#if LOG_LEVEL >= LOG_LEVEL_INFO +#define DEBUG_ADDR_MAP +#endif + +#define MVEBU_IOB_OFFSET (0x190000) +#define MVEBU_IOB_MAX_WINS 16 + +/* common defines */ +#define WIN_ENABLE_BIT (0x1) +/* Physical address of the base of the window = {AddrLow[19:0],20`h0} */ +#define ADDRESS_SHIFT (20 - 4) +#define ADDRESS_MASK (0xFFFFFFF0) +#define IOB_WIN_ALIGNMENT (0x100000) + +/* IOB registers */ +#define IOB_WIN_CR_OFFSET(win) (iob_base + 0x0 + (0x20 * win)) +#define IOB_TARGET_ID_OFFSET (8) +#define IOB_TARGET_ID_MASK (0xF) + +#define IOB_WIN_SCR_OFFSET(win) (iob_base + 0x4 + (0x20 * win)) +#define IOB_WIN_ENA_CTRL_WRITE_SECURE (0x1) +#define IOB_WIN_ENA_CTRL_READ_SECURE (0x2) +#define IOB_WIN_ENA_WRITE_SECURE (0x4) +#define IOB_WIN_ENA_READ_SECURE (0x8) + +#define IOB_WIN_ALR_OFFSET(win) (iob_base + 0x8 + (0x20 * win)) +#define IOB_WIN_AHR_OFFSET(win) (iob_base + 0xC + (0x20 * win)) + +#define IOB_WIN_DIOB_CR_OFFSET(win) (iob_base + 0x10 + (0x20 * win)) +#define IOB_WIN_XOR0_DIOB_EN BIT(0) +#define IOB_WIN_XOR1_DIOB_EN BIT(1) + +uintptr_t iob_base; + +static void iob_win_check(struct addr_map_win *win, uint32_t win_num) +{ + /* check if address is aligned to the size */ + if (IS_NOT_ALIGN(win->base_addr, IOB_WIN_ALIGNMENT)) { + win->base_addr = ALIGN_UP(win->base_addr, IOB_WIN_ALIGNMENT); + ERROR("Window %d: base address unaligned to 0x%x\n", + win_num, IOB_WIN_ALIGNMENT); + printf("Align up the base address to 0x%" PRIx64 "\n", + win->base_addr); + } + + /* size parameter validity check */ + if (IS_NOT_ALIGN(win->win_size, IOB_WIN_ALIGNMENT)) { + win->win_size = ALIGN_UP(win->win_size, IOB_WIN_ALIGNMENT); + ERROR("Window %d: window size unaligned to 0x%x\n", win_num, + IOB_WIN_ALIGNMENT); + printf("Aligning size to 0x%" PRIx64 "\n", win->win_size); + } +} + +static void iob_enable_win(struct addr_map_win *win, uint32_t win_id) +{ + uint32_t iob_win_reg; + uint32_t alr, ahr; + uint64_t end_addr; + uint32_t reg_en; + + /* move XOR (DMA) to use WIN1 which is used for PCI-EP address space */ + reg_en = IOB_WIN_XOR0_DIOB_EN | IOB_WIN_XOR1_DIOB_EN; + iob_win_reg = mmio_read_32(IOB_WIN_DIOB_CR_OFFSET(0)); + iob_win_reg &= ~reg_en; + 
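+	/* write window 0 back with the XOR routing bits cleared, then set
+	 * the same bits on window 1 so XOR DMA follows the PCI-EP window
+	 */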
mmio_write_32(IOB_WIN_DIOB_CR_OFFSET(0), iob_win_reg); + + iob_win_reg = mmio_read_32(IOB_WIN_DIOB_CR_OFFSET(1)); + iob_win_reg |= reg_en; + mmio_write_32(IOB_WIN_DIOB_CR_OFFSET(1), iob_win_reg); + + end_addr = (win->base_addr + win->win_size - 1); + alr = (uint32_t)((win->base_addr >> ADDRESS_SHIFT) & ADDRESS_MASK); + ahr = (uint32_t)((end_addr >> ADDRESS_SHIFT) & ADDRESS_MASK); + + mmio_write_32(IOB_WIN_ALR_OFFSET(win_id), alr); + mmio_write_32(IOB_WIN_AHR_OFFSET(win_id), ahr); + + iob_win_reg = WIN_ENABLE_BIT; + iob_win_reg |= (win->target_id & IOB_TARGET_ID_MASK) + << IOB_TARGET_ID_OFFSET; + mmio_write_32(IOB_WIN_CR_OFFSET(win_id), iob_win_reg); + +} + +#ifdef DEBUG_ADDR_MAP +static void dump_iob(void) +{ + uint32_t win_id, win_cr, alr, ahr; + uint8_t target_id; + uint64_t start, end; + char *iob_target_name[IOB_MAX_TID] = { + "CFG ", "MCI0 ", "PEX1 ", "PEX2 ", + "PEX0 ", "NAND ", "RUNIT", "MCI1 " }; + + /* Dump all IOB windows */ + printf("bank id target start end\n"); + printf("----------------------------------------------------\n"); + for (win_id = 0; win_id < MVEBU_IOB_MAX_WINS; win_id++) { + win_cr = mmio_read_32(IOB_WIN_CR_OFFSET(win_id)); + if (win_cr & WIN_ENABLE_BIT) { + target_id = (win_cr >> IOB_TARGET_ID_OFFSET) & + IOB_TARGET_ID_MASK; + alr = mmio_read_32(IOB_WIN_ALR_OFFSET(win_id)); + start = ((uint64_t)alr << ADDRESS_SHIFT); + if (win_id != 0) { + ahr = mmio_read_32(IOB_WIN_AHR_OFFSET(win_id)); + end = (((uint64_t)ahr + 0x10) << ADDRESS_SHIFT); + } else { + /* Window #0 size is hardcoded to 16MB, as it's + * reserved for CP configuration space. + */ + end = start + (16 << 20); + } + printf("iob %02d %s 0x%016" PRIx64 " 0x%016" PRIx64 "\n", + win_id, iob_target_name[target_id], + start, end); + } + } +} +#endif + +void iob_cfg_space_update(int ap_idx, int cp_idx, uintptr_t base, + uintptr_t new_base) +{ + debug_enter(); + + iob_base = base + MVEBU_IOB_OFFSET; + + NOTICE("Change the base address of AP%d-CP%d to %lx\n", + ap_idx, cp_idx, new_base); + mmio_write_32(IOB_WIN_ALR_OFFSET(0), new_base >> ADDRESS_SHIFT); + + iob_base = new_base + MVEBU_IOB_OFFSET; + + /* Make sure the address was configured by the CPU before + * any possible access to the CP. 
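+ * The dsb() below orders the IOB_WIN_ALR window-0 update above against
+ * any subsequent access that is decoded through the re-based window.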
+ */ + dsb(); + + debug_exit(); +} + +int init_iob(uintptr_t base) +{ + struct addr_map_win *win; + uint32_t win_id, win_reg; + uint32_t win_count; + + INFO("Initializing IOB Address decoding\n"); + + /* Get the base address of the address decoding MBUS */ + iob_base = base + MVEBU_IOB_OFFSET; + + /* Get the array of the windows and fill the map data */ + marvell_get_iob_memory_map(&win, &win_count, base); + if (win_count <= 0) { + INFO("no windows configurations found\n"); + return 0; + } else if (win_count > (MVEBU_IOB_MAX_WINS - 1)) { + ERROR("IOB mem map array > than max available windows (%d)\n", + MVEBU_IOB_MAX_WINS); + win_count = MVEBU_IOB_MAX_WINS; + } + + /* disable all IOB windows, start from win_id = 1 + * because can't disable internal register window + */ + for (win_id = 1; win_id < MVEBU_IOB_MAX_WINS; win_id++) { + win_reg = mmio_read_32(IOB_WIN_CR_OFFSET(win_id)); + win_reg &= ~WIN_ENABLE_BIT; + mmio_write_32(IOB_WIN_CR_OFFSET(win_id), win_reg); + + win_reg = ~IOB_WIN_ENA_CTRL_WRITE_SECURE; + win_reg &= ~IOB_WIN_ENA_CTRL_READ_SECURE; + win_reg &= ~IOB_WIN_ENA_WRITE_SECURE; + win_reg &= ~IOB_WIN_ENA_READ_SECURE; + mmio_write_32(IOB_WIN_SCR_OFFSET(win_id), win_reg); + } + + for (win_id = 1; win_id < win_count + 1; win_id++, win++) { + iob_win_check(win, win_id); + iob_enable_win(win, win_id); + } + +#ifdef DEBUG_ADDR_MAP + dump_iob(); +#endif + + INFO("Done IOB Address decoding Initializing\n"); + + return 0; +} diff --git a/drivers/marvell/mc_trustzone/mc_trustzone.c b/drivers/marvell/mc_trustzone/mc_trustzone.c new file mode 100644 index 0000000..648bd0e --- /dev/null +++ b/drivers/marvell/mc_trustzone/mc_trustzone.c @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#include <inttypes.h> +#include <stdint.h> + +#include <common/debug.h> +#include <drivers/marvell/addr_map.h> +#include <lib/mmio.h> + +#include <mvebu_def.h> + +#include "mc_trustzone.h" + +#define TZ_SIZE(x) ((x) >> 13) + +static int fls(int x) +{ + if (!x) + return 0; + + return 32 - __builtin_clz(x); +} + +/* To not duplicate types, the addr_map_win is used, but the "target" + * filed is referring to attributes instead of "target". + */ +void tz_enable_win(int ap_index, const struct addr_map_win *win, int win_id) +{ + int tz_size; + uint32_t val, base = win->base_addr; + + if ((win_id < 0) || (win_id > MVEBU_TZ_MAX_WINS)) { + ERROR("Enabling wrong MC TrustZone window %d!\n", win_id); + return; + } + + /* map the window size to trustzone register convention */ + tz_size = fls(TZ_SIZE(win->win_size)); + + VERBOSE("%s: window size = 0x%" PRIx64 " maps to tz_size %d\n", + __func__, win->win_size, tz_size); + if (tz_size < 0 || tz_size > 31) { + ERROR("Using not allowed size for MC TrustZone window %d!\n", + win_id); + return; + } + + if (base & 0xfff) { + base = base & ~0xfff; + WARN("Attempt to open MC TZ win. 
at 0x%" PRIx64 ", truncate to 0x%x\n", + win->base_addr, base); + } + + val = base | (tz_size << 7) | win->target_id | TZ_VALID; + + VERBOSE("%s: base 0x%x, tz_size moved 0x%x, attr 0x%x, val 0x%x\n", + __func__, base, (tz_size << 7), win->target_id, val); + + mmio_write_32(MVEBU_AP_MC_TRUSTZONE_REG_LOW(ap_index, win_id), val); + + VERBOSE("%s: Win%d[0x%x] configured to 0x%x\n", __func__, win_id, + MVEBU_AP_MC_TRUSTZONE_REG_LOW(ap_index, win_id), + mmio_read_32(MVEBU_AP_MC_TRUSTZONE_REG_LOW(ap_index, win_id))); + + mmio_write_32(MVEBU_AP_MC_TRUSTZONE_REG_HIGH(ap_index, win_id), + (win->base_addr >> 32)); + + VERBOSE("%s: Win%d[0x%x] configured to 0x%x\n", __func__, win_id, + MVEBU_AP_MC_TRUSTZONE_REG_HIGH(ap_index, win_id), + mmio_read_32(MVEBU_AP_MC_TRUSTZONE_REG_HIGH(ap_index, win_id))); +} diff --git a/drivers/marvell/mc_trustzone/mc_trustzone.h b/drivers/marvell/mc_trustzone/mc_trustzone.h new file mode 100644 index 0000000..296dce8 --- /dev/null +++ b/drivers/marvell/mc_trustzone/mc_trustzone.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#ifndef MC_TRUSTZONE_H +#define MC_TRUSTZONE_H + +#include <drivers/marvell/addr_map.h> + +#define MVEBU_TZ_MAX_WINS 16 + +#define TZ_VALID (1 << 0) +#define TZ_PERM(x) ((x) << 1) +#define TZ_RZ_ENABLE (1 << 3) + +/* tz attr definitions */ +#define TZ_PERM_RW (TZ_PERM(0)) +#define TZ_PERM_RO (TZ_PERM(1)) +#define TZ_PERM_WO (TZ_PERM(2)) +#define TZ_PERM_ABORT (TZ_PERM(3)) + +void tz_enable_win(int ap_index, const struct addr_map_win *win, int win_id); + +#endif /* MC_TRUSTZONE_H */ diff --git a/drivers/marvell/mci.c b/drivers/marvell/mci.c new file mode 100644 index 0000000..2b54700 --- /dev/null +++ b/drivers/marvell/mci.c @@ -0,0 +1,834 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* MCI bus driver for Marvell ARMADA 8K and 8K+ SoCs */ + +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <drivers/marvell/mci.h> +#include <lib/mmio.h> + +#include <mvebu.h> +#include <mvebu_def.h> +#include <plat_marvell.h> + +/* /HB /Units /Direct_regs /Direct regs + * /Configuration Register Write/Read Data Register + */ +#define MCI_WRITE_READ_DATA_REG(mci_index) \ + MVEBU_MCI_REG_BASE_REMAP(mci_index) +/* /HB /Units /Direct_regs /Direct regs + * /Configuration Register Access Command Register + */ +#define MCI_ACCESS_CMD_REG(mci_index) \ + (MVEBU_MCI_REG_BASE_REMAP(mci_index) + 0x4) + +/* Access Command fields : + * bit[3:0] - Sub command: 1 => Peripheral Config Register Read, + * 0 => Peripheral Config Register Write, + * 2 => Peripheral Assign ID request, + * 3 => Circular Config Write + * bit[5] - 1 => Local (same chip access) 0 => Remote + * bit[15:8] - Destination hop ID. Put Global ID (GID) here (see scheme below). + * bit[23:22] - 0x3 IHB PHY REG address space, 0x0 IHB Controller space + * bit[21:16] - Low 6 bits of offset. 
Hight 2 bits are taken from bit[28:27] + * of IHB_PHY_CTRL + * (must be set before any PHY register access occurs): + * /IHB_REG /IHB_REGInterchip Hopping Bus Registers + * /IHB Version Control Register + * + * ixi_ihb_top IHB PHY + * AXI ----------------------------- ------------- + * <--| axi_hb_top | ihb_pipe_top |-->| | + * -->| GID=1 | GID=0 |<--| | + * ----------------------------- ------------- + */ +#define MCI_INDIRECT_CTRL_READ_CMD 0x1 +#define MCI_INDIRECT_CTRL_ASSIGN_CMD 0x2 +#define MCI_INDIRECT_CTRL_CIRCULAR_CMD 0x3 +#define MCI_INDIRECT_CTRL_LOCAL_PKT (1 << 5) +#define MCI_INDIRECT_CTRL_CMD_DONE_OFFSET 6 +#define MCI_INDIRECT_CTRL_CMD_DONE \ + (1 << MCI_INDIRECT_CTRL_CMD_DONE_OFFSET) +#define MCI_INDIRECT_CTRL_DATA_READY_OFFSET 7 +#define MCI_INDIRECT_CTRL_DATA_READY \ + (1 << MCI_INDIRECT_CTRL_DATA_READY_OFFSET) +#define MCI_INDIRECT_CTRL_HOPID_OFFSET 8 +#define MCI_INDIRECT_CTRL_HOPID(id) \ + (((id) & 0xFF) << MCI_INDIRECT_CTRL_HOPID_OFFSET) +#define MCI_INDIRECT_CTRL_REG_CHIPID_OFFSET 16 +#define MCI_INDIRECT_REG_CTRL_ADDR(reg_num) \ + (reg_num << MCI_INDIRECT_CTRL_REG_CHIPID_OFFSET) + +/* Hop ID values */ +#define GID_IHB_PIPE 0 +#define GID_AXI_HB 1 +#define GID_IHB_EXT 2 + +#define MCI_DID_GLOBAL_ASSIGNMENT_REQUEST_REG 0x2 +/* Target MCi Local ID (LID, which is = self DID) */ +#define MCI_DID_GLOBAL_ASSIGN_REQ_MCI_LOCAL_ID(val) (((val) & 0xFF) << 16) +/* Bits [15:8]: Number of MCis on chip of target MCi */ +#define MCI_DID_GLOBAL_ASSIGN_REQ_MCI_COUNT(val) (((val) & 0xFF) << 8) +/* Bits [7:0]: Number of hops on chip of target MCi */ +#define MCI_DID_GLOBAL_ASSIGN_REQ_HOPS_NUM(val) (((val) & 0xFF) << 0) + +/* IHB_REG domain registers */ +/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers/ + * Rx Memory Configuration Register (RX_MEM_CFG) + */ +#define MCI_CTRL_RX_MEM_CFG_REG_NUM 0x0 +#define MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(val) (((val) & 0xFF) << 24) +#define MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(val) (((val) & 0xFF) << 16) +#define MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(val) (((val) & 0xFF) << 8) +#define MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(val) (((val) & 0xF) << 4) +#define MCI_CTRL_RX_TX_MEM_CFG_RTC(val) (((val) & 0x3) << 2) +#define MCI_CTRL_RX_TX_MEM_CFG_WTC(val) (((val) & 0x3) << 0) +#define MCI_CTRL_RX_MEM_CFG_REG_DEF_CP_VAL \ + (MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(0x07) | \ + MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(0x3f) | \ + MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(0x3f) | \ + MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(0xf) | \ + MCI_CTRL_RX_TX_MEM_CFG_RTC(1) | \ + MCI_CTRL_RX_TX_MEM_CFG_WTC(1)) + +#define MCI_CTRL_RX_MEM_CFG_REG_DEF_AP_VAL \ + (MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(0x3f) | \ + MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(0x03) | \ + MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(0x3f) | \ + MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(0xf) | \ + MCI_CTRL_RX_TX_MEM_CFG_RTC(1) | \ + MCI_CTRL_RX_TX_MEM_CFG_WTC(1)) + + +/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers/ + * Tx Memory Configuration Register (TX_MEM_CFG) + */ +#define MCI_CTRL_TX_MEM_CFG_REG_NUM 0x1 +/* field mapping for TX mem config register + * are the same as for RX register - see register above + */ +#define MCI_CTRL_TX_MEM_CFG_REG_DEF_VAL \ + (MCI_CTRL_RX_TX_MEM_CFG_RQ_THRESH(0x20) | \ + MCI_CTRL_RX_TX_MEM_CFG_PQ_THRESH(0x20) | \ + MCI_CTRL_RX_TX_MEM_CFG_NQ_THRESH(0x20) | \ + MCI_CTRL_RX_TX_MEM_CFG_DELTA_THRESH(2) | \ + MCI_CTRL_RX_TX_MEM_CFG_RTC(1) | \ + MCI_CTRL_RX_TX_MEM_CFG_WTC(1)) + +/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers + * /IHB Link CRC Control + */ +/* MCi Link CRC Control Register 
(MCi_CRC_CTRL) */ +#define MCI_LINK_CRC_CTRL_REG_NUM 0x4 + +/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers + * /IHB Status Register + */ +/* MCi Status Register (MCi_STS) */ +#define MCI_CTRL_STATUS_REG_NUM 0x5 +#define MCI_CTRL_STATUS_REG_PHY_READY (1 << 12) +#define MCI_CTRL_STATUS_REG_LINK_PRESENT (1 << 15) +#define MCI_CTRL_STATUS_REG_PHY_CID_VIO_OFFSET 24 +#define MCI_CTRL_STATUS_REG_PHY_CID_VIO_MASK \ + (0xF << MCI_CTRL_STATUS_REG_PHY_CID_VIO_OFFSET) +/* Expected successful Link result, including reserved bit */ +#define MCI_CTRL_PHY_READY (MCI_CTRL_STATUS_REG_PHY_READY | \ + MCI_CTRL_STATUS_REG_LINK_PRESENT | \ + MCI_CTRL_STATUS_REG_PHY_CID_VIO_MASK) + +/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers/ + * MCi PHY Speed Settings Register (MCi_PHY_SETTING) + */ +#define MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM 0x8 +#define MCI_CTRL_MCI_PHY_SET_DLO_FIFO_FULL_TRESH(val) (((val) & 0xF) << 28) +#define MCI_CTRL_MCI_PHY_SET_PHY_MAX_SPEED(val) (((val) & 0xF) << 12) +#define MCI_CTRL_MCI_PHY_SET_PHYCLK_SEL(val) (((val) & 0xF) << 8) +#define MCI_CTRL_MCI_PHY_SET_REFCLK_FREQ_SEL(val) (((val) & 0xF) << 4) +#define MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(val) (((val) & 0x1) << 1) +#define MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL \ + (MCI_CTRL_MCI_PHY_SET_DLO_FIFO_FULL_TRESH(0x3) | \ + MCI_CTRL_MCI_PHY_SET_PHY_MAX_SPEED(0x3) | \ + MCI_CTRL_MCI_PHY_SET_PHYCLK_SEL(0x2) | \ + MCI_CTRL_MCI_PHY_SET_REFCLK_FREQ_SEL(0x1)) +#define MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL2 \ + (MCI_CTRL_MCI_PHY_SET_DLO_FIFO_FULL_TRESH(0x3) | \ + MCI_CTRL_MCI_PHY_SET_PHY_MAX_SPEED(0x3) | \ + MCI_CTRL_MCI_PHY_SET_PHYCLK_SEL(0x5) | \ + MCI_CTRL_MCI_PHY_SET_REFCLK_FREQ_SEL(0x1)) + +/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers + * /IHB Mode Config + */ +#define MCI_CTRL_IHB_MODE_CFG_REG_NUM 0x25 +#define MCI_CTRL_IHB_MODE_HBCLK_DIV(val) ((val) & 0xFF) +#define MCI_CTRL_IHB_MODE_CHUNK_MOD_OFFSET 8 +#define MCI_CTRL_IHB_MODE_CHUNK_MOD \ + (1 << MCI_CTRL_IHB_MODE_CHUNK_MOD_OFFSET) +#define MCI_CTRL_IHB_MODE_FWD_MOD_OFFSET 9 +#define MCI_CTRL_IHB_MODE_FWD_MOD \ + (1 << MCI_CTRL_IHB_MODE_FWD_MOD_OFFSET) +#define MCI_CTRL_IHB_MODE_SEQFF_FINE_MOD(val) (((val) & 0xF) << 12) +#define MCI_CTRL_IHB_MODE_RX_COMB_THRESH(val) (((val) & 0xFF) << 16) +#define MCI_CTRL_IHB_MODE_TX_COMB_THRESH(val) (((val) & 0xFF) << 24) + +#define MCI_CTRL_IHB_MODE_CFG_REG_DEF_VAL \ + (MCI_CTRL_IHB_MODE_HBCLK_DIV(6) | \ + MCI_CTRL_IHB_MODE_FWD_MOD | \ + MCI_CTRL_IHB_MODE_SEQFF_FINE_MOD(0xF) | \ + MCI_CTRL_IHB_MODE_RX_COMB_THRESH(0x3f) | \ + MCI_CTRL_IHB_MODE_TX_COMB_THRESH(0x40)) +/* AXI_HB registers */ +#define MCI_AXI_ACCESS_DATA_REG_NUM 0x0 +#define MCI_AXI_ACCESS_PCIE_MODE 1 +#define MCI_AXI_ACCESS_CACHE_CHECK_OFFSET 5 +#define MCI_AXI_ACCESS_CACHE_CHECK \ + (1 << MCI_AXI_ACCESS_CACHE_CHECK_OFFSET) +#define MCI_AXI_ACCESS_FORCE_POST_WR_OFFSET 6 +#define MCI_AXI_ACCESS_FORCE_POST_WR \ + (1 << MCI_AXI_ACCESS_FORCE_POST_WR_OFFSET) +#define MCI_AXI_ACCESS_DISABLE_CLK_GATING_OFFSET 9 +#define MCI_AXI_ACCESS_DISABLE_CLK_GATING \ + (1 << MCI_AXI_ACCESS_DISABLE_CLK_GATING_OFFSET) + +/* /HB /Units /HB_REG /HB_REGHopping Bus Registers + * /Window 0 Address Mask Register + */ +#define MCI_HB_CTRL_WIN0_ADDRESS_MASK_REG_NUM 0x2 + +/* /HB /Units /HB_REG /HB_REGHopping Bus Registers + * /Window 0 Destination Register + */ +#define MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM 0x3 +#define MCI_HB_CTRL_WIN0_DEST_VALID_FLAG(val) (((val) & 0x1) << 16) +#define MCI_HB_CTRL_WIN0_DEST_ID(val) (((val) & 0xFF) << 0) + +/* /HB /Units /HB_REG /HB_REGHopping Bus 
Registers /Tx Control Register */ +#define MCI_HB_CTRL_TX_CTRL_REG_NUM 0xD +#define MCI_HB_CTRL_TX_CTRL_PCIE_MODE_OFFSET 24 +#define MCI_HB_CTRL_TX_CTRL_PCIE_MODE \ + (1 << MCI_HB_CTRL_TX_CTRL_PCIE_MODE_OFFSET) +#define MCI_HB_CTRL_TX_CTRL_PRI_TH_QOS(val) (((val) & 0xF) << 12) +#define MCI_HB_CTRL_TX_CTRL_MAX_RD_CNT(val) (((val) & 0x1F) << 6) +#define MCI_HB_CTRL_TX_CTRL_MAX_WR_CNT(val) (((val) & 0x1F) << 0) + +/* /HB /Units /IHB_REG /IHB_REGInterchip Hopping Bus Registers + * /IHB Version Control Register + */ +#define MCI_PHY_CTRL_REG_NUM 0x7 +#define MCI_PHY_CTRL_MCI_MINOR 0x8 /* BITS [3:0] */ +#define MCI_PHY_CTRL_MCI_MAJOR_OFFSET 4 +#define MCI_PHY_CTRL_MCI_MAJOR \ + (1 << MCI_PHY_CTRL_MCI_MAJOR_OFFSET) +#define MCI_PHY_CTRL_MCI_SLEEP_REQ_OFFSET 11 +#define MCI_PHY_CTRL_MCI_SLEEP_REQ \ + (1 << MCI_PHY_CTRL_MCI_SLEEP_REQ_OFFSET) +/* Host=1 / Device=0 PHY mode */ +#define MCI_PHY_CTRL_MCI_PHY_MODE_OFFSET 24 +#define MCI_PHY_CTRL_MCI_PHY_MODE_HOST \ + (1 << MCI_PHY_CTRL_MCI_PHY_MODE_OFFSET) +/* Register=1 / PWM=0 interface */ +#define MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE_OFFSET 25 +#define MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE \ + (1 << MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE_OFFSET) + /* PHY code InReset=1 */ +#define MCI_PHY_CTRL_MCI_PHY_RESET_CORE_OFFSET 26 +#define MCI_PHY_CTRL_MCI_PHY_RESET_CORE \ + (1 << MCI_PHY_CTRL_MCI_PHY_RESET_CORE_OFFSET) +#define MCI_PHY_CTRL_PHY_ADDR_MSB_OFFSET 27 +#define MCI_PHY_CTRL_PHY_ADDR_MSB(addr) \ + (((addr) & 0x3) << \ + MCI_PHY_CTRL_PHY_ADDR_MSB_OFFSET) +#define MCI_PHY_CTRL_PIDI_MODE_OFFSET 31 +#define MCI_PHY_CTRL_PIDI_MODE \ + (1U << MCI_PHY_CTRL_PIDI_MODE_OFFSET) + +/* Number of times to wait for the MCI link ready after MCI configurations + * Normally takes 34-35 successive reads + */ +#define LINK_READY_TIMEOUT 100 + +enum mci_register_type { + MCI_REG_TYPE_PHY = 0, + MCI_REG_TYPE_CTRL, +}; + +enum { + MCI_CMD_WRITE, + MCI_CMD_READ +}; + +/* Write wrapper callback for debug: + * will print written data in case LOG_LEVEL >= 40 + */ +static void mci_mmio_write_32(uintptr_t addr, uint32_t value) +{ + VERBOSE("Write:\t0x%x = 0x%x\n", (uint32_t)addr, value); + mmio_write_32(addr, value); +} +/* Read wrapper callback for debug: + * will print read data in case LOG_LEVEL >= 40 + */ +static uint32_t mci_mmio_read_32(uintptr_t addr) +{ + uint32_t value; + + value = mmio_read_32(addr); + VERBOSE("Read:\t0x%x = 0x%x\n", (uint32_t)addr, value); + return value; +} + +/* MCI indirect access command completion polling: + * Each write/read command done via MCI indirect registers must be polled + * for command completions status. + * + * Returns 1 in case of error + * Returns 0 in case of command completed successfully. 
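+ *
+ * Typical indirect access flow (see mci_read()/mci_write() below), e.g.
+ * reading the local MCi status register:
+ *   cmd = MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_STATUS_REG_NUM) |
+ *         MCI_INDIRECT_CTRL_LOCAL_PKT | MCI_INDIRECT_CTRL_READ_CMD;
+ *   mci_read(0, cmd, &data); which polls for CMD_DONE and DATA_READY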
+ */ +static int mci_poll_command_completion(int mci_index, int command_type) +{ + uint32_t mci_cmd_value = 0, retry_count = 100, ret = 0; + uint32_t completion_flags = MCI_INDIRECT_CTRL_CMD_DONE; + + debug_enter(); + /* Read commands require validating that requested data is ready */ + if (command_type == MCI_CMD_READ) + completion_flags |= MCI_INDIRECT_CTRL_DATA_READY; + + do { + /* wait 1 ms before each polling */ + mdelay(1); + mci_cmd_value = mci_mmio_read_32(MCI_ACCESS_CMD_REG(mci_index)); + } while (((mci_cmd_value & completion_flags) != completion_flags) && + (retry_count-- > 0)); + + if (retry_count == 0) { + ERROR("%s: MCI command timeout (command status = 0x%x)\n", + __func__, mci_cmd_value); + ret = 1; + } + + debug_exit(); + return ret; +} + +int mci_read(int mci_idx, uint32_t cmd, uint32_t *value) +{ + int rval; + + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_idx), cmd); + + rval = mci_poll_command_completion(mci_idx, MCI_CMD_READ); + + *value = mci_mmio_read_32(MCI_WRITE_READ_DATA_REG(mci_idx)); + + return rval; +} + +int mci_write(int mci_idx, uint32_t cmd, uint32_t data) +{ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_idx), data); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_idx), cmd); + + return mci_poll_command_completion(mci_idx, MCI_CMD_WRITE); +} + +/* Perform 3 configurations in one command: PCI mode, + * queues separation and cache bit + */ +static int mci_axi_set_pcie_mode(int mci_index) +{ + uint32_t reg_data, ret = 1; + + debug_enter(); + /* This configuration makes MCI IP behave consistently with AXI protocol + * It should be configured at one side only (for example locally at AP). + * The IP takes care of performing the same configurations at MCI on + * another side (for example remotely at CP). + */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_AXI_ACCESS_PCIE_MODE | + MCI_AXI_ACCESS_CACHE_CHECK | + MCI_AXI_ACCESS_FORCE_POST_WR | + MCI_AXI_ACCESS_DISABLE_CLK_GATING); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_AXI_ACCESS_DATA_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) | + MCI_INDIRECT_CTRL_LOCAL_PKT | + MCI_INDIRECT_CTRL_CIRCULAR_CMD); + + /* if Write command was successful, verify PCIe mode */ + if (mci_poll_command_completion(mci_index, MCI_CMD_WRITE) == 0) { + /* Verify the PCIe mode selected */ + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_HB_CTRL_TX_CTRL_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) | + MCI_INDIRECT_CTRL_LOCAL_PKT | + MCI_INDIRECT_CTRL_READ_CMD); + /* if read was completed, verify PCIe mode */ + if (mci_poll_command_completion(mci_index, MCI_CMD_READ) == 0) { + reg_data = mci_mmio_read_32( + MCI_WRITE_READ_DATA_REG(mci_index)); + if (reg_data & MCI_HB_CTRL_TX_CTRL_PCIE_MODE) + ret = 0; + } + } + + debug_exit(); + return ret; +} + +/* Reduce sequence FIFO timer expiration threshold */ +static int mci_axi_set_fifo_thresh(int mci_index) +{ + uint32_t reg_data, ret = 0; + + debug_enter(); + /* This configuration reduces sequence FIFO timer expiration threshold + * (to 0x7 instead of 0xA). + * In MCI 1.6 version this configuration prevents possible functional + * issues. 
+ * In version 1.82 the configuration prevents performance degradation + */ + + /* Configure local AP side */ + reg_data = MCI_PHY_CTRL_PIDI_MODE | + MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE | + MCI_PHY_CTRL_MCI_PHY_MODE_HOST | + MCI_PHY_CTRL_MCI_MAJOR | + MCI_PHY_CTRL_MCI_MINOR; + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* Reduce the threshold */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_CTRL_IHB_MODE_CFG_REG_DEF_VAL); + + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_CTRL_IHB_MODE_CFG_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* Exit PIDI mode */ + reg_data = MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE | + MCI_PHY_CTRL_MCI_PHY_MODE_HOST | + MCI_PHY_CTRL_MCI_MAJOR | + MCI_PHY_CTRL_MCI_MINOR; + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* Configure remote CP side */ + reg_data = MCI_PHY_CTRL_PIDI_MODE | + MCI_PHY_CTRL_MCI_MAJOR | + MCI_PHY_CTRL_MCI_MINOR | + MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE; + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) | + MCI_CTRL_IHB_MODE_FWD_MOD); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* Reduce the threshold */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_CTRL_IHB_MODE_CFG_REG_DEF_VAL); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_CTRL_IHB_MODE_CFG_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT)); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* Exit PIDI mode */ + reg_data = MCI_PHY_CTRL_MCI_MAJOR | + MCI_PHY_CTRL_MCI_MINOR | + MCI_PHY_CTRL_MCI_PHY_REG_IF_MODE; + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), reg_data); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) | + MCI_CTRL_IHB_MODE_FWD_MOD); + + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + debug_exit(); + return ret; +} + +/* Configure: + * 1. AP & CP TX thresholds and delta configurations + * 2. DLO & DLI FIFO full threshold + * 3. RX thresholds and delta configurations + * 4. CP AR and AW outstanding + * 5. 
AP AR and AW outstanding + */ +static int mci_axi_set_fifo_rx_tx_thresh(int mci_index) +{ + uint32_t ret = 0; + + debug_enter(); + /* AP TX thresholds and delta configurations (IHB_reg 0x1) */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_CTRL_TX_MEM_CFG_REG_DEF_VAL); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_CTRL_TX_MEM_CFG_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* CP TX thresholds and delta configurations (IHB_reg 0x1) */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_CTRL_TX_MEM_CFG_REG_DEF_VAL); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_CTRL_TX_MEM_CFG_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT)); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* AP DLO & DLI FIFO full threshold & Auto-Link enable (IHB_reg 0x8) */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL | + MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(1)); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* CP DLO & DLI FIFO full threshold (IHB_reg 0x8) */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT)); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* AP RX thresholds and delta configurations (IHB_reg 0x0) */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_CTRL_RX_MEM_CFG_REG_DEF_AP_VAL); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_CTRL_RX_MEM_CFG_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* CP RX thresholds and delta configurations (IHB_reg 0x0) */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_CTRL_RX_MEM_CFG_REG_DEF_CP_VAL); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_CTRL_RX_MEM_CFG_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT)); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* AP AR & AW maximum AXI outstanding request cfg (HB_reg 0xd) */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_HB_CTRL_TX_CTRL_PRI_TH_QOS(8) | + MCI_HB_CTRL_TX_CTRL_MAX_RD_CNT(3) | + MCI_HB_CTRL_TX_CTRL_MAX_WR_CNT(3)); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_HB_CTRL_TX_CTRL_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* CP AR & AW maximum AXI outstanding request cfg (HB_reg 0xd) */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_HB_CTRL_TX_CTRL_PRI_TH_QOS(8) | + MCI_HB_CTRL_TX_CTRL_MAX_RD_CNT(0xB) | + MCI_HB_CTRL_TX_CTRL_MAX_WR_CNT(0x11)); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_HB_CTRL_TX_CTRL_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT) | + MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB)); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + debug_exit(); + return ret; +} + +/* configure MCI to allow read & write transactions to arrive at the same time. 
+ * Without the below configuration, MCI won't sent response to CPU for + * transactions which arrived simultaneously and will lead to CPU hang. + * The below will configure MCI to be able to pass transactions from/to CP/AP. + */ +static int mci_enable_simultaneous_transactions(int mci_index) +{ + uint32_t ret = 0; + + debug_enter(); + /* ID assignment (assigning global ID offset to CP) */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_DID_GLOBAL_ASSIGN_REQ_MCI_LOCAL_ID(2) | + MCI_DID_GLOBAL_ASSIGN_REQ_MCI_COUNT(2) | + MCI_DID_GLOBAL_ASSIGN_REQ_HOPS_NUM(2)); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_DID_GLOBAL_ASSIGNMENT_REQUEST_REG) | + MCI_INDIRECT_CTRL_ASSIGN_CMD); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* Assigning dest. ID=3 to all transactions entering from AXI at AP */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_HB_CTRL_WIN0_DEST_VALID_FLAG(1) | + MCI_HB_CTRL_WIN0_DEST_ID(3)); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* Assigning dest. ID=1 to all transactions entering from AXI at CP */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), + MCI_HB_CTRL_WIN0_DEST_VALID_FLAG(1) | + MCI_HB_CTRL_WIN0_DEST_ID(1)); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT) | + MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB)); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* End address to all transactions entering from AXI at AP. + * This will lead to get match for any AXI address + * and receive destination ID=3 + */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), 0xffffffff); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_HB_CTRL_WIN0_ADDRESS_MASK_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + /* End address to all transactions entering from AXI at CP. + * This will lead to get match for any AXI address + * and receive destination ID=1 + */ + mci_mmio_write_32(MCI_WRITE_READ_DATA_REG(mci_index), 0xffffffff); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_HB_CTRL_WIN0_ADDRESS_MASK_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_IHB_EXT) | + MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB)); + ret |= mci_poll_command_completion(mci_index, MCI_CMD_WRITE); + + debug_exit(); + return ret; +} + +/* Check if MCI simultaneous transaction was already enabled. + * Currently bootrom does this mci configuration only when the boot source is + * SAR_MCIX4, in other cases it should be done at this stage. + * It is worth noticing that in case of booting from uart, the bootrom + * flow is different and this mci initialization is skipped even if boot + * source is SAR_MCIX4. Therefore new verification bases on appropriate mci's + * register content: if the appropriate reg contains 0x0 it means that the + * bootrom didn't perform required mci configuration. 
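+ * The check below issues a local indirect read of HB register 0x3
+ * (TX_CFG_W0_DST_ID) and treats a non-zero destination ID as proof that
+ * the bootrom already performed the configuration.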
+ * + * Returns: + * 0 - configuration already done + * 1 - configuration missing + */ +static _Bool mci_simulatenous_trans_missing(int mci_index) +{ + uint32_t reg, ret; + + /* read 'Window 0 Destination ID assignment' from HB register 0x3 + * (TX_CFG_W0_DST_ID) to check whether ID assignment was already + * performed by BootROM. + */ + debug_enter(); + mci_mmio_write_32(MCI_ACCESS_CMD_REG(mci_index), + MCI_INDIRECT_REG_CTRL_ADDR( + MCI_HB_CTRL_WIN0_DESTINATION_REG_NUM) | + MCI_INDIRECT_CTRL_HOPID(GID_AXI_HB) | + MCI_INDIRECT_CTRL_LOCAL_PKT | + MCI_INDIRECT_CTRL_READ_CMD); + ret = mci_poll_command_completion(mci_index, MCI_CMD_READ); + + reg = mci_mmio_read_32(MCI_WRITE_READ_DATA_REG(mci_index)); + + if (ret) + ERROR("Failed to verify MCI simultaneous read/write status\n"); + + debug_exit(); + /* default ID assignment is 0, so if register doesn't contain zeros + * it means that bootrom already performed required configuration. + */ + if (reg != 0) + return 0; + + return 1; +} + +/* For A1 revision, configure the MCI link for performance improvement: + * - set MCI to support read/write transactions to arrive at the same time + * - Switch AXI to PCIe mode + * - Reduce sequence FIFO threshold + * - Configure RX/TX FIFO thresholds + * + * Note: + * We don't exit on error code from any sub routine, to try (best effort) to + * complete the MCI configuration. + * (If we exit - Bootloader will surely fail to boot) + */ +int mci_configure(int mci_index) +{ + int rval; + + debug_enter(); + /* According to design guidelines the MCI simultaneous transaction + * shouldn't be enabled more then once - therefore make sure that it + * wasn't already enabled in bootrom. + */ + if (mci_simulatenous_trans_missing(mci_index)) { + VERBOSE("Enabling MCI simultaneous transaction for mci%d\n", + mci_index); + /* set MCI to support read/write transactions + * to arrive at the same time + */ + rval = mci_enable_simultaneous_transactions(mci_index); + if (rval) + ERROR("Failed to set MCI simultaneous read/write\n"); + } else + VERBOSE("Skip MCI ID assignment - already done by bootrom\n"); + + /* Configure MCI for more consistent behavior with AXI protocol */ + rval = mci_axi_set_pcie_mode(mci_index); + if (rval) + ERROR("Failed to set MCI to AXI PCIe mode\n"); + + /* reduce FIFO global threshold */ + rval = mci_axi_set_fifo_thresh(mci_index); + if (rval) + ERROR("Failed to set MCI FIFO global threshold\n"); + + /* configure RX/TX FIFO thresholds */ + rval = mci_axi_set_fifo_rx_tx_thresh(mci_index); + if (rval) + ERROR("Failed to set MCI RX/TX FIFO threshold\n"); + + debug_exit(); + return 1; +} + +int mci_get_link_status(void) +{ + uint32_t cmd, data; + + cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_STATUS_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT | MCI_INDIRECT_CTRL_READ_CMD); + if (mci_read(0, cmd, &data)) { + ERROR("Failed to read status register\n"); + return -1; + } + + /* Check if the link is ready */ + if (data != MCI_CTRL_PHY_READY) { + ERROR("Bad link status %x\n", data); + return -1; + } + + return 0; +} + +void mci_turn_link_down(void) +{ + uint32_t cmd, data; + int rval = 0; + + debug_enter(); + + /* Turn off auto-link */ + cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + data = (MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL2 | + MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(0)); + rval = mci_write(0, cmd, data); + if (rval) + ERROR("Failed to turn off auto-link\n"); + + /* Reset AP PHY */ + cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) | + 
MCI_INDIRECT_CTRL_LOCAL_PKT); + data = (MCI_PHY_CTRL_MCI_MINOR | + MCI_PHY_CTRL_MCI_MAJOR | + MCI_PHY_CTRL_MCI_PHY_MODE_HOST | + MCI_PHY_CTRL_MCI_PHY_RESET_CORE); + rval = mci_write(0, cmd, data); + if (rval) + ERROR("Failed to reset AP PHY\n"); + + /* Clear all status & CRC values */ + cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_LINK_CRC_CTRL_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + data = 0x0; + mci_write(0, cmd, data); + cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_STATUS_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + data = 0x0; + rval = mci_write(0, cmd, data); + if (rval) + ERROR("Failed to reset AP PHY\n"); + + /* Wait 5ms before un-reset the PHY */ + mdelay(5); + + /* Un-reset AP PHY */ + cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_PHY_CTRL_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + data = (MCI_PHY_CTRL_MCI_MINOR | MCI_PHY_CTRL_MCI_MAJOR | + MCI_PHY_CTRL_MCI_PHY_MODE_HOST); + rval = mci_write(0, cmd, data); + if (rval) + ERROR("Failed to un-reset AP PHY\n"); + + debug_exit(); +} + +void mci_turn_link_on(void) +{ + uint32_t cmd, data; + int rval = 0; + + debug_enter(); + /* Turn on auto-link */ + cmd = (MCI_INDIRECT_REG_CTRL_ADDR(MCI_CTRL_MCI_PHY_SETTINGS_REG_NUM) | + MCI_INDIRECT_CTRL_LOCAL_PKT); + data = (MCI_CTRL_MCI_PHY_SET_REG_DEF_VAL2 | + MCI_CTRL_MCI_PHY_SET_AUTO_LINK_EN(1)); + rval = mci_write(0, cmd, data); + if (rval) + ERROR("Failed to turn on auto-link\n"); + + debug_exit(); +} + +/* Initialize MCI for performance improvements */ +int mci_link_tune(int mci_index) +{ + int ret; + + debug_enter(); + INFO("MCI%d initialization:\n", mci_index); + + ret = mci_configure(mci_index); + + debug_exit(); + return ret; +} diff --git a/drivers/marvell/mg_conf_cm3/mg_conf_cm3.c b/drivers/marvell/mg_conf_cm3/mg_conf_cm3.c new file mode 100644 index 0000000..9352437 --- /dev/null +++ b/drivers/marvell/mg_conf_cm3/mg_conf_cm3.c @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2019 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#include <a8k_plat_def.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/mmio.h> +#include <mss_scp_bl2_format.h> + +/* CONFI REGISTERS */ +#define MG_CM3_CONFI_BASE(CP) (MVEBU_CP_REGS_BASE(CP) + 0x100000) +#define MG_CM3_SRAM_BASE(CP) MG_CM3_CONFI_BASE(CP) +#define MG_CM3_CONFI_GLOB_CFG_REG(CP) (MG_CM3_CONFI_BASE(CP) + 0x2B500) +#define CM3_CPU_EN_BIT BIT(28) +#define MG_CM3_MG_INT_MFX_REG(CP) (MG_CM3_CONFI_BASE(CP) + 0x2B054) +#define CM3_SYS_RESET_BIT BIT(0) + +#define MG_CM3_SHARED_MEM_BASE(CP) (MG_CM3_SRAM_BASE(CP) + 0x1FC00ULL) + +#define MG_SRAM_SIZE 0x20000 /* 128KB */ + +#define MG_ACK_TIMEOUT 10 + +/** + * struct ap_sharedmem_ctrl - used to pass information between the HOST and CM3 + * @init_done: Set by CM3 when ap_proces initialzied. Host check if CM3 set + * this flag to confirm that the process is running + * @lane_nr: Set by Host to mark which comphy lane should be configure. 
E.g.: + * - A8K development board uses comphy lane 2 for eth0 + * - CN913x development board uses comphy lane 4 for eth0 + */ +struct ap_sharedmem_ctrl { + uint32_t init_done; + uint32_t lane_nr; +}; + +int mg_image_load(uintptr_t src_addr, uint32_t size, int cp_index) +{ + uintptr_t mg_regs = MG_CM3_SRAM_BASE(cp_index); + + if (size > MG_SRAM_SIZE) { + ERROR("image is too big to fit into MG CM3 memory\n"); + return 1; + } + + NOTICE("Loading MG image from address 0x%lx Size 0x%x to MG at 0x%lx\n", + src_addr, size, mg_regs); + + /* Copy image to MG CM3 SRAM */ + memcpy((void *)mg_regs, (void *)src_addr, size); + + /* Don't release MG CM3 from reset - it will be done by next step + * bootloader (e.g. U-Boot), when appriopriate device-tree setup (which + * has enabeld 802.3. auto-neg) will be chosen. + */ + + return 0; +} + +void mg_start_ap_fw(int cp_nr, uint8_t comphy_index) +{ + volatile struct ap_sharedmem_ctrl *ap_shared_ctrl = + (void *)MG_CM3_SHARED_MEM_BASE(cp_nr); + int timeout = MG_ACK_TIMEOUT; + + if (mmio_read_32(MG_CM3_CONFI_GLOB_CFG_REG(cp_nr)) & CM3_CPU_EN_BIT) { + VERBOSE("cm3 already running\n"); + return; /* cm3 already running */ + } + + /* + * Mark which comphy lane should be used - it will be read via shared + * mem by ap process + */ + ap_shared_ctrl->lane_nr = comphy_index; + /* Make sure it took place before enabling cm3 */ + dmbst(); + + mmio_setbits_32(MG_CM3_CONFI_GLOB_CFG_REG(cp_nr), CM3_CPU_EN_BIT); + mmio_setbits_32(MG_CM3_MG_INT_MFX_REG(cp_nr), CM3_SYS_RESET_BIT); + + /* Check for ap process initialization by fw */ + while (ap_shared_ctrl->init_done != 1 && timeout--) + VERBOSE("Waiting for ap process ack, timeout %d\n", timeout); + + if (timeout == 0) { + ERROR("AP process failed, disabling cm3\n"); + mmio_clrbits_32(MG_CM3_MG_INT_MFX_REG(cp_nr), + CM3_SYS_RESET_BIT); + mmio_clrbits_32(MG_CM3_CONFI_GLOB_CFG_REG(cp_nr), + CM3_CPU_EN_BIT); + } +} diff --git a/drivers/marvell/mg_conf_cm3/mg_conf_cm3.h b/drivers/marvell/mg_conf_cm3/mg_conf_cm3.h new file mode 100644 index 0000000..e2756de --- /dev/null +++ b/drivers/marvell/mg_conf_cm3/mg_conf_cm3.h @@ -0,0 +1,9 @@ +/* + * Copyright (C) 2019 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +void mg_start_ap_fw(int cp_nr, uint8_t comphy_index); +int mg_image_load(uintptr_t src_addr, uint32_t size, int cp_index); diff --git a/drivers/marvell/mochi/ap807_setup.c b/drivers/marvell/mochi/ap807_setup.c new file mode 100644 index 0000000..75e9654 --- /dev/null +++ b/drivers/marvell/mochi/ap807_setup.c @@ -0,0 +1,339 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* AP807 Marvell SoC driver */ + +#include <common/debug.h> +#include <drivers/marvell/cache_llc.h> +#include <drivers/marvell/ccu.h> +#include <drivers/marvell/io_win.h> +#include <drivers/marvell/iob.h> +#include <drivers/marvell/mci.h> +#include <drivers/marvell/mochi/ap_setup.h> +#include <lib/mmio.h> +#include <lib/utils_def.h> + +#include <a8k_plat_def.h> + +#define SMMU_sACR (MVEBU_SMMU_BASE + 0x10) +#define SMMU_sACR_PG_64K (1 << 16) + +#define CCU_GSPMU_CR (MVEBU_CCU_BASE(MVEBU_AP0) \ + + 0x3F0) +#define GSPMU_CPU_CONTROL (0x1 << 0) + +#define CCU_HTC_CR (MVEBU_CCU_BASE(MVEBU_AP0) \ + + 0x200) +#define CCU_SET_POC_OFFSET 5 + +#define DSS_CR0 (MVEBU_RFU_BASE + 0x100) +#define DVM_48BIT_VA_ENABLE (1 << 21) + + +/* SoC RFU / IHBx4 Control */ +#define MCIX4_807_REG_START_ADDR_REG(unit_id) (MVEBU_RFU_BASE + \ + 0x4258 + (unit_id * 0x4)) + +/* Secure MoChi incoming access */ +#define SEC_MOCHI_IN_ACC_REG (MVEBU_RFU_BASE + 0x4738) +#define SEC_MOCHI_IN_ACC_IHB0_EN (1) +#define SEC_MOCHI_IN_ACC_IHB1_EN (1 << 3) +#define SEC_MOCHI_IN_ACC_IHB2_EN (1 << 6) +#define SEC_MOCHI_IN_ACC_PIDI_EN (1 << 9) +#define SEC_IN_ACCESS_ENA_ALL_MASTERS (SEC_MOCHI_IN_ACC_IHB0_EN | \ + SEC_MOCHI_IN_ACC_IHB1_EN | \ + SEC_MOCHI_IN_ACC_IHB2_EN | \ + SEC_MOCHI_IN_ACC_PIDI_EN) +#define MOCHI_IN_ACC_LEVEL_FORCE_NONSEC (0) +#define MOCHI_IN_ACC_LEVEL_FORCE_SEC (1) +#define MOCHI_IN_ACC_LEVEL_LEAVE_ORIG (2) +#define MOCHI_IN_ACC_LEVEL_MASK_ALL (3) +#define SEC_MOCHI_IN_ACC_IHB0_LEVEL(l) ((l) << 1) +#define SEC_MOCHI_IN_ACC_IHB1_LEVEL(l) ((l) << 4) +#define SEC_MOCHI_IN_ACC_PIDI_LEVEL(l) ((l) << 10) + + +/* SYSRST_OUTn Config definitions */ +#define MVEBU_SYSRST_OUT_CONFIG_REG (MVEBU_MISC_SOC_BASE + 0x4) +#define WD_MASK_SYS_RST_OUT (1 << 2) + +/* DSS PHY for DRAM */ +#define DSS_SCR_REG (MVEBU_RFU_BASE + 0x208) +#define DSS_PPROT_OFFS 4 +#define DSS_PPROT_MASK 0x7 +#define DSS_PPROT_PRIV_SECURE_DATA 0x1 + +/* Used for Units of AP-807 (e.g. SDIO and etc) */ +#define MVEBU_AXI_ATTR_BASE (MVEBU_REGS_BASE + 0x6F4580) +#define MVEBU_AXI_ATTR_REG(index) (MVEBU_AXI_ATTR_BASE + \ + 0x4 * index) + +#define XOR_STREAM_ID_REG(ch) (MVEBU_REGS_BASE + 0x410010 + (ch) * 0x20000) +#define XOR_STREAM_ID_MASK 0xFFFF +#define SDIO_STREAM_ID_REG (MVEBU_RFU_BASE + 0x4600) +#define SDIO_STREAM_ID_MASK 0xFF + +/* Do not use the default Stream ID 0 */ +#define A807_STREAM_ID_BASE (0x1) + +static uintptr_t stream_id_reg[] = { + XOR_STREAM_ID_REG(0), + XOR_STREAM_ID_REG(1), + XOR_STREAM_ID_REG(2), + XOR_STREAM_ID_REG(3), + SDIO_STREAM_ID_REG, + 0 +}; + +enum axi_attr { + AXI_SDIO_ATTR = 0, + AXI_DFX_ATTR, + AXI_MAX_ATTR, +}; + +static void ap_sec_masters_access_en(uint32_t enable) +{ + /* Open/Close incoming access for all masters. 
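+ * (one enable bit per IHB0/IHB1/IHB2/PIDI master in SEC_MOCHI_IN_ACC_REG).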
+ * The access is disabled in trusted boot mode + * Could only be done in EL3 + */ + if (enable != 0) { + mmio_clrsetbits_32(SEC_MOCHI_IN_ACC_REG, 0x0U, /* no clear */ + SEC_IN_ACCESS_ENA_ALL_MASTERS); +#if LLC_SRAM + /* Do not change access security level + * for PIDI masters + */ + mmio_clrsetbits_32(SEC_MOCHI_IN_ACC_REG, + SEC_MOCHI_IN_ACC_PIDI_LEVEL( + MOCHI_IN_ACC_LEVEL_MASK_ALL), + SEC_MOCHI_IN_ACC_PIDI_LEVEL( + MOCHI_IN_ACC_LEVEL_LEAVE_ORIG)); +#endif + } else { + mmio_clrsetbits_32(SEC_MOCHI_IN_ACC_REG, + SEC_IN_ACCESS_ENA_ALL_MASTERS, + 0x0U /* no set */); +#if LLC_SRAM + /* Return PIDI access level to the default */ + mmio_clrsetbits_32(SEC_MOCHI_IN_ACC_REG, + SEC_MOCHI_IN_ACC_PIDI_LEVEL( + MOCHI_IN_ACC_LEVEL_MASK_ALL), + SEC_MOCHI_IN_ACC_PIDI_LEVEL( + MOCHI_IN_ACC_LEVEL_FORCE_NONSEC)); +#endif + } +} + +static void setup_smmu(void) +{ + uint32_t reg; + + /* Set the SMMU page size to 64 KB */ + reg = mmio_read_32(SMMU_sACR); + reg |= SMMU_sACR_PG_64K; + mmio_write_32(SMMU_sACR, reg); +} + +static void init_aurora2(void) +{ + uint32_t reg; + + /* Enable GSPMU control by CPU */ + reg = mmio_read_32(CCU_GSPMU_CR); + reg |= GSPMU_CPU_CONTROL; + mmio_write_32(CCU_GSPMU_CR, reg); + +#if LLC_ENABLE + /* Enable LLC for AP807 in exclusive mode */ + llc_enable(0, 1); + + /* Set point of coherency to DDR. + * This is required by units which have + * SW cache coherency + */ + reg = mmio_read_32(CCU_HTC_CR); + reg |= (0x1 << CCU_SET_POC_OFFSET); + mmio_write_32(CCU_HTC_CR, reg); +#endif /* LLC_ENABLE */ + + errata_wa_init(); +} + + +/* MCIx indirect access register are based by default at 0xf4000000/0xf6000000 + * to avoid conflict of internal registers of units connected via MCIx, which + * can be based on the same address (i.e CP1 base is also 0xf4000000), + * the following routines remaps the MCIx indirect bases to another domain + */ +static void mci_remap_indirect_access_base(void) +{ + uint32_t mci; + + for (mci = 0; mci < MCI_MAX_UNIT_ID; mci++) + mmio_write_32(MCIX4_807_REG_START_ADDR_REG(mci), + MVEBU_MCI_REG_BASE_REMAP(mci) >> + MCI_REMAP_OFF_SHIFT); +} + +/* Set a unique stream id for all DMA capable devices */ +static void ap807_stream_id_init(void) +{ + uint32_t i; + + for (i = 0; + stream_id_reg[i] != 0 && i < ARRAY_SIZE(stream_id_reg); i++) { + uint32_t mask = stream_id_reg[i] == SDIO_STREAM_ID_REG ? + SDIO_STREAM_ID_MASK : XOR_STREAM_ID_MASK; + + mmio_clrsetbits_32(stream_id_reg[i], mask, + i + A807_STREAM_ID_BASE); + } +} + +static void ap807_axi_attr_init(void) +{ + uint32_t index, data; + + /* Initialize AXI attributes for AP807 */ + /* Go over the AXI attributes and set Ax-Cache and Ax-Domain */ + for (index = 0; index < AXI_MAX_ATTR; index++) { + switch (index) { + /* DFX works with no coherent only - + * there's no option to configure the Ax-Cache and Ax-Domain + */ + case AXI_DFX_ATTR: + continue; + default: + /* Set Ax-Cache as cacheable, no allocate, modifiable, + * bufferable. 
+ * The values are different because Read & Write + * definition is different in Ax-Cache + */ + data = mmio_read_32(MVEBU_AXI_ATTR_REG(index)); + data &= ~MVEBU_AXI_ATTR_ARCACHE_MASK; + data |= (CACHE_ATTR_WRITE_ALLOC | + CACHE_ATTR_CACHEABLE | + CACHE_ATTR_BUFFERABLE) << + MVEBU_AXI_ATTR_ARCACHE_OFFSET; + data &= ~MVEBU_AXI_ATTR_AWCACHE_MASK; + data |= (CACHE_ATTR_READ_ALLOC | + CACHE_ATTR_CACHEABLE | + CACHE_ATTR_BUFFERABLE) << + MVEBU_AXI_ATTR_AWCACHE_OFFSET; + /* Set Ax-Domain as Outer domain */ + data &= ~MVEBU_AXI_ATTR_ARDOMAIN_MASK; + data |= DOMAIN_OUTER_SHAREABLE << + MVEBU_AXI_ATTR_ARDOMAIN_OFFSET; + data &= ~MVEBU_AXI_ATTR_AWDOMAIN_MASK; + data |= DOMAIN_OUTER_SHAREABLE << + MVEBU_AXI_ATTR_AWDOMAIN_OFFSET; + mmio_write_32(MVEBU_AXI_ATTR_REG(index), data); + } + } +} + +static void misc_soc_configurations(void) +{ + uint32_t reg; + + /* Enable 48-bit VA */ + mmio_setbits_32(DSS_CR0, DVM_48BIT_VA_ENABLE); + + /* Un-mask Watchdog reset from influencing the SYSRST_OUTn. + * Otherwise, upon WD timeout, the WD reset signal won't trigger reset + */ + reg = mmio_read_32(MVEBU_SYSRST_OUT_CONFIG_REG); + reg &= ~(WD_MASK_SYS_RST_OUT); + mmio_write_32(MVEBU_SYSRST_OUT_CONFIG_REG, reg); +} + +/* + * By default all external CPs start with configuration address space set to + * 0xf200_0000. To overcome this issue, go in the loop and initialize the + * CP one by one, using temporary window configuration which allows to access + * each CP and update its configuration space according to decoding + * windows scheme defined for each platform. + */ +void update_cp110_default_win(int cp_id) +{ + int mci_id = cp_id - 1; + uintptr_t cp110_base, cp110_temp_base; + + /* CP110 default configuration address space */ + cp110_temp_base = MVEBU_AP_IO_BASE(MVEBU_AP0); + + struct addr_map_win iowin_temp_win = { + .base_addr = cp110_temp_base, + .win_size = MVEBU_CP_OFFSET, + }; + + iowin_temp_win.target_id = mci_id; + iow_temp_win_insert(0, &iowin_temp_win, 1); + + /* Calculate the new CP110 - base address */ + cp110_base = MVEBU_CP_REGS_BASE(cp_id); + /* Go and update the CP110 configuration address space */ + iob_cfg_space_update(0, cp_id, cp110_temp_base, cp110_base); + + /* Remove the temporary IO-WIN window */ + iow_temp_win_remove(0, &iowin_temp_win, 1); +} + +void ap_init(void) +{ + /* Setup Aurora2. 
*/ + init_aurora2(); + + /* configure MCI mapping */ + mci_remap_indirect_access_base(); + + /* configure IO_WIN windows */ + init_io_win(MVEBU_AP0); + + /* configure CCU windows */ + init_ccu(MVEBU_AP0); + + /* Set the stream IDs for DMA masters */ + ap807_stream_id_init(); + + /* configure the SMMU */ + setup_smmu(); + + /* Open AP incoming access for all masters */ + ap_sec_masters_access_en(1); + + /* configure axi for AP */ + ap807_axi_attr_init(); + + /* misc configuration of the SoC */ + misc_soc_configurations(); +} + +static void ap807_dram_phy_access_config(void) +{ + uint32_t reg_val; + /* Update DSS port access permission to DSS_PHY */ + reg_val = mmio_read_32(DSS_SCR_REG); + reg_val &= ~(DSS_PPROT_MASK << DSS_PPROT_OFFS); + reg_val |= ((DSS_PPROT_PRIV_SECURE_DATA & DSS_PPROT_MASK) << + DSS_PPROT_OFFS); + mmio_write_32(DSS_SCR_REG, reg_val); +} + +void ap_ble_init(void) +{ + /* Enable DSS port */ + ap807_dram_phy_access_config(); +} + +int ap_get_count(void) +{ + return 1; +} + + diff --git a/drivers/marvell/mochi/apn806_setup.c b/drivers/marvell/mochi/apn806_setup.c new file mode 100644 index 0000000..5c71fed --- /dev/null +++ b/drivers/marvell/mochi/apn806_setup.c @@ -0,0 +1,297 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* AP806 Marvell SoC driver */ + +#include <common/debug.h> +#include <drivers/marvell/ccu.h> +#include <drivers/marvell/cache_llc.h> +#include <drivers/marvell/io_win.h> +#include <drivers/marvell/mci.h> +#include <drivers/marvell/mochi/ap_setup.h> +#include <lib/mmio.h> + +#include <a8k_plat_def.h> + +#define SMMU_sACR (MVEBU_SMMU_BASE + 0x10) +#define SMMU_sACR_PG_64K (1 << 16) + +#define CCU_GSPMU_CR (MVEBU_CCU_BASE(MVEBU_AP0) + \ + 0x3F0) +#define GSPMU_CPU_CONTROL (0x1 << 0) + +#define CCU_HTC_CR (MVEBU_CCU_BASE(MVEBU_AP0) + \ + 0x200) +#define CCU_SET_POC_OFFSET 5 + +#define DSS_CR0 (MVEBU_RFU_BASE + 0x100) +#define DVM_48BIT_VA_ENABLE (1 << 21) + +/* Secure MoChi incoming access */ +#define SEC_MOCHI_IN_ACC_REG (MVEBU_RFU_BASE + 0x4738) +#define SEC_MOCHI_IN_ACC_IHB0_EN (1) +#define SEC_MOCHI_IN_ACC_IHB1_EN (1 << 3) +#define SEC_MOCHI_IN_ACC_IHB2_EN (1 << 6) +#define SEC_MOCHI_IN_ACC_PIDI_EN (1 << 9) +#define SEC_IN_ACCESS_ENA_ALL_MASTERS (SEC_MOCHI_IN_ACC_IHB0_EN | \ + SEC_MOCHI_IN_ACC_IHB1_EN | \ + SEC_MOCHI_IN_ACC_IHB2_EN | \ + SEC_MOCHI_IN_ACC_PIDI_EN) +#define MOCHI_IN_ACC_LEVEL_FORCE_NONSEC (0) +#define MOCHI_IN_ACC_LEVEL_FORCE_SEC (1) +#define MOCHI_IN_ACC_LEVEL_LEAVE_ORIG (2) +#define MOCHI_IN_ACC_LEVEL_MASK_ALL (3) +#define SEC_MOCHI_IN_ACC_IHB0_LEVEL(l) ((l) << 1) +#define SEC_MOCHI_IN_ACC_IHB1_LEVEL(l) ((l) << 4) +#define SEC_MOCHI_IN_ACC_PIDI_LEVEL(l) ((l) << 10) + + +/* SYSRST_OUTn Config definitions */ +#define MVEBU_SYSRST_OUT_CONFIG_REG (MVEBU_MISC_SOC_BASE + 0x4) +#define WD_MASK_SYS_RST_OUT (1 << 2) + +/* Generic Timer System Controller */ +#define MVEBU_MSS_GTCR_REG (MVEBU_REGS_BASE + 0x581000) +#define MVEBU_MSS_GTCR_ENABLE_BIT 0x1 + +/* + * AXI Configuration. + */ + +/* Used for Units of AP-806 (e.g. 
SDIO and etc) */ +#define MVEBU_AXI_ATTR_BASE (MVEBU_REGS_BASE + 0x6F4580) +#define MVEBU_AXI_ATTR_REG(index) (MVEBU_AXI_ATTR_BASE + \ + 0x4 * index) + +#define XOR_STREAM_ID_REG(ch) (MVEBU_REGS_BASE + 0x410010 + (ch) * 0x20000) +#define XOR_STREAM_ID_MASK 0xFFFF +#define SDIO_STREAM_ID_REG (MVEBU_RFU_BASE + 0x4600) +#define SDIO_STREAM_ID_MASK 0xFF + +/* Do not use the default Stream ID 0 */ +#define A806_STREAM_ID_BASE (0x1) + +static uintptr_t stream_id_reg[] = { + XOR_STREAM_ID_REG(0), + XOR_STREAM_ID_REG(1), + XOR_STREAM_ID_REG(2), + XOR_STREAM_ID_REG(3), + SDIO_STREAM_ID_REG, + 0 +}; + +enum axi_attr { + AXI_SDIO_ATTR = 0, + AXI_DFX_ATTR, + AXI_MAX_ATTR, +}; + +static void apn_sec_masters_access_en(uint32_t enable) +{ + /* Open/Close incoming access for all masters. + * The access is disabled in trusted boot mode + * Could only be done in EL3 + */ + if (enable != 0) { + mmio_clrsetbits_32(SEC_MOCHI_IN_ACC_REG, 0x0U, /* no clear */ + SEC_IN_ACCESS_ENA_ALL_MASTERS); +#if LLC_SRAM + /* Do not change access security level + * for PIDI masters + */ + mmio_clrsetbits_32(SEC_MOCHI_IN_ACC_REG, + SEC_MOCHI_IN_ACC_PIDI_LEVEL( + MOCHI_IN_ACC_LEVEL_MASK_ALL), + SEC_MOCHI_IN_ACC_PIDI_LEVEL( + MOCHI_IN_ACC_LEVEL_LEAVE_ORIG)); +#endif + } else { + mmio_clrsetbits_32(SEC_MOCHI_IN_ACC_REG, + SEC_IN_ACCESS_ENA_ALL_MASTERS, + 0x0U /* no set */); +#if LLC_SRAM + /* Return PIDI access level to the default */ + mmio_clrsetbits_32(SEC_MOCHI_IN_ACC_REG, + SEC_MOCHI_IN_ACC_PIDI_LEVEL( + MOCHI_IN_ACC_LEVEL_MASK_ALL), + SEC_MOCHI_IN_ACC_PIDI_LEVEL( + MOCHI_IN_ACC_LEVEL_FORCE_NONSEC)); +#endif + } +} + +static void setup_smmu(void) +{ + uint32_t reg; + + /* Set the SMMU page size to 64 KB */ + reg = mmio_read_32(SMMU_sACR); + reg |= SMMU_sACR_PG_64K; + mmio_write_32(SMMU_sACR, reg); +} + +static void init_aurora2(void) +{ + uint32_t reg; + + /* Enable GSPMU control by CPU */ + reg = mmio_read_32(CCU_GSPMU_CR); + reg |= GSPMU_CPU_CONTROL; + mmio_write_32(CCU_GSPMU_CR, reg); + +#if LLC_ENABLE + /* Enable LLC for AP806 in exclusive mode */ + llc_enable(0, 1); + + /* Set point of coherency to DDR. + * This is required by units which have + * SW cache coherency + */ + reg = mmio_read_32(CCU_HTC_CR); + reg |= (0x1 << CCU_SET_POC_OFFSET); + mmio_write_32(CCU_HTC_CR, reg); +#endif /* LLC_ENABLE */ + + errata_wa_init(); +} + + +/* MCIx indirect access register are based by default at 0xf4000000/0xf6000000 + * to avoid conflict of internal registers of units connected via MCIx, which + * can be based on the same address (i.e CP1 base is also 0xf4000000), + * the following routines remaps the MCIx indirect bases to another domain + */ +static void mci_remap_indirect_access_base(void) +{ + uint32_t mci; + + for (mci = 0; mci < MCI_MAX_UNIT_ID; mci++) + mmio_write_32(MCIX4_REG_START_ADDRESS_REG(mci), + MVEBU_MCI_REG_BASE_REMAP(mci) >> + MCI_REMAP_OFF_SHIFT); +} + +/* Set a unique stream id for all DMA capable devices */ +static void ap806_stream_id_init(void) +{ + int i; + + for (i = 0; stream_id_reg[i] != 0; i++) { + uint32_t mask = stream_id_reg[i] == SDIO_STREAM_ID_REG ? 
+ SDIO_STREAM_ID_MASK : XOR_STREAM_ID_MASK; + + mmio_clrsetbits_32(stream_id_reg[i], mask, + i + A806_STREAM_ID_BASE); + } +} + +static void apn806_axi_attr_init(void) +{ + uint32_t index, data; + + /* Initialize AXI attributes for APN806 */ + + /* Go over the AXI attributes and set Ax-Cache and Ax-Domain */ + for (index = 0; index < AXI_MAX_ATTR; index++) { + switch (index) { + /* DFX works with no coherent only - + * there's no option to configure the Ax-Cache and Ax-Domain + */ + case AXI_DFX_ATTR: + continue; + default: + /* Set Ax-Cache as cacheable, no allocate, modifiable, + * bufferable + * The values are different because Read & Write + * definition is different in Ax-Cache + */ + data = mmio_read_32(MVEBU_AXI_ATTR_REG(index)); + data &= ~MVEBU_AXI_ATTR_ARCACHE_MASK; + data |= (CACHE_ATTR_WRITE_ALLOC | + CACHE_ATTR_CACHEABLE | + CACHE_ATTR_BUFFERABLE) << + MVEBU_AXI_ATTR_ARCACHE_OFFSET; + data &= ~MVEBU_AXI_ATTR_AWCACHE_MASK; + data |= (CACHE_ATTR_READ_ALLOC | + CACHE_ATTR_CACHEABLE | + CACHE_ATTR_BUFFERABLE) << + MVEBU_AXI_ATTR_AWCACHE_OFFSET; + /* Set Ax-Domain as Outer domain */ + data &= ~MVEBU_AXI_ATTR_ARDOMAIN_MASK; + data |= DOMAIN_OUTER_SHAREABLE << + MVEBU_AXI_ATTR_ARDOMAIN_OFFSET; + data &= ~MVEBU_AXI_ATTR_AWDOMAIN_MASK; + data |= DOMAIN_OUTER_SHAREABLE << + MVEBU_AXI_ATTR_AWDOMAIN_OFFSET; + mmio_write_32(MVEBU_AXI_ATTR_REG(index), data); + } + } +} + +static void dss_setup(void) +{ + /* Enable 48-bit VA */ + mmio_setbits_32(DSS_CR0, DVM_48BIT_VA_ENABLE); +} + +void misc_soc_configurations(void) +{ + uint32_t reg; + + /* Un-mask Watchdog reset from influencing the SYSRST_OUTn. + * Otherwise, upon WD timeout, the WD reset signal won't trigger reset + */ + reg = mmio_read_32(MVEBU_SYSRST_OUT_CONFIG_REG); + reg &= ~(WD_MASK_SYS_RST_OUT); + mmio_write_32(MVEBU_SYSRST_OUT_CONFIG_REG, reg); +} + +void ap_init(void) +{ + /* Setup Aurora2. */ + init_aurora2(); + + /* configure MCI mapping */ + mci_remap_indirect_access_base(); + + /* configure IO_WIN windows */ + init_io_win(MVEBU_AP0); + + /* configure CCU windows */ + init_ccu(MVEBU_AP0); + + /* configure DSS */ + dss_setup(); + + /* Set the stream IDs for DMA masters */ + ap806_stream_id_init(); + + /* configure the SMMU */ + setup_smmu(); + + /* Open APN incoming access for all masters */ + apn_sec_masters_access_en(1); + + /* configure axi for APN*/ + apn806_axi_attr_init(); + + /* misc configuration of the SoC */ + misc_soc_configurations(); +} + +void ap_ble_init(void) +{ +} + +int ap_get_count(void) +{ + return 1; +} + +void update_cp110_default_win(int cp_id) +{ +} diff --git a/drivers/marvell/mochi/cp110_setup.c b/drivers/marvell/mochi/cp110_setup.c new file mode 100644 index 0000000..f12da0e --- /dev/null +++ b/drivers/marvell/mochi/cp110_setup.c @@ -0,0 +1,467 @@ +/* + * Copyright (C) 2018-2020 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* CP110 Marvell SoC driver */ + +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <drivers/marvell/amb_adec.h> +#include <drivers/marvell/iob.h> +#include <drivers/marvell/mochi/cp110_setup.h> +#include <drivers/rambus/trng_ip_76.h> + +#include <efuse_def.h> +#include <plat_marvell.h> + +/* + * AXI Configuration. + */ + + /* Used for Units of CP-110 (e.g. 
USB device, USB Host, and etc) */ +#define MVEBU_AXI_ATTR_OFFSET (0x441300) +#define MVEBU_AXI_ATTR_REG(index) (MVEBU_AXI_ATTR_OFFSET + \ + 0x4 * index) + +/* AXI Protection bits */ +#define MVEBU_AXI_PROT_OFFSET (0x441200) + +/* AXI Protection regs */ +#define MVEBU_AXI_PROT_REG(index) ((index <= 4) ? \ + (MVEBU_AXI_PROT_OFFSET + \ + 0x4 * index) : \ + (MVEBU_AXI_PROT_OFFSET + 0x18)) +#define MVEBU_AXI_PROT_REGS_NUM (6) + +#define MVEBU_SOC_CFGS_OFFSET (0x441900) +#define MVEBU_SOC_CFG_REG(index) (MVEBU_SOC_CFGS_OFFSET + \ + 0x4 * index) +#define MVEBU_SOC_CFG_REG_NUM (0) +#define MVEBU_SOC_CFG_GLOG_SECURE_EN_MASK (0xE) + +/* SATA3 MBUS to AXI regs */ +#define MVEBU_BRIDGE_WIN_DIS_REG (MVEBU_SOC_CFGS_OFFSET + 0x10) +#define MVEBU_BRIDGE_WIN_DIS_OFF (0x0) + +/* SATA3 MBUS to AXI regs */ +#define MVEBU_SATA_M2A_AXI_PORT_CTRL_REG (0x54ff04) + +/* AXI to MBUS bridge registers */ +#define MVEBU_AMB_IP_OFFSET (0x13ff00) +#define MVEBU_AMB_IP_BRIDGE_WIN_REG(win) (MVEBU_AMB_IP_OFFSET + \ + (win * 0x8)) +#define MVEBU_AMB_IP_BRIDGE_WIN_EN_OFFSET 0 +#define MVEBU_AMB_IP_BRIDGE_WIN_EN_MASK \ + (0x1 << MVEBU_AMB_IP_BRIDGE_WIN_EN_OFFSET) +#define MVEBU_AMB_IP_BRIDGE_WIN_SIZE_OFFSET 16 +#define MVEBU_AMB_IP_BRIDGE_WIN_SIZE_MASK \ + (0xffffu << MVEBU_AMB_IP_BRIDGE_WIN_SIZE_OFFSET) + +#define MVEBU_SAMPLE_AT_RESET_REG (0x440600) +#define SAR_PCIE1_CLK_CFG_OFFSET 31 +#define SAR_PCIE1_CLK_CFG_MASK (0x1u << SAR_PCIE1_CLK_CFG_OFFSET) +#define SAR_PCIE0_CLK_CFG_OFFSET 30 +#define SAR_PCIE0_CLK_CFG_MASK (0x1 << SAR_PCIE0_CLK_CFG_OFFSET) +#define SAR_I2C_INIT_EN_OFFSET 24 +#define SAR_I2C_INIT_EN_MASK (1 << SAR_I2C_INIT_EN_OFFSET) + +/******************************************************************************* + * PCIE clock buffer control + ******************************************************************************/ +#define MVEBU_PCIE_REF_CLK_BUF_CTRL (0x4404F0) +#define PCIE1_REFCLK_BUFF_SOURCE 0x800 +#define PCIE0_REFCLK_BUFF_SOURCE 0x400 + +/******************************************************************************* + * MSS Device Push Set Register + ******************************************************************************/ +#define MVEBU_CP_MSS_DPSHSR_REG (0x280040) +#define MSS_DPSHSR_REG_PCIE_CLK_SEL 0x8 + +/******************************************************************************* + * RTC Configuration + ******************************************************************************/ +#define MVEBU_RTC_BASE (0x284000) +#define MVEBU_RTC_STATUS_REG (MVEBU_RTC_BASE + 0x0) +#define MVEBU_RTC_STATUS_ALARM1_MASK 0x1 +#define MVEBU_RTC_STATUS_ALARM2_MASK 0x2 +#define MVEBU_RTC_IRQ_1_CONFIG_REG (MVEBU_RTC_BASE + 0x4) +#define MVEBU_RTC_IRQ_2_CONFIG_REG (MVEBU_RTC_BASE + 0x8) +#define MVEBU_RTC_TIME_REG (MVEBU_RTC_BASE + 0xC) +#define MVEBU_RTC_ALARM_1_REG (MVEBU_RTC_BASE + 0x10) +#define MVEBU_RTC_ALARM_2_REG (MVEBU_RTC_BASE + 0x14) +#define MVEBU_RTC_CCR_REG (MVEBU_RTC_BASE + 0x18) +#define MVEBU_RTC_NOMINAL_TIMING 0x2000 +#define MVEBU_RTC_NOMINAL_TIMING_MASK 0x7FFF +#define MVEBU_RTC_TEST_CONFIG_REG (MVEBU_RTC_BASE + 0x1C) +#define MVEBU_RTC_BRIDGE_TIMING_CTRL0_REG (MVEBU_RTC_BASE + 0x80) +#define MVEBU_RTC_WRCLK_PERIOD_MASK 0xFFFF +#define MVEBU_RTC_WRCLK_PERIOD_DEFAULT 0x3FF +#define MVEBU_RTC_WRCLK_SETUP_OFFS 16 +#define MVEBU_RTC_WRCLK_SETUP_MASK 0xFFFF0000 +#define MVEBU_RTC_WRCLK_SETUP_DEFAULT 0x29 +#define MVEBU_RTC_BRIDGE_TIMING_CTRL1_REG (MVEBU_RTC_BASE + 0x84) +#define MVEBU_RTC_READ_OUTPUT_DELAY_MASK 0xFFFF +#define MVEBU_RTC_READ_OUTPUT_DELAY_DEFAULT 0x1F + 
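Among the definitions above, the SAR_PCIE0/1_CLK_CFG fields of the sample-at-reset register record whether each PCIe reference clock pin is an input or an output; cp110_pcie_clk_cfg() further down consumes them together with the CP110 revision. Below is a minimal illustrative sketch of just the decode step; the helper name is an assumption and not part of the upstream file, and it relies on the definitions above plus <lib/mmio.h> and <stdbool.h>.

#include <stdbool.h>

/* Sketch only: read the sample-at-reset register and report the PCIe
 * reference clock directions.  A cleared field is treated as "the
 * reference clock is an input", matching the CP110-A1 comment in
 * cp110_pcie_clk_cfg() below.
 */
static void cp110_sar_pcie_clk_dir(uintptr_t base, bool *pcie0_is_input,
				   bool *pcie1_is_input)
{
	uint32_t sar = mmio_read_32(base + MVEBU_SAMPLE_AT_RESET_REG);

	*pcie0_is_input = ((sar & SAR_PCIE0_CLK_CFG_MASK) >>
			   SAR_PCIE0_CLK_CFG_OFFSET) == 0;
	*pcie1_is_input = ((sar & SAR_PCIE1_CLK_CFG_MASK) >>
			   SAR_PCIE1_CLK_CFG_OFFSET) == 0;
}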
+/******************************************************************************* + * TRNG Configuration + ******************************************************************************/ +#define MVEBU_TRNG_BASE (0x760000) +#define MVEBU_EFUSE_TRNG_ENABLE_EFUSE_WORD MVEBU_AP_LDX_220_189_EFUSE_OFFS +#define MVEBU_EFUSE_TRNG_ENABLE_BIT_OFFSET 13 /* LD0[202] */ + +enum axi_attr { + AXI_ADUNIT_ATTR = 0, + AXI_COMUNIT_ATTR, + AXI_EIP197_ATTR, + AXI_USB3D_ATTR, + AXI_USB3H0_ATTR, + AXI_USB3H1_ATTR, + AXI_SATA0_ATTR, + AXI_SATA1_ATTR, + AXI_DAP_ATTR, + AXI_DFX_ATTR, + AXI_DBG_TRC_ATTR = 12, + AXI_SDIO_ATTR, + AXI_MSS_ATTR, + AXI_MAX_ATTR, +}; + +/* Most stream IDS are configured centrally in the CP-110 RFU + * but some are configured inside the unit registers + */ +#define RFU_STREAM_ID_BASE (0x450000) +#define USB3H_0_STREAM_ID_REG (RFU_STREAM_ID_BASE + 0xC) +#define USB3H_1_STREAM_ID_REG (RFU_STREAM_ID_BASE + 0x10) +#define SATA_0_STREAM_ID_REG (RFU_STREAM_ID_BASE + 0x14) +#define SATA_1_STREAM_ID_REG (RFU_STREAM_ID_BASE + 0x18) +#define SDIO_STREAM_ID_REG (RFU_STREAM_ID_BASE + 0x28) + +#define CP_DMA_0_STREAM_ID_REG (0x6B0010) +#define CP_DMA_1_STREAM_ID_REG (0x6D0010) + +/* We allocate IDs 128-255 for PCIe */ +#define MAX_STREAM_ID (0x80) + +static uintptr_t stream_id_reg[] = { + USB3H_0_STREAM_ID_REG, + USB3H_1_STREAM_ID_REG, + CP_DMA_0_STREAM_ID_REG, + CP_DMA_1_STREAM_ID_REG, + SATA_0_STREAM_ID_REG, + SATA_1_STREAM_ID_REG, + SDIO_STREAM_ID_REG, + 0 +}; + +static void cp110_errata_wa_init(uintptr_t base) +{ + uint32_t data; + + /* ERRATA GL-4076863: + * Reset value for global_secure_enable inputs must be changed + * from '1' to '0'. + * When asserted, only "secured" transactions can enter IHB + * configuration space. + * However, blocking AXI transactions is performed by IOB. + * Performing it also at IHB/HB complicates programming model. + * + * Enable non-secure access in SOC configuration register + */ + data = mmio_read_32(base + MVEBU_SOC_CFG_REG(MVEBU_SOC_CFG_REG_NUM)); + data &= ~MVEBU_SOC_CFG_GLOG_SECURE_EN_MASK; + mmio_write_32(base + MVEBU_SOC_CFG_REG(MVEBU_SOC_CFG_REG_NUM), data); +} + +static void cp110_pcie_clk_cfg(uintptr_t base) +{ + uint32_t pcie0_clk, pcie1_clk, reg; + + /* + * Determine the pcie0/1 clock direction (input/output) from the + * sample at reset. + */ + reg = mmio_read_32(base + MVEBU_SAMPLE_AT_RESET_REG); + pcie0_clk = (reg & SAR_PCIE0_CLK_CFG_MASK) >> SAR_PCIE0_CLK_CFG_OFFSET; + pcie1_clk = (reg & SAR_PCIE1_CLK_CFG_MASK) >> SAR_PCIE1_CLK_CFG_OFFSET; + + /* CP110 revision A2 or CN913x */ + if (cp110_rev_id_get(base) == MVEBU_CP110_REF_ID_A2 || + cp110_device_id_get(base) == MVEBU_CN9130_DEV_ID) { + /* + * PCIe Reference Clock Buffer Control register must be + * set according to the clock direction (input/output) + */ + reg = mmio_read_32(base + MVEBU_PCIE_REF_CLK_BUF_CTRL); + reg &= ~(PCIE0_REFCLK_BUFF_SOURCE | PCIE1_REFCLK_BUFF_SOURCE); + if (!pcie0_clk) + reg |= PCIE0_REFCLK_BUFF_SOURCE; + if (!pcie1_clk) + reg |= PCIE1_REFCLK_BUFF_SOURCE; + + mmio_write_32(base + MVEBU_PCIE_REF_CLK_BUF_CTRL, reg); + } + + /* CP110 revision A1 */ + if (cp110_rev_id_get(base) == MVEBU_CP110_REF_ID_A1) { + if (!pcie0_clk || !pcie1_clk) { + /* + * if one of the pcie clocks is set to input, + * we need to set mss_push[131] field, otherwise, + * the pcie clock might not work. 
+ */ + reg = mmio_read_32(base + MVEBU_CP_MSS_DPSHSR_REG); + reg |= MSS_DPSHSR_REG_PCIE_CLK_SEL; + mmio_write_32(base + MVEBU_CP_MSS_DPSHSR_REG, reg); + } + } +} + +/* Set a unique stream id for all DMA capable devices */ +static void cp110_stream_id_init(uintptr_t base, uint32_t stream_id) +{ + int i = 0; + + while (stream_id_reg[i]) { + if (i > MAX_STREAM_ID_PER_CP) { + NOTICE("Only first %d (maximum) Stream IDs allocated\n", + MAX_STREAM_ID_PER_CP); + return; + } + + if ((stream_id_reg[i] == CP_DMA_0_STREAM_ID_REG) || + (stream_id_reg[i] == CP_DMA_1_STREAM_ID_REG)) + mmio_write_32(base + stream_id_reg[i], + stream_id << 16 | stream_id); + else + mmio_write_32(base + stream_id_reg[i], stream_id); + + /* SATA port 0/1 are in the same SATA unit, and they should use + * the same STREAM ID number + */ + if (stream_id_reg[i] != SATA_0_STREAM_ID_REG) + stream_id++; + + i++; + } +} + +static void cp110_axi_attr_init(uintptr_t base) +{ + uint32_t index, data; + + /* Initialize AXI attributes for Armada-7K/8K SoC */ + + /* Go over the AXI attributes and set Ax-Cache and Ax-Domain */ + for (index = 0; index < AXI_MAX_ATTR; index++) { + switch (index) { + /* DFX and MSS unit works with no coherent only - + * there's no option to configure the Ax-Cache and Ax-Domain + */ + case AXI_DFX_ATTR: + case AXI_MSS_ATTR: + continue; + default: + /* Set Ax-Cache as cacheable, no allocate, modifiable, + * bufferable + * The values are different because Read & Write + * definition is different in Ax-Cache + */ + data = mmio_read_32(base + MVEBU_AXI_ATTR_REG(index)); + data &= ~MVEBU_AXI_ATTR_ARCACHE_MASK; + data |= (CACHE_ATTR_WRITE_ALLOC | + CACHE_ATTR_CACHEABLE | + CACHE_ATTR_BUFFERABLE) << + MVEBU_AXI_ATTR_ARCACHE_OFFSET; + data &= ~MVEBU_AXI_ATTR_AWCACHE_MASK; + data |= (CACHE_ATTR_READ_ALLOC | + CACHE_ATTR_CACHEABLE | + CACHE_ATTR_BUFFERABLE) << + MVEBU_AXI_ATTR_AWCACHE_OFFSET; + /* Set Ax-Domain as Outer domain */ + data &= ~MVEBU_AXI_ATTR_ARDOMAIN_MASK; + data |= DOMAIN_OUTER_SHAREABLE << + MVEBU_AXI_ATTR_ARDOMAIN_OFFSET; + data &= ~MVEBU_AXI_ATTR_AWDOMAIN_MASK; + data |= DOMAIN_OUTER_SHAREABLE << + MVEBU_AXI_ATTR_AWDOMAIN_OFFSET; + mmio_write_32(base + MVEBU_AXI_ATTR_REG(index), data); + } + } + + /* SATA IOCC supported, cache attributes + * for SATA MBUS to AXI configuration. + */ + data = mmio_read_32(base + MVEBU_SATA_M2A_AXI_PORT_CTRL_REG); + data &= ~MVEBU_SATA_M2A_AXI_AWCACHE_MASK; + data |= (CACHE_ATTR_WRITE_ALLOC | + CACHE_ATTR_CACHEABLE | + CACHE_ATTR_BUFFERABLE) << + MVEBU_SATA_M2A_AXI_AWCACHE_OFFSET; + data &= ~MVEBU_SATA_M2A_AXI_ARCACHE_MASK; + data |= (CACHE_ATTR_READ_ALLOC | + CACHE_ATTR_CACHEABLE | + CACHE_ATTR_BUFFERABLE) << + MVEBU_SATA_M2A_AXI_ARCACHE_OFFSET; + mmio_write_32(base + MVEBU_SATA_M2A_AXI_PORT_CTRL_REG, data); + + /* Set all IO's AXI attribute to non-secure access. 
*/ + for (index = 0; index < MVEBU_AXI_PROT_REGS_NUM; index++) + mmio_write_32(base + MVEBU_AXI_PROT_REG(index), + DOMAIN_SYSTEM_SHAREABLE); +} + +void cp110_amb_init(uintptr_t base) +{ + uint32_t reg; + + /* Open AMB bridge Window to Access COMPHY/MDIO registers */ + reg = mmio_read_32(base + MVEBU_AMB_IP_BRIDGE_WIN_REG(0)); + reg &= ~(MVEBU_AMB_IP_BRIDGE_WIN_SIZE_MASK | + MVEBU_AMB_IP_BRIDGE_WIN_EN_MASK); + reg |= (0x7ff << MVEBU_AMB_IP_BRIDGE_WIN_SIZE_OFFSET) | + (0x1 << MVEBU_AMB_IP_BRIDGE_WIN_EN_OFFSET); + mmio_write_32(base + MVEBU_AMB_IP_BRIDGE_WIN_REG(0), reg); +} + +static void cp110_rtc_init(uintptr_t base) +{ + /* Update MBus timing parameters before accessing RTC registers */ + mmio_clrsetbits_32(base + MVEBU_RTC_BRIDGE_TIMING_CTRL0_REG, + MVEBU_RTC_WRCLK_PERIOD_MASK, + MVEBU_RTC_WRCLK_PERIOD_DEFAULT); + + mmio_clrsetbits_32(base + MVEBU_RTC_BRIDGE_TIMING_CTRL0_REG, + MVEBU_RTC_WRCLK_SETUP_MASK, + MVEBU_RTC_WRCLK_SETUP_DEFAULT << + MVEBU_RTC_WRCLK_SETUP_OFFS); + + mmio_clrsetbits_32(base + MVEBU_RTC_BRIDGE_TIMING_CTRL1_REG, + MVEBU_RTC_READ_OUTPUT_DELAY_MASK, + MVEBU_RTC_READ_OUTPUT_DELAY_DEFAULT); + + /* + * Issue reset to the RTC if Clock Correction register + * contents did not sustain the reboot/power-on. + */ + if ((mmio_read_32(base + MVEBU_RTC_CCR_REG) & + MVEBU_RTC_NOMINAL_TIMING_MASK) != MVEBU_RTC_NOMINAL_TIMING) { + /* Reset Test register */ + mmio_write_32(base + MVEBU_RTC_TEST_CONFIG_REG, 0); + mdelay(500); + + /* Reset Status register */ + mmio_write_32(base + MVEBU_RTC_STATUS_REG, + (MVEBU_RTC_STATUS_ALARM1_MASK | + MVEBU_RTC_STATUS_ALARM2_MASK)); + udelay(62); + + /* Turn off Int1 and Int2 sources & clear the Alarm count */ + mmio_write_32(base + MVEBU_RTC_IRQ_1_CONFIG_REG, 0); + mmio_write_32(base + MVEBU_RTC_IRQ_2_CONFIG_REG, 0); + mmio_write_32(base + MVEBU_RTC_ALARM_1_REG, 0); + mmio_write_32(base + MVEBU_RTC_ALARM_2_REG, 0); + + /* Setup nominal register access timing */ + mmio_write_32(base + MVEBU_RTC_CCR_REG, + MVEBU_RTC_NOMINAL_TIMING); + + /* Reset Status register */ + mmio_write_32(base + MVEBU_RTC_STATUS_REG, + (MVEBU_RTC_STATUS_ALARM1_MASK | + MVEBU_RTC_STATUS_ALARM2_MASK)); + udelay(50); + } +} + +static void cp110_amb_adec_init(uintptr_t base) +{ + /* enable AXI-MBUS by clearing "Bridge Windows Disable" */ + mmio_clrbits_32(base + MVEBU_BRIDGE_WIN_DIS_REG, + (1 << MVEBU_BRIDGE_WIN_DIS_OFF)); + + /* configure AXI-MBUS windows for CP */ + init_amb_adec(base); +} + +static void cp110_trng_init(uintptr_t base) +{ + static bool done; + int ret; + uint32_t reg_val, efuse; + + /* Set access to LD0 */ + reg_val = mmio_read_32(MVEBU_AP_EFUSE_SRV_CTRL_REG); + reg_val &= ~EFUSE_SRV_CTRL_LD_SELECT_MASK; + mmio_write_32(MVEBU_AP_EFUSE_SRV_CTRL_REG, reg_val); + + /* Obtain the AP LD0 bit defining TRNG presence */ + efuse = mmio_read_32(MVEBU_EFUSE_TRNG_ENABLE_EFUSE_WORD); + efuse >>= MVEBU_EFUSE_TRNG_ENABLE_BIT_OFFSET; + efuse &= 1; + + if (efuse == 0) { + VERBOSE("TRNG is not present, skipping"); + return; + } + + if (!done) { + ret = eip76_rng_probe(base + MVEBU_TRNG_BASE); + if (ret != 0) { + ERROR("Failed to init TRNG @ 0x%lx\n", base); + return; + } + done = true; + } +} +void cp110_init(uintptr_t cp110_base, uint32_t stream_id) +{ + INFO("%s: Initialize CPx - base = %lx\n", __func__, cp110_base); + + /* configure IOB windows for CP0*/ + init_iob(cp110_base); + + /* configure AXI-MBUS windows for CP0*/ + cp110_amb_adec_init(cp110_base); + + /* configure axi for CP0*/ + cp110_axi_attr_init(cp110_base); + + /* Execute SW WA for erratas */ + 
cp110_errata_wa_init(cp110_base); + + /* Confiure pcie clock according to clock direction */ + cp110_pcie_clk_cfg(cp110_base); + + /* configure stream id for CP0 */ + cp110_stream_id_init(cp110_base, stream_id); + + /* Open AMB bridge for comphy for CP0 & CP1*/ + cp110_amb_init(cp110_base); + + /* Reset RTC if needed */ + cp110_rtc_init(cp110_base); + + /* TRNG init - for CP0 only */ + cp110_trng_init(cp110_base); +} + +/* Do the minimal setup required to configure the CP in BLE */ +void cp110_ble_init(uintptr_t cp110_base) +{ +#if PCI_EP_SUPPORT + INFO("%s: Initialize CPx - base = %lx\n", __func__, cp110_base); + + cp110_amb_init(cp110_base); + + /* Configure PCIe clock */ + cp110_pcie_clk_cfg(cp110_base); + + /* Configure PCIe endpoint */ + ble_plat_pcie_ep_setup(); +#endif +} diff --git a/drivers/marvell/secure_dfx_access/armada_thermal.c b/drivers/marvell/secure_dfx_access/armada_thermal.c new file mode 100644 index 0000000..4f7191b --- /dev/null +++ b/drivers/marvell/secure_dfx_access/armada_thermal.c @@ -0,0 +1,253 @@ +/* + * Copyright (C) 2019 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ +#include <common/debug.h> +#include <drivers/delay_timer.h> +#include <errno.h> +#include <lib/mmio.h> +#include <mvebu.h> +#include <stdbool.h> +#include "dfx.h" + +/* #define DEBUG_DFX */ +#ifdef DEBUG_DFX +#define debug(format...) NOTICE(format) +#else +#define debug(format, arg...) +#endif + +#define TSEN_CTRL0 0xf06f8084 + #define TSEN_CTRL0_START BIT(0) + #define TSEN_CTRL0_RESET BIT(1) + #define TSEN_CTRL0_ENABLE BIT(2) + #define TSEN_CTRL0_AVG_BYPASS BIT(6) + #define TSEN_CTRL0_CHAN_SHIFT 13 + #define TSEN_CTRL0_CHAN_MASK 0xF + #define TSEN_CTRL0_OSR_SHIFT 24 + #define TSEN_CTRL0_OSR_MAX 0x3 + #define TSEN_CTRL0_MODE_SHIFT 30 + #define TSEN_CTRL0_MODE_EXTERNAL 0x2U + #define TSEN_CTRL0_MODE_MASK 0x3U + +#define TSEN_CTRL1 0xf06f8088 + #define TSEN_CTRL1_INT_EN BIT(25) + #define TSEN_CTRL1_HYST_SHIFT 19 + #define TSEN_CTRL1_HYST_MASK (0x3 << TSEN_CTRL1_HYST_SHIFT) + #define TSEN_CTRL1_THRESH_SHIFT 3 + #define TSEN_CTRL1_THRESH_MASK (0x3ff << TSEN_CTRL1_THRESH_SHIFT) + +#define TSEN_STATUS 0xf06f808c + #define TSEN_STATUS_VALID_OFFSET 16 + #define TSEN_STATUS_VALID_MASK (0x1 << TSEN_STATUS_VALID_OFFSET) + #define TSEN_STATUS_TEMP_OUT_OFFSET 0 + #define TSEN_STATUS_TEMP_OUT_MASK (0x3FF << TSEN_STATUS_TEMP_OUT_OFFSET) + +#define DFX_SERVER_IRQ_SUM_MASK_REG 0xf06f8104 + #define DFX_SERVER_IRQ_EN BIT(1) + +#define DFX_IRQ_CAUSE_REG 0xf06f8108 + +#define DFX_IRQ_MASK_REG 0xf06f810c + #define DFX_IRQ_TSEN_OVERHEAT_OFFSET BIT(22) + +#define THERMAL_SEN_OUTPUT_MSB 512 +#define THERMAL_SEN_OUTPUT_COMP 1024 + +#define COEF_M 423 +#define COEF_B -150000LL + +static void armada_ap806_thermal_read(u_register_t *temp) +{ + uint32_t reg; + + reg = mmio_read_32(TSEN_STATUS); + + reg = ((reg & TSEN_STATUS_TEMP_OUT_MASK) >> + TSEN_STATUS_TEMP_OUT_OFFSET); + + /* + * TSEN output format is signed as a 2s complement number + * ranging from-512 to +511. 
when MSB is set, need to + * calculate the complement number + */ + if (reg >= THERMAL_SEN_OUTPUT_MSB) + reg -= THERMAL_SEN_OUTPUT_COMP; + + *temp = ((COEF_M * ((signed int)reg)) - COEF_B); +} + +static void armada_ap806_thermal_irq(void) +{ + /* Dummy read, register ROC */ + mmio_read_32(DFX_IRQ_CAUSE_REG); +} + +static void armada_ap806_thermal_overheat_irq_init(void) +{ + uint32_t reg; + + /* Clear DFX temperature IRQ cause */ + reg = mmio_read_32(DFX_IRQ_CAUSE_REG); + + /* Enable DFX Temperature IRQ */ + reg = mmio_read_32(DFX_IRQ_MASK_REG); + reg |= DFX_IRQ_TSEN_OVERHEAT_OFFSET; + mmio_write_32(DFX_IRQ_MASK_REG, reg); + + /* Enable DFX server IRQ */ + reg = mmio_read_32(DFX_SERVER_IRQ_SUM_MASK_REG); + reg |= DFX_SERVER_IRQ_EN; + mmio_write_32(DFX_SERVER_IRQ_SUM_MASK_REG, reg); + + /* Enable overheat interrupt */ + reg = mmio_read_32(TSEN_CTRL1); + reg |= TSEN_CTRL1_INT_EN; + mmio_write_32(TSEN_CTRL1, reg); +} + +static unsigned int armada_mc_to_reg_temp(unsigned int temp_mc) +{ + unsigned int sample; + + sample = (temp_mc + COEF_B) / COEF_M; + + return sample & 0x3ff; +} + +/* + * The documentation states: + * high/low watermark = threshold +/- 0.4761 * 2^(hysteresis + 2) + * which is the mathematical derivation for: + * 0x0 <=> 1.9°C, 0x1 <=> 3.8°C, 0x2 <=> 7.6°C, 0x3 <=> 15.2°C + */ +static unsigned int hyst_levels_mc[] = {1900, 3800, 7600, 15200}; + +static unsigned int armada_mc_to_reg_hyst(int hyst_mc) +{ + int i; + + /* + * We will always take the smallest possible hysteresis to avoid risking + * the hardware integrity by enlarging the threshold by +8°C in the + * worst case. + */ + for (i = ARRAY_SIZE(hyst_levels_mc) - 1; i > 0; i--) + if (hyst_mc >= hyst_levels_mc[i]) + break; + + return i; +} + +static void armada_ap806_thermal_threshold(int thresh_mc, int hyst_mc) +{ + uint32_t ctrl1; + unsigned int threshold = armada_mc_to_reg_temp(thresh_mc); + unsigned int hysteresis = armada_mc_to_reg_hyst(hyst_mc); + + ctrl1 = mmio_read_32(TSEN_CTRL1); + /* Set Threshold */ + if (thresh_mc >= 0) { + ctrl1 &= ~(TSEN_CTRL1_THRESH_MASK); + ctrl1 |= threshold << TSEN_CTRL1_THRESH_SHIFT; + } + + /* Set Hysteresis */ + if (hyst_mc >= 0) { + ctrl1 &= ~(TSEN_CTRL1_HYST_MASK); + ctrl1 |= hysteresis << TSEN_CTRL1_HYST_SHIFT; + } + + mmio_write_32(TSEN_CTRL1, ctrl1); +} + +static void armada_select_channel(int channel) +{ + uint32_t ctrl0; + + /* Stop the measurements */ + ctrl0 = mmio_read_32(TSEN_CTRL0); + ctrl0 &= ~TSEN_CTRL0_START; + mmio_write_32(TSEN_CTRL0, ctrl0); + + /* Reset the mode, internal sensor will be automatically selected */ + ctrl0 &= ~(TSEN_CTRL0_MODE_MASK << TSEN_CTRL0_MODE_SHIFT); + + /* Other channels are external and should be selected accordingly */ + if (channel) { + /* Change the mode to external */ + ctrl0 |= TSEN_CTRL0_MODE_EXTERNAL << + TSEN_CTRL0_MODE_SHIFT; + /* Select the sensor */ + ctrl0 &= ~(TSEN_CTRL0_CHAN_MASK << TSEN_CTRL0_CHAN_SHIFT); + ctrl0 |= (channel - 1) << TSEN_CTRL0_CHAN_SHIFT; + } + + /* Actually set the mode/channel */ + mmio_write_32(TSEN_CTRL0, ctrl0); + + /* Re-start the measurements */ + ctrl0 |= TSEN_CTRL0_START; + mmio_write_32(TSEN_CTRL0, ctrl0); +} + +static void armada_ap806_thermal_init(void) +{ + uint32_t reg; + + reg = mmio_read_32(TSEN_CTRL0); + reg &= ~TSEN_CTRL0_RESET; + reg |= TSEN_CTRL0_START | TSEN_CTRL0_ENABLE; + + /* Sample every ~2ms */ + reg |= TSEN_CTRL0_OSR_MAX << TSEN_CTRL0_OSR_SHIFT; + + /* Enable average (2 samples by default) */ + reg &= ~TSEN_CTRL0_AVG_BYPASS; + + mmio_write_32(TSEN_CTRL0, reg); + + debug("thermal: 
Initialization done\n"); +} + +static void armada_is_valid(u_register_t *read) +{ + *read = (mmio_read_32(TSEN_STATUS) & TSEN_STATUS_VALID_MASK); +} + +int mvebu_dfx_thermal_handle(u_register_t func, u_register_t *read, + u_register_t x2, u_register_t x3) +{ + debug_enter(); + + switch (func) { + case MV_SIP_DFX_THERMAL_INIT: + armada_ap806_thermal_init(); + break; + case MV_SIP_DFX_THERMAL_READ: + armada_ap806_thermal_read(read); + break; + case MV_SIP_DFX_THERMAL_IRQ: + armada_ap806_thermal_irq(); + break; + case MV_SIP_DFX_THERMAL_THRESH: + armada_ap806_thermal_threshold(x2, x3); + armada_ap806_thermal_overheat_irq_init(); + break; + case MV_SIP_DFX_THERMAL_IS_VALID: + armada_is_valid(read); + break; + case MV_SIP_DFX_THERMAL_SEL_CHANNEL: + armada_select_channel(x2); + break; + default: + ERROR("unsupported dfx func\n"); + return -EINVAL; + } + + debug_exit(); + + return 0; +} diff --git a/drivers/marvell/secure_dfx_access/dfx.h b/drivers/marvell/secure_dfx_access/dfx.h new file mode 100644 index 0000000..88c4de8 --- /dev/null +++ b/drivers/marvell/secure_dfx_access/dfx.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2019 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* DFX sub-FID */ +#define MV_SIP_DFX_THERMAL_INIT 1 +#define MV_SIP_DFX_THERMAL_READ 2 +#define MV_SIP_DFX_THERMAL_IS_VALID 3 +#define MV_SIP_DFX_THERMAL_IRQ 4 +#define MV_SIP_DFX_THERMAL_THRESH 5 +#define MV_SIP_DFX_THERMAL_SEL_CHANNEL 6 + +#define MV_SIP_DFX_SREAD 20 +#define MV_SIP_DFX_SWRITE 21 + +int mvebu_dfx_thermal_handle(u_register_t func, u_register_t *read, + u_register_t x2, u_register_t x3); +int mvebu_dfx_misc_handle(u_register_t func, u_register_t *read, + u_register_t addr, u_register_t val); diff --git a/drivers/marvell/secure_dfx_access/misc_dfx.c b/drivers/marvell/secure_dfx_access/misc_dfx.c new file mode 100644 index 0000000..189105f --- /dev/null +++ b/drivers/marvell/secure_dfx_access/misc_dfx.c @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2021 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#include <common/debug.h> +#include <lib/mmio.h> +#include "dfx.h" +#include <mvebu_def.h> +#include <mvebu.h> +#include <errno.h> + +/* #define DEBUG_DFX */ +#ifdef DEBUG_DFX +#define debug(format...) NOTICE(format) +#else +#define debug(format, arg...) 
+#endif + +#define SAR_BASE (MVEBU_REGS_BASE + 0x6F8200) +#define SAR_SIZE 0x4 +#define AP_DEV_ID_STATUS_REG (MVEBU_REGS_BASE + 0x6F8240) +#define JTAG_DEV_ID_STATUS_REG (MVEBU_REGS_BASE + 0x6F8244) +#define EFUSE_CTRL (MVEBU_REGS_BASE + 0x6F8008) +#define EFUSE_LD_BASE (MVEBU_REGS_BASE + 0x6F8F00) +#define EFUSE_LD_SIZE 0x1C +#define EFUSE_HD_BASE (MVEBU_REGS_BASE + 0x6F9000) +#define EFUSE_HD_SIZE 0x3F8 + +/* AP806 CPU DFS register mapping*/ +#define AP806_CA72MP2_0_PLL_CR_0_BASE (MVEBU_REGS_BASE + 0x6F8278) +#define AP806_CA72MP2_0_PLL_CR_1_BASE (MVEBU_REGS_BASE + 0x6F8280) +#define AP806_CA72MP2_0_PLL_CR_2_BASE (MVEBU_REGS_BASE + 0x6F8284) +#define AP806_CA72MP2_0_PLL_SR_BASE (MVEBU_REGS_BASE + 0x6F8C94) + +/* AP807 CPU DFS register mapping */ +#define AP807_DEVICE_GENERAL_CR_10_BASE (MVEBU_REGS_BASE + 0x6F8278) +#define AP807_DEVICE_GENERAL_CR_11_BASE (MVEBU_REGS_BASE + 0x6F827C) +#define AP807_DEVICE_GENERAL_STATUS_6_BASE (MVEBU_REGS_BASE + 0x6F8C98) + +#ifdef MVEBU_SOC_AP807 + #define CLUSTER_OFFSET 0x8 + #define CLK_DIVIDER_REG AP807_DEVICE_GENERAL_CR_10_BASE + #define CLK_FORCE_REG AP807_DEVICE_GENERAL_CR_11_BASE + #define CLK_RATIO_REG AP807_DEVICE_GENERAL_CR_11_BASE + #define CLK_RATIO_STATE_REG AP807_DEVICE_GENERAL_STATUS_6_BASE +#else + #define CLUSTER_OFFSET 0x14 + #define CLK_DIVIDER_REG AP806_CA72MP2_0_PLL_CR_0_BASE + #define CLK_FORCE_REG AP806_CA72MP2_0_PLL_CR_1_BASE + #define CLK_RATIO_REG AP806_CA72MP2_0_PLL_CR_2_BASE + #define CLK_RATIO_STATE_REG AP806_CA72MP2_0_PLL_SR_BASE +#endif /* MVEBU_SOC_AP807 */ + +static _Bool is_valid(u_register_t addr) +{ + switch (addr) { + case AP_DEV_ID_STATUS_REG: + case JTAG_DEV_ID_STATUS_REG: + case SAR_BASE ... (SAR_BASE + SAR_SIZE): + case EFUSE_LD_BASE ... (EFUSE_LD_BASE + EFUSE_LD_SIZE): + case EFUSE_HD_BASE ... (EFUSE_HD_BASE + EFUSE_HD_SIZE): + case EFUSE_CTRL: + /* cpu-clk related registers */ + case CLK_DIVIDER_REG: + case CLK_DIVIDER_REG + CLUSTER_OFFSET: + case CLK_FORCE_REG: + case CLK_FORCE_REG + CLUSTER_OFFSET: +#ifndef MVEBU_SOC_AP807 + case CLK_RATIO_REG: + case CLK_RATIO_REG + CLUSTER_OFFSET: +#endif + case CLK_RATIO_STATE_REG: + case CLK_RATIO_STATE_REG + CLUSTER_OFFSET: + return true; + default: + return false; + } +} + +static int armada_dfx_sread(u_register_t *read, u_register_t addr) +{ + if (!is_valid(addr)) + return -EINVAL; + + *read = mmio_read_32(addr); + + return 0; +} + +static int armada_dfx_swrite(u_register_t addr, u_register_t val) +{ + if (!is_valid(addr)) + return -EINVAL; + + mmio_write_32(addr, val); + + return 0; +} + +int mvebu_dfx_misc_handle(u_register_t func, u_register_t *read, + u_register_t addr, u_register_t val) +{ + debug_enter(); + + debug("func %ld, addr 0x%lx, val 0x%lx\n", func, addr, val); + + switch (func) { + case MV_SIP_DFX_SREAD: + return armada_dfx_sread(read, addr); + case MV_SIP_DFX_SWRITE: + return armada_dfx_swrite(addr, val); + default: + ERROR("unsupported dfx misc sub-func\n"); + return -EINVAL; + } + + debug_exit(); + + return 0; +} diff --git a/drivers/marvell/thermal.c b/drivers/marvell/thermal.c new file mode 100644 index 0000000..a501ab4 --- /dev/null +++ b/drivers/marvell/thermal.c @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +/* Driver for thermal unit located in Marvell ARMADA 8K and compatible SoCs */ + +#include <common/debug.h> +#include <drivers/marvell/thermal.h> + +int marvell_thermal_init(struct tsen_config *tsen_cfg) +{ + if (tsen_cfg->tsen_ready == 1) { + INFO("thermal sensor is already initialized\n"); + return 0; + } + + if (tsen_cfg->ptr_tsen_probe == NULL) { + ERROR("initial thermal sensor configuration is missing\n"); + return -1; + } + + if (tsen_cfg->ptr_tsen_probe(tsen_cfg)) { + ERROR("thermal sensor initialization failed\n"); + return -1; + } + + VERBOSE("thermal sensor was initialized\n"); + + return 0; +} + +int marvell_thermal_read(struct tsen_config *tsen_cfg, int *temp) +{ + if (temp == NULL) { + ERROR("NULL pointer for temperature read\n"); + return -1; + } + + if (tsen_cfg->ptr_tsen_read == NULL || + tsen_cfg->tsen_ready == 0) { + ERROR("thermal sensor was not initialized\n"); + return -1; + } + + if (tsen_cfg->ptr_tsen_read(tsen_cfg, temp)) { + ERROR("temperature read failed\n"); + return -1; + } + + return 0; +} diff --git a/drivers/marvell/uart/a3700_console.S b/drivers/marvell/uart/a3700_console.S new file mode 100644 index 0000000..a1eacbc --- /dev/null +++ b/drivers/marvell/uart/a3700_console.S @@ -0,0 +1,271 @@ +/* + * Copyright (C) 2016 Marvell International Ltd. + * + * SPDX-License-Identifier: BSD-3-Clause + * https://spdx.org/licenses + */ + +#include <arch.h> +#include <asm_macros.S> +#include <console_macros.S> +#include <drivers/marvell/uart/a3700_console.h> + + /* + * "core" functions are low-level implementations that don't require + * writable memory and are thus safe to call in BL1 crash context. + */ + .globl console_a3700_core_putc + .globl console_a3700_core_init + .globl console_a3700_core_getc + .globl console_a3700_core_flush + + .globl console_a3700_putc + .globl console_a3700_getc + .globl console_a3700_flush + + /* ----------------------------------------------- + * int console_a3700_core_init(unsigned long base_addr, + * unsigned int uart_clk, unsigned int baud_rate) + * Function to initialize the console without a + * C Runtime to print debug information. This + * function will be accessed by console_init and + * crash reporting. + * In: x0 - console base address + * w1 - Uart clock in Hz + * w2 - Baud rate + * Out: return 1 on success + * Clobber list : x1, x2, x3, x4 + * ----------------------------------------------- + */ +func console_a3700_core_init + /* Check the input base address */ + cbz x0, init_fail + /* Check baud rate and uart clock for sanity */ + cbz w1, init_fail + cbz w2, init_fail + + /* + * Wait for the TX (THR and TSR) to be empty. If wait for 3ms, the TX FIFO is + * still not empty, TX FIFO will reset by all means. 
+ */ + mov w4, #30 /* max time out 30 * 100 us */ +2: + /* Check whether TX (THR and TSR) is empty */ + ldr w3, [x0, #UART_STATUS_REG] + and w3, w3, #UARTLSR_TXEMPTY + cmp w3, #0 + b.ne 4f + + /* Delay */ + mov w3, #60000 /* 60000 cycles of below 3 instructions on 1200 MHz CPU ~~ 100 us */ +3: + sub w3, w3, #1 + cmp w3, #0 + b.ne 3b + + /* Check whether wait timeout expired */ + sub w4, w4, #1 + cmp w4, #0 + b.ne 2b + +4: + /* Reset UART via North Bridge Peripheral */ + mov_imm x4, MVEBU_NB_RESET_REG + ldr w3, [x4] + bic w3, w3, #MVEBU_NB_RESET_UART_N + str w3, [x4] + orr w3, w3, #MVEBU_NB_RESET_UART_N + str w3, [x4] + + /* Reset FIFO */ + mov w3, #UART_CTRL_RXFIFO_RESET + orr w3, w3, #UART_CTRL_TXFIFO_RESET + str w3, [x0, #UART_CTRL_REG] + + /* Delay */ + mov w3, #2000 +1: + sub w3, w3, #1 + cmp w3, #0 + b.ne 1b + + /* Program the baudrate */ + /* Divisor = Round(Uartclock / (16 * baudrate)) */ + lsl w2, w2, #4 + add w1, w1, w2, lsr #1 + udiv w2, w1, w2 + and w2, w2, #0x3ff /* clear all other bits to use default clock */ + + str w2, [x0, #UART_BAUD_REG]/* set baud rate divisor */ + + /* Set UART to default 16X scheme */ + mov w3, #0 + str w3, [x0, #UART_POSSR_REG] + + /* No Parity, 1 Stop */ + mov w3, #0 + str w3, [x0, #UART_CTRL_REG] + + mov w0, #1 + ret +init_fail: + mov w0, #0 + ret +endfunc console_a3700_core_init + + .globl console_a3700_register + + /* ----------------------------------------------- + * int console_a3700_register(console_t *console, + uintptr_t base, uint32_t clk, uint32_t baud) + * Function to initialize and register a new a3700 + * console. Storage passed in for the console struct + * *must* be persistent (i.e. not from the stack). + * In: x0 - UART register base address + * w1 - UART clock in Hz + * w2 - Baud rate + * x3 - pointer to empty console_t struct + * Out: return 1 on success, 0 on error + * Clobber list : x0, x1, x2, x3, x4, x6, x7, x14 + * ----------------------------------------------- + */ +func console_a3700_register + mov x7, x30 + mov x6, x3 + cbz x6, register_fail + str x0, [x6, #CONSOLE_T_BASE] + + bl console_a3700_core_init + cbz x0, register_fail + + mov x0, x6 + mov x30, x7 + finish_console_register a3700, putc=1, getc=ENABLE_CONSOLE_GETC, flush=1 + +register_fail: + ret x7 +endfunc console_a3700_register + + /* -------------------------------------------------------- + * int console_a3700_core_putc(int c, unsigned int base_addr) + * Function to output a character over the console. It + * returns the character printed on success or -1 on error. + * In : w0 - character to be printed + * x1 - console base address + * Out : return -1 on error else return character. + * Clobber list : x2 + * -------------------------------------------------------- + */ +func console_a3700_core_putc + /* Check the input parameter */ + cbz x1, putc_error + + /* Prepend '\r' to '\n' */ + cmp w0, #0xA + b.ne 2f + /* Check if the transmit FIFO is full */ +1: ldr w2, [x1, #UART_STATUS_REG] + and w2, w2, #UARTLSR_TXFIFOFULL + cmp w2, #UARTLSR_TXFIFOFULL + b.eq 1b + mov w2, #0xD /* '\r' */ + str w2, [x1, #UART_TX_REG] + + /* Check if the transmit FIFO is full */ +2: ldr w2, [x1, #UART_STATUS_REG] + and w2, w2, #UARTLSR_TXFIFOFULL + cmp w2, #UARTLSR_TXFIFOFULL + b.eq 2b + str w0, [x1, #UART_TX_REG] + ret +putc_error: + mov w0, #-1 + ret +endfunc console_a3700_core_putc + + /* -------------------------------------------------------- + * int console_a3700_putc(int c, console_t *console) + * Function to output a character over the console. 
It + * returns the character printed on success or -1 on error. + * In : w0 - character to be printed + * x1 - pointer to console_t structure + * Out : return -1 on error else return character. + * Clobber list : x2 + * -------------------------------------------------------- + */ +func console_a3700_putc + ldr x1, [x1, #CONSOLE_T_BASE] + b console_a3700_core_putc +endfunc console_a3700_putc + + /* --------------------------------------------- + * int console_a3700_core_getc(void) + * Function to get a character from the console. + * It returns the character grabbed on success + * or -1 if no character is available. + * In : w0 - console base address + * Out : w0 - character if available, else -1 + * Clobber list : x0, x1 + * --------------------------------------------- + */ +func console_a3700_core_getc + /* Check if there is a pending character */ + ldr w1, [x0, #UART_STATUS_REG] + and w1, w1, #UARTLSR_RXRDY + cmp w1, #UARTLSR_RXRDY + b.ne getc_no_char + ldr w0, [x0, #UART_RX_REG] + and w0, w0, #0xff + ret +getc_no_char: + mov w0, #ERROR_NO_PENDING_CHAR + ret +endfunc console_a3700_core_getc + + /* --------------------------------------------- + * int console_a3700_getc(console_t *console) + * Function to get a character from the console. + * It returns the character grabbed on success + * or -1 on if no character is available. + * In : x0 - pointer to console_t structure + * Out : w0 - character if available, else -1 + * Clobber list : x0, x1 + * --------------------------------------------- + */ +func console_a3700_getc + ldr x0, [x0, #CONSOLE_T_BASE] + b console_a3700_core_getc +endfunc console_a3700_getc + + /* --------------------------------------------- + * void console_a3700_core_flush(uintptr_t base_addr) + * Function to force a write of all buffered + * data that hasn't been output. + * In : x0 - console base address + * Out : void. + * Clobber list : x0, x1 + * --------------------------------------------- + */ +func console_a3700_core_flush + /* Wait for the TX (THR and TSR) to be empty */ +1: ldr w1, [x0, #UART_STATUS_REG] + and w1, w1, #UARTLSR_TXEMPTY + cmp w1, #UARTLSR_TXEMPTY + b.ne 1b + ret +endfunc console_a3700_core_flush + + /* --------------------------------------------- + * void console_a3700_flush(console_t *console) + * Function to force a write of all buffered + * data that hasn't been output. + * In : x0 - pointer to console_t structure + * Out : void. + * Clobber list : x0, x1 + * --------------------------------------------- + */ +func console_a3700_flush + ldr x0, [x0, #CONSOLE_T_BASE] + b console_a3700_core_flush +endfunc console_a3700_flush + |
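The divisor that console_a3700_core_init above programs into UART_BAUD_REG is Round(uart_clock / (16 * baud_rate)), computed by the lsl/add/udiv sequence before the register write and masked to 10 bits so the remaining UART_BAUD_REG fields keep their default clock selection. A minimal C sketch of the same computation, illustrative only and not part of the upstream file (the function name is an assumption):

#include <stdint.h>

/* Rounded baud divisor as computed by console_a3700_core_init:
 * divisor = Round(uart_clk / (16 * baud)), truncated to the low 10 bits
 * so the other UART_BAUD_REG fields keep their default values.
 */
static uint32_t a3700_uart_baud_divisor(uint32_t uart_clk_hz, uint32_t baud)
{
	uint32_t div16 = baud << 4;	/* 16 * baud */

	return ((uart_clk_hz + div16 / 2) / div16) & 0x3ff;
}

For example, assuming a 25 MHz UART clock and 115200 baud, this yields (25000000 + 921600) / 1843200 = 14, the same value the assembly sequence would write.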
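As a worked example of the temperature conversion in armada_thermal.c above: a raw TSEN_STATUS temperature field of 900 is at or above THERMAL_SEN_OUTPUT_MSB (512), so armada_ap806_thermal_read() first takes the two's complement, 900 - 1024 = -124, and then reports 423 * (-124) - (-150000) = 97548 m°C, about 97.5 °C. armada_mc_to_reg_temp() applies the inverse for threshold programming: (97548 + (-150000)) / 423 = -124, which the final "& 0x3ff" maps back to the 10-bit sample value 900.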