author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 17:43:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 17:43:51 +0000
commit    be58c81aff4cd4c0ccf43dbd7998da4a6a08c03b (patch)
tree      779c248fb61c83f65d1f0dc867f2053d76b4e03a /plat/brcm/board/stingray/src
parent    Initial commit. (diff)
Adding upstream version 2.10.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'plat/brcm/board/stingray/src')
-rw-r--r--  plat/brcm/board/stingray/src/bl2_setup.c    |  743
-rw-r--r--  plat/brcm/board/stingray/src/bl31_setup.c   | 1071
-rw-r--r--  plat/brcm/board/stingray/src/brcm_pm_ops.c  |  408
-rw-r--r--  plat/brcm/board/stingray/src/fsx.c          |  477
-rw-r--r--  plat/brcm/board/stingray/src/ihost_pm.c     |  355
-rw-r--r--  plat/brcm/board/stingray/src/iommu.c        |  536
-rw-r--r--  plat/brcm/board/stingray/src/ncsi.c         |   54
-rw-r--r--  plat/brcm/board/stingray/src/paxb.c         |  911
-rw-r--r--  plat/brcm/board/stingray/src/paxc.c         |  267
-rw-r--r--  plat/brcm/board/stingray/src/pm.c           |  131
-rw-r--r--  plat/brcm/board/stingray/src/scp_cmd.c      |   60
-rw-r--r--  plat/brcm/board/stingray/src/scp_utils.c    |  227
-rw-r--r--  plat/brcm/board/stingray/src/sdio.c         |  144
-rw-r--r--  plat/brcm/board/stingray/src/sr_paxb_phy.c  |  806
-rw-r--r--  plat/brcm/board/stingray/src/topology.c     |   52
-rw-r--r--  plat/brcm/board/stingray/src/tz_sec.c       |  153
16 files changed, 6395 insertions, 0 deletions
diff --git a/plat/brcm/board/stingray/src/bl2_setup.c b/plat/brcm/board/stingray/src/bl2_setup.c
new file mode 100644
index 0000000..b2c8aec
--- /dev/null
+++ b/plat/brcm/board/stingray/src/bl2_setup.c
@@ -0,0 +1,743 @@
+/*
+ * Copyright (c) 2016-2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <drivers/arm/sp805.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <chimp.h>
+#include <chip_id.h>
+#include <cmn_plat_util.h>
+#include <dmu.h>
+#include <emmc_api.h>
+#include <fru.h>
+#ifdef USE_GPIO
+#include <drivers/gpio.h>
+#include <iproc_gpio.h>
+#endif
+#include <platform_def.h>
+#include <sotp.h>
+#include <swreg.h>
+#include <sr_utils.h>
+#ifdef USE_DDR
+#include <ddr_init.h>
+#else
+#include <ext_sram_init.h>
+#endif
+#if DRIVER_OCOTP_ENABLE
+#include <ocotp.h>
+#endif
+#include "board_info.h"
+
+#define WORD_SIZE 8
+#define SWREG_AVS_OTP_OFFSET (13 * WORD_SIZE) /* 13th row byte offset */
+#define AON_GPIO_OTP_OFFSET (28 * WORD_SIZE) /* 28th row byte offset */
+#define BYTES_TO_READ 8
+
+/* OTP voltage step definitions */
+#define MVOLT_STEP_MAX 0x18 /* 1v */
+#define MVOLT_PER_STEP 10 /* 10 mV (0.01 V) per step */
+#define MVOLT_BASE 760 /* 0.76v */
+
+#define STEP_TO_UVOLTS(step) \
+ ((MVOLT_BASE + (MVOLT_PER_STEP * (step))) * 1000)
+
+#define GET_BITS(first, last, data) \
+ (((data) >> (first)) & ((1 << ((last) - (first) + 1)) - 1))
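+
+/*
+ * Worked example (illustrative, not part of the upstream sources): for a
+ * step value of 0x12 (18 decimal),
+ *
+ *   STEP_TO_UVOLTS(0x12) = (760 + 10 * 18) * 1000 = 940000 uV = 0.94 V
+ *
+ * which matches the 0.94v ihost default mentioned below. Similarly,
+ *   GET_BITS(4, 5, 0x30) = (0x30 >> 4) & 0x3 = 0x3
+ * extracts bits [5:4] of the data word.
+ */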
+
+/*
+ * SW-REG OTP encoding:
+ *
+ * SWREG_bits[11:0] = OTP 13th row 12 bits[55:44]
+ * SWREG_bits[11:10] - Valid Bits (0x2 - valid, if not 0x2 - Invalid)
+ * SWREG_bits[9:5] - iHost03, iHost12
+ * SWREG_bits[4:0] - Core VDDC
+ */
+#define SWREG_OTP_BITS_START 12 /* 44th bit in MSB 32-bits */
+#define SWREG_OTP_BITS_END 23 /* 55th bit in MSB 32-bits */
+#define SWREG_VDDC_FIELD_START 0
+#define SWREG_VDDC_FIELD_END 4
+#define SWREG_IHOST_FIELD_START 5
+#define SWREG_IHOST_FIELD_END 9
+#define SWREG_VALID_BIT_START 10
+#define SWREG_VALID_BIT_END 11
+#define SWREG_VALID_BITS 0x2
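+
+/*
+ * Illustrative decode, using a hypothetical row value for clarity: if the
+ * MSB word of row 13 reads 0x00925000, then
+ *
+ *   avs_bits = GET_BITS(12, 23, 0x00925000) = 0x925
+ *   valid    = GET_BITS(10, 11, 0x925) = 0x2  (matches SWREG_VALID_BITS)
+ *   ihost    = GET_BITS(5, 9, 0x925)  = 0x09 -> 760 + (9 * 10) = 850 mV
+ *   vddc     = GET_BITS(0, 4, 0x925)  = 0x05 -> 760 + (5 * 10) = 810 mV
+ */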
+
+/*
+ * Row 13 bit 56 is programmed as '1' today. It is not being used, so the
+ * plan is to flip this bit to '0' for the B1 rev. Hence SW can leverage
+ * this bit to identify a Bx chip and program different sw-regulators.
+ */
+#define SPARE_BIT 24
+
+#define IS_SR_B0(data) (((data) >> SPARE_BIT) & 0x1)
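+
+/*
+ * Derivation note: bit 56 of the 64-bit row sits in the upper 32-bit word
+ * (read into buf[1]), at position 56 - 32 = 24, hence SPARE_BIT is 24;
+ * consistent with SWREG_OTP_BITS_START above (44 - 32 = 12).
+ */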
+
+#if DRIVER_OCOTP_ENABLE
+static struct otpc_map otp_stingray_map = {
+ .otpc_row_size = 2,
+ .data_r_offset = {0x10, 0x5c},
+ .data_w_offset = {0x2c, 0x64},
+ .word_size = 8,
+ .stride = 8,
+};
+#endif
+
+void plat_bcm_bl2_early_platform_setup(void)
+{
+ /* Select UART0 for AP via mux setting */
+ if (PLAT_BRCM_BOOT_UART_BASE == UART0_BASE_ADDR) {
+ mmio_write_32(UART0_SIN_MODE_SEL_CONTROL, 1);
+ mmio_write_32(UART0_SOUT_MODE_SEL_CONTROL, 1);
+ }
+}
+
+#ifdef USE_NAND
+static void brcm_stingray_nand_init(void)
+{
+ unsigned int val;
+ unsigned int nand_idm_reset_control = 0x68e0a800;
+
+ VERBOSE(" stingray nand init start.\n");
+
+ /* Reset NAND */
+ VERBOSE(" - reset nand\n");
+ val = mmio_read_32((uintptr_t)(nand_idm_reset_control + 0x0));
+ mmio_write_32((uintptr_t)(nand_idm_reset_control + 0x0), val | 0x1);
+ udelay(500);
+ val = mmio_read_32((uintptr_t)(nand_idm_reset_control + 0x0));
+ mmio_write_32((uintptr_t)(nand_idm_reset_control + 0x0), val & ~0x1);
+ udelay(500);
+
+ VERBOSE(" stingray nand init done.\n");
+}
+#endif
+
+#if defined(USE_PAXB) || defined(USE_PAXC) || defined(USE_SATA)
+#define PCIE_RESCAL_CFG_0 0x40000130
+#define PCIE_CFG_RESCAL_RSTB_R (1 << 16)
+#define PCIE_CFG_RESCAL_PWRDNB_R (1 << 8)
+#define PCIE_RESCAL_STATUS_0 0x4000014c
+#define PCIE_STAT_PON_VALID_R (1 << 0)
+#define PCIE_RESCAL_OUTPUT_STATUS 0x40000154
+#define CDRU_PCIE_RESET_N_R (1 << CDRU_MISC_RESET_CONTROL__CDRU_PCIE_RESET_N_R)
+
+#ifdef EMULATION_SETUP
+static void brcm_stingray_pcie_reset(void)
+{
+}
+#else
+static void brcm_stingray_pcie_reset(void)
+{
+ unsigned int data;
+ int try;
+
+ if (bcm_chimp_is_nic_mode()) {
+ INFO("NIC mode detected; PCIe reset/rescal not executed\n");
+ return;
+ }
+
+ mmio_clrbits_32(CDRU_MISC_RESET_CONTROL, CDRU_PCIE_RESET_N_R);
+ mmio_setbits_32(CDRU_MISC_RESET_CONTROL, CDRU_PCIE_RESET_N_R);
+ /* Release reset */
+ mmio_setbits_32(PCIE_RESCAL_CFG_0, PCIE_CFG_RESCAL_RSTB_R);
+ mdelay(1);
+ /* Power UP */
+ mmio_setbits_32(PCIE_RESCAL_CFG_0,
+ (PCIE_CFG_RESCAL_RSTB_R | PCIE_CFG_RESCAL_PWRDNB_R));
+
+ try = 1000;
+ do {
+ udelay(1);
+ data = mmio_read_32(PCIE_RESCAL_STATUS_0);
+ try--;
+ } while ((data & PCIE_STAT_PON_VALID_R) == 0x0 && (try > 0));
+
+ if (try <= 0)
+ ERROR("PCIE_RESCAL_STATUS_0: 0x%x\n", data);
+
+ VERBOSE("PCIE_SATA_RESCAL_STATUS_0 0x%x.\n",
+ mmio_read_32(PCIE_RESCAL_STATUS_0));
+ VERBOSE("PCIE_SATA_RESCAL_OUTPUT_STATUS 0x%x.\n",
+ mmio_read_32(PCIE_RESCAL_OUTPUT_STATUS));
+ INFO("PCIE SATA Rescal Init done\n");
+}
+#endif /* EMULATION_SETUP */
+#endif /* USE_PAXB || USE_PAXC || USE_SATA */
+
+#ifdef USE_PAXC
+void brcm_stingray_chimp_check_and_fastboot(void)
+{
+ int fastboot_init_result;
+
+ if (bcm_chimp_is_nic_mode())
+ /* Do not wait here */
+ return;
+
+#if WARMBOOT_DDR_S3_SUPPORT
+ /*
+ * Currently DDR shmoo parameters and QSPI boot source are
+ * tied. DDR shmoo parameters are stored in QSPI, which is
+ * used for warmboot.
+ * Do not reset nitro for warmboot
+ */
+ if (is_warmboot() && (boot_source_get() == BOOT_SOURCE_QSPI))
+ return;
+#endif /* WARMBOOT_DDR_S3_SUPPORT */
+
+ /*
+ * Not in NIC mode,
+ * initiate fastboot (if enabled)
+ */
+ if (FASTBOOT_TYPE == CHIMP_FASTBOOT_NITRO_RESET) {
+
+ VERBOSE("Bring up Nitro/ChiMP\n");
+
+ if (boot_source_get() == BOOT_SOURCE_QSPI)
+ WARN("Nitro boots from QSPI when AP has booted from QSPI.\n");
+ brcm_stingray_set_qspi_mux(0);
+ VERBOSE("Nitro controls the QSPI\n");
+ }
+
+ fastboot_init_result = bcm_chimp_initiate_fastboot(FASTBOOT_TYPE);
+ if (fastboot_init_result && boot_source_get() != BOOT_SOURCE_QSPI)
+ ERROR("Nitro init error %d. Status: 0x%x; bpe_mod reg: 0x%x\n"
+ "fastboot register: 0x%x; handshake register 0x%x\n",
+ fastboot_init_result,
+ bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_STAT_REG),
+ bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_MODE_REG),
+ bcm_chimp_read_ctrl(CHIMP_REG_CTRL_FSTBOOT_PTR_REG),
+ bcm_chimp_read(CHIMP_REG_ECO_RESERVED));
+
+ /*
+ * An L1 reset (a CRMU watchdog kick is one example) does not
+ * clear the Nitro scratchpad RAM.
+ * For Nitro resets: Clear the Nitro health status memory.
+ */
+ bcm_chimp_write((CHIMP_REG_CHIMP_SCPAD + CHIMP_HEALTH_STATUS_OFFSET),
+ 0);
+}
+#endif
+
+void set_ihost_vddc_swreg(uint32_t ihost_uvolts, uint32_t vddc_uvolts)
+{
+ NOTICE("ihost_uvolts: %duv, vddc_uvolts: %duv\n",
+ ihost_uvolts, vddc_uvolts);
+
+ set_swreg(VDDC_CORE, vddc_uvolts);
+ set_swreg(IHOST03, ihost_uvolts);
+ set_swreg(IHOST12, ihost_uvolts);
+}
+
+/*
+ * Read the SWREG AVS OTP bits (13th row) with ECC enabled and extract the
+ * voltages defined in OTP, if a valid OTP entry is found.
+ */
+void read_avs_otp_bits(uint32_t *ihost_uvolts, uint32_t *vddc_uvolts)
+{
+ uint32_t offset = SWREG_AVS_OTP_OFFSET;
+ uint32_t ihost_step, vddc_step;
+ uint32_t avs_bits;
+ uint32_t buf[2];
+
+ if (bcm_otpc_read(offset, &buf[0], BYTES_TO_READ, 1) == -1)
+ return;
+
+ VERBOSE("AVS OTP %d ROW: 0x%x.0x%x\n",
+ offset/WORD_SIZE, buf[1], buf[0]);
+
+ /* get voltage readings from AVS OTP bits */
+ avs_bits = GET_BITS(SWREG_OTP_BITS_START,
+ SWREG_OTP_BITS_END,
+ buf[1]);
+
+ /* check for valid otp bits */
+ if (GET_BITS(SWREG_VALID_BIT_START, SWREG_VALID_BIT_END, avs_bits) !=
+ SWREG_VALID_BITS) {
+ WARN("Invalid AVS OTP bits at %d row\n", offset/WORD_SIZE);
+ return;
+ }
+
+ /* get ihost and vddc step value */
+ vddc_step = GET_BITS(SWREG_VDDC_FIELD_START,
+ SWREG_VDDC_FIELD_END,
+ avs_bits);
+
+ ihost_step = GET_BITS(SWREG_IHOST_FIELD_START,
+ SWREG_IHOST_FIELD_END,
+ avs_bits);
+
+ if ((ihost_step > MVOLT_STEP_MAX) || (vddc_step > MVOLT_STEP_MAX)) {
+ WARN("OTP entry invalid\n");
+ return;
+ }
+
+ /* get voltage in micro-volts */
+ *ihost_uvolts = STEP_TO_UVOLTS(ihost_step);
+ *vddc_uvolts = STEP_TO_UVOLTS(vddc_step);
+}
+
+/*
+ * This function reads OTP bits and programs the internal swregs - ihost12,
+ * ihost03, vddc_core and ddr_core - based on the chip revision. External
+ * swreg programming is done from the CRMU.
+ *
+ * For the A2 chip:
+ * Read OTP row 20, bit 50. This bit is set for the A2 chip. Once an A2 chip
+ * is found, read AVS OTP row 13, 12 bits [55:44]; if valid OTP bits are
+ * found, set ihost and vddc according to the AVS OTP bits, else set them to
+ * 0.94v and 0.91v respectively. Also update the firmware after setting the
+ * voltage.
+ *
+ * For the B0 chip:
+ * Read OTP row 13, bit 56. This bit is set for the B0 chip. Once a B0 chip
+ * is found, set ihost and vddc to 0.95v and ddr_core to 1v. No AVS OTP bits
+ * are used to get the ihost/vddc voltages.
+ *
+ * For the B1 chip:
+ * Read AVS OTP row 13, 12 bits [55:44]; if valid OTP bits are found, set
+ * ihost and vddc according to the AVS OTP bits, else set them to 0.94v and
+ * 0.91v respectively.
+ */
+void set_swreg_based_on_otp(void)
+{
+ /* default voltage if no valid OTP */
+ uint32_t vddc_uvolts = VDDC_CORE_DEF_VOLT;
+ uint32_t ihost_uvolts = IHOST_DEF_VOLT;
+ uint32_t ddrc_uvolts;
+ uint32_t offset;
+ uint32_t buf[2];
+
+ offset = SWREG_AVS_OTP_OFFSET;
+ if (bcm_otpc_read(offset, &buf[0], BYTES_TO_READ, 1) == -1)
+ return;
+
+ VERBOSE("OTP %d ROW: 0x%x.0x%x\n",
+ offset/WORD_SIZE, buf[1], buf[0]);
+
+ if (IS_SR_B0(buf[1])) {
+ /* don't read AVS OTP for B0 */
+ ihost_uvolts = B0_IHOST_DEF_VOLT;
+ vddc_uvolts = B0_VDDC_CORE_DEF_VOLT;
+ ddrc_uvolts = B0_DDR_VDDC_DEF_VOLT;
+ } else {
+ read_avs_otp_bits(&ihost_uvolts, &vddc_uvolts);
+ }
+
+#if (IHOST_REG_TYPE == IHOST_REG_INTEGRATED) && \
+ (VDDC_REG_TYPE == VDDC_REG_INTEGRATED)
+ /* enable IHOST12 cluster before changing voltage */
+ NOTICE("Switching on the Regulator idx: %u\n",
+ SWREG_IHOST1_DIS);
+ mmio_clrsetbits_32(CRMU_SWREG_CTRL_ADDR,
+ BIT(SWREG_IHOST1_DIS),
+ BIT(SWREG_IHOST1_REG_RESETB));
+
+ /* wait for the regulator supply to stabilize */
+ while (!(mmio_read_32(CRMU_SWREG_STATUS_ADDR) &
+ (1 << SWREG_IHOST1_PMU_STABLE)))
+ ;
+
+ INFO("Regulator supply is stable\n");
+
+#ifndef DEFAULT_SWREG_CONFIG
+ swreg_firmware_update();
+#endif
+
+ set_ihost_vddc_swreg(ihost_uvolts, vddc_uvolts);
+#endif
+ if (IS_SR_B0(buf[1])) {
+ NOTICE("ddrc_uvolts: %duv\n", ddrc_uvolts);
+ set_swreg(DDR_VDDC, ddrc_uvolts);
+ }
+}
+
+#ifdef USE_DDR
+static struct ddr_info ddr_info;
+#endif
+#ifdef USE_FRU
+static struct fru_area_info fru_area[FRU_MAX_NR_AREAS];
+static struct fru_board_info board_info;
+static struct fru_time fru_tm;
+static uint8_t fru_tbl[BCM_MAX_FRU_LEN];
+
+static void board_detect_fru(void)
+{
+ uint32_t i, result;
+ int ret = -1;
+
+ result = bcm_emmc_init(false);
+ if (!result) {
+ ERROR("eMMC init failed\n");
+ return;
+ }
+
+ /* go through eMMC boot partitions looking for FRU table */
+ for (i = EMMC_BOOT_PARTITION1; i <= EMMC_BOOT_PARTITION2; i++) {
+ result = emmc_partition_select(i);
+ if (!result) {
+ ERROR("Switching to eMMC part %u failed\n", i);
+ return;
+ }
+
+ result = emmc_read(BCM_FRU_TBL_OFFSET, (uintptr_t)fru_tbl,
+ BCM_MAX_FRU_LEN, BCM_MAX_FRU_LEN);
+ if (!result) {
+ ERROR("Failed to read from eMMC part %u\n", i);
+ return;
+ }
+
+ /*
+ * Run sanity check and checksum to make sure valid FRU table
+ * is detected
+ */
+ ret = fru_validate(fru_tbl, fru_area);
+ if (ret < 0) {
+ WARN("FRU table not found in eMMC part %u\n", i);
+ continue;
+ }
+
+ /* parse DDR information from FRU table */
+ ret = fru_parse_ddr(fru_tbl, &fru_area[FRU_AREA_INTERNAL],
+ &ddr_info);
+ if (ret < 0) {
+ WARN("No FRU DDR info found in eMMC part %u\n", i);
+ continue;
+ }
+
+ /* parse board information from FRU table */
+ ret = fru_parse_board(fru_tbl, &fru_area[FRU_AREA_BOARD_INFO],
+ &board_info);
+ if (ret < 0) {
+ WARN("No FRU board info found in eMMC part %u\n", i);
+ continue;
+ }
+
+ /* if we reach here, valid FRU table is parsed */
+ break;
+ }
+
+ if (ret < 0) {
+ WARN("FRU table missing for this board\n");
+ return;
+ }
+
+ for (i = 0; i < BCM_MAX_NR_DDR; i++) {
+ INFO("DDR channel index: %d\n", ddr_info.mcb[i].idx);
+ INFO("DDR size %u GB\n", ddr_info.mcb[i].size_mb / 1024);
+ INFO("DDR ref ID by SW (Not MCB Ref ID) 0x%x\n",
+ ddr_info.mcb[i].ref_id);
+ }
+
+ fru_format_time(board_info.mfg_date, &fru_tm);
+
+ INFO("**** FRU board information ****\n");
+ INFO("Language 0x%x\n", board_info.lang);
+ INFO("Manufacturing Date %u.%02u.%02u, %02u:%02u\n",
+ fru_tm.year, fru_tm.month, fru_tm.day,
+ fru_tm.hour, fru_tm.min);
+ INFO("Manufacturing Date(Raw) 0x%x\n", board_info.mfg_date);
+ INFO("Manufacturer %s\n", board_info.manufacturer);
+ INFO("Product Name %s\n", board_info.product_name);
+ INFO("Serial number %s\n", board_info.serial_number);
+ INFO("Part number %s\n", board_info.part_number);
+ INFO("File ID %s\n", board_info.file_id);
+}
+#endif /* USE_FRU */
+
+#ifdef USE_GPIO
+
+#define INVALID_GPIO 0xffff
+
+static const int gpio_cfg_bitmap[MAX_NR_GPIOS] = {
+#ifdef BRD_DETECT_GPIO_BIT0
+ BRD_DETECT_GPIO_BIT0,
+#else
+ INVALID_GPIO,
+#endif
+#ifdef BRD_DETECT_GPIO_BIT1
+ BRD_DETECT_GPIO_BIT1,
+#else
+ INVALID_GPIO,
+#endif
+#ifdef BRD_DETECT_GPIO_BIT2
+ BRD_DETECT_GPIO_BIT2,
+#else
+ INVALID_GPIO,
+#endif
+#ifdef BRD_DETECT_GPIO_BIT3
+ BRD_DETECT_GPIO_BIT3,
+#else
+ INVALID_GPIO,
+#endif
+};
+
+static uint8_t gpio_bitmap;
+
+/*
+ * Use an odd number to avoid potential conflict with public GPIO level
+ * defines
+ */
+#define GPIO_STATE_FLOAT 15
+
+/*
+ * If GPIO_SUPPORT_FLOAT_DETECTION is disabled, simply return the GPIO level.
+ *
+ * If GPIO_SUPPORT_FLOAT_DETECTION is enabled, run an additional test for a
+ * possible floating (unconnected) pin. This support assumes that an
+ * externally applied pull up / pull down is stronger than the internal
+ * pull up / pull down.
+ */
+static uint8_t gpio_get_state(int gpio)
+{
+ uint8_t val;
+
+ /* set direction to GPIO input */
+ gpio_set_direction(gpio, GPIO_DIR_IN);
+
+#ifndef GPIO_SUPPORT_FLOAT_DETECTION
+ if (gpio_get_value(gpio) == GPIO_LEVEL_HIGH)
+ val = GPIO_LEVEL_HIGH;
+ else
+ val = GPIO_LEVEL_LOW;
+
+ return val;
+#else
+ /*
+ * Enable internal pull down. If GPIO level is still high, there must
+ * be an external pull up
+ */
+ gpio_set_pull(gpio, GPIO_PULL_DOWN);
+ if (gpio_get_value(gpio) == GPIO_LEVEL_HIGH) {
+ val = GPIO_LEVEL_HIGH;
+ goto exit;
+ }
+
+ /*
+ * Enable internal pull up. If GPIO level is still low, there must
+ * be an external pull down
+ */
+ gpio_set_pull(gpio, GPIO_PULL_UP);
+ if (gpio_get_value(gpio) == GPIO_LEVEL_LOW) {
+ val = GPIO_LEVEL_LOW;
+ goto exit;
+ }
+
+ /* if we reach here, the pin must not be connected */
+ val = GPIO_STATE_FLOAT;
+
+exit:
+ /* make sure the internal pull is disabled */
+ if (gpio_get_pull(gpio) != GPIO_PULL_NONE)
+ gpio_set_pull(gpio, GPIO_PULL_NONE);
+
+ return val;
+#endif
+}
+
+static void board_detect_gpio(void)
+{
+ unsigned int i, val;
+ int gpio;
+
+ iproc_gpio_init(IPROC_GPIO_S_BASE, IPROC_GPIO_NR,
+ IPROC_IOPAD_MODE_BASE, HSLS_IOPAD_BASE);
+
+ gpio_bitmap = 0;
+ for (i = 0; i < MAX_NR_GPIOS; i++) {
+ if (gpio_cfg_bitmap[i] == INVALID_GPIO)
+ continue;
+
+ /*
+ * Construct the bitmap based on GPIO value. Floating pin
+ * detection is a special case. As soon as a floating pin is
+ * detected, a special value of MAX_GPIO_BITMAP_VAL is
+ * assigned and we break out of the loop immediately
+ */
+ gpio = gpio_cfg_bitmap[i];
+ val = gpio_get_state(gpio);
+ if (val == GPIO_STATE_FLOAT) {
+ gpio_bitmap = MAX_GPIO_BITMAP_VAL;
+ break;
+ }
+
+ if (val == GPIO_LEVEL_HIGH)
+ gpio_bitmap |= BIT(i);
+ }
+
+ memcpy(&ddr_info, &gpio_ddr_info[gpio_bitmap], sizeof(ddr_info));
+ INFO("Board detection GPIO bitmap = 0x%x\n", gpio_bitmap);
+}
+#endif /* USE_GPIO */
+
+static void bcm_board_detect(void)
+{
+#ifdef DDR_LEGACY_MCB_SUPPORTED
+ /* Loading default DDR info */
+ memcpy(&ddr_info, &default_ddr_info, sizeof(ddr_info));
+#endif
+#ifdef USE_FRU
+ board_detect_fru();
+#endif
+#ifdef USE_GPIO
+ board_detect_gpio();
+#endif
+}
+
+static void dump_persistent_regs(void)
+{
+ NOTICE("pr0: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG0));
+ NOTICE("pr1: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG1));
+ NOTICE("pr2: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG2));
+ NOTICE("pr3: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG3));
+ NOTICE("pr4: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG4));
+ NOTICE("pr5: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG5));
+ NOTICE("pr6: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG6));
+ NOTICE("pr7: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG7));
+ NOTICE("pr8: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG8));
+ NOTICE("pr9: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG9));
+ NOTICE("pr10: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG10));
+ NOTICE("pr11: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG11));
+}
+
+void plat_bcm_bl2_plat_arch_setup(void)
+{
+ if (chip_get_rev_id_major() == CHIP_REV_MAJOR_AX) {
+ if (!(sotp_mem_read(SOTP_ATF_CFG_ROW_ID, SOTP_ROW_NO_ECC) &
+ SOTP_ATF_WATCHDOG_ENABLE_MASK)) {
+ /*
+ * Stop the sp805 watchdog timer immediately.
+ * It might have been set up earlier by an MCU patch for
+ * the eMMC workaround.
+ *
+ * Note the watchdog timer started in the CRMU has a very
+ * short timeout and needs to be stopped immediately.
+ * Down below we restart it with a much longer timeout
+ * for BL2 and BL31.
+ */
+ sp805_stop(ARM_SP805_TWDG_BASE);
+ }
+ }
+
+#if !BRCM_DISABLE_TRUSTED_WDOG
+ /*
+ * Start the secure watchdog for BL2 and BL31.
+ * Note that UART download can take a long time,
+ * so do not enable the watchdog for UART download,
+ * as this boot source is not a standard mode of operation.
+ */
+ if (boot_source_get() != BOOT_SOURCE_UART)
+ sp805_start(ARM_SP805_TWDG_BASE, ARM_TWDG_LOAD_VAL);
+#endif
+
+#ifdef BCM_ELOG
+ /* Ensure logging is started out fresh in BL2. */
+ mmio_write_32(BCM_ELOG_BL2_BASE, 0);
+#endif
+ /*
+ * In BL2, since we have very limited space to store logs, we only
+ * save logs that are >= the WARNING level.
+ */
+ bcm_elog_init((void *)BCM_ELOG_BL2_BASE, BCM_ELOG_BL2_SIZE,
+ LOG_LEVEL_WARNING);
+
+ dump_persistent_regs();
+
+ /* Read CRMU mailbox 0 */
+ NOTICE("RESET (reported by CRMU): 0x%x\n",
+ mmio_read_32(CRMU_READ_MAIL_BOX0));
+
+ /*
+ * All non-boot-source PADs are in forced input-mode at
+ * reset so clear the force on non-boot-source PADs using
+ * CDRU register.
+ */
+ mmio_clrbits_32((uintptr_t)CDRU_CHIP_IO_PAD_CONTROL,
+ (1 << CDRU_CHIP_IO_PAD_CONTROL__CDRU_IOMUX_FORCE_PAD_IN_R));
+
+#if DRIVER_OCOTP_ENABLE
+ bcm_otpc_init(&otp_stingray_map);
+#endif
+
+ set_swreg_based_on_otp();
+
+#if IHOST_PLL_FREQ != 0
+ bcm_set_ihost_pll_freq(0x0, IHOST_PLL_FREQ);
+#endif
+
+#ifdef INCLUDE_EMMC_DRIVER_ERASE_CODE
+ /* The erasable unit of the eMMC is the "Erase Group";
+ /* The erasable unit of the eMMC is the "Erase Group".
+ * The erase group is measured in write blocks, which are the
+ * basic writable units of the device. The size of the erase
+ * group is a device-specific parameter.
+ */
+ emmc_erase(EMMC_ERASE_START_BLOCK, EMMC_ERASE_BLOCK_COUNT,
+ EMMC_ERASE_PARTITION);
+#endif
+
+ bcm_board_detect();
+#ifdef DRIVER_EMMC_ENABLE
+ /* Initialize the card, if it is not */
+ if (bcm_emmc_init(true) == 0)
+ WARN("eMMC Card Initialization Failed!!!\n");
+#endif
+
+#if BL2_TEST_I2C
+ i2c_test();
+#endif
+
+#ifdef USE_DDR
+ ddr_initialize(&ddr_info);
+
+ ddr_secure_region_config(SECURE_DDR_BASE_ADDRESS,
+ SECURE_DDR_END_ADDRESS);
+#ifdef NITRO_SECURE_ACCESS
+ ddr_secure_region_config(DDR_NITRO_SECURE_REGION_START,
+ DDR_NITRO_SECURE_REGION_END);
+#endif
+#else
+ ext_sram_init();
+#endif
+
+#if BL2_TEST_MEM
+ ddr_test();
+#endif
+
+#ifdef USE_NAND
+ brcm_stingray_nand_init();
+#endif
+
+#if defined(USE_PAXB) || defined(USE_PAXC) || defined(USE_SATA)
+ brcm_stingray_pcie_reset();
+#endif
+
+#ifdef USE_PAXC
+ if (boot_source_get() != BOOT_SOURCE_QSPI)
+ brcm_stingray_chimp_check_and_fastboot();
+#endif
+
+#if ((!CLEAN_DDR || MMU_DISABLED))
+ /*
+ * Now DDR has been initialized. We want to copy all the logs in SRAM
+ * into DDR so we will have much more space to store the logs in the
+ * next boot stage
+ */
+ bcm_elog_copy_log((void *)BCM_ELOG_BL31_BASE,
+ MIN(BCM_ELOG_BL2_SIZE, BCM_ELOG_BL31_SIZE)
+ );
+
+ /*
+ * We are not yet at the end of BL2, but we can stop log here so we do
+ * not need to add 'bcm_elog_exit' to the standard BL2 code. The
+ * benefit of capturing BL2 logs after this is very minimal in a
+ * production system
+ * NOTE: BL2 logging must be exited before going forward to setup
+ * page tables
+ */
+ bcm_elog_exit();
+#endif
+}
diff --git a/plat/brcm/board/stingray/src/bl31_setup.c b/plat/brcm/board/stingray/src/bl31_setup.c
new file mode 100644
index 0000000..04df6a0
--- /dev/null
+++ b/plat/brcm/board/stingray/src/bl31_setup.c
@@ -0,0 +1,1071 @@
+/*
+ * Copyright (c) 2015 - 2021, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a72.h>
+#include <drivers/arm/sp805.h>
+#include <drivers/console.h>
+#include <drivers/delay_timer.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+
+#include <bl33_info.h>
+#include <chimp.h>
+#include <cmn_plat_util.h>
+#include <dmu.h>
+#include <fsx.h>
+#include <iommu.h>
+#include <ncsi.h>
+#include <paxb.h>
+#include <paxc.h>
+#include <platform_def.h>
+#ifdef USE_USB
+#include <platform_usb.h>
+#endif
+#include <sdio.h>
+#include <sr_utils.h>
+#include <timer_sync.h>
+
+/*******************************************************************************
+ * Perform any BL3-1 platform setup common to ARM standard platforms
+ ******************************************************************************/
+
+static void brcm_stingray_gain_qspi_control(void)
+{
+ if (boot_source_get() != BOOT_SOURCE_QSPI) {
+ if (bcm_chimp_is_nic_mode() &&
+ (!bcm_chimp_handshake_done())) {
+ /*
+ * Last chance to wait for ChiMP firmware to report
+ * "I am done" before grabbing the QSPI
+ */
+ WARN("ChiMP still not booted\n");
+#ifndef CHIMP_ALWAYS_NEEDS_QSPI
+ WARN("ChiMP is given the last chance to boot (%d s)\n",
+ CHIMP_HANDSHAKE_TIMEOUT_MS / 1000);
+
+ if (!bcm_chimp_wait_handshake()) {
+ ERROR("ChiMP failed to boot\n");
+ } else {
+ INFO("ChiMP booted successfully\n");
+ }
+#endif
+ }
+
+#ifndef CHIMP_ALWAYS_NEEDS_QSPI
+ INFO("AP grabs QSPI\n");
+ /*
+ * For QSPI boot, SBL/BL1 has already taken care of this.
+ * For other boot sources, QSPI needs to be muxed to the
+ * AP for exclusive use.
+ */
+ brcm_stingray_set_qspi_mux(1);
+ INFO("AP (bl31) gained control over QSPI\n");
+#endif
+ }
+}
+
+static void brcm_stingray_dma_pl330_init(void)
+{
+ unsigned int val;
+
+ VERBOSE("dma pl330 init start\n");
+
+ /* Set DMAC boot_manager_ns = 0x1 */
+ VERBOSE(" - configure boot security state\n");
+ mmio_setbits_32(DMAC_M0_IDM_IO_CONTROL_DIRECT, BOOT_MANAGER_NS);
+ /* Set boot_peripheral_ns[n:0] = 0xffffffff */
+ mmio_write_32(ICFG_DMAC_CONFIG_2, BOOT_PERIPHERAL_NS);
+ /* Set boot_irq_ns[n:0] = 0x0000ffff */
+ mmio_write_32(ICFG_DMAC_CONFIG_3, BOOT_IRQ_NS);
+
+ /* Set DMAC stream_id */
+ VERBOSE(" - configure stream_id = 0x6000\n");
+ val = (DMAC_STREAM_ID << DMAC_SID_SHIFT);
+ mmio_write_32(ICFG_DMAC_SID_ARADDR_CONTROL, val);
+ mmio_write_32(ICFG_DMAC_SID_AWADDR_CONTROL, val);
+
+ /* Reset DMAC */
+ VERBOSE(" - reset dma pl330\n");
+
+ mmio_setbits_32(DMAC_M0_IDM_RESET_CONTROL, 0x1);
+ udelay(500);
+
+ mmio_clrbits_32(DMAC_M0_IDM_RESET_CONTROL, 0x1);
+ udelay(500);
+
+ INFO("dma pl330 init done\n");
+}
+
+static void brcm_stingray_spi_pl022_init(uintptr_t idm_reset_control)
+{
+ VERBOSE("spi pl022 init start\n");
+
+ /* Reset APB SPI bridge */
+ VERBOSE(" - reset apb spi bridge\n");
+ mmio_setbits_32(idm_reset_control, 0x1);
+ udelay(500);
+
+ mmio_clrbits_32(idm_reset_control, 0x1);
+ udelay(500);
+
+ INFO("spi pl022 init done\n");
+}
+
+#define CDRU_SATA_RESET_N \
+ BIT(CDRU_MISC_RESET_CONTROL__CDRU_SATA_RESET_N_R)
+#define CDRU_MISC_CLK_SATA \
+ BIT(CDRU_MISC_CLK_ENABLE_CONTROL__CDRU_SATA_CLK_EN_R)
+#define CCN_CONFIG_CLK_ENABLE (1 << 2)
+#define MMU_CONFIG_CLK_ENABLE (0x3F << 16)
+
+#define SATA_SATA_TOP_CTRL_BUS_CTRL (SATA_BASE + 0x2044)
+#define DMA_BIT_CTRL_MASK 0x003
+#define DMA_DESCR_ENDIAN_CTRL (DMA_BIT_CTRL_MASK << 0x002)
+#define DMA_DATA_ENDIAN_CTRL (DMA_BIT_CTRL_MASK << 0x004)
+
+#define SATA_PORT_SATA3_PCB_REG8 (SATA_BASE + 0x2320)
+#define SATA_PORT_SATA3_PCB_REG11 (SATA_BASE + 0x232c)
+#define SATA_PORT_SATA3_PCB_BLOCK_ADDR (SATA_BASE + 0x233c)
+
+#define SATA3_AFE_TXRX_ACTRL 0x1d0
+/* TX driver swing setting is 800 mV */
+#define DFS_SWINGNOPE_VALUE (0x0 << 6)
+#define DFS_SWINGNOPE_MASK (0x3 << 6)
+
+#define DFS_SWINGPE_VALUE (0x1 << 4)
+#define DFS_SWINGPE_MASK (0x3 << 4)
+
+#define DFS_INJSTRENGTH_VALUE (0x0 << 4)
+#define DFS_INJSTRENGTH_MASK (0x3 << 4)
+
+#define DFS_INJEN (0x1 << 3)
+
+#define SATA_CORE_MEM_CTRL (SATA_BASE + 0x3a08)
+#define SATA_CORE_MEM_CTRL_ISO BIT(0)
+#define SATA_CORE_MEM_CTRL_ARRPOWEROKIN BIT(1)
+#define SATA_CORE_MEM_CTRL_ARRPOWERONIN BIT(2)
+#define SATA_CORE_MEM_CTRL_POWEROKIN BIT(3)
+#define SATA_CORE_MEM_CTRL_POWERONIN BIT(4)
+
+#define SATA0_IDM_RESET_CONTROL (SATA_BASE + 0x500800)
+#define SATA_APBT0_IDM_IO_CONTROL_DIRECT (SATA_BASE + 0x51a408)
+#define IO_CONTROL_DIRECT_CLK_ENABLE BIT(0)
+#define SATA_APBT0_IDM_RESET_CONTROL (SATA_BASE + 0x51a800)
+#define IDM_RESET_CONTROL_RESET BIT(0)
+
+#define NIC400_SATA_NOC_SECURITY1 0x6830000c
+#define SATA_NOC_SECURITY1_FIELD 0xf
+#define NIC400_SATA_NOC_SECURITY2 0x68300010
+#define SATA_NOC_SECURITY2_FIELD 0xf
+#define NIC400_SATA_NOC_SECURITY3 0x68300014
+#define SATA_NOC_SECURITY3_FIELD 0x1
+#define NIC400_SATA_NOC_SECURITY4 0x68300018
+#define SATA_NOC_SECURITY4_FIELD 0x1
+#define NIC400_SATA_NOC_SECURITY5 0x6830001c
+#define SATA_NOC_SECURITY5_FIELD 0xf
+#define NIC400_SATA_NOC_SECURITY6 0x68300020
+#define SATA_NOC_SECURITY6_FIELD 0x1
+#define NIC400_SATA_NOC_SECURITY7 0x68300024
+#define SATA_NOC_SECURITY7_FIELD 0xf
+#define NIC400_SATA_NOC_SECURITY8 0x68300028
+#define SATA_NOC_SECURITY8_FIELD 0xf
+#define NIC400_SATA_NOC_SECURITY9 0x6830002c
+#define SATA_NOC_SECURITY9_FIELD 0x1
+
+#define SATA_APBT_IDM_PORT_REG(port, reg) \
+ (((port/4) << 12) + reg)
+
+#define SATA_IDM_PORT_REG(port, reg) ((port << 12) + reg)
+
+#define SATA_PORT_REG(port, reg) \
+ (((port%4) << 16) + ((port/4) << 20) + reg)
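+
+/*
+ * Address-map sketch (illustrative only): each SATA port occupies a
+ * 0x10000 window and each group of four ports a 0x100000 window, so e.g.
+ *
+ *   SATA_PORT_REG(5, SATA_CORE_MEM_CTRL)
+ *     = ((5 % 4) << 16) + ((5 / 4) << 20) + (SATA_BASE + 0x3a08)
+ *     = 0x10000 + 0x100000 + SATA_BASE + 0x3a08
+ */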
+
+#define MAX_SATA_PORTS 8
+#define USE_SATA_PORTS 8
+
+#ifdef USE_SATA
+static const uint8_t sr_b0_sata_port[MAX_SATA_PORTS] = {
+ 0, 1, 2, 3, 4, 5, 6, 7
+};
+
+static uint32_t brcm_stingray_get_sata_port(unsigned int port)
+{
+ return sr_b0_sata_port[port];
+}
+
+static void brcm_stingray_sata_init(void)
+{
+ unsigned int port = 0;
+ uint32_t sata_port;
+
+ mmio_setbits_32(CDRU_MISC_CLK_ENABLE_CONTROL,
+ CDRU_MISC_CLK_SATA);
+
+ mmio_clrbits_32(CDRU_MISC_RESET_CONTROL, CDRU_SATA_RESET_N);
+ mmio_setbits_32(CDRU_MISC_RESET_CONTROL, CDRU_SATA_RESET_N);
+
+ for (port = 0; port < USE_SATA_PORTS; port++) {
+
+ sata_port = brcm_stingray_get_sata_port(port);
+ mmio_write_32(SATA_APBT_IDM_PORT_REG(sata_port,
+ SATA_APBT0_IDM_RESET_CONTROL),
+ 0x0);
+ mmio_setbits_32(SATA_APBT_IDM_PORT_REG(sata_port,
+ SATA_APBT0_IDM_IO_CONTROL_DIRECT),
+ IO_CONTROL_DIRECT_CLK_ENABLE);
+ mmio_write_32(SATA_IDM_PORT_REG(sata_port,
+ SATA0_IDM_RESET_CONTROL),
+ 0x0);
+
+ mmio_setbits_32(SATA_PORT_REG(sata_port, SATA_CORE_MEM_CTRL),
+ SATA_CORE_MEM_CTRL_ARRPOWERONIN);
+ mmio_setbits_32(SATA_PORT_REG(sata_port, SATA_CORE_MEM_CTRL),
+ SATA_CORE_MEM_CTRL_ARRPOWEROKIN);
+ mmio_setbits_32(SATA_PORT_REG(sata_port, SATA_CORE_MEM_CTRL),
+ SATA_CORE_MEM_CTRL_POWERONIN);
+ mmio_setbits_32(SATA_PORT_REG(sata_port, SATA_CORE_MEM_CTRL),
+ SATA_CORE_MEM_CTRL_POWEROKIN);
+ mmio_clrbits_32(SATA_PORT_REG(sata_port, SATA_CORE_MEM_CTRL),
+ SATA_CORE_MEM_CTRL_ISO);
+
+ mmio_clrbits_32(SATA_PORT_REG(sata_port,
+ SATA_SATA_TOP_CTRL_BUS_CTRL),
+ (DMA_DESCR_ENDIAN_CTRL | DMA_DATA_ENDIAN_CTRL));
+ }
+
+ mmio_setbits_32(NIC400_SATA_NOC_SECURITY1, SATA_NOC_SECURITY1_FIELD);
+ mmio_setbits_32(NIC400_SATA_NOC_SECURITY2, SATA_NOC_SECURITY2_FIELD);
+ mmio_setbits_32(NIC400_SATA_NOC_SECURITY3, SATA_NOC_SECURITY3_FIELD);
+ mmio_setbits_32(NIC400_SATA_NOC_SECURITY4, SATA_NOC_SECURITY4_FIELD);
+ mmio_setbits_32(NIC400_SATA_NOC_SECURITY5, SATA_NOC_SECURITY5_FIELD);
+ mmio_setbits_32(NIC400_SATA_NOC_SECURITY6, SATA_NOC_SECURITY6_FIELD);
+ mmio_setbits_32(NIC400_SATA_NOC_SECURITY7, SATA_NOC_SECURITY7_FIELD);
+ mmio_setbits_32(NIC400_SATA_NOC_SECURITY8, SATA_NOC_SECURITY8_FIELD);
+ mmio_setbits_32(NIC400_SATA_NOC_SECURITY9, SATA_NOC_SECURITY9_FIELD);
+
+ INFO("sata init done\n");
+}
+#else
+static void poweroff_sata_pll(void)
+{
+ /*
+ * The SATA subsystem is clocked by LCPLL0, which is enabled by
+ * default by the bootrom. Power off the PLL if SATA is not used.
+ */
+
+ /* enable isolation */
+ mmio_setbits_32(CRMU_AON_CTRL1,
+ BIT(CRMU_AON_CTRL1__LCPLL0_ISO_IN));
+
+ /* Power off the SATA PLL/LDO */
+ mmio_clrbits_32(CRMU_AON_CTRL1,
+ (BIT(CRMU_AON_CTRL1__LCPLL0_PWRON_LDO) |
+ BIT(CRMU_AON_CTRL1__LCPLL0_PWR_ON)));
+}
+#endif
+
+#ifdef USE_AMAC
+#ifdef EMULATION_SETUP
+#define ICFG_AMAC_STRAP_CONFIG (HSLS_ICFG_REGS_BASE + 0xa5c)
+#define ICFG_AMAC_STRAP_DLL_BYPASS (1 << 2)
+#endif
+#define ICFG_AMAC_MAC_CTRL_REG (HSLS_ICFG_REGS_BASE + 0xa6c)
+#define ICFG_AMAC_MAC_FULL_DUPLEX (1 << 1)
+#define ICFG_AMAC_RGMII_PHY_CONFIG (HSLS_ICFG_REGS_BASE + 0xa60)
+#define ICFG_AMAC_SID_CONTROL (HSLS_ICFG_REGS_BASE + 0xb10)
+#define ICFG_AMAC_SID_SHIFT 5
+#define ICFG_AMAC_SID_AWADDR_OFFSET 0x0
+#define ICFG_AMAC_SID_ARADDR_OFFSET 0x4
+#define AMAC_RPHY_1000_DATARATE (1 << 20)
+#define AMAC_RPHY_FULL_DUPLEX (1 << 5)
+#define AMAC_RPHY_SPEED_OFFSET 2
+#define AMAC_RPHY_SPEED_MASK (7 << AMAC_RPHY_SPEED_OFFSET)
+#define AMAC_RPHY_1G_SPEED (2 << AMAC_RPHY_SPEED_OFFSET)
+#define ICFG_AMAC_MEM_PWR_CTRL (HSLS_ICFG_REGS_BASE + 0xa68)
+#define AMAC_ISO BIT(9)
+#define AMAC_STDBY BIT(8)
+#define AMAC_ARRPOWEROKIN BIT(7)
+#define AMAC_ARRPOWERONIN BIT(6)
+#define AMAC_POWEROKIN BIT(5)
+#define AMAC_POWERONIN BIT(4)
+
+#define AMAC_IDM0_IO_CONTROL_DIRECT (HSLS_IDM_REGS_BASE + 0x4408)
+#define AMAC_IDM0_ARCACHE_OFFSET 16
+#define AMAC_IDM0_AWCACHE_OFFSET 7
+#define AMAC_IDM0_ARCACHE_MASK (0xF << AMAC_IDM0_ARCACHE_OFFSET)
+#define AMAC_IDM0_AWCACHE_MASK (0xF << AMAC_IDM0_AWCACHE_OFFSET)
+/* ARCACHE = 0xb, AWCACHE = 0x7: write-back, no-allocate */
+#define AMAC_IDM0_ARCACHE_VAL (0xb << AMAC_IDM0_ARCACHE_OFFSET)
+#define AMAC_IDM0_AWCACHE_VAL (0x7 << AMAC_IDM0_AWCACHE_OFFSET)
+
+static void brcm_stingray_amac_init(void)
+{
+ unsigned int val;
+ uintptr_t icfg_amac_sid = ICFG_AMAC_SID_CONTROL;
+
+ VERBOSE("amac init start\n");
+
+ val = SR_SID_VAL(0x3, 0x0, 0x4) << ICFG_AMAC_SID_SHIFT;
+ mmio_write_32(icfg_amac_sid + ICFG_AMAC_SID_AWADDR_OFFSET, val);
+ mmio_write_32(icfg_amac_sid + ICFG_AMAC_SID_ARADDR_OFFSET, val);
+
+ mmio_setbits_32(ICFG_AMAC_MEM_PWR_CTRL, AMAC_ARRPOWEROKIN);
+ mmio_setbits_32(ICFG_AMAC_MEM_PWR_CTRL, AMAC_ARRPOWERONIN);
+ mmio_setbits_32(ICFG_AMAC_MEM_PWR_CTRL, AMAC_POWEROKIN);
+ mmio_setbits_32(ICFG_AMAC_MEM_PWR_CTRL, AMAC_POWERONIN);
+ mmio_clrbits_32(ICFG_AMAC_MEM_PWR_CTRL, AMAC_ISO);
+ mmio_write_32(APBR_IDM_RESET_CONTROL, 0x0);
+ mmio_clrsetbits_32(ICFG_AMAC_RGMII_PHY_CONFIG, AMAC_RPHY_SPEED_MASK,
+ AMAC_RPHY_1G_SPEED); /* 1 Gbps line rate */
+ /* 1000 datarate set */
+ mmio_setbits_32(ICFG_AMAC_RGMII_PHY_CONFIG, AMAC_RPHY_1000_DATARATE);
+ /* full duplex */
+ mmio_setbits_32(ICFG_AMAC_RGMII_PHY_CONFIG, AMAC_RPHY_FULL_DUPLEX);
+#ifdef EMULATION_SETUP
+ /* DLL bypass */
+ mmio_setbits_32(ICFG_AMAC_STRAP_CONFIG, ICFG_AMAC_STRAP_DLL_BYPASS);
+#endif
+ /* serdes full duplex */
+ mmio_setbits_32(ICFG_AMAC_MAC_CTRL_REG, ICFG_AMAC_MAC_FULL_DUPLEX);
+ mmio_clrsetbits_32(AMAC_IDM0_IO_CONTROL_DIRECT, AMAC_IDM0_ARCACHE_MASK,
+ AMAC_IDM0_ARCACHE_VAL);
+ mmio_clrsetbits_32(AMAC_IDM0_IO_CONTROL_DIRECT, AMAC_IDM0_AWCACHE_MASK,
+ AMAC_IDM0_AWCACHE_VAL);
+ INFO("amac init done\n");
+}
+#endif /* USE_AMAC */
+
+static void brcm_stingray_pka_meminit(void)
+{
+ uintptr_t icfg_mem_ctrl = ICFG_PKA_MEM_PWR_CTRL;
+
+ VERBOSE("pka meminit start\n");
+
+ VERBOSE(" - arrpoweron\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_PKA_MEM_PWR_CTRL__ARRPOWERONIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_PKA_MEM_PWR_CTRL__ARRPOWERONOUT))
+ ;
+
+ VERBOSE(" - arrpowerok\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_PKA_MEM_PWR_CTRL__ARRPOWEROKIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_PKA_MEM_PWR_CTRL__ARRPOWEROKOUT))
+ ;
+
+ VERBOSE(" - poweron\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_PKA_MEM_PWR_CTRL__POWERONIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_PKA_MEM_PWR_CTRL__POWERONOUT))
+ ;
+
+ VERBOSE(" - powerok\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_PKA_MEM_PWR_CTRL__POWEROKIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_PKA_MEM_PWR_CTRL__POWEROKOUT))
+ ;
+
+ /* Wait some time */
+ mdelay(1);
+
+ VERBOSE(" - remove isolation\n");
+ mmio_clrbits_32(icfg_mem_ctrl, ICFG_PKA_MEM_PWR_CTRL__ISO);
+
+ INFO("pka meminit done\n");
+}
+
+static void brcm_stingray_smmu_init(void)
+{
+ unsigned int val;
+ uintptr_t smmu_base = SMMU_BASE;
+
+ VERBOSE("smmu init start\n");
+
+ /* Configure SCR0 */
+ VERBOSE(" - configure scr0\n");
+ val = mmio_read_32(smmu_base + 0x0);
+ val |= (0x1 << 12);
+ mmio_write_32(smmu_base + 0x0, val);
+
+ /* Reserve context banks for secure masters */
+ arm_smmu_reserve_secure_cntxt();
+
+ /* Print configuration */
+ VERBOSE(" - scr0=0x%x scr1=0x%x scr2=0x%x\n",
+ mmio_read_32(smmu_base + 0x0),
+ mmio_read_32(smmu_base + 0x4),
+ mmio_read_32(smmu_base + 0x8));
+
+ VERBOSE(" - idr0=0x%x idr1=0x%x idr2=0x%x\n",
+ mmio_read_32(smmu_base + 0x20),
+ mmio_read_32(smmu_base + 0x24),
+ mmio_read_32(smmu_base + 0x28));
+
+ VERBOSE(" - idr3=0x%x idr4=0x%x idr5=0x%x\n",
+ mmio_read_32(smmu_base + 0x2c),
+ mmio_read_32(smmu_base + 0x30),
+ mmio_read_32(smmu_base + 0x34));
+
+ VERBOSE(" - idr6=0x%x idr7=0x%x\n",
+ mmio_read_32(smmu_base + 0x38),
+ mmio_read_32(smmu_base + 0x3c));
+
+ INFO("smmu init done\n");
+}
+
+static void brcm_stingray_dma_pl330_meminit(void)
+{
+ uintptr_t icfg_mem_ctrl = ICFG_DMAC_MEM_PWR_CTRL;
+
+ VERBOSE("dmac meminit start\n");
+
+ VERBOSE(" - arrpoweron\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_DMAC_MEM_PWR_CTRL__ARRPOWERONIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_DMAC_MEM_PWR_CTRL__ARRPOWERONOUT))
+ ;
+
+ VERBOSE(" - arrpowerok\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_DMAC_MEM_PWR_CTRL__ARRPOWEROKIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_DMAC_MEM_PWR_CTRL__ARRPOWEROKOUT))
+ ;
+
+ VERBOSE(" - poweron\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_DMAC_MEM_PWR_CTRL__POWERONIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_DMAC_MEM_PWR_CTRL__POWERONOUT))
+ ;
+
+ VERBOSE(" - powerok\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_DMAC_MEM_PWR_CTRL__POWEROKIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_DMAC_MEM_PWR_CTRL__POWEROKOUT))
+ ;
+
+ /* Wait some time */
+ mdelay(1);
+
+ VERBOSE(" - remove isolation\n");
+ mmio_clrbits_32(icfg_mem_ctrl, ICFG_DMAC_MEM_PWR_CTRL__ISO);
+
+ INFO("dmac meminit done\n");
+}
+
+/* Program the CRMU access ranges to allow non-secure access */
+static void brcm_stingray_crmu_access_init(void)
+{
+ /* Enable 0x6641c001 - 0x6641c701 for non secure access */
+ mmio_write_32(CRMU_CORE_ADDR_RANGE0_LOW, 0x6641c001);
+ mmio_write_32(CRMU_CORE_ADDR_RANGE0_LOW + 0x4, 0x6641c701);
+
+ /* Enable 0x6641d001 - 0x66424b01 for non secure access */
+ mmio_write_32(CRMU_CORE_ADDR_RANGE1_LOW, 0x6641d001);
+ mmio_write_32(CRMU_CORE_ADDR_RANGE1_LOW + 0x4, 0x66424b01);
+
+ /* Enable 0x66425001 - 0x66425f01 for non secure access */
+ mmio_write_32(CRMU_CORE_ADDR_RANGE2_LOW, 0x66425001);
+ mmio_write_32(CRMU_CORE_ADDR_RANGE2_LOW + 0x4, 0x66425f01);
+
+ INFO("crmu access init done\n");
+}
+
+static void brcm_stingray_scr_init(void)
+{
+ unsigned int val;
+ uintptr_t scr_base = SCR_BASE;
+ unsigned int clr_mask = SCR_AXCACHE_CONFIG_MASK;
+ unsigned int set_mask = SCR_TBUX_AXCACHE_CONFIG;
+
+ VERBOSE("scr init start\n");
+
+ /* awdomain=0x1 and ardomain=0x1 */
+ mmio_clrsetbits_32(scr_base + 0x0, clr_mask, set_mask);
+ val = mmio_read_32(scr_base + 0x0);
+ VERBOSE(" - set tbu0_config=0x%x\n", val);
+
+ /* awdomain=0x1 and ardomain=0x1 */
+ mmio_clrsetbits_32(scr_base + 0x4, clr_mask, set_mask);
+ val = mmio_read_32(scr_base + 0x4);
+ VERBOSE(" - set tbu1_config=0x%x\n", val);
+
+ /* awdomain=0x1 and ardomain=0x1 */
+ mmio_clrsetbits_32(scr_base + 0x8, clr_mask, set_mask);
+ val = mmio_read_32(scr_base + 0x8);
+ VERBOSE(" - set tbu2_config=0x%x\n", val);
+
+ /* awdomain=0x1 and ardomain=0x1 */
+ mmio_clrsetbits_32(scr_base + 0xc, clr_mask, set_mask);
+ val = mmio_read_32(scr_base + 0xc);
+ VERBOSE(" - set tbu3_config=0x%x\n", val);
+
+ /* awdomain=0x1 and ardomain=0x1 */
+ mmio_clrsetbits_32(scr_base + 0x10, clr_mask, set_mask);
+ val = mmio_read_32(scr_base + 0x10);
+ VERBOSE(" - set tbu4_config=0x%x\n", val);
+
+ /* awdomain=0x0 and ardomain=0x0 */
+ mmio_clrbits_32(scr_base + 0x14, clr_mask);
+ val = mmio_read_32(scr_base + 0x14);
+ VERBOSE(" - set gic_config=0x%x\n", val);
+
+ INFO("scr init done\n");
+}
+
+static void brcm_stingray_hsls_tzpcprot_init(void)
+{
+ unsigned int val;
+ uintptr_t tzpcdecprot_base = HSLS_TZPC_BASE;
+
+ VERBOSE("hsls tzpcprot init start\n");
+
+ /* Treat third-party masters as non-secured */
+ val = 0;
+ val |= BIT(6); /* SDIO1 */
+ val |= BIT(5); /* SDIO0 */
+ val |= BIT(0); /* AMAC */
+ mmio_write_32(tzpcdecprot_base + 0x810, val);
+
+ /* Print TZPC decode status registers */
+ VERBOSE(" - tzpcdecprot0=0x%x\n",
+ mmio_read_32(tzpcdecprot_base + 0x800));
+
+ VERBOSE(" - tzpcdecprot1=0x%x\n",
+ mmio_read_32(tzpcdecprot_base + 0x80c));
+
+ INFO("hsls tzpcprot init done\n");
+}
+
+#ifdef USE_I2S
+#define ICFG_AUDIO_POWER_CTRL (HSLS_ICFG_REGS_BASE + 0xaa8)
+#define ICFG_AUDIO_POWER_CTRL__POWERONIN BIT(0)
+#define ICFG_AUDIO_POWER_CTRL__POWEROKIN BIT(1)
+#define ICFG_AUDIO_POWER_CTRL__ARRPOWERONIN BIT(2)
+#define ICFG_AUDIO_POWER_CTRL__ARRPOWEROKIN BIT(3)
+#define ICFG_AUDIO_POWER_CTRL__POWERONOUT BIT(4)
+#define ICFG_AUDIO_POWER_CTRL__POWEROKOUT BIT(5)
+#define ICFG_AUDIO_POWER_CTRL__ARRPOWERONOUT BIT(6)
+#define ICFG_AUDIO_POWER_CTRL__ARRPOWEROKOUT BIT(7)
+#define ICFG_AUDIO_POWER_CTRL__ISO BIT(8)
+#define ICFG_AUDIO_SID_CONTROL (HSLS_ICFG_REGS_BASE + 0xaf8)
+#define ICFG_AUDIO_SID_SHIFT 5
+#define ICFG_AUDIO_SID_AWADDR_OFFSET 0x0
+#define ICFG_AUDIO_SID_ARADDR_OFFSET 0x4
+
+#define I2S_RESET_CONTROL (HSLS_IDM_REGS_BASE + 0x1800)
+#define I2S_IDM_IO_CONTROL (HSLS_IDM_REGS_BASE + 0x1408)
+#define IO_CONTROL_CLK_ENABLE BIT(0)
+#define I2S_IDM0_ARCACHE_OFFSET 16
+#define I2S_IDM0_AWCACHE_OFFSET 20
+#define I2S_IDM0_ARCACHE_MASK (0xF << I2S_IDM0_ARCACHE_OFFSET)
+#define I2S_IDM0_AWCACHE_MASK (0xF << I2S_IDM0_AWCACHE_OFFSET)
+/* ARCACHE = AWCACHE = 0x2: Normal Non-cacheable, Non-bufferable. */
+#define I2S_IDM0_ARCACHE_VAL (0x2 << I2S_IDM0_ARCACHE_OFFSET)
+#define I2S_IDM0_AWCACHE_VAL (0x2 << I2S_IDM0_AWCACHE_OFFSET)
+
+static void brcm_stingray_audio_init(void)
+{
+ unsigned int val;
+ uintptr_t icfg_mem_ctrl = ICFG_AUDIO_POWER_CTRL;
+ uintptr_t icfg_audio_sid = ICFG_AUDIO_SID_CONTROL;
+
+ mmio_write_32(I2S_RESET_CONTROL, 0x0);
+
+ mmio_clrsetbits_32(I2S_IDM_IO_CONTROL, I2S_IDM0_ARCACHE_MASK,
+ I2S_IDM0_ARCACHE_VAL);
+
+ mmio_clrsetbits_32(I2S_IDM_IO_CONTROL, I2S_IDM0_AWCACHE_MASK,
+ I2S_IDM0_AWCACHE_VAL);
+
+ mmio_setbits_32(I2S_IDM_IO_CONTROL, IO_CONTROL_CLK_ENABLE);
+
+ VERBOSE("audio meminit start\n");
+
+ VERBOSE(" - configure stream_id = 0x6001\n");
+ val = SR_SID_VAL(0x3, 0x0, 0x1) << ICFG_AUDIO_SID_SHIFT;
+ mmio_write_32(icfg_audio_sid + ICFG_AUDIO_SID_AWADDR_OFFSET, val);
+ mmio_write_32(icfg_audio_sid + ICFG_AUDIO_SID_ARADDR_OFFSET, val);
+
+ VERBOSE(" - arrpoweron\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_AUDIO_POWER_CTRL__ARRPOWERONIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_AUDIO_POWER_CTRL__ARRPOWERONOUT))
+ ;
+
+ VERBOSE(" - arrpowerok\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_AUDIO_POWER_CTRL__ARRPOWEROKIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_AUDIO_POWER_CTRL__ARRPOWEROKOUT))
+ ;
+
+ VERBOSE(" - poweron\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_AUDIO_POWER_CTRL__POWERONIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_AUDIO_POWER_CTRL__POWERONOUT))
+ ;
+
+ VERBOSE(" - powerok\n");
+ mmio_setbits_32(icfg_mem_ctrl,
+ ICFG_AUDIO_POWER_CTRL__POWEROKIN);
+ while (!(mmio_read_32(icfg_mem_ctrl) &
+ ICFG_AUDIO_POWER_CTRL__POWEROKOUT))
+ ;
+
+ /* Wait some time */
+ mdelay(1);
+
+ VERBOSE(" - remove isolation\n");
+ mmio_clrbits_32(icfg_mem_ctrl, ICFG_AUDIO_POWER_CTRL__ISO);
+
+ INFO("audio meminit done\n");
+}
+#endif /* USE_I2S */
+
+/*
+ * These defines do not match the regfile, but they are renamed in a way
+ * that makes them much more readable.
+ */
+
+#define SCR_GPV_SMMU_NS (SCR_GPV_BASE + 0x28)
+#define SCR_GPV_GIC500_NS (SCR_GPV_BASE + 0x34)
+#define HSLS_GPV_NOR_S0_NS (HSLS_GPV_BASE + 0x14)
+#define HSLS_GPV_IDM1_NS (HSLS_GPV_BASE + 0x18)
+#define HSLS_GPV_IDM2_NS (HSLS_GPV_BASE + 0x1c)
+#define HSLS_SDIO0_SLAVE_NS (HSLS_GPV_BASE + 0x20)
+#define HSLS_SDIO1_SLAVE_NS (HSLS_GPV_BASE + 0x24)
+#define HSLS_GPV_APBY_NS (HSLS_GPV_BASE + 0x2c)
+#define HSLS_GPV_APBZ_NS (HSLS_GPV_BASE + 0x30)
+#define HSLS_GPV_APBX_NS (HSLS_GPV_BASE + 0x34)
+#define HSLS_GPV_APBS_NS (HSLS_GPV_BASE + 0x38)
+#define HSLS_GPV_QSPI_S0_NS (HSLS_GPV_BASE + 0x68)
+#define HSLS_GPV_APBR_NS (HSLS_GPV_BASE + 0x6c)
+#define FS4_CRYPTO_GPV_RM_SLAVE_NS (FS4_CRYPTO_GPV_BASE + 0x8)
+#define FS4_CRYPTO_GPV_APB_SWITCH_NS (FS4_CRYPTO_GPV_BASE + 0xc)
+#define FS4_RAID_GPV_RM_SLAVE_NS (FS4_RAID_GPV_BASE + 0x8)
+#define FS4_RAID_GPV_APB_SWITCH_NS (FS4_RAID_GPV_BASE + 0xc)
+#define FS4_CRYPTO_IDM_NS (NIC400_FS_NOC_ROOT + 0x1c)
+#define FS4_RAID_IDM_NS (NIC400_FS_NOC_ROOT + 0x28)
+
+#define FS4_CRYPTO_RING_COUNT 32
+#define FS4_CRYPTO_DME_COUNT 10
+#define FS4_CRYPTO_AE_COUNT 10
+#define FS4_CRYPTO_START_STREAM_ID 0x4000
+#define FS4_CRYPTO_MSI_DEVICE_ID 0x4100
+
+#define FS4_RAID_RING_COUNT 32
+#define FS4_RAID_DME_COUNT 8
+#define FS4_RAID_AE_COUNT 8
+#define FS4_RAID_START_STREAM_ID 0x4200
+#define FS4_RAID_MSI_DEVICE_ID 0x4300
+
+#define FS6_PKI_AXI_SLAVE_NS \
+ (NIC400_FS_NOC_ROOT + NIC400_FS_NOC_SECURITY2_OFFSET)
+
+#define FS6_PKI_AE_DME_APB_NS \
+ (NIC400_FS_NOC_ROOT + NIC400_FS_NOC_SECURITY7_OFFSET)
+#define FS6_PKI_IDM_IO_CONTROL_DIRECT 0x0
+#define FS6_PKI_IDM_RESET_CONTROL 0x0
+#define FS6_PKI_RING_COUNT 32
+#define FS6_PKI_DME_COUNT 1
+#define FS6_PKI_AE_COUNT 4
+#define FS6_PKI_START_STREAM_ID 0x4000
+#define FS6_PKI_MSI_DEVICE_ID 0x4100
+
+static void brcm_stingray_security_init(void)
+{
+ unsigned int val;
+
+ val = mmio_read_32(SCR_GPV_SMMU_NS);
+ val |= BIT(0); /* SMMU NS = 1 */
+ mmio_write_32(SCR_GPV_SMMU_NS, val);
+
+ val = mmio_read_32(SCR_GPV_GIC500_NS);
+ val |= BIT(0); /* GIC-500 NS = 1 */
+ mmio_write_32(SCR_GPV_GIC500_NS, val);
+
+ val = mmio_read_32(HSLS_GPV_NOR_S0_NS);
+ val |= BIT(0); /* NOR SLAVE NS = 1 */
+ mmio_write_32(HSLS_GPV_NOR_S0_NS, val);
+
+ val = mmio_read_32(HSLS_GPV_IDM1_NS);
+ val |= BIT(0); /* DMA IDM NS = 1 */
+ val |= BIT(1); /* I2S IDM NS = 1 */
+ val |= BIT(2); /* AMAC IDM NS = 1 */
+ val |= BIT(3); /* SDIO0 IDM NS = 1 */
+ val |= BIT(4); /* SDIO1 IDM NS = 1 */
+ val |= BIT(5); /* DS_3 IDM NS = 1 */
+ mmio_write_32(HSLS_GPV_IDM1_NS, val);
+
+ val = mmio_read_32(HSLS_GPV_IDM2_NS);
+ val |= BIT(2); /* QSPI IDM NS = 1 */
+ val |= BIT(1); /* NOR IDM NS = 1 */
+ val |= BIT(0); /* NAND IDM NS = 1 */
+ mmio_write_32(HSLS_GPV_IDM2_NS, val);
+
+ val = mmio_read_32(HSLS_GPV_APBY_NS);
+ val |= BIT(10); /* I2S NS = 1 */
+ val |= BIT(4); /* IOPAD NS = 1 */
+ val |= 0xf; /* UARTx NS = 1 */
+ mmio_write_32(HSLS_GPV_APBY_NS, val);
+
+ val = mmio_read_32(HSLS_GPV_APBZ_NS);
+ val |= BIT(2); /* RNG NS = 1 */
+ mmio_write_32(HSLS_GPV_APBZ_NS, val);
+
+ val = mmio_read_32(HSLS_GPV_APBS_NS);
+ val |= 0x3; /* SPIx NS = 1 */
+ mmio_write_32(HSLS_GPV_APBS_NS, val);
+
+ val = mmio_read_32(HSLS_GPV_APBR_NS);
+ val |= BIT(7); /* QSPI APB NS = 1 */
+ val |= BIT(6); /* NAND APB NS = 1 */
+ val |= BIT(5); /* NOR APB NS = 1 */
+ val |= BIT(4); /* AMAC APB NS = 1 */
+ val |= BIT(1); /* DMA S1 APB NS = 1 */
+ mmio_write_32(HSLS_GPV_APBR_NS, val);
+
+ val = mmio_read_32(HSLS_SDIO0_SLAVE_NS);
+ val |= BIT(0); /* SDIO0 NS = 1 */
+ mmio_write_32(HSLS_SDIO0_SLAVE_NS, val);
+
+ val = mmio_read_32(HSLS_SDIO1_SLAVE_NS);
+ val |= BIT(0); /* SDIO1 NS = 1 */
+ mmio_write_32(HSLS_SDIO1_SLAVE_NS, val);
+
+ val = mmio_read_32(HSLS_GPV_APBX_NS);
+ val |= BIT(14); /* SMBUS1 NS = 1 */
+ val |= BIT(13); /* GPIO NS = 1 */
+ val |= BIT(12); /* WDT NS = 1 */
+ val |= BIT(11); /* SMBUS0 NS = 1 */
+ val |= BIT(10); /* Timer7 NS = 1 */
+ val |= BIT(9); /* Timer6 NS = 1 */
+ val |= BIT(8); /* Timer5 NS = 1 */
+ val |= BIT(7); /* Timer4 NS = 1 */
+ val |= BIT(6); /* Timer3 NS = 1 */
+ val |= BIT(5); /* Timer2 NS = 1 */
+ val |= BIT(4); /* Timer1 NS = 1 */
+ val |= BIT(3); /* Timer0 NS = 1 */
+ val |= BIT(2); /* MDIO NS = 1 */
+ val |= BIT(1); /* PWM NS = 1 */
+ mmio_write_32(HSLS_GPV_APBX_NS, val);
+
+ val = mmio_read_32(HSLS_GPV_QSPI_S0_NS);
+ val |= BIT(0); /* QSPI NS = 1 */
+ mmio_write_32(HSLS_GPV_QSPI_S0_NS, val);
+
+#ifdef USE_FS4
+ val = 0x1; /* FS4 Crypto rm_slave */
+ mmio_write_32(FS4_CRYPTO_GPV_RM_SLAVE_NS, val);
+ val = 0x1; /* FS4 Crypto apb_switch */
+ mmio_write_32(FS4_CRYPTO_GPV_APB_SWITCH_NS, val);
+
+ val = 0x1; /* FS4 Raid rm_slave */
+ mmio_write_32(FS4_RAID_GPV_RM_SLAVE_NS, val);
+ val = 0x1; /* FS4 Raid apb_switch */
+ mmio_write_32(FS4_RAID_GPV_APB_SWITCH_NS, val);
+
+ val = 0x1; /* FS4 Crypto IDM */
+ mmio_write_32(FS4_CRYPTO_IDM_NS, val);
+ val = 0x1; /* FS4 RAID IDM */
+ mmio_write_32(FS4_RAID_IDM_NS, val);
+#endif
+
+#ifdef BL31_CCN_NONSECURE
+ /* Enable non-secure access to CCN registers */
+ mmio_write_32(OLY_MN_REGISTERS_NODE0_SECURE_ACCESS, 0x1);
+#endif
+
+#ifdef DDR_CTRL_PHY_NONSECURE
+ mmio_write_32(SCR_NOC_DDR_REGISTER_ACCESS, 0x1);
+#endif
+
+ paxc_mhb_ns_init();
+
+ /* unlock scr idm for non secure access */
+ mmio_write_32(SCR_NOC_SECURITY0, 0xffffffff);
+
+ INFO("security init done\r\n");
+}
+
+void brcm_gpio_pad_ns_init(void)
+{
+ /* configure all GPIO pads for non-secure world access */
+ mmio_write_32(GPIO_S_CNTRL_REG, 0xffffffff); /* 128-140 gpio pads */
+ mmio_write_32(GPIO_S_CNTRL_REG + 0x4, 0xffffffff); /* 96-127 gpio pad */
+ mmio_write_32(GPIO_S_CNTRL_REG + 0x8, 0xffffffff); /* 64-95 gpio pad */
+ mmio_write_32(GPIO_S_CNTRL_REG + 0xc, 0xffffffff); /* 32-63 gpio pad */
+ mmio_write_32(GPIO_S_CNTRL_REG + 0x10, 0xffffffff); /* 0-31 gpio pad */
+}
+
+#ifndef USE_DDR
+static void brcm_stingray_sram_ns_init(void)
+{
+ uintptr_t sram_root = TZC400_FS_SRAM_ROOT;
+ uintptr_t noc_root = NIC400_FS_NOC_ROOT;
+
+ mmio_write_32(sram_root + GATE_KEEPER_OFFSET, 1);
+ mmio_write_32(sram_root + REGION_ATTRIBUTES_0_OFFSET, 0xc0000000);
+ mmio_write_32(sram_root + REGION_ID_ACCESS_0_OFFSET, 0x00010001);
+ mmio_write_32(noc_root + NIC400_FS_NOC_SECURITY4_OFFSET, 0x1);
+ INFO(" stingray sram ns init done.\n");
+}
+#endif
+
+static void ccn_pre_init(void)
+{
+ /*
+ * Set the WFC bit of the RN-I nodes where FS4 is connected.
+ * This is required in order to wait for read/write request
+ * completion acknowledgment. Otherwise the FS4 Ring Manager
+ * gets stale data because of re-ordering of read/write
+ * requests at the CCN level.
+ */
+ mmio_setbits_32(OLY_RNI3PDVM_REGISTERS_NODE8_AUX_CTL,
+ OLY_RNI3PDVM_REGISTERS_NODE8_AUX_CTL_WFC);
+}
+
+static void ccn_post_init(void)
+{
+ mmio_setbits_32(OLY_HNI_REGISTERS_NODE0_PCIERC_RNI_NODEID_LIST,
+ SRP_RNI_PCIE_CONNECTED);
+ mmio_setbits_32(OLY_HNI_REGISTERS_NODE0_SA_AUX_CTL,
+ SA_AUX_CTL_SER_DEVNE_WR);
+
+ mmio_clrbits_32(OLY_HNI_REGISTERS_NODE0_POS_CONTROL,
+ POS_CONTROL_HNI_POS_EN);
+ mmio_clrbits_32(OLY_HNI_REGISTERS_NODE0_SA_AUX_CTL,
+ SA_AUX_CTL_POS_EARLY_WR_COMP_EN);
+}
+
+#ifndef BL31_BOOT_PRELOADED_SCP
+static void crmu_init(void)
+{
+ /*
+ * Configure CRMU for using SMMU
+ */
+
+ /* Program CRMU Stream ID */
+ mmio_write_32(CRMU_MASTER_AXI_ARUSER_CONFIG,
+ (CRMU_STREAM_ID << CRMU_SID_SHIFT));
+ mmio_write_32(CRMU_MASTER_AXI_AWUSER_CONFIG,
+ (CRMU_STREAM_ID << CRMU_SID_SHIFT));
+
+ /* Create Identity mapping */
+ arm_smmu_create_identity_map(DOMAIN_CRMU);
+
+ /* Enable Client Port for Secure Masters */
+ arm_smmu_enable_secure_client_port();
+}
+#endif
+
+static void brcm_fsx_init(void)
+{
+#if defined(USE_FS4) && defined(USE_FS6)
+ #error "USE_FS4 and USE_FS6 should not be used together"
+#endif
+
+#ifdef USE_FS4
+ fsx_init(eFS4_CRYPTO, FS4_CRYPTO_RING_COUNT, FS4_CRYPTO_DME_COUNT,
+ FS4_CRYPTO_AE_COUNT, FS4_CRYPTO_START_STREAM_ID,
+ FS4_CRYPTO_MSI_DEVICE_ID, FS4_CRYPTO_IDM_IO_CONTROL_DIRECT,
+ FS4_CRYPTO_IDM_RESET_CONTROL, FS4_CRYPTO_BASE,
+ FS4_CRYPTO_DME_BASE);
+
+ fsx_init(eFS4_RAID, FS4_RAID_RING_COUNT, FS4_RAID_DME_COUNT,
+ FS4_RAID_AE_COUNT, FS4_RAID_START_STREAM_ID,
+ FS4_RAID_MSI_DEVICE_ID, FS4_RAID_IDM_IO_CONTROL_DIRECT,
+ FS4_RAID_IDM_RESET_CONTROL, FS4_RAID_BASE,
+ FS4_RAID_DME_BASE);
+
+ fsx_meminit("raid",
+ FS4_RAID_IDM_IO_CONTROL_DIRECT,
+ FS4_RAID_IDM_IO_STATUS);
+#endif
+}
+
+static void bcm_bl33_pass_info(void)
+{
+ struct bl33_info *info = (struct bl33_info *)BL33_SHARED_DDR_BASE;
+
+ if (sizeof(*info) > BL33_SHARED_DDR_SIZE)
+ WARN("bl33 shared area not reserved\n");
+
+ info->version = BL33_INFO_VERSION;
+ info->chip.chip_id = PLAT_CHIP_ID_GET;
+ info->chip.rev_id = PLAT_CHIP_REV_GET;
+}
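+
+/*
+ * Consumer-side sketch (illustrative; assumes the same bl33_info layout
+ * from bl33_info.h is visible to BL33):
+ *
+ *   struct bl33_info *info = (struct bl33_info *)BL33_SHARED_DDR_BASE;
+ *
+ *   if (info->version == BL33_INFO_VERSION)
+ *           use_chip_info(info->chip.chip_id, info->chip.rev_id);
+ *
+ * where use_chip_info() stands in for whatever BL33 does with the data.
+ */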
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, CORTEX_A72_L2CTLR_EL1)
+
+void plat_bcm_bl31_early_platform_setup(void *from_bl2,
+ bl_params_t *plat_params_from_bl2)
+{
+#ifdef BL31_BOOT_PRELOADED_SCP
+ image_info_t scp_image_info;
+
+ scp_image_info.image_base = PRELOADED_SCP_BASE;
+ scp_image_info.image_size = PRELOADED_SCP_SIZE;
+ plat_bcm_bl2_plat_handle_scp_bl2(&scp_image_info);
+#endif
+ /*
+ * In BL31, logs are saved to DDR and we have much larger space to
+ * store logs. We can now afford to save all logs >= the 'INFO' level
+ */
+ bcm_elog_init((void *)BCM_ELOG_BL31_BASE, BCM_ELOG_BL31_SIZE,
+ LOG_LEVEL_INFO);
+
+ INFO("L2CTLR = 0x%lx\n", read_l2ctlr_el1());
+
+ brcm_timer_sync_init();
+
+ brcm_stingray_dma_pl330_init();
+
+ brcm_stingray_dma_pl330_meminit();
+
+ brcm_stingray_spi_pl022_init(APBS_IDM_IDM_RESET_CONTROL);
+
+#ifdef USE_AMAC
+ brcm_stingray_amac_init();
+#endif
+
+ brcm_stingray_sdio_init();
+
+#ifdef NCSI_IO_DRIVE_STRENGTH_MA
+ brcm_stingray_ncsi_init();
+#endif
+
+#ifdef USE_USB
+ xhci_phy_init();
+#endif
+
+#ifdef USE_SATA
+ brcm_stingray_sata_init();
+#else
+ poweroff_sata_pll();
+#endif
+
+ ccn_pre_init();
+
+ brcm_fsx_init();
+
+ brcm_stingray_smmu_init();
+
+ brcm_stingray_pka_meminit();
+
+ brcm_stingray_crmu_access_init();
+
+ brcm_stingray_scr_init();
+
+ brcm_stingray_hsls_tzpcprot_init();
+
+#ifdef USE_I2S
+ brcm_stingray_audio_init();
+#endif
+
+ ccn_post_init();
+
+ paxb_init();
+
+ paxc_init();
+
+#ifndef BL31_BOOT_PRELOADED_SCP
+ crmu_init();
+#endif
+
+ /* Note: this should be the last thing, because FS4 GPV
+ * registers only work after the FS4 block
+ * (i.e. crypto, raid, cop) is out of reset.
+ */
+ brcm_stingray_security_init();
+
+ brcm_gpio_pad_ns_init();
+
+#ifndef USE_DDR
+ brcm_stingray_sram_ns_init();
+#endif
+
+#ifdef BL31_FORCE_CPU_FULL_FREQ
+ bcm_set_ihost_pll_freq(0x0, PLL_FREQ_FULL);
+#endif
+
+ brcm_stingray_gain_qspi_control();
+
+#ifdef USE_PAXC
+ /*
+ * Check that the handshake has occurred and report ChiMP status.
+ * This is required. Otherwise (especially on Palladium)
+ * Linux might reach the PCIe stage while ChiMP has not
+ * yet booted. Note that the nic_mode case has already
+ * been handled above.
+ */
+ if ((boot_source_get() != BOOT_SOURCE_QSPI) &&
+ (!bcm_chimp_is_nic_mode()) &&
+ (!bcm_chimp_wait_handshake())
+ ) {
+ /* Does ChiMP report an error ? */
+ uint32_t err;
+
+ err = bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_STAT_REG);
+ if ((err & CHIMP_ERROR_MASK) == 0)
+ /* ChiMP has not booted yet, but no error reported */
+ WARN("ChiMP not booted yet, but no error reported.\n");
+ }
+
+#if DEBUG
+ if (boot_source_get() != BOOT_SOURCE_QSPI)
+ INFO("Current ChiMP Status: 0x%x; bpe_mod reg: 0x%x\n"
+ "fastboot register: 0x%x; handshake register 0x%x\n",
+ bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_STAT_REG),
+ bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_MODE_REG),
+ bcm_chimp_read_ctrl(CHIMP_REG_CTRL_FSTBOOT_PTR_REG),
+ bcm_chimp_read(CHIMP_REG_ECO_RESERVED));
+#endif /* DEBUG */
+#endif
+
+#ifdef FS4_DISABLE_CLOCK
+ flush_dcache_range(
+ PLAT_BRCM_TRUSTED_SRAM_BASE,
+ PLAT_BRCM_TRUSTED_SRAM_SIZE);
+ fs4_disable_clocks(true, true, true);
+#endif
+
+ /* pass information to BL33 through shared DDR region */
+ bcm_bl33_pass_info();
+
+ /*
+ * We are not yet at the end of BL31, but we can stop log here so we do
+ * not need to add 'bcm_elog_exit' to the standard BL31 code. The
+ * benefit of capturing BL31 logs after this is very minimal in a
+ * production system
+ */
+ bcm_elog_exit();
+
+#if !BRCM_DISABLE_TRUSTED_WDOG
+ /*
+ * Secure watchdog was started earlier in BL2, now it's time to stop
+ * it
+ */
+ sp805_stop(ARM_SP805_TWDG_BASE);
+#endif
+}
diff --git a/plat/brcm/board/stingray/src/brcm_pm_ops.c b/plat/brcm/board/stingray/src/brcm_pm_ops.c
new file mode 100644
index 0000000..5e07fac
--- /dev/null
+++ b/plat/brcm/board/stingray/src/brcm_pm_ops.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2017 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/ccn.h>
+#include <lib/bakery_lock.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <lib/spinlock.h>
+
+#include <brcm_scpi.h>
+#include <chimp.h>
+#include <cmn_plat_util.h>
+#include <plat_brcm.h>
+#include <platform_def.h>
+#include <sr_utils.h>
+
+#include "m0_cfg.h"
+
+
+#define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0])
+#define CLUSTER_PWR_STATE(state) \
+ ((state)->pwr_domain_state[MPIDR_AFFLVL1])
+#define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL2])
+
+#define VENDOR_RST_TYPE_SHIFT 4
+
+#if HW_ASSISTED_COHERENCY
+/*
+ * On systems where participant CPUs are cache-coherent, we can use spinlocks
+ * instead of bakery locks.
+ */
+spinlock_t event_lock;
+#define event_lock_get(_lock) spin_lock(&_lock)
+#define event_lock_release(_lock) spin_unlock(&_lock)
+
+#else
+/*
+ * Use bakery locks for state coordination as not all participants are
+ * cache coherent now.
+ */
+DEFINE_BAKERY_LOCK(event_lock);
+#define event_lock_get(_lock) bakery_lock_get(&_lock)
+#define event_lock_release(_lock) bakery_lock_release(&_lock)
+#endif
+
+static int brcm_pwr_domain_on(u_register_t mpidr)
+{
+ /*
+ * SCP takes care of powering up parent power domains so we
+ * only need to care about level 0
+ */
+ scpi_set_brcm_power_state(mpidr, scpi_power_on, scpi_power_on,
+ scpi_power_on);
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Handler called when a power level has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from. This handler would never be invoked with
+ * the system power domain uninitialized as either the primary would have taken
+ * care of it as part of cold boot or the first core awakened from system
+ * suspend would have already initialized it.
+ ******************************************************************************/
+static void brcm_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr());
+
+ /* Assert that the system power domain need not be initialized */
+ assert(SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_RUN);
+
+ assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);
+
+ /*
+ * Perform the common cluster specific operations i.e enable coherency
+ * if this cluster was off.
+ */
+ if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF) {
+ INFO("Cluster #%lu entering to snoop/dvm domain\n", cluster_id);
+ ccn_enter_snoop_dvm_domain(1 << cluster_id);
+ }
+
+ /* Program the gic per-cpu distributor or re-distributor interface */
+ plat_brcm_gic_pcpu_init();
+
+ /* Enable the gic cpu interface */
+ plat_brcm_gic_cpuif_enable();
+}
+
+static void brcm_power_down_common(void)
+{
+ unsigned int standbywfil2, standbywfi;
+ uint64_t mpidr = read_mpidr_el1();
+
+ switch (MPIDR_AFFLVL1_VAL(mpidr)) {
+ case 0x0:
+ standbywfi = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFI;
+ standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFIL2;
+ break;
+ case 0x1:
+ standbywfi = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFI;
+ standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFIL2;
+ break;
+ case 0x2:
+ standbywfi = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFI;
+ standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFIL2;
+ break;
+ case 0x3:
+ standbywfi = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFI;
+ standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFIL2;
+ break;
+ default:
+ ERROR("Invalid cluster #%" PRIx64 "\n", MPIDR_AFFLVL1_VAL(mpidr));
+ return;
+ }
+ /* Clear the WFI status bit */
+ event_lock_get(event_lock);
+ mmio_setbits_32(CDRU_PROC_EVENT_CLEAR,
+ (1 << (standbywfi + MPIDR_AFFLVL0_VAL(mpidr))) |
+ (1 << standbywfil2));
+ event_lock_release(event_lock);
+}
+
+/*
+ * Helper function to inform power down state to SCP.
+ */
+static void brcm_scp_suspend(const psci_power_state_t *target_state)
+{
+ uint32_t cluster_state = scpi_power_on;
+ uint32_t system_state = scpi_power_on;
+
+ /* Check if power down at system power domain level is requested */
+ if (SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
+ system_state = scpi_power_retention;
+
+ /* Check if Cluster is to be turned off */
+ if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
+ cluster_state = scpi_power_off;
+
+ /*
+ * Ask the SCP to power down the appropriate components depending upon
+ * their state.
+ */
+ scpi_set_brcm_power_state(read_mpidr_el1(),
+ scpi_power_off,
+ cluster_state,
+ system_state);
+}
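+
+/*
+ * Example mapping (illustrative): a suspend request that turns the
+ * cluster off but keeps the system on results in
+ * scpi_set_brcm_power_state(mpidr, scpi_power_off, scpi_power_off,
+ * scpi_power_on).
+ */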
+
+/*
+ * Helper function to turn off a CPU power domain and its parent power domains
+ * if applicable. Since SCPI doesn't differentiate between OFF and suspend, we
+ * call the suspend helper here.
+ */
+static void brcm_scp_off(const psci_power_state_t *target_state)
+{
+ brcm_scp_suspend(target_state);
+}
+
+static void brcm_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr_el1());
+
+ assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);
+ /* Prevent interrupts from spuriously waking up this cpu */
+ plat_brcm_gic_cpuif_disable();
+
+ /* Turn redistributor off */
+ plat_brcm_gic_redistif_off();
+
+ /* If Cluster is to be turned off, disable coherency */
+ if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
+ ccn_exit_snoop_dvm_domain(1 << cluster_id);
+
+ brcm_power_down_common();
+
+ brcm_scp_off(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when the CPU power domain is about to enter standby.
+ ******************************************************************************/
+static void brcm_cpu_standby(plat_local_state_t cpu_state)
+{
+ unsigned int scr;
+
+ assert(cpu_state == PLAT_LOCAL_STATE_RET);
+
+ scr = read_scr_el3();
+ /*
+ * Enable the non-secure interrupt to wake the CPU.
+ * In GICv3 affinity routing mode, non-secure Group 1 interrupts are
+ * signalled as physical FIQ at EL3, whereas GICv2 uses physical IRQ.
+ * Enabling both bits covers GICv2 mode as well as GICv3 affinity
+ * routing mode.
+ */
+ write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
+ isb();
+ dsb();
+ wfi();
+
+ /*
+ * Restore SCR_EL3 to its original value; synchronisation of scr_el3
+ * is handled by the eret in el3_exit, which saves some execution
+ * cycles.
+ */
+ write_scr_el3(scr);
+}
+
+/*
+ * Helper function to shutdown the system via SCPI.
+ */
+static void __dead2 brcm_scp_sys_shutdown(void)
+{
+ /*
+ * Disable GIC CPU interface to prevent pending interrupt
+ * from waking up the AP from WFI.
+ */
+ plat_brcm_gic_cpuif_disable();
+
+ /* Flush and invalidate data cache */
+ dcsw_op_all(DCCISW);
+
+ /* Bring the cluster out of the coherency domain as it's going to die */
+ plat_brcm_interconnect_exit_coherency();
+
+ brcm_power_down_common();
+
+ /* Send the power down request to the SCP */
+ scpi_sys_power_state(scpi_system_shutdown);
+
+ wfi();
+ ERROR("BRCM System Off: operation not handled.\n");
+ panic();
+}
+
+/*
+ * Helper function to reset the system
+ */
+static void __dead2 brcm_scp_sys_reset(unsigned int reset_type)
+{
+ /*
+ * Disable GIC CPU interface to prevent pending interrupt
+ * from waking up the AP from WFI.
+ */
+ plat_brcm_gic_cpuif_disable();
+
+ /* Flush and invalidate data cache */
+ dcsw_op_all(DCCISW);
+
+ /* Bring the cluster out of the coherency domain as it's going to die */
+ plat_brcm_interconnect_exit_coherency();
+
+ brcm_power_down_common();
+
+ /* Send the system reset request to the SCP.
+ *
+ * As per the PSCI spec, the system power state can be:
+ * 0 -> Shutdown
+ * 1 -> Reboot - board-level reset
+ * 2 -> Reset  - SoC-level reset
+ *
+ * The spec allocates 8 bits (two nibbles) for this. One nibble is
+ * sufficient for sending the state, hence the second nibble is used
+ * for the vendor-defined reset type.
+ */
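+ /*
+ * Encoding sketch: with VENDOR_RST_TYPE_SHIFT == 4, the payload is
+ * (reset_type << 4) | scpi_system_reboot, placing the PSCI system
+ * power state in bits [3:0] and the vendor reset type in bits [7:4].
+ */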
+ scpi_sys_power_state((reset_type << VENDOR_RST_TYPE_SHIFT) |
+ scpi_system_reboot);
+
+ wfi();
+ ERROR("BRCM System Reset: operation not handled.\n");
+ panic();
+}
+
+static void __dead2 brcm_system_reset(void)
+{
+ unsigned int reset_type;
+
+ if (bcm_chimp_is_nic_mode())
+ reset_type = SOFT_RESET_L3;
+ else
+ reset_type = SOFT_SYS_RESET_L1;
+
+ brcm_scp_sys_reset(reset_type);
+}
+
+static int brcm_system_reset2(int is_vendor, int reset_type,
+ u_register_t cookie)
+{
+ if (!is_vendor) {
+ /* Architectural warm boot: only warm reset is supported */
+ reset_type = SOFT_RESET_L3;
+ } else {
+ uint32_t boot_source = (uint32_t)cookie;
+
+ boot_source &= BOOT_SOURCE_MASK;
+ brcm_stingray_set_straps(boot_source);
+ }
+ brcm_scp_sys_reset(reset_type);
+
+ /*
+ * brcm_scp_sys_reset() does not return (it is marked __dead2), but
+ * brcm_system_reset2() must still return a value to satisfy its
+ * prototype.
+ */
+ return 0;
+}
+
+static int brcm_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+ /*
+ * Check if the non secure entrypoint lies within the non
+ * secure DRAM.
+ */
+ if ((entrypoint >= BRCM_NS_DRAM1_BASE) &&
+ (entrypoint < (BRCM_NS_DRAM1_BASE + BRCM_NS_DRAM1_SIZE)))
+ return PSCI_E_SUCCESS;
+#ifdef __aarch64__
+ if ((entrypoint >= BRCM_DRAM2_BASE) &&
+ (entrypoint < (BRCM_DRAM2_BASE + BRCM_DRAM2_SIZE)))
+ return PSCI_E_SUCCESS;
+
+ if ((entrypoint >= BRCM_DRAM3_BASE) &&
+ (entrypoint < (BRCM_DRAM3_BASE + BRCM_DRAM3_SIZE)))
+ return PSCI_E_SUCCESS;
+#endif
+
+ return PSCI_E_INVALID_ADDRESS;
+}
+
+/*******************************************************************************
+ * ARM standard platform handler called to check the validity of the power state
+ * parameter.
+ ******************************************************************************/
+static int brcm_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ int pstate = psci_get_pstate_type(power_state);
+ int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+ int i;
+
+ assert(req_state);
+
+ if (pwr_lvl > PLAT_MAX_PWR_LVL)
+ return PSCI_E_INVALID_PARAMS;
+
+ /* Sanity check the requested state */
+ if (pstate == PSTATE_TYPE_STANDBY) {
+ /*
+ * It's possible to enter standby only on power level 0
+ * Ignore any other power level.
+ */
+ if (pwr_lvl != MPIDR_AFFLVL0)
+ return PSCI_E_INVALID_PARAMS;
+
+ req_state->pwr_domain_state[MPIDR_AFFLVL0] =
+ PLAT_LOCAL_STATE_RET;
+ } else {
+ for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
+ req_state->pwr_domain_state[i] =
+ PLAT_LOCAL_STATE_OFF;
+ }
+
+ /*
+ * We expect the 'state id' to be zero.
+ */
+ if (psci_get_pstate_id(power_state))
+ return PSCI_E_INVALID_PARAMS;
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Export the platform handlers via plat_brcm_psci_pm_ops. The ARM Standard
+ * platform will take care of registering the handlers with PSCI.
+ ******************************************************************************/
+plat_psci_ops_t plat_brcm_psci_pm_ops = {
+ .pwr_domain_on = brcm_pwr_domain_on,
+ .pwr_domain_on_finish = brcm_pwr_domain_on_finish,
+ .pwr_domain_off = brcm_pwr_domain_off,
+ .cpu_standby = brcm_cpu_standby,
+ .system_off = brcm_scp_sys_shutdown,
+ .system_reset = brcm_system_reset,
+ .system_reset2 = brcm_system_reset2,
+ .validate_ns_entrypoint = brcm_validate_ns_entrypoint,
+ .validate_power_state = brcm_validate_power_state,
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const struct plat_psci_ops **psci_ops)
+{
+ *psci_ops = &plat_brcm_psci_pm_ops;
+
+ /* Setup mailbox with entry point. */
+ mmio_write_64(CRMU_CFG_BASE + offsetof(M0CFG, core_cfg.rvbar),
+ sec_entrypoint);
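+ /*
+ * Assumption (sketch of the flow): the SCP/M0 firmware reads
+ * core_cfg.rvbar from this shared M0CFG block and uses it as the
+ * reset vector when it powers a core on in response to the SCPI
+ * requests above.
+ */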
+
+ return 0;
+}
diff --git a/plat/brcm/board/stingray/src/fsx.c b/plat/brcm/board/stingray/src/fsx.c
new file mode 100644
index 0000000..5725a2e
--- /dev/null
+++ b/plat/brcm/board/stingray/src/fsx.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 2019-2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <drivers/console.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <plat/common/common_def.h>
+
+#include <fsx.h>
+#include <platform_def.h>
+#include <sr_utils.h>
+
+#define FS4_IDM_IO_CONTROL_DIRECT__SRAM_CLK_EN 0
+
+#define FS4_IDM_IO_CONTROL_DIRECT__MEM_POWERON 11
+#define FS4_IDM_IO_CONTROL_DIRECT__MEM_POWEROK 12
+#define FS4_IDM_IO_CONTROL_DIRECT__MEM_ARRPOWERON 13
+#define FS4_IDM_IO_CONTROL_DIRECT__MEM_ARRPOWEROK 14
+#define FS4_IDM_IO_CONTROL_DIRECT__MEM_ISO 15
+#define FS4_IDM_IO_CONTROL_DIRECT__CLK_EN 31
+
+#define FS4_IDM_IO_STATUS__MEM_POWERON 0
+#define FS4_IDM_IO_STATUS__MEM_POWEROK 1
+#define FS4_IDM_IO_STATUS__MEM_ARRPOWERON 2
+#define FS4_IDM_IO_STATUS__MEM_ARRPOWEROK 3
+#define FS4_IDM_IO_STATUS__MEM_ALLOK 0xf
+
+#define FS4_IDM_RESET_CONTROL__RESET 0
+
+#define FSX_RINGx_BASE(__b, __i) \
+ ((__b) + (__i) * 0x10000)
+
+#define FSX_RINGx_VERSION_NUMBER(__b, __i) \
+ (FSX_RINGx_BASE(__b, __i) + 0x0)
+
+#define FSX_RINGx_MSI_DEV_ID(__b, __i) \
+ (FSX_RINGx_BASE(__b, __i) + 0x44)
+
+#define FSX_COMM_RINGx_BASE(__b, __i) \
+ ((__b) + 0x200000 + (__i) * 0x100)
+
+#define FSX_COMM_RINGx_CONTROL(__b, __i) \
+ (FSX_COMM_RINGx_BASE(__b, __i) + 0x0)
+#define FSX_COMM_RINGx_CONTROL__AXI_ID 8
+#define FSX_COMM_RINGx_CONTROL__AXI_ID_MASK 0x1f
+#define FSX_COMM_RINGx_CONTROL__PRIORITY 4
+#define FSX_COMM_RINGx_CONTROL__PRIORITY_MASK 0x7
+#define FSX_COMM_RINGx_CONTROL__AE_GROUP 0
+#define FSX_COMM_RINGx_CONTROL__AE_GROUP_MASK 0x7
+
+#define FSX_COMM_RINGx_MSI_DEV_ID(__b, __i) \
+ (FSX_COMM_RINGx_BASE(__b, __i) + 0x4)
+
+#define FSX_AEx_BASE(__b, __i) \
+ ((__b) + 0x202000 + (__i) * 0x100)
+
+#define FSX_AEx_CONTROL_REGISTER(__b, __i) \
+ (FSX_AEx_BASE(__b, __i) + 0x0)
+#define FSX_AEx_CONTROL_REGISTER__ACTIVE 4
+#define FSX_AEx_CONTROL_REGISTER__GROUP_ID 0
+#define FSX_AEx_CONTROL_REGISTER__GROUP_ID_MASK 0x7
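+
+/*
+ * Address layout sketch (derived from the macros above): ring N starts at
+ * base + N * 0x10000, its common ring config block at
+ * base + 0x200000 + N * 0x100, and AE N at base + 0x202000 + N * 0x100.
+ * For an illustrative base of 0x67000000, ring 2 would start at 0x67020000.
+ */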
+
+#define FSX_COMM_RM_RING_SECURITY_SETTING 0x0
+
+#define FSX_COMM_RM_SSID_CONTROL 0x4
+#define FSX_COMM_RM_SSID_CONTROL__RING_BITS 5
+#define FSX_COMM_RM_SSID_CONTROL__MASK 0x3ff
+
+#define FSX_COMM_RM_CONTROL_REGISTER 0x8
+#define FSX_COMM_RM_CONTROL_REGISTER__CONFIG_DONE 2
+#define FSX_COMM_RM_CONTROL_REGISTER__AE_TIMEOUT 5
+#define FSX_COMM_RM_CONTROL_REGISTER__AE_LOCKING 7
+
+#define FSX_COMM_RM_TIMER_CONTROL_0 0xc
+#define FSX_COMM_RM_TIMER_CONTROL_0__FAST 16
+#define FSX_COMM_RM_TIMER_CONTROL_0__MEDIUM 0
+
+#define FSX_COMM_RM_TIMER_CONTROL_1 0x10
+#define FSX_COMM_RM_TIMER_CONTROL_1__SLOW 16
+#define FSX_COMM_RM_TIMER_CONTROL_1__IDLE 0
+
+#define FSX_COMM_RM_BURST_BD_THRESHOLD 0x14
+#define FSX_COMM_RM_BURST_BD_THRESHOLD_LOW 0
+#define FSX_COMM_RM_BURST_BD_THRESHOLD_HIGH 16
+
+#define FSX_COMM_RM_BURST_LENGTH 0x18
+#define FSX_COMM_RM_BURST_LENGTH__FOR_DDR_ADDR_GEN 16
+#define FSX_COMM_RM_BURST_LENGTH__FOR_DDR_ADDR_GEN_MASK 0x1ff
+#define FSX_COMM_RM_BURST_LENGTH__FOR_TOGGLE 0
+#define FSX_COMM_RM_BURST_LENGTH__FOR_TOGGLE_MASK 0x1ff
+
+#define FSX_COMM_RM_FIFO_THRESHOLD 0x1c
+#define FSX_COMM_RM_FIFO_THRESHOLD__BD_FIFO_FULL 16
+#define FSX_COMM_RM_FIFO_THRESHOLD__BD_FIFO_FULL_MASK 0x1ff
+#define FSX_COMM_RM_FIFO_THRESHOLD__AE_FIFO_FULL 0
+#define FSX_COMM_RM_FIFO_THRESHOLD__AE_FIFO_FULL_MASK 0x1f
+
+#define FSX_COMM_RM_AE_TIMEOUT 0x24
+
+#define FSX_COMM_RM_RING_FLUSH_TIMEOUT 0x2c
+
+#define FSX_COMM_RM_MEMORY_CONFIGURATION 0x30
+#define FSX_COMM_RM_MEMORY_CONFIGURATION__ARRPOWERONIN 12
+#define FSX_COMM_RM_MEMORY_CONFIGURATION__ARRPOWEROKIN 13
+#define FSX_COMM_RM_MEMORY_CONFIGURATION__POWERONIN 14
+#define FSX_COMM_RM_MEMORY_CONFIGURATION__POWEROKIN 15
+
+#define FSX_COMM_RM_AXI_CONTROL 0x34
+#define FSX_COMM_RM_AXI_CONTROL__WRITE_CHANNEL_EN 28
+#define FSX_COMM_RM_AXI_CONTROL__READ_CHANNEL_EN 24
+#define FSX_COMM_RM_AXI_CONTROL__AWQOS 20
+#define FSX_COMM_RM_AXI_CONTROL__ARQOS 16
+#define FSX_COMM_RM_AXI_CONTROL__AWPROT 12
+#define FSX_COMM_RM_AXI_CONTROL__ARPROT 8
+#define FSX_COMM_RM_AXI_CONTROL__AWCACHE 4
+#define FSX_COMM_RM_AXI_CONTROL__ARCACHE 0
+
+#define FSX_COMM_RM_CONFIG_INTERRUPT_STATUS_CLEAR 0x48
+
+#define FSX_COMM_RM_GROUP_PKT_EXTENSION_SUPPORT 0xc0
+
+#define FSX_COMM_RM_AXI_READ_BURST_THRESHOLD 0xc8
+#define FSX_COMM_RM_AXI_READ_BURST_THRESHOLD__MASK 0x1ff
+#define FSX_COMM_RM_AXI_READ_BURST_THRESHOLD__MAX 16
+#define FSX_COMM_RM_AXI_READ_BURST_THRESHOLD__MIN 0
+
+#define FSX_COMM_RM_GROUP_RING_COUNT 0xcc
+
+#define FSX_COMM_RM_MAIN_HW_INIT_DONE 0x12c
+#define FSX_COMM_RM_MAIN_HW_INIT_DONE__MASK 0x1
+
+#define FSX_DMEx_BASE(__b, __i) \
+ ((__b) + (__i) * 0x1000)
+
+#define FSX_DMEx_AXI_CONTROL(__b, __i) \
+ (FSX_DMEx_BASE(__b, __i) + 0x4)
+#define FSX_DMEx_AXI_CONTROL__WRITE_CHANNEL_EN 28
+#define FSX_DMEx_AXI_CONTROL__READ_CHANNEL_EN 24
+#define FSX_DMEx_AXI_CONTROL__AWQOS 20
+#define FSX_DMEx_AXI_CONTROL__ARQOS 16
+#define FSX_DMEx_AXI_CONTROL__AWCACHE 4
+#define FSX_DMEx_AXI_CONTROL__ARCACHE 0
+
+#define FSX_DMEx_WR_FIFO_THRESHOLD(__b, __i) \
+ (FSX_DMEx_BASE(__b, __i) + 0xc)
+#define FSX_DMEx_WR_FIFO_THRESHOLD__MASK 0x3ff
+#define FSX_DMEx_WR_FIFO_THRESHOLD__MAX 10
+#define FSX_DMEx_WR_FIFO_THRESHOLD__MIN 0
+
+#define FSX_DMEx_RD_FIFO_THRESHOLD(__b, __i) \
+ (FSX_DMEx_BASE(__b, __i) + 0x14)
+#define FSX_DMEx_RD_FIFO_THRESHOLD__MASK 0x3ff
+#define FSX_DMEx_RD_FIFO_THRESHOLD__MAX 10
+#define FSX_DMEx_RD_FIFO_THRESHOLD__MIN 0
+
+#define FS6_SUB_TOP_BASE 0x66D8F800
+#define FS6_PKI_DME_RESET 0x4
+#define PKI_DME_RESET 1
+
+char *fsx_type_names[] = {
+ "fs4-raid",
+ "fs4-crypto",
+ "fs6-pki",
+};
+
+void fsx_init(eFSX_TYPE fsx_type,
+ unsigned int ring_count,
+ unsigned int dme_count,
+ unsigned int ae_count,
+ unsigned int start_stream_id,
+ unsigned int msi_dev_id,
+ uintptr_t idm_io_control_direct,
+ uintptr_t idm_reset_control,
+ uintptr_t base,
+ uintptr_t dme_base)
+{
+ int try;
+ unsigned int i, v, data;
+ uintptr_t fs4_idm_io_control_direct = idm_io_control_direct;
+ uintptr_t fs4_idm_reset_control = idm_reset_control;
+ uintptr_t fsx_comm_rm = (base + 0x203000);
+
+ VERBOSE("fsx %s init start\n", fsx_type_names[fsx_type]);
+
+ if (fsx_type == eFS4_RAID || fsx_type == eFS4_CRYPTO) {
+ /* Enable FSx engine clock */
+ VERBOSE(" - enable fsx clock\n");
+ mmio_write_32(fs4_idm_io_control_direct,
+ (1U << FS4_IDM_IO_CONTROL_DIRECT__CLK_EN));
+ udelay(500);
+
+ /* Reset FSx engine */
+ VERBOSE(" - reset fsx\n");
+ v = mmio_read_32(fs4_idm_reset_control);
+ v |= (1 << FS4_IDM_RESET_CONTROL__RESET);
+ mmio_write_32(fs4_idm_reset_control, v);
+ udelay(500);
+ v = mmio_read_32(fs4_idm_reset_control);
+ v &= ~(1 << FS4_IDM_RESET_CONTROL__RESET);
+ mmio_write_32(fs4_idm_reset_control, v);
+ } else {
+ /*
+ * The RM and AE are out of reset by default, so only the DME
+ * reset is handled here.
+ */
+ v = mmio_read_32(FS6_SUB_TOP_BASE + FS6_PKI_DME_RESET);
+ v &= ~(PKI_DME_RESET);
+ mmio_write_32(FS6_SUB_TOP_BASE + FS6_PKI_DME_RESET, v);
+ }
+
+ /* Wait for HW-init done */
+ VERBOSE(" - wait for HW-init done\n");
+ try = 10000;
+ do {
+ udelay(1);
+ data = mmio_read_32(fsx_comm_rm +
+ FSX_COMM_RM_MAIN_HW_INIT_DONE);
+ try--;
+ } while (!(data & FSX_COMM_RM_MAIN_HW_INIT_DONE__MASK) && (try > 0));
+
+ if (try <= 0)
+ ERROR("fsx_comm_rm + 0x%x: 0x%x\n",
+ FSX_COMM_RM_MAIN_HW_INIT_DONE, data);
+
+ /* Make all rings non-secured */
+ VERBOSE(" - make all rings non-secured\n");
+ v = 0xffffffff;
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_RING_SECURITY_SETTING, v);
+
+ /* Set start stream-id for rings */
+ VERBOSE(" - set start stream-id for rings to 0x%x\n",
+ start_stream_id);
+ v = start_stream_id >> FSX_COMM_RM_SSID_CONTROL__RING_BITS;
+ v &= FSX_COMM_RM_SSID_CONTROL__MASK;
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_SSID_CONTROL, v);
+
+ /* Set timer configuration */
+ VERBOSE(" - set timer configuration\n");
+ v = 0x0271 << FSX_COMM_RM_TIMER_CONTROL_0__MEDIUM;
+ v |= (0x0138 << FSX_COMM_RM_TIMER_CONTROL_0__FAST);
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_TIMER_CONTROL_0, v);
+ v = 0x09c4 << FSX_COMM_RM_TIMER_CONTROL_1__IDLE;
+ v |= (0x04e2 << FSX_COMM_RM_TIMER_CONTROL_1__SLOW);
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_TIMER_CONTROL_1, v);
+ v = 0x0000f424;
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_RING_FLUSH_TIMEOUT, v);
+
+ /* Set burst length and fifo threshold */
+ VERBOSE(" - set burst length, fifo and bd threshold\n");
+ v = 0x0;
+ v |= (0x8 << FSX_COMM_RM_BURST_LENGTH__FOR_DDR_ADDR_GEN);
+ v |= (0x8 << FSX_COMM_RM_BURST_LENGTH__FOR_TOGGLE);
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_BURST_LENGTH, v);
+ v = 0x0;
+ v |= (0x67 << FSX_COMM_RM_FIFO_THRESHOLD__BD_FIFO_FULL);
+ v |= (0x18 << FSX_COMM_RM_FIFO_THRESHOLD__AE_FIFO_FULL);
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_FIFO_THRESHOLD, v);
+ v = 0x0;
+ v |= (0x8 << FSX_COMM_RM_BURST_BD_THRESHOLD_LOW);
+ v |= (0x8 << FSX_COMM_RM_BURST_BD_THRESHOLD_HIGH);
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_BURST_BD_THRESHOLD, v);
+
+ /* Set memory configuration */
+ VERBOSE(" - set memory configuration\n");
+ v = 0x0;
+ v |= (1 << FSX_COMM_RM_MEMORY_CONFIGURATION__POWERONIN);
+ v |= (1 << FSX_COMM_RM_MEMORY_CONFIGURATION__POWEROKIN);
+ v |= (1 << FSX_COMM_RM_MEMORY_CONFIGURATION__ARRPOWERONIN);
+ v |= (1 << FSX_COMM_RM_MEMORY_CONFIGURATION__ARRPOWEROKIN);
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_MEMORY_CONFIGURATION, v);
+
+ /* AXI configuration for RM */
+ v = 0;
+ v |= (0x1 << FSX_COMM_RM_AXI_CONTROL__WRITE_CHANNEL_EN);
+ v |= (0x1 << FSX_COMM_RM_AXI_CONTROL__READ_CHANNEL_EN);
+ v |= (0xe << FSX_COMM_RM_AXI_CONTROL__AWQOS);
+ v |= (0xa << FSX_COMM_RM_AXI_CONTROL__ARQOS);
+ v |= (0x2 << FSX_COMM_RM_AXI_CONTROL__AWPROT);
+ v |= (0x2 << FSX_COMM_RM_AXI_CONTROL__ARPROT);
+ v |= (0xf << FSX_COMM_RM_AXI_CONTROL__AWCACHE);
+ v |= (0xf << FSX_COMM_RM_AXI_CONTROL__ARCACHE);
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_AXI_CONTROL, v);
+ VERBOSE(" - set AXI control = 0x%x\n",
+ mmio_read_32(fsx_comm_rm + FSX_COMM_RM_AXI_CONTROL));
+ v = 0x0;
+ v |= (0x10 << FSX_COMM_RM_AXI_READ_BURST_THRESHOLD__MAX);
+ v |= (0x10 << FSX_COMM_RM_AXI_READ_BURST_THRESHOLD__MIN);
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_AXI_READ_BURST_THRESHOLD, v);
+ VERBOSE(" - set AXI read burst threshold = 0x%x\n",
+ mmio_read_32(fsx_comm_rm + FSX_COMM_RM_AXI_READ_BURST_THRESHOLD));
+
+ /*
+ * Configure the group ring count for all groups. By default,
+ * extended packets are scheduled on all AEs/DMEs in a group.
+ */
+ v = (dme_count & 0xf) << 0;
+ v |= (dme_count & 0xf) << 4;
+ v |= (dme_count & 0xf) << 8;
+ v |= (dme_count & 0xf) << 12;
+ v |= (dme_count & 0xf) << 16;
+ v |= (dme_count & 0xf) << 20;
+ v |= (dme_count & 0xf) << 24;
+ v |= (dme_count & 0xf) << 28;
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_GROUP_RING_COUNT, v);
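+ /*
+ * Worked example: dme_count = 2 writes 0x2 into each of the eight
+ * 4-bit group fields, i.e. GROUP_RING_COUNT = 0x22222222.
+ */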
+
+ /*
+ * Due to a HW issue, spurious interrupts are generated. To work
+ * around this, SW must clear the config status interrupts before
+ * setting CONFIG_DONE.
+ */
+ mmio_write_32(fsx_comm_rm +
+ FSX_COMM_RM_CONFIG_INTERRUPT_STATUS_CLEAR,
+ 0xffffffff);
+
+ /* Configure RM control */
+ VERBOSE(" - configure RM control\n");
+ v = mmio_read_32(fsx_comm_rm + FSX_COMM_RM_CONTROL_REGISTER);
+ v |= (1 << FSX_COMM_RM_CONTROL_REGISTER__AE_LOCKING);
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_CONTROL_REGISTER, v);
+ v |= (1 << FSX_COMM_RM_CONTROL_REGISTER__CONFIG_DONE);
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_CONTROL_REGISTER, v);
+
+ /* Configure AE timeout */
+ VERBOSE(" - configure AE timeout\n");
+ v = 0x00003fff;
+ mmio_write_32(fsx_comm_rm + FSX_COMM_RM_AE_TIMEOUT, v);
+
+ /* Initialize all AEs */
+ for (i = 0; i < ae_count; i++) {
+ VERBOSE(" - initialize AE%d\n", i);
+ v = (0x1 << FSX_AEx_CONTROL_REGISTER__ACTIVE);
+ mmio_write_32(FSX_AEx_CONTROL_REGISTER(base, i), v);
+ }
+
+ /* Initialize all DMEs */
+ for (i = 0; i < dme_count; i++) {
+ VERBOSE(" - initialize DME%d\n", i);
+ v = 0;
+ v |= (0x1 << FSX_DMEx_AXI_CONTROL__WRITE_CHANNEL_EN);
+ v |= (0x1 << FSX_DMEx_AXI_CONTROL__READ_CHANNEL_EN);
+ v |= (0xe << FSX_DMEx_AXI_CONTROL__AWQOS);
+ v |= (0xa << FSX_DMEx_AXI_CONTROL__ARQOS);
+ v |= (0xf << FSX_DMEx_AXI_CONTROL__AWCACHE);
+ v |= (0xf << FSX_DMEx_AXI_CONTROL__ARCACHE);
+ mmio_write_32(FSX_DMEx_AXI_CONTROL(dme_base, i), v);
+ VERBOSE(" -- AXI_CONTROL = 0x%x\n",
+ mmio_read_32(FSX_DMEx_AXI_CONTROL(dme_base, i)));
+ v = 0;
+ v |= (0x4 << FSX_DMEx_WR_FIFO_THRESHOLD__MIN);
+ v |= (0x4 << FSX_DMEx_WR_FIFO_THRESHOLD__MAX);
+ mmio_write_32(FSX_DMEx_WR_FIFO_THRESHOLD(dme_base, i), v);
+ VERBOSE(" -- WR_FIFO_THRESHOLD = 0x%x\n",
+ mmio_read_32(FSX_DMEx_WR_FIFO_THRESHOLD(dme_base, i)));
+ v = 0;
+ v |= (0x4 << FSX_DMEx_RD_FIFO_THRESHOLD__MIN);
+ v |= (0x4 << FSX_DMEx_RD_FIFO_THRESHOLD__MAX);
+ mmio_write_32(FSX_DMEx_RD_FIFO_THRESHOLD(dme_base, i), v);
+ VERBOSE(" -- RD_FIFO_THRESHOLD = 0x%x\n",
+ mmio_read_32(FSX_DMEx_RD_FIFO_THRESHOLD(dme_base, i)));
+ }
+
+ /* Configure ring axi id and msi device id */
+ for (i = 0; i < ring_count; i++) {
+ VERBOSE(" - ring%d version=0x%x\n", i,
+ mmio_read_32(FSX_RINGx_VERSION_NUMBER(base, i)));
+ mmio_write_32(FSX_COMM_RINGx_MSI_DEV_ID(base, i),
+ msi_dev_id);
+ v = 0;
+ v |= ((i & FSX_COMM_RINGx_CONTROL__AXI_ID_MASK) <<
+ FSX_COMM_RINGx_CONTROL__AXI_ID);
+ mmio_write_32(FSX_COMM_RINGx_CONTROL(base, i), v);
+ }
+
+ INFO("fsx %s init done\n", fsx_type_names[fsx_type]);
+}
+
+void fsx_meminit(const char *name,
+ uintptr_t idm_io_control_direct,
+ uintptr_t idm_io_status)
+{
+ int try;
+ unsigned int val;
+
+ VERBOSE("fsx %s meminit start\n", name);
+
+ VERBOSE(" - arrpoweron\n");
+ mmio_setbits_32(idm_io_control_direct,
+ BIT(FS4_IDM_IO_CONTROL_DIRECT__MEM_ARRPOWERON));
+ while (!(mmio_read_32(idm_io_status) &
+ BIT(FS4_IDM_IO_STATUS__MEM_ARRPOWERON)))
+ ;
+
+ VERBOSE(" - arrpowerok\n");
+ mmio_setbits_32(idm_io_control_direct,
+ (1 << FS4_IDM_IO_CONTROL_DIRECT__MEM_ARRPOWEROK));
+ while (!(mmio_read_32(idm_io_status) &
+ BIT(FS4_IDM_IO_STATUS__MEM_ARRPOWEROK)))
+ ;
+
+ VERBOSE(" - poweron\n");
+ mmio_setbits_32(idm_io_control_direct,
+ (1 << FS4_IDM_IO_CONTROL_DIRECT__MEM_POWERON));
+ while (!(mmio_read_32(idm_io_status) &
+ BIT(FS4_IDM_IO_STATUS__MEM_POWERON)))
+ ;
+
+ VERBOSE(" - powerok\n");
+ mmio_setbits_32(idm_io_control_direct,
+ (1 << FS4_IDM_IO_CONTROL_DIRECT__MEM_POWEROK));
+ while (!(mmio_read_32(idm_io_status) &
+ BIT(FS4_IDM_IO_STATUS__MEM_POWEROK)))
+ ;
+
+ /* Final check on all power bits */
+ try = 10;
+ do {
+ val = mmio_read_32(idm_io_status);
+ if (val == FS4_IDM_IO_STATUS__MEM_ALLOK)
+ break;
+
+ /* Wait some time before re-checking */
+ mdelay(1);
+
+ try--;
+ } while (try > 0);
+
+ /* Remove memory isolation only if power-up succeeded. */
+ if (try <= 0) {
+ INFO(" - powerup failed\n");
+ } else {
+ VERBOSE(" - remove isolation\n");
+ mmio_clrbits_32(idm_io_control_direct,
+ (1 << FS4_IDM_IO_CONTROL_DIRECT__MEM_ISO));
+ VERBOSE(" - powerup done\n");
+ }
+
+ INFO("fsx %s meminit done\n", name);
+}
+
+void fs4_disable_clocks(bool disable_sram,
+ bool disable_crypto,
+ bool disable_raid)
+{
+ VERBOSE("fs4 disable clocks start\n");
+
+ if (disable_sram) {
+ VERBOSE(" - disable sram clock\n");
+ mmio_clrbits_32(FS4_SRAM_IDM_IO_CONTROL_DIRECT,
+ (1 << FS4_IDM_IO_CONTROL_DIRECT__SRAM_CLK_EN));
+ }
+
+ if (disable_crypto) {
+ VERBOSE(" - disable crypto clock\n");
+ mmio_setbits_32(CDRU_GENPLL5_CONTROL1,
+ CDRU_GENPLL5_CONTROL1__CHNL1_CRYPTO_AE_CLK);
+ }
+
+ if (disable_raid) {
+ VERBOSE(" - disable raid clock\n");
+ mmio_setbits_32(CDRU_GENPLL5_CONTROL1,
+ CDRU_GENPLL5_CONTROL1__CHNL2_RAID_AE_CLK);
+ }
+
+ if (disable_sram && disable_crypto && disable_raid) {
+ VERBOSE(" - disable root clock\n");
+ mmio_setbits_32(CDRU_GENPLL5_CONTROL1,
+ CDRU_GENPLL5_CONTROL1__CHNL0_DME_CLK);
+ mmio_setbits_32(CDRU_GENPLL2_CONTROL1,
+ CDRU_GENPLL2_CONTROL1__CHNL6_FS4_CLK);
+ }
+
+ INFO("fs4 disable clocks done\n");
+}
diff --git a/plat/brcm/board/stingray/src/ihost_pm.c b/plat/brcm/board/stingray/src/ihost_pm.c
new file mode 100644
index 0000000..9141d3e
--- /dev/null
+++ b/plat/brcm/board/stingray/src/ihost_pm.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2016 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <dmu.h>
+#include <ihost_pm.h>
+#include <platform_def.h>
+
+#define CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST1 2
+#define CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST2 1
+#define CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST3 0
+#define CDRU_MISC_RESET_CONTROL__CDRU_IH1_RESET 9
+#define CDRU_MISC_RESET_CONTROL__CDRU_IH2_RESET 8
+#define CDRU_MISC_RESET_CONTROL__CDRU_IH3_RESET 7
+#define A72_CRM_SOFTRESETN_0 0x480
+#define A72_CRM_SOFTRESETN_1 0x484
+#define A72_CRM_DOMAIN_4_CONTROL 0x810
+#define A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_DFT 3
+#define A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_MEM 6
+#define A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_I_O 0
+#define A72_CRM_SUBSYSTEM_MEMORY_CONTROL_3 0xB4C
+#define MEMORY_PDA_HI_SHIFT 0x0
+#define A72_CRM_PLL_PWR_ON 0x70
+#define A72_CRM_PLL_PWR_ON__PLL0_ISO_PLLOUT 4
+#define A72_CRM_PLL_PWR_ON__PLL0_PWRON_LDO 1
+#define A72_CRM_PLL_PWR_ON__PLL0_PWRON_PLL 0
+#define A72_CRM_SUBSYSTEM_MEMORY_CONTROL_2 0xB48
+#define A72_CRM_PLL_INTERRUPT_STATUS 0x8c
+#define A72_CRM_PLL_INTERRUPT_STATUS__PLL0_LOCK_LOST_STATUS 8
+#define A72_CRM_PLL_INTERRUPT_STATUS__PLL0_LOCK_STATUS 9
+#define A72_CRM_INTERRUPT_ENABLE 0x4
+#define A72_CRM_INTERRUPT_ENABLE__PLL0_INT_ENABLE 4
+#define A72_CRM_PLL_INTERRUPT_ENABLE 0x88
+#define A72_CRM_PLL_INTERRUPT_ENABLE__PLL0_LOCK_STATUS_INT_ENB 9
+#define A72_CRM_PLL_INTERRUPT_ENABLE__PLL0_LOCK_LOST_STATUS_INT_ENB 8
+#define A72_CRM_PLL0_CFG0_CTRL 0x120
+#define A72_CRM_PLL0_CFG1_CTRL 0x124
+#define A72_CRM_PLL0_CFG2_CTRL 0x128
+#define A72_CRM_PLL0_CFG3_CTRL 0x12C
+#define A72_CRM_CORE_CONFIG_DBGCTRL__DBGROMADDRV 0
+#define A72_CRM_CORE_CONFIG_DBGCTRL 0xD50
+#define A72_CRM_CORE_CONFIG_DBGROM_LO 0xD54
+#define A72_CRM_CORE_CONFIG_DBGROM_HI 0xD58
+#define A72_CRM_SUBSYSTEM_CONFIG_1__DBGL1RSTDISABLE 2
+#define A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN 0
+#define A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN 1
+#define A72_CRM_AXI_CLK_DESC 0x304
+#define A72_CRM_ACP_CLK_DESC 0x308
+#define A72_CRM_ATB_CLK_DESC 0x30C
+#define A72_CRM_PCLKDBG_DESC 0x310
+#define A72_CRM_CLOCK_MODE_CONTROL 0x40
+#define A72_CRM_CLOCK_MODE_CONTROL__CLK_CHANGE_TRIGGER 0
+#define A72_CRM_CLOCK_CONTROL_0 0x200
+#define A72_CRM_CLOCK_CONTROL_0__ARM_HW_SW_ENABLE_SEL 0
+#define A72_CRM_CLOCK_CONTROL_0__AXI_HW_SW_ENABLE_SEL 2
+#define A72_CRM_CLOCK_CONTROL_0__ACP_HW_SW_ENABLE_SEL 4
+#define A72_CRM_CLOCK_CONTROL_0__ATB_HW_SW_ENABLE_SEL 6
+#define A72_CRM_CLOCK_CONTROL_0__PCLKDBG_HW_SW_ENA_SEL 8
+#define A72_CRM_CLOCK_CONTROL_1 0x204
+#define A72_CRM_CLOCK_CONTROL_1__TMON_HW_SW_ENABLE_SEL 6
+#define A72_CRM_CLOCK_CONTROL_1__APB_HW_SW_ENABLE_SEL 8
+#define A72_CRM_SOFTRESETN_0__AXI_SOFTRESETN 9
+#define A72_CRM_SOFTRESETN_0__ACP_SOFTRESETN 10
+#define A72_CRM_SOFTRESETN_0__ATB_SOFTRESETN 11
+#define A72_CRM_SOFTRESETN_0__PCLKDBG_SOFTRESETN 12
+#define A72_CRM_SOFTRESETN_0__TMON_SOFTRESETN 15
+#define A72_CRM_SOFTRESETN_0__L2_SOFTRESETN 3
+#define A72_CRM_SOFTRESETN_1__APB_SOFTRESETN 8
+
+/* core related regs */
+#define A72_CRM_DOMAIN_0_CONTROL 0x800
+#define A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_MEM 0x6
+#define A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_I_O 0x0
+#define A72_CRM_DOMAIN_1_CONTROL 0x804
+#define A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_MEM 0x6
+#define A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_I_O 0x0
+#define A72_CRM_CORE_CONFIG_RVBA0_LO 0xD10
+#define A72_CRM_CORE_CONFIG_RVBA0_MID 0xD14
+#define A72_CRM_CORE_CONFIG_RVBA0_HI 0xD18
+#define A72_CRM_CORE_CONFIG_RVBA1_LO 0xD20
+#define A72_CRM_CORE_CONFIG_RVBA1_MID 0xD24
+#define A72_CRM_CORE_CONFIG_RVBA1_HI 0xD28
+#define A72_CRM_SUBSYSTEM_CONFIG_0 0xC80
+#define A72_CRM_SUBSYSTEM_CONFIG_0__DBGPWRDUP_CFG_SHIFT 4
+#define A72_CRM_SOFTRESETN_0__COREPOR0_SOFTRESETN 4
+#define A72_CRM_SOFTRESETN_0__COREPOR1_SOFTRESETN 5
+#define A72_CRM_SOFTRESETN_1__CORE0_SOFTRESETN 0
+#define A72_CRM_SOFTRESETN_1__DEBUG0_SOFTRESETN 4
+#define A72_CRM_SOFTRESETN_1__CORE1_SOFTRESETN 1
+#define A72_CRM_SOFTRESETN_1__DEBUG1_SOFTRESETN 5
+
+#define SPROC_MEMORY_BISR 0
+
+static int cluster_power_status[PLAT_BRCM_CLUSTER_COUNT] = {CLUSTER_POWER_ON,
+ CLUSTER_POWER_OFF,
+ CLUSTER_POWER_OFF,
+ CLUSTER_POWER_OFF};
+
+void ihost_power_on_cluster(u_register_t mpidr)
+{
+ uint32_t rst, d2xs;
+ uint32_t cluster_id;
+ uint32_t ihost_base;
+#if SPROC_MEMORY_BISR
+ uint32_t bisr, cnt;
+#endif
+ uint32_t cluster0_freq_sel;
+
+ cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+ if (cluster_power_status[cluster_id] == CLUSTER_POWER_ON)
+ return;
+
+ cluster_power_status[cluster_id] = CLUSTER_POWER_ON;
+ INFO("enabling Cluster #%u\n", cluster_id);
+
+ switch (cluster_id) {
+ case 1:
+ rst = (1 << CDRU_MISC_RESET_CONTROL__CDRU_IH1_RESET);
+ d2xs = (1 << CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST1);
+#if SPROC_MEMORY_BISR
+ bisr = CRMU_BISR_PDG_MASK__CRMU_BISR_IHOST1;
+#endif
+ break;
+ case 2:
+ rst = (1 << CDRU_MISC_RESET_CONTROL__CDRU_IH2_RESET);
+ d2xs = (1 << CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST2);
+#if SPROC_MEMORY_BISR
+ bisr = CRMU_BISR_PDG_MASK__CRMU_BISR_IHOST2;
+#endif
+ break;
+ case 3:
+ rst = (1 << CDRU_MISC_RESET_CONTROL__CDRU_IH3_RESET);
+ d2xs = (1 << CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST3);
+#if SPROC_MEMORY_BISR
+ bisr = CRMU_BISR_PDG_MASK__CRMU_BISR_IHOST3;
+#endif
+ break;
+ default:
+ ERROR("Invalid cluster :%u\n", cluster_id);
+ return;
+ }
+
+ /* Releasing ihost resets */
+ mmio_setbits_32(CDRU_MISC_RESET_CONTROL, rst);
+
+ /* calculate cluster/ihost base address */
+ ihost_base = IHOST0_BASE + cluster_id * IHOST_ADDR_SPACE;
+
+ /* Remove Cluster IO isolation */
+ mmio_clrsetbits_32(ihost_base + A72_CRM_DOMAIN_4_CONTROL,
+ (1 << A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_I_O),
+ (1 << A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_DFT) |
+ (1 << A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_MEM));
+
+ /*
+ * The BISR sequence requires that all cores of the cluster have
+ * their I/O isolation removed, hence do the same here.
+ */
+ /* Remove core0 memory IO isolations */
+ mmio_clrsetbits_32(ihost_base + A72_CRM_DOMAIN_0_CONTROL,
+ (1 << A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_I_O),
+ (1 << A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_MEM));
+
+ /* Remove core1 memory IO isolations */
+ mmio_clrsetbits_32(ihost_base + A72_CRM_DOMAIN_1_CONTROL,
+ (1 << A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_I_O),
+ (1 << A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_MEM));
+
+#if SPROC_MEMORY_BISR
+ mmio_setbits_32(CRMU_BISR_PDG_MASK, (1 << bisr));
+
+ if (!(mmio_read_32(CDRU_CHIP_STRAP_DATA_LSW) &
+ (1 << CDRU_CHIP_STRAP_DATA_LSW__BISR_BYPASS_MODE))) {
+ /* BISR completion takes at most 2 usec */
+ cnt = 0;
+ while (cnt < 2) {
+ udelay(1);
+ if (mmio_read_32(CRMU_CHIP_OTPC_STATUS) &
+ (1 << CRMU_CHIP_OTPC_STATUS__OTP_BISR_LOAD_DONE))
+ break;
+ cnt++;
+ }
+ }
+
+ /* If BISR did not complete, this needs to be checked with the ASIC team */
+ if (((mmio_read_32(CRMU_CHIP_OTPC_STATUS)) &
+ (1 << CRMU_CHIP_OTPC_STATUS__OTP_BISR_LOAD_DONE)) == 0) {
+ WARN("BISR did not complete and needs to be addressed\n");
+ }
+#endif
+
+ /* PLL power up: the supply is already on, so turn on the PLL LDO/PWR */
+ mmio_write_32(ihost_base + A72_CRM_PLL_PWR_ON,
+ (1 << A72_CRM_PLL_PWR_ON__PLL0_ISO_PLLOUT) |
+ (1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_LDO) |
+ (1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_PLL));
+
+ /* 1 us per spec; doubling it to be safe */
+ udelay(2);
+
+ /* Remove PLL output ISO */
+ mmio_write_32(ihost_base + A72_CRM_PLL_PWR_ON,
+ (1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_LDO) |
+ (1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_PLL));
+
+ /*
+ * PLL0 Configuration Control Registers:
+ * these four registers drive the i_pll_ctrl[63:0] input of the PLL
+ * (16 bits per register). The values are derived from the spec
+ * (sections 8 and 10).
+ */
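+ /*
+ * Mapping sketch (assuming CFG0 corresponds to the least significant
+ * half-word): CFG0 drives i_pll_ctrl[15:0], CFG1 [31:16], CFG2
+ * [47:32] and CFG3 [63:48].
+ */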
+
+ mmio_write_32(ihost_base + A72_CRM_PLL0_CFG0_CTRL, 0x00000000);
+ mmio_write_32(ihost_base + A72_CRM_PLL0_CFG1_CTRL, 0x00008400);
+ mmio_write_32(ihost_base + A72_CRM_PLL0_CFG2_CTRL, 0x00000001);
+ mmio_write_32(ihost_base + A72_CRM_PLL0_CFG3_CTRL, 0x00000000);
+
+ /* Read the freq_sel from cluster 0, which is up already */
+ cluster0_freq_sel = bcm_get_ihost_pll_freq(0);
+ bcm_set_ihost_pll_freq(cluster_id, cluster0_freq_sel);
+
+ udelay(1);
+
+ /* Release clock source reset */
+ mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
+ (1 << A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN) |
+ (1 << A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN));
+
+ udelay(1);
+
+ /*
+ * Integer division for clocks (divider value = n + 1).
+ * These are the divisors of the ARM PLL clock frequency.
+ */
+ mmio_write_32(ihost_base + A72_CRM_AXI_CLK_DESC, 0x00000001);
+ mmio_write_32(ihost_base + A72_CRM_ACP_CLK_DESC, 0x00000001);
+ mmio_write_32(ihost_base + A72_CRM_ATB_CLK_DESC, 0x00000004);
+ mmio_write_32(ihost_base + A72_CRM_PCLKDBG_DESC, 0x0000000b);
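+ /*
+ * Worked example: descriptor 0x1 gives divide-by-2, so AXI and ACP
+ * run at PLL/2, ATB (0x4) at PLL/5 and PCLKDBG (0xb) at PLL/12.
+ */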
+
+ /*
+ * Clock change trigger - must be set for the clock source change to
+ * take effect.
+ */
+ mmio_setbits_32(ihost_base + A72_CRM_CLOCK_MODE_CONTROL,
+ (1 << A72_CRM_CLOCK_MODE_CONTROL__CLK_CHANGE_TRIGGER));
+
+ /* turn on functional clocks */
+ mmio_setbits_32(ihost_base + A72_CRM_CLOCK_CONTROL_0,
+ (3 << A72_CRM_CLOCK_CONTROL_0__ARM_HW_SW_ENABLE_SEL) |
+ (3 << A72_CRM_CLOCK_CONTROL_0__AXI_HW_SW_ENABLE_SEL) |
+ (3 << A72_CRM_CLOCK_CONTROL_0__ACP_HW_SW_ENABLE_SEL) |
+ (3 << A72_CRM_CLOCK_CONTROL_0__ATB_HW_SW_ENABLE_SEL) |
+ (3 << A72_CRM_CLOCK_CONTROL_0__PCLKDBG_HW_SW_ENA_SEL));
+
+ mmio_setbits_32(ihost_base + A72_CRM_CLOCK_CONTROL_1,
+ (3 << A72_CRM_CLOCK_CONTROL_1__TMON_HW_SW_ENABLE_SEL) |
+ (3 << A72_CRM_CLOCK_CONTROL_1__APB_HW_SW_ENABLE_SEL));
+
+ /* Program D2XS Power Down Registers */
+ mmio_setbits_32(CDRU_CCN_REGISTER_CONTROL_1, d2xs);
+
+ /* Program the Core Config Debug ROM Address Registers */
+ /* Mark the Debug ROM base address as valid */
+ mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_DBGCTRL,
+ (1 << A72_CRM_CORE_CONFIG_DBGCTRL__DBGROMADDRV));
+
+ /* Program the LO and HI parts of the CoreSight debug ROM address */
+ mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_DBGROM_LO,
+ (CORESIGHT_BASE_ADDR >> 12) & 0xffff);
+ mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_DBGROM_HI,
+ (CORESIGHT_BASE_ADDR >> 28) & 0xffff);
+
+ /*
+ * Release soft resets of different components.
+ * Order: Bus clocks --> PERIPH --> L2 --> cores
+ */
+
+ /* Bus clocks soft resets */
+ mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
+ (1 << A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN) |
+ (1 << A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN) |
+ (1 << A72_CRM_SOFTRESETN_0__AXI_SOFTRESETN) |
+ (1 << A72_CRM_SOFTRESETN_0__ACP_SOFTRESETN) |
+ (1 << A72_CRM_SOFTRESETN_0__ATB_SOFTRESETN) |
+ (1 << A72_CRM_SOFTRESETN_0__PCLKDBG_SOFTRESETN));
+
+ mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_1,
+ (1 << A72_CRM_SOFTRESETN_1__APB_SOFTRESETN));
+
+ /* Periph component softreset */
+ mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
+ (1 << A72_CRM_SOFTRESETN_0__TMON_SOFTRESETN));
+
+ /* L2 softreset */
+ mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
+ (1 << A72_CRM_SOFTRESETN_0__L2_SOFTRESETN));
+
+ /* Enable and program Satellite timer */
+ ihost_enable_satellite_timer(cluster_id);
+}
+
+void ihost_power_on_secondary_core(u_register_t mpidr, uint64_t rvbar)
+{
+ uint32_t ihost_base;
+ uint32_t coreid = MPIDR_AFFLVL0_VAL(mpidr);
+ uint32_t cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+ ihost_base = IHOST0_BASE + cluster_id * IHOST_ADDR_SPACE;
+ INFO("programming core #%u\n", coreid);
+
+ if (coreid) {
+ /* program the entry point for core1 */
+ mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA1_LO,
+ rvbar & 0xFFFF);
+ mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA1_MID,
+ (rvbar >> 16) & 0xFFFF);
+ mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA1_HI,
+ (rvbar >> 32) & 0xFFFF);
+ } else {
+ /* program the entry point for core0 */
+ mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA0_LO,
+ rvbar & 0xFFFF);
+ mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA0_MID,
+ (rvbar >> 16) & 0xFFFF);
+ mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA0_HI,
+ (rvbar >> 32) & 0xFFFF);
+ }
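+
+ /*
+ * Example (illustrative address): rvbar = 0x880000000 splits into
+ * LO = 0x0000, MID = 0x8000 and HI = 0x0008 across the three
+ * 16-bit RVBA registers.
+ */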
+
+ /* Tell debug logic which processor is up */
+ mmio_setbits_32(ihost_base + A72_CRM_SUBSYSTEM_CONFIG_0,
+ (coreid ?
+ (2 << A72_CRM_SUBSYSTEM_CONFIG_0__DBGPWRDUP_CFG_SHIFT) :
+ (1 << A72_CRM_SUBSYSTEM_CONFIG_0__DBGPWRDUP_CFG_SHIFT)));
+
+ /* releasing soft resets for IHOST core */
+ mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
+ (coreid ?
+ (1 << A72_CRM_SOFTRESETN_0__COREPOR1_SOFTRESETN) :
+ (1 << A72_CRM_SOFTRESETN_0__COREPOR0_SOFTRESETN)));
+
+ mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_1,
+ (coreid ?
+ ((1 << A72_CRM_SOFTRESETN_1__CORE1_SOFTRESETN) |
+ (1 << A72_CRM_SOFTRESETN_1__DEBUG1_SOFTRESETN)) :
+ ((1 << A72_CRM_SOFTRESETN_1__CORE0_SOFTRESETN) |
+ (1 << A72_CRM_SOFTRESETN_1__DEBUG0_SOFTRESETN))));
+}
diff --git a/plat/brcm/board/stingray/src/iommu.c b/plat/brcm/board/stingray/src/iommu.c
new file mode 100644
index 0000000..de8b995
--- /dev/null
+++ b/plat/brcm/board/stingray/src/iommu.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright (c) 2017 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <iommu.h>
+#include <platform_def.h>
+
+#define SMMU_BASE 0x64000000
+#define ARM_SMMU_MAX_NUM_CNTXT_BANK 64
+#define SMMU_CTX_BANK_IDX_SECURE_CRMU 63
+#define ARM_SMMU_NUM_SECURE_MASTER 1
+#define ARM_SMMU_NSNUMCBO (ARM_SMMU_MAX_NUM_CNTXT_BANK - \
+ ARM_SMMU_NUM_SECURE_MASTER)
+#define ARM_SMMU_NSNUMSMRGO (ARM_SMMU_MAX_NUM_CNTXT_BANK - \
+ ARM_SMMU_NUM_SECURE_MASTER)
+/* Reserved Banks. */
+#define SMMU_CTX_BANK_IDX (SMMU_CTX_BANK_IDX_SECURE_CRMU - \
+ ARM_SMMU_NUM_SECURE_MASTER)
+#define NUM_OF_SMRS 1
+
+#define STG1_WITH_STG2_BYPASS 1
+#define ARM_LPAE_PGTBL_PHYS_CRMU 0x880000000
+#define ARM_LPAE_PGTBL_PHYS 0x880200000
+#define ARM_LPAE_PGTBL_PTE_CNT 512
+#define ARM_LPAE_PTE_L1_BLOCK_SIZE 0x40000000
+#define ARM_LPAE_PTE_L1_ADDR_MASK 0x0000FFFFC0000000UL
+#define ARM_LPAE_PTE_TABLE 0x2UL
+#define ARM_LPAE_PTE_VALID 0x1UL
+#define ARM_LPAE_PTE_ATTRINDX 2
+#define ARM_LPAE_PTE_NS 5
+#define ARM_LPAE_PTE_AP 6
+#define ARM_LPAE_PTE_AP_EL1_RW 0x0
+#define ARM_LPAE_PTE_AP_EL0_RW 0x1
+#define ARM_LPAE_PTE_SH 8
+#define ARM_LPAE_PTE_SH_NON 0x0
+#define ARM_LPAE_PTE_SH_OUTER 0x2
+#define ARM_LPAE_PTE_SH_INNER 0x3
+#define ARM_LPAE_PTE_AF 10
+#define ARM_SMMU_RES_SIZE 0x80000
+
+#define ARM_LPAE_PTE_NSTABLE 0x8000000000000000UL
+#define ARM_LPAE_PTE_L1_INDEX_SHIFT 30
+#define ARM_LPAE_PTE_L1_INDEX_MASK 0x1ff
+#define ARM_LPAE_PTE_L0_INDEX_SHIFT 39
+#define ARM_LPAE_PTE_L0_INDEX_MASK 0x1ff
+#define ARM_LPAE_PTE_TABLE_MASK ~(0xfffUL)
+/* Configuration registers */
+#define ARM_SMMU_GR0_sCR0 0x0
+#define sCR0_CLIENTPD (1 << 0)
+#define sCR0_GFRE (1 << 1)
+#define sCR0_GFIE (1 << 2)
+#define sCR0_GCFGFRE (1 << 4)
+#define sCR0_GCFGFIE (1 << 5)
+#define sCR0_USFCFG (1 << 10)
+#define sCR0_VMIDPNE (1 << 11)
+#define sCR0_PTM (1 << 12)
+#define sCR0_FB (1 << 13)
+#define sCR0_VMID16EN (1 << 31)
+#define sCR0_BSU_SHIFT 14
+#define sCR0_BSU_MASK 0x3
+#define ARM_SMMU_SMMU_SCR1 0x4
+#define SCR1_NSNUMCBO_MASK 0xFF
+#define SCR1_NSNUMCBO_SHIFT 0x0
+#define SCR1_NSNUMSMRGO_MASK 0xFF00
+#define SCR1_NSNUMSMRGO_SHIFT 0x8
+
+/* Identification registers */
+#define ARM_SMMU_GR0_ID0 0x20
+#define ARM_SMMU_GR0_ID1 0x24
+#define ARM_SMMU_GR0_ID2 0x28
+#define ARM_SMMU_GR0_ID3 0x2c
+#define ARM_SMMU_GR0_ID4 0x30
+#define ARM_SMMU_GR0_ID5 0x34
+#define ARM_SMMU_GR0_ID6 0x38
+#define ARM_SMMU_GR0_ID7 0x3c
+#define ARM_SMMU_GR0_sGFSR 0x48
+#define ARM_SMMU_GR0_sGFSYNR0 0x50
+#define ARM_SMMU_GR0_sGFSYNR1 0x54
+#define ARM_SMMU_GR0_sGFSYNR2 0x58
+
+#define ID1_PAGESIZE (1U << 31)
+#define ID1_NUMPAGENDXB_SHIFT 28
+#define ID1_NUMPAGENDXB_MASK 7
+#define ID1_NUMS2CB_SHIFT 16
+#define ID1_NUMS2CB_MASK 0xff
+#define ID1_NUMCB_SHIFT 0
+#define ID1_NUMCB_MASK 0xff
+
+/* SMMU global address space */
+#define ARM_SMMU_GR0(smmu) ((smmu)->base)
+#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
+
+/* Stream mapping registers */
+#define ARM_SMMU_GR0_SMR(n) (0x800 + (n << 2))
+#define SMR_VALID (1U << 31)
+#define SMR_MASK_SHIFT 16
+#define SMR_ID_SHIFT 0
+
+#define ARM_SMMU_GR0_S2CR(n) (0xc00 + (n << 2))
+#define S2CR_CBNDX_SHIFT 0
+#define S2CR_CBNDX_MASK 0xff
+#define S2CR_TYPE_SHIFT 16
+#define S2CR_TYPE_MASK 0x3
+
+#define ARM_SMMU_GR1_CBA2R(n) (0x800 + (n << 2))
+#define CBA2R_RW64_32BIT (0 << 0)
+#define CBA2R_RW64_64BIT (1 << 0)
+#define CBA2R_VMID_SHIFT 16
+#define CBA2R_VMID_MASK 0xffff
+
+#define ARM_SMMU_GR1_CBAR(n) (0x0 + (n << 2))
+#define CBAR_VMID_SHIFT 0
+#define CBAR_VMID_MASK 0xff
+#define CBAR_S1_BPSHCFG_SHIFT 8
+#define CBAR_S1_BPSHCFG_MASK 3
+#define CBAR_S1_BPSHCFG_NSH 3
+#define CBAR_S1_MEMATTR_SHIFT 12
+#define CBAR_S1_MEMATTR_MASK 0xf
+#define CBAR_S1_MEMATTR_WB 0xf
+#define CBAR_TYPE_SHIFT 16
+#define CBAR_TYPE_MASK 0x3
+#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
+#define CBAR_IRPTNDX_SHIFT 24
+#define CBAR_IRPTNDX_MASK 0xff
+
+/* Translation context bank */
+#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
+#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
+
+#define ARM_SMMU_CB_SCTLR 0x0
+#define ARM_SMMU_CB_ACTLR 0x4
+#define ARM_SMMU_CB_RESUME 0x8
+#define ARM_SMMU_CB_TTBCR2 0x10
+#define ARM_SMMU_CB_TTBR0 0x20
+#define ARM_SMMU_CB_TTBR1 0x28
+#define ARM_SMMU_CB_TTBCR 0x30
+#define ARM_SMMU_CB_CONTEXTIDR 0x34
+#define ARM_SMMU_CB_S1_MAIR0 0x38
+#define ARM_SMMU_CB_S1_MAIR1 0x3c
+#define ARM_SMMU_CB_PAR 0x50
+#define ARM_SMMU_CB_FSR 0x58
+#define ARM_SMMU_CB_FAR 0x60
+#define ARM_SMMU_CB_FSYNR0 0x68
+#define ARM_SMMU_CB_S1_TLBIVA 0x600
+#define ARM_SMMU_CB_S1_TLBIASID 0x610
+#define ARM_SMMU_CB_S1_TLBIVAL 0x620
+#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
+#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
+#define ARM_SMMU_CB_ATS1PR 0x800
+#define ARM_SMMU_CB_ATSR 0x8f0
+
+#define SCTLR_S1_ASIDPNE (1 << 12)
+#define SCTLR_CFCFG (1 << 7)
+#define SCTLR_CFIE (1 << 6)
+#define SCTLR_CFRE (1 << 5)
+#define SCTLR_E (1 << 4)
+#define SCTLR_AFE (1 << 2)
+#define SCTLR_TRE (1 << 1)
+#define SCTLR_M (1 << 0)
+
+/* ARM LPAE configuration. */
+/**************************************************************/
+/* Register bits */
+#define ARM_32_LPAE_TCR_EAE (1 << 31)
+#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
+
+#define ARM_LPAE_TCR_EPD1 (1 << 23)
+
+#define ARM_LPAE_TCR_TG0_4K (0 << 14)
+#define ARM_LPAE_TCR_TG0_64K (1 << 14)
+#define ARM_LPAE_TCR_TG0_16K (2 << 14)
+
+#define ARM_LPAE_TCR_SH0_SHIFT 12
+#define ARM_LPAE_TCR_SH0_MASK 0x3
+#define ARM_LPAE_TCR_SH_NS 0
+#define ARM_LPAE_TCR_SH_OS 2
+#define ARM_LPAE_TCR_SH_IS 3
+
+#define ARM_LPAE_TCR_ORGN0_SHIFT 10
+#define ARM_LPAE_TCR_IRGN0_SHIFT 8
+#define ARM_LPAE_TCR_RGN_MASK 0x3
+#define ARM_LPAE_TCR_RGN_NC 0
+#define ARM_LPAE_TCR_RGN_WBWA 1
+#define ARM_LPAE_TCR_RGN_WT 2
+#define ARM_LPAE_TCR_RGN_WB 3
+
+#define ARM_LPAE_TCR_SL0_SHIFT 6
+#define ARM_LPAE_TCR_SL0_MASK 0x3
+
+#define ARM_LPAE_TCR_T0SZ_SHIFT 0
+#define ARM_LPAE_TCR_SZ_MASK 0xf
+
+#define ARM_LPAE_TCR_PS_SHIFT 16
+#define ARM_LPAE_TCR_PS_MASK 0x7
+
+#define ARM_LPAE_TCR_IPS_SHIFT 32
+#define ARM_LPAE_TCR_IPS_MASK 0x7
+
+#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
+#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
+#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
+#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
+#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
+#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
+
+#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
+#define ARM_LPAE_MAIR_ATTR_MASK 0xff
+#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
+#define ARM_LPAE_MAIR_ATTR_NC 0x44
+#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
+#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
+#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
+#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
+
+#define TTBRn_ASID_SHIFT 48
+#define TTBCR2_SEP_SHIFT 15
+#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
+#define TTBCR2_AS (1 << 4)
+#define TTBCR_T0SZ(ia_bits) (64 - (ia_bits))
+
+#define S2CR_PRIVCFG_SHIFT 24
+#define S2CR_PRIVCFG_MASK 0x3
+
+/**************************************************************/
+
+uint16_t paxc_stream_ids[] = { 0x2000 };
+
+uint16_t paxc_stream_ids_mask[] = { 0x1fff };
+uint16_t crmu_stream_ids[] = { CRMU_STREAM_ID };
+uint16_t crmu_stream_ids_mask[] = { 0x0 };
+
+enum arm_smmu_s2cr_type {
+ S2CR_TYPE_TRANS,
+ S2CR_TYPE_BYPASS,
+ S2CR_TYPE_FAULT,
+};
+
+enum arm_smmu_s2cr_privcfg {
+ S2CR_PRIVCFG_DEFAULT,
+ S2CR_PRIVCFG_DIPAN,
+ S2CR_PRIVCFG_UNPRIV,
+ S2CR_PRIVCFG_PRIV,
+};
+
+struct arm_smmu_smr {
+ uint16_t mask;
+ uint16_t id;
+ uint32_t valid;
+};
+
+struct arm_smmu_s2cr {
+ int count;
+ enum arm_smmu_s2cr_type type;
+ enum arm_smmu_s2cr_privcfg privcfg;
+ uint8_t cbndx;
+};
+
+struct arm_smmu_cfg {
+ uint8_t cbndx;
+ uint8_t irptndx;
+ uint32_t cbar;
+};
+
+struct arm_smmu_device {
+ uint8_t *base;
+ uint32_t streams;
+ unsigned long size;
+ unsigned long pgshift;
+ unsigned long va_size;
+ unsigned long ipa_size;
+ unsigned long pa_size;
+ struct arm_smmu_smr smr[NUM_OF_SMRS];
+ struct arm_smmu_s2cr s2cr[NUM_OF_SMRS];
+ struct arm_smmu_cfg cfg[NUM_OF_SMRS];
+ uint16_t *stream_ids;
+ uint16_t *stream_ids_mask;
+};
+
+void arm_smmu_enable_secure_client_port(void)
+{
+ uintptr_t smmu_base = SMMU_BASE;
+
+ mmio_clrbits_32(smmu_base, sCR0_CLIENTPD);
+}
+
+void arm_smmu_reserve_secure_cntxt(void)
+{
+ uintptr_t smmu_base = SMMU_BASE;
+
+ mmio_clrsetbits_32(smmu_base + ARM_SMMU_SMMU_SCR1,
+ (SCR1_NSNUMSMRGO_MASK | SCR1_NSNUMCBO_MASK),
+ ((ARM_SMMU_NSNUMCBO << SCR1_NSNUMCBO_SHIFT) |
+ (ARM_SMMU_NSNUMSMRGO << SCR1_NSNUMSMRGO_SHIFT)));
+}
+
+static void arm_smmu_smr_cfg(struct arm_smmu_device *smmu, uint32_t index)
+{
+ uint32_t idx = smmu->cfg[index].cbndx;
+ struct arm_smmu_smr *smr = &smmu->smr[index];
+ uint32_t reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
+
+ if (smr->valid)
+ reg |= SMR_VALID;
+
+ mmio_write_32((uintptr_t) (ARM_SMMU_GR0(smmu) +
+ ARM_SMMU_GR0_SMR(idx)), reg);
+}
+
+static void arm_smmu_s2cr_cfg(struct arm_smmu_device *smmu, uint32_t index)
+{
+ uint32_t idx = smmu->cfg[index].cbndx;
+ struct arm_smmu_s2cr *s2cr = &smmu->s2cr[index];
+
+ uint32_t reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
+ (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
+ (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
+
+ mmio_write_32((uintptr_t) (ARM_SMMU_GR0(smmu) +
+ ARM_SMMU_GR0_S2CR(idx)), reg);
+}
+
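+/*
+ * Identity-map layout (as implemented below): a single L0 table whose
+ * entries point at L1 tables placed contiguously after it; each L1 entry
+ * is a 1 GiB block descriptor. Addresses below 0x80000000 are mapped as
+ * Device memory, everything above as cacheable normal memory, with
+ * non-secure attributes applied for the PAXC domain.
+ */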
+static void smmu_set_pgtbl(struct arm_smmu_device *smmu,
+ enum iommu_domain dom,
+ uint64_t *pg_table_base)
+{
+ int i, l0_index, l1_index;
+ uint64_t addr, *pte, *l0_base, *l1_base;
+ uint64_t addr_space_limit;
+
+ if (dom == PCIE_PAXC) {
+ addr_space_limit = 0xffffffffff;
+ } else if (dom == DOMAIN_CRMU) {
+ addr_space_limit = 0xffffffff;
+ } else {
+ ERROR("dom is not supported\n");
+ return;
+ }
+
+ l0_base = pg_table_base;
+ /* clear L0 descriptors. */
+ for (i = 0; i < ARM_LPAE_PGTBL_PTE_CNT; i++)
+ l0_base[i] = 0x0;
+
+ addr = 0x0;
+ while (addr < addr_space_limit) {
+ /* find L0 pte */
+ l0_index = ((addr >> ARM_LPAE_PTE_L0_INDEX_SHIFT) &
+ ARM_LPAE_PTE_L0_INDEX_MASK);
+ l1_base = l0_base + ((l0_index + 1) * ARM_LPAE_PGTBL_PTE_CNT);
+
+ /* setup L0 pte if required */
+ pte = l0_base + l0_index;
+ if (*pte == 0x0) {
+ *pte |= ((uint64_t)l1_base & ARM_LPAE_PTE_TABLE_MASK);
+ if (dom == PCIE_PAXC)
+ *pte |= ARM_LPAE_PTE_NSTABLE;
+ *pte |= ARM_LPAE_PTE_TABLE;
+ *pte |= ARM_LPAE_PTE_VALID;
+ }
+
+ /* find L1 pte */
+ l1_index = ((addr >> ARM_LPAE_PTE_L1_INDEX_SHIFT) &
+ ARM_LPAE_PTE_L1_INDEX_MASK);
+ pte = l1_base + l1_index;
+
+ /* setup L1 pte */
+ *pte = 0x0;
+ *pte |= (addr & ARM_LPAE_PTE_L1_ADDR_MASK);
+ if (addr < 0x80000000) {
+ *pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV <<
+ ARM_LPAE_PTE_ATTRINDX);
+ if (dom == PCIE_PAXC)
+ *pte |= (1 << ARM_LPAE_PTE_NS);
+ } else {
+ *pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE <<
+ ARM_LPAE_PTE_ATTRINDX);
+ *pte |= (1 << ARM_LPAE_PTE_NS);
+ }
+ *pte |= (ARM_LPAE_PTE_AP_EL0_RW << ARM_LPAE_PTE_AP);
+ *pte |= (ARM_LPAE_PTE_SH_INNER << ARM_LPAE_PTE_SH);
+ *pte |= (1 << ARM_LPAE_PTE_AF);
+ *pte |= ARM_LPAE_PTE_VALID;
+
+ addr += ARM_LPAE_PTE_L1_BLOCK_SIZE;
+ }
+}
+
+void arm_smmu_create_identity_map(enum iommu_domain dom)
+{
+ struct arm_smmu_device iommu;
+ struct arm_smmu_device *smmu = &iommu;
+ uint32_t reg, reg2;
+ unsigned long long reg64;
+ uint32_t idx;
+ uint16_t asid;
+ unsigned int context_bank_index;
+ unsigned long long pg_table_base;
+
+ smmu->base = (uint8_t *) SMMU_BASE;
+ reg = mmio_read_32((uintptr_t) (ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_ID1));
+ smmu->pgshift = (reg & ID1_PAGESIZE) ? 16 : 12;
+ smmu->size = ARM_SMMU_RES_SIZE;
+ smmu->stream_ids = NULL;
+
+ switch (dom) {
+ case PCIE_PAXC:
+ smmu->stream_ids = &paxc_stream_ids[0];
+ smmu->stream_ids_mask = &paxc_stream_ids_mask[0];
+ smmu->streams = ARRAY_SIZE(paxc_stream_ids);
+ context_bank_index = SMMU_CTX_BANK_IDX;
+ pg_table_base = ARM_LPAE_PGTBL_PHYS;
+ break;
+ case DOMAIN_CRMU:
+ smmu->stream_ids = &crmu_stream_ids[0];
+ smmu->stream_ids_mask = &crmu_stream_ids_mask[0];
+ smmu->streams = ARRAY_SIZE(crmu_stream_ids);
+ context_bank_index = SMMU_CTX_BANK_IDX_SECURE_CRMU;
+ pg_table_base = ARM_LPAE_PGTBL_PHYS_CRMU;
+ break;
+ default:
+ ERROR("domain not supported\n");
+ return;
+ }
+
+ if (smmu->streams > NUM_OF_SMRS) {
+ INFO("can not support more than %d sids\n", NUM_OF_SMRS);
+ return;
+ }
+
+ /* set up iommu dev. */
+ for (idx = 0; idx < smmu->streams; idx++) {
+ /* S2CR. */
+ smmu->s2cr[idx].type = S2CR_TYPE_TRANS;
+ smmu->s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
+ smmu->s2cr[idx].cbndx = context_bank_index;
+ smmu->cfg[idx].cbndx = context_bank_index;
+ smmu->cfg[idx].cbar = STG1_WITH_STG2_BYPASS << CBAR_TYPE_SHIFT;
+ arm_smmu_s2cr_cfg(smmu, idx);
+
+ /* SMR. */
+ smmu->smr[idx].mask = smmu->stream_ids_mask[idx];
+ smmu->smr[idx].id = smmu->stream_ids[idx];
+ smmu->smr[idx].valid = 1;
+ arm_smmu_smr_cfg(smmu, idx);
+
+ /* CBA2R. 64-bit Translation */
+ mmio_write_32((uintptr_t) (ARM_SMMU_GR1(smmu) +
+ ARM_SMMU_GR1_CBA2R(smmu->cfg[idx].cbndx)),
+ 0x1);
+ /* CBAR.*/
+ reg = smmu->cfg[idx].cbar;
+ reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
+ (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+
+ mmio_write_32((uintptr_t) (ARM_SMMU_GR1(smmu) +
+ ARM_SMMU_GR1_CBAR(smmu->cfg[idx].cbndx)),
+ reg);
+
+ /* TTBCR. */
+ reg64 = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+ reg64 |= ARM_LPAE_TCR_TG0_4K;
+ reg64 |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ /* IAS: 40 bits */
+ reg64 |= TTBCR_T0SZ(40) << ARM_LPAE_TCR_T0SZ_SHIFT;
+ /* Disable speculative walks through TTBR1 */
+ reg64 |= ARM_LPAE_TCR_EPD1;
+ reg = (uint32_t) reg64;
+ reg2 = (uint32_t) (reg64 >> 32);
+ reg2 |= TTBCR2_SEP_UPSTREAM;
+ reg2 |= TTBCR2_AS;
+
+ mmio_write_32((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
+ ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
+ ARM_SMMU_CB_TTBCR2), reg2);
+
+ mmio_write_32((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
+ ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
+ ARM_SMMU_CB_TTBCR), reg);
+
+ /* TTBR0. */
+ asid = smmu->cfg[idx].cbndx;
+ reg64 = pg_table_base;
+ reg64 |= (unsigned long long) asid << TTBRn_ASID_SHIFT;
+
+ mmio_write_64((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
+ ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
+ ARM_SMMU_CB_TTBR0), reg64);
+ /* TTBR1. */
+ reg64 = 0;
+ reg64 |= (unsigned long long) asid << TTBRn_ASID_SHIFT;
+
+ mmio_write_64((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
+ ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
+ ARM_SMMU_CB_TTBR1), reg64);
+ /* MAIR. */
+ reg = (ARM_LPAE_MAIR_ATTR_NC
+ << ARM_LPAE_MAIR_ATTR_SHIFT
+ (ARM_LPAE_MAIR_ATTR_IDX_NC)) |
+ (ARM_LPAE_MAIR_ATTR_WBRWA <<
+ ARM_LPAE_MAIR_ATTR_SHIFT
+ (ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
+ (ARM_LPAE_MAIR_ATTR_DEVICE <<
+ ARM_LPAE_MAIR_ATTR_SHIFT
+ (ARM_LPAE_MAIR_ATTR_IDX_DEV));
+
+ mmio_write_32((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
+ ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
+ ARM_SMMU_CB_S1_MAIR0), reg);
+
+ /* MAIR1. */
+ reg = 0;
+ mmio_write_32((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
+ ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
+ ARM_SMMU_CB_S1_MAIR1), reg);
+ /* SCTLR. */
+ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
+ /* stage 1.*/
+ reg |= SCTLR_S1_ASIDPNE;
+ mmio_write_32((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
+ ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
+ ARM_SMMU_CB_SCTLR), reg);
+ }
+ smmu_set_pgtbl(smmu, dom, (uint64_t *)pg_table_base);
+}
diff --git a/plat/brcm/board/stingray/src/ncsi.c b/plat/brcm/board/stingray/src/ncsi.c
new file mode 100644
index 0000000..58ea9e2
--- /dev/null
+++ b/plat/brcm/board/stingray/src/ncsi.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019-2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include <ncsi.h>
+#include <sr_def.h>
+#include <sr_utils.h>
+
+static const char *const io_drives[] = {
+ "2mA", "4mA", "6mA", "8mA",
+ "10mA", "12mA", "14mA", "16mA"
+};
+
+void brcm_stingray_ncsi_init(void)
+{
+ unsigned int i = 0;
+ unsigned int selx = 0;
+
+#if NCSI_IO_DRIVE_STRENGTH_MA == 2
+ selx = 0x0;
+#elif NCSI_IO_DRIVE_STRENGTH_MA == 4
+ selx = 0x1;
+#elif NCSI_IO_DRIVE_STRENGTH_MA == 6
+ selx = 0x2;
+#elif NCSI_IO_DRIVE_STRENGTH_MA == 8
+ selx = 0x3;
+#elif NCSI_IO_DRIVE_STRENGTH_MA == 10
+ selx = 0x4;
+#elif NCSI_IO_DRIVE_STRENGTH_MA == 12
+ selx = 0x5;
+#elif NCSI_IO_DRIVE_STRENGTH_MA == 14
+ selx = 0x6;
+#elif NCSI_IO_DRIVE_STRENGTH_MA == 16
+ selx = 0x7;
+#else
+ ERROR("Unsupported NCSI_IO_DRIVE_STRENGTH_MA. Please check it.\n");
+ return;
+#endif
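+ /*
+ * Note: the table above is equivalent to
+ * selx = (NCSI_IO_DRIVE_STRENGTH_MA / 2) - 1 for even values 2..16.
+ */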
+ INFO("ncsi io drives: %s\n", io_drives[selx]);
+
+ for (i = 0; i < NITRO_NCSI_IOPAD_CONTROL_NUM; i++) {
+ mmio_clrsetbits_32((NITRO_NCSI_IOPAD_CONTROL_BASE + (i * 4)),
+ PAD_SELX_MASK, PAD_SELX_VALUE(selx));
+ }
+
+ INFO("ncsi init done\n");
+}
diff --git a/plat/brcm/board/stingray/src/paxb.c b/plat/brcm/board/stingray/src/paxb.c
new file mode 100644
index 0000000..89f76d0
--- /dev/null
+++ b/plat/brcm/board/stingray/src/paxb.c
@@ -0,0 +1,911 @@
+/*
+ * Copyright (c) 2016 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <paxb.h>
+#include <sr_def.h>
+#include <sr_utils.h>
+
+#define PCIE_CORE_PWR_ARR_POWERON 0x8
+#define PCIE_CORE_PWR_ARR_POWEROK 0x4
+#define PCIE_CORE_PWR_POWERON 0x2
+#define PCIE_CORE_PWR_POWEROK 0x1
+
+#define PCIE_CORE_USER_CFG (PCIE_CORE_BASE + 0x38)
+#define PCIE_PAXB_SMMU_SID_CFG (PCIE_CORE_BASE + 0x60)
+#ifdef SID_B8_D1_F1
+#define PAXB_SMMU_SID_CFG_BUS_WIDTH (0x8 << 8)
+#define PAXB_SMMU_SID_CFG_DEV_WIDTH (0x1 << 12)
+#define PAXB_SMMU_SID_CFG_FUN_WIDTH (0x1 << 16)
+#else
+#define PAXB_SMMU_SID_CFG_BUS_WIDTH (0x2 << 8)
+#define PAXB_SMMU_SID_CFG_DEV_WIDTH (0x5 << 12)
+#define PAXB_SMMU_SID_CFG_FUN_WIDTH (0x3 << 16)
+#endif
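+
+/*
+ * Note: the width fields above set how many bus/device/function bits of the
+ * PCIe requester ID form the SMMU stream ID; e.g. SID_B8_D1_F1 selects 8 bus
+ * bits, 1 device bit and 1 function bit (an interpretation based on the
+ * macro names, not on the regfile).
+ */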
+
+#define PAXB_APB_TIMEOUT_COUNT_OFFSET 0x034
+
+/* allow up to 5 ms for each power switch to stabilize */
+#define PCIE_CORE_PWR_TIMEOUT_MS 5
+
+/* wait 1 microsecond for PCIe core soft reset */
+#define PCIE_CORE_SOFT_RST_DELAY_US 1
+
+/*
+ * List of PAXB APB registers
+ */
+#define PAXB_BASE 0x48000000
+#define PAXB_BASE_OFFSET 0x4000
+#define PAXB_OFFSET(core) (PAXB_BASE + \
+ (core) * PAXB_BASE_OFFSET)
+
+#define PAXB_CLK_CTRL_OFFSET 0x000
+#define PAXB_EP_PERST_SRC_SEL_MASK (1 << 2)
+#define PAXB_EP_MODE_PERST_MASK (1 << 1)
+#define PAXB_RC_PCIE_RST_OUT_MASK (1 << 0)
+
+#define PAXB_MAX_IMAP_WINDOWS 8
+#define PAXB_IMAP_REG_WIDTH 8
+#define PAXB_IMAP0_REG_WIDTH 4
+#define PAXB_AXUSER_REG_WIDTH 4
+
+#define PAXB_CFG_IND_ADDR_OFFSET 0x120
+#define PAXB_CFG_IND_DATA_OFFSET 0x124
+#define PAXB_CFG_IND_ADDR_MASK 0x1ffc
+#define PAXB_CFG_CFG_TYPE_MASK 0x1
+
+#define PAXB_EP_CFG_ADDR_OFFSET 0x1f8
+#define PAXB_EP_CFG_DATA_OFFSET 0x1fc
+#define PAXB_EP_CFG_ADDR_MASK 0xffc
+#define PAXB_EP_CFG_TYPE_MASK 0x1
+
+#define PAXB_0_DEFAULT_IMAP 0xed0
+#define DEFAULT_ADDR_INVALID BIT(0)
+#define PAXB_0_DEFAULT_IMAP_AXUSER 0xed8
+#define PAXB_0_DEFAULT_IMAP_AXCACHE 0xedc
+#define IMAP_AXCACHE 0xff
+#define OARR_VALID BIT(0)
+#define IMAP_VALID BIT(0)
+
+#define PAXB_IMAP0_BASE_OFFSET 0xc00
+#define PAXB_IARR0_BASE_OFFSET 0xd00
+#define PAXB_IMAP0_OFFSET(idx) (PAXB_IMAP0_BASE_OFFSET + \
+ (idx) * PAXB_IMAP0_REG_WIDTH)
+#define PAXB_IMAP0_WINDOW_SIZE 0x1000
+
+#define PAXB_IMAP2_OFFSET 0xcc0
+#define PAXB_IMAP0_REGS_TYPE_OFFSET 0xcd0
+#define PAXB_IARR2_LOWER_OFFSET 0xd10
+
+#define PAXB_IMAP3_BASE_OFFSET 0xe08
+#define PAXB_IMAP3_OFFSET(idx) (PAXB_IMAP3_BASE_OFFSET + \
+ (idx) * PAXB_IMAP_REG_WIDTH)
+
+#define PAXB_IMAP3_0_AXUSER_B_OFFSET 0xe48
+#define PAXB_IMAP3_0_AXUSER_OFFSET(idx) (PAXB_IMAP3_0_AXUSER_B_OFFSET + \
+ (idx) * PAXB_AXUSER_REG_WIDTH)
+
+#define PAXB_IMAP4_BASE_OFFSET 0xe70
+#define PAXB_IMAP4_OFFSET(idx) (PAXB_IMAP4_BASE_OFFSET + \
+ (idx) * PAXB_IMAP_REG_WIDTH)
+
+#define PAXB_IMAP4_0_AXUSER_B_OFFSET 0xeb0
+#define PAXB_IMAP4_0_AXUSER_OFFSET(idx) (PAXB_IMAP4_0_AXUSER_B_OFFSET + \
+ (idx) * PAXB_AXUSER_REG_WIDTH)
+
+#define PAXB_CFG_LINK_STATUS_OFFSET 0xf0c
+#define PAXB_CFG_PHYLINKUP_MASK (1 << 3)
+#define PAXB_CFG_DL_ACTIVE_MASK (1 << 2)
+
+#define PAXB_IMAP0_0_AXUSER_OFFSET 0xf60
+#define PAXB_IMAP2_AXUSER_OFFSET 0xfe0
+
+/* cacheable write-back, allocate on both reads and writes */
+#define IMAP_ARCACHE 0x0f0
+#define IMAP_AWCACHE 0xf00
+/* normal access, nonsecure access, and data access */
+/* AWQOS:0xe and ARQOS:0xa */
+/* AWPROT:0x2 and ARPROT:0x1 */
+#define IMAP_AXUSER 0x002e002a
+
+/*
+ * List of NIC security and PIPEMUX related registers
+ */
+#define SR_PCIE_NIC_SECURITY_BASE 0x58100000
+#define NS3Z_PCIE_NIC_SECURITY_BASE 0x48100000
+
+#define GITS_TRANSLATER 0x63c30000
+
+#define VENDOR_ID 0x14e4
+#define CFG_RC_DEV_ID 0x434
+#define CFG_RC_DEV_SUBID 0x438
+#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
+#define PCI_CLASS_BRIDGE_MASK 0xffff00
+#define PCI_CLASS_BRIDGE_SHIFT 8
+#define PCI_CLASS_BRIDGE_PCI 0x0604
+
+/*
+ * List of PAXB RC configuration space registers
+ */
+
+/* first capability list entry */
+#define PCI_CAPABILITY_LIST_OFFSET 0x34
+#define PCI_CAPABILITY_SPEED_OFFSET 0xc
+#define PCI_EP_CAPABILITY_OFFSET 0x10
+
+#define CFG_RC_LINK_STATUS_CTRL_2 0x0dc
+#define CFG_RC_LINK_SPEED_SHIFT 0
+#define CFG_RC_LINK_SPEED_MASK (0xf << CFG_RC_LINK_SPEED_SHIFT)
+
+#define CFG_RC_DEVICE_CAP 0x4d4
+#define CFG_RC_DEVICE_CAP_MPS_SHIFT 0
+#define CFG_RC_DEVICE_CAP_MPS_MASK (0x7 << CFG_RC_DEVICE_CAP_MPS_SHIFT)
+/* MPS 256 bytes */
+#define CFG_RC_DEVICE_CAP_MPS_256B (0x1 << CFG_RC_DEVICE_CAP_MPS_SHIFT)
+/* MPS 512 bytes */
+#define CFG_RC_DEVICE_CAP_MPS_512B (0x2 << CFG_RC_DEVICE_CAP_MPS_SHIFT)
+
+#define CFG_RC_TL_FCIMM_NP_LIMIT 0xa10
+#define CFG_RC_TL_FCIMM_NP_VAL 0x01500000
+#define CFG_RC_TL_FCIMM_P_LIMIT 0xa14
+#define CFG_RC_TL_FCIMM_P_VAL 0x03408080
+
+#define CFG_RC_LINK_CAP 0x4dc
+#define CFG_RC_LINK_CAP_SPEED_SHIFT 0
+#define CFG_RC_LINK_CAP_SPEED_MASK (0xf << CFG_RC_LINK_CAP_SPEED_SHIFT)
+#define CFG_RC_LINK_CAP_WIDTH_SHIFT 4
+#define CFG_RC_LINK_CAP_WIDTH_MASK (0x1f << CFG_RC_LINK_CAP_WIDTH_SHIFT)
+
+#define CFG_LINK_CAP_RC 0x4f0
+#define CFG_RC_DL_ACTIVE_SHIFT 0
+#define CFG_RC_DL_ACTIVE_MASK (0x1 << CFG_RC_DL_ACTIVE_SHIFT)
+#define CFG_RC_SLOT_CLK_SHIFT 1
+#define CFG_RC_SLOT_CLK_MASK (0x1 << CFG_RC_SLOT_CLK_SHIFT)
+
+#define CFG_ROOT_CAP_RC 0x4f8
+#define CFG_ROOT_CAP_LTR_SHIFT 1
+#define CFG_ROOT_CAP_LTR_MASK (0x1 << CFG_ROOT_CAP_LTR_SHIFT)
+
+#define CFG_RC_CLKREQ_ENABLED 0x4fc
+#define CFG_RC_CLKREQ_ENABLED_SHIFT 0
+#define CFG_RC_CLKREQ_ENABLED_MASK (0x1 << CFG_RC_CLKREQ_ENABLED_SHIFT)
+
+#define CFG_RC_COEFF_ADDR 0x638
+
+#define CFG_RC_TL_CTRL_0 0x800
+#define RC_MEM_DW_CHK_MASK 0x03fe
+
+#define CFG_RC_PDL_CTRL_4 0x1010
+#define NPH_FC_INIT_SHIFT 24
+#define NPH_FC_INIT_MASK (U(0xff) << NPH_FC_INIT_SHIFT)
+#define PD_FC_INIT_SHIFT 12
+#define PD_FC_INIT_MASK (0xffff << PD_FC_INIT_SHIFT)
+
+#define CFG_RC_PDL_CTRL_5 0x1014
+#define PH_INIT_SHIFT 0
+#define PH_INIT_MASK (0xff << PH_INIT_SHIFT)
+
+#define DL_STATUS_OFFSET 0x1048
+#define PHYLINKUP BIT(13)
+
+#define PH_INIT 0x10
+#define PD_FC_INIT 0x100
+#define NPH_FC_INIT 0x8
+
+#define SRP_PH_INIT 0x7F
+#define SRP_PD_FC_INIT 0x200
+#define SRP_NPH_FC_INIT 0x7F
+
+#define CFG_ADDR_BUS_NUM_SHIFT 20
+#define CFG_ADDR_DEV_NUM_SHIFT 15
+#define CFG_ADDR_FUNC_NUM_SHIFT 12
+#define CFG_ADDR_REG_NUM_SHIFT 2
+#define CFG_ADDR_REG_NUM_MASK 0x00000ffc
+#define CFG_ADDR_CFG_TYPE_MASK 0x00000003
+
+#define DL_LINK_UP_TIMEOUT_MS 1000
+
+#define CFG_RETRY_STATUS 0xffff0001
+#define CRS_TIMEOUT_MS 5000
+
+/* create EP config data to write */
+#define DEF_BUS_NO 1 /* default bus 1 */
+#define DEF_SLOT_NO 0 /* default slot 0 */
+#define DEF_FN_NO 0 /* default fn 0 */
+
+#define EP_CONFIG_VAL(bus_no, slot, fn, where) \
+ (((bus_no) << CFG_ADDR_BUS_NUM_SHIFT) | \
+ ((slot) << CFG_ADDR_DEV_NUM_SHIFT) | \
+ ((fn) << CFG_ADDR_FUNC_NUM_SHIFT) | \
+ ((where) & CFG_ADDR_REG_NUM_MASK) | \
+ (1 & CFG_ADDR_CFG_TYPE_MASK))
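+
+/*
+ * Worked example (values are illustrative): EP_CONFIG_VAL(1, 0, 0, 0x10)
+ * encodes bus 1, slot 0, fn 0, register offset 0x10 as a type-1 config
+ * address: (1 << 20) | (0x10 & 0xffc) | 0x1 = 0x100011.
+ */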
+
+/* PAXB security offset */
+#define PAXB_SECURITY_IDM_OFFSET 0x1c
+#define PAXB_SECURITY_APB_OFFSET 0x24
+#define PAXB_SECURITY_ECAM_OFFSET 0x3c
+
+#define paxb_get_config(type) paxb_get_##type##_config()
+
+static unsigned int paxb_sec_reg_offset[] = {
+ 0x0c, /* PAXB0 AXI */
+ 0x10, /* PAXB1 AXI */
+ 0x14, /* PAXB2 AXI */
+ 0x18, /* PAXB3 AXI */
+ 0x20, /* PAXB4 AXI */
+ 0x28, /* PAXB5 AXI */
+ 0x2c, /* PAXB6 AXI */
+ 0x30, /* PAXB7 AXI */
+ 0x24, /* PAXB APB */
+};
+
+const paxb_cfg *paxb;
+
+/*
+ * Based on the PIPEMUX strap, this function returns 1 if the PCIe core
+ * with index 'core_idx' needs to be enabled as RC
+ */
+int pcie_core_needs_enable(unsigned int core_idx)
+{
+ if (paxb->core_needs_enable)
+ return paxb->core_needs_enable(core_idx);
+
+ return 0;
+}
+
+static void pcie_set_default_tx_coeff(uint32_t core_idx, uint32_t link_width)
+{
+ unsigned int lanes = 0;
+ uint32_t data, addr;
+
+ addr = CFG_RC_COEFF_ADDR;
+ for (lanes = 0; lanes < link_width; lanes = lanes + 2) {
+ data = paxb_rc_cfg_read(core_idx, addr);
+ data &= 0xf0f0f0f0;
+ data |= (7 & 0xf);
+ data |= (7 & 0xf) << 8;
+ data |= (7 & 0xf) << 16;
+ data |= (7 & 0xf) << 24;
+
+ paxb_rc_cfg_write(core_idx, addr, data);
+ addr += 4;
+ }
+}
+
+static int paxb_rc_link_init(void)
+{
+ uint32_t val, link_speed;
+ unsigned int link_width;
+ uint32_t core_idx;
+
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ link_width = paxb->get_link_width(core_idx);
+ if (!link_width) {
+ ERROR("Unsupported PIPEMUX\n");
+ return -EOPNOTSUPP;
+ }
+
+ link_speed = paxb->get_link_speed();
+ /* program RC's link cap reg to advertise proper link width */
+ val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_CAP);
+ val &= ~CFG_RC_LINK_CAP_WIDTH_MASK;
+ val |= (link_width << CFG_RC_LINK_CAP_WIDTH_SHIFT);
+ paxb_rc_cfg_write(core_idx, CFG_RC_LINK_CAP, val);
+
+ /* program RC's link cap reg to advertise proper link speed */
+ val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_CAP);
+ val &= ~CFG_RC_LINK_CAP_SPEED_MASK;
+ val |= link_speed << CFG_RC_LINK_CAP_SPEED_SHIFT;
+ paxb_rc_cfg_write(core_idx, CFG_RC_LINK_CAP, val);
+
+ /* also need to program RC's link status control register */
+ val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_STATUS_CTRL_2);
+ val &= ~(CFG_RC_LINK_SPEED_MASK);
+ val |= link_speed << CFG_RC_LINK_SPEED_SHIFT;
+ paxb_rc_cfg_write(core_idx, CFG_RC_LINK_STATUS_CTRL_2, val);
+
+#ifdef WAR_PLX_PRESET_PARITY_FAIL
+		/*
+		 * Workaround (WAR) to avoid a crash with PLX switches in
+		 * GEN3: the PLX switch does not fix parity during PRESET,
+		 * so GEN3 parity matching is disabled
+		 */
+ val = paxb_rc_cfg_read(core_idx, CFG_RC_REG_PHY_CTL_10);
+ val &= ~(PHY_CTL_10_GEN3_MATCH_PARITY);
+ paxb_rc_cfg_write(core_idx, CFG_RC_REG_PHY_CTL_10, val);
+#endif
+ pcie_set_default_tx_coeff(core_idx, link_width);
+ }
+ return 0;
+}
+
+#ifdef PAXB_LINKUP
+static void paxb_perst_ctrl(unsigned int core_idx, bool assert)
+{
+ uint32_t clk_ctrl = PAXB_OFFSET(core_idx) + PAXB_CLK_CTRL_OFFSET;
+
+ if (assert) {
+ mmio_clrbits_32(clk_ctrl, PAXB_EP_PERST_SRC_SEL_MASK |
+ PAXB_EP_MODE_PERST_MASK |
+ PAXB_RC_PCIE_RST_OUT_MASK);
+ udelay(250);
+ } else {
+ mmio_setbits_32(clk_ctrl, PAXB_RC_PCIE_RST_OUT_MASK);
+ mdelay(100);
+ }
+}
+
+static void paxb_start_link_up(void)
+{
+ unsigned int core_idx;
+ uint32_t val, timeout;
+
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ /* toggle PERST */
+ paxb_perst_ctrl(core_idx, true);
+ paxb_perst_ctrl(core_idx, false);
+
+ timeout = DL_LINK_UP_TIMEOUT_MS;
+ /* wait for Link up */
+ do {
+ val = mmio_read_32(PAXB_OFFSET(core_idx) +
+ PAXB_CFG_LINK_STATUS_OFFSET);
+ if (val & PAXB_CFG_DL_ACTIVE_MASK)
+ break;
+
+ mdelay(1);
+ } while (--timeout);
+
+ if (!timeout)
+ ERROR("PAXB core %u link is down\n", core_idx);
+ }
+}
+#endif
+
+static void pcie_core_soft_reset(unsigned int core_idx)
+{
+ uint32_t offset = core_idx * PCIE_CORE_PWR_OFFSET;
+ uintptr_t ctrl = (uintptr_t)(PCIE_CORE_SOFT_RST_CFG_BASE + offset);
+
+ /* Put PCIe core in soft reset */
+ mmio_clrbits_32(ctrl, PCIE_CORE_SOFT_RST);
+
+ /* Wait for 1 us before pulling PCIe core out of soft reset */
+ udelay(PCIE_CORE_SOFT_RST_DELAY_US);
+
+ mmio_setbits_32(ctrl, PCIE_CORE_SOFT_RST);
+}
+
+static int pcie_core_pwron_switch(uintptr_t ctrl, uintptr_t status,
+ uint32_t mask)
+{
+ uint32_t val;
+ unsigned int timeout = PCIE_CORE_PWR_TIMEOUT_MS;
+
+ /* enable switch */
+ mmio_setbits_32(ctrl, mask);
+
+ /* now wait for it to stabilize */
+ do {
+ val = mmio_read_32(status);
+ if ((val & mask) == mask)
+ return 0;
+ mdelay(1);
+ } while (--timeout);
+
+ return -EIO;
+}
+
+static int pcie_core_pwr_seq(uintptr_t ctrl, uintptr_t status)
+{
+ int ret;
+
+ /*
+ * Enable the switch with the following sequence:
+ * 1. Array weak switch output switch
+ * 2. Array strong switch
+ * 3. Weak switch output acknowledge
+ * 4. Strong switch output acknowledge
+ */
+ ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_ARR_POWERON);
+ if (ret)
+ return ret;
+
+ ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_ARR_POWEROK);
+ if (ret)
+ return ret;
+
+ ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_POWERON);
+ if (ret)
+ return ret;
+
+ ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_POWEROK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * This function enables PCIe core and PAXB memory buffer power, and then
+ * removes the PCIe core from isolation
+ */
+static int pcie_core_pwr_init(unsigned int core_idx)
+{
+ int ret;
+ uint32_t offset = core_idx * PCIE_CORE_PWR_OFFSET;
+ uintptr_t ctrl, status;
+
+ /* enable mem power to PCIe core */
+ ctrl = (uintptr_t)(PCIE_CORE_MEM_PWR_BASE + offset);
+ status = (uintptr_t)(PCIE_CORE_MEM_PWR_STATUS_BASE + offset);
+ ret = pcie_core_pwr_seq(ctrl, status);
+ if (ret) {
+ ERROR("PCIe core mem power failed\n");
+ return ret;
+ }
+
+ /* now enable mem power to PAXB wrapper */
+ ctrl = (uintptr_t)(PCIE_PAXB_MEM_PWR_BASE + offset);
+ status = (uintptr_t)(PCIE_PAXB_MEM_PWR_STATUS_BASE + offset);
+ ret = pcie_core_pwr_seq(ctrl, status);
+ if (ret) {
+ ERROR("PAXB mem power failed\n");
+ return ret;
+ }
+
+ /* now remove power isolation */
+ ctrl = (uintptr_t)(PCIE_CORE_ISO_CFG_BASE + offset);
+ mmio_clrbits_32(ctrl, PCIE_CORE_ISO | PCIE_CORE_MEM_ISO);
+
+ return 0;
+}
+
+static void pcie_ss_reset(void)
+{
+ mmio_setbits_32(CDRU_MISC_RESET_CONTROL,
+ 1 << CDRU_MISC_RESET_CONTROL__CDRU_PCIE_RESET_N_R);
+}
+
+/*
+ * This function reads the PIPEMUX strap, figures out all the PCIe cores that
+ * need to be enabled and enables the mem power for those cores
+ */
+static int pcie_cores_init(void)
+{
+ int ret = 0;
+ uint32_t core_idx;
+
+ if (paxb->pipemux_init) {
+ ret = paxb->pipemux_init();
+ if (ret)
+ return ret;
+ }
+
+ /* bring PCIe subsystem out of reset */
+ pcie_ss_reset();
+
+ /* power up all PCIe cores that will be used as RC */
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ ret = pcie_core_pwr_init(core_idx);
+ if (ret) {
+ ERROR("PCIe core %u power up failed\n", core_idx);
+ return ret;
+ }
+
+ pcie_core_soft_reset(core_idx);
+
+ VERBOSE("PCIe core %u is powered up\n", core_idx);
+ }
+
+ return ret;
+}
+
+void paxb_rc_cfg_write(unsigned int core_idx, unsigned int where,
+ uint32_t val)
+{
+ mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_ADDR_OFFSET,
+ (where & PAXB_CFG_IND_ADDR_MASK) |
+ PAXB_CFG_CFG_TYPE_MASK);
+ mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_DATA_OFFSET, val);
+}
+
+unsigned int paxb_rc_cfg_read(unsigned int core_idx, unsigned int where)
+{
+ unsigned int val;
+
+ mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_ADDR_OFFSET,
+ (where & PAXB_CFG_IND_ADDR_MASK) |
+ PAXB_CFG_CFG_TYPE_MASK);
+ val = mmio_read_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_DATA_OFFSET);
+
+ return val;
+}
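+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the original code):
+ * the indirect address/data window above makes read-modify-write of RC
+ * config registers a recurring pattern, which could be wrapped as follows.
+ */
+static inline void paxb_rc_cfg_clrsetbits(unsigned int core_idx,
+					  unsigned int where,
+					  uint32_t clr, uint32_t set)
+{
+	uint32_t val;
+
+	val = paxb_rc_cfg_read(core_idx, where);
+	val &= ~clr;
+	val |= set;
+	paxb_rc_cfg_write(core_idx, where, val);
+}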
+
+static void paxb_cfg_mps(void)
+{
+ uint32_t val, core_idx, mps;
+
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ val = paxb_rc_cfg_read(core_idx, CFG_RC_DEVICE_CAP);
+ val &= ~CFG_RC_DEVICE_CAP_MPS_MASK;
+ mps = CFG_RC_DEVICE_CAP_MPS_256B;
+ if (core_idx == 0 || core_idx == 1 ||
+ core_idx == 6 || core_idx == 7) {
+ mps = CFG_RC_DEVICE_CAP_MPS_512B;
+ }
+ val |= mps;
+ paxb_rc_cfg_write(core_idx, CFG_RC_DEVICE_CAP, val);
+ }
+}
+
+static void paxb_cfg_dev_id(void)
+{
+ uint32_t val, core_idx;
+ uint32_t device_id;
+
+ device_id = paxb->device_id;
+
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ /* Set Core in RC mode */
+ mmio_setbits_32(PCIE_CORE_USER_CFG +
+ (core_idx * PCIE_CORE_PWR_OFFSET), 1);
+
+ /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
+ val = paxb_rc_cfg_read(core_idx, PCI_BRIDGE_CTRL_REG_OFFSET);
+ val &= ~PCI_CLASS_BRIDGE_MASK;
+ val |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
+ paxb_rc_cfg_write(core_idx, PCI_BRIDGE_CTRL_REG_OFFSET, val);
+
+ val = (VENDOR_ID << 16) | device_id;
+ paxb_rc_cfg_write(core_idx, CFG_RC_DEV_ID, val);
+
+ val = (device_id << 16) | VENDOR_ID;
+ paxb_rc_cfg_write(core_idx, CFG_RC_DEV_SUBID, val);
+ }
+}
+
+static void paxb_cfg_tgt_trn(void)
+{
+ uint32_t val, core_idx;
+
+	/*
+	 * Disable all mem Rd/Wr size checks so that target read/write
+	 * transactions may exceed the stipulated DW count. As a result, the
+	 * PAXB root complex will not abort read/write transactions beyond
+	 * the stipulated limit
+	 */
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ val = paxb_rc_cfg_read(core_idx, CFG_RC_TL_CTRL_0);
+ val &= ~(RC_MEM_DW_CHK_MASK);
+ paxb_rc_cfg_write(core_idx, CFG_RC_TL_CTRL_0, val);
+ }
+}
+
+static void paxb_cfg_pdl_ctrl(void)
+{
+ uint32_t val, core_idx;
+ uint32_t nph, ph, pd;
+
+ /* increase the credit counter to 4 for non-posted header */
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ nph = NPH_FC_INIT;
+ ph = PH_INIT;
+ pd = PD_FC_INIT;
+
+ if (core_idx == 0 || core_idx == 1 ||
+ core_idx == 6 || core_idx == 7) {
+ nph = SRP_NPH_FC_INIT;
+ ph = SRP_PH_INIT;
+ pd = SRP_PD_FC_INIT;
+ }
+ val = paxb_rc_cfg_read(core_idx, CFG_RC_PDL_CTRL_4);
+ val &= ~NPH_FC_INIT_MASK;
+ val &= ~PD_FC_INIT_MASK;
+ val = val | (nph << NPH_FC_INIT_SHIFT);
+ val = val | (pd << PD_FC_INIT_SHIFT);
+ paxb_rc_cfg_write(core_idx, CFG_RC_PDL_CTRL_4, val);
+
+ val = paxb_rc_cfg_read(core_idx, CFG_RC_PDL_CTRL_5);
+ val &= ~PH_INIT_MASK;
+ val = val | (ph << PH_INIT_SHIFT);
+ paxb_rc_cfg_write(core_idx, CFG_RC_PDL_CTRL_5, val);
+
+		/*
+		 * The ASIC team is to provide more optimized values after
+		 * further investigation. Until then, these settings are
+		 * important to achieve similar performance on all slots.
+		 */
+ paxb_rc_cfg_write(core_idx, CFG_RC_TL_FCIMM_NP_LIMIT,
+ CFG_RC_TL_FCIMM_NP_VAL);
+
+ paxb_rc_cfg_write(core_idx, CFG_RC_TL_FCIMM_P_LIMIT,
+ CFG_RC_TL_FCIMM_P_VAL);
+ }
+}
+
+static void paxb_cfg_clkreq(void)
+{
+ uint32_t val, core_idx;
+
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ val = paxb_rc_cfg_read(core_idx, CFG_RC_CLKREQ_ENABLED);
+ val &= ~CFG_RC_CLKREQ_ENABLED_MASK;
+ paxb_rc_cfg_write(core_idx, CFG_RC_CLKREQ_ENABLED, val);
+ }
+}
+
+static void paxb_cfg_dl_active(bool enable)
+{
+ uint32_t val, core_idx;
+
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ val = paxb_rc_cfg_read(core_idx, CFG_LINK_CAP_RC);
+ if (enable)
+ val |= CFG_RC_DL_ACTIVE_MASK;
+ else
+ val &= ~CFG_RC_DL_ACTIVE_MASK;
+ paxb_rc_cfg_write(core_idx, CFG_LINK_CAP_RC, val);
+ }
+}
+
+static void paxb_cfg_LTR(int enable)
+{
+ uint32_t val, core_idx;
+
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ val = paxb_rc_cfg_read(core_idx, CFG_ROOT_CAP_RC);
+ if (enable)
+ val |= CFG_ROOT_CAP_LTR_MASK;
+ else
+ val &= ~CFG_ROOT_CAP_LTR_MASK;
+ paxb_rc_cfg_write(core_idx, CFG_ROOT_CAP_RC, val);
+ }
+}
+
+static void paxb_ib_regs_bypass(void)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < paxb->num_cores; i++) {
+ if (!pcie_core_needs_enable(i))
+ continue;
+
+ /* Configure Default IMAP window */
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP,
+ DEFAULT_ADDR_INVALID);
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP_AXUSER,
+ IMAP_AXUSER);
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP_AXCACHE,
+ IMAP_AXCACHE);
+
+ /* Configure MSI IMAP window */
+ mmio_setbits_32(PAXB_OFFSET(i) +
+ PAXB_IMAP0_REGS_TYPE_OFFSET,
+ 0x1);
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_IARR0_BASE_OFFSET,
+ GITS_TRANSLATER | OARR_VALID);
+ for (j = 0; j < PAXB_MAX_IMAP_WINDOWS; j++) {
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_OFFSET(j),
+ (GITS_TRANSLATER +
+ (j * PAXB_IMAP0_WINDOW_SIZE)) |
+ IMAP_VALID);
+ }
+ }
+}
+
+static void paxb_ib_regs_init(void)
+{
+ unsigned int core_idx;
+
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ /* initialize IARR2 to zero */
+ mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_IARR2_LOWER_OFFSET,
+ 0x0);
+ mmio_setbits_32(PAXB_OFFSET(core_idx) +
+ PAXB_IMAP0_REGS_TYPE_OFFSET,
+ 0x1);
+ }
+}
+
+static void paxb_cfg_apb_timeout(void)
+{
+ unsigned int core_idx;
+
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ /* allow unlimited timeout */
+ mmio_write_32(PAXB_OFFSET(core_idx) +
+ PAXB_APB_TIMEOUT_COUNT_OFFSET,
+ 0xFFFFFFFF);
+ }
+}
+
+static void paxb_smmu_cfg(void)
+{
+ unsigned int core_idx;
+ uint32_t offset;
+ uint32_t val;
+
+ for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+ if (!pcie_core_needs_enable(core_idx))
+ continue;
+
+ offset = core_idx * PCIE_CORE_PWR_OFFSET;
+ val = mmio_read_32(PCIE_PAXB_SMMU_SID_CFG + offset);
+ val &= ~(0xFFF00);
+ val |= (PAXB_SMMU_SID_CFG_FUN_WIDTH |
+ PAXB_SMMU_SID_CFG_DEV_WIDTH |
+ PAXB_SMMU_SID_CFG_BUS_WIDTH);
+ mmio_write_32(PCIE_PAXB_SMMU_SID_CFG + offset, val);
+ val = mmio_read_32(PCIE_PAXB_SMMU_SID_CFG + offset);
+ VERBOSE("smmu cfg reg 0x%x\n", val);
+ }
+}
+
+static void paxb_cfg_coherency(void)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < paxb->num_cores; i++) {
+ if (!pcie_core_needs_enable(i))
+ continue;
+
+#ifdef USE_DDR
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP2_OFFSET,
+ IMAP_ARCACHE | IMAP_AWCACHE);
+#endif
+
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_0_AXUSER_OFFSET,
+ IMAP_AXUSER);
+
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP2_AXUSER_OFFSET,
+ IMAP_AXUSER);
+
+ for (j = 0; j < PAXB_MAX_IMAP_WINDOWS; j++) {
+#ifdef USE_DDR
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP3_OFFSET(j),
+ IMAP_ARCACHE | IMAP_AWCACHE);
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP4_OFFSET(j),
+ IMAP_ARCACHE | IMAP_AWCACHE);
+#endif
+ /* zero out IMAP0 mapping windows for MSI/MSI-X */
+ mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_OFFSET(j),
+ 0x0);
+
+ mmio_write_32(PAXB_OFFSET(i) +
+ PAXB_IMAP3_0_AXUSER_OFFSET(j),
+ IMAP_AXUSER);
+ mmio_write_32(PAXB_OFFSET(i) +
+ PAXB_IMAP4_0_AXUSER_OFFSET(j),
+ IMAP_AXUSER);
+ }
+ }
+}
+
+/*
+ * This function configures all PAXB related blocks to allow non-secure access
+ */
+void paxb_ns_init(enum paxb_type type)
+{
+ unsigned int reg;
+
+ switch (type) {
+ case PAXB_SR:
+ for (reg = 0; reg < ARRAY_SIZE(paxb_sec_reg_offset); reg++) {
+
+ mmio_setbits_32(SR_PCIE_NIC_SECURITY_BASE +
+ paxb_sec_reg_offset[reg], 0x1);
+ }
+		/* Enable non-secure access to all PAXB-relevant IDM blocks */
+ mmio_setbits_32(SR_PCIE_NIC_SECURITY_BASE + PAXB_SECURITY_IDM_OFFSET,
+ 0xffff);
+ break;
+ case PAXB_NS3Z:
+ mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
+ paxb_sec_reg_offset[0], 0x1);
+ mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
+ PAXB_SECURITY_IDM_OFFSET, 0xffff);
+ mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
+ PAXB_SECURITY_APB_OFFSET, 0x7);
+ mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
+ PAXB_SECURITY_ECAM_OFFSET, 0x1);
+ break;
+ }
+}
+
+static int paxb_set_config(void)
+{
+ paxb = paxb_get_config(sr);
+ if (paxb)
+ return 0;
+
+ return -ENODEV;
+}
+
+void paxb_init(void)
+{
+ int ret;
+
+ ret = paxb_set_config();
+ if (ret)
+ return;
+
+ paxb_ns_init(paxb->type);
+
+ ret = pcie_cores_init();
+ if (ret)
+ return;
+
+ if (paxb->phy_init) {
+ ret = paxb->phy_init();
+ if (ret)
+ return;
+ }
+
+ paxb_cfg_dev_id();
+ paxb_cfg_tgt_trn();
+ paxb_cfg_pdl_ctrl();
+ if (paxb->type == PAXB_SR) {
+ paxb_ib_regs_init();
+ paxb_cfg_coherency();
+ } else
+ paxb_ib_regs_bypass();
+
+ paxb_cfg_apb_timeout();
+ paxb_smmu_cfg();
+ paxb_cfg_clkreq();
+ paxb_rc_link_init();
+
+	/* Stingray doesn't support LTR */
+ paxb_cfg_LTR(false);
+ paxb_cfg_dl_active(true);
+
+ paxb_cfg_mps();
+
+#ifdef PAXB_LINKUP
+ paxb_start_link_up();
+#endif
+ INFO("PAXB init done\n");
+}
diff --git a/plat/brcm/board/stingray/src/paxc.c b/plat/brcm/board/stingray/src/paxc.c
new file mode 100644
index 0000000..44af4b0
--- /dev/null
+++ b/plat/brcm/board/stingray/src/paxc.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2017 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include <iommu.h>
+#include <platform_def.h>
+#include <sr_utils.h>
+
+#define PAXC_BASE 0x60400000
+#define PAXC_AXI_CFG_PF 0x10
+#define PAXC_AXI_CFG_PF_OFFSET(pf) (PAXC_AXI_CFG_PF + (pf) * 4)
+#define PAXC_ARPROT_PF_CFG 0x40
+#define PAXC_AWPROT_PF_CFG 0x44
+
+#define PAXC_ARQOS_PF_CFG 0x48
+#define PAXC_ARQOS_VAL 0xaaaaaaaa
+
+#define PAXC_AWQOS_PF_CFG 0x4c
+#define PAXC_AWQOS_VAL 0xeeeeeeee
+
+#define PAXC_CFG_IND_ADDR_OFFSET 0x1f0
+#define PAXC_CFG_IND_ADDR_MASK 0xffc
+#define PAXC_CFG_IND_DATA_OFFSET 0x1f4
+
+/* offsets for PAXC root complex configuration space registers */
+
+#define PAXC_CFG_ID_OFFSET 0x434
+#define PAXC_RC_VENDOR_ID 0x14e4
+#define PAXC_RC_VENDOR_ID_SHIFT 16
+
+#define PAXC_RC_DEVICE_ID 0xd750
+
+#define PAXC_CFG_LINK_CAP_OFFSET 0x4dc
+#define PAXC_RC_LINK_CAP_SPD_SHIFT 0
+#define PAXC_RC_LINK_CAP_SPD_MASK (0xf << PAXC_RC_LINK_CAP_SPD_SHIFT)
+#define PAXC_RC_LINK_CAP_SPD 3
+#define PAXC_RC_LINK_CAP_WIDTH_SHIFT 4
+#define PAXC_RC_LINK_CAP_WIDTH_MASK (0x1f << PAXC_RC_LINK_CAP_WIDTH_SHIFT)
+#define PAXC_RC_LINK_CAP_WIDTH 16
+
+/* offsets for MHB registers */
+
+#define MHB_BASE 0x60401000
+#define MHB_MEM_PWR_STATUS_PAXC (MHB_BASE + 0x1c0)
+#define MHB_PWR_ARR_POWERON 0x8
+#define MHB_PWR_ARR_POWEROK 0x4
+#define MHB_PWR_POWERON 0x2
+#define MHB_PWR_POWEROK 0x1
+#define MHB_PWR_STATUS_MASK (MHB_PWR_ARR_POWERON | \
+ MHB_PWR_ARR_POWEROK | \
+ MHB_PWR_POWERON | \
+ MHB_PWR_POWEROK)
+
+/* max number of PFs from Nitro that PAXC sees */
+#define MAX_NR_NITRO_PF 8
+
+#ifdef EMULATION_SETUP
+static void paxc_reg_dump(void)
+{
+}
+#else
+/* total number of PAXC registers */
+#define NR_PAXC_REGS 53
+static void paxc_reg_dump(void)
+{
+ uint32_t idx, offset = 0;
+
+ VERBOSE("PAXC register dump start\n");
+ for (idx = 0; idx < NR_PAXC_REGS; idx++, offset += 4)
+ VERBOSE("offset: 0x%x val: 0x%x\n", offset,
+ mmio_read_32(PAXC_BASE + offset));
+ VERBOSE("PAXC register dump end\n");
+}
+#endif /* EMULATION_SETUP */
+
+#ifdef EMULATION_SETUP
+static void mhb_reg_dump(void)
+{
+}
+#else
+#define NR_MHB_REGS 227
+static void mhb_reg_dump(void)
+{
+ uint32_t idx, offset = 0;
+
+ VERBOSE("MHB register dump start\n");
+ for (idx = 0; idx < NR_MHB_REGS; idx++, offset += 4)
+ VERBOSE("offset: 0x%x val: 0x%x\n", offset,
+ mmio_read_32(MHB_BASE + offset));
+ VERBOSE("MHB register dump end\n");
+}
+#endif /* EMULATION_SETUP */
+
+static void paxc_rc_cfg_write(uint32_t where, uint32_t val)
+{
+ mmio_write_32(PAXC_BASE + PAXC_CFG_IND_ADDR_OFFSET,
+ where & PAXC_CFG_IND_ADDR_MASK);
+ mmio_write_32(PAXC_BASE + PAXC_CFG_IND_DATA_OFFSET, val);
+}
+
+static uint32_t paxc_rc_cfg_read(uint32_t where)
+{
+ mmio_write_32(PAXC_BASE + PAXC_CFG_IND_ADDR_OFFSET,
+ where & PAXC_CFG_IND_ADDR_MASK);
+ return mmio_read_32(PAXC_BASE + PAXC_CFG_IND_DATA_OFFSET);
+}
+
+/*
+ * Function to program PAXC root complex link capability register
+ */
+static void paxc_cfg_link_cap(void)
+{
+ uint32_t val;
+
+ val = paxc_rc_cfg_read(PAXC_CFG_LINK_CAP_OFFSET);
+ val &= ~(PAXC_RC_LINK_CAP_SPD_MASK | PAXC_RC_LINK_CAP_WIDTH_MASK);
+ val |= (PAXC_RC_LINK_CAP_SPD << PAXC_RC_LINK_CAP_SPD_SHIFT) |
+ (PAXC_RC_LINK_CAP_WIDTH << PAXC_RC_LINK_CAP_WIDTH_SHIFT);
+ paxc_rc_cfg_write(PAXC_CFG_LINK_CAP_OFFSET, val);
+}
+
+/*
+ * Function to program PAXC root complex vendor ID and device ID
+ */
+static void paxc_cfg_id(void)
+{
+ uint32_t val;
+
+ val = (PAXC_RC_VENDOR_ID << PAXC_RC_VENDOR_ID_SHIFT) |
+ PAXC_RC_DEVICE_ID;
+ paxc_rc_cfg_write(PAXC_CFG_ID_OFFSET, val);
+}
+
+void paxc_init(void)
+{
+ unsigned int pf_index;
+ unsigned int val;
+
+ val = mmio_read_32(MHB_MEM_PWR_STATUS_PAXC);
+ if ((val & MHB_PWR_STATUS_MASK) != MHB_PWR_STATUS_MASK) {
+ INFO("PAXC not powered\n");
+ return;
+ }
+
+ paxc_cfg_id();
+ paxc_cfg_link_cap();
+
+ paxc_reg_dump();
+ mhb_reg_dump();
+
+#ifdef USE_DDR
+ /*
+ * Set AWCACHE and ARCACHE to 0xff (Cacheable write-back,
+ * allocate on both reads and writes) per
+ * recommendation from the ASIC team
+ */
+ val = 0xff;
+#else
+ /* disable IO cache if non-DDR memory is used, e.g., external SRAM */
+ val = 0x0;
+#endif
+ for (pf_index = 0; pf_index < MAX_NR_NITRO_PF; pf_index++)
+ mmio_write_32(PAXC_BASE + PAXC_AXI_CFG_PF_OFFSET(pf_index),
+ val);
+
+ /*
+ * Set ARPROT and AWPROT to enable non-secure access from
+ * PAXC to all PFs, PF0 to PF7
+ */
+ mmio_write_32(PAXC_BASE + PAXC_ARPROT_PF_CFG, 0x22222222);
+ mmio_write_32(PAXC_BASE + PAXC_AWPROT_PF_CFG, 0x22222222);
+
+ mmio_write_32(PAXC_BASE + PAXC_ARQOS_PF_CFG, PAXC_ARQOS_VAL);
+ mmio_write_32(PAXC_BASE + PAXC_AWQOS_PF_CFG, PAXC_AWQOS_VAL);
+
+ INFO("PAXC init done\n");
+}
+
+/*
+ * These defines do not match the regfile, but they have been renamed to be
+ * much more readable
+ */
+
+#define MHB_NIC_SECURITY_BASE 0x60500000
+#define MHB_NIC_PAXC_AXI_NS 0x0008
+#define MHB_NIC_IDM_NS 0x000c
+#define MHB_NIC_MHB_APB_NS 0x0010
+#define MHB_NIC_NITRO_AXI_NS 0x0014
+#define MHB_NIC_PCIE_AXI_NS 0x0018
+#define MHB_NIC_PAXC_APB_NS 0x001c
+#define MHB_NIC_EP_APB_NS 0x0020
+
+#define MHB_NIC_PAXC_APB_S_IDM_SHIFT 5
+#define MHB_NIC_EP_APB_S_IDM_SHIFT 4
+#define MHB_NIC_MHB_APB_S_IDM_SHIFT 3
+#define MHB_NIC_PAXC_AXI_S_IDM_SHIFT 2
+#define MHB_NIC_PCIE_AXI_S_IDM_SHIFT 1
+#define MHB_NIC_NITRO_AXI_S_IDM_SHIFT 0
+
+#define NIC400_NITRO_TOP_NIC_SECURITY_BASE 0x60d00000
+
+#define NITRO_NIC_SECURITY_3_SHIFT 0x14
+#define NITRO_NIC_SECURITY_4_SHIFT 0x18
+#define NITRO_NIC_SECURITY_5_SHIFT 0x1c
+#define NITRO_NIC_SECURITY_6_SHIFT 0x20
+
+void paxc_mhb_ns_init(void)
+{
+ unsigned int val;
+ uintptr_t mhb_nic_gpv = MHB_NIC_SECURITY_BASE;
+#ifndef NITRO_SECURE_ACCESS
+ uintptr_t nic400_nitro_gpv = NIC400_NITRO_TOP_NIC_SECURITY_BASE;
+#endif /* NITRO_SECURE_ACCESS */
+
+ /* set PAXC AXI to allow non-secure access */
+ val = mmio_read_32(mhb_nic_gpv + MHB_NIC_PAXC_AXI_NS);
+ val |= 0x1;
+ mmio_write_32(mhb_nic_gpv + MHB_NIC_PAXC_AXI_NS, val);
+
+ /* set various MHB IDM interfaces to allow non-secure access */
+ val = mmio_read_32(mhb_nic_gpv + MHB_NIC_IDM_NS);
+ val |= (0x1 << MHB_NIC_PAXC_APB_S_IDM_SHIFT);
+ val |= (0x1 << MHB_NIC_EP_APB_S_IDM_SHIFT);
+ val |= (0x1 << MHB_NIC_MHB_APB_S_IDM_SHIFT);
+ val |= (0x1 << MHB_NIC_PAXC_AXI_S_IDM_SHIFT);
+ val |= (0x1 << MHB_NIC_PCIE_AXI_S_IDM_SHIFT);
+ val |= (0x1 << MHB_NIC_NITRO_AXI_S_IDM_SHIFT);
+ mmio_write_32(mhb_nic_gpv + MHB_NIC_IDM_NS, val);
+
+ /* set MHB APB to allow non-secure access */
+ val = mmio_read_32(mhb_nic_gpv + MHB_NIC_MHB_APB_NS);
+ val |= 0x1;
+ mmio_write_32(mhb_nic_gpv + MHB_NIC_MHB_APB_NS, val);
+
+ /* set Nitro AXI to allow non-secure access */
+ val = mmio_read_32(mhb_nic_gpv + MHB_NIC_NITRO_AXI_NS);
+ val |= 0x1;
+ mmio_write_32(mhb_nic_gpv + MHB_NIC_NITRO_AXI_NS, val);
+
+ /* set PCIe AXI to allow non-secure access */
+ val = mmio_read_32(mhb_nic_gpv + MHB_NIC_PCIE_AXI_NS);
+ val |= 0x1;
+ mmio_write_32(mhb_nic_gpv + MHB_NIC_PCIE_AXI_NS, val);
+
+ /* set PAXC APB to allow non-secure access */
+ val = mmio_read_32(mhb_nic_gpv + MHB_NIC_PAXC_APB_NS);
+ val |= 0x1;
+ mmio_write_32(mhb_nic_gpv + MHB_NIC_PAXC_APB_NS, val);
+
+ /* set EP APB to allow non-secure access */
+ val = mmio_read_32(mhb_nic_gpv + MHB_NIC_EP_APB_NS);
+ val |= 0x1;
+ mmio_write_32(mhb_nic_gpv + MHB_NIC_EP_APB_NS, val);
+
+#ifndef NITRO_SECURE_ACCESS
+ /* Set NIC400 to allow non-secure access */
+ mmio_setbits_32(nic400_nitro_gpv + NITRO_NIC_SECURITY_3_SHIFT, 0x1);
+ mmio_setbits_32(nic400_nitro_gpv + NITRO_NIC_SECURITY_4_SHIFT, 0x1);
+ mmio_setbits_32(nic400_nitro_gpv + NITRO_NIC_SECURITY_5_SHIFT, 0x1);
+ mmio_setbits_32(nic400_nitro_gpv + NITRO_NIC_SECURITY_6_SHIFT, 0x1);
+#endif /* NITRO_SECURE_ACCESS */
+}
diff --git a/plat/brcm/board/stingray/src/pm.c b/plat/brcm/board/stingray/src/pm.c
new file mode 100644
index 0000000..a5ac2e7
--- /dev/null
+++ b/plat/brcm/board/stingray/src/pm.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2015 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/ccn.h>
+#include <drivers/delay_timer.h>
+#include <lib/bakery_lock.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <lib/spinlock.h>
+#include <plat/common/platform.h>
+
+#ifdef USE_PAXC
+#include <chimp.h>
+#endif
+#include <cmn_plat_util.h>
+#include <ihost_pm.h>
+#include <plat_brcm.h>
+#include <platform_def.h>
+
+static uint64_t plat_sec_entrypoint;
+
+/*******************************************************************************
+ * SR handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ ******************************************************************************/
+static int brcm_pwr_domain_on(u_register_t mpidr)
+{
+ int cpuid;
+
+ cpuid = plat_brcm_calc_core_pos(mpidr);
+ INFO("mpidr :%lu, cpuid:%d\n", mpidr, cpuid);
+
+#ifdef USE_SINGLE_CLUSTER
+ if (cpuid > 1)
+ return PSCI_E_INTERN_FAIL;
+#endif
+
+ ihost_power_on_cluster(mpidr);
+
+ ihost_power_on_secondary_core(mpidr, plat_sec_entrypoint);
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * SR handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+static void brcm_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr());
+
+ assert(target_state->pwr_domain_state[MPIDR_AFFLVL0] ==
+ PLAT_LOCAL_STATE_OFF);
+
+ if (target_state->pwr_domain_state[MPIDR_AFFLVL1] ==
+ PLAT_LOCAL_STATE_OFF) {
+ INFO("Cluster #%lu entering to snoop/dvm domain\n", cluster_id);
+ ccn_enter_snoop_dvm_domain(1 << cluster_id);
+ }
+
+ /* Enable the gic cpu interface */
+ plat_brcm_gic_pcpu_init();
+
+ /* Program the gic per-cpu distributor or re-distributor interface */
+ plat_brcm_gic_cpuif_enable();
+
+ INFO("Gic Initialization done for this affinity instance\n");
+}
+
+static void __dead2 brcm_system_reset(void)
+{
+ uint32_t reset_type = SOFT_SYS_RESET_L1;
+
+#ifdef USE_PAXC
+ if (bcm_chimp_is_nic_mode())
+ reset_type = SOFT_RESET_L3;
+#endif
+ INFO("System rebooting - L%d...\n", reset_type);
+
+ plat_soft_reset(reset_type);
+
+	/* Prevent the function from returning, per the __dead2 attribute */
+ while (1)
+ ;
+}
+
+static int brcm_system_reset2(int is_vendor, int reset_type,
+ u_register_t cookie)
+{
+ INFO("System rebooting - L%d...\n", reset_type);
+
+ plat_soft_reset(reset_type);
+
+	/*
+	 * plat_soft_reset cannot return (it is a __dead function), but
+	 * brcm_system_reset2 still has to return a value to satisfy its
+	 * prototype, even in this case.
+	 */
+ return 0;
+}
+
+/*******************************************************************************
+ * Export the platform handlers via plat_brcm_psci_pm_ops. The ARM Standard
+ * platform will take care of registering the handlers with PSCI.
+ ******************************************************************************/
+const plat_psci_ops_t plat_brcm_psci_pm_ops = {
+ .pwr_domain_on = brcm_pwr_domain_on,
+ .pwr_domain_on_finish = brcm_pwr_domain_on_finish,
+ .system_reset = brcm_system_reset,
+ .system_reset2 = brcm_system_reset2
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const plat_psci_ops_t **psci_ops)
+{
+ *psci_ops = &plat_brcm_psci_pm_ops;
+ plat_sec_entrypoint = sec_entrypoint;
+
+ return 0;
+}
diff --git a/plat/brcm/board/stingray/src/scp_cmd.c b/plat/brcm/board/stingray/src/scp_cmd.c
new file mode 100644
index 0000000..2aa9519
--- /dev/null
+++ b/plat/brcm/board/stingray/src/scp_cmd.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017-2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <drivers/delay_timer.h>
+
+#include <platform_def.h>
+#include <scp.h>
+#include <scp_cmd.h>
+
+#include "m0_ipc.h"
+
+/*
+ * Reads a response from the CRMU MAILBOX.
+ * Assumes that access has been granted and locked.
+ * Note that this is just a temporary implementation until
+ * channels are introduced.
+ */
+static void scp_read_response(crmu_response_t *resp)
+{
+ uint32_t code;
+
+ code = mmio_read_32(CRMU_MAIL_BOX0);
+ resp->completed = code & MCU_IPC_CMD_DONE_MASK;
+ resp->cmd = code & SCP_CMD_MASK;
+ resp->ret = (code & MCU_IPC_CMD_REPLY_MASK) >> MCU_IPC_CMD_REPLY_SHIFT;
+}
+
+/*
+ * Send a command to the SCP and wait for up to 'timeout' microseconds.
+ * Return: 0 on success
+ *         -1 if there was no proper reply from the SCP
+ *         >0 if there was a response from the MCU, but the
+ *            command completed with an error.
+ */
+int scp_send_cmd(uint32_t cmd, uint32_t param, uint32_t timeout)
+{
+ int ret = -1;
+
+ mmio_write_32(CRMU_MAIL_BOX0, cmd);
+ mmio_write_32(CRMU_MAIL_BOX1, param);
+ do {
+ crmu_response_t scp_resp;
+
+ udelay(1);
+ scp_read_response(&scp_resp);
+ if (scp_resp.completed &&
+ (scp_resp.cmd == cmd)) {
+ /* This command has completed */
+ ret = scp_resp.ret;
+ break;
+ }
+ } while (--timeout);
+
+ return ret;
+}
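+
+/*
+ * Usage sketch (hypothetical): the NOP command doubles as a liveness probe,
+ * which is how is_crmu_alive() in scp_utils.c uses it:
+ *
+ *	if (scp_send_cmd(MCU_IPC_MCU_CMD_NOP, 0,
+ *			 SCP_CMD_DEFAULT_TIMEOUT_US) != 0)
+ *		WARN("SCP is not responding\n");
+ */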
diff --git a/plat/brcm/board/stingray/src/scp_utils.c b/plat/brcm/board/stingray/src/scp_utils.c
new file mode 100644
index 0000000..1d82cef
--- /dev/null
+++ b/plat/brcm/board/stingray/src/scp_utils.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2017-2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+
+#include <bcm_elog_ddr.h>
+#include <brcm_mhu.h>
+#include <brcm_scpi.h>
+#include <chimp.h>
+#include <cmn_plat_util.h>
+#include <ddr_init.h>
+#include <scp.h>
+#include <scp_cmd.h>
+#include <scp_utils.h>
+
+#include "m0_cfg.h"
+#include "m0_ipc.h"
+
+#ifdef BCM_ELOG
+static void prepare_elog(void)
+{
+#if (CLEAN_DDR && !defined(MMU_DISABLED))
+ /*
+ * Now DDR has been initialized. We want to copy all the logs in SRAM
+ * into DDR so we will have much more space to store the logs in the
+ * next boot stage
+ */
+ bcm_elog_copy_log((void *)BCM_ELOG_BL31_BASE,
+ MIN(BCM_ELOG_BL2_SIZE, BCM_ELOG_BL31_SIZE)
+ );
+
+	/*
+	 * We are almost at the end of BL2, so we can stop logging here; this
+	 * way we do not need to add 'bcm_elog_exit' to the standard BL2
+	 * code. The benefit of capturing BL2 logs after this point is very
+	 * minimal in a production system.
+	 */
+ bcm_elog_exit();
+#endif
+
+ /*
+ * Notify CRMU that now it should pull logs from DDR instead of from
+ * FS4 SRAM.
+ */
+ SCP_WRITE_CFG(flash_log.can_use_ddr, 1);
+}
+#endif
+
+bool is_crmu_alive(void)
+{
+ return (scp_send_cmd(MCU_IPC_MCU_CMD_NOP, 0, SCP_CMD_DEFAULT_TIMEOUT_US)
+ == 0);
+}
+
+bool bcm_scp_issue_sys_reset(void)
+{
+ return (scp_send_cmd(MCU_IPC_MCU_CMD_L1_RESET, 0,
+ SCP_CMD_DEFAULT_TIMEOUT_US));
+}
+
+/*
+ * Note that this is just a temporary implementation until
+ * channels are introduced
+ */
+
+int plat_bcm_bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+{
+ int scp_patch_activated, scp_patch_version;
+#ifndef EMULATION_SETUP
+ uint8_t active_ch_bitmap, i;
+#endif
+ uint32_t reset_state = 0;
+ uint32_t mcu_ap_init_param = 0;
+
+ /*
+ * First check if SCP patch has already been loaded
+ * Send NOP command and see if there is a valid response
+ */
+ scp_patch_activated =
+ (scp_send_cmd(MCU_IPC_MCU_CMD_NOP, 0,
+ SCP_CMD_DEFAULT_TIMEOUT_US) == 0);
+ if (scp_patch_activated) {
+ INFO("SCP Patch is already active.\n");
+
+ reset_state = SCP_READ_CFG(board_cfg.reset_state);
+ mcu_ap_init_param = SCP_READ_CFG(board_cfg.mcu_init_param);
+
+ /* Clear reset state, it's been already read */
+ SCP_WRITE_CFG(board_cfg.reset_state, 0);
+
+ if (mcu_ap_init_param & MCU_PATCH_LOADED_BY_NITRO) {
+			/*
+			 * Reset the "MCU_PATCH_LOADED_BY_NITRO" flag, but
+			 * preserve any other flags we don't deal with here
+			 */
+ INFO("AP booted by Nitro\n");
+ SCP_WRITE_CFG(
+ board_cfg.mcu_init_param,
+ mcu_ap_init_param &
+ ~MCU_PATCH_LOADED_BY_NITRO
+ );
+ }
+ } else {
+		/*
+		 * MCU patch not loaded, so load it.
+		 * The MCU patch stamps critical points in REG9 (debug
+		 * test-point); display its last content here. This helps to
+		 * locate where a crash occurred if the CRMU watchdog kicked
+		 * in.
+		 */
+ int ret;
+
+ INFO("MCU Patch Point: 0x%x\n",
+ mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG9));
+
+ ret = download_scp_patch((void *)scp_bl2_image_info->image_base,
+ scp_bl2_image_info->image_size);
+ if (ret != 0)
+ return ret;
+
+ VERBOSE("SCP Patch loaded OK.\n");
+
+ ret = scp_send_cmd(MCU_IPC_MCU_CMD_INIT,
+ MCU_PATCH_LOADED_BY_AP,
+ SCP_CMD_SCP_BOOT_TIMEOUT_US);
+ if (ret) {
+ ERROR("SCP Patch could not initialize; error %d\n",
+ ret);
+ return ret;
+ }
+
+ INFO("SCP Patch successfully initialized.\n");
+ }
+
+ scp_patch_version = scp_send_cmd(MCU_IPC_MCU_CMD_GET_FW_VERSION, 0,
+ SCP_CMD_DEFAULT_TIMEOUT_US);
+ INFO("SCP Patch version :0x%x\n", scp_patch_version);
+
+ /* Next block just reports current AVS voltages (if applicable) */
+ {
+ uint16_t vcore_mv, ihost03_mv, ihost12_mv;
+
+ vcore_mv = SCP_READ_CFG16(vcore.millivolts) +
+ SCP_READ_CFG8(vcore.avs_cfg.additive_margin);
+ ihost03_mv = SCP_READ_CFG16(ihost03.millivolts) +
+ SCP_READ_CFG8(ihost03.avs_cfg.additive_margin);
+ ihost12_mv = SCP_READ_CFG16(ihost12.millivolts) +
+ SCP_READ_CFG8(ihost12.avs_cfg.additive_margin);
+
+ if (vcore_mv || ihost03_mv || ihost12_mv) {
+ INFO("AVS voltages from cfg (including margin)\n");
+ if (vcore_mv > 0)
+ INFO("%s\tVCORE: %dmv\n",
+ SCP_READ_CFG8(vcore.avs_cfg.avs_set) ?
+ "*" : "n/a", vcore_mv);
+ if (ihost03_mv > 0)
+ INFO("%s\tIHOST03: %dmv\n",
+ SCP_READ_CFG8(ihost03.avs_cfg.avs_set) ?
+ "*" : "n/a", ihost03_mv);
+ if (ihost12_mv > 0)
+ INFO("%s\tIHOST12: %dmv\n",
+ SCP_READ_CFG8(ihost12.avs_cfg.avs_set) ?
+ "*" : "n/a", ihost12_mv);
+ } else {
+ INFO("AVS settings not applicable\n");
+ }
+ }
+
+#if (CLEAN_DDR && !defined(MMU_DISABLED) && !defined(EMULATION_SETUP))
+ /* This will clean the DDR and enable ECC if set */
+ check_ddr_clean();
+#endif
+
+#if (WARMBOOT_DDR_S3_SUPPORT && ELOG_STORE_MEDIA_DDR)
+ elog_init_ddr_log();
+#endif
+
+#ifdef BCM_ELOG
+ /* Prepare ELOG to use DDR */
+ prepare_elog();
+#endif
+
+#ifndef EMULATION_SETUP
+ /* Ask ddr_init to save obtained DDR information into DDR */
+ ddr_info_save();
+#endif
+
+ /*
+ * Configure TMON DDR address.
+ * This cfg is common for all cases
+ */
+ SCP_WRITE_CFG(tmon_cfg.ddr_desc, TMON_SHARED_DDR_ADDRESS);
+
+ if (reset_state == SOFT_RESET_L3 && !mcu_ap_init_param) {
+ INFO("SCP configuration after L3 RESET done.\n");
+ return 0;
+ }
+
+ if (bcm_chimp_is_nic_mode())
+ /* Configure AP WDT to not reset the NIC interface */
+ SCP_WRITE_CFG(board_cfg.apwdt_reset_type, SOFT_RESET_L3);
+
+#if (WARMBOOT_DDR_S3_SUPPORT && ELOG_STORE_MEDIA_DDR)
+	/* Perform an L3 reset on AP watchdog expiry if DDR error logging is enabled */
+ SCP_WRITE_CFG(board_cfg.apwdt_reset_type, SOFT_RESET_L3);
+#endif
+
+#ifndef EMULATION_SETUP
+
+#ifdef DDR_SCRUB_ENA
+ ddr_scrub_enable();
+#endif
+ /* Fill the Active channel information */
+ active_ch_bitmap = get_active_ddr_channel();
+ for (i = 0; i < MAX_NR_DDR_CH; i++)
+ SCP_WRITE_CFG(ddr_cfg.ddr_cfg[i],
+ (active_ch_bitmap & BIT(i)) ? 1 : 0);
+#endif
+ return 0;
+}
diff --git a/plat/brcm/board/stingray/src/sdio.c b/plat/brcm/board/stingray/src/sdio.c
new file mode 100644
index 0000000..aa2b71a
--- /dev/null
+++ b/plat/brcm/board/stingray/src/sdio.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2019-2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <sdio.h>
+#include <sr_def.h>
+#include <sr_utils.h>
+
+const SDIO_CFG sr_sdio0_cfg = {
+ .cfg_base = SR_IPROC_SDIO0_CFG_BASE,
+ .sid_base = SR_IPROC_SDIO0_SID_BASE,
+ .io_ctrl_base = SR_IPROC_SDIO0_IOCTRL_BASE,
+ .pad_base = SR_IPROC_SDIO0_PAD_BASE,
+};
+const SDIO_CFG sr_sdio1_cfg = {
+ .cfg_base = SR_IPROC_SDIO1_CFG_BASE,
+ .sid_base = SR_IPROC_SDIO1_SID_BASE,
+ .io_ctrl_base = SR_IPROC_SDIO1_IOCTRL_BASE,
+ .pad_base = SR_IPROC_SDIO1_PAD_BASE,
+};
+
+void brcm_stingray_sdio_init(void)
+{
+ unsigned int val;
+ const SDIO_CFG *sdio0_cfg, *sdio1_cfg;
+
+ sdio0_cfg = &sr_sdio0_cfg;
+ sdio1_cfg = &sr_sdio1_cfg;
+
+ INFO("set sdio0 caps\n");
+ /* SDIO0 CAPS0 */
+ val = SDIO0_CAP0_CFG;
+ INFO("caps0 0x%x\n", val);
+ mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_CAP0, val);
+
+ /* SDIO0 CAPS1 */
+ val = SDIO0_CAP1_CFG;
+ INFO("caps1 0x%x\n", val);
+ mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_CAP1, val);
+
+ mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_0,
+ SDIO_PRESETVAL0);
+ mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_1,
+ SDIO_PRESETVAL1);
+ mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_2,
+ SDIO_PRESETVAL2);
+ mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_3,
+ SDIO_PRESETVAL3);
+ mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_4,
+ SDIO_PRESETVAL4);
+
+ val = SR_SID_VAL(0x3, 0x0, 0x2) << SDIO_SID_SHIFT;
+ mmio_write_32(sdio0_cfg->sid_base + ICFG_SDIO_SID_ARADDR, val);
+ mmio_write_32(sdio0_cfg->sid_base + ICFG_SDIO_SID_AWADDR, val);
+
+ val = mmio_read_32(sdio0_cfg->io_ctrl_base);
+ val &= ~(0xff << 23); /* Clear ARCACHE and AWCACHE */
+ val |= (0xb7 << 23); /* Set ARCACHE and AWCACHE */
+ mmio_write_32(sdio0_cfg->io_ctrl_base, val);
+
+ mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_CLK,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA0,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA1,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA2,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA3,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA4,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA5,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA6,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA7,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_CMD,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+
+ INFO("set sdio1 caps\n");
+
+ /* SDIO1 CAPS0 */
+ val = SDIO1_CAP0_CFG;
+ INFO("caps0 0x%x\n", val);
+ mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_CAP0, val);
+ /* SDIO1 CAPS1 */
+ val = SDIO1_CAP1_CFG;
+ INFO("caps1 0x%x\n", val);
+ mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_CAP1, val);
+
+ mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_0,
+ SDIO_PRESETVAL0);
+ mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_1,
+ SDIO_PRESETVAL1);
+ mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_2,
+ SDIO_PRESETVAL2);
+ mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_3,
+ SDIO_PRESETVAL3);
+ mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_4,
+ SDIO_PRESETVAL4);
+
+ val = SR_SID_VAL(0x3, 0x0, 0x3) << SDIO_SID_SHIFT;
+ mmio_write_32(sdio1_cfg->sid_base + ICFG_SDIO_SID_ARADDR, val);
+ mmio_write_32(sdio1_cfg->sid_base + ICFG_SDIO_SID_AWADDR, val);
+
+ val = mmio_read_32(sdio1_cfg->io_ctrl_base);
+ val &= ~(0xff << 23); /* Clear ARCACHE and AWCACHE */
+ val |= (0xb7 << 23); /* Set ARCACHE and AWCACHE */
+ mmio_write_32(sdio1_cfg->io_ctrl_base, val);
+
+ mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_CLK,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA0,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA1,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA2,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA3,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA4,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA5,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA6,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA7,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+ mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_CMD,
+ PAD_SDIO_MASK, PAD_SDIO_VALUE);
+
+ INFO("sdio init done\n");
+}
diff --git a/plat/brcm/board/stingray/src/sr_paxb_phy.c b/plat/brcm/board/stingray/src/sr_paxb_phy.c
new file mode 100644
index 0000000..7380e09
--- /dev/null
+++ b/plat/brcm/board/stingray/src/sr_paxb_phy.c
@@ -0,0 +1,806 @@
+/*
+ * Copyright (c) 2019-2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <paxb.h>
+#include <sr_def.h>
+#include <sr_utils.h>
+
+/* total number of PCIe Phys */
+#define NUM_OF_PCIE_SERDES 8
+
+#define CFG_RC_PMI_ADDR 0x1130
+#define PMI_RX_TERM_SEQ ((0x1 << 27) | (0x1ff << 16) | (0xd090))
+#define PMI_RX_TERM_VAL 0x4c00
+#define PMI_PLL_CTRL_4 0xd0b4
+#define PMI_SERDES_CLK_ENABLE (1 << 12)
+
+#define WAR_PLX_PRESET_PARITY_FAIL
+
+#define CFG_RC_REG_PHY_CTL_10 0x1838
+#define PHY_CTL_10_GEN3_MATCH_PARITY (1 << 15)
+
+#define PMI_X8_CORE0_7_PATCH_SEQ ((0x1 << 27) | (0x1ff << 16) | (0xd2a5))
+#define PMI_X8_CORE0_7_PATCH_VAL 0xd864
+
+#define PMI_ADDR_BCAST(addr) ((0x1 << 27) | (0x1ff << 16) | (addr))
+#define PMI_ADDR_LANE0(addr) ((0x1 << 27) | (addr))
+#define PMI_ADDR_LANE1(addr) ((0x1 << 27) | (0x1 << 16) | (addr))
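+
+/*
+ * Worked example: PMI_ADDR_BCAST(0xd0a5) yields
+ * (0x1 << 27) | (0x1ff << 16) | 0xd0a5 = 0x09ffd0a5, a broadcast write to
+ * Serdes register 0xd0a5 on all lanes, while PMI_ADDR_LANE1(0xd0a5) yields
+ * 0x0801d0a5, targeting lane 1 only.
+ */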
+
+#define MERLIN16_PCIE_BLK2_PWRMGMT_7 ((0x1 << 27) | (0x1ff << 16) | 0x1208)
+#define MERLIN16_PCIE_BLK2_PWRMGMT_8 ((0x1 << 27) | (0x1ff << 16) | 0x1209)
+#define MERLIN16_AMS_TX_CTRL_5 ((0x1 << 27) | (0x1ff << 16) | 0xd0a5)
+#define MERLIN16_AMS_TX_CTRL_5_VAL \
+ ((1 << 13) | (1 << 12) | (1 << 11) | (1 << 10))
+#define MERLIN16_PCIE_BLK2_PWRMGMT_7_VAL 0x96
+#define MERLIN16_PCIE_BLK2_PWRMGMT_8_VAL 0x12c
+
+#define CFG_RC_PMI_WDATA 0x1134
+#define CFG_RC_WCMD_SHIFT 31
+#define CFG_RC_WCMD_MASK ((uint32_t)1U << CFG_RC_WCMD_SHIFT)
+#define CFG_RC_RCMD_SHIFT 30
+#define CFG_RC_RCMD_MASK ((uint32_t)1U << CFG_RC_RCMD_SHIFT)
+#define CFG_RC_RWCMD_MASK (CFG_RC_RCMD_MASK | CFG_RC_WCMD_MASK)
+#define CFG_RC_PMI_RDATA 0x1138
+#define CFG_RC_RACK_SHIFT 31
+#define CFG_RC_RACK_MASK ((uint32_t)1U << CFG_RC_RACK_SHIFT)
+
+/* allow up to 5 ms for PMI write to finish */
+#define PMI_TIMEOUT_MS 5
+
+/* in 2x8 RC mode, one needs to patch up Serdes 3 and 7 for link to come up */
+#define SERDES_PATCH_PIPEMUX_INDEX 0x3
+#define SERDES_PATCH_INDEX 0x8
+
+#define DSC_UC_CTRL 0xd00d
+#define DSC_UC_CTRL_RDY_CMD (1 << 7)
+#define LANE_DBG_RST_CTRL 0xd164
+#define UC_A_CLK_CTRL0 0xd200
+#define UC_A_RST_CTRL0 0xd201
+#define UC_A_AHB_CTRL0 0xd202
+#define UC_A_AHB_STAT0 0xd203
+#define UC_A_AHB_WADDR_LSW 0xd204
+#define UC_A_AHB_WADDR_MSW 0xd205
+#define UC_A_AHB_WDATA_LSW 0xd206
+#define UC_A_AHB_WDATA_MSW 0xd207
+#define UC_A_AHB_RADDR_LSW 0xd208
+#define UC_A_AHB_RADDR_MSW 0xd209
+#define UC_A_AHB_RDATA_LSW 0xd20a
+#define UC_A_AHB_RDATA_MSW 0xd20b
+#define UC_VERSION_NUM 0xd230
+#define DSC_SM_CTL22 0xd267
+#define UC_DBG1 0xd251
+
+#define LOAD_UC_CHECK 0
+#define UC_RAM_INIT_TIMEOUT 100
+#define UC_RAM_CONTROL 0xd225
+#define UC_INIT_TIMEOUT 100
+#define SIZE_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#define SZ_4 4
+#define GET_2_BYTES(p, i) ((uint16_t)p[i] | (uint16_t)p[i+1] << 8)
+
+/*
+ * List of PCIe LCPLL related registers
+ *
+ * LCPLL channel 0 provides the Serdes pad clock when running in RC mode
+ */
+#define PCIE_LCPLL_BASE 0x40000000
+
+#define PCIE_LCPLL_CTRL0_OFFSET 0x00
+#define PCIE_LCPLL_RESETB_SHIFT 31
+#define PCIE_LCPLL_RESETB_MASK BIT(PCIE_LCPLL_RESETB_SHIFT)
+#define PCIE_LCPLL_P_RESETB_SHIFT 30
+#define PCIE_LCPLL_P_RESETB_MASK BIT(PCIE_LCPLL_P_RESETB_SHIFT)
+
+#define PCIE_LCPLL_CTRL3_OFFSET 0x0c
+#define PCIE_LCPLL_EN_CTRL_SHIFT 16
+#define PCIE_LCPLL_CM_ENA 0x1a
+#define PCIE_LCPLL_CM_BUF_ENA 0x18
+#define PCIE_LCPLL_D2C2_ENA 0x2
+#define PCIE_LCPLL_REF_CLK_SHIFT 1
+#define PCIE_LCPLL_REF_CLK_MASK BIT(PCIE_LCPLL_REF_CLK_SHIFT)
+#define PCIE_LCPLL_CTRL13_OFFSET 0x34
+#define PCIE_LCPLL_D2C2_CTRL_SHIFT 16
+#define PCIE_LCPLL_D2C2_TERM_DISC 0xe0
+
+#define PCIE_LCPLL_STATUS_OFFSET 0x40
+#define PCIE_LCPLL_LOCK_SHIFT 12
+#define PCIE_LCPLL_LOCK_MASK BIT(PCIE_LCPLL_LOCK_SHIFT)
+
+#define PCIE_PIPE_MUX_RC_MODE_OVERRIDE_CFG 0x114
+#define PCIE_TX_CLKMASTER_CTRL_OVERRIDE_CFG 0x11c
+
+/* wait 500 microseconds for PCIe LCPLL to power up */
+#define PCIE_LCPLL_DELAY_US 500
+
+/* allow up to 5 ms for PCIe LCPLL VCO to lock */
+#define PCIE_LCPLL_TIMEOUT_MS 5
+
+#define PCIE_PIPE_MUX_CONFIGURATION_CFG 0x4000010c
+
+#define PCIE_PIPEMUX_SHIFT 19
+#define PCIE_PIPEMUX_MASK 0xf
+
+/* keep track of PIPEMUX index to use */
+static unsigned int pipemux_idx;
+
+/*
+ * PCIe PIPEMUX lookup table
+ *
+ * Each array index represents a PIPEMUX strap setting
+ * The array element represents a bitmap where a set bit means the PCIe core
+ * needs to be enabled as RC
+ */
+static uint8_t pipemux_table[] = {
+ /* PIPEMUX = 0, EP 1x16 */
+ 0x00,
+ /* PIPEMUX = 1, EP 1x8 + RC 1x8, core 7 */
+ 0x80,
+ /* PIPEMUX = 2, EP 4x4 */
+ 0x00,
+ /* PIPEMUX = 3, RC 2x8, cores 0, 7 */
+ 0x81,
+ /* PIPEMUX = 4, RC 4x4, cores 0, 1, 6, 7 */
+ 0xc3,
+ /* PIPEMUX = 5, RC 8x2, all 8 cores */
+ 0xff,
+ /* PIPEMUX = 6, RC 3x4 + 2x2, cores 0, 2, 3, 6, 7 */
+ 0xcd,
+ /* PIPEMUX = 7, RC 1x4 + 6x2, cores 0, 2, 3, 4, 5, 6, 7 */
+ 0xfd,
+ /* PIPEMUX = 8, EP 1x8 + RC 4x2, cores 4, 5, 6, 7 */
+ 0xf0,
+ /* PIPEMUX = 9, EP 1x8 + RC 2x4, cores 6, 7 */
+ 0xc0,
+ /* PIPEMUX = 10, EP 2x4 + RC 2x4, cores 1, 6 */
+ 0x42,
+ /* PIPEMUX = 11, EP 2x4 + RC 4x2, cores 2, 3, 4, 5 */
+ 0x3c,
+ /* PIPEMUX = 12, EP 1x4 + RC 6x2, cores 2, 3, 4, 5, 6, 7 */
+ 0xfc,
+ /* PIPEMUX = 13, RC 2x4 + RC 1x4 + 2x2, cores 2, 3, 6 */
+ 0x4c,
+};
+
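+/*
+ * Worked example: pipemux_table[4] = 0xc3 = 0b11000011, so bits 0, 1, 6 and
+ * 7 are set and cores 0, 1, 6 and 7 are enabled as RC, matching the
+ * "RC 4x4" entry above.
+ */
+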
+/*
+ * Return 1 if pipemux strap is supported
+ */
+static int pipemux_strap_is_valid(uint32_t pipemux)
+{
+ if (pipemux < ARRAY_SIZE(pipemux_table))
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * Read the PCIe PIPEMUX from strap
+ */
+static uint32_t pipemux_strap_read(void)
+{
+ uint32_t pipemux;
+
+ pipemux = mmio_read_32(PCIE_PIPE_MUX_CONFIGURATION_CFG);
+ pipemux &= PCIE_PIPEMUX_MASK;
+ if (pipemux == PCIE_PIPEMUX_MASK) {
+ /* read the PCIe PIPEMUX strap setting */
+ pipemux = mmio_read_32(CDRU_CHIP_STRAP_DATA_LSW);
+ pipemux >>= PCIE_PIPEMUX_SHIFT;
+ pipemux &= PCIE_PIPEMUX_MASK;
+ }
+
+ return pipemux;
+}
+
+/*
+ * Store the PIPEMUX index (set for each boot)
+ */
+static void pipemux_save_index(unsigned int idx)
+{
+ pipemux_idx = idx;
+}
+
+static int paxb_sr_core_needs_enable(unsigned int core_idx)
+{
+ return !!((pipemux_table[pipemux_idx] >> core_idx) & 0x1);
+}
+
+static int pipemux_sr_init(void)
+{
+ uint32_t pipemux;
+
+ /* read the PCIe PIPEMUX strap setting */
+ pipemux = pipemux_strap_read();
+ if (!pipemux_strap_is_valid(pipemux)) {
+ ERROR("Invalid PCIe PIPEMUX strap %u\n", pipemux);
+ return -EIO;
+ }
+
+ /* no PCIe RC is needed */
+ if (!pipemux_table[pipemux]) {
+ WARN("PIPEMUX indicates no PCIe RC required\n");
+ return -ENODEV;
+ }
+
+ /* save the PIPEMUX strap */
+ pipemux_save_index(pipemux);
+
+ return 0;
+}
+
+/*
+ * PCIe RC serdes link width
+ *
+ * The array is first organized in rows as indexed by the PIPEMUX setting.
+ * Within each row, eight lane width entries are specified -- one entry
+ * per PCIe core, from 0 to 7.
+ *
+ * Note: The EP lanes/cores are not mapped in this table! EP cores are
+ * controlled and thus configured by Nitro.
+ */
+static uint8_t link_width_table[][NUM_OF_SR_PCIE_CORES] = {
+ /* PIPEMUX = 0, EP 1x16 */
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* PIPEMUX = 1, EP 1x8 + RC 1x8, core 7 */
+ {0, 0, 0, 0, 0, 0, 0, 8},
+ /* PIPEMUX = 2, EP 4x4 */
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ /* PIPEMUX = 3, RC 2x8, cores 0, 7 */
+ {8, 0, 0, 0, 0, 0, 0, 8},
+ /* PIPEMUX = 4, RC 4x4, cores 0, 1, 6, 7 */
+ {4, 4, 0, 0, 0, 0, 4, 4},
+ /* PIPEMUX = 5, RC 8x2, all 8 cores */
+ {2, 2, 2, 2, 2, 2, 2, 2},
+ /* PIPEMUX = 6, RC 3x4 (cores 0, 6, 7), RC 2x2 (cores 2, 3) */
+ {4, 0, 2, 2, 0, 0, 4, 4},
+	/* PIPEMUX = 7, RC 1x4 (core 0), RC 6x2 (cores 2, 3, 4, 5, 6, 7) */
+ {4, 0, 2, 2, 2, 2, 2, 2},
+ /* PIPEMUX = 8, EP 1x8 + RC 4x2 (cores 4, 5, 6, 7) */
+ {0, 0, 0, 0, 2, 2, 2, 2},
+ /* PIPEMUX = 9, EP 1x8 + RC 2x4 (cores 6, 7) */
+ {0, 0, 0, 0, 0, 0, 4, 4},
+ /* PIPEMUX = 10, EP 2x4 + RC 2x4 (cores 1, 6) */
+ {0, 4, 0, 0, 0, 0, 4, 0},
+ /* PIPEMUX = 11, EP 2x4 + RC 4x2 (cores 2, 3, 4, 5) */
+ {0, 0, 2, 2, 2, 2, 0, 0},
+ /* PIPEMUX = 12, EP 1x4 + RC 6x2 (cores 2, 3, 4, 5, 6, 7) */
+ {0, 0, 2, 2, 2, 2, 2, 2},
+ /* PIPEMUX = 13, EP 2x4 + RC 1x4 (core 6) + RC 2x2 (cores 2, 3) */
+ {0, 0, 2, 2, 0, 0, 4, 0}
+};
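+
+/*
+ * Presumed lookup pattern (a sketch; the actual accessor is provided via
+ * paxb->get_link_width and lives elsewhere): the table is indexed first by
+ * the saved PIPEMUX strap and then by core index, e.g.:
+ *
+ *	link_width = link_width_table[pipemux_idx][core_idx];
+ */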
+
+/*
+ * function for writes to the Serdes registers through the PMI interface
+ */
+static int paxb_pmi_write(unsigned int core_idx, uint32_t pmi, uint32_t val)
+{
+ uint32_t status;
+ unsigned int timeout = PMI_TIMEOUT_MS;
+
+ paxb_rc_cfg_write(core_idx, CFG_RC_PMI_ADDR, pmi);
+
+ val &= ~CFG_RC_RWCMD_MASK;
+ val |= CFG_RC_WCMD_MASK;
+ paxb_rc_cfg_write(core_idx, CFG_RC_PMI_WDATA, val);
+
+ do {
+ status = paxb_rc_cfg_read(core_idx, CFG_RC_PMI_WDATA);
+
+ /* wait for write command bit to clear */
+ if ((status & CFG_RC_WCMD_MASK) == 0)
+ return 0;
+ } while (--timeout);
+
+ return -EIO;
+}
+
+/*
+ * function for reads from the Serdes registers through the PMI interface
+ */
+static int paxb_pmi_read(unsigned int core_idx, uint32_t pmi, uint32_t *val)
+{
+ uint32_t status;
+ unsigned int timeout = PMI_TIMEOUT_MS;
+
+ paxb_rc_cfg_write(core_idx, CFG_RC_PMI_ADDR, pmi);
+
+ paxb_rc_cfg_write(core_idx, CFG_RC_PMI_WDATA, CFG_RC_RCMD_MASK);
+
+ do {
+ status = paxb_rc_cfg_read(core_idx, CFG_RC_PMI_RDATA);
+
+ /* wait for read ack bit set */
+ if ((status & CFG_RC_RACK_MASK)) {
+ *val = paxb_rc_cfg_read(core_idx, CFG_RC_PMI_RDATA);
+ return 0;
+ }
+ } while (--timeout);
+
+ return -EIO;
+}
+
+#ifndef BOARD_PCIE_EXT_CLK
+/*
+ * PCIe override clock lookup table
+ *
+ * Each entry, indexed by PIPEMUX setting, indicates whether the PCIe
+ * override clock has already been configured by the CFW.
+ */
+static uint8_t pcie_override_clk_table[] = {
+ /* PIPEMUX = 0, EP 1x16 */
+ 0x0,
+ /* PIPEMUX = 1, EP 1x8 + RC 1x8, core 7 */
+ 0x1,
+ /* PIPEMUX = 2, EP 4x4 */
+ 0x0,
+ /* PIPEMUX = 3, RC 2x8, cores 0, 7 */
+ 0x0,
+ /* PIPEMUX = 4, RC 4x4, cores 0, 1, 6, 7 */
+ 0x0,
+ /* PIPEMUX = 5, RC 8x2, all 8 cores */
+ 0x0,
+ /* PIPEMUX = 6, RC 3x4 + 2x2, cores 0, 2, 3, 6, 7 */
+ 0x0,
+ /* PIPEMUX = 7, RC 1x4 + 6x2, cores 0, 2, 3, 4, 5, 6, 7 */
+ 0x0,
+ /* PIPEMUX = 8, EP 1x8 + RC 4x2, cores 4, 5, 6, 7 */
+ 0x0,
+ /* PIPEMUX = 9, EP 1x8 + RC 2x4, cores 6, 7 */
+ 0x0,
+ /* PIPEMUX = 10, EP 2x4 + RC 2x4, cores 1, 6 */
+ 0x0,
+ /* PIPEMUX = 11, EP 2x4 + RC 4x2, cores 2, 3, 4, 5 */
+ 0x0,
+ /* PIPEMUX = 12, EP 1x4 + RC 6x2, cores 2, 3, 4, 5, 6, 7 */
+ 0x0,
+	/* PIPEMUX = 13, EP 2x4 + RC 1x4 (core 6) + RC 2x2 (cores 2, 3) */
+ 0x0,
+};
+
+/*
+ * Bring up LCPLL channel 0 reference clock for PCIe serdes used in RC mode
+ */
+static int pcie_lcpll_init(void)
+{
+ uintptr_t reg;
+ unsigned int timeout = PCIE_LCPLL_TIMEOUT_MS;
+ uint32_t val;
+
+ if (pcie_override_clk_table[pipemux_idx]) {
+		/*
+		 * Check rc_mode_override again to avoid a hang caused by
+		 * an LCPLL left uninitialized by the CFW.
+		 */
+ reg = (uintptr_t)(PCIE_LCPLL_BASE +
+ PCIE_PIPE_MUX_RC_MODE_OVERRIDE_CFG);
+ val = mmio_read_32(reg);
+ if (val & 0x1)
+ return 0;
+ else
+ return -ENODEV;
+ }
+
+ /* power on PCIe LCPLL and its LDO */
+ reg = (uintptr_t)CRMU_AON_CTRL1;
+ mmio_setbits_32(reg, CRMU_PCIE_LCPLL_PWR_ON_MASK |
+ CRMU_PCIE_LCPLL_PWRON_LDO_MASK);
+ udelay(PCIE_LCPLL_DELAY_US);
+
+ /* remove isolation */
+ mmio_clrbits_32(reg, CRMU_PCIE_LCPLL_ISO_IN_MASK);
+ udelay(PCIE_LCPLL_DELAY_US);
+
+ /* disconnect termination */
+ reg = (uintptr_t)(PCIE_LCPLL_BASE + PCIE_LCPLL_CTRL13_OFFSET);
+ mmio_setbits_32(reg, PCIE_LCPLL_D2C2_TERM_DISC <<
+ PCIE_LCPLL_D2C2_CTRL_SHIFT);
+
+ /* enable CML buf1/2 and D2C2 */
+ reg = (uintptr_t)(PCIE_LCPLL_BASE + PCIE_LCPLL_CTRL3_OFFSET);
+ mmio_setbits_32(reg, PCIE_LCPLL_CM_ENA << PCIE_LCPLL_EN_CTRL_SHIFT);
+
+ /* select diff clock mux out as ref clock */
+ mmio_clrbits_32(reg, PCIE_LCPLL_REF_CLK_MASK);
+
+ /* delay for 500 microseconds per ASIC spec for PCIe LCPLL */
+ udelay(PCIE_LCPLL_DELAY_US);
+
+ /* now bring PCIe LCPLL out of reset */
+ reg = (uintptr_t)(PCIE_LCPLL_BASE + PCIE_LCPLL_CTRL0_OFFSET);
+ mmio_setbits_32(reg, PCIE_LCPLL_RESETB_MASK);
+
+ /* wait for PLL to lock */
+ reg = (uintptr_t)(PCIE_LCPLL_BASE + PCIE_LCPLL_STATUS_OFFSET);
+ do {
+ val = mmio_read_32(reg);
+ if ((val & PCIE_LCPLL_LOCK_MASK) == PCIE_LCPLL_LOCK_MASK) {
+ /* now bring the post divider out of reset */
+ reg = (uintptr_t)(PCIE_LCPLL_BASE +
+ PCIE_LCPLL_CTRL0_OFFSET);
+ mmio_setbits_32(reg, PCIE_LCPLL_P_RESETB_MASK);
+ VERBOSE("PCIe LCPLL locked\n");
+ return 0;
+ }
+ mdelay(1);
+ } while (--timeout);
+
+ ERROR("PCIe LCPLL failed to lock\n");
+ return -EIO;
+}
+#else
+/*
+ * Bring up EXT CLK reference clock for PCIe serdes used in RC mode
+ * XTAL_BYPASS (3 << 0)
+ * INTR_LC_REF (5 << 0)
+ * PD_CML_LC_REF_OUT (1 << 4)
+ * PD_CML_REF_CH_OUT (1 << 8)
+ * CLK_MASTER_SEL (1 << 11)
+ * CLK_MASTER_CTRL_A (1 << 12)
+ * CLK_MASTER_CTRL_B (2 << 14)
+ */
+static const uint16_t pcie_ext_clk[][NUM_OF_PCIE_SERDES] = {
+ /* PIPEMUX = 0, EP 1x16 */
+ {0},
+ /* PIPEMUX = 1, EP 1x8 + RC 1x8, core 7 */
+ {0},
+ /* PIPEMUX = 2, EP 4x4 */
+ {0},
+ /* PIPEMUX = 3, RC 2x8, cores 0, 7 */
+ {0x8803, 0x9115, 0x9115, 0x1115, 0x8803, 0x9115, 0x9115, 0x1115},
+ /* PIPEMUX = 4, RC 4x4, cores 0, 1, 6, 7 */
+	{0x8803, 0x1115, 0x8915, 0x1115, 0x8803, 0x1115, 0x8915, 0x1115},
+ /* PIPEMUX = 5, RC 8x2, all 8 cores */
+	{0x0803, 0x0915, 0x0915, 0x0915, 0x0803, 0x0915, 0x0915, 0x0915},
+ /* PIPEMUX = 6, RC 3x4 + 2x2, cores 0, 2, 3, 6, 7 */
+ {0},
+ /* PIPEMUX = 7, RC 1x4 + 6x2, cores 0, 2, 3, 4, 5, 6, 7 */
+ {0},
+ /* PIPEMUX = 8, EP 1x8 + RC 4x2, cores 4, 5, 6, 7 */
+ {0},
+ /* PIPEMUX = 9, EP 1x8 + RC 2x4, cores 6, 7 */
+ {0},
+ /* PIPEMUX = 10, EP 2x4 + RC 2x4, cores 1, 6 */
+ {0},
+ /* PIPEMUX = 11, EP 2x4 + RC 4x2, cores 2, 3, 4, 5 */
+ {0},
+ /* PIPEMUX = 12, EP 1x4 + RC 6x2, cores 2, 3, 4, 5, 6, 7 */
+ {0},
+	/* PIPEMUX = 13, EP 2x4 + RC 1x4 (core 6) + RC 2x2 (cores 2, 3) */
+ {0},
+};
+
+static void pcie_ext_clk_init(void)
+{
+ unsigned int serdes;
+ uint32_t val;
+
+ for (serdes = 0; serdes < NUM_OF_PCIE_SERDES; serdes++) {
+ val = pcie_ext_clk[pipemux_idx][serdes];
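+		/* a zero entry ends the per-serdes override list */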
+ if (!val)
+ return;
+ mmio_write_32(PCIE_CORE_RESERVED_CFG +
+ serdes * PCIE_CORE_PWR_OFFSET, val);
+ }
+ /* disable CML buf1/2 and enable D2C2 */
+ mmio_clrsetbits_32((PCIE_LCPLL_BASE + PCIE_LCPLL_CTRL3_OFFSET),
+ PCIE_LCPLL_CM_BUF_ENA << PCIE_LCPLL_EN_CTRL_SHIFT,
+ PCIE_LCPLL_D2C2_ENA << PCIE_LCPLL_EN_CTRL_SHIFT);
+ mmio_write_32(PCIE_LCPLL_BASE + PCIE_TX_CLKMASTER_CTRL_OVERRIDE_CFG, 1);
+	INFO("Overriding clocking - using REF clock from PAD\n");
+}
+#endif
+
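+/*
+ * Hook for loading serdes micro-controller (uC) firmware.
+ *
+ * No uC image needs to be loaded at this point, so this is currently a
+ * no-op that reports success.
+ */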
+static int load_uc(unsigned int core_idx)
+{
+ return 0;
+}
+
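+/*
+ * Enable or gate the serdes clocks for a PCIe core.
+ *
+ * Each serdes drives two lanes, so the number of serdes to program is
+ * derived from the configured link width. A gate_clk of 0 enables the
+ * clocks; a non-zero value gates them.
+ */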
+static int paxb_serdes_gate_clock(unsigned int core_idx, int gate_clk)
+{
+ unsigned int link_width, serdes, nr_serdes;
+ uintptr_t pmi_base;
+	uint32_t rdata;
+ uint32_t core_offset = core_idx * PCIE_CORE_PWR_OFFSET;
+
+ link_width = paxb->get_link_width(core_idx);
+ if (!link_width) {
+ ERROR("Unsupported PIPEMUX\n");
+ return -EOPNOTSUPP;
+ }
+
+ nr_serdes = link_width / 2;
+ pmi_base = (uintptr_t)(PCIE_CORE_PMI_CFG_BASE + core_offset);
+
+ for (serdes = 0; serdes < nr_serdes; serdes++) {
+ mmio_write_32(pmi_base, serdes);
+ paxb_pmi_read(core_idx, PMI_ADDR_LANE0(PMI_PLL_CTRL_4), &rdata);
+ if (!gate_clk)
+ rdata |= PMI_SERDES_CLK_ENABLE;
+ else
+ rdata &= ~PMI_SERDES_CLK_ENABLE;
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(PMI_PLL_CTRL_4), rdata);
+ }
+ return 0;
+}
+
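+/*
+ * Per-serdes GEN3 bring-up: enable the micro-controller (uC) clocks,
+ * clear PRAM, mark both lanes ready for commands, then poll until the
+ * uC reports a firmware version and the DSC state machine is ready.
+ */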
+static int paxb_gen3_serdes_init(unsigned int core_idx, uint32_t nr_serdes)
+{
+	uint32_t rdata;
+	uint32_t serdes;
+	uintptr_t pmi_base;
+	unsigned int timeout;
+	unsigned int reg_d230, reg_d267;
+
+	pmi_base = (uintptr_t)(PCIE_CORE_PMI_CFG_BASE +
+			(core_idx * PCIE_CORE_PWR_OFFSET));
+
+	for (serdes = 0; serdes < nr_serdes; serdes++) {
+ /* select the PMI interface */
+ mmio_write_32(pmi_base, serdes);
+
+ /* Clock enable */
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_CLK_CTRL0),
+ 0x3);
+
+ /* Release reset of master */
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_RST_CTRL0),
+ 0x1);
+
+		/* start clearing PRAM memory */
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_AHB_CTRL0),
+ 0x100);
+
+		/* wait for lane 0 PRAM init to complete */
+		timeout = UC_RAM_INIT_TIMEOUT;
+		do {
+			paxb_pmi_read(core_idx,
+				PMI_ADDR_LANE0(UC_A_AHB_STAT0),
+				&rdata);
+			if (rdata & 0x01)
+				break;
+		} while (--timeout);
+
+		if (!timeout)
+			return -EIO;
+
+		/* wait for lane 1 PRAM init to complete */
+		timeout = UC_RAM_INIT_TIMEOUT;
+		do {
+			paxb_pmi_read(core_idx,
+				PMI_ADDR_LANE1(UC_A_AHB_STAT0),
+				&rdata);
+			if (rdata & 0x01)
+				break;
+		} while (--timeout);
+
+		if (!timeout)
+			return -EIO;
+
+		/* PRAM clear done; de-assert the clear request */
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_AHB_CTRL0),
+ 0);
+
+ /* to identify 2 lane serdes */
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_DBG1), 0x1);
+
+ /* De-Assert Pram & master resets */
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_RST_CTRL0),
+ 0x9);
+
+ if (load_uc(core_idx))
+ return -EIO;
+
+		/* mark UC ready for command */
+ paxb_pmi_read(core_idx, PMI_ADDR_LANE0(DSC_UC_CTRL),
+ &rdata);
+ rdata |= DSC_UC_CTRL_RDY_CMD;
+ paxb_pmi_write(core_idx, PMI_ADDR_LANE0(DSC_UC_CTRL),
+ rdata);
+
+ paxb_pmi_read(core_idx, PMI_ADDR_LANE1(DSC_UC_CTRL),
+ &rdata);
+ rdata |= DSC_UC_CTRL_RDY_CMD;
+ paxb_pmi_write(core_idx, PMI_ADDR_LANE1(DSC_UC_CTRL),
+ rdata);
+
+ /* Lane reset */
+ paxb_pmi_write(core_idx,
+ PMI_ADDR_BCAST(LANE_DBG_RST_CTRL), 0x3);
+
+ /* De-Assert Core and Master resets */
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_RST_CTRL0),
+ 0x3);
+
+		/* wait for lane 0 uC to come up ready */
+		timeout = UC_INIT_TIMEOUT;
+		do {
+			paxb_pmi_read(core_idx,
+					PMI_ADDR_LANE0(UC_VERSION_NUM),
+					&reg_d230);
+			paxb_pmi_read(core_idx,
+					PMI_ADDR_LANE0(DSC_SM_CTL22),
+					&reg_d267);
+
+			if (((reg_d230 & 0xffff) != 0) &&
+			    ((reg_d267 & 0xc000) == 0xc000)) {
+				break;
+			}
+			mdelay(1);
+		} while (--timeout);
+
+		if (!timeout)
+			return -EIO;
+
+		/* wait for lane 1 uC to come up ready */
+		timeout = UC_INIT_TIMEOUT;
+		do {
+			paxb_pmi_read(core_idx,
+					PMI_ADDR_LANE1(UC_VERSION_NUM),
+					&reg_d230);
+			paxb_pmi_read(core_idx,
+					PMI_ADDR_LANE1(DSC_SM_CTL22),
+					&reg_d267);
+
+			if (((reg_d230 & 0xffff) != 0) &&
+			    ((reg_d267 & 0xc000) == 0xc000)) {
+				break;
+			}
+			mdelay(1);
+		} while (--timeout);
+
+		if (!timeout)
+			return -EIO;
+ }
+ return 0;
+}
+
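+/*
+ * Return 1 if the given serdes requires the RX termination patch,
+ * i.e., when both the current PIPEMUX selection and the serdes index
+ * match the patch criteria.
+ */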
+static int pcie_serdes_requires_patch(unsigned int serdes_idx)
+{
+ if (pipemux_idx != SERDES_PATCH_PIPEMUX_INDEX)
+ return 0;
+
+ return !!((SERDES_PATCH_INDEX >> serdes_idx) & 0x1);
+}
+
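+/*
+ * Program the serdes TX equalization coefficients (P7 preset) through
+ * broadcast PMI writes.
+ */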
+static void pcie_tx_coeff_p7(unsigned int core_idx)
+{
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(0xd11b), 0x00aa);
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(0xd11c), 0x1155);
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(0xd11d), 0x2449);
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(0xd11e), 0x000f);
+ paxb_pmi_write(core_idx, PMI_ADDR_BCAST(0xd307), 0x0001);
+}
+
+static unsigned int paxb_sr_get_rc_link_width(unsigned int core_idx)
+{
+ return link_width_table[pipemux_idx][core_idx];
+}
+
+static uint32_t paxb_sr_get_rc_link_speed(void)
+{
+ return GEN3_LINK_SPEED;
+}
+
+static int paxb_serdes_init(unsigned int core_idx, unsigned int nr_serdes)
+{
+ uint32_t core_offset = core_idx * PCIE_CORE_PWR_OFFSET;
+ unsigned int serdes;
+ uintptr_t pmi_base;
+ int ret;
+
+ /*
+	 * Each serdes has an x2 link width.
+ *
+ * Use PAXB to patch the serdes for proper RX termination through the
+ * PMI interface
+ */
+ pmi_base = (uintptr_t)(PCIE_CORE_PMI_CFG_BASE + core_offset);
+ for (serdes = 0; serdes < nr_serdes; serdes++) {
+ /* select the PMI interface */
+ mmio_write_32(pmi_base, serdes);
+
+ /* patch Serdes for RX termination */
+ ret = paxb_pmi_write(core_idx, PMI_RX_TERM_SEQ,
+ PMI_RX_TERM_VAL);
+ if (ret)
+ goto err_pmi;
+
+ ret = paxb_pmi_write(core_idx, MERLIN16_PCIE_BLK2_PWRMGMT_7,
+ MERLIN16_PCIE_BLK2_PWRMGMT_7_VAL);
+ if (ret)
+ goto err_pmi;
+
+ ret = paxb_pmi_write(core_idx, MERLIN16_PCIE_BLK2_PWRMGMT_8,
+ MERLIN16_PCIE_BLK2_PWRMGMT_8_VAL);
+ if (ret)
+ goto err_pmi;
+
+ ret = paxb_pmi_write(core_idx, MERLIN16_AMS_TX_CTRL_5,
+ MERLIN16_AMS_TX_CTRL_5_VAL);
+ if (ret)
+ goto err_pmi;
+
+ pcie_tx_coeff_p7(core_idx);
+
+ if (pcie_serdes_requires_patch(serdes)) {
+			if ((core_idx == 0) || (core_idx == 7)) {
+ ret = paxb_pmi_write(core_idx,
+ PMI_X8_CORE0_7_PATCH_SEQ,
+ PMI_X8_CORE0_7_PATCH_VAL);
+ if (ret)
+ goto err_pmi;
+ }
+ }
+ }
+
+ return 0;
+
+err_pmi:
+ ERROR("PCIe PMI write failed\n");
+ return ret;
+}
+
+static int paxb_sr_phy_init(void)
+{
+ int ret;
+ unsigned int core_idx;
+
+#ifndef BOARD_PCIE_EXT_CLK
+ ret = pcie_lcpll_init();
+ if (ret)
+ return ret;
+#else
+ pcie_ext_clk_init();
+#endif
+
+	for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
+		unsigned int link_width;
+
+		if (!pcie_core_needs_enable(core_idx))
+			continue;
+
+		paxb_serdes_gate_clock(core_idx, 0);
+
+ link_width = paxb->get_link_width(core_idx);
+ if (!link_width) {
+ ERROR("Unsupported PIPEMUX\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = paxb_serdes_init(core_idx, link_width / 2);
+ if (ret) {
+ ERROR("PCIe serdes initialization failed for core %u\n",
+ core_idx);
+ return ret;
+ }
+
+		ret = paxb_gen3_serdes_init(core_idx, link_width / 2);
+		if (ret) {
+			ERROR("PCIe GEN3 serdes initialization failed\n");
+			return ret;
+		}
+	}
+ return 0;
+}
+
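+/* Stingray PAXB configuration consumed by the common PAXB driver */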
+const paxb_cfg sr_paxb_cfg = {
+ .type = PAXB_SR,
+ .device_id = SR_B0_DEVICE_ID,
+ .pipemux_init = pipemux_sr_init,
+ .phy_init = paxb_sr_phy_init,
+ .core_needs_enable = paxb_sr_core_needs_enable,
+ .num_cores = NUM_OF_SR_PCIE_CORES,
+ .get_link_width = paxb_sr_get_rc_link_width,
+ .get_link_speed = paxb_sr_get_rc_link_speed,
+};
+
+const paxb_cfg *paxb_get_sr_config(void)
+{
+ return &sr_paxb_cfg;
+}
diff --git a/plat/brcm/board/stingray/src/topology.c b/plat/brcm/board/stingray/src/topology.c
new file mode 100644
index 0000000..24718e5
--- /dev/null
+++ b/plat/brcm/board/stingray/src/topology.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019-2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <stdint.h>
+
+#include <plat_brcm.h>
+#include <platform_def.h>
+
+/*
+ * On Stingray, the system power level is the highest power level.
+ * The first entry in the power domain descriptor specifies the
+ * number of system power domains i.e. 1.
+ */
+#define SR_PWR_DOMAINS_AT_MAX_PWR_LVL 1
+
+/*
+ * The Stingray power domain tree descriptor. The cluster power domains
+ * are arranged so that when the PSCI generic code creates the power
+ * domain tree, the indices of the CPU power domain nodes it allocates
+ * match the linear indices returned by plat_core_pos_by_mpidr()
+ * i.e. CLUSTER0 CPUs are allocated indices from 0 to 1 and the higher
+ * indices for other Cluster CPUs.
+ */
+const unsigned char sr_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ SR_PWR_DOMAINS_AT_MAX_PWR_LVL,
+ /* No of children for the root node */
+ BRCM_CLUSTER_COUNT,
+ /* No of children for the first cluster node */
+ PLATFORM_CLUSTER0_CORE_COUNT,
+ /* No of children for the second cluster node */
+ PLATFORM_CLUSTER1_CORE_COUNT,
+ /* No of children for the third cluster node */
+ PLATFORM_CLUSTER2_CORE_COUNT,
+ /* No of children for the fourth cluster node */
+ PLATFORM_CLUSTER3_CORE_COUNT,
+};
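+
+/*
+ * Example: with two cores per cluster, the CPU in cluster 1, core 0
+ * gets linear index 2 from the PSCI generic code, which matches the
+ * value computed by plat_core_pos_by_mpidr() below.
+ */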
+
+/*******************************************************************************
+ * This function returns the Stingray topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+ return sr_power_domain_tree_desc;
+}
+
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+ return plat_brcm_calc_core_pos(mpidr);
+}
diff --git a/plat/brcm/board/stingray/src/tz_sec.c b/plat/brcm/board/stingray/src/tz_sec.c
new file mode 100644
index 0000000..07b12a7
--- /dev/null
+++ b/plat/brcm/board/stingray/src/tz_sec.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2016 - 2020, Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <drivers/arm/tzc400.h>
+#include <lib/mmio.h>
+
+#include <cmn_sec.h>
+#include <platform_def.h>
+
+/*
+ * Trust Zone controllers
+ */
+#define TZC400_FS_SRAM_ROOT 0x66d84000
+
+/*
+ * TZPC Master configure registers
+ */
+
+/* TZPC_TZPCDECPROT0set */
+#define TZPC0_MASTER_NS_BASE 0x68b40804
+#define TZPC0_SATA3_BIT 5
+#define TZPC0_SATA2_BIT 4
+#define TZPC0_SATA1_BIT 3
+#define TZPC0_SATA0_BIT 2
+#define TZPC0_USB3H1_BIT 1
+#define TZPC0_USB3H0_BIT 0
+#define TZPC0_MASTER_SEC_DEFAULT 0
+
+/* TZPC_TZPCDECPROT1set */
+#define TZPC1_MASTER_NS_BASE 0x68b40810
+#define TZPC1_SDIO1_BIT 6
+#define TZPC1_SDIO0_BIT 5
+#define TZPC1_AUDIO0_BIT 4
+#define TZPC1_USB2D_BIT 3
+#define TZPC1_USB2H1_BIT 2
+#define TZPC1_USB2H0_BIT 1
+#define TZPC1_AMAC0_BIT 0
+#define TZPC1_MASTER_SEC_DEFAULT 0
+
+
+struct tz_sec_desc {
+ uintptr_t addr;
+ uint32_t val;
+};
+
+static const struct tz_sec_desc tz_master_defaults[] = {
+{ TZPC0_MASTER_NS_BASE, TZPC0_MASTER_SEC_DEFAULT },
+{ TZPC1_MASTER_NS_BASE, TZPC1_MASTER_SEC_DEFAULT }
+};
+
+/*
+ * Initialize the TrustZone Controller for SRAM partitioning.
+ */
+static void bcm_tzc_setup(void)
+{
+ VERBOSE("Configuring SRAM TrustZone Controller\n");
+
+ /* Init the TZASC controller */
+ tzc400_init(TZC400_FS_SRAM_ROOT);
+
+ /*
+ * Close the entire SRAM space
+ * Region 0 covers the entire SRAM space
+	 * No NS device can access it.
+ */
+ tzc400_configure_region0(TZC_REGION_S_RDWR, 0);
+
+	/* Raise an exception if an NS device tries to access secure memory */
+ tzc400_set_action(TZC_ACTION_ERR);
+}
+
+/*
+ * Configure TZ Master as NS_MASTER or SECURE_MASTER
+ * To set a Master to non-secure, use *_SET registers
+ * To set a Master to secure, use *_CLR registers (set + 0x4 address)
+ */
+static void tz_master_set(uint32_t base, uint32_t value, uint32_t ns)
+{
+ if (ns == SECURE_MASTER) {
+ mmio_write_32(base + 4, value);
+ } else {
+ mmio_write_32(base, value);
+ }
+}
+
+/*
+ * Initialize the secure environment for sdio.
+ */
+void plat_tz_sdio_ns_master_set(uint32_t ns)
+{
+ tz_master_set(TZPC1_MASTER_NS_BASE,
+ 1 << TZPC1_SDIO0_BIT,
+ ns);
+}
+
+/*
+ * Initialize the secure environment for usb.
+ */
+void plat_tz_usb_ns_master_set(uint32_t ns)
+{
+ tz_master_set(TZPC1_MASTER_NS_BASE,
+ 1 << TZPC1_USB2H0_BIT,
+ ns);
+}
+
+/*
+ * Set masters to default configuration.
+ *
+ * DMA security settings are programmed into the PL-330 controller and
+ * are not set by iProc TZPC registers.
+ * DMA always comes up as a secure master (its *NS bit is 0).
+ *
+ * Because the default reset values of TZPC are 0 (== Secure),
+ * ARM Verilog code makes all masters, including PCIe, come up as
+ * secure.
+ * However, SOTP has a bit called SOTP_ALLMASTER_NS that overrides
+ * TZPC and makes all masters non-secure for AB devices.
+ *
+ * Hence we first set all the TZPC bits to program all masters,
+ * including PCIe, as non-secure, then set the CLEAR_ALLMASTER_NS bit
+ * so that SOTP_ALLMASTER_NS cannot override TZPC.
+ * The security settings for each master then come from TZPC
+ * (which makes all masters other than DMA non-secure).
+ *
+ * During boot, all masters other than the DMA controller are
+ * non-secure in an AB Prod/AB Dev/AB Pending device.
+ */
+void plat_tz_master_default_cfg(void)
+{
+ int i;
+
+ /* Configure default secure and non-secure TZ Masters */
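+	/*
+	 * Masters set in 'val' are programmed as secure (via the CLR
+	 * register); the complement is programmed as non-secure (via
+	 * the SET register).
+	 */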
+ for (i = 0; i < ARRAY_SIZE(tz_master_defaults); i++) {
+ tz_master_set(tz_master_defaults[i].addr,
+ tz_master_defaults[i].val,
+ SECURE_MASTER);
+ tz_master_set(tz_master_defaults[i].addr,
+ ~tz_master_defaults[i].val,
+ NS_MASTER);
+ }
+
+ /* Clear all master NS */
+ mmio_setbits_32(SOTP_CHIP_CTRL,
+ 1 << SOTP_CLEAR_SYSCTRL_ALL_MASTER_NS);
+
+ /* Initialize TZ controller and Set SRAM to secure */
+ bcm_tzc_setup();
+}