author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-21 17:43:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-21 17:43:51 +0000
commit     be58c81aff4cd4c0ccf43dbd7998da4a6a08c03b (patch)
tree       779c248fb61c83f65d1f0dc867f2053d76b4e03a /plat/qemu/qemu_sbsa
parent     Initial commit. (diff)
download   arm-trusted-firmware-be58c81aff4cd4c0ccf43dbd7998da4a6a08c03b.tar.xz
           arm-trusted-firmware-be58c81aff4cd4c0ccf43dbd7998da4a6a08c03b.zip

Adding upstream version 2.10.0+dfsg. (tags: upstream/2.10.0+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'plat/qemu/qemu_sbsa')
-rw-r--r--   plat/qemu/qemu_sbsa/include/platform_def.h   380
-rw-r--r--   plat/qemu/qemu_sbsa/platform.mk                65
-rw-r--r--   plat/qemu/qemu_sbsa/sbsa_gic.c                 67
-rw-r--r--   plat/qemu/qemu_sbsa/sbsa_pm.c                 237
-rw-r--r--   plat/qemu/qemu_sbsa/sbsa_private.h             17
-rw-r--r--   plat/qemu/qemu_sbsa/sbsa_sip_svc.c            186
-rw-r--r--   plat/qemu/qemu_sbsa/sbsa_topology.c            63
7 files changed, 1015 insertions, 0 deletions
diff --git a/plat/qemu/qemu_sbsa/include/platform_def.h b/plat/qemu/qemu_sbsa/include/platform_def.h
new file mode 100644
index 0000000..14030e3
--- /dev/null
+++ b/plat/qemu/qemu_sbsa/include/platform_def.h
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2019-2020, Linaro Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <arch.h>
+#include <plat/common/common_def.h>
+#include <tbbr_img_def.h>
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define QEMU_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL
+
+#define PLATFORM_STACK_SIZE 0x1000
+
+#define PLATFORM_MAX_CPUS_PER_CLUSTER U(8)
+/*
+ * Define the number of cores per cluster used in calculating core position.
+ * The cluster number is shifted by this value and added to the core ID,
+ * so its value represents log2(cores/cluster).
+ * Default is 2**(3) = 8 cores per cluster.
+ */
+#define PLATFORM_CPU_PER_CLUSTER_SHIFT U(3)
+#define PLATFORM_CLUSTER_COUNT U(64)
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER_COUNT * \
+ PLATFORM_MAX_CPUS_PER_CLUSTER)
+#define QEMU_PRIMARY_CPU U(0)
+
+#define PLAT_NUM_PWR_DOMAINS (PLATFORM_CLUSTER_COUNT + \
+ PLATFORM_CORE_COUNT)
+#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL1
+
+#define PLAT_MAX_RET_STATE 1
+#define PLAT_MAX_OFF_STATE 2
+
+/* Local power state for power domains in Run state. */
+#define PLAT_LOCAL_STATE_RUN 0
+/* Local power state for retention. Valid only for CPU power domains */
+#define PLAT_LOCAL_STATE_RET 1
+/*
+ * Local power state for OFF/power-down. Valid for CPU and cluster power
+ * domains.
+ */
+#define PLAT_LOCAL_STATE_OFF 2
+
+/*
+ * Macros used to parse state information from State-ID if it is using the
+ * recommended encoding for State-ID.
+ */
+#define PLAT_LOCAL_PSTATE_WIDTH 4
+#define PLAT_LOCAL_PSTATE_MASK ((1 << PLAT_LOCAL_PSTATE_WIDTH) - 1)
+
+/*
+ * Some data must be aligned on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ */
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+/*
+ * Partition memory into secure ROM, non-secure DRAM, secure "SRAM",
+ * and secure DRAM.
+ */
+#define SEC_ROM_BASE 0x00000000
+#define SEC_ROM_SIZE 0x00020000
+
+#define NS_DRAM0_BASE 0x10000000000ULL
+#define NS_DRAM0_SIZE 0x00020000000
+
+#define SEC_SRAM_BASE 0x20000000
+#define SEC_SRAM_SIZE 0x20000000
+
+/*
+ * RAD: just placeholders, to be chosen after finalizing the memory map
+ */
+#define SEC_DRAM_BASE 0x1000
+#define SEC_DRAM_SIZE 0x1000
+
+/* Load pageable part of OP-TEE 2MB above secure DRAM base */
+#define QEMU_OPTEE_PAGEABLE_LOAD_BASE (SEC_DRAM_BASE + 0x00200000)
+#define QEMU_OPTEE_PAGEABLE_LOAD_SIZE 0x00400000
+
+/*
+ * ARM-TF lives in SRAM, partition it here
+ */
+
+#define SHARED_RAM_BASE SEC_SRAM_BASE
+#define SHARED_RAM_SIZE 0x00002000
+
+#define PLAT_QEMU_TRUSTED_MAILBOX_BASE SHARED_RAM_BASE
+#define PLAT_QEMU_TRUSTED_MAILBOX_SIZE (8 + PLAT_QEMU_HOLD_SIZE)
+#define PLAT_QEMU_HOLD_BASE (PLAT_QEMU_TRUSTED_MAILBOX_BASE + 8)
+#define PLAT_QEMU_HOLD_SIZE (PLATFORM_CORE_COUNT * \
+ PLAT_QEMU_HOLD_ENTRY_SIZE)
+#define PLAT_QEMU_HOLD_ENTRY_SHIFT 3
+#define PLAT_QEMU_HOLD_ENTRY_SIZE (1 << PLAT_QEMU_HOLD_ENTRY_SHIFT)
+#define PLAT_QEMU_HOLD_STATE_WAIT 0
+#define PLAT_QEMU_HOLD_STATE_GO 1
+
+#define BL_RAM_BASE (SHARED_RAM_BASE + SHARED_RAM_SIZE)
+#define BL_RAM_SIZE (SEC_SRAM_SIZE - SHARED_RAM_SIZE)
+
+/*
+ * BL1 specific defines.
+ *
+ * BL1 RW data is relocated from ROM to RAM at runtime so we need 2 sets of
+ * addresses.
+ * Put BL1 RW at the top of the Secure SRAM. BL1_RW_BASE is calculated using
+ * the current BL1 RW debug size plus a little space for growth.
+ */
+#define BL1_SIZE 0x12000
+#define BL1_RO_BASE SEC_ROM_BASE
+#define BL1_RO_LIMIT (SEC_ROM_BASE + SEC_ROM_SIZE)
+#define BL1_RW_BASE (BL1_RW_LIMIT - BL1_SIZE)
+#define BL1_RW_LIMIT (BL_RAM_BASE + BL_RAM_SIZE)
+
+/*
+ * BL2 specific defines.
+ *
+ * Put BL2 just below BL3-1. BL2_BASE is calculated using the current BL2 debug
+ * size plus a little space for growth.
+ */
+#define BL2_SIZE 0x1D000
+#define BL2_BASE (BL31_BASE - BL2_SIZE)
+#define BL2_LIMIT BL31_BASE
+
+/*
+ * BL3-1 specific defines.
+ *
+ * Put BL3-1 at the top of the Trusted SRAM. BL31_BASE is calculated using the
+ * current BL3-1 debug size plus a little space for growth.
+ */
+#define BL31_SIZE 0x300000
+#define BL31_BASE (BL31_LIMIT - BL31_SIZE)
+#define BL31_LIMIT (BL1_RW_BASE)
+#define BL31_PROGBITS_LIMIT BL1_RW_BASE
+
+
+/*
+ * BL3-2 specific defines.
+ *
+ * BL3-2 can execute from Secure SRAM, or Secure DRAM.
+ */
+#define BL32_SRAM_BASE BL_RAM_BASE
+#define BL32_SRAM_LIMIT BL2_BASE
+
+#define BL32_MEM_BASE BL_RAM_BASE
+#define BL32_MEM_SIZE (BL_RAM_SIZE - BL1_SIZE - \
+ BL2_SIZE - BL31_SIZE)
+#define BL32_BASE BL32_SRAM_BASE
+#define BL32_LIMIT BL32_SRAM_LIMIT
+
+#define NS_IMAGE_OFFSET (NS_DRAM0_BASE + 0x20000000)
+#define NS_IMAGE_MAX_SIZE (NS_DRAM0_SIZE - 0x20000000)
+
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 42)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 42)
+#if SPM_MM
+#define MAX_MMAP_REGIONS 12
+#define MAX_XLAT_TABLES 12
+#else
+#define MAX_MMAP_REGIONS 11
+#define MAX_XLAT_TABLES 11
+#endif
+#define MAX_IO_DEVICES 3
+#define MAX_IO_HANDLES 4
+
+#if SPM_MM && defined(IMAGE_BL31)
+# define PLAT_SP_IMAGE_MMAP_REGIONS 30
+# define PLAT_SP_IMAGE_MAX_XLAT_TABLES 50
+#endif
+
+/*
+ * PL011 related constants
+ */
+#define UART0_BASE 0x60000000
+#define UART1_BASE 0x60030000
+#define UART0_CLK_IN_HZ 1
+#define UART1_CLK_IN_HZ 1
+
+/* Secure UART */
+#define UART2_BASE 0x60040000
+#define UART2_CLK_IN_HZ 1
+
+#define PLAT_QEMU_BOOT_UART_BASE UART0_BASE
+#define PLAT_QEMU_BOOT_UART_CLK_IN_HZ UART0_CLK_IN_HZ
+
+#define PLAT_QEMU_CRASH_UART_BASE UART1_BASE
+#define PLAT_QEMU_CRASH_UART_CLK_IN_HZ UART1_CLK_IN_HZ
+
+#define PLAT_QEMU_CONSOLE_BAUDRATE 115200
+
+#define QEMU_FLASH0_BASE 0x00000000
+#define QEMU_FLASH0_SIZE 0x10000000
+#define QEMU_FLASH1_BASE 0x10000000
+#define QEMU_FLASH1_SIZE 0x10000000
+
+#define PLAT_QEMU_FIP_BASE BL1_SIZE
+#define PLAT_QEMU_FIP_MAX_SIZE 0x00400000
+
+/* This maps from GIC_DIST up to the last CPU's (255) GIC_REDISTR */
+#define DEVICE0_BASE 0x40000000
+#define DEVICE0_SIZE 0x04080000
+/* This maps from NORMAL_UART up to SECURE_UART_MM */
+#define DEVICE1_BASE 0x60000000
+#define DEVICE1_SIZE 0x10041000
+/* This is a map for SECURE_EC */
+#define DEVICE2_BASE 0x50000000
+#define DEVICE2_SIZE 0x00001000
+
+/*
+ * GIC related constants
+ * We use GICv3, where the CPU interface registers are not memory-mapped.
+ *
+ * Legacy values - on platform version 0.1+ they are read from the DT.
+ */
+#define GICD_BASE 0x40060000
+#define GICR_BASE 0x40080000
+#define GICC_BASE 0x0
+
+#define QEMU_IRQ_SEC_SGI_0 8
+#define QEMU_IRQ_SEC_SGI_1 9
+#define QEMU_IRQ_SEC_SGI_2 10
+#define QEMU_IRQ_SEC_SGI_3 11
+#define QEMU_IRQ_SEC_SGI_4 12
+#define QEMU_IRQ_SEC_SGI_5 13
+#define QEMU_IRQ_SEC_SGI_6 14
+#define QEMU_IRQ_SEC_SGI_7 15
+
+/******************************************************************************
+ * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
+ * interrupts.
+ *****************************************************************************/
+#define PLATFORM_G1S_PROPS(grp) \
+ INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, \
+ grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, \
+ grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, \
+ grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, \
+ grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, \
+ grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, \
+ grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, \
+ grp, GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, \
+ grp, GIC_INTR_CFG_EDGE)
+
+#define PLATFORM_G0_PROPS(grp)
+
+/*
+ * DT related constants
+ */
+#define PLAT_QEMU_DT_BASE NS_DRAM0_BASE
+#define PLAT_QEMU_DT_MAX_SIZE 0x100000
+
+/*
+ * System counter
+ */
+#define SYS_COUNTER_FREQ_IN_TICKS ((1000 * 1000 * 1000) / 16)
+
+#if SPM_MM
+#define PLAT_QEMU_SP_IMAGE_BASE BL_RAM_BASE
+#define PLAT_QEMU_SP_IMAGE_SIZE ULL(0x300000)
+
+#ifdef IMAGE_BL2
+/* In BL2 all memory allocated to the SPM Payload image is marked as RW. */
+# define QEMU_SP_IMAGE_MMAP MAP_REGION_FLAT( \
+ PLAT_QEMU_SP_IMAGE_BASE, \
+ PLAT_QEMU_SP_IMAGE_SIZE, \
+ MT_MEMORY | MT_RW | \
+ MT_SECURE)
+#elif IMAGE_BL31
+/* All SPM Payload memory is marked as code in S-EL0 */
+# define QEMU_SP_IMAGE_MMAP MAP_REGION2(PLAT_QEMU_SP_IMAGE_BASE, \
+ PLAT_QEMU_SP_IMAGE_BASE, \
+ PLAT_QEMU_SP_IMAGE_SIZE, \
+ MT_CODE | MT_SECURE | \
+ MT_USER, \
+ PAGE_SIZE)
+#endif
+
+/*
+ * EL3 -> S-EL0 secure shared memory
+ */
+#define PLAT_SPM_BUF_PCPU_SIZE ULL(0x10000)
+#define PLAT_SPM_BUF_SIZE (PLATFORM_CORE_COUNT * \
+ PLAT_SPM_BUF_PCPU_SIZE)
+#define PLAT_SPM_BUF_BASE (BL32_LIMIT - PLAT_SPM_BUF_SIZE)
+
+#define QEMU_SPM_BUF_EL3_MMAP MAP_REGION_FLAT(PLAT_SPM_BUF_BASE, \
+ PLAT_SPM_BUF_SIZE, \
+ MT_RW_DATA | MT_SECURE)
+
+#define QEMU_SPM_BUF_EL0_MMAP MAP_REGION2(PLAT_SPM_BUF_BASE, \
+ PLAT_SPM_BUF_BASE, \
+ PLAT_SPM_BUF_SIZE, \
+ MT_RO_DATA | MT_SECURE | \
+ MT_USER, \
+ PAGE_SIZE)
+
+/*
+ * Shared memory between Normal world and S-EL0 for
+ * passing data during service requests. It will be marked as RW and NS.
+ * This buffer is allocated at the top of NS_DRAM; the base address is
+ * overridden during SPM initialization.
+ */
+#define PLAT_QEMU_SP_IMAGE_NS_BUF_BASE (PLAT_QEMU_DT_BASE + \
+ PLAT_QEMU_DT_MAX_SIZE)
+#define PLAT_QEMU_SP_IMAGE_NS_BUF_SIZE ULL(0x200000)
+
+#define QEMU_SP_IMAGE_NS_BUF_MMAP MAP_REGION2( \
+ PLAT_QEMU_SP_IMAGE_NS_BUF_BASE, \
+ PLAT_QEMU_SP_IMAGE_NS_BUF_BASE, \
+ PLAT_QEMU_SP_IMAGE_NS_BUF_SIZE, \
+ MT_RW_DATA | MT_NS | \
+ MT_USER, \
+ PAGE_SIZE)
+
+#define PLAT_SP_IMAGE_NS_BUF_BASE PLAT_QEMU_SP_IMAGE_NS_BUF_BASE
+#define PLAT_SP_IMAGE_NS_BUF_SIZE PLAT_QEMU_SP_IMAGE_NS_BUF_SIZE
+
+#define PLAT_QEMU_SP_IMAGE_HEAP_BASE (PLAT_QEMU_SP_IMAGE_BASE + \
+ PLAT_QEMU_SP_IMAGE_SIZE)
+#define PLAT_QEMU_SP_IMAGE_HEAP_SIZE ULL(0x800000)
+
+#define PLAT_SP_IMAGE_STACK_BASE (PLAT_QEMU_SP_IMAGE_HEAP_BASE + \
+ PLAT_QEMU_SP_IMAGE_HEAP_SIZE)
+#define PLAT_SP_IMAGE_STACK_PCPU_SIZE ULL(0x10000)
+#define QEMU_SP_IMAGE_STACK_TOTAL_SIZE (PLATFORM_CORE_COUNT * \
+ PLAT_SP_IMAGE_STACK_PCPU_SIZE)
+
+#define QEMU_SP_IMAGE_RW_MMAP MAP_REGION2( \
+ PLAT_QEMU_SP_IMAGE_HEAP_BASE, \
+ PLAT_QEMU_SP_IMAGE_HEAP_BASE, \
+ (QEMU_SP_IMAGE_STACK_TOTAL_SIZE + \
+ PLAT_QEMU_SP_IMAGE_HEAP_SIZE), \
+ MT_RW_DATA | MT_SECURE | \
+ MT_USER, \
+ PAGE_SIZE)
+
+/*
+ * Secure variable storage is located in Secure Flash.
+ */
+#if SPM_MM
+#define QEMU_SECURE_VARSTORE_BASE 0x01000000
+#define QEMU_SECURE_VARSTORE_SIZE 0x00100000
+#define MAP_SECURE_VARSTORE MAP_REGION_FLAT( \
+ QEMU_SECURE_VARSTORE_BASE, \
+ QEMU_SECURE_VARSTORE_SIZE, \
+ MT_DEVICE | MT_RW | \
+ MT_SECURE | MT_USER)
+#endif
+
+/* Total number of memory regions with distinct properties */
+#define PLAT_QEMU_SP_IMAGE_NUM_MEM_REGIONS 6
+
+/*
+ * Name of the section to put the translation tables used by the S-EL1/S-EL0
+ * context of a Secure Partition.
+ */
+#define PLAT_SP_IMAGE_XLAT_SECTION_NAME ".qemu_sp_xlat_table"
+#define PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME ".qemu_sp_xlat_table"
+
+/* Cookies passed to the Secure Partition at boot. Not used by QEMU platforms.*/
+#define PLAT_SPM_COOKIE_0 ULL(0)
+#define PLAT_SPM_COOKIE_1 ULL(0)
+#endif
+
+#define QEMU_PRI_BITS 2
+#define PLAT_SP_PRI 0x20
+
+#endif /* PLATFORM_DEF_H */
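
The PLAT_LOCAL_PSTATE_WIDTH/MASK macros above implement the recommended State-ID encoding: each power level owns a 4-bit field, with level 0 in the least significant bits. A minimal standalone sketch of the decoding follows; the constants are copied so it compiles on its own, and the test harness itself is illustrative rather than part of the patch. It performs the same walk that qemu_validate_power_state() in sbsa_pm.c (later in this diff) uses to fill req_state->pwr_domain_state[].

/* Illustrative decode of a recommended-encoding State-ID; constants mirror
 * the platform_def.h values above. */
#include <stdio.h>

#define PLAT_LOCAL_PSTATE_WIDTH	4
#define PLAT_LOCAL_PSTATE_MASK	((1 << PLAT_LOCAL_PSTATE_WIDTH) - 1)

int main(void)
{
	unsigned int state_id = 0x22;	/* cluster OFF (2) | core OFF (2) */
	unsigned int lvl = 0;

	while (state_id != 0) {
		printf("power level %u: local state %u\n",
		       lvl, state_id & PLAT_LOCAL_PSTATE_MASK);
		state_id >>= PLAT_LOCAL_PSTATE_WIDTH;
		lvl++;
	}
	return 0;
}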
diff --git a/plat/qemu/qemu_sbsa/platform.mk b/plat/qemu/qemu_sbsa/platform.mk
new file mode 100644
index 0000000..528e093
--- /dev/null
+++ b/plat/qemu/qemu_sbsa/platform.mk
@@ -0,0 +1,65 @@
+#
+# Copyright (c) 2019-2023, Linaro Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_QEMU_PATH := plat/qemu/qemu_sbsa
+PLAT_QEMU_COMMON_PATH := plat/qemu/common
+
+CRASH_REPORTING := 1
+
+# Disable the PSCI platform compatibility layer
+ENABLE_PLAT_COMPAT := 0
+
+SEPARATE_CODE_AND_RODATA := 1
+ENABLE_STACK_PROTECTOR := 0
+
+ifeq (${SPM_MM},1)
+NEED_BL32 := yes
+EL3_EXCEPTION_HANDLING := 1
+endif
+
+include plat/qemu/common/common.mk
+
+# Enable new version of image loading on QEMU platforms
+LOAD_IMAGE_V2 := 1
+
+ifeq ($(NEED_BL32),yes)
+$(eval $(call add_define,QEMU_LOAD_BL32))
+endif
+
+BL2_SOURCES += $(LIBFDT_SRCS)
+
+# Include GICv3 driver files
+include drivers/arm/gic/v3/gicv3.mk
+
+QEMU_GIC_SOURCES := ${GICV3_SOURCES} \
+ plat/common/plat_gicv3.c
+
+BL31_SOURCES += ${PLAT_QEMU_PATH}/sbsa_gic.c \
+ ${PLAT_QEMU_PATH}/sbsa_pm.c \
+ ${PLAT_QEMU_PATH}/sbsa_sip_svc.c \
+ ${PLAT_QEMU_PATH}/sbsa_topology.c
+
+BL31_SOURCES += ${FDT_WRAPPERS_SOURCES}
+
+ifeq (${SPM_MM},1)
+ BL31_SOURCES += ${PLAT_QEMU_COMMON_PATH}/qemu_spm.c
+endif
+
+# Use known base for UEFI if not given from command line
+# By default BL33 is at FLASH1 base
+PRELOADED_BL33_BASE ?= 0x10000000
+
+# The QEMU SBSA platform only supports SEC_SRAM
+BL32_RAM_LOCATION_ID = SEC_SRAM_ID
+$(eval $(call add_define,BL32_RAM_LOCATION_ID))
+
+# Don't have the Linux kernel as a BL33 image by default
+ARM_LINUX_KERNEL_AS_BL33 := 0
+$(eval $(call assert_boolean,ARM_LINUX_KERNEL_AS_BL33))
+$(eval $(call add_define,ARM_LINUX_KERNEL_AS_BL33))
+
+ARM_PRELOADED_DTB_BASE := PLAT_QEMU_DT_BASE
+$(eval $(call add_define,ARM_PRELOADED_DTB_BASE))
diff --git a/plat/qemu/qemu_sbsa/sbsa_gic.c b/plat/qemu/qemu_sbsa/sbsa_gic.c
new file mode 100644
index 0000000..962dbb3
--- /dev/null
+++ b/plat/qemu/qemu_sbsa/sbsa_gic.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2023, Linaro Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/arm/gicv3.h>
+#include <plat/common/platform.h>
+
+static const interrupt_prop_t qemu_interrupt_props[] = {
+ PLATFORM_G1S_PROPS(INTR_GROUP1S),
+ PLATFORM_G0_PROPS(INTR_GROUP0)
+};
+
+static uintptr_t qemu_rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+static unsigned int qemu_mpidr_to_core_pos(unsigned long mpidr)
+{
+ return plat_core_pos_by_mpidr(mpidr);
+}
+
+static gicv3_driver_data_t sbsa_gic_driver_data = {
+ /* we set those two values for compatibility with older QEMU */
+ .gicd_base = GICD_BASE,
+ .gicr_base = GICR_BASE,
+ .interrupt_props = qemu_interrupt_props,
+ .interrupt_props_num = ARRAY_SIZE(qemu_interrupt_props),
+ .rdistif_num = PLATFORM_CORE_COUNT,
+ .rdistif_base_addrs = qemu_rdistif_base_addrs,
+ .mpidr_to_core_pos = qemu_mpidr_to_core_pos
+};
+
+void sbsa_set_gic_bases(const uintptr_t gicd_base, const uintptr_t gicr_base)
+{
+ sbsa_gic_driver_data.gicd_base = gicd_base;
+ sbsa_gic_driver_data.gicr_base = gicr_base;
+}
+
+uintptr_t sbsa_get_gicd(void)
+{
+ return sbsa_gic_driver_data.gicd_base;
+}
+
+uintptr_t sbsa_get_gicr(void)
+{
+ return sbsa_gic_driver_data.gicr_base;
+}
+
+void plat_qemu_gic_init(void)
+{
+ gicv3_driver_init(&sbsa_gic_driver_data);
+ gicv3_distif_init();
+ gicv3_rdistif_init(plat_my_core_pos());
+ gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+void qemu_pwr_gic_on_finish(void)
+{
+ gicv3_rdistif_init(plat_my_core_pos());
+ gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+void qemu_pwr_gic_off(void)
+{
+ gicv3_cpuif_disable(plat_my_core_pos());
+ gicv3_rdistif_off(plat_my_core_pos());
+}
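
The driver data above starts with the legacy GICD_BASE/GICR_BASE values and exposes sbsa_set_gic_bases() so they can be replaced with addresses discovered at runtime (the SiP service later in this patch reads them from the device tree). A hedged sketch of the intended call order; the wrapper function and the exact boot sequence are assumptions for illustration, not taken from this diff.

/* Assumed call order: override the legacy GIC bases with values read from
 * the device tree, then bring up the GICv3 driver on the boot CPU. */
static void example_gic_bringup(uintptr_t dt_gicd, uintptr_t dt_gicr)
{
	sbsa_set_gic_bases(dt_gicd, dt_gicr);	/* replaces GICD_BASE/GICR_BASE */
	plat_qemu_gic_init();			/* distributor, this CPU's redistributor, CPU i/f */
}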
diff --git a/plat/qemu/qemu_sbsa/sbsa_pm.c b/plat/qemu/qemu_sbsa/sbsa_pm.c
new file mode 100644
index 0000000..8d1e1d4
--- /dev/null
+++ b/plat/qemu/qemu_sbsa/sbsa_pm.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2020, Nuvia Inc
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <plat/common/platform.h>
+
+#include <platform_def.h>
+#include "sbsa_private.h"
+
+#define ADP_STOPPED_APPLICATION_EXIT 0x20026
+
+/*
+ * Define offset and commands for the fake EC device
+ */
+#define SBSA_SECURE_EC_OFFSET 0x50000000
+
+#define SBSA_SECURE_EC_CMD_SHUTDOWN 0x01
+#define SBSA_SECURE_EC_CMD_REBOOT 0x02
+
+/*
+ * The secure entry point to be used on warm reset.
+ */
+static unsigned long secure_entrypoint;
+
+/* Make the composite power state parameter up to power level 0 */
+#if PSCI_EXTENDED_STATE_ID
+
+#define qemu_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
+ (((lvl0_state) << PSTATE_ID_SHIFT) | \
+ ((type) << PSTATE_TYPE_SHIFT))
+#else
+#define qemu_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
+ (((lvl0_state) << PSTATE_ID_SHIFT) | \
+ ((pwr_lvl) << PSTATE_PWR_LVL_SHIFT) | \
+ ((type) << PSTATE_TYPE_SHIFT))
+#endif /* PSCI_EXTENDED_STATE_ID */
+
+
+#define qemu_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type) \
+ (((lvl1_state) << PLAT_LOCAL_PSTATE_WIDTH) | \
+ qemu_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type))
+
+
+
+/*
+ * The table storing the valid idle power states. Ensure that the
+ * array entries are populated in ascending order of state-id to
+ * enable us to use binary search during power state validation.
+ * The table must be terminated by a NULL entry.
+ */
+static const unsigned int qemu_pm_idle_states[] = {
+ /* State-id - 0x01 */
+ qemu_make_pwrstate_lvl1(PLAT_LOCAL_STATE_RUN, PLAT_LOCAL_STATE_RET,
+ MPIDR_AFFLVL0, PSTATE_TYPE_STANDBY),
+ /* State-id - 0x02 */
+ qemu_make_pwrstate_lvl1(PLAT_LOCAL_STATE_RUN, PLAT_LOCAL_STATE_OFF,
+ MPIDR_AFFLVL0, PSTATE_TYPE_POWERDOWN),
+ /* State-id - 0x22 */
+ qemu_make_pwrstate_lvl1(PLAT_LOCAL_STATE_OFF, PLAT_LOCAL_STATE_OFF,
+ MPIDR_AFFLVL1, PSTATE_TYPE_POWERDOWN),
+ 0
+};
+
+/*******************************************************************************
+ * Platform handler called to check the validity of the power state
+ * parameter. The power state parameter has to be a composite power state.
+ ******************************************************************************/
+static int qemu_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ unsigned int state_id;
+ unsigned int i;
+
+ assert(req_state != NULL);
+
+ /*
+ * Currently we are using a linear search for finding the matching
+ * entry in the idle power state array. This can be made a binary
+ * search if the number of entries justifies the additional complexity.
+ */
+ for (i = 0U; qemu_pm_idle_states[i] != 0U; i++) {
+ if (power_state == qemu_pm_idle_states[i]) {
+ break;
+ }
+ }
+
+ /* Return error if entry not found in the idle state array */
+ if (qemu_pm_idle_states[i] == 0U) {
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ i = 0U;
+ state_id = psci_get_pstate_id(power_state);
+
+ /* Parse the State ID and populate the state info parameter */
+ while (state_id != 0U) {
+ req_state->pwr_domain_state[i++] = state_id &
+ PLAT_LOCAL_PSTATE_MASK;
+ state_id >>= PLAT_LOCAL_PSTATE_WIDTH;
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Platform handler called when a CPU is about to enter standby.
+ ******************************************************************************/
+static void qemu_cpu_standby(plat_local_state_t cpu_state)
+{
+
+ assert(cpu_state == PLAT_LOCAL_STATE_RET);
+
+ /*
+ * Enter standby state
+ * dsb is good practice before using wfi to enter low power states
+ */
+ dsb();
+ wfi();
+}
+
+/*******************************************************************************
+ * Platform handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ ******************************************************************************/
+static int qemu_pwr_domain_on(u_register_t mpidr)
+{
+ int pos = plat_core_pos_by_mpidr(mpidr);
+ uint64_t *hold_base = (uint64_t *)PLAT_QEMU_HOLD_BASE;
+
+ if (pos < 0) {
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ hold_base[pos] = PLAT_QEMU_HOLD_STATE_GO;
+ dsb();
+ sev();
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Platform handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+static void qemu_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ qemu_pwr_gic_off();
+}
+
+void __dead2 plat_secondary_cold_boot_setup(void);
+
+static void __dead2
+qemu_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
+{
+ disable_mmu_el3();
+ plat_secondary_cold_boot_setup();
+}
+
+/*******************************************************************************
+ * Platform handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void qemu_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ assert(false);
+}
+
+/*******************************************************************************
+ * Platform handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+void qemu_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ assert(target_state->pwr_domain_state[MPIDR_AFFLVL0] ==
+ PLAT_LOCAL_STATE_OFF);
+
+ qemu_pwr_gic_on_finish();
+}
+
+/*******************************************************************************
+ * Platform handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ ******************************************************************************/
+void qemu_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+ assert(false);
+}
+
+/*******************************************************************************
+ * Platform handlers to shutdown/reboot the system
+ ******************************************************************************/
+static void __dead2 qemu_system_off(void)
+{
+ mmio_write_32(SBSA_SECURE_EC_OFFSET, SBSA_SECURE_EC_CMD_SHUTDOWN);
+ panic();
+}
+
+static void __dead2 qemu_system_reset(void)
+{
+ mmio_write_32(SBSA_SECURE_EC_OFFSET, SBSA_SECURE_EC_CMD_REBOOT);
+ panic();
+}
+
+static const plat_psci_ops_t plat_qemu_psci_pm_ops = {
+ .cpu_standby = qemu_cpu_standby,
+ .pwr_domain_on = qemu_pwr_domain_on,
+ .pwr_domain_off = qemu_pwr_domain_off,
+ .pwr_domain_pwr_down_wfi = qemu_pwr_domain_pwr_down_wfi,
+ .pwr_domain_suspend = qemu_pwr_domain_suspend,
+ .pwr_domain_on_finish = qemu_pwr_domain_on_finish,
+ .pwr_domain_suspend_finish = qemu_pwr_domain_suspend_finish,
+ .system_off = qemu_system_off,
+ .system_reset = qemu_system_reset,
+ .validate_power_state = qemu_validate_power_state
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const plat_psci_ops_t **psci_ops)
+{
+ uintptr_t *mailbox = (uintptr_t *)PLAT_QEMU_TRUSTED_MAILBOX_BASE;
+
+ *mailbox = sec_entrypoint;
+ secure_entrypoint = (unsigned long)sec_entrypoint;
+ *psci_ops = &plat_qemu_psci_pm_ops;
+
+ return 0;
+}
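
qemu_pwr_domain_on() releases a secondary core by writing PLAT_QEMU_HOLD_STATE_GO into that core's hold entry and issuing sev(), while plat_setup_psci_ops() publishes the warm-boot entrypoint in the trusted mailbox. Below is a hedged C rendering of the wait loop the secondary is expected to run; the real loop lives in the platform's secondary cold-boot assembly path, and the function name here is illustrative only.

/* Illustrative hold-pen wait, assuming the layout from platform_def.h:
 * an 8-byte mailbox slot followed by one 8-byte hold entry per core. */
static void example_secondary_hold_pen(void)
{
	volatile uint64_t *hold_base = (uint64_t *)PLAT_QEMU_HOLD_BASE;
	uintptr_t *mailbox = (uintptr_t *)PLAT_QEMU_TRUSTED_MAILBOX_BASE;
	unsigned int pos = plat_my_core_pos();

	while (hold_base[pos] != PLAT_QEMU_HOLD_STATE_GO) {
		wfe();		/* woken by the sev() in qemu_pwr_domain_on() */
	}

	/* Jump to the warm-boot entrypoint published in the mailbox. */
	((void (*)(void))*mailbox)();
}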
diff --git a/plat/qemu/qemu_sbsa/sbsa_private.h b/plat/qemu/qemu_sbsa/sbsa_private.h
new file mode 100644
index 0000000..a9f4601
--- /dev/null
+++ b/plat/qemu/qemu_sbsa/sbsa_private.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2020, Nuvia Inc
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SBSA_PRIVATE_H
+#define SBSA_PRIVATE_H
+
+#include <stdint.h>
+
+unsigned int plat_qemu_calc_core_pos(u_register_t mpidr);
+
+void qemu_pwr_gic_on_finish(void);
+void qemu_pwr_gic_off(void);
+
+#endif /* SBSA_PRIVATE_H */
diff --git a/plat/qemu/qemu_sbsa/sbsa_sip_svc.c b/plat/qemu/qemu_sbsa/sbsa_sip_svc.c
new file mode 100644
index 0000000..05ebec4
--- /dev/null
+++ b/plat/qemu/qemu_sbsa/sbsa_sip_svc.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2023, Linaro Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <common/fdt_wrappers.h>
+#include <common/runtime_svc.h>
+#include <libfdt.h>
+#include <smccc_helpers.h>
+
+/* default platform version is 0.0 */
+static int platform_version_major;
+static int platform_version_minor;
+
+#define SMC_FASTCALL 0x80000000
+#define SMC64_FUNCTION (SMC_FASTCALL | 0x40000000)
+#define SIP_FUNCTION (SMC64_FUNCTION | 0x02000000)
+#define SIP_FUNCTION_ID(n) (SIP_FUNCTION | (n))
+
+/*
+ * We do not use SMCCC_ARCH_SOC_ID here because qemu_sbsa is a virtual platform
+ * which uses whichever SoC model QEMU provides. That can change on its own,
+ * while what we need is the version of the whole 'virtual hardware platform'.
+ */
+#define SIP_SVC_VERSION SIP_FUNCTION_ID(1)
+#define SIP_SVC_GET_GIC SIP_FUNCTION_ID(100)
+#define SIP_SVC_GET_GIC_ITS SIP_FUNCTION_ID(101)
+
+static uint64_t gic_its_addr;
+
+void sbsa_set_gic_bases(const uintptr_t gicd_base, const uintptr_t gicr_base);
+uintptr_t sbsa_get_gicd(void);
+uintptr_t sbsa_get_gicr(void);
+
+void read_platform_config_from_dt(void *dtb)
+{
+ int node;
+ const fdt64_t *data;
+ int err;
+ uintptr_t gicd_base;
+ uintptr_t gicr_base;
+
+ /*
+ * QEMU gives us this DeviceTree node:
+ *
+ * intc {
+ * reg = < 0x00 0x40060000 0x00 0x10000
+ * 0x00 0x40080000 0x00 0x4000000>;
+ * its {
+ * reg = <0x00 0x44081000 0x00 0x20000>;
+ * };
+ * };
+ */
+ node = fdt_path_offset(dtb, "/intc");
+ if (node < 0) {
+ return;
+ }
+
+ data = fdt_getprop(dtb, node, "reg", NULL);
+ if (data == NULL) {
+ return;
+ }
+
+ err = fdt_get_reg_props_by_index(dtb, node, 0, &gicd_base, NULL);
+ if (err < 0) {
+ ERROR("Failed to read GICD reg property of GIC node\n");
+ return;
+ }
+ INFO("GICD base = 0x%lx\n", gicd_base);
+
+ err = fdt_get_reg_props_by_index(dtb, node, 1, &gicr_base, NULL);
+ if (err < 0) {
+ ERROR("Failed to read GICR reg property of GIC node\n");
+ return;
+ }
+ INFO("GICR base = 0x%lx\n", gicr_base);
+
+ sbsa_set_gic_bases(gicd_base, gicr_base);
+
+ node = fdt_path_offset(dtb, "/intc/its");
+ if (node < 0) {
+ return;
+ }
+
+ err = fdt_get_reg_props_by_index(dtb, node, 0, &gic_its_addr, NULL);
+ if (err < 0) {
+ ERROR("Failed to read GICI reg property of GIC node\n");
+ return;
+ }
+ INFO("GICI base = 0x%lx\n", gic_its_addr);
+}
+
+void read_platform_version(void *dtb)
+{
+ int node;
+
+ node = fdt_path_offset(dtb, "/");
+ if (node >= 0) {
+ platform_version_major = fdt32_ld(fdt_getprop(dtb, node,
+ "machine-version-major", NULL));
+ platform_version_minor = fdt32_ld(fdt_getprop(dtb, node,
+ "machine-version-minor", NULL));
+ }
+}
+
+void sip_svc_init(void)
+{
+ /* Read DeviceTree data before MMU is enabled */
+
+ void *dtb = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;
+ int err;
+
+ err = fdt_open_into(dtb, dtb, PLAT_QEMU_DT_MAX_SIZE);
+ if (err < 0) {
+ ERROR("Invalid Device Tree at %p: error %d\n", dtb, err);
+ return;
+ }
+
+ err = fdt_check_header(dtb);
+ if (err < 0) {
+ ERROR("Invalid DTB file passed\n");
+ return;
+ }
+
+ read_platform_version(dtb);
+ INFO("Platform version: %d.%d\n", platform_version_major, platform_version_minor);
+
+ read_platform_config_from_dt(dtb);
+}
+
+/*
+ * This function is responsible for handling all SiP calls from the NS world
+ */
+uintptr_t sbsa_sip_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ uint32_t ns;
+
+ /* Determine which security state this SMC originated from */
+ ns = is_caller_non_secure(flags);
+ if (!ns) {
+ ERROR("%s: wrong world SMC (0x%x)\n", __func__, smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ switch (smc_fid) {
+ case SIP_SVC_VERSION:
+ INFO("Platform version requested\n");
+ SMC_RET3(handle, NULL, platform_version_major, platform_version_minor);
+
+ case SIP_SVC_GET_GIC:
+ SMC_RET3(handle, NULL, sbsa_get_gicd(), sbsa_get_gicr());
+
+ case SIP_SVC_GET_GIC_ITS:
+ SMC_RET2(handle, NULL, gic_its_addr);
+
+ default:
+ ERROR("%s: unhandled SMC (0x%x) (function id: %d)\n", __func__, smc_fid,
+ smc_fid - SIP_FUNCTION);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
+
+int sbsa_sip_smc_setup(void)
+{
+ return 0;
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ sbsa_sip_svc,
+ OEN_SIP_START,
+ OEN_SIP_END,
+ SMC_TYPE_FAST,
+ sbsa_sip_smc_setup,
+ sbsa_sip_smc_handler
+);
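
With the ID construction above, these resolve to fast SMC64 SiP calls: SIP_SVC_VERSION is 0xC2000001, SIP_SVC_GET_GIC is 0xC2000064 and SIP_SVC_GET_GIC_ITS is 0xC2000065. The following is a hedged sketch of how a non-secure caller might issue SIP_SVC_GET_GIC and read the GICD/GICR bases back from x1/x2; the inline-assembly wrapper is an illustrative assumption, not part of this patch (a kernel would normally go through its own SMCCC helpers).

#include <stdint.h>

#define SIP_SVC_GET_GIC		0xC2000064U	/* SIP_FUNCTION_ID(100) */

/* Assumed non-secure wrapper: returns 0 on success and fills in the bases. */
static int example_get_gic_bases(uint64_t *gicd, uint64_t *gicr)
{
	register uint64_t x0 __asm__("x0") = SIP_SVC_GET_GIC;
	register uint64_t x1 __asm__("x1");
	register uint64_t x2 __asm__("x2");

	__asm__ volatile("smc #0"
			 : "+r"(x0), "=r"(x1), "=r"(x2)
			 :
			 : "x3", "x4", "x5", "x6", "x7", "memory");

	if (x0 != 0U) {		/* handler returns SMC_UNK on error */
		return -1;
	}
	*gicd = x1;		/* sbsa_get_gicd() result */
	*gicr = x2;		/* sbsa_get_gicr() result */
	return 0;
}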
diff --git a/plat/qemu/qemu_sbsa/sbsa_topology.c b/plat/qemu/qemu_sbsa/sbsa_topology.c
new file mode 100644
index 0000000..bd8d16b
--- /dev/null
+++ b/plat/qemu/qemu_sbsa/sbsa_topology.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2020, Nuvia Inc
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <common/debug.h>
+
+#include <platform_def.h>
+#include "sbsa_private.h"
+
+/* The power domain tree descriptor */
+static unsigned char power_domain_tree_desc[PLATFORM_CLUSTER_COUNT + 1];
+
+/*******************************************************************************
+ * This function returns the sbsa-ref default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+ unsigned int i;
+
+ power_domain_tree_desc[0] = PLATFORM_CLUSTER_COUNT;
+
+ for (i = 0U; i < PLATFORM_CLUSTER_COUNT; i++) {
+ power_domain_tree_desc[i + 1] = PLATFORM_MAX_CPUS_PER_CLUSTER;
+ }
+
+ return power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+ unsigned int cluster_id, cpu_id;
+
+ mpidr &= MPIDR_AFFINITY_MASK;
+ if ((mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)) != 0U) {
+ ERROR("Invalid MPIDR\n");
+ return -1;
+ }
+
+ cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+ cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+ if (cluster_id >= PLATFORM_CLUSTER_COUNT) {
+ ERROR("cluster_id >= PLATFORM_CLUSTER_COUNT define\n");
+ return -1;
+ }
+
+ if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER) {
+ ERROR("cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER define\n");
+ return -1;
+ }
+
+ return plat_qemu_calc_core_pos(mpidr);
+}
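
plat_core_pos_by_mpidr() above validates the affinity fields and then delegates to plat_qemu_calc_core_pos(), which is declared in sbsa_private.h but implemented outside this diff. Given PLATFORM_CPU_PER_CLUSTER_SHIFT (3) from platform_def.h, the expected mapping is sketched below as a hedged C rendering; the upstream helper is assembly, and the example_ name marks this as illustrative.

/* Illustrative C equivalent of the expected core-position mapping:
 * linear index = (cluster_id << 3) + cpu_id, giving 0..511 for
 * 64 clusters x 8 CPUs. Not the upstream implementation. */
static unsigned int example_calc_core_pos(u_register_t mpidr)
{
	unsigned int cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
	unsigned int cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;

	return (cluster_id << PLATFORM_CPU_PER_CLUSTER_SHIFT) + cpu_id;
}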