From dc50eab76b709d68175a358d6e23a5a3890764d3 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 18 May 2024 19:39:57 +0200 Subject: Merging upstream version 6.7.7. Signed-off-by: Daniel Baumann --- drivers/firmware/Kconfig | 41 +- drivers/firmware/Makefile | 4 +- drivers/firmware/arm_ffa/bus.c | 15 +- drivers/firmware/arm_ffa/driver.c | 794 ++++++++- drivers/firmware/arm_scmi/Kconfig | 12 + drivers/firmware/arm_scmi/Makefile | 1 - drivers/firmware/arm_scmi/clock.c | 401 ++++- drivers/firmware/arm_scmi/driver.c | 1 + drivers/firmware/arm_scmi/perf.c | 41 +- drivers/firmware/arm_scmi/powercap.c | 4 +- drivers/firmware/arm_scmi/scmi_pm_domain.c | 153 -- drivers/firmware/arm_scmi/smc.c | 35 +- drivers/firmware/arm_scpi.c | 13 +- drivers/firmware/efi/Kconfig | 6 +- drivers/firmware/efi/arm-runtime.c | 2 +- drivers/firmware/efi/efi-init.c | 33 +- drivers/firmware/efi/efi.c | 13 +- drivers/firmware/efi/libstub/Makefile | 9 +- drivers/firmware/efi/libstub/efi-stub-entry.c | 8 +- drivers/firmware/efi/libstub/efistub.h | 3 +- drivers/firmware/efi/libstub/kaslr.c | 2 +- drivers/firmware/efi/libstub/randomalloc.c | 12 +- drivers/firmware/efi/libstub/x86-stub.c | 75 +- drivers/firmware/efi/libstub/x86-stub.h | 6 +- drivers/firmware/efi/libstub/zboot.c | 2 +- drivers/firmware/efi/riscv-runtime.c | 2 +- drivers/firmware/efi/unaccepted_memory.c | 20 + drivers/firmware/imx/Kconfig | 6 - drivers/firmware/meson/meson_sm.c | 25 +- drivers/firmware/pcdp.c | 135 -- drivers/firmware/pcdp.h | 108 -- drivers/firmware/qcom/Kconfig | 56 + drivers/firmware/qcom/Makefile | 9 + drivers/firmware/qcom/qcom_qseecom.c | 120 ++ drivers/firmware/qcom/qcom_qseecom_uefisecapp.c | 877 ++++++++++ drivers/firmware/qcom/qcom_scm-legacy.c | 246 +++ drivers/firmware/qcom/qcom_scm-smc.c | 225 +++ drivers/firmware/qcom/qcom_scm.c | 1943 +++++++++++++++++++++++ drivers/firmware/qcom/qcom_scm.h | 169 ++ drivers/firmware/qcom_scm-legacy.c | 246 --- drivers/firmware/qcom_scm-smc.c | 225 --- drivers/firmware/qcom_scm.c | 1518 ------------------ drivers/firmware/qcom_scm.h | 167 -- drivers/firmware/qemu_fw_cfg.c | 2 +- drivers/firmware/raspberrypi.c | 1 + drivers/firmware/tegra/bpmp-debugfs.c | 4 +- drivers/firmware/ti_sci.c | 23 +- drivers/firmware/xilinx/zynqmp.c | 2 +- 48 files changed, 5003 insertions(+), 2812 deletions(-) delete mode 100644 drivers/firmware/arm_scmi/scmi_pm_domain.c delete mode 100644 drivers/firmware/pcdp.c delete mode 100644 drivers/firmware/pcdp.h create mode 100644 drivers/firmware/qcom/Kconfig create mode 100644 drivers/firmware/qcom/Makefile create mode 100644 drivers/firmware/qcom/qcom_qseecom.c create mode 100644 drivers/firmware/qcom/qcom_qseecom_uefisecapp.c create mode 100644 drivers/firmware/qcom/qcom_scm-legacy.c create mode 100644 drivers/firmware/qcom/qcom_scm-smc.c create mode 100644 drivers/firmware/qcom/qcom_scm.c create mode 100644 drivers/firmware/qcom/qcom_scm.h delete mode 100644 drivers/firmware/qcom_scm-legacy.c delete mode 100644 drivers/firmware/qcom_scm-smc.c delete mode 100644 drivers/firmware/qcom_scm.c delete mode 100644 drivers/firmware/qcom_scm.h diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index b59e3041fd..4a98a859d4 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -77,30 +77,6 @@ config FIRMWARE_MEMMAP See also Documentation/ABI/testing/sysfs-firmware-memmap. 
-config EFI_PCDP - bool "Console device selection via EFI PCDP or HCDP table" - depends on ACPI && EFI && IA64 - default y if IA64 - help - If your firmware supplies the PCDP table, and you want to - automatically use the primary console device it describes - as the Linux console, say Y here. - - If your firmware supplies the HCDP table, and you want to - use the first serial port it describes as the Linux console, - say Y here. If your EFI ConOut path contains only a UART - device, it will become the console automatically. Otherwise, - you must specify the "console=hcdp" kernel boot argument. - - Neither the PCDP nor the HCDP affects naming of serial devices, - so a serial console may be /dev/ttyS0, /dev/ttyS1, etc, depending - on how the driver discovers devices. - - You must also enable the appropriate drivers (serial, VGA, etc.) - - See DIG64_HCDPv20_042804.pdf available from - <http://www.dig64.org/specifications/> - config DMIID bool "Export DMI identification via sysfs to userspace" depends on DMI @@ -155,7 +131,7 @@ config RASPBERRYPI_FIRMWARE config FW_CFG_SYSFS tristate "QEMU fw_cfg device support in sysfs" - depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86) + depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || RISCV || SPARC || X86) depends on HAS_IOPORT_MAP default n help @@ -212,20 +188,6 @@ config MTK_ADSP_IPC ADSP exists on some mtk processors. Client might use shared memory to exchange information with ADSP. -config QCOM_SCM - tristate - -config QCOM_SCM_DOWNLOAD_MODE_DEFAULT - bool "Qualcomm download mode enabled by default" - depends on QCOM_SCM - help - A device with "download mode" enabled will upon an unexpected - warm-restart enter a special debug mode that allows the user to - "download" memory content over USB for offline postmortem analysis. - The feature can be enabled/disabled on the kernel command line. - - Say Y here to enable "download mode" by default. 
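The download-mode toggle described in the removed help text above survives the move to drivers/firmware/qcom/. As a rough sketch of how such a Kconfig default is usually wired to a runtime switch (names here are illustrative; the real logic lives in drivers/firmware/qcom/qcom_scm.c), the pattern is a module parameter seeded from the Kconfig symbol:

#include <linux/module.h>
#include <linux/moduleparam.h>

/*
 * Kconfig only seeds the default; "qcom_scm.download_mode=0" (or =1) on
 * the kernel command line overrides it at boot, matching the help text's
 * "can be enabled/disabled on the kernel command line".
 */
static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);
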
- config SYSFB bool select BOOT_VESA_SUPPORT @@ -311,6 +273,7 @@ source "drivers/firmware/efi/Kconfig" source "drivers/firmware/imx/Kconfig" source "drivers/firmware/meson/Kconfig" source "drivers/firmware/psci/Kconfig" +source "drivers/firmware/qcom/Kconfig" source "drivers/firmware/smccc/Kconfig" source "drivers/firmware/tegra/Kconfig" source "drivers/firmware/xilinx/Kconfig" diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 28fcddcd68..5f9dab82e1 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o obj-$(CONFIG_DMI) += dmi_scan.o obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o obj-$(CONFIG_EDD) += edd.o -obj-$(CONFIG_EFI_PCDP) += pcdp.o obj-$(CONFIG_DMIID) += dmi-id.o obj-$(CONFIG_INTEL_STRATIX10_SERVICE) += stratix10-svc.o obj-$(CONFIG_INTEL_STRATIX10_RSU) += stratix10-rsu.o @@ -18,8 +17,6 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_MTK_ADSP_IPC) += mtk-adsp-ipc.o obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o -obj-$(CONFIG_QCOM_SCM) += qcom-scm.o -qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o obj-$(CONFIG_SYSFB) += sysfb.o obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o @@ -35,6 +32,7 @@ obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ obj-y += efi/ obj-y += imx/ obj-y += psci/ +obj-y += qcom/ obj-y += smccc/ obj-y += tegra/ obj-y += xilinx/ diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 7865438b36..1c7940ba55 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -15,6 +15,8 @@ #include "common.h" +#define SCMI_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb" + static DEFINE_IDA(ffa_bus_id); static int ffa_device_match(struct device *dev, struct device_driver *drv) @@ -63,10 +65,20 @@ static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *e { const struct ffa_device *ffa_dev = to_ffa_dev(dev); - return add_uevent_var(env, "MODALIAS=arm_ffa:%04x:%pUb", + return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id, &ffa_dev->uuid); } +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ffa_device *ffa_dev = to_ffa_dev(dev); + + return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id, + &ffa_dev->uuid); +} +static DEVICE_ATTR_RO(modalias); + static ssize_t partition_id_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -88,6 +100,7 @@ static DEVICE_ATTR_RO(uuid); static struct attribute *ffa_device_attributes_attrs[] = { &dev_attr_partition_id.attr, &dev_attr_uuid.attr, + &dev_attr_modalias.attr, NULL, }; ATTRIBUTE_GROUPS(ffa_device_attributes); diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 7cd6b1564e..0ea1dd6e55 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -22,20 +22,28 @@ #define DRIVER_NAME "ARM FF-A" #define pr_fmt(fmt) DRIVER_NAME ": " fmt +#include <linux/acpi.h> #include <linux/arm_ffa.h> #include <linux/bitfield.h> +#include <linux/cpuhotplug.h> #include <linux/device.h> +#include <linux/hashtable.h> +#include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mm.h> +#include <linux/mutex.h> +#include <linux/of_irq.h> #include <linux/scatterlist.h> #include <linux/slab.h> +#include <linux/smp.h> #include <linux/uuid.h> +#include <linux/xarray.h> #include "common.h" -#define FFA_DRIVER_VERSION FFA_VERSION_1_0 +#define FFA_DRIVER_VERSION FFA_VERSION_1_1 #define FFA_MIN_VERSION FFA_VERSION_1_0 #define SENDER_ID_MASK GENMASK(31, 16) @@ -51,6 +59,8 @@ */ #define RXTX_BUFFER_SIZE SZ_4K +#define FFA_MAX_NOTIFICATIONS 64 + static ffa_fn *invoke_ffa_fn; static 
const int ffa_linux_errmap[] = { @@ -64,6 +74,7 @@ static const int ffa_linux_errmap[] = { -EACCES, /* FFA_RET_DENIED */ -EAGAIN, /* FFA_RET_RETRY */ -ECANCELED, /* FFA_RET_ABORTED */ + -ENODATA, /* FFA_RET_NO_DATA */ }; static inline int ffa_to_linux_errno(int errno) @@ -75,6 +86,10 @@ static inline int ffa_to_linux_errno(int errno) return -EINVAL; } +struct ffa_pcpu_irq { + struct ffa_drv_info *info; +}; + struct ffa_drv_info { u32 version; u16 vm_id; @@ -83,6 +98,18 @@ struct ffa_drv_info { void *rx_buffer; void *tx_buffer; bool mem_ops_native; + bool bitmap_created; + bool notif_enabled; + unsigned int sched_recv_irq; + unsigned int cpuhp_state; + struct ffa_pcpu_irq __percpu *irq_pcpu; + struct workqueue_struct *notif_pcpu_wq; + struct work_struct notif_pcpu_work; + struct work_struct irq_work; + struct xarray partition_info; + unsigned int partition_count; + DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS)); + struct mutex notify_lock; /* lock to protect notifier hashtable */ }; static struct ffa_drv_info *drv_info; @@ -397,7 +424,7 @@ static u32 ffa_get_num_pages_sg(struct scatterlist *sg) return num_pages; } -static u8 ffa_memory_attributes_get(u32 func_id) +static u16 ffa_memory_attributes_get(u32 func_id) { /* * For the memory lend or donate operation, if the receiver is a PE or @@ -416,38 +443,47 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, { int rc = 0; bool first = true; + u32 composite_offset; phys_addr_t addr = 0; + struct ffa_mem_region *mem_region = buffer; struct ffa_composite_mem_region *composite; struct ffa_mem_region_addr_range *constituents; struct ffa_mem_region_attributes *ep_mem_access; - struct ffa_mem_region *mem_region = buffer; u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg); mem_region->tag = args->tag; mem_region->flags = args->flags; mem_region->sender_id = drv_info->vm_id; mem_region->attributes = ffa_memory_attributes_get(func_id); - ep_mem_access = &mem_region->ep_mem_access[0]; + ep_mem_access = buffer + + ffa_mem_desc_offset(buffer, 0, drv_info->version); + composite_offset = ffa_mem_desc_offset(buffer, args->nattrs, + drv_info->version); for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) { ep_mem_access->receiver = args->attrs[idx].receiver; ep_mem_access->attrs = args->attrs[idx].attrs; - ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs); + ep_mem_access->composite_off = composite_offset; ep_mem_access->flag = 0; ep_mem_access->reserved = 0; } mem_region->handle = 0; - mem_region->reserved_0 = 0; - mem_region->reserved_1 = 0; mem_region->ep_count = args->nattrs; + if (drv_info->version <= FFA_VERSION_1_0) { + mem_region->ep_mem_size = 0; + } else { + mem_region->ep_mem_size = sizeof(*ep_mem_access); + mem_region->ep_mem_offset = sizeof(*mem_region); + memset(mem_region->reserved, 0, 12); + } - composite = buffer + COMPOSITE_OFFSET(args->nattrs); + composite = buffer + composite_offset; composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg); composite->addr_range_cnt = num_entries; composite->reserved = 0; - length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries); - frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0); + length = composite_offset + CONSTITUENTS_OFFSET(num_entries); + frag_len = composite_offset + CONSTITUENTS_OFFSET(0); if (frag_len > max_fragsize) return -ENXIO; @@ -554,6 +590,241 @@ static int ffa_features(u32 func_feat_id, u32 input_props, return 0; } +static int ffa_notification_bitmap_create(void) +{ + ffa_value_t ret; + u16 
vcpu_count = nr_cpu_ids; + + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_NOTIFICATION_BITMAP_CREATE, + .a1 = drv_info->vm_id, .a2 = vcpu_count, + }, &ret); + + if (ret.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)ret.a2); + + return 0; +} + +static int ffa_notification_bitmap_destroy(void) +{ + ffa_value_t ret; + + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_NOTIFICATION_BITMAP_DESTROY, + .a1 = drv_info->vm_id, + }, &ret); + + if (ret.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)ret.a2); + + return 0; +} + +#define NOTIFICATION_LOW_MASK GENMASK(31, 0) +#define NOTIFICATION_HIGH_MASK GENMASK(63, 32) +#define NOTIFICATION_BITMAP_HIGH(x) \ + ((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x)))) +#define NOTIFICATION_BITMAP_LOW(x) \ + ((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x)))) +#define PACK_NOTIFICATION_BITMAP(low, high) \ + (FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) | \ + FIELD_PREP(NOTIFICATION_HIGH_MASK, (high))) + +#define RECEIVER_VCPU_MASK GENMASK(31, 16) +#define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r) \ + (FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) | \ + FIELD_PREP(RECEIVER_ID_MASK, (r))) + +#define NOTIFICATION_INFO_GET_MORE_PEND_MASK BIT(0) +#define NOTIFICATION_INFO_GET_ID_COUNT GENMASK(11, 7) +#define ID_LIST_MASK_64 GENMASK(51, 12) +#define ID_LIST_MASK_32 GENMASK(31, 12) +#define MAX_IDS_64 20 +#define MAX_IDS_32 10 + +#define PER_VCPU_NOTIFICATION_FLAG BIT(0) +#define SECURE_PARTITION_BITMAP BIT(0) +#define NON_SECURE_VM_BITMAP BIT(1) +#define SPM_FRAMEWORK_BITMAP BIT(2) +#define NS_HYP_FRAMEWORK_BITMAP BIT(3) + +static int ffa_notification_bind_common(u16 dst_id, u64 bitmap, + u32 flags, bool is_bind) +{ + ffa_value_t ret; + u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id); + + func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND; + + invoke_ffa_fn((ffa_value_t){ + .a0 = func, .a1 = src_dst_ids, .a2 = flags, + .a3 = NOTIFICATION_BITMAP_LOW(bitmap), + .a4 = NOTIFICATION_BITMAP_HIGH(bitmap), + }, &ret); + + if (ret.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)ret.a2); + else if (ret.a0 != FFA_SUCCESS) + return -EINVAL; + + return 0; +} + +static +int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap) +{ + ffa_value_t ret; + u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id); + + invoke_ffa_fn((ffa_value_t) { + .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags, + .a3 = NOTIFICATION_BITMAP_LOW(bitmap), + .a4 = NOTIFICATION_BITMAP_HIGH(bitmap), + }, &ret); + + if (ret.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)ret.a2); + else if (ret.a0 != FFA_SUCCESS) + return -EINVAL; + + return 0; +} + +struct ffa_notify_bitmaps { + u64 sp_map; + u64 vm_map; + u64 arch_map; +}; + +static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify) +{ + ffa_value_t ret; + u16 src_id = drv_info->vm_id; + u16 cpu_id = smp_processor_id(); + u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id); + + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags, + }, &ret); + + if (ret.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)ret.a2); + else if (ret.a0 != FFA_SUCCESS) + return -EINVAL; /* Something else went wrong. 
*/ + + notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3); + notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5); + notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7); + + return 0; +} + +struct ffa_dev_part_info { + ffa_sched_recv_cb callback; + void *cb_data; + rwlock_t rw_lock; +}; + +static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu) +{ + struct ffa_dev_part_info *partition; + ffa_sched_recv_cb callback; + void *cb_data; + + partition = xa_load(&drv_info->partition_info, part_id); + if (!partition) { + pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id); + return; + } + + read_lock(&partition->rw_lock); + callback = partition->callback; + cb_data = partition->cb_data; + read_unlock(&partition->rw_lock); + + if (callback) + callback(vcpu, is_per_vcpu, cb_data); +} + +static void ffa_notification_info_get(void) +{ + int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64]; + bool is_64b_resp; + ffa_value_t ret; + u64 id_list; + + do { + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET), + }, &ret); + + if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) { + if (ret.a2 != FFA_RET_NO_DATA) + pr_err("Notification Info fetch failed: 0x%lx (0x%lx)", + ret.a0, ret.a2); + return; + } + + is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS); + + ids_processed = 0; + lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2); + if (is_64b_resp) { + max_ids = MAX_IDS_64; + id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2); + } else { + max_ids = MAX_IDS_32; + id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2); + } + + for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2) + ids_count[idx] = (id_list & 0x3) + 1; + + /* Process IDs */ + for (list = 0; list < lists_cnt; list++) { + u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3; + + if (ids_processed >= max_ids - 1) + break; + + part_id = packed_id_list[ids_processed++]; + + if (!ids_count[list]) { /* Global Notification */ + __do_sched_recv_cb(part_id, 0, false); + continue; + } + + /* Per vCPU Notification */ + for (idx = 0; idx < ids_count[list]; idx++) { + if (ids_processed >= max_ids - 1) + break; + + vcpu_id = packed_id_list[ids_processed++]; + + __do_sched_recv_cb(part_id, vcpu_id, true); + } + } + } while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK); +} + +static int ffa_run(struct ffa_device *dev, u16 vcpu) +{ + ffa_value_t ret; + u32 target = dev->vm_id << 16 | vcpu; + + invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret); + + while (ret.a0 == FFA_INTERRUPT) + invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, }, + &ret); + + if (ret.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)ret.a2); + + return 0; +} + static void ffa_set_up_mem_ops_native_flag(void) { if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) || @@ -622,6 +893,250 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args) return ffa_memory_ops(FFA_MEM_LEND, args); } +#define FFA_SECURE_PARTITION_ID_FLAG BIT(15) + +#define ffa_notifications_disabled() (!drv_info->notif_enabled) + +enum notify_type { + NON_SECURE_VM, + SECURE_PARTITION, + FRAMEWORK, +}; + +struct notifier_cb_info { + struct hlist_node hnode; + ffa_notifier_cb cb; + void *cb_data; + enum notify_type type; +}; + +static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback, + void *cb_data, bool is_registration) +{ + struct ffa_dev_part_info *partition; + bool cb_valid; + + if (ffa_notifications_disabled()) + return -EOPNOTSUPP; + + partition = 
xa_load(&drv_info->partition_info, part_id); + if (!partition) { + pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id); + return -EINVAL; + } + + write_lock(&partition->rw_lock); + + cb_valid = !!partition->callback; + if (!(is_registration ^ cb_valid)) { + write_unlock(&partition->rw_lock); + return -EINVAL; + } + + partition->callback = callback; + partition->cb_data = cb_data; + + write_unlock(&partition->rw_lock); + return 0; +} + +static int ffa_sched_recv_cb_register(struct ffa_device *dev, + ffa_sched_recv_cb cb, void *cb_data) +{ + return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true); +} + +static int ffa_sched_recv_cb_unregister(struct ffa_device *dev) +{ + return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false); +} + +static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags) +{ + return ffa_notification_bind_common(dst_id, bitmap, flags, true); +} + +static int ffa_notification_unbind(u16 dst_id, u64 bitmap) +{ + return ffa_notification_bind_common(dst_id, bitmap, 0, false); +} + +/* Should be called while the notify_lock is taken */ +static struct notifier_cb_info * +notifier_hash_node_get(u16 notify_id, enum notify_type type) +{ + struct notifier_cb_info *node; + + hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id) + if (type == node->type) + return node; + + return NULL; +} + +static int +update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb, + void *cb_data, bool is_registration) +{ + struct notifier_cb_info *cb_info = NULL; + bool cb_found; + + cb_info = notifier_hash_node_get(notify_id, type); + cb_found = !!cb_info; + + if (!(is_registration ^ cb_found)) + return -EINVAL; + + if (is_registration) { + cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL); + if (!cb_info) + return -ENOMEM; + + cb_info->type = type; + cb_info->cb = cb; + cb_info->cb_data = cb_data; + + hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id); + } else { + hash_del(&cb_info->hnode); + } + + return 0; +} + +static enum notify_type ffa_notify_type_get(u16 vm_id) +{ + if (vm_id & FFA_SECURE_PARTITION_ID_FLAG) + return SECURE_PARTITION; + else + return NON_SECURE_VM; +} + +static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id) +{ + int rc; + enum notify_type type = ffa_notify_type_get(dev->vm_id); + + if (ffa_notifications_disabled()) + return -EOPNOTSUPP; + + if (notify_id >= FFA_MAX_NOTIFICATIONS) + return -EINVAL; + + mutex_lock(&drv_info->notify_lock); + + rc = update_notifier_cb(notify_id, type, NULL, NULL, false); + if (rc) { + pr_err("Could not unregister notification callback\n"); + mutex_unlock(&drv_info->notify_lock); + return rc; + } + + rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id)); + + mutex_unlock(&drv_info->notify_lock); + + return rc; +} + +static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, + ffa_notifier_cb cb, void *cb_data, int notify_id) +{ + int rc; + u32 flags = 0; + enum notify_type type = ffa_notify_type_get(dev->vm_id); + + if (ffa_notifications_disabled()) + return -EOPNOTSUPP; + + if (notify_id >= FFA_MAX_NOTIFICATIONS) + return -EINVAL; + + mutex_lock(&drv_info->notify_lock); + + if (is_per_vcpu) + flags = PER_VCPU_NOTIFICATION_FLAG; + + rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags); + if (rc) { + mutex_unlock(&drv_info->notify_lock); + return rc; + } + + rc = update_notifier_cb(notify_id, type, cb, cb_data, true); + if (rc) { + pr_err("Failed to register callback for %d - %d\n", + notify_id, rc); + 
ffa_notification_unbind(dev->vm_id, BIT(notify_id)); + } + mutex_unlock(&drv_info->notify_lock); + + return rc; +} + +static int ffa_notify_send(struct ffa_device *dev, int notify_id, + bool is_per_vcpu, u16 vcpu) +{ + u32 flags = 0; + + if (ffa_notifications_disabled()) + return -EOPNOTSUPP; + + if (is_per_vcpu) + flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16); + + return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags, + BIT(notify_id)); +} + +static void handle_notif_callbacks(u64 bitmap, enum notify_type type) +{ + int notify_id; + struct notifier_cb_info *cb_info = NULL; + + for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap; + notify_id++, bitmap >>= 1) { + if (!(bitmap & 1)) + continue; + + mutex_lock(&drv_info->notify_lock); + cb_info = notifier_hash_node_get(notify_id, type); + mutex_unlock(&drv_info->notify_lock); + + if (cb_info && cb_info->cb) + cb_info->cb(notify_id, cb_info->cb_data); + } +} + +static void notif_pcpu_irq_work_fn(struct work_struct *work) +{ + int rc; + struct ffa_notify_bitmaps bitmaps; + + rc = ffa_notification_get(SECURE_PARTITION_BITMAP | + SPM_FRAMEWORK_BITMAP, &bitmaps); + if (rc) { + pr_err("Failed to retrieve notifications with %d!\n", rc); + return; + } + + handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM); + handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION); + handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK); +} + +static void +ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data) +{ + struct ffa_drv_info *info = cb_data; + + if (!is_per_vcpu) + notif_pcpu_irq_work_fn(&info->notif_pcpu_work); + else + queue_work_on(vcpu, info->notif_pcpu_wq, + &info->notif_pcpu_work); +} + static const struct ffa_info_ops ffa_drv_info_ops = { .api_version_get = ffa_api_version_get, .partition_info_get = ffa_partition_info_get, @@ -638,10 +1153,24 @@ static const struct ffa_mem_ops ffa_drv_mem_ops = { .memory_lend = ffa_memory_lend, }; +static const struct ffa_cpu_ops ffa_drv_cpu_ops = { + .run = ffa_run, +}; + +static const struct ffa_notifier_ops ffa_drv_notifier_ops = { + .sched_recv_cb_register = ffa_sched_recv_cb_register, + .sched_recv_cb_unregister = ffa_sched_recv_cb_unregister, + .notify_request = ffa_notify_request, + .notify_relinquish = ffa_notify_relinquish, + .notify_send = ffa_notify_send, +}; + static const struct ffa_ops ffa_drv_ops = { .info_ops = &ffa_drv_info_ops, .msg_ops = &ffa_drv_msg_ops, .mem_ops = &ffa_drv_mem_ops, + .cpu_ops = &ffa_drv_cpu_ops, + .notifier_ops = &ffa_drv_notifier_ops, }; void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) @@ -672,6 +1201,7 @@ static void ffa_setup_partitions(void) int count, idx; uuid_t uuid; struct ffa_device *ffa_dev; + struct ffa_dev_part_info *info; struct ffa_partition_info *pbuf, *tpbuf; count = ffa_partition_probe(&uuid_null, &pbuf); @@ -680,6 +1210,7 @@ static void ffa_setup_partitions(void) return; } + xa_init(&drv_info->partition_info); for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) { import_uuid(&uuid, (u8 *)tpbuf->uuid); @@ -699,8 +1230,239 @@ static void ffa_setup_partitions(void) if (drv_info->version > FFA_VERSION_1_0 && !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC)) ffa_mode_32bit_set(ffa_dev); + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ffa_device_unregister(ffa_dev); + continue; + } + rwlock_init(&info->rw_lock); + xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL); } + drv_info->partition_count = count; + kfree(pbuf); + + /* Allocate for the host */ + info = 
kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return; + rwlock_init(&info->rw_lock); + xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL); + drv_info->partition_count++; +} + +static void ffa_partitions_cleanup(void) +{ + struct ffa_dev_part_info **info; + int idx, count = drv_info->partition_count; + + if (!count) + return; + + info = kcalloc(count, sizeof(*info), GFP_KERNEL); + if (!info) + return; + + xa_extract(&drv_info->partition_info, (void **)info, 0, VM_ID_MASK, + count, XA_PRESENT); + + for (idx = 0; idx < count; idx++) + kfree(info[idx]); + kfree(info); + + drv_info->partition_count = 0; + xa_destroy(&drv_info->partition_info); +} + +/* FFA FEATURE IDs */ +#define FFA_FEAT_NOTIFICATION_PENDING_INT (1) +#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2) +#define FFA_FEAT_MANAGED_EXIT_INT (3) + +static irqreturn_t irq_handler(int irq, void *irq_data) +{ + struct ffa_pcpu_irq *pcpu = irq_data; + struct ffa_drv_info *info = pcpu->info; + + queue_work(info->notif_pcpu_wq, &info->irq_work); + + return IRQ_HANDLED; +} + +static void ffa_sched_recv_irq_work_fn(struct work_struct *work) +{ + ffa_notification_info_get(); +} + +static int ffa_sched_recv_irq_map(void) +{ + int ret, irq, sr_intid; + + /* The returned sr_intid is assumed to be SGI donated to NS world */ + ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + pr_err("Failed to retrieve scheduler Rx interrupt\n"); + return ret; + } + + if (acpi_disabled) { + struct of_phandle_args oirq = {}; + struct device_node *gic; + + /* Only GICv3 supported currently with the device tree */ + gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3"); + if (!gic) + return -ENXIO; + + oirq.np = gic; + oirq.args_count = 1; + oirq.args[0] = sr_intid; + irq = irq_create_of_mapping(&oirq); + of_node_put(gic); +#ifdef CONFIG_ACPI + } else { + irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE, + ACPI_ACTIVE_HIGH); +#endif + } + + if (irq <= 0) { + pr_err("Failed to create IRQ mapping!\n"); + return -ENODATA; + } + + return irq; +} + +static void ffa_sched_recv_irq_unmap(void) +{ + if (drv_info->sched_recv_irq) { + irq_dispose_mapping(drv_info->sched_recv_irq); + drv_info->sched_recv_irq = 0; + } +} + +static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu) +{ + enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE); + return 0; +} + +static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu) +{ + disable_percpu_irq(drv_info->sched_recv_irq); + return 0; +} + +static void ffa_uninit_pcpu_irq(void) +{ + if (drv_info->cpuhp_state) { + cpuhp_remove_state(drv_info->cpuhp_state); + drv_info->cpuhp_state = 0; + } + + if (drv_info->notif_pcpu_wq) { + destroy_workqueue(drv_info->notif_pcpu_wq); + drv_info->notif_pcpu_wq = NULL; + } + + if (drv_info->sched_recv_irq) + free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu); + + if (drv_info->irq_pcpu) { + free_percpu(drv_info->irq_pcpu); + drv_info->irq_pcpu = NULL; + } +} + +static int ffa_init_pcpu_irq(unsigned int irq) +{ + struct ffa_pcpu_irq __percpu *irq_pcpu; + int ret, cpu; + + irq_pcpu = alloc_percpu(struct ffa_pcpu_irq); + if (!irq_pcpu) + return -ENOMEM; + + for_each_present_cpu(cpu) + per_cpu_ptr(irq_pcpu, cpu)->info = drv_info; + + drv_info->irq_pcpu = irq_pcpu; + + ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu); + if (ret) { + pr_err("Error registering notification IRQ %d: %d\n", irq, ret); + return ret; + } + + INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn); + 
INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn); + drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification"); + if (!drv_info->notif_pcpu_wq) + return -EINVAL; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting", + ffa_cpuhp_pcpu_irq_enable, + ffa_cpuhp_pcpu_irq_disable); + + if (ret < 0) + return ret; + + drv_info->cpuhp_state = ret; + return 0; +} + +static void ffa_notifications_cleanup(void) +{ + ffa_uninit_pcpu_irq(); + ffa_sched_recv_irq_unmap(); + + if (drv_info->bitmap_created) { + ffa_notification_bitmap_destroy(); + drv_info->bitmap_created = false; + } + drv_info->notif_enabled = false; +} + +static void ffa_notifications_setup(void) +{ + int ret, irq; + + ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL); + if (ret) { + pr_info("Notifications not supported, continuing with it ..\n"); + return; + } + + ret = ffa_notification_bitmap_create(); + if (ret) { + pr_info("Notification bitmap create error %d\n", ret); + return; + } + drv_info->bitmap_created = true; + + irq = ffa_sched_recv_irq_map(); + if (irq <= 0) { + ret = irq; + goto cleanup; + } + + drv_info->sched_recv_irq = irq; + + ret = ffa_init_pcpu_irq(irq); + if (ret) + goto cleanup; + + hash_init(drv_info->notifier_hash); + mutex_init(&drv_info->notify_lock); + + drv_info->notif_enabled = true; + return; +cleanup: + pr_info("Notification setup failed %d, not enabled\n", ret); + ffa_notifications_cleanup(); } static int __init ffa_init(void) @@ -754,9 +1516,16 @@ static int __init ffa_init(void) mutex_init(&drv_info->rx_lock); mutex_init(&drv_info->tx_lock); + ffa_set_up_mem_ops_native_flag(); + + ffa_notifications_setup(); + ffa_setup_partitions(); - ffa_set_up_mem_ops_native_flag(); + ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle, + drv_info, true); + if (ret) + pr_info("Failed to register driver sched callback %d\n", ret); return 0; free_pages: @@ -773,9 +1542,12 @@ subsys_initcall(ffa_init); static void __exit ffa_exit(void) { + ffa_notifications_cleanup(); + ffa_partitions_cleanup(); ffa_rxtx_unmap(drv_info->vm_id); free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); + xa_destroy(&drv_info->partition_info); kfree(drv_info); arm_ffa_bus_exit(); } diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index ea0f5083ac..706d1264d0 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -181,6 +181,18 @@ config ARM_SCMI_POWER_DOMAIN will be called scmi_pm_domain. Note this may needed early in boot before rootfs may be available. +config ARM_SCMI_PERF_DOMAIN + tristate "SCMI performance domain driver" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + default y + select PM_GENERIC_DOMAINS if PM + help + This enables support for the SCMI performance domains which can be + enabled or disabled via the SCP firmware. + + This driver can also be built as a module. If so, the module will be + called scmi_perf_domain. 
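Returning to the FF-A notification plumbing completed above: a client driver reaches ffa_notify_request()/ffa_notify_relinquish() through the new notifier_ops table on its ffa_device. A minimal consumer sketch, assuming a hypothetical notification ID 3 and relying only on the callback signature visible in this patch (handle_notif_callbacks() invokes cb(notify_id, cb_data)):

#include <linux/arm_ffa.h>
#include <linux/printk.h>

/* Matches the cb_info->cb(notify_id, cb_info->cb_data) call shown above */
static void demo_notif_cb(int notify_id, void *cb_data)
{
	pr_info("FF-A notification %d fired\n", notify_id);
}

static int demo_ffa_probe(struct ffa_device *ffa_dev)
{
	const struct ffa_notifier_ops *nops = ffa_dev->ops->notifier_ops;

	/*
	 * Bind a global (not per-vCPU) notification with hypothetical ID 3.
	 * Returns -EOPNOTSUPP when ffa_notifications_disabled() is true,
	 * -EINVAL for IDs >= FFA_MAX_NOTIFICATIONS or double registration.
	 */
	return nops->notify_request(ffa_dev, false, demo_notif_cb, NULL, 3);
}
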
+ config ARM_SCMI_POWER_CONTROL tristate "SCMI system power control driver" depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index b31d78fa66..a7bc479651 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -16,7 +16,6 @@ scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y) obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o -obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 96060bf90a..96b4f0694f 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -21,6 +21,17 @@ enum scmi_clock_protocol_cmd { CLOCK_NAME_GET = 0x8, CLOCK_RATE_NOTIFY = 0x9, CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA, + CLOCK_CONFIG_GET = 0xB, + CLOCK_POSSIBLE_PARENTS_GET = 0xC, + CLOCK_PARENT_SET = 0xD, + CLOCK_PARENT_GET = 0xE, +}; + +enum clk_state { + CLK_STATE_DISABLE, + CLK_STATE_ENABLE, + CLK_STATE_RESERVED, + CLK_STATE_UNCHANGED, }; struct scmi_msg_resp_clock_protocol_attributes { @@ -31,17 +42,57 @@ struct scmi_msg_resp_clock_protocol_attributes { struct scmi_msg_resp_clock_attributes { __le32 attributes; -#define CLOCK_ENABLE BIT(0) #define SUPPORTS_RATE_CHANGED_NOTIF(x) ((x) & BIT(31)) #define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30)) #define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29)) +#define SUPPORTS_PARENT_CLOCK(x) ((x) & BIT(28)) u8 name[SCMI_SHORT_NAME_MAX_SIZE]; __le32 clock_enable_latency; }; -struct scmi_clock_set_config { +struct scmi_msg_clock_possible_parents { + __le32 id; + __le32 skip_parents; +}; + +struct scmi_msg_resp_clock_possible_parents { + __le32 num_parent_flags; +#define NUM_PARENTS_RETURNED(x) ((x) & 0xff) +#define NUM_PARENTS_REMAINING(x) ((x) >> 24) + __le32 possible_parents[]; +}; + +struct scmi_msg_clock_set_parent { + __le32 id; + __le32 parent_id; +}; + +struct scmi_msg_clock_config_set { + __le32 id; + __le32 attributes; +}; + +/* Valid only from SCMI clock v2.1 */ +struct scmi_msg_clock_config_set_v2 { __le32 id; __le32 attributes; +#define NULL_OEM_TYPE 0 +#define REGMASK_OEM_TYPE_SET GENMASK(23, 16) +#define REGMASK_CLK_STATE GENMASK(1, 0) + __le32 oem_config_val; +}; + +struct scmi_msg_clock_config_get { + __le32 id; + __le32 flags; +#define REGMASK_OEM_TYPE_GET GENMASK(7, 0) +}; + +struct scmi_msg_resp_clock_config_get { + __le32 attributes; + __le32 config; +#define IS_CLK_ENABLED(x) le32_get_bits((x), BIT(0)) + __le32 oem_config_val; }; struct scmi_msg_clock_describe_rates { @@ -100,6 +151,12 @@ struct clock_info { int max_async_req; atomic_t cur_async_req; struct scmi_clock_info *clk; + int (*clock_config_set)(const struct scmi_protocol_handle *ph, + u32 clk_id, enum clk_state state, + u8 oem_type, u32 oem_val, bool atomic); + int (*clock_config_get)(const struct scmi_protocol_handle *ph, + u32 clk_id, u8 oem_type, u32 *attributes, + bool *enabled, u32 *oem_val, bool atomic); }; static enum scmi_clock_protocol_cmd evt_2_cmd[] = { @@ -132,6 +189,98 @@ scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph, return ret; } +struct scmi_clk_ipriv { + struct device *dev; + u32 clk_id; + struct scmi_clock_info *clk; +}; + +static void iter_clk_possible_parents_prepare_message(void *message, unsigned int desc_index, 
+ const void *priv) +{ + struct scmi_msg_clock_possible_parents *msg = message; + const struct scmi_clk_ipriv *p = priv; + + msg->id = cpu_to_le32(p->clk_id); + /* Set the number of OPPs to be skipped/already read */ + msg->skip_parents = cpu_to_le32(desc_index); +} + +static int iter_clk_possible_parents_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_msg_resp_clock_possible_parents *r = response; + struct scmi_clk_ipriv *p = priv; + struct device *dev = ((struct scmi_clk_ipriv *)p)->dev; + u32 flags; + + flags = le32_to_cpu(r->num_parent_flags); + st->num_returned = NUM_PARENTS_RETURNED(flags); + st->num_remaining = NUM_PARENTS_REMAINING(flags); + + /* + * num parents is not declared previously anywhere so we + * assume it's returned+remaining on first call. + */ + if (!st->max_resources) { + p->clk->num_parents = st->num_returned + st->num_remaining; + p->clk->parents = devm_kcalloc(dev, p->clk->num_parents, + sizeof(*p->clk->parents), + GFP_KERNEL); + if (!p->clk->parents) { + p->clk->num_parents = 0; + return -ENOMEM; + } + st->max_resources = st->num_returned + st->num_remaining; + } + + return 0; +} + +static int iter_clk_possible_parents_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, + void *priv) +{ + const struct scmi_msg_resp_clock_possible_parents *r = response; + struct scmi_clk_ipriv *p = priv; + + u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx]; + + *parent = le32_to_cpu(r->possible_parents[st->loop_idx]); + + return 0; +} + +static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u32 clk_id, + struct scmi_clock_info *clk) +{ + struct scmi_iterator_ops ops = { + .prepare_message = iter_clk_possible_parents_prepare_message, + .update_state = iter_clk_possible_parents_update_state, + .process_response = iter_clk_possible_parents_process_response, + }; + + struct scmi_clk_ipriv ppriv = { + .clk_id = clk_id, + .clk = clk, + .dev = ph->dev, + }; + void *iter; + int ret; + + iter = ph->hops->iter_response_init(ph, &ops, 0, + CLOCK_POSSIBLE_PARENTS_GET, + sizeof(struct scmi_msg_clock_possible_parents), + &ppriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + + return ret; +} + static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, u32 clk_id, struct scmi_clock_info *clk, u32 version) @@ -176,6 +325,8 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, clk->rate_changed_notifications = true; if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes)) clk->rate_change_requested_notifications = true; + if (SUPPORTS_PARENT_CLOCK(attributes)) + scmi_clock_possible_parents(ph, clk_id, clk); } return ret; @@ -193,12 +344,6 @@ static int rate_cmp_func(const void *_r1, const void *_r2) return 1; } -struct scmi_clk_ipriv { - struct device *dev; - u32 clk_id; - struct scmi_clock_info *clk; -}; - static void iter_clk_describe_prepare_message(void *message, const unsigned int desc_index, const void *priv) @@ -395,11 +540,105 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph, static int scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, - u32 config, bool atomic) + enum clk_state state, u8 __unused0, u32 __unused1, + bool atomic) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_clock_config_set *cfg; + + if (state >= CLK_STATE_RESERVED) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, 
CLOCK_CONFIG_SET, + sizeof(*cfg), 0, &t); + if (ret) + return ret; + + t->hdr.poll_completion = atomic; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(clk_id); + cfg->attributes = cpu_to_le32(state); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id, + u32 parent_id) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_clock_set_parent *cfg; + struct clock_info *ci = ph->get_priv(ph); + struct scmi_clock_info *clk; + + if (clk_id >= ci->num_clocks) + return -EINVAL; + + clk = ci->clk + clk_id; + + if (parent_id >= clk->num_parents) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET, + sizeof(*cfg), 0, &t); + if (ret) + return ret; + + t->hdr.poll_completion = false; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(clk_id); + cfg->parent_id = cpu_to_le32(clk->parents[parent_id]); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int +scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id, + u32 *parent_id) { int ret; struct scmi_xfer *t; - struct scmi_clock_set_config *cfg; + + ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET, + sizeof(__le32), sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(clk_id, t->tx.buf); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *parent_id = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + return ret; +} + +/* For SCMI clock v2.1 and onwards */ +static int +scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id, + enum clk_state state, u8 oem_type, u32 oem_val, + bool atomic) +{ + int ret; + u32 attrs; + struct scmi_xfer *t; + struct scmi_msg_clock_config_set_v2 *cfg; + + if (state == CLK_STATE_RESERVED || + (!oem_type && state == CLK_STATE_UNCHANGED)) + return -EINVAL; ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET, sizeof(*cfg), 0, &t); @@ -408,9 +647,16 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, t->hdr.poll_completion = atomic; + attrs = FIELD_PREP(REGMASK_OEM_TYPE_SET, oem_type) | + FIELD_PREP(REGMASK_CLK_STATE, state); + cfg = t->tx.buf; cfg->id = cpu_to_le32(clk_id); - cfg->attributes = cpu_to_le32(config); + cfg->attributes = cpu_to_le32(attrs); + /* Clear in any case */ + cfg->oem_config_val = cpu_to_le32(0); + if (oem_type) + cfg->oem_config_val = cpu_to_le32(oem_val); ret = ph->xops->do_xfer(ph, t); @@ -418,26 +664,124 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, return ret; } -static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id) +static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id, + bool atomic) { - return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false); + struct clock_info *ci = ph->get_priv(ph); + + return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE, + NULL_OEM_TYPE, 0, atomic); } -static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id) +static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id, + bool atomic) { - return scmi_clock_config_set(ph, clk_id, 0, false); + struct clock_info *ci = ph->get_priv(ph); + + return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE, + NULL_OEM_TYPE, 0, atomic); } -static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph, - u32 clk_id) +/* For SCMI clock v2.1 and onwards */ +static int +scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id, 
+ u8 oem_type, u32 *attributes, bool *enabled, + u32 *oem_val, bool atomic) { - return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true); + int ret; + u32 flags; + struct scmi_xfer *t; + struct scmi_msg_clock_config_get *cfg; + + ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET, + sizeof(*cfg), 0, &t); + if (ret) + return ret; + + t->hdr.poll_completion = atomic; + + flags = FIELD_PREP(REGMASK_OEM_TYPE_GET, oem_type); + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(clk_id); + cfg->flags = cpu_to_le32(flags); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + struct scmi_msg_resp_clock_config_get *resp = t->rx.buf; + + if (attributes) + *attributes = le32_to_cpu(resp->attributes); + + if (enabled) + *enabled = IS_CLK_ENABLED(resp->config); + + if (oem_val && oem_type) + *oem_val = le32_to_cpu(resp->oem_config_val); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int +scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id, + u8 oem_type, u32 *attributes, bool *enabled, + u32 *oem_val, bool atomic) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_clock_attributes *resp; + + if (!enabled) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES, + sizeof(clk_id), sizeof(*resp), &t); + if (ret) + return ret; + + t->hdr.poll_completion = atomic; + put_unaligned_le32(clk_id, t->tx.buf); + resp = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *enabled = IS_CLK_ENABLED(resp->attributes); + + ph->xops->xfer_put(ph, t); + + return ret; } -static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph, - u32 clk_id) +static int scmi_clock_state_get(const struct scmi_protocol_handle *ph, + u32 clk_id, bool *enabled, bool atomic) { - return scmi_clock_config_set(ph, clk_id, 0, true); + struct clock_info *ci = ph->get_priv(ph); + + return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL, + enabled, NULL, atomic); +} + +static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph, + u32 clk_id, u8 oem_type, u32 oem_val, + bool atomic) +{ + struct clock_info *ci = ph->get_priv(ph); + + return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED, + oem_type, oem_val, atomic); +} + +static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph, + u32 clk_id, u8 oem_type, u32 *oem_val, + u32 *attributes, bool atomic) +{ + struct clock_info *ci = ph->get_priv(ph); + + return ci->clock_config_get(ph, clk_id, oem_type, attributes, + NULL, oem_val, atomic); } static int scmi_clock_count_get(const struct scmi_protocol_handle *ph) @@ -470,8 +814,11 @@ static const struct scmi_clk_proto_ops clk_proto_ops = { .rate_set = scmi_clock_rate_set, .enable = scmi_clock_enable, .disable = scmi_clock_disable, - .enable_atomic = scmi_clock_enable_atomic, - .disable_atomic = scmi_clock_disable_atomic, + .state_get = scmi_clock_state_get, + .config_oem_get = scmi_clock_config_oem_get, + .config_oem_set = scmi_clock_config_oem_set, + .parent_set = scmi_clock_set_parent, + .parent_get = scmi_clock_get_parent, }; static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph, @@ -604,6 +951,14 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) scmi_clock_describe_rates_get(ph, clkid, clk); } + if (PROTOCOL_REV_MAJOR(version) >= 0x3) { + cinfo->clock_config_set = scmi_clock_config_set_v2; + cinfo->clock_config_get = scmi_clock_config_get_v2; + } else { + cinfo->clock_config_set = scmi_clock_config_set; + cinfo->clock_config_get = scmi_clock_config_get; + } + cinfo->version = 
version; return ph->set_priv(ph, cinfo); } diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 87383c0542..09371f40d6 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -2915,6 +2915,7 @@ static const struct of_device_id scmi_of_match[] = { #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc}, { .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc}, + { .compatible = "qcom,scmi-smc", .data = &scmi_smc_desc}, #endif #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc}, diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index dd344506b0..d26eca37dc 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -796,29 +796,14 @@ static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph, *p_fc = fc; } -/* Device specific ops */ -static int scmi_dev_domain_id(struct device *dev) -{ - struct of_phandle_args clkspec; - - if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells", - 0, &clkspec)) - return -EINVAL; - - return clkspec.args[0]; -} - static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph, - struct device *dev) + struct device *dev, u32 domain) { - int idx, ret, domain; + int idx, ret; unsigned long freq; + struct dev_pm_opp_data data = {}; struct perf_dom_info *dom; - domain = scmi_dev_domain_id(dev); - if (domain < 0) - return -EINVAL; - dom = scmi_perf_domain_lookup(ph, domain); if (IS_ERR(dom)) return PTR_ERR(dom); @@ -829,7 +814,10 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph, else freq = dom->opp[idx].indicative_freq * dom->mult_factor; - ret = dev_pm_opp_add(dev, freq, 0); + data.level = dom->opp[idx].perf; + data.freq = freq; + + ret = dev_pm_opp_add_dynamic(dev, &data); if (ret) { dev_warn(dev, "failed to add opp %luHz\n", freq); dev_pm_opp_remove_all_dynamic(dev); @@ -844,15 +832,10 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph, static int scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph, - struct device *dev) + u32 domain) { - int domain; struct perf_dom_info *dom; - domain = scmi_dev_domain_id(dev); - if (domain < 0) - return -EINVAL; - dom = scmi_perf_domain_lookup(ph, domain); if (IS_ERR(dom)) return PTR_ERR(dom); @@ -949,15 +932,10 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph, } static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph, - struct device *dev) + u32 domain) { - int domain; struct perf_dom_info *dom; - domain = scmi_dev_domain_id(dev); - if (domain < 0) - return false; - dom = scmi_perf_domain_lookup(ph, domain); if (IS_ERR(dom)) return false; @@ -980,7 +958,6 @@ static const struct scmi_perf_proto_ops perf_proto_ops = { .limits_get = scmi_perf_limits_get, .level_set = scmi_perf_level_set, .level_get = scmi_perf_level_get, - .device_domain_id = scmi_dev_domain_id, .transition_latency_get = scmi_dvfs_transition_latency_get, .device_opps_add = scmi_dvfs_device_opps_add, .freq_set = scmi_dvfs_freq_set, diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c index 244929cb4f..cb5617443a 100644 --- a/drivers/firmware/arm_scmi/powercap.c +++ b/drivers/firmware/arm_scmi/powercap.c @@ -360,8 +360,8 @@ static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph, msg = t->tx.buf; msg->domain = 
cpu_to_le32(pc->id); msg->flags = - cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) | - FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp)); + cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, pc->async_powercap_cap_set) | + FIELD_PREP(CAP_SET_IGNORE_DRESP, ignore_dresp)); msg->value = cpu_to_le32(power_cap); if (!pc->async_powercap_cap_set || ignore_dresp) { diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c deleted file mode 100644 index 0e05a79de8..0000000000 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ /dev/null @@ -1,153 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * SCMI Generic power domain support. - * - * Copyright (C) 2018-2021 ARM Ltd. - */ - -#include <linux/err.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/pm_domain.h> -#include <linux/scmi_protocol.h> - -static const struct scmi_power_proto_ops *power_ops; - -struct scmi_pm_domain { - struct generic_pm_domain genpd; - const struct scmi_protocol_handle *ph; - const char *name; - u32 domain; -}; - -#define to_scmi_pd(gpd) container_of(gpd, struct scmi_pm_domain, genpd) - -static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on) -{ - int ret; - u32 state, ret_state; - struct scmi_pm_domain *pd = to_scmi_pd(domain); - - if (power_on) - state = SCMI_POWER_STATE_GENERIC_ON; - else - state = SCMI_POWER_STATE_GENERIC_OFF; - - ret = power_ops->state_set(pd->ph, pd->domain, state); - if (!ret) - ret = power_ops->state_get(pd->ph, pd->domain, &ret_state); - if (!ret && state != ret_state) - return -EIO; - - return ret; - } - -static int scmi_pd_power_on(struct generic_pm_domain *domain) -{ - return scmi_pd_power(domain, true); -} - -static int scmi_pd_power_off(struct generic_pm_domain *domain) -{ - return scmi_pd_power(domain, false); -} - -static int scmi_pm_domain_probe(struct scmi_device *sdev) -{ - int num_domains, i; - struct device *dev = &sdev->dev; - struct device_node *np = dev->of_node; - struct scmi_pm_domain *scmi_pd; - struct genpd_onecell_data *scmi_pd_data; - struct generic_pm_domain **domains; - const struct scmi_handle *handle = sdev->handle; - struct scmi_protocol_handle *ph; - - if (!handle) - return -ENODEV; - - power_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWER, &ph); - if (IS_ERR(power_ops)) - return PTR_ERR(power_ops); - - num_domains = power_ops->num_domains_get(ph); - if (num_domains < 0) { - dev_err(dev, "number of domains not found\n"); - return num_domains; - } - - scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL); - if (!scmi_pd) - return -ENOMEM; - - scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL); - if (!scmi_pd_data) - return -ENOMEM; - - domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL); - if (!domains) - return -ENOMEM; - - for (i = 0; i < num_domains; i++, scmi_pd++) { - u32 state; - - if (power_ops->state_get(ph, i, &state)) { - dev_warn(dev, "failed to get state for domain %d\n", i); - continue; - } - - scmi_pd->domain = i; - scmi_pd->ph = ph; - scmi_pd->name = power_ops->name_get(ph, i); - scmi_pd->genpd.name = scmi_pd->name; - scmi_pd->genpd.power_off = scmi_pd_power_off; - scmi_pd->genpd.power_on = scmi_pd_power_on; - - pm_genpd_init(&scmi_pd->genpd, NULL, - state == SCMI_POWER_STATE_GENERIC_OFF); - - domains[i] = &scmi_pd->genpd; - } - - scmi_pd_data->domains = domains; - scmi_pd_data->num_domains = num_domains; - - dev_set_drvdata(dev, scmi_pd_data); - - return of_genpd_add_provider_onecell(np, scmi_pd_data); -} - -static void scmi_pm_domain_remove(struct scmi_device *sdev) -{ - int i; - struct 
genpd_onecell_data *scmi_pd_data; - struct device *dev = &sdev->dev; - struct device_node *np = dev->of_node; - - of_genpd_del_provider(np); - - scmi_pd_data = dev_get_drvdata(dev); - for (i = 0; i < scmi_pd_data->num_domains; i++) { - if (!scmi_pd_data->domains[i]) - continue; - pm_genpd_remove(scmi_pd_data->domains[i]); - } -} - -static const struct scmi_device_id scmi_id_table[] = { - { SCMI_PROTOCOL_POWER, "genpd" }, - { }, -}; -MODULE_DEVICE_TABLE(scmi, scmi_id_table); - -static struct scmi_driver scmi_power_domain_driver = { - .name = "scmi-power-domain", - .probe = scmi_pm_domain_probe, - .remove = scmi_pm_domain_remove, - .id_table = scmi_id_table, -}; -module_scmi_driver(scmi_power_domain_driver); - -MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); -MODULE_DESCRIPTION("ARM SCMI power domain driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index c193516a25..7611e96650 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -15,6 +15,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/limits.h> #include <linux/processor.h> #include <linux/slab.h> @@ -50,6 +51,8 @@ * @func_id: smc/hvc call function id * @param_page: 4K page number of the shmem channel * @param_offset: Offset within the 4K page of the shmem channel + * @cap_id: smc/hvc doorbell's capability id to be used on Qualcomm virtual + * platforms */ struct scmi_smc { @@ -60,9 +63,10 @@ struct scmi_smc { struct mutex shmem_lock; #define INFLIGHT_NONE MSG_TOKEN_MAX atomic_t inflight; - u32 func_id; - u32 param_page; - u32 param_offset; + unsigned long func_id; + unsigned long param_page; + unsigned long param_offset; + unsigned long cap_id; }; static irqreturn_t smc_msg_done_isr(int irq, void *data) @@ -124,6 +128,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) { struct device *cdev = cinfo->dev; + unsigned long cap_id = ULONG_MAX; struct scmi_smc *scmi_info; resource_size_t size; struct resource res; @@ -162,6 +167,18 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (ret < 0) return ret; + if (of_device_is_compatible(dev->of_node, "qcom,scmi-smc")) { + void __iomem *ptr = (void __iomem *)scmi_info->shmem + size - 8; + /* The capability-id is kept in last 8 bytes of shmem. 
+ * +-------+ <-- 0 + * | shmem | + * +-------+ <-- size - 8 + * | capId | + * +-------+ <-- size + */ + memcpy_fromio(&cap_id, ptr, sizeof(cap_id)); + } + if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) { scmi_info->param_page = SHMEM_PAGE(res.start); scmi_info->param_offset = SHMEM_OFFSET(res.start); @@ -184,6 +201,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, } scmi_info->func_id = func_id; + scmi_info->cap_id = cap_id; scmi_info->cinfo = cinfo; smc_channel_lock_init(scmi_info); cinfo->transport_info = scmi_info; @@ -211,8 +229,6 @@ static int smc_send_message(struct scmi_chan_info *cinfo, { struct scmi_smc *scmi_info = cinfo->transport_info; struct arm_smccc_res res; - unsigned long page = scmi_info->param_page; - unsigned long offset = scmi_info->param_offset; /* * Channel will be released only once response has been @@ -222,8 +238,13 @@ static int smc_send_message(struct scmi_chan_info *cinfo, shmem_tx_prepare(scmi_info->shmem, xfer, cinfo); - arm_smccc_1_1_invoke(scmi_info->func_id, page, offset, 0, 0, 0, 0, 0, - &res); + if (scmi_info->cap_id != ULONG_MAX) + arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0, + 0, 0, 0, 0, 0, &res); + else + arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->param_page, + scmi_info->param_offset, 0, 0, 0, 0, 0, + &res); /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */ if (res.a0) { diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 435d0e2658..3f123f592c 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -26,9 +26,12 @@ #include #include #include +#include #include #include +#include #include +#include #include #include #include @@ -894,11 +897,6 @@ static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch) return 0; } -static const struct of_device_id legacy_scpi_of_match[] = { - {.compatible = "arm,scpi-pre-1.0"}, - {}, -}; - static const struct of_device_id shmem_of_match[] __maybe_unused = { { .compatible = "amlogic,meson-gxbb-scp-shmem", }, { .compatible = "amlogic,meson-axg-scp-shmem", }, @@ -919,8 +917,7 @@ static int scpi_probe(struct platform_device *pdev) if (!scpi_drvinfo) return -ENOMEM; - if (of_match_device(legacy_scpi_of_match, &pdev->dev)) - scpi_drvinfo->is_legacy = true; + scpi_drvinfo->is_legacy = !!device_get_match_data(dev); count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); if (count < 0) { @@ -1038,7 +1035,7 @@ static int scpi_probe(struct platform_device *pdev) static const struct of_device_id scpi_of_match[] = { {.compatible = "arm,scpi"}, - {.compatible = "arm,scpi-pre-1.0"}, + {.compatible = "arm,scpi-pre-1.0", .data = (void *)1UL }, {}, }; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 231f1c70d1..cb374b2da9 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -4,7 +4,7 @@ menu "EFI (Extensible Firmware Interface) Support" config EFI_ESRT bool - depends on EFI && !IA64 + depends on EFI default y config EFI_VARS_PSTORE @@ -123,7 +123,7 @@ config EFI_BOOTLOADER_CONTROL config EFI_CAPSULE_LOADER tristate "EFI capsule loader" - depends on EFI && !IA64 + depends on EFI help This option exposes a loader interface "/dev/efi_capsule_loader" for users to load EFI capsules. 
This driver requires working runtime @@ -224,7 +224,7 @@ config EFI_DISABLE_PCI_DMA config EFI_EARLYCON def_bool y - depends on SERIAL_EARLYCON && !ARM && !IA64 + depends on SERIAL_EARLYCON && !ARM select FONT_SUPPORT select ARCH_USE_MEMREMAP_PROT diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 83f5bb57fa..83092d93f3 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void) efi_memory_desc_t *md; for_each_efi_memory_desc(md) { - int md_size = md->num_pages << EFI_PAGE_SHIFT; + u64 md_size = md->num_pages << EFI_PAGE_SHIFT; struct resource *res; if (!(md->attribute & EFI_MEMORY_SP)) diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c index ef0820f1a9..a00e07b853 100644 --- a/drivers/firmware/efi/efi-init.c +++ b/drivers/firmware/efi/efi-init.c @@ -55,6 +55,15 @@ static phys_addr_t __init efi_to_phys(unsigned long addr) extern __weak const efi_config_table_type_t efi_arch_tables[]; +/* + * x86 defines its own screen_info and uses it even without EFI, + * everything else can get it from here. + */ +#if !defined(CONFIG_X86) && (defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)) +struct screen_info screen_info __section(".data"); +EXPORT_SYMBOL_GPL(screen_info); +#endif + static void __init init_screen_info(void) { struct screen_info *si; @@ -134,15 +143,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md) case EFI_BOOT_SERVICES_DATA: case EFI_CONVENTIONAL_MEMORY: case EFI_PERSISTENT_MEMORY: - /* - * Special purpose memory is 'soft reserved', which means it - * is set aside initially, but can be hotplugged back in or - * be assigned to the dax driver after boot. - */ - if (efi_soft_reserve_enabled() && - (md->attribute & EFI_MEMORY_SP)) - return false; - /* * According to the spec, these regions are no longer reserved * after calling ExitBootServices(). However, we can only use @@ -187,6 +187,16 @@ static __init void reserve_regions(void) size = npages << PAGE_SHIFT; if (is_memory(md)) { + /* + * Special purpose memory is 'soft reserved', which + * means it is set aside initially. Don't add a memblock + * for it now so that it can be hotplugged back in or + * be assigned to the dax driver after boot. 
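An aside on the md_size type changes in this series (above for arm-runtime.c, and again for riscv-runtime.c further down): num_pages in the EFI memory descriptor is 64-bit, so storing num_pages << EFI_PAGE_SHIFT in an int silently overflows for regions of 2 GiB and larger. A standalone illustration, not kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t num_pages = 0x80000;		/* 512Ki pages, i.e. 2 GiB */
	int as_int = num_pages << 12;		/* overflows a 32-bit int */
	uint64_t as_u64 = num_pages << 12;	/* 0x80000000, correct */

	printf("int: %d, u64: 0x%llx\n", as_int, (unsigned long long)as_u64);
	return 0;
}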
+ */ + if (efi_soft_reserve_enabled() && + (md->attribute & EFI_MEMORY_SP)) + continue; + early_init_dt_add_memory_arch(paddr, size); if (!is_usable_memory(md)) @@ -240,5 +250,8 @@ void __init efi_init(void) memblock_reserve(data.phys_map & PAGE_MASK, PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK))); - init_screen_info(); + if (IS_ENABLED(CONFIG_X86) || + IS_ENABLED(CONFIG_SYSFB) || + IS_ENABLED(CONFIG_EFI_EARLYCON)) + init_screen_info(); } diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 1974f0ad32..9d3910d1ab 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -147,7 +147,7 @@ static ssize_t systab_show(struct kobject *kobj, if (efi.smbios != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); - if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86)) + if (IS_ENABLED(CONFIG_X86)) str = efi_systab_show_arch(str); return str - buf; @@ -807,7 +807,6 @@ int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr) return 0; } -#ifndef CONFIG_IA64 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor, size_t size) { @@ -823,10 +822,6 @@ static void __init unmap_fw_vendor(const void *fw_vendor, size_t size) { early_memunmap((void *)fw_vendor, size); } -#else -#define map_fw_vendor(p, s) __va(p) -#define unmap_fw_vendor(v, s) -#endif void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr, unsigned long fw_vendor) @@ -929,11 +924,6 @@ char * __init efi_md_typeattr_format(char *buf, size_t size, return buf; } -/* - * IA64 has a funky EFI memory map that doesn't work the same way as - * other architectures. - */ -#ifndef CONFIG_IA64 /* * efi_mem_attributes - lookup memmap attributes for physical address * @phys_addr: the physical address to lookup @@ -981,7 +971,6 @@ int efi_mem_type(unsigned long phys_addr) } return -EINVAL; } -#endif int efi_status_to_err(efi_status_t status) { diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index a1157c2a71..d561d7de46 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \ -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ -DEFI_HAVE_STRCMP -fno-builtin -fpic \ $(call cc-option,-mno-single-pic-base) -cflags-$(CONFIG_RISCV) += -fpic +cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax cflags-$(CONFIG_LOONGARCH) += -fpie cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt @@ -108,13 +108,6 @@ lib-y := $(patsubst %.o,%.stub.o,$(lib-y)) # https://bugs.llvm.org/show_bug.cgi?id=46480 STUBCOPY_FLAGS-y += --remove-section=.note.gnu.property -# -# For x86, bootloaders like systemd-boot or grub-efi do not zero-initialize the -# .bss section, so the .bss section of the EFI stub needs to be included in the -# .data section of the compressed kernel to ensure initialization. Rename the -# .bss section here so it's easy to pick out in the linker script. 
-# -STUBCOPY_FLAGS-$(CONFIG_X86) += --rename-section .bss=.bss.efistub,load,alloc STUBCOPY_RELOC-$(CONFIG_X86_32) := R_386_32 STUBCOPY_RELOC-$(CONFIG_X86_64) := R_X86_64_64 diff --git a/drivers/firmware/efi/libstub/efi-stub-entry.c b/drivers/firmware/efi/libstub/efi-stub-entry.c index 2f1902e5d4..a6c0498351 100644 --- a/drivers/firmware/efi/libstub/efi-stub-entry.c +++ b/drivers/firmware/efi/libstub/efi-stub-entry.c @@ -13,7 +13,13 @@ struct screen_info *alloc_screen_info(void) { if (IS_ENABLED(CONFIG_ARM)) return __alloc_screen_info(); - return (void *)&screen_info + screen_info_offset; + + if (IS_ENABLED(CONFIG_X86) || + IS_ENABLED(CONFIG_EFI_EARLYCON) || + IS_ENABLED(CONFIG_SYSFB)) + return (void *)&screen_info + screen_info_offset; + + return NULL; } /* diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 212687c30d..c04b82ea40 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -956,7 +956,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed, - int memory_type, unsigned long alloc_limit); + int memory_type, unsigned long alloc_min, + unsigned long alloc_max); efi_status_t efi_random_get_seed(void); diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c index 62d63f7a26..1a9808012a 100644 --- a/drivers/firmware/efi/libstub/kaslr.c +++ b/drivers/firmware/efi/libstub/kaslr.c @@ -119,7 +119,7 @@ efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr, */ status = efi_random_alloc(*reserve_size, min_kimg_align, reserve_addr, phys_seed, - EFI_LOADER_CODE, EFI_ALLOC_LIMIT); + EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT); if (status != EFI_SUCCESS) efi_warn("efi_random_alloc() failed: 0x%lx\n", status); } else { diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index 674a064b8f..4e96a855fd 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -17,7 +17,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, unsigned long size, unsigned long align_shift, - u64 alloc_limit) + u64 alloc_min, u64 alloc_max) { unsigned long align = 1UL << align_shift; u64 first_slot, last_slot, region_end; @@ -30,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, return 0; region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1, - alloc_limit); + alloc_max); if (region_end < size) return 0; - first_slot = round_up(md->phys_addr, align); + first_slot = round_up(max(md->phys_addr, alloc_min), align); last_slot = round_down(region_end - size + 1, align); if (first_slot > last_slot) @@ -56,7 +56,8 @@ efi_status_t efi_random_alloc(unsigned long size, unsigned long *addr, unsigned long random_seed, int memory_type, - unsigned long alloc_limit) + unsigned long alloc_min, + unsigned long alloc_max) { unsigned long total_slots = 0, target_slot; unsigned long total_mirrored_slots = 0; @@ -78,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size, efi_memory_desc_t *md = (void *)map->map + map_offset; unsigned long slots; - slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit); + slots = get_entry_num_slots(md, size, ilog2(align), alloc_min, + alloc_max); MD_NUM_SLOTS(md) = slots; total_slots += slots; if (md->attribute & EFI_MEMORY_MORE_RELIABLE) diff --git a/drivers/firmware/efi/libstub/x86-stub.c 
b/drivers/firmware/efi/libstub/x86-stub.c index 70b325a2f1..99429bc4b0 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -223,8 +223,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) } } -void efi_adjust_memory_range_protection(unsigned long start, - unsigned long size) +efi_status_t efi_adjust_memory_range_protection(unsigned long start, + unsigned long size) { efi_status_t status; efi_gcd_memory_space_desc_t desc; @@ -236,13 +236,17 @@ void efi_adjust_memory_range_protection(unsigned long start, rounded_end = roundup(start + size, EFI_PAGE_SIZE); if (memattr != NULL) { - efi_call_proto(memattr, clear_memory_attributes, rounded_start, - rounded_end - rounded_start, EFI_MEMORY_XP); - return; + status = efi_call_proto(memattr, clear_memory_attributes, + rounded_start, + rounded_end - rounded_start, + EFI_MEMORY_XP); + if (status != EFI_SUCCESS) + efi_warn("Failed to clear EFI_MEMORY_XP attribute\n"); + return status; } if (efi_dxe_table == NULL) - return; + return EFI_SUCCESS; /* * Don't modify memory region attributes, they are @@ -255,7 +259,7 @@ void efi_adjust_memory_range_protection(unsigned long start, status = efi_dxe_call(get_memory_space_descriptor, start, &desc); if (status != EFI_SUCCESS) - return; + break; next = desc.base_address + desc.length; @@ -280,8 +284,10 @@ void efi_adjust_memory_range_protection(unsigned long start, unprotect_start, unprotect_start + unprotect_size, status); + break; } } + return EFI_SUCCESS; } static void setup_unaccepted_memory(void) @@ -452,9 +458,8 @@ void __noreturn efi_stub_entry(efi_handle_t handle, efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg) { - struct boot_params *boot_params; - struct setup_header *hdr; - void *image_base; + static struct boot_params boot_params __page_aligned_bss; + struct setup_header *hdr = &boot_params.hdr; efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID; int options_size = 0; efi_status_t status; @@ -472,30 +477,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, efi_exit(handle, status); } - image_base = efi_table_attr(image, image_base); - - status = efi_allocate_pages(sizeof(struct boot_params), - (unsigned long *)&boot_params, ULONG_MAX); - if (status != EFI_SUCCESS) { - efi_err("Failed to allocate lowmem for boot params\n"); - efi_exit(handle, status); - } - - memset(boot_params, 0x0, sizeof(struct boot_params)); - - hdr = &boot_params->hdr; - - /* Copy the setup header from the second sector to boot_params */ - memcpy(&hdr->jump, image_base + 512, - sizeof(struct setup_header) - offsetof(struct setup_header, jump)); - - /* - * Fill out some of the header fields ourselves because the - * EFI firmware loader doesn't load the first sector. - */ + /* Assign the setup_header fields that the kernel actually cares about */ hdr->root_flags = 1; hdr->vid_mode = 0xffff; - hdr->boot_flag = 0xAA55; hdr->type_of_loader = 0x21; @@ -504,25 +488,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, if (!cmdline_ptr) goto fail; - efi_set_u64_split((unsigned long)cmdline_ptr, - &hdr->cmd_line_ptr, &boot_params->ext_cmd_line_ptr); + efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr, + &boot_params.ext_cmd_line_ptr); - hdr->ramdisk_image = 0; - hdr->ramdisk_size = 0; - - /* - * Disregard any setup data that was provided by the bootloader: - * setup_data could be pointing anywhere, and we have no way of - * authenticating or validating the payload. 
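Stepping back to the efi_set_u64_split() call above: it stores the two 32-bit halves of the command-line address in separate header fields, roughly equivalent to this sketch (lower_32_bits()/upper_32_bits() are the generic kernel helpers):

static inline void set_u64_split_sketch(u64 addr, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(addr);	/* e.g. 0x123456780ULL -> 0x23456780 */
	*hi = upper_32_bits(addr);	/* -> 0x00000001 */
}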
- */ - hdr->setup_data = 0; - - efi_stub_entry(handle, sys_table_arg, boot_params); + efi_stub_entry(handle, sys_table_arg, &boot_params); /* not reached */ fail: - efi_free(sizeof(struct boot_params), (unsigned long)boot_params); - efi_exit(handle, status); } @@ -821,10 +793,13 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n"); seed[0] = 0; } + + boot_params_ptr->hdr.loadflags |= KASLR_FLAG; } status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr, seed[0], EFI_LOADER_CODE, + LOAD_PHYSICAL_ADDR, EFI_X86_KERNEL_ALLOC_LIMIT); if (status != EFI_SUCCESS) return status; @@ -837,9 +812,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) *kernel_entry = addr + entry; - efi_adjust_memory_range_protection(addr, kernel_total_size); - - return EFI_SUCCESS; + return efi_adjust_memory_range_protection(addr, kernel_total_size); } static void __noreturn enter_kernel(unsigned long kernel_addr, @@ -866,7 +839,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle, unsigned long kernel_entry; efi_status_t status; - boot_params_pointer = boot_params; + boot_params_ptr = boot_params; efi_system_table = sys_table_arg; /* Check if we were booted by the EFI firmware */ diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h index 2748bca192..1c20e99a64 100644 --- a/drivers/firmware/efi/libstub/x86-stub.h +++ b/drivers/firmware/efi/libstub/x86-stub.h @@ -2,13 +2,11 @@ #include -extern struct boot_params *boot_params_pointer asm("boot_params"); - extern void trampoline_32bit_src(void *, bool); extern const u16 trampoline_ljmp_imm_offset; -void efi_adjust_memory_range_protection(unsigned long start, - unsigned long size); +efi_status_t efi_adjust_memory_range_protection(unsigned long start, + unsigned long size); #ifdef CONFIG_X86_64 efi_status_t efi_setup_5level_paging(void); diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c index bdb17eac0c..1ceace9567 100644 --- a/drivers/firmware/efi/libstub/zboot.c +++ b/drivers/firmware/efi/libstub/zboot.c @@ -119,7 +119,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab) } status = efi_random_alloc(alloc_size, min_kimg_align, &image_base, - seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT); + seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT); if (status != EFI_SUCCESS) { efi_err("Failed to allocate memory\n"); goto free_cmdline; diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c index 09525fb5c2..01f0f90ea4 100644 --- a/drivers/firmware/efi/riscv-runtime.c +++ b/drivers/firmware/efi/riscv-runtime.c @@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void) efi_memory_desc_t *md; for_each_efi_memory_desc(md) { - int md_size = md->num_pages << EFI_PAGE_SHIFT; + u64 md_size = md->num_pages << EFI_PAGE_SHIFT; struct resource *res; if (!(md->attribute & EFI_MEMORY_SP)) diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c index 79fb687bb9..5b439d0407 100644 --- a/drivers/firmware/efi/unaccepted_memory.c +++ b/drivers/firmware/efi/unaccepted_memory.c @@ -3,6 +3,7 @@ #include #include #include +#include #include /* Protects unaccepted memory bitmap and accepting_list */ @@ -201,3 +202,22 @@ bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end) return ret; } + +#ifdef CONFIG_PROC_VMCORE +static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb, 
+ unsigned long pfn) +{ + return !pfn_is_unaccepted_memory(pfn); +} + +static struct vmcore_cb vmcore_cb = { + .pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram, +}; + +static int __init unaccepted_memory_init_kdump(void) +{ + register_vmcore_cb(&vmcore_cb); + return 0; +} +core_initcall(unaccepted_memory_init_kdump); +#endif /* CONFIG_PROC_VMCORE */ diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index c027d99f2a..183613f82a 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -22,9 +22,3 @@ config IMX_SCU This driver manages the IPC interface between host CPU and the SCU firmware running on M4. - -config IMX_SCU_PD - bool "IMX SCU Power Domain driver" - depends on IMX_SCU - help - The System Controller Firmware (SCFW) based power domain driver. diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index 9a2656d736..ed60f11030 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -13,9 +13,10 @@ #include #include #include -#include +#include #include #include +#include #include #include #include @@ -67,7 +68,7 @@ static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip, return cmd->smc_id; } -static u32 __meson_sm_call(u32 cmd, u32 arg0, u32 arg1, u32 arg2, +static s32 __meson_sm_call(u32 cmd, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4) { struct arm_smccc_res res; @@ -102,9 +103,10 @@ static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size) * Return: 0 on success, a negative value on error */ int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index, - u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4) + s32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4) { - u32 cmd, lret; + u32 cmd; + s32 lret; if (!fw->chip) return -ENOENT; @@ -143,7 +145,7 @@ int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer, unsigned int bsize, unsigned int cmd_index, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4) { - u32 size; + s32 size; int ret; if (!fw->chip) @@ -158,11 +160,16 @@ int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer, if (meson_sm_call(fw, cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0) return -EINVAL; - if (size > bsize) + if (size < 0 || size > bsize) return -EINVAL; ret = size; + /* In some cases (for example GET_CHIP_ID command), + * SMC doesn't return the number of bytes read, even + * though the bytes were actually read into sm_shmem_out. + * So this check is needed. 
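A hedged sketch of a caller, to show how the now sign-checked size flows back to users of this helper; the command index and buffer size are illustrative rather than taken from this patch:

	u8 chip_id[16];
	int ret;

	/* Hypothetical read; on success ret is the number of bytes copied. */
	ret = meson_sm_call_read(fw, chip_id, sizeof(chip_id), SM_GET_CHIP_ID,
				 0, 0, 0, 0, 0);
	if (ret < 0)
		return ret;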
+ */ if (!size) size = bsize; @@ -192,7 +199,7 @@ int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer, unsigned int size, unsigned int cmd_index, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4) { - u32 written; + s32 written; if (!fw->chip) return -ENOENT; @@ -208,7 +215,7 @@ int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer, if (meson_sm_call(fw, cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0) return -EINVAL; - if (!written) + if (written <= 0 || written > size) return -EINVAL; return written; @@ -291,7 +298,7 @@ static int __init meson_sm_probe(struct platform_device *pdev) if (!fw) return -ENOMEM; - chip = of_match_device(meson_sm_ids, dev)->data; + chip = device_get_match_data(dev); if (!chip) return -EINVAL; diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c deleted file mode 100644 index 715a45442d..0000000000 --- a/drivers/firmware/pcdp.c +++ /dev/null @@ -1,135 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Parse the EFI PCDP table to locate the console device. - * - * (c) Copyright 2002, 2003, 2004 Hewlett-Packard Development Company, L.P. - * Khalid Aziz - * Alex Williamson - * Bjorn Helgaas - */ - -#include -#include -#include -#include -#include -#include -#include "pcdp.h" - -static int __init -setup_serial_console(struct pcdp_uart *uart) -{ -#ifdef CONFIG_SERIAL_8250_CONSOLE - int mmio; - static char options[64], *p = options; - char parity; - - mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY); - p += sprintf(p, "uart8250,%s,0x%llx", - mmio ? "mmio" : "io", uart->addr.address); - if (uart->baud) { - p += sprintf(p, ",%llu", uart->baud); - if (uart->bits) { - switch (uart->parity) { - case 0x2: parity = 'e'; break; - case 0x3: parity = 'o'; break; - default: parity = 'n'; - } - p += sprintf(p, "%c%d", parity, uart->bits); - } - } - - add_preferred_console("uart", 8250, &options[9]); - return setup_earlycon(options); -#else - return -ENODEV; -#endif -} - -static int __init -setup_vga_console(struct pcdp_device *dev) -{ -#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) - u8 *if_ptr; - - if_ptr = ((u8 *)dev + sizeof(struct pcdp_device)); - if (if_ptr[0] == PCDP_IF_PCI) { - struct pcdp_if_pci if_pci; - - /* struct copy since ifptr might not be correctly aligned */ - - memcpy(&if_pci, if_ptr, sizeof(if_pci)); - - if (if_pci.trans & PCDP_PCI_TRANS_IOPORT) - vga_console_iobase = if_pci.ioport_tra; - - if (if_pci.trans & PCDP_PCI_TRANS_MMIO) - vga_console_membase = if_pci.mmio_tra; - } - - if (efi_mem_type(vga_console_membase + 0xA0000) == EFI_CONVENTIONAL_MEMORY) { - printk(KERN_ERR "PCDP: VGA selected, but frame buffer is not MMIO!\n"); - return -ENODEV; - } - - conswitchp = &vga_con; - printk(KERN_INFO "PCDP: VGA console\n"); - return 0; -#else - return -ENODEV; -#endif -} - -extern unsigned long hcdp_phys; - -int __init -efi_setup_pcdp_console(char *cmdline) -{ - struct pcdp *pcdp; - struct pcdp_uart *uart; - struct pcdp_device *dev, *end; - int i, serial = 0; - int rc = -ENODEV; - - if (hcdp_phys == EFI_INVALID_TABLE_ADDR) - return -ENODEV; - - pcdp = early_memremap(hcdp_phys, 4096); - printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, hcdp_phys); - - if (strstr(cmdline, "console=hcdp")) { - if (pcdp->rev < 3) - serial = 1; - } else if (strstr(cmdline, "console=")) { - printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n"); - goto out; - } - - if (pcdp->rev < 3 && efi_uart_console_only()) - serial = 1; - - for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { - if (uart->flags & 
PCDP_UART_PRIMARY_CONSOLE || serial) { - if (uart->type == PCDP_CONSOLE_UART) { - rc = setup_serial_console(uart); - goto out; - } - } - } - - end = (struct pcdp_device *) ((u8 *) pcdp + pcdp->length); - for (dev = (struct pcdp_device *) (pcdp->uart + pcdp->num_uarts); - dev < end; - dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) { - if (dev->flags & PCDP_PRIMARY_CONSOLE) { - if (dev->type == PCDP_CONSOLE_VGA) { - rc = setup_vga_console(dev); - goto out; - } - } - } - -out: - early_memunmap(pcdp, 4096); - return rc; -} diff --git a/drivers/firmware/pcdp.h b/drivers/firmware/pcdp.h deleted file mode 100644 index e02540571c..0000000000 --- a/drivers/firmware/pcdp.h +++ /dev/null @@ -1,108 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Definitions for PCDP-defined console devices - * - * For DIG64_HCDPv10a_01.pdf and DIG64_PCDPv20.pdf (v1.0a and v2.0 resp.), - * please see - * - * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P. - * Khalid Aziz - * Bjorn Helgaas - */ - -#define PCDP_CONSOLE 0 -#define PCDP_DEBUG 1 -#define PCDP_CONSOLE_OUTPUT 2 -#define PCDP_CONSOLE_INPUT 3 - -#define PCDP_UART (0 << 3) -#define PCDP_VGA (1 << 3) -#define PCDP_USB (2 << 3) - -/* pcdp_uart.type and pcdp_device.type */ -#define PCDP_CONSOLE_UART (PCDP_UART | PCDP_CONSOLE) -#define PCDP_DEBUG_UART (PCDP_UART | PCDP_DEBUG) -#define PCDP_CONSOLE_VGA (PCDP_VGA | PCDP_CONSOLE_OUTPUT) -#define PCDP_CONSOLE_USB (PCDP_USB | PCDP_CONSOLE_INPUT) - -/* pcdp_uart.flags */ -#define PCDP_UART_EDGE_SENSITIVE (1 << 0) -#define PCDP_UART_ACTIVE_LOW (1 << 1) -#define PCDP_UART_PRIMARY_CONSOLE (1 << 2) -#define PCDP_UART_IRQ (1 << 6) /* in pci_func for rev < 3 */ -#define PCDP_UART_PCI (1 << 7) /* in pci_func for rev < 3 */ - -struct pcdp_uart { - u8 type; - u8 bits; - u8 parity; - u8 stop_bits; - u8 pci_seg; - u8 pci_bus; - u8 pci_dev; - u8 pci_func; - u64 baud; - struct acpi_generic_address addr; - u16 pci_dev_id; - u16 pci_vendor_id; - u32 gsi; - u32 clock_rate; - u8 pci_prog_intfc; - u8 flags; - u16 conout_index; - u32 reserved; -} __attribute__((packed)); - -#define PCDP_IF_PCI 1 - -/* pcdp_if_pci.trans */ -#define PCDP_PCI_TRANS_IOPORT 0x02 -#define PCDP_PCI_TRANS_MMIO 0x01 - -struct pcdp_if_pci { - u8 interconnect; - u8 reserved; - u16 length; - u8 segment; - u8 bus; - u8 dev; - u8 fun; - u16 dev_id; - u16 vendor_id; - u32 acpi_interrupt; - u64 mmio_tra; - u64 ioport_tra; - u8 flags; - u8 trans; -} __attribute__((packed)); - -struct pcdp_vga { - u8 count; /* address space descriptors */ -} __attribute__((packed)); - -/* pcdp_device.flags */ -#define PCDP_PRIMARY_CONSOLE 1 - -struct pcdp_device { - u8 type; - u8 flags; - u16 length; - u16 efi_index; - /* next data is pcdp_if_pci or pcdp_if_acpi (not yet supported) */ - /* next data is device specific type (currently only pcdp_vga) */ -} __attribute__((packed)); - -struct pcdp { - u8 signature[4]; - u32 length; - u8 rev; /* PCDP v2.0 is rev 3 */ - u8 chksum; - u8 oemid[6]; - u8 oem_tabid[8]; - u32 oem_rev; - u8 creator_id[4]; - u32 creator_rev; - u32 num_uarts; - struct pcdp_uart uart[]; /* actual size is num_uarts */ - /* remainder of table is pcdp_device structures */ -} __attribute__((packed)); diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig new file mode 100644 index 0000000000..3f05d9854d --- /dev/null +++ b/drivers/firmware/qcom/Kconfig @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# For a description of the syntax of this configuration file, +# see 
Documentation/kbuild/kconfig-language.rst. +# + +menu "Qualcomm firmware drivers" + +config QCOM_SCM + tristate + +config QCOM_SCM_DOWNLOAD_MODE_DEFAULT + bool "Qualcomm download mode enabled by default" + depends on QCOM_SCM + help + A device with "download mode" enabled will upon an unexpected + warm-restart enter a special debug mode that allows the user to + "download" memory content over USB for offline postmortem analysis. + The feature can be enabled/disabled on the kernel command line. + + Say Y here to enable "download mode" by default. + +config QCOM_QSEECOM + bool "Qualcomm QSEECOM interface driver" + depends on QCOM_SCM=y + select AUXILIARY_BUS + help + Various Qualcomm SoCs have a Secure Execution Environment (SEE) running + in the Trust Zone. This module provides an interface to that via the + QSEECOM mechanism, using SCM calls. + + The QSEECOM interface allows, among other things, access to applications + running in the SEE. An example of such an application is 'uefisecapp', + which is required to access UEFI variables on certain systems. If + selected, the interface will also attempt to detect and register client + devices for supported applications. + + Select Y here to enable the QSEECOM interface driver. + +config QCOM_QSEECOM_UEFISECAPP + bool "Qualcomm SEE UEFI Secure App client driver" + depends on QCOM_QSEECOM + depends on EFI + help + Various Qualcomm SoCs do not allow direct access to EFI variables. + Instead, these need to be accessed via the UEFI Secure Application + (uefisecapp), residing in the Secure Execution Environment (SEE). + + This module provides a client driver for uefisecapp, installing efivar + operations to allow the kernel accessing EFI variables, and via that also + provide user-space with access to EFI variables via efivarfs. + + Select Y here to provide access to EFI variables on the aforementioned + platforms. + +endmenu diff --git a/drivers/firmware/qcom/Makefile b/drivers/firmware/qcom/Makefile new file mode 100644 index 0000000000..c9f12ee822 --- /dev/null +++ b/drivers/firmware/qcom/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the linux kernel. +# + +obj-$(CONFIG_QCOM_SCM) += qcom-scm.o +qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o +obj-$(CONFIG_QCOM_QSEECOM) += qcom_qseecom.o +obj-$(CONFIG_QCOM_QSEECOM_UEFISECAPP) += qcom_qseecom_uefisecapp.o diff --git a/drivers/firmware/qcom/qcom_qseecom.c b/drivers/firmware/qcom/qcom_qseecom.c new file mode 100644 index 0000000000..731e6d5719 --- /dev/null +++ b/drivers/firmware/qcom/qcom_qseecom.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Qualcomm Secure Execution Environment (SEE) interface (QSEECOM). + * Responsible for setting up and managing QSEECOM client devices. 
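Clients registered by this driver talk to their SEE application through qcom_qseecom_app_send(). A hedged sketch of such a call, with made-up request and response layouts (real layouts are application-specific, as uefisecapp below shows):

struct my_app_req {
	u32 command_id;				/* placeholder command */
} __packed;

struct my_app_rsp {
	u32 command_id;
	u32 status;
} __packed;

static int my_app_ping(struct qseecom_client *client)
{
	struct my_app_req req = { .command_id = 0x8000 };
	struct my_app_rsp rsp;
	int ret;

	ret = qcom_qseecom_app_send(client, &req, sizeof(req), &rsp, sizeof(rsp));
	if (ret)
		return ret;			/* transport/SCM failure */

	return rsp.status ? -EIO : 0;		/* app-level status */
}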
+ * + * Copyright (C) 2023 Maximilian Luz + */ +#include +#include +#include +#include +#include + +#include +#include + +struct qseecom_app_desc { + const char *app_name; + const char *dev_name; +}; + +static void qseecom_client_release(struct device *dev) +{ + struct qseecom_client *client; + + client = container_of(dev, struct qseecom_client, aux_dev.dev); + kfree(client); +} + +static void qseecom_client_remove(void *data) +{ + struct qseecom_client *client = data; + + auxiliary_device_delete(&client->aux_dev); + auxiliary_device_uninit(&client->aux_dev); +} + +static int qseecom_client_register(struct platform_device *qseecom_dev, + const struct qseecom_app_desc *desc) +{ + struct qseecom_client *client; + u32 app_id; + int ret; + + /* Try to find the app ID, skip device if not found */ + ret = qcom_scm_qseecom_app_get_id(desc->app_name, &app_id); + if (ret) + return ret == -ENOENT ? 0 : ret; + + dev_info(&qseecom_dev->dev, "setting up client for %s\n", desc->app_name); + + /* Allocate and set-up the client device */ + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return -ENOMEM; + + client->aux_dev.name = desc->dev_name; + client->aux_dev.dev.parent = &qseecom_dev->dev; + client->aux_dev.dev.release = qseecom_client_release; + client->app_id = app_id; + + ret = auxiliary_device_init(&client->aux_dev); + if (ret) { + kfree(client); + return ret; + } + + ret = auxiliary_device_add(&client->aux_dev); + if (ret) { + auxiliary_device_uninit(&client->aux_dev); + return ret; + } + + ret = devm_add_action_or_reset(&qseecom_dev->dev, qseecom_client_remove, client); + if (ret) + return ret; + + return 0; +} + +/* + * List of supported applications. One client device will be created per entry, + * assuming the app has already been loaded (usually by firmware bootloaders) + * and its ID can be queried successfully. + */ +static const struct qseecom_app_desc qcom_qseecom_apps[] = { + { "qcom.tz.uefisecapp", "uefisecapp" }, +}; + +static int qcom_qseecom_probe(struct platform_device *qseecom_dev) +{ + int ret; + int i; + + /* Set up client devices for each base application */ + for (i = 0; i < ARRAY_SIZE(qcom_qseecom_apps); i++) { + ret = qseecom_client_register(qseecom_dev, &qcom_qseecom_apps[i]); + if (ret) + return ret; + } + + return 0; +} + +static struct platform_driver qcom_qseecom_driver = { + .driver = { + .name = "qcom_qseecom", + }, + .probe = qcom_qseecom_probe, +}; + +static int __init qcom_qseecom_init(void) +{ + return platform_driver_register(&qcom_qseecom_driver); +} +subsys_initcall(qcom_qseecom_init); + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Driver for the Qualcomm SEE (QSEECOM) interface"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c new file mode 100644 index 0000000000..32188f098e --- /dev/null +++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c @@ -0,0 +1,877 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Client driver for Qualcomm UEFI Secure Application (qcom.tz.uefisecapp). + * Provides access to UEFI variables on platforms where they are secured by the + * aforementioned Secure Execution Environment (SEE) application. + * + * Copyright (C) 2023 Maximilian Luz + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* -- Qualcomm "uefisecapp" interface definitions. 
-------------------------- */ + +/* Maximum length of name string with null-terminator */ +#define QSEE_MAX_NAME_LEN 1024 + +#define QSEE_CMD_UEFI(x) (0x8000 | (x)) +#define QSEE_CMD_UEFI_GET_VARIABLE QSEE_CMD_UEFI(0) +#define QSEE_CMD_UEFI_SET_VARIABLE QSEE_CMD_UEFI(1) +#define QSEE_CMD_UEFI_GET_NEXT_VARIABLE QSEE_CMD_UEFI(2) +#define QSEE_CMD_UEFI_QUERY_VARIABLE_INFO QSEE_CMD_UEFI(3) + +/** + * struct qsee_req_uefi_get_variable - Request for GetVariable command. + * @command_id: The ID of the command. Must be %QSEE_CMD_UEFI_GET_VARIABLE. + * @length: Length of the request in bytes, including this struct and any + * parameters (name, GUID) stored after it as well as any padding + * thereof for alignment. + * @name_offset: Offset from the start of this struct to where the variable + * name is stored (as utf-16 string), in bytes. + * @name_size: Size of the name parameter in bytes, including null-terminator. + * @guid_offset: Offset from the start of this struct to where the GUID + * parameter is stored, in bytes. + * @guid_size: Size of the GUID parameter in bytes, i.e. sizeof(efi_guid_t). + * @data_size: Size of the output buffer, in bytes. + */ +struct qsee_req_uefi_get_variable { + u32 command_id; + u32 length; + u32 name_offset; + u32 name_size; + u32 guid_offset; + u32 guid_size; + u32 data_size; +} __packed; + +/** + * struct qsee_rsp_uefi_get_variable - Response for GetVariable command. + * @command_id: The ID of the command. Should be %QSEE_CMD_UEFI_GET_VARIABLE. + * @length: Length of the response in bytes, including this struct and the + * returned data. + * @status: Status of this command. + * @attributes: EFI variable attributes. + * @data_offset: Offset from the start of this struct to where the data is + * stored, in bytes. + * @data_size: Size of the returned data, in bytes. In case status indicates + * that the buffer is too small, this will be the size required + * to store the EFI variable data. + */ +struct qsee_rsp_uefi_get_variable { + u32 command_id; + u32 length; + u32 status; + u32 attributes; + u32 data_offset; + u32 data_size; +} __packed; + +/** + * struct qsee_req_uefi_set_variable - Request for the SetVariable command. + * @command_id: The ID of the command. Must be %QSEE_CMD_UEFI_SET_VARIABLE. + * @length: Length of the request in bytes, including this struct and any + * parameters (name, GUID, data) stored after it as well as any + * padding thereof required for alignment. + * @name_offset: Offset from the start of this struct to where the variable + * name is stored (as utf-16 string), in bytes. + * @name_size: Size of the name parameter in bytes, including null-terminator. + * @guid_offset: Offset from the start of this struct to where the GUID + * parameter is stored, in bytes. + * @guid_size: Size of the GUID parameter in bytes, i.e. sizeof(efi_guid_t). + * @attributes: The EFI variable attributes to set for this variable. + * @data_offset: Offset from the start of this struct to where the EFI variable + * data is stored, in bytes. + * @data_size: Size of EFI variable data, in bytes. + * + */ +struct qsee_req_uefi_set_variable { + u32 command_id; + u32 length; + u32 name_offset; + u32 name_size; + u32 guid_offset; + u32 guid_size; + u32 attributes; + u32 data_offset; + u32 data_size; +} __packed; + +/** + * struct qsee_rsp_uefi_set_variable - Response for the SetVariable command. + * @command_id: The ID of the command. Should be %QSEE_CMD_UEFI_SET_VARIABLE. + * @length: The length of this response, i.e. the size of this struct in + * bytes. 
+ * @status: Status of this command. + * @_unknown1: Unknown response field. + * @_unknown2: Unknown response field. + */ +struct qsee_rsp_uefi_set_variable { + u32 command_id; + u32 length; + u32 status; + u32 _unknown1; + u32 _unknown2; +} __packed; + +/** + * struct qsee_req_uefi_get_next_variable - Request for the + * GetNextVariableName command. + * @command_id: The ID of the command. Must be + * %QSEE_CMD_UEFI_GET_NEXT_VARIABLE. + * @length: Length of the request in bytes, including this struct and any + * parameters (name, GUID) stored after it as well as any padding + * thereof for alignment. + * @guid_offset: Offset from the start of this struct to where the GUID + * parameter is stored, in bytes. + * @guid_size: Size of the GUID parameter in bytes, i.e. sizeof(efi_guid_t). + * @name_offset: Offset from the start of this struct to where the variable + * name is stored (as utf-16 string), in bytes. + * @name_size: Size of the name parameter in bytes, including null-terminator. + */ +struct qsee_req_uefi_get_next_variable { + u32 command_id; + u32 length; + u32 guid_offset; + u32 guid_size; + u32 name_offset; + u32 name_size; +} __packed; + +/** + * struct qsee_rsp_uefi_get_next_variable - Response for the + * GetNextVariableName command. + * @command_id: The ID of the command. Should be + * %QSEE_CMD_UEFI_GET_NEXT_VARIABLE. + * @length: Length of the response in bytes, including this struct and any + * parameters (name, GUID) stored after it as well as any padding + * thereof for alignment. + * @status: Status of this command. + * @guid_offset: Offset from the start of this struct to where the GUID + * parameter is stored, in bytes. + * @guid_size: Size of the GUID parameter in bytes, i.e. sizeof(efi_guid_t). + * @name_offset: Offset from the start of this struct to where the variable + * name is stored (as utf-16 string), in bytes. + * @name_size: Size of the name parameter in bytes, including null-terminator. + */ +struct qsee_rsp_uefi_get_next_variable { + u32 command_id; + u32 length; + u32 status; + u32 guid_offset; + u32 guid_size; + u32 name_offset; + u32 name_size; +} __packed; + +/** + * struct qsee_req_uefi_query_variable_info - Response for the + * GetNextVariableName command. + * @command_id: The ID of the command. Must be + * %QSEE_CMD_UEFI_QUERY_VARIABLE_INFO. + * @length: The length of this request, i.e. the size of this struct in + * bytes. + * @attributes: The storage attributes to query the info for. + */ +struct qsee_req_uefi_query_variable_info { + u32 command_id; + u32 length; + u32 attributes; +} __packed; + +/** + * struct qsee_rsp_uefi_query_variable_info - Response for the + * GetNextVariableName command. + * @command_id: The ID of the command. Must be + * %QSEE_CMD_UEFI_QUERY_VARIABLE_INFO. + * @length: The length of this response, i.e. the size of this + * struct in bytes. + * @status: Status of this command. + * @_pad: Padding. + * @storage_space: Full storage space size, in bytes. + * @remaining_space: Free storage space available, in bytes. + * @max_variable_size: Maximum variable data size, in bytes. + */ +struct qsee_rsp_uefi_query_variable_info { + u32 command_id; + u32 length; + u32 status; + u32 _pad; + u64 storage_space; + u64 remaining_space; + u64 max_variable_size; +} __packed; + +/* -- Alignment helpers ----------------------------------------------------- */ + +/* + * Helper macro to ensure proper alignment of types (fields and arrays) when + * stored in some (contiguous) buffer. 
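To make the helpers defined below concrete, a worked example: for a GetVariable request whose name is five UTF-16 code units (terminator included), with the request struct above (28 packed bytes) and efi_guid_t taken as 16 bytes with 4-byte alignment, the offsets come out as:

/*
 *	__field(*req_data)                  -> offset 0,       __len = 28
 *	__array_offs(*name, 5, &name_offs)  -> name_offs = 28, __len = 38
 *	__field_offs(*guid, &guid_offs)     -> guid_offs = ALIGN(38, 4) = 40,
 *	                                       __len = req_size = 56
 */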
+ * + * Note: The driver from which this one has been reverse-engineered expects an + * alignment of 8 bytes (64 bits) for GUIDs. Our definition of efi_guid_t, + * however, has an alignment of 4 byte (32 bits). So far, this seems to work + * fine here. See also the comment on the typedef of efi_guid_t. + */ +#define qcuefi_buf_align_fields(fields...) \ + ({ \ + size_t __len = 0; \ + fields \ + __len; \ + }) + +#define __field_impl(size, align, offset) \ + ({ \ + size_t *__offset = (offset); \ + size_t __aligned; \ + \ + __aligned = ALIGN(__len, align); \ + __len = __aligned + (size); \ + \ + if (__offset) \ + *__offset = __aligned; \ + }); + +#define __array_offs(type, count, offset) \ + __field_impl(sizeof(type) * (count), __alignof__(type), offset) + +#define __array(type, count) __array_offs(type, count, NULL) +#define __field_offs(type, offset) __array_offs(type, 1, offset) +#define __field(type) __array_offs(type, 1, NULL) + +/* -- UEFI app interface. --------------------------------------------------- */ + +struct qcuefi_client { + struct qseecom_client *client; + struct efivars efivars; +}; + +static struct device *qcuefi_dev(struct qcuefi_client *qcuefi) +{ + return &qcuefi->client->aux_dev.dev; +} + +static efi_status_t qsee_uefi_status_to_efi(u32 status) +{ + u64 category = status & 0xf0000000; + u64 code = status & 0x0fffffff; + + return category << (BITS_PER_LONG - 32) | code; +} + +static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const efi_char16_t *name, + const efi_guid_t *guid, u32 *attributes, + unsigned long *data_size, void *data) +{ + struct qsee_req_uefi_get_variable *req_data; + struct qsee_rsp_uefi_get_variable *rsp_data; + unsigned long buffer_size = *data_size; + efi_status_t efi_status = EFI_SUCCESS; + unsigned long name_length; + size_t guid_offs; + size_t name_offs; + size_t req_size; + size_t rsp_size; + ssize_t status; + + if (!name || !guid) + return EFI_INVALID_PARAMETER; + + name_length = ucs2_strnlen(name, QSEE_MAX_NAME_LEN) + 1; + if (name_length > QSEE_MAX_NAME_LEN) + return EFI_INVALID_PARAMETER; + + if (buffer_size && !data) + return EFI_INVALID_PARAMETER; + + req_size = qcuefi_buf_align_fields( + __field(*req_data) + __array_offs(*name, name_length, &name_offs) + __field_offs(*guid, &guid_offs) + ); + + rsp_size = qcuefi_buf_align_fields( + __field(*rsp_data) + __array(u8, buffer_size) + ); + + req_data = kzalloc(req_size, GFP_KERNEL); + if (!req_data) { + efi_status = EFI_OUT_OF_RESOURCES; + goto out; + } + + rsp_data = kzalloc(rsp_size, GFP_KERNEL); + if (!rsp_data) { + efi_status = EFI_OUT_OF_RESOURCES; + goto out_free_req; + } + + req_data->command_id = QSEE_CMD_UEFI_GET_VARIABLE; + req_data->data_size = buffer_size; + req_data->name_offset = name_offs; + req_data->name_size = name_length * sizeof(*name); + req_data->guid_offset = guid_offs; + req_data->guid_size = sizeof(*guid); + req_data->length = req_size; + + status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length); + if (status < 0) { + efi_status = EFI_INVALID_PARAMETER; + goto out_free; + } + + memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); + + status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size); + if (status) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->length < sizeof(*rsp_data)) { + efi_status = EFI_DEVICE_ERROR; + goto 
out_free; + } + + if (rsp_data->status) { + dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", + __func__, rsp_data->status); + efi_status = qsee_uefi_status_to_efi(rsp_data->status); + + /* Update size and attributes in case buffer is too small. */ + if (efi_status == EFI_BUFFER_TOO_SMALL) { + *data_size = rsp_data->data_size; + if (attributes) + *attributes = rsp_data->attributes; + } + + goto out_free; + } + + if (rsp_data->length > rsp_size) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + /* + * Note: We need to set attributes and data size even if the buffer is + * too small and we won't copy any data. This is described in spec, so + * that callers can either allocate a buffer properly (with two calls + * to this function) or just read back attributes without having to + * deal with that. + * + * Specifically: + * - If we have a buffer size of zero and no buffer, just return the + * attributes, required size, and indicate success. + * - If the buffer size is nonzero but too small, indicate that as an + * error. + * - Otherwise, we are good to copy the data. + * + * Note that we have already ensured above that the buffer pointer is + * non-NULL if its size is nonzero. + */ + *data_size = rsp_data->data_size; + if (attributes) + *attributes = rsp_data->attributes; + + if (buffer_size == 0 && !data) { + efi_status = EFI_SUCCESS; + goto out_free; + } + + if (buffer_size < rsp_data->data_size) { + efi_status = EFI_BUFFER_TOO_SMALL; + goto out_free; + } + + memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size); + +out_free: + kfree(rsp_data); +out_free_req: + kfree(req_data); +out: + return efi_status; +} + +static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const efi_char16_t *name, + const efi_guid_t *guid, u32 attributes, + unsigned long data_size, const void *data) +{ + struct qsee_req_uefi_set_variable *req_data; + struct qsee_rsp_uefi_set_variable *rsp_data; + efi_status_t efi_status = EFI_SUCCESS; + unsigned long name_length; + size_t name_offs; + size_t guid_offs; + size_t data_offs; + size_t req_size; + ssize_t status; + + if (!name || !guid) + return EFI_INVALID_PARAMETER; + + name_length = ucs2_strnlen(name, QSEE_MAX_NAME_LEN) + 1; + if (name_length > QSEE_MAX_NAME_LEN) + return EFI_INVALID_PARAMETER; + + /* + * Make sure we have some data if data_size is nonzero. Note that using + * a size of zero is a valid use-case described in spec and deletes the + * variable. 
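Correspondingly, a hedged caller-side sketch: through the efivar operation wired up further down, deleting a variable is simply a set with zero size (the variable names here are illustrative):

	efi_status_t status;

	/* Hypothetical deletion: zero size and NULL data remove the variable. */
	status = qcuefi_set_variable(name, vendor, attributes, 0, NULL);
	if (status != EFI_SUCCESS)
		return status;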
+ */ + if (data_size && !data) + return EFI_INVALID_PARAMETER; + + req_size = qcuefi_buf_align_fields( + __field(*req_data) + __array_offs(*name, name_length, &name_offs) + __field_offs(*guid, &guid_offs) + __array_offs(u8, data_size, &data_offs) + ); + + req_data = kzalloc(req_size, GFP_KERNEL); + if (!req_data) { + efi_status = EFI_OUT_OF_RESOURCES; + goto out; + } + + rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL); + if (!rsp_data) { + efi_status = EFI_OUT_OF_RESOURCES; + goto out_free_req; + } + + req_data->command_id = QSEE_CMD_UEFI_SET_VARIABLE; + req_data->attributes = attributes; + req_data->name_offset = name_offs; + req_data->name_size = name_length * sizeof(*name); + req_data->guid_offset = guid_offs; + req_data->guid_size = sizeof(*guid); + req_data->data_offset = data_offs; + req_data->data_size = data_size; + req_data->length = req_size; + + status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length); + if (status < 0) { + efi_status = EFI_INVALID_PARAMETER; + goto out_free; + } + + memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); + + if (data_size) + memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size); + + status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, + sizeof(*rsp_data)); + if (status) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->length != sizeof(*rsp_data)) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->status) { + dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", + __func__, rsp_data->status); + efi_status = qsee_uefi_status_to_efi(rsp_data->status); + } + +out_free: + kfree(rsp_data); +out_free_req: + kfree(req_data); +out: + return efi_status; +} + +static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, + unsigned long *name_size, efi_char16_t *name, + efi_guid_t *guid) +{ + struct qsee_req_uefi_get_next_variable *req_data; + struct qsee_rsp_uefi_get_next_variable *rsp_data; + efi_status_t efi_status = EFI_SUCCESS; + size_t guid_offs; + size_t name_offs; + size_t req_size; + size_t rsp_size; + ssize_t status; + + if (!name_size || !name || !guid) + return EFI_INVALID_PARAMETER; + + if (*name_size == 0) + return EFI_INVALID_PARAMETER; + + req_size = qcuefi_buf_align_fields( + __field(*req_data) + __field_offs(*guid, &guid_offs) + __array_offs(*name, *name_size / sizeof(*name), &name_offs) + ); + + rsp_size = qcuefi_buf_align_fields( + __field(*rsp_data) + __field(*guid) + __array(*name, *name_size / sizeof(*name)) + ); + + req_data = kzalloc(req_size, GFP_KERNEL); + if (!req_data) { + efi_status = EFI_OUT_OF_RESOURCES; + goto out; + } + + rsp_data = kzalloc(rsp_size, GFP_KERNEL); + if (!rsp_data) { + efi_status = EFI_OUT_OF_RESOURCES; + goto out_free_req; + } + + req_data->command_id = QSEE_CMD_UEFI_GET_NEXT_VARIABLE; + req_data->guid_offset = guid_offs; + req_data->guid_size = sizeof(*guid); + req_data->name_offset = name_offs; + req_data->name_size = *name_size; + req_data->length = req_size; + + memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); + status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, + *name_size / sizeof(*name)); + if (status < 0) { + efi_status = EFI_INVALID_PARAMETER; + goto out_free; + } + + status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size); + if 
(status) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->length < sizeof(*rsp_data)) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->status) { + dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", + __func__, rsp_data->status); + efi_status = qsee_uefi_status_to_efi(rsp_data->status); + + /* + * If the buffer to hold the name is too small, update the + * name_size with the required size, so that callers can + * reallocate it accordingly. + */ + if (efi_status == EFI_BUFFER_TOO_SMALL) + *name_size = rsp_data->name_size; + + goto out_free; + } + + if (rsp_data->length > rsp_size) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->name_size > *name_size) { + *name_size = rsp_data->name_size; + efi_status = EFI_BUFFER_TOO_SMALL; + goto out_free; + } + + if (rsp_data->guid_size != sizeof(*guid)) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + memcpy(guid, ((void *)rsp_data) + rsp_data->guid_offset, rsp_data->guid_size); + status = ucs2_strscpy(name, ((void *)rsp_data) + rsp_data->name_offset, + rsp_data->name_size / sizeof(*name)); + *name_size = rsp_data->name_size; + + if (status < 0) { + /* + * Return EFI_DEVICE_ERROR here because the buffer size should + * have already been validated above, causing this function to + * bail with EFI_BUFFER_TOO_SMALL. + */ + efi_status = EFI_DEVICE_ERROR; + } + +out_free: + kfree(rsp_data); +out_free_req: + kfree(req_data); +out: + return efi_status; +} + +static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, u32 attr, + u64 *storage_space, u64 *remaining_space, + u64 *max_variable_size) +{ + struct qsee_req_uefi_query_variable_info *req_data; + struct qsee_rsp_uefi_query_variable_info *rsp_data; + efi_status_t efi_status = EFI_SUCCESS; + int status; + + req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); + if (!req_data) { + efi_status = EFI_OUT_OF_RESOURCES; + goto out; + } + + rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL); + if (!rsp_data) { + efi_status = EFI_OUT_OF_RESOURCES; + goto out_free_req; + } + + req_data->command_id = QSEE_CMD_UEFI_QUERY_VARIABLE_INFO; + req_data->attributes = attr; + req_data->length = sizeof(*req_data); + + status = qcom_qseecom_app_send(qcuefi->client, req_data, sizeof(*req_data), rsp_data, + sizeof(*rsp_data)); + if (status) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->length != sizeof(*rsp_data)) { + efi_status = EFI_DEVICE_ERROR; + goto out_free; + } + + if (rsp_data->status) { + dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", + __func__, rsp_data->status); + efi_status = qsee_uefi_status_to_efi(rsp_data->status); + goto out_free; + } + + if (storage_space) + *storage_space = rsp_data->storage_space; + + if (remaining_space) + *remaining_space = rsp_data->remaining_space; + + if (max_variable_size) + *max_variable_size = rsp_data->max_variable_size; + +out_free: + kfree(rsp_data); +out_free_req: + kfree(req_data); +out: + return efi_status; +} + +/* -- Global efivar 
interface. ---------------------------------------------- */ + +static struct qcuefi_client *__qcuefi; +static DEFINE_MUTEX(__qcuefi_lock); + +static int qcuefi_set_reference(struct qcuefi_client *qcuefi) +{ + mutex_lock(&__qcuefi_lock); + + if (qcuefi && __qcuefi) { + mutex_unlock(&__qcuefi_lock); + return -EEXIST; + } + + __qcuefi = qcuefi; + + mutex_unlock(&__qcuefi_lock); + return 0; +} + +static struct qcuefi_client *qcuefi_acquire(void) +{ + mutex_lock(&__qcuefi_lock); + return __qcuefi; +} + +static void qcuefi_release(void) +{ + mutex_unlock(&__qcuefi_lock); +} + +static efi_status_t qcuefi_get_variable(efi_char16_t *name, efi_guid_t *vendor, u32 *attr, + unsigned long *data_size, void *data) +{ + struct qcuefi_client *qcuefi; + efi_status_t status; + + qcuefi = qcuefi_acquire(); + if (!qcuefi) + return EFI_NOT_READY; + + status = qsee_uefi_get_variable(qcuefi, name, vendor, attr, data_size, data); + + qcuefi_release(); + return status; +} + +static efi_status_t qcuefi_set_variable(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, void *data) +{ + struct qcuefi_client *qcuefi; + efi_status_t status; + + qcuefi = qcuefi_acquire(); + if (!qcuefi) + return EFI_NOT_READY; + + status = qsee_uefi_set_variable(qcuefi, name, vendor, attr, data_size, data); + + qcuefi_release(); + return status; +} + +static efi_status_t qcuefi_get_next_variable(unsigned long *name_size, efi_char16_t *name, + efi_guid_t *vendor) +{ + struct qcuefi_client *qcuefi; + efi_status_t status; + + qcuefi = qcuefi_acquire(); + if (!qcuefi) + return EFI_NOT_READY; + + status = qsee_uefi_get_next_variable(qcuefi, name_size, name, vendor); + + qcuefi_release(); + return status; +} + +static efi_status_t qcuefi_query_variable_info(u32 attr, u64 *storage_space, u64 *remaining_space, + u64 *max_variable_size) +{ + struct qcuefi_client *qcuefi; + efi_status_t status; + + qcuefi = qcuefi_acquire(); + if (!qcuefi) + return EFI_NOT_READY; + + status = qsee_uefi_query_variable_info(qcuefi, attr, storage_space, remaining_space, + max_variable_size); + + qcuefi_release(); + return status; +} + +static const struct efivar_operations qcom_efivar_ops = { + .get_variable = qcuefi_get_variable, + .set_variable = qcuefi_set_variable, + .get_next_variable = qcuefi_get_next_variable, + .query_variable_info = qcuefi_query_variable_info, +}; + +/* -- Driver setup. 
--------------------------------------------------------- */ + +static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, + const struct auxiliary_device_id *aux_dev_id) +{ + struct qcuefi_client *qcuefi; + int status; + + qcuefi = devm_kzalloc(&aux_dev->dev, sizeof(*qcuefi), GFP_KERNEL); + if (!qcuefi) + return -ENOMEM; + + qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev); + + auxiliary_set_drvdata(aux_dev, qcuefi); + status = qcuefi_set_reference(qcuefi); + if (status) + return status; + + status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops); + if (status) + qcuefi_set_reference(NULL); + + return status; +} + +static void qcom_uefisecapp_remove(struct auxiliary_device *aux_dev) +{ + struct qcuefi_client *qcuefi = auxiliary_get_drvdata(aux_dev); + + efivars_unregister(&qcuefi->efivars); + qcuefi_set_reference(NULL); +} + +static const struct auxiliary_device_id qcom_uefisecapp_id_table[] = { + { .name = "qcom_qseecom.uefisecapp" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, qcom_uefisecapp_id_table); + +static struct auxiliary_driver qcom_uefisecapp_driver = { + .probe = qcom_uefisecapp_probe, + .remove = qcom_uefisecapp_remove, + .id_table = qcom_uefisecapp_id_table, + .driver = { + .name = "qcom_qseecom_uefisecapp", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_auxiliary_driver(qcom_uefisecapp_driver); + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Client driver for Qualcomm SEE UEFI Secure App"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/qcom/qcom_scm-legacy.c b/drivers/firmware/qcom/qcom_scm-legacy.c new file mode 100644 index 0000000000..029e6d117c --- /dev/null +++ b/drivers/firmware/qcom/qcom_scm-legacy.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. + * Copyright (C) 2015 Linaro Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qcom_scm.h" + +static DEFINE_MUTEX(qcom_scm_lock); + + +/** + * struct arm_smccc_args + * @args: The array of values used in registers in smc instruction + */ +struct arm_smccc_args { + unsigned long args[8]; +}; + + +/** + * struct scm_legacy_command - one SCM command buffer + * @len: total available memory for command and response + * @buf_offset: start of command buffer + * @resp_hdr_offset: start of response buffer + * @id: command to be executed + * @buf: buffer returned from scm_legacy_get_command_buffer() + * + * An SCM command is laid out in memory as follows: + * + * ------------------- <--- struct scm_legacy_command + * | command header | + * ------------------- <--- scm_legacy_get_command_buffer() + * | command buffer | + * ------------------- <--- struct scm_legacy_response and + * | response header | scm_legacy_command_to_response() + * ------------------- <--- scm_legacy_get_response_buffer() + * | response buffer | + * ------------------- + * + * There can be arbitrary padding between the headers and buffers so + * you should always use the appropriate scm_legacy_get_*_buffer() routines + * to access the buffers in a safe manner. 
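A worked example of that layout, assuming MAX_QCOM_SCM_RETS is 3 as defined in qcom_scm.h, for a command carrying two u32 arguments:

/*
 *	sizeof(*cmd) = 16  -> cmd->buf_offset      = 16
 *	cmd_len      =  8  -> cmd->resp_hdr_offset = 16 + 8 = 24
 *	sizeof(*rsp) = 12  -> response data begins at 24 + rsp->buf_offset
 *	resp_len     = 12  -> alloc_len = 16 + 8 + 12 + 12 = 48
 */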
+ */ +struct scm_legacy_command { + __le32 len; + __le32 buf_offset; + __le32 resp_hdr_offset; + __le32 id; + __le32 buf[]; +}; + +/** + * struct scm_legacy_response - one SCM response buffer + * @len: total available memory for response + * @buf_offset: start of response data relative to start of scm_legacy_response + * @is_complete: indicates if the command has finished processing + */ +struct scm_legacy_response { + __le32 len; + __le32 buf_offset; + __le32 is_complete; +}; + +/** + * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response + * @cmd: command + * + * Returns a pointer to a response for a command. + */ +static inline struct scm_legacy_response *scm_legacy_command_to_response( + const struct scm_legacy_command *cmd) +{ + return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset); +} + +/** + * scm_legacy_get_command_buffer() - Get a pointer to a command buffer + * @cmd: command + * + * Returns a pointer to the command buffer of a command. + */ +static inline void *scm_legacy_get_command_buffer( + const struct scm_legacy_command *cmd) +{ + return (void *)cmd->buf; +} + +/** + * scm_legacy_get_response_buffer() - Get a pointer to a response buffer + * @rsp: response + * + * Returns a pointer to a response buffer of a response. + */ +static inline void *scm_legacy_get_response_buffer( + const struct scm_legacy_response *rsp) +{ + return (void *)rsp + le32_to_cpu(rsp->buf_offset); +} + +static void __scm_legacy_do(const struct arm_smccc_args *smc, + struct arm_smccc_res *res) +{ + do { + arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2], + smc->args[3], smc->args[4], smc->args[5], + smc->args[6], smc->args[7], res); + } while (res->a0 == QCOM_SCM_INTERRUPTED); +} + +/** + * scm_legacy_call() - Sends a command to the SCM and waits for the command to + * finish processing. + * @dev: device + * @desc: descriptor structure containing arguments and return values + * @res: results from SMC call + * + * A note on cache maintenance: + * Note that any buffers that are expected to be accessed by the secure world + * must be flushed before invoking qcom_scm_call and invalidated in the cache + * immediately after qcom_scm_call returns. Cache maintenance on the command + * and response buffers is taken care of by qcom_scm_call; however, callers are + * responsible for any other cached buffers passed over to the secure world. 
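+ *
+ * A minimal sketch of that caller-side contract, assuming the buffer is
+ * handed over via the streaming DMA API (one possible mechanism, not
+ * the only one):
+ *
+ *   handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ *   ...pass the mapped address as an argument of the SCM call...
+ *   dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);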
+ */ +int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res) +{ + u8 arglen = desc->arginfo & 0xf; + int ret = 0, context_id; + unsigned int i; + struct scm_legacy_command *cmd; + struct scm_legacy_response *rsp; + struct arm_smccc_args smc = {0}; + struct arm_smccc_res smc_res; + const size_t cmd_len = arglen * sizeof(__le32); + const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32); + size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len; + dma_addr_t cmd_phys; + __le32 *arg_buf; + const __le32 *res_buf; + + cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->len = cpu_to_le32(alloc_len); + cmd->buf_offset = cpu_to_le32(sizeof(*cmd)); + cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len); + cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd)); + + arg_buf = scm_legacy_get_command_buffer(cmd); + for (i = 0; i < arglen; i++) + arg_buf[i] = cpu_to_le32(desc->args[i]); + + rsp = scm_legacy_command_to_response(cmd); + + cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, cmd_phys)) { + kfree(cmd); + return -ENOMEM; + } + + smc.args[0] = 1; + smc.args[1] = (unsigned long)&context_id; + smc.args[2] = cmd_phys; + + mutex_lock(&qcom_scm_lock); + __scm_legacy_do(&smc, &smc_res); + if (smc_res.a0) + ret = qcom_scm_remap_error(smc_res.a0); + mutex_unlock(&qcom_scm_lock); + if (ret) + goto out; + + do { + dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len, + sizeof(*rsp), DMA_FROM_DEVICE); + } while (!rsp->is_complete); + + dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len + + le32_to_cpu(rsp->buf_offset), + resp_len, DMA_FROM_DEVICE); + + if (res) { + res_buf = scm_legacy_get_response_buffer(rsp); + for (i = 0; i < MAX_QCOM_SCM_RETS; i++) + res->result[i] = le32_to_cpu(res_buf[i]); + } +out: + dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE); + kfree(cmd); + return ret; +} + +#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5 +#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2 +#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8) +#define SCM_LEGACY_MASK_IRQS BIT(5) +#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \ + ((SCM_LEGACY_FNID(svc, cmd) << 12) | \ + SCM_LEGACY_CLASS_REGISTER | \ + SCM_LEGACY_MASK_IRQS | \ + (n & 0xf)) + +/** + * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments + * and 3 return values + * @unused: device, legacy argument, not used, can be NULL + * @desc: SCM call descriptor containing arguments + * @res: SCM call return values + * + * This shall only be used with commands that are guaranteed to be + * uninterruptable, atomic and SMP safe. 
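+ *
+ * Hypothetical usage sketch, mirroring how qcom_scm_io_writel()
+ * elsewhere in this patch builds its descriptor (addr and val are
+ * placeholders):
+ *
+ *   struct qcom_scm_desc desc = {
+ *           .svc = QCOM_SCM_SVC_IO,
+ *           .cmd = QCOM_SCM_IO_WRITE,
+ *           .arginfo = QCOM_SCM_ARGS(2),
+ *           .args = { addr, val },
+ *   };
+ *   ret = scm_legacy_call_atomic(NULL, &desc, NULL);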
+ */ +int scm_legacy_call_atomic(struct device *unused, + const struct qcom_scm_desc *desc, + struct qcom_scm_res *res) +{ + int context_id; + struct arm_smccc_res smc_res; + size_t arglen = desc->arginfo & 0xf; + + BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS); + + arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen), + (unsigned long)&context_id, + desc->args[0], desc->args[1], desc->args[2], + desc->args[3], desc->args[4], 0, &smc_res); + + if (res) { + res->result[0] = smc_res.a1; + res->result[1] = smc_res.a2; + res->result[2] = smc_res.a3; + } + + return smc_res.a0; +} diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c new file mode 100644 index 0000000000..16cf88acfa --- /dev/null +++ b/drivers/firmware/qcom/qcom_scm-smc.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qcom_scm.h" + +/** + * struct arm_smccc_args + * @args: The array of values used in registers in smc instruction + */ +struct arm_smccc_args { + unsigned long args[8]; +}; + +static DEFINE_MUTEX(qcom_scm_lock); + +#define QCOM_SCM_EBUSY_WAIT_MS 30 +#define QCOM_SCM_EBUSY_MAX_RETRY 20 + +#define SCM_SMC_N_REG_ARGS 4 +#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1) +#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1) +#define SCM_SMC_FIRST_REG_IDX 2 +#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1) + +static void __scm_smc_do_quirk(const struct arm_smccc_args *smc, + struct arm_smccc_res *res) +{ + unsigned long a0 = smc->args[0]; + struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 }; + + quirk.state.a6 = 0; + + do { + arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2], + smc->args[3], smc->args[4], smc->args[5], + quirk.state.a6, smc->args[7], res, &quirk); + + if (res->a0 == QCOM_SCM_INTERRUPTED) + a0 = res->a0; + + } while (res->a0 == QCOM_SCM_INTERRUPTED); +} + +static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx) +{ + memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args)); + + resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, + ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP, + SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME)); + + resume->args[1] = QCOM_SCM_ARGS(1); + + resume->args[2] = smc_call_ctx; +} + +int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending) +{ + int ret; + struct arm_smccc_res get_wq_res; + struct arm_smccc_args get_wq_ctx = {0}; + + get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, + ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP, + SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX)); + + /* Guaranteed to return only success or error, no WAITQ_* */ + __scm_smc_do_quirk(&get_wq_ctx, &get_wq_res); + ret = get_wq_res.a0; + if (ret) + return ret; + + *wq_ctx = get_wq_res.a1; + *flags = get_wq_res.a2; + *more_pending = get_wq_res.a3; + + return 0; +} + +static int __scm_smc_do_quirk_handle_waitq(struct device *dev, struct arm_smccc_args *waitq, + struct arm_smccc_res *res) +{ + int ret; + u32 wq_ctx, smc_call_ctx; + struct arm_smccc_args resume; + struct arm_smccc_args *smc = waitq; + + do { + __scm_smc_do_quirk(smc, res); + + if (res->a0 == QCOM_SCM_WAITQ_SLEEP) { + wq_ctx = res->a1; + smc_call_ctx = res->a2; + + ret = qcom_scm_wait_for_wq_completion(wq_ctx); + if (ret) + return ret; + + fill_wq_resume_args(&resume, 
smc_call_ctx); + smc = &resume; + } + } while (res->a0 == QCOM_SCM_WAITQ_SLEEP); + + return 0; +} + +static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc, + struct arm_smccc_res *res, bool atomic) +{ + int ret, retry_count = 0; + + if (atomic) { + __scm_smc_do_quirk(smc, res); + return 0; + } + + do { + mutex_lock(&qcom_scm_lock); + + ret = __scm_smc_do_quirk_handle_waitq(dev, smc, res); + + mutex_unlock(&qcom_scm_lock); + + if (ret) + return ret; + + if (res->a0 == QCOM_SCM_V2_EBUSY) { + if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY) + break; + msleep(QCOM_SCM_EBUSY_WAIT_MS); + } + } while (res->a0 == QCOM_SCM_V2_EBUSY); + + return 0; +} + + +int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, + enum qcom_scm_convention qcom_convention, + struct qcom_scm_res *res, bool atomic) +{ + int arglen = desc->arginfo & 0xf; + int i, ret; + dma_addr_t args_phys = 0; + void *args_virt = NULL; + size_t alloc_len; + gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; + u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL; + u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ? + ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64; + struct arm_smccc_res smc_res; + struct arm_smccc_args smc = {0}; + + smc.args[0] = ARM_SMCCC_CALL_VAL( + smccc_call_type, + qcom_smccc_convention, + desc->owner, + SCM_SMC_FNID(desc->svc, desc->cmd)); + smc.args[1] = desc->arginfo; + for (i = 0; i < SCM_SMC_N_REG_ARGS; i++) + smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i]; + + if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) { + alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64); + args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); + + if (!args_virt) + return -ENOMEM; + + if (qcom_smccc_convention == ARM_SMCCC_SMC_32) { + __le32 *args = args_virt; + + for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++) + args[i] = cpu_to_le32(desc->args[i + + SCM_SMC_FIRST_EXT_IDX]); + } else { + __le64 *args = args_virt; + + for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++) + args[i] = cpu_to_le64(desc->args[i + + SCM_SMC_FIRST_EXT_IDX]); + } + + args_phys = dma_map_single(dev, args_virt, alloc_len, + DMA_TO_DEVICE); + + if (dma_mapping_error(dev, args_phys)) { + kfree(args_virt); + return -ENOMEM; + } + + smc.args[SCM_SMC_LAST_REG_IDX] = args_phys; + } + + /* ret error check follows after args_virt cleanup*/ + ret = __scm_smc_do(dev, &smc, &smc_res, atomic); + + if (args_virt) { + dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); + kfree(args_virt); + } + + if (ret) + return ret; + + if (res) { + res->result[0] = smc_res.a1; + res->result[1] = smc_res.a2; + res->result[2] = smc_res.a3; + } + + return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0; + +} diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c new file mode 100644 index 0000000000..520de9b563 --- /dev/null +++ b/drivers/firmware/qcom/qcom_scm.c @@ -0,0 +1,1943 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. + * Copyright (C) 2015 Linaro Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qcom_scm.h" + +static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); +module_param(download_mode, bool, 0); + +struct qcom_scm { + struct device *dev; + struct clk *core_clk; + struct clk *iface_clk; + struct clk *bus_clk; + struct icc_path *path; + struct completion waitq_comp; + struct reset_controller_dev reset; + + /* control access to the interconnect path */ + struct mutex scm_bw_lock; + int scm_vote_count; + + u64 dload_mode_addr; +}; + +struct qcom_scm_current_perm_info { + __le32 vmid; + __le32 perm; + __le64 ctx; + __le32 ctx_size; + __le32 unused; +}; + +struct qcom_scm_mem_map_info { + __le64 mem_addr; + __le64 mem_size; +}; + +/** + * struct qcom_scm_qseecom_resp - QSEECOM SCM call response. + * @result: Result or status of the SCM call. See &enum qcom_scm_qseecom_result. + * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type. + * @data: Response data. The type of this data is given in @resp_type. + */ +struct qcom_scm_qseecom_resp { + u64 result; + u64 resp_type; + u64 data; +}; + +enum qcom_scm_qseecom_result { + QSEECOM_RESULT_SUCCESS = 0, + QSEECOM_RESULT_INCOMPLETE = 1, + QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2, + QSEECOM_RESULT_FAILURE = 0xFFFFFFFF, +}; + +enum qcom_scm_qseecom_resp_type { + QSEECOM_SCM_RES_APP_ID = 0xEE01, + QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02, +}; + +enum qcom_scm_qseecom_tz_owner { + QSEECOM_TZ_OWNER_SIP = 2, + QSEECOM_TZ_OWNER_TZ_APPS = 48, + QSEECOM_TZ_OWNER_QSEE_OS = 50 +}; + +enum qcom_scm_qseecom_tz_svc { + QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0, + QSEECOM_TZ_SVC_APP_MGR = 1, + QSEECOM_TZ_SVC_INFO = 6, +}; + +enum qcom_scm_qseecom_tz_cmd_app { + QSEECOM_TZ_CMD_APP_SEND = 1, + QSEECOM_TZ_CMD_APP_LOOKUP = 3, +}; + +enum qcom_scm_qseecom_tz_cmd_info { + QSEECOM_TZ_CMD_INFO_VERSION = 3, +}; + +#define QSEECOM_MAX_APP_NAME_SIZE 64 + +/* Each bit configures cold/warm boot address for one of the 4 CPUs */ +static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = { + 0, BIT(0), BIT(3), BIT(5) +}; +static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { + BIT(2), BIT(1), BIT(4), BIT(6) +}; + +#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0) +#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1) + +static const char * const qcom_scm_convention_names[] = { + [SMC_CONVENTION_UNKNOWN] = "unknown", + [SMC_CONVENTION_ARM_32] = "smc arm 32", + [SMC_CONVENTION_ARM_64] = "smc arm 64", + [SMC_CONVENTION_LEGACY] = "smc legacy", +}; + +static struct qcom_scm *__scm; + +static int qcom_scm_clk_enable(void) +{ + int ret; + + ret = clk_prepare_enable(__scm->core_clk); + if (ret) + goto bail; + + ret = clk_prepare_enable(__scm->iface_clk); + if (ret) + goto disable_core; + + ret = clk_prepare_enable(__scm->bus_clk); + if (ret) + goto disable_iface; + + return 0; + +disable_iface: + clk_disable_unprepare(__scm->iface_clk); +disable_core: + clk_disable_unprepare(__scm->core_clk); +bail: + return ret; +} + +static void qcom_scm_clk_disable(void) +{ + clk_disable_unprepare(__scm->core_clk); + clk_disable_unprepare(__scm->iface_clk); + clk_disable_unprepare(__scm->bus_clk); +} + +static int qcom_scm_bw_enable(void) +{ + int ret = 0; + + if (!__scm->path) + return 0; + + if (IS_ERR(__scm->path)) + return -EINVAL; + + mutex_lock(&__scm->scm_bw_lock); + if (!__scm->scm_vote_count) { + ret = icc_set_bw(__scm->path, 0, UINT_MAX); + 
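/*
+ * Vote for the maximum peak bandwidth on this path; the vote is
+ * reference counted and dropped (0, 0) again by qcom_scm_bw_disable()
+ * once the last concurrent SCM call completes.
+ */
+ 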
if (ret < 0) { + dev_err(__scm->dev, "failed to set bandwidth request\n"); + goto err_bw; + } + } + __scm->scm_vote_count++; +err_bw: + mutex_unlock(&__scm->scm_bw_lock); + + return ret; +} + +static void qcom_scm_bw_disable(void) +{ + if (IS_ERR_OR_NULL(__scm->path)) + return; + + mutex_lock(&__scm->scm_bw_lock); + if (__scm->scm_vote_count-- == 1) + icc_set_bw(__scm->path, 0, 0); + mutex_unlock(&__scm->scm_bw_lock); +} + +enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; +static DEFINE_SPINLOCK(scm_query_lock); + +static enum qcom_scm_convention __get_convention(void) +{ + unsigned long flags; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_INFO, + .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, + .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO, + QCOM_SCM_INFO_IS_CALL_AVAIL) | + (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT), + .arginfo = QCOM_SCM_ARGS(1), + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + enum qcom_scm_convention probed_convention; + int ret; + bool forced = false; + + if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN)) + return qcom_scm_convention; + + /* + * Per the "SMC calling convention specification", the 64-bit calling + * convention can only be used when the client is 64-bit, otherwise + * system will encounter the undefined behaviour. + */ +#if IS_ENABLED(CONFIG_ARM64) + /* + * Device isn't required as there is only one argument - no device + * needed to dma_map_single to secure world + */ + probed_convention = SMC_CONVENTION_ARM_64; + ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); + if (!ret && res.result[0] == 1) + goto found; + + /* + * Some SC7180 firmwares didn't implement the + * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64 + * calling conventions on these firmwares. Luckily we don't make any + * early calls into the firmware on these SoCs so the device pointer + * will be valid here to check if the compatible matches. + */ + if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) { + forced = true; + goto found; + } +#endif + + probed_convention = SMC_CONVENTION_ARM_32; + ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); + if (!ret && res.result[0] == 1) + goto found; + + probed_convention = SMC_CONVENTION_LEGACY; +found: + spin_lock_irqsave(&scm_query_lock, flags); + if (probed_convention != qcom_scm_convention) { + qcom_scm_convention = probed_convention; + pr_info("qcom_scm: convention: %s%s\n", + qcom_scm_convention_names[qcom_scm_convention], + forced ? " (forced)" : ""); + } + spin_unlock_irqrestore(&scm_query_lock, flags); + + return qcom_scm_convention; +} + +/** + * qcom_scm_call() - Invoke a syscall in the secure world + * @dev: device + * @desc: Descriptor structure containing arguments and return values + * @res: Structure containing results from SMC/HVC call + * + * Sends a command to the SCM and waits for the command to finish processing. + * This should *only* be called in pre-emptible context. 
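+ *
+ * Minimal usage sketch (the descriptor mirrors the availability check
+ * in __get_convention() above; concrete values are placeholders):
+ *
+ *   struct qcom_scm_desc desc = {
+ *           .svc = QCOM_SCM_SVC_INFO,
+ *           .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+ *           .arginfo = QCOM_SCM_ARGS(1),
+ *           .owner = ARM_SMCCC_OWNER_SIP,
+ *   };
+ *   struct qcom_scm_res res;
+ *   int ret = qcom_scm_call(dev, &desc, &res);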
+ */ +static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res) +{ + might_sleep(); + switch (__get_convention()) { + case SMC_CONVENTION_ARM_32: + case SMC_CONVENTION_ARM_64: + return scm_smc_call(dev, desc, res, false); + case SMC_CONVENTION_LEGACY: + return scm_legacy_call(dev, desc, res); + default: + pr_err("Unknown current SCM calling convention.\n"); + return -EINVAL; + } +} + +/** + * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() + * @dev: device + * @desc: Descriptor structure containing arguments and return values + * @res: Structure containing results from SMC/HVC call + * + * Sends a command to the SCM and waits for the command to finish processing. + * This can be called in atomic context. + */ +static int qcom_scm_call_atomic(struct device *dev, + const struct qcom_scm_desc *desc, + struct qcom_scm_res *res) +{ + switch (__get_convention()) { + case SMC_CONVENTION_ARM_32: + case SMC_CONVENTION_ARM_64: + return scm_smc_call(dev, desc, res, true); + case SMC_CONVENTION_LEGACY: + return scm_legacy_call_atomic(dev, desc, res); + default: + pr_err("Unknown current SCM calling convention.\n"); + return -EINVAL; + } +} + +static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, + u32 cmd_id) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_INFO, + .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + desc.arginfo = QCOM_SCM_ARGS(1); + switch (__get_convention()) { + case SMC_CONVENTION_ARM_32: + case SMC_CONVENTION_ARM_64: + desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) | + (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT); + break; + case SMC_CONVENTION_LEGACY: + desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id); + break; + default: + pr_err("Unknown SMC convention being used\n"); + return false; + } + + ret = qcom_scm_call(dev, &desc, &res); + + return ret ? false : !!res.result[0]; +} + +static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits) +{ + int cpu; + unsigned int flags = 0; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_ADDR, + .arginfo = QCOM_SCM_ARGS(2), + .owner = ARM_SMCCC_OWNER_SIP, + }; + + for_each_present_cpu(cpu) { + if (cpu >= QCOM_SCM_BOOT_MAX_CPUS) + return -EINVAL; + flags |= cpu_bits[cpu]; + } + + desc.args[0] = flags; + desc.args[1] = virt_to_phys(entry); + + return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); +} + +static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_ADDR_MC, + .owner = ARM_SMCCC_OWNER_SIP, + .arginfo = QCOM_SCM_ARGS(6), + .args = { + virt_to_phys(entry), + /* Apply to all CPUs in all affinity levels */ + ~0ULL, ~0ULL, ~0ULL, ~0ULL, + flags, + }, + }; + + /* Need a device for DMA of the additional arguments */ + if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY) + return -EOPNOTSUPP; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} + +/** + * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus + * @entry: Entry point function for the cpus + * + * Set the Linux entry point for the SCM to transfer control to when coming + * out of a power down. CPU power down may be executed on cpuidle or hotplug. 
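+ *
+ * The multi-cluster variant of the call is attempted first; on firmware
+ * that does not implement it, the implementation below falls back to
+ * the older per-CPU-bitmap SCM call.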
+ */ +int qcom_scm_set_warm_boot_addr(void *entry) +{ + if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT)) + /* Fallback to old SCM call */ + return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits); + return 0; +} +EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr); + +/** + * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus + * @entry: Entry point function for the cpus + */ +int qcom_scm_set_cold_boot_addr(void *entry) +{ + if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT)) + /* Fallback to old SCM call */ + return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits); + return 0; +} +EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr); + +/** + * qcom_scm_cpu_power_down() - Power down the cpu + * @flags: Flags to flush cache + * + * This is an end point to power down cpu. If there was a pending interrupt, + * the control would return from this function, otherwise, the cpu jumps to the + * warm boot entry point set for this cpu upon reset. + */ +void qcom_scm_cpu_power_down(u32 flags) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_TERMINATE_PC, + .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK, + .arginfo = QCOM_SCM_ARGS(1), + .owner = ARM_SMCCC_OWNER_SIP, + }; + + qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down); + +int qcom_scm_set_remote_state(u32 state, u32 id) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = state, + .args[1] = id, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + return ret ? : res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state); + +static int qcom_scm_disable_sdi(void) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SDI_CONFIG, + .args[0] = 1, /* Disable watchdog debug */ + .args[1] = 0, /* Disable SDI */ + .arginfo = QCOM_SCM_ARGS(2), + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + ret = qcom_scm_clk_enable(); + if (ret) + return ret; + ret = qcom_scm_call(__scm->dev, &desc, &res); + + qcom_scm_clk_disable(); + + return ret ? : res.result[0]; +} + +static int __qcom_scm_set_dload_mode(struct device *dev, bool enable) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0; + + return qcom_scm_call_atomic(__scm->dev, &desc, NULL); +} + +static void qcom_scm_set_download_mode(bool enable) +{ + bool avail; + int ret = 0; + + avail = __qcom_scm_is_call_available(__scm->dev, + QCOM_SCM_SVC_BOOT, + QCOM_SCM_BOOT_SET_DLOAD_MODE); + if (avail) { + ret = __qcom_scm_set_dload_mode(__scm->dev, enable); + } else if (__scm->dload_mode_addr) { + ret = qcom_scm_io_writel(__scm->dload_mode_addr, + enable ? 
QCOM_SCM_BOOT_SET_DLOAD_MODE : 0); + } else { + dev_err(__scm->dev, + "No available mechanism for setting download mode\n"); + } + + if (ret) + dev_err(__scm->dev, "failed to set download mode: %d\n", ret); +} + +/** + * qcom_scm_pas_init_image() - Initialize peripheral authentication service + * state machine for a given peripheral, using the + * metadata + * @peripheral: peripheral id + * @metadata: pointer to memory containing ELF header, program header table + * and optional blob of data used for authenticating the metadata + * and the rest of the firmware + * @size: size of the metadata + * @ctx: optional metadata context + * + * Return: 0 on success. + * + * Upon successful return, the PAS metadata context (@ctx) will be used to + * track the metadata allocation, this needs to be released by invoking + * qcom_scm_pas_metadata_release() by the caller. + */ +int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, + struct qcom_scm_pas_metadata *ctx) +{ + dma_addr_t mdata_phys; + void *mdata_buf; + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE, + .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW), + .args[0] = peripheral, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + /* + * During the scm call memory protection will be enabled for the meta + * data blob, so make sure it's physically contiguous, 4K aligned and + * non-cachable to avoid XPU violations. + */ + mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, + GFP_KERNEL); + if (!mdata_buf) { + dev_err(__scm->dev, "Allocation of metadata buffer failed.\n"); + return -ENOMEM; + } + memcpy(mdata_buf, metadata, size); + + ret = qcom_scm_clk_enable(); + if (ret) + goto out; + + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + + desc.args[1] = mdata_phys; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + qcom_scm_bw_disable(); + qcom_scm_clk_disable(); + +out: + if (ret < 0 || !ctx) { + dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); + } else if (ctx) { + ctx->ptr = mdata_buf; + ctx->phys = mdata_phys; + ctx->size = size; + } + + return ret ? : res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image); + +/** + * qcom_scm_pas_metadata_release() - release metadata context + * @ctx: metadata context + */ +void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx) +{ + if (!ctx->ptr) + return; + + dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys); + + ctx->ptr = NULL; + ctx->phys = 0; + ctx->size = 0; +} +EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release); + +/** + * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral + * for firmware loading + * @peripheral: peripheral id + * @addr: start address of memory area to prepare + * @size: size of the memory area to prepare + * + * Returns 0 on success. + */ +int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP, + .arginfo = QCOM_SCM_ARGS(3), + .args[0] = peripheral, + .args[1] = addr, + .args[2] = size, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + ret = qcom_scm_clk_enable(); + if (ret) + return ret; + + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); + qcom_scm_clk_disable(); + + return ret ? 
: res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup); + +/** + * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware + * and reset the remote processor + * @peripheral: peripheral id + * + * Return 0 on success. + */ +int qcom_scm_pas_auth_and_reset(u32 peripheral) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = peripheral, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + ret = qcom_scm_clk_enable(); + if (ret) + return ret; + + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); + qcom_scm_clk_disable(); + + return ret ? : res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset); + +/** + * qcom_scm_pas_shutdown() - Shut down the remote processor + * @peripheral: peripheral id + * + * Returns 0 on success. + */ +int qcom_scm_pas_shutdown(u32 peripheral) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = peripheral, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + ret = qcom_scm_clk_enable(); + if (ret) + return ret; + + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + qcom_scm_bw_disable(); + qcom_scm_clk_disable(); + + return ret ? : res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown); + +/** + * qcom_scm_pas_supported() - Check if the peripheral authentication service is + * available for the given peripherial + * @peripheral: peripheral id + * + * Returns true if PAS is supported for this peripheral, otherwise false. + */ +bool qcom_scm_pas_supported(u32 peripheral) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = peripheral, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, + QCOM_SCM_PIL_PAS_IS_SUPPORTED)) + return false; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + return ret ? false : !!res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_pas_supported); + +static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_MSS_RESET, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = reset, + .args[1] = 0, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + return ret ? 
: res.result[0]; +} + +static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev, + unsigned long idx) +{ + if (idx != 0) + return -EINVAL; + + return __qcom_scm_pas_mss_reset(__scm->dev, 1); +} + +static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long idx) +{ + if (idx != 0) + return -EINVAL; + + return __qcom_scm_pas_mss_reset(__scm->dev, 0); +} + +static const struct reset_control_ops qcom_scm_pas_reset_ops = { + .assert = qcom_scm_pas_reset_assert, + .deassert = qcom_scm_pas_reset_deassert, +}; + +int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_IO, + .cmd = QCOM_SCM_IO_READ, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = addr, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + + ret = qcom_scm_call_atomic(__scm->dev, &desc, &res); + if (ret >= 0) + *val = res.result[0]; + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL_GPL(qcom_scm_io_readl); + +int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_IO, + .cmd = QCOM_SCM_IO_WRITE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = addr, + .args[1] = val, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call_atomic(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_io_writel); + +/** + * qcom_scm_restore_sec_cfg_available() - Check if secure environment + * supports restore security config interface. + * + * Return true if restore-cfg interface is supported, false if not. + */ +bool qcom_scm_restore_sec_cfg_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, + QCOM_SCM_MP_RESTORE_SEC_CFG); +} +EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available); + +int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = device_id, + .args[1] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + return ret ? : res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg); + +int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + if (size) + *size = res.result[0]; + + return ret ? 
: res.result[1]; +} +EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size); + +int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT, + .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, + QCOM_SCM_VAL), + .args[0] = addr, + .args[1] = size, + .args[2] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, NULL); + + /* the pg table has been initialized already, ignore the error */ + if (ret == -EPERM) + ret = 0; + + return ret; +} +EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init); + +int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = size, + .args[1] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size); + +int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, + u32 cp_nonpixel_start, + u32 cp_nonpixel_size) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_VIDEO_VAR, + .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, + QCOM_SCM_VAL, QCOM_SCM_VAL), + .args[0] = cp_start, + .args[1] = cp_size, + .args[2] = cp_nonpixel_start, + .args[3] = cp_nonpixel_size, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + return ret ? : res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var); + +static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, + size_t mem_sz, phys_addr_t src, size_t src_sz, + phys_addr_t dest, size_t dest_sz) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_ASSIGN, + .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL, + QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO, + QCOM_SCM_VAL, QCOM_SCM_VAL), + .args[0] = mem_region, + .args[1] = mem_sz, + .args[2] = src, + .args[3] = src_sz, + .args[4] = dest, + .args[5] = dest_sz, + .args[6] = 0, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + ret = qcom_scm_call(dev, &desc, &res); + + return ret ? : res.result[0]; +} + +/** + * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership + * @mem_addr: mem region whose ownership need to be reassigned + * @mem_sz: size of the region. + * @srcvm: vmid for current set of owners, each set bit in + * flag indicate a unique owner + * @newvm: array having new owners and corresponding permission + * flags + * @dest_cnt: number of owners in next set. + * + * Return negative errno on failure or 0 on success with @srcvm updated. 
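+ *
+ * Hypothetical usage sketch (the VMID and permission constants come
+ * from the public qcom_scm.h header; treat them as placeholders here):
+ *
+ *   u64 src = BIT(QCOM_SCM_VMID_HLOS);
+ *   struct qcom_scm_vmperm perm = {
+ *           .vmid = QCOM_SCM_VMID_MSS_MSA,
+ *           .perm = QCOM_SCM_PERM_RW,
+ *   };
+ *   ret = qcom_scm_assign_mem(mem_addr, mem_sz, &src, &perm, 1);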
+ */ +int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, + u64 *srcvm, + const struct qcom_scm_vmperm *newvm, + unsigned int dest_cnt) +{ + struct qcom_scm_current_perm_info *destvm; + struct qcom_scm_mem_map_info *mem_to_map; + phys_addr_t mem_to_map_phys; + phys_addr_t dest_phys; + dma_addr_t ptr_phys; + size_t mem_to_map_sz; + size_t dest_sz; + size_t src_sz; + size_t ptr_sz; + int next_vm; + __le32 *src; + void *ptr; + int ret, i, b; + u64 srcvm_bits = *srcvm; + + src_sz = hweight64(srcvm_bits) * sizeof(*src); + mem_to_map_sz = sizeof(*mem_to_map); + dest_sz = dest_cnt * sizeof(*destvm); + ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + + ALIGN(dest_sz, SZ_64); + + ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + /* Fill source vmid detail */ + src = ptr; + i = 0; + for (b = 0; b < BITS_PER_TYPE(u64); b++) { + if (srcvm_bits & BIT(b)) + src[i++] = cpu_to_le32(b); + } + + /* Fill details of mem buff to map */ + mem_to_map = ptr + ALIGN(src_sz, SZ_64); + mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64); + mem_to_map->mem_addr = cpu_to_le64(mem_addr); + mem_to_map->mem_size = cpu_to_le64(mem_sz); + + next_vm = 0; + /* Fill details of next vmid detail */ + destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); + dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); + for (i = 0; i < dest_cnt; i++, destvm++, newvm++) { + destvm->vmid = cpu_to_le32(newvm->vmid); + destvm->perm = cpu_to_le32(newvm->perm); + destvm->ctx = 0; + destvm->ctx_size = 0; + next_vm |= BIT(newvm->vmid); + } + + ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, + ptr_phys, src_sz, dest_phys, dest_sz); + dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys); + if (ret) { + dev_err(__scm->dev, + "Assign memory protection call failed %d\n", ret); + return -EINVAL; + } + + *srcvm = next_vm; + return 0; +} +EXPORT_SYMBOL_GPL(qcom_scm_assign_mem); + +/** + * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available + */ +bool qcom_scm_ocmem_lock_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM, + QCOM_SCM_OCMEM_LOCK_CMD); +} +EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available); + +/** + * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM + * region to the specified initiator + * + * @id: tz initiator id + * @offset: OCMEM offset + * @size: OCMEM size + * @mode: access mode (WIDE/NARROW) + */ +int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, + u32 mode) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_OCMEM, + .cmd = QCOM_SCM_OCMEM_LOCK_CMD, + .args[0] = id, + .args[1] = offset, + .args[2] = size, + .args[3] = mode, + .arginfo = QCOM_SCM_ARGS(4), + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock); + +/** + * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM + * region from the specified initiator + * + * @id: tz initiator id + * @offset: OCMEM offset + * @size: OCMEM size + */ +int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_OCMEM, + .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD, + .args[0] = id, + .args[1] = offset, + .args[2] = size, + .arginfo = QCOM_SCM_ARGS(3), + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock); + +/** + * qcom_scm_ice_available() - Is the ICE key programming interface 
available? + * + * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and + * qcom_scm_ice_set_key() are available. + */ +bool qcom_scm_ice_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, + QCOM_SCM_ES_INVALIDATE_ICE_KEY) && + __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, + QCOM_SCM_ES_CONFIG_SET_ICE_KEY); +} +EXPORT_SYMBOL_GPL(qcom_scm_ice_available); + +/** + * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key + * @index: the keyslot to invalidate + * + * The UFSHCI and eMMC standards define a standard way to do this, but it + * doesn't work on these SoCs; only this SCM call does. + * + * It is assumed that the SoC has only one ICE instance being used, as this SCM + * call doesn't specify which ICE instance the keyslot belongs to. + * + * Return: 0 on success; -errno on failure. + */ +int qcom_scm_ice_invalidate_key(u32 index) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_ES, + .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = index, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key); + +/** + * qcom_scm_ice_set_key() - Set an inline encryption key + * @index: the keyslot into which to set the key + * @key: the key to program + * @key_size: the size of the key in bytes + * @cipher: the encryption algorithm the key is for + * @data_unit_size: the encryption data unit size, i.e. the size of each + * individual plaintext and ciphertext. Given in 512-byte + * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc. + * + * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it + * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline. + * + * The UFSHCI and eMMC standards define a standard way to do this, but it + * doesn't work on these SoCs; only this SCM call does. + * + * It is assumed that the SoC has only one ICE instance being used, as this SCM + * call doesn't specify which ICE instance the keyslot belongs to. + * + * Return: 0 on success; -errno on failure. + */ +int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, + enum qcom_scm_ice_cipher cipher, u32 data_unit_size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_ES, + .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY, + .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW, + QCOM_SCM_VAL, QCOM_SCM_VAL, + QCOM_SCM_VAL), + .args[0] = index, + .args[2] = key_size, + .args[3] = cipher, + .args[4] = data_unit_size, + .owner = ARM_SMCCC_OWNER_SIP, + }; + void *keybuf; + dma_addr_t key_phys; + int ret; + + /* + * 'key' may point to vmalloc()'ed memory, but we need to pass a + * physical address that's been properly flushed. The sanctioned way to + * do this is by using the DMA API. But as is best practice for crypto + * keys, we also must wipe the key after use. This makes kmemdup() + + * dma_map_single() not clearly correct, since the DMA API can use + * bounce buffers. Instead, just use dma_alloc_coherent(). Programming + * keys is normally rare and thus not performance-critical. 
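+ *
+ * (The coherent buffer is wiped with memzero_explicit() below before it
+ * is freed, so the key material does not linger in memory.)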
+ */ + + keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys, + GFP_KERNEL); + if (!keybuf) + return -ENOMEM; + memcpy(keybuf, key, key_size); + desc.args[1] = key_phys; + + ret = qcom_scm_call(__scm->dev, &desc, NULL); + + memzero_explicit(keybuf, key_size); + + dma_free_coherent(__scm->dev, key_size, keybuf, key_phys); + return ret; +} +EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key); + +/** + * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. + * + * Return true if HDCP is supported, false if not. + */ +bool qcom_scm_hdcp_available(void) +{ + bool avail; + int ret = qcom_scm_clk_enable(); + + if (ret) + return ret; + + avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, + QCOM_SCM_HDCP_INVOKE); + + qcom_scm_clk_disable(); + + return avail; +} +EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available); + +/** + * qcom_scm_hdcp_req() - Send HDCP request. + * @req: HDCP request array + * @req_cnt: HDCP request array count + * @resp: response buffer passed to SCM + * + * Write HDCP register(s) through SCM. + */ +int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_HDCP, + .cmd = QCOM_SCM_HDCP_INVOKE, + .arginfo = QCOM_SCM_ARGS(10), + .args = { + req[0].addr, + req[0].val, + req[1].addr, + req[1].val, + req[2].addr, + req[2].val, + req[3].addr, + req[3].val, + req[4].addr, + req[4].val + }, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) + return -ERANGE; + + ret = qcom_scm_clk_enable(); + if (ret) + return ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + *resp = res.result[0]; + + qcom_scm_clk_disable(); + + return ret; +} +EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req); + +int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_SMMU_PROGRAM, + .cmd = QCOM_SCM_SMMU_PT_FORMAT, + .arginfo = QCOM_SCM_ARGS(3), + .args[0] = sec_id, + .args[1] = ctx_num, + .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */ + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format); + +int qcom_scm_qsmmu500_wait_safe_toggle(bool en) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_SMMU_PROGRAM, + .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL, + .args[1] = en, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + + return qcom_scm_call_atomic(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle); + +bool qcom_scm_lmh_dcvsh_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH); +} +EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available); + +int qcom_scm_lmh_profile_change(u32 profile_id) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_LMH, + .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE, + .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL), + .args[0] = profile_id, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change); + +int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, + u64 limit_node, u32 node_id, u64 version) +{ + dma_addr_t payload_phys; + u32 *payload_buf; + int ret, payload_size = 5 * sizeof(u32); + + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_LMH, + .cmd = QCOM_SCM_LMH_LIMIT_DCVSH, + .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, 
QCOM_SCM_VAL, QCOM_SCM_VAL, + QCOM_SCM_VAL, QCOM_SCM_VAL), + .args[1] = payload_size, + .args[2] = limit_node, + .args[3] = node_id, + .args[4] = version, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL); + if (!payload_buf) + return -ENOMEM; + + payload_buf[0] = payload_fn; + payload_buf[1] = 0; + payload_buf[2] = payload_reg; + payload_buf[3] = 1; + payload_buf[4] = payload_val; + + desc.args[0] = payload_phys; + + ret = qcom_scm_call(__scm->dev, &desc, NULL); + + dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys); + return ret; +} +EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh); + +static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) +{ + struct device_node *tcsr; + struct device_node *np = dev->of_node; + struct resource res; + u32 offset; + int ret; + + tcsr = of_parse_phandle(np, "qcom,dload-mode", 0); + if (!tcsr) + return 0; + + ret = of_address_to_resource(tcsr, 0, &res); + of_node_put(tcsr); + if (ret) + return ret; + + ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset); + if (ret < 0) + return ret; + + *addr = res.start + offset; + + return 0; +} + +#ifdef CONFIG_QCOM_QSEECOM + +/* Lock for QSEECOM SCM call executions */ +static DEFINE_MUTEX(qcom_scm_qseecom_call_lock); + +static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc, + struct qcom_scm_qseecom_resp *res) +{ + struct qcom_scm_res scm_res = {}; + int status; + + /* + * QSEECOM SCM calls should not be executed concurrently. Therefore, we + * require the respective call lock to be held. + */ + lockdep_assert_held(&qcom_scm_qseecom_call_lock); + + status = qcom_scm_call(__scm->dev, desc, &scm_res); + + res->result = scm_res.result[0]; + res->resp_type = scm_res.result[1]; + res->data = scm_res.result[2]; + + if (status) + return status; + + return 0; +} + +/** + * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call. + * @desc: SCM call descriptor. + * @res: SCM call response (output). + * + * Performs the QSEECOM SCM call described by @desc, returning the response in + * @rsp. + * + * Return: Zero on success, nonzero on failure. + */ +static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc, + struct qcom_scm_qseecom_resp *res) +{ + int status; + + /* + * Note: Multiple QSEECOM SCM calls should not be executed same time, + * so lock things here. This needs to be extended to callback/listener + * handling when support for that is implemented. + */ + + mutex_lock(&qcom_scm_qseecom_call_lock); + status = __qcom_scm_qseecom_call(desc, res); + mutex_unlock(&qcom_scm_qseecom_call_lock); + + dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n", + __func__, desc->owner, desc->svc, desc->cmd, res->result, + res->resp_type, res->data); + + if (status) { + dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status); + return status; + } + + /* + * TODO: Handle incomplete and blocked calls: + * + * Incomplete and blocked calls are not supported yet. Some devices + * and/or commands require those, some don't. Let's warn about them + * prominently in case someone attempts to try these commands with a + * device/command combination that isn't supported yet. + */ + WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE); + WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER); + + return 0; +} + +/** + * qcom_scm_qseecom_get_version() - Query the QSEECOM version. + * @version: Pointer where the QSEECOM version will be stored. 
+ * + * Performs the QSEECOM SCM querying the QSEECOM version currently running in + * the TrustZone. + * + * Return: Zero on success, nonzero on failure. + */ +static int qcom_scm_qseecom_get_version(u32 *version) +{ + struct qcom_scm_desc desc = {}; + struct qcom_scm_qseecom_resp res = {}; + u32 feature = 10; + int ret; + + desc.owner = QSEECOM_TZ_OWNER_SIP; + desc.svc = QSEECOM_TZ_SVC_INFO; + desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION; + desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL); + desc.args[0] = feature; + + ret = qcom_scm_qseecom_call(&desc, &res); + if (ret) + return ret; + + *version = res.result; + return 0; +} + +/** + * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name. + * @app_name: The name of the app. + * @app_id: The returned app ID. + * + * Query and return the application ID of the SEE app identified by the given + * name. This returned ID is the unique identifier of the app required for + * subsequent communication. + * + * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been + * loaded or could not be found. + */ +int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id) +{ + unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE; + unsigned long app_name_len = strlen(app_name); + struct qcom_scm_desc desc = {}; + struct qcom_scm_qseecom_resp res = {}; + dma_addr_t name_buf_phys; + char *name_buf; + int status; + + if (app_name_len >= name_buf_size) + return -EINVAL; + + name_buf = kzalloc(name_buf_size, GFP_KERNEL); + if (!name_buf) + return -ENOMEM; + + memcpy(name_buf, app_name, app_name_len); + + name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE); + status = dma_mapping_error(__scm->dev, name_buf_phys); + if (status) { + kfree(name_buf); + dev_err(__scm->dev, "qseecom: failed to map dma address\n"); + return status; + } + + desc.owner = QSEECOM_TZ_OWNER_QSEE_OS; + desc.svc = QSEECOM_TZ_SVC_APP_MGR; + desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP; + desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL); + desc.args[0] = name_buf_phys; + desc.args[1] = app_name_len; + + status = qcom_scm_qseecom_call(&desc, &res); + dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE); + kfree(name_buf); + + if (status) + return status; + + if (res.result == QSEECOM_RESULT_FAILURE) + return -ENOENT; + + if (res.result != QSEECOM_RESULT_SUCCESS) + return -EINVAL; + + if (res.resp_type != QSEECOM_SCM_RES_APP_ID) + return -EINVAL; + + *app_id = res.data; + return 0; +} +EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id); + +/** + * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app. + * @app_id: The ID of the target app. + * @req: Request buffer sent to the app (must be DMA-mappable). + * @req_size: Size of the request buffer. + * @rsp: Response buffer, written to by the app (must be DMA-mappable). + * @rsp_size: Size of the response buffer. + * + * Sends a request to the QSEE app associated with the given ID and read back + * its response. The caller must provide two DMA memory regions, one for the + * request and one for the response, and fill out the @req region with the + * respective (app-specific) request data. The QSEE app reads this and returns + * its response in the @rsp region. + * + * Return: Zero on success, nonzero on failure. 
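+ *
+ * Hypothetical usage sketch (the request/response layout is defined by
+ * the individual QSEE app, not by this function; the app name is a
+ * placeholder):
+ *
+ *   u32 app_id;
+ *   ret = qcom_scm_qseecom_app_get_id("example-app", &app_id);
+ *   if (!ret)
+ *           ret = qcom_scm_qseecom_app_send(app_id, req, sizeof(*req),
+ *                                           rsp, sizeof(*rsp));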
+ */ +int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp, + size_t rsp_size) +{ + struct qcom_scm_qseecom_resp res = {}; + struct qcom_scm_desc desc = {}; + dma_addr_t req_phys; + dma_addr_t rsp_phys; + int status; + + /* Map request buffer */ + req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE); + status = dma_mapping_error(__scm->dev, req_phys); + if (status) { + dev_err(__scm->dev, "qseecom: failed to map request buffer\n"); + return status; + } + + /* Map response buffer */ + rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE); + status = dma_mapping_error(__scm->dev, rsp_phys); + if (status) { + dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE); + dev_err(__scm->dev, "qseecom: failed to map response buffer\n"); + return status; + } + + /* Set up SCM call data */ + desc.owner = QSEECOM_TZ_OWNER_TZ_APPS; + desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER; + desc.cmd = QSEECOM_TZ_CMD_APP_SEND; + desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, + QCOM_SCM_RW, QCOM_SCM_VAL, + QCOM_SCM_RW, QCOM_SCM_VAL); + desc.args[0] = app_id; + desc.args[1] = req_phys; + desc.args[2] = req_size; + desc.args[3] = rsp_phys; + desc.args[4] = rsp_size; + + /* Perform call */ + status = qcom_scm_qseecom_call(&desc, &res); + + /* Unmap buffers */ + dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE); + dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE); + + if (status) + return status; + + if (res.result != QSEECOM_RESULT_SUCCESS) + return -EIO; + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send); + +/* + * We do not yet support re-entrant calls via the qseecom interface. To prevent + + any potential issues with this, only allow validated machines for now. + */ +static const struct of_device_id qcom_scm_qseecom_allowlist[] = { + { .compatible = "lenovo,thinkpad-x13s", }, + { } +}; + +static bool qcom_scm_qseecom_machine_is_allowed(void) +{ + struct device_node *np; + bool match; + + np = of_find_node_by_path("/"); + if (!np) + return false; + + match = of_match_node(qcom_scm_qseecom_allowlist, np); + of_node_put(np); + + return match; +} + +static void qcom_scm_qseecom_free(void *data) +{ + struct platform_device *qseecom_dev = data; + + platform_device_del(qseecom_dev); + platform_device_put(qseecom_dev); +} + +static int qcom_scm_qseecom_init(struct qcom_scm *scm) +{ + struct platform_device *qseecom_dev; + u32 version; + int ret; + + /* + * Note: We do two steps of validation here: First, we try to query the + * QSEECOM version as a check to see if the interface exists on this + * device. Second, we check against known good devices due to current + * driver limitations (see comment in qcom_scm_qseecom_allowlist). + * + * Note that we deliberately do the machine check after the version + * check so that we can log potentially supported devices. This should + * be safe as downstream sources indicate that the version query is + * neither blocking nor reentrant. + */ + ret = qcom_scm_qseecom_get_version(&version); + if (ret) + return 0; + + dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version); + + if (!qcom_scm_qseecom_machine_is_allowed()) { + dev_info(scm->dev, "qseecom: untested machine, skipping\n"); + return 0; + } + + /* + * Set up QSEECOM interface device. All application clients will be + * set up and managed by the corresponding driver for it. 
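+ * The platform device created below only anchors that driver; the
+ * individual QSEE client apps (such as uefisecapp earlier in this
+ * patch) are then exposed to their drivers as auxiliary devices.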
+ */ + qseecom_dev = platform_device_alloc("qcom_qseecom", -1); + if (!qseecom_dev) + return -ENOMEM; + + qseecom_dev->dev.parent = scm->dev; + + ret = platform_device_add(qseecom_dev); + if (ret) { + platform_device_put(qseecom_dev); + return ret; + } + + return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev); +} + +#else /* CONFIG_QCOM_QSEECOM */ + +static int qcom_scm_qseecom_init(struct qcom_scm *scm) +{ + return 0; +} + +#endif /* CONFIG_QCOM_QSEECOM */ + +/** + * qcom_scm_is_available() - Checks if SCM is available + */ +bool qcom_scm_is_available(void) +{ + return !!__scm; +} +EXPORT_SYMBOL_GPL(qcom_scm_is_available); + +static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx) +{ + /* FW currently only supports a single wq_ctx (zero). + * TODO: Update this logic to include dynamic allocation and lookup of + * completion structs when FW supports more wq_ctx values. + */ + if (wq_ctx != 0) { + dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n"); + return -EINVAL; + } + + return 0; +} + +int qcom_scm_wait_for_wq_completion(u32 wq_ctx) +{ + int ret; + + ret = qcom_scm_assert_valid_wq_ctx(wq_ctx); + if (ret) + return ret; + + wait_for_completion(&__scm->waitq_comp); + + return 0; +} + +static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx) +{ + int ret; + + ret = qcom_scm_assert_valid_wq_ctx(wq_ctx); + if (ret) + return ret; + + complete(&__scm->waitq_comp); + + return 0; +} + +static irqreturn_t qcom_scm_irq_handler(int irq, void *data) +{ + int ret; + struct qcom_scm *scm = data; + u32 wq_ctx, flags, more_pending = 0; + + do { + ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending); + if (ret) { + dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret); + goto out; + } + + if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE && + flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) { + dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags); + goto out; + } + + ret = qcom_scm_waitq_wakeup(scm, wq_ctx); + if (ret) + goto out; + } while (more_pending); + +out: + return IRQ_HANDLED; +} + +static int qcom_scm_probe(struct platform_device *pdev) +{ + struct qcom_scm *scm; + int irq, ret; + + scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); + if (!scm) + return -ENOMEM; + + ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr); + if (ret < 0) + return ret; + + mutex_init(&scm->scm_bw_lock); + + scm->path = devm_of_icc_get(&pdev->dev, NULL); + if (IS_ERR(scm->path)) + return dev_err_probe(&pdev->dev, PTR_ERR(scm->path), + "failed to acquire interconnect path\n"); + + scm->core_clk = devm_clk_get_optional(&pdev->dev, "core"); + if (IS_ERR(scm->core_clk)) + return PTR_ERR(scm->core_clk); + + scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface"); + if (IS_ERR(scm->iface_clk)) + return PTR_ERR(scm->iface_clk); + + scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus"); + if (IS_ERR(scm->bus_clk)) + return PTR_ERR(scm->bus_clk); + + scm->reset.ops = &qcom_scm_pas_reset_ops; + scm->reset.nr_resets = 1; + scm->reset.of_node = pdev->dev.of_node; + ret = devm_reset_controller_register(&pdev->dev, &scm->reset); + if (ret) + return ret; + + /* vote for max clk rate for highest performance */ + ret = clk_set_rate(scm->core_clk, INT_MAX); + if (ret) + return ret; + + __scm = scm; + __scm->dev = &pdev->dev; + + init_completion(&__scm->waitq_comp); + + irq = platform_get_irq_optional(pdev, 0); + if (irq < 0) { + if (irq != -ENXIO) + return irq; + } else { + ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler, + 
IRQF_ONESHOT, "qcom-scm", __scm); + if (ret < 0) + return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n"); + } + + __get_convention(); + + /* + * If requested enable "download mode", from this point on warmboot + * will cause the boot stages to enter download mode, unless + * disabled below by a clean shutdown/reboot. + */ + if (download_mode) + qcom_scm_set_download_mode(true); + + + /* + * Disable SDI if indicated by DT that it is enabled by default. + */ + if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled")) + qcom_scm_disable_sdi(); + + /* + * Initialize the QSEECOM interface. + * + * Note: QSEECOM is fairly self-contained and this only adds the + * interface device (the driver of which does most of the heavy + * lifting). So any errors returned here should be either -ENOMEM or + * -EINVAL (with the latter only in case there's a bug in our code). + * This means that there is no need to bring down the whole SCM driver. + * Just log the error instead and let SCM live. + */ + ret = qcom_scm_qseecom_init(scm); + WARN(ret < 0, "failed to initialize qseecom: %d\n", ret); + + return 0; +} + +static void qcom_scm_shutdown(struct platform_device *pdev) +{ + /* Clean shutdown, disable download mode to allow normal restart */ + qcom_scm_set_download_mode(false); +} + +static const struct of_device_id qcom_scm_dt_match[] = { + { .compatible = "qcom,scm" }, + + /* Legacy entries kept for backwards compatibility */ + { .compatible = "qcom,scm-apq8064" }, + { .compatible = "qcom,scm-apq8084" }, + { .compatible = "qcom,scm-ipq4019" }, + { .compatible = "qcom,scm-msm8953" }, + { .compatible = "qcom,scm-msm8974" }, + { .compatible = "qcom,scm-msm8996" }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_scm_dt_match); + +static struct platform_driver qcom_scm_driver = { + .driver = { + .name = "qcom_scm", + .of_match_table = qcom_scm_dt_match, + .suppress_bind_attrs = true, + }, + .probe = qcom_scm_probe, + .shutdown = qcom_scm_shutdown, +}; + +static int __init qcom_scm_init(void) +{ + return platform_driver_register(&qcom_scm_driver); +} +subsys_initcall(qcom_scm_init); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h new file mode 100644 index 0000000000..4532907e84 --- /dev/null +++ b/drivers/firmware/qcom/qcom_scm.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2010-2015,2019 The Linux Foundation. All rights reserved. + */ +#ifndef __QCOM_SCM_INT_H +#define __QCOM_SCM_INT_H + +struct device; + +enum qcom_scm_convention { + SMC_CONVENTION_UNKNOWN, + SMC_CONVENTION_LEGACY, + SMC_CONVENTION_ARM_32, + SMC_CONVENTION_ARM_64, +}; + +extern enum qcom_scm_convention qcom_scm_convention; + +#define MAX_QCOM_SCM_ARGS 10 +#define MAX_QCOM_SCM_RETS 3 + +enum qcom_scm_arg_types { + QCOM_SCM_VAL, + QCOM_SCM_RO, + QCOM_SCM_RW, + QCOM_SCM_BUFVAL, +}; + +#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\ + (((a) & 0x3) << 4) | \ + (((b) & 0x3) << 6) | \ + (((c) & 0x3) << 8) | \ + (((d) & 0x3) << 10) | \ + (((e) & 0x3) << 12) | \ + (((f) & 0x3) << 14) | \ + (((g) & 0x3) << 16) | \ + (((h) & 0x3) << 18) | \ + (((i) & 0x3) << 20) | \ + (((j) & 0x3) << 22) | \ + ((num) & 0xf)) + +#define QCOM_SCM_ARGS(...) 
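+ /* Pad the variadic list with zeros so all ten type slots are filled.
+ * Worked example: QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW) packs to
+ * (2 & 0xf) | (0 << 4) | (2 << 6) == 0x82. */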
QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + + +/** + * struct qcom_scm_desc + * @arginfo: Metadata describing the arguments in args[] + * @args: The array of arguments for the secure syscall + */ +struct qcom_scm_desc { + u32 svc; + u32 cmd; + u32 arginfo; + u64 args[MAX_QCOM_SCM_ARGS]; + u32 owner; +}; + +/** + * struct qcom_scm_res + * @result: The values returned by the secure syscall + */ +struct qcom_scm_res { + u64 result[MAX_QCOM_SCM_RETS]; +}; + +int qcom_scm_wait_for_wq_completion(u32 wq_ctx); +int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending); + +#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF)) +int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, + enum qcom_scm_convention qcom_convention, + struct qcom_scm_res *res, bool atomic); +#define scm_smc_call(dev, desc, res, atomic) \ + __scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic)) + +#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff)) +int scm_legacy_call_atomic(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res); +int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res); + +#define QCOM_SCM_SVC_BOOT 0x01 +#define QCOM_SCM_BOOT_SET_ADDR 0x01 +#define QCOM_SCM_BOOT_TERMINATE_PC 0x02 +#define QCOM_SCM_BOOT_SDI_CONFIG 0x09 +#define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10 +#define QCOM_SCM_BOOT_SET_ADDR_MC 0x11 +#define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a +#define QCOM_SCM_FLUSH_FLAG_MASK 0x3 +#define QCOM_SCM_BOOT_MAX_CPUS 4 +#define QCOM_SCM_BOOT_MC_FLAG_AARCH64 BIT(0) +#define QCOM_SCM_BOOT_MC_FLAG_COLDBOOT BIT(1) +#define QCOM_SCM_BOOT_MC_FLAG_WARMBOOT BIT(2) + +#define QCOM_SCM_SVC_PIL 0x02 +#define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01 +#define QCOM_SCM_PIL_PAS_MEM_SETUP 0x02 +#define QCOM_SCM_PIL_PAS_AUTH_AND_RESET 0x05 +#define QCOM_SCM_PIL_PAS_SHUTDOWN 0x06 +#define QCOM_SCM_PIL_PAS_IS_SUPPORTED 0x07 +#define QCOM_SCM_PIL_PAS_MSS_RESET 0x0a + +#define QCOM_SCM_SVC_IO 0x05 +#define QCOM_SCM_IO_READ 0x01 +#define QCOM_SCM_IO_WRITE 0x02 + +#define QCOM_SCM_SVC_INFO 0x06 +#define QCOM_SCM_INFO_IS_CALL_AVAIL 0x01 + +#define QCOM_SCM_SVC_MP 0x0c +#define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02 +#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03 +#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04 +#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05 +#define QCOM_SCM_MP_VIDEO_VAR 0x08 +#define QCOM_SCM_MP_ASSIGN 0x16 + +#define QCOM_SCM_SVC_OCMEM 0x0f +#define QCOM_SCM_OCMEM_LOCK_CMD 0x01 +#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x02 + +#define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */ +#define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03 +#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04 + +#define QCOM_SCM_SVC_HDCP 0x11 +#define QCOM_SCM_HDCP_INVOKE 0x01 + +#define QCOM_SCM_SVC_LMH 0x13 +#define QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE 0x01 +#define QCOM_SCM_LMH_LIMIT_DCVSH 0x10 + +#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15 +#define QCOM_SCM_SMMU_PT_FORMAT 0x01 +#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03 +#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02 + +#define QCOM_SCM_SVC_WAITQ 0x24 +#define QCOM_SCM_WAITQ_RESUME 0x02 +#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03 + +/* common error codes */ +#define QCOM_SCM_V2_EBUSY -12 +#define QCOM_SCM_ENOMEM -5 +#define QCOM_SCM_EOPNOTSUPP -4 +#define QCOM_SCM_EINVAL_ADDR -3 +#define QCOM_SCM_EINVAL_ARG -2 +#define QCOM_SCM_ERROR -1 +#define QCOM_SCM_INTERRUPTED 1 +#define QCOM_SCM_WAITQ_SLEEP 2 + +static inline int qcom_scm_remap_error(int err) +{ + switch (err) { + case 
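+ /* Map the firmware's SCM status codes onto standard errno values;
+ * anything unrecognized falls through to the -EINVAL below. */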
QCOM_SCM_ERROR: + return -EIO; + case QCOM_SCM_EINVAL_ADDR: + case QCOM_SCM_EINVAL_ARG: + return -EINVAL; + case QCOM_SCM_EOPNOTSUPP: + return -EOPNOTSUPP; + case QCOM_SCM_ENOMEM: + return -ENOMEM; + case QCOM_SCM_V2_EBUSY: + return -EBUSY; + } + return -EINVAL; +} + +#endif diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c deleted file mode 100644 index 029e6d117c..0000000000 --- a/drivers/firmware/qcom_scm-legacy.c +++ /dev/null @@ -1,246 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. - * Copyright (C) 2015 Linaro Ltd. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "qcom_scm.h" - -static DEFINE_MUTEX(qcom_scm_lock); - - -/** - * struct arm_smccc_args - * @args: The array of values used in registers in smc instruction - */ -struct arm_smccc_args { - unsigned long args[8]; -}; - - -/** - * struct scm_legacy_command - one SCM command buffer - * @len: total available memory for command and response - * @buf_offset: start of command buffer - * @resp_hdr_offset: start of response buffer - * @id: command to be executed - * @buf: buffer returned from scm_legacy_get_command_buffer() - * - * An SCM command is laid out in memory as follows: - * - * ------------------- <--- struct scm_legacy_command - * | command header | - * ------------------- <--- scm_legacy_get_command_buffer() - * | command buffer | - * ------------------- <--- struct scm_legacy_response and - * | response header | scm_legacy_command_to_response() - * ------------------- <--- scm_legacy_get_response_buffer() - * | response buffer | - * ------------------- - * - * There can be arbitrary padding between the headers and buffers so - * you should always use the appropriate scm_legacy_get_*_buffer() routines - * to access the buffers in a safe manner. - */ -struct scm_legacy_command { - __le32 len; - __le32 buf_offset; - __le32 resp_hdr_offset; - __le32 id; - __le32 buf[]; -}; - -/** - * struct scm_legacy_response - one SCM response buffer - * @len: total available memory for response - * @buf_offset: start of response data relative to start of scm_legacy_response - * @is_complete: indicates if the command has finished processing - */ -struct scm_legacy_response { - __le32 len; - __le32 buf_offset; - __le32 is_complete; -}; - -/** - * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response - * @cmd: command - * - * Returns a pointer to a response for a command. - */ -static inline struct scm_legacy_response *scm_legacy_command_to_response( - const struct scm_legacy_command *cmd) -{ - return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset); -} - -/** - * scm_legacy_get_command_buffer() - Get a pointer to a command buffer - * @cmd: command - * - * Returns a pointer to the command buffer of a command. - */ -static inline void *scm_legacy_get_command_buffer( - const struct scm_legacy_command *cmd) -{ - return (void *)cmd->buf; -} - -/** - * scm_legacy_get_response_buffer() - Get a pointer to a response buffer - * @rsp: response - * - * Returns a pointer to a response buffer of a response. 
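- * (The offset comes from @rsp->buf_offset, which is relative to the
- * start of the response header.)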
- */ -static inline void *scm_legacy_get_response_buffer( - const struct scm_legacy_response *rsp) -{ - return (void *)rsp + le32_to_cpu(rsp->buf_offset); -} - -static void __scm_legacy_do(const struct arm_smccc_args *smc, - struct arm_smccc_res *res) -{ - do { - arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2], - smc->args[3], smc->args[4], smc->args[5], - smc->args[6], smc->args[7], res); - } while (res->a0 == QCOM_SCM_INTERRUPTED); -} - -/** - * scm_legacy_call() - Sends a command to the SCM and waits for the command to - * finish processing. - * @dev: device - * @desc: descriptor structure containing arguments and return values - * @res: results from SMC call - * - * A note on cache maintenance: - * Note that any buffers that are expected to be accessed by the secure world - * must be flushed before invoking qcom_scm_call and invalidated in the cache - * immediately after qcom_scm_call returns. Cache maintenance on the command - * and response buffers is taken care of by qcom_scm_call; however, callers are - * responsible for any other cached buffers passed over to the secure world. - */ -int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, - struct qcom_scm_res *res) -{ - u8 arglen = desc->arginfo & 0xf; - int ret = 0, context_id; - unsigned int i; - struct scm_legacy_command *cmd; - struct scm_legacy_response *rsp; - struct arm_smccc_args smc = {0}; - struct arm_smccc_res smc_res; - const size_t cmd_len = arglen * sizeof(__le32); - const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32); - size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len; - dma_addr_t cmd_phys; - __le32 *arg_buf; - const __le32 *res_buf; - - cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->len = cpu_to_le32(alloc_len); - cmd->buf_offset = cpu_to_le32(sizeof(*cmd)); - cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len); - cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd)); - - arg_buf = scm_legacy_get_command_buffer(cmd); - for (i = 0; i < arglen; i++) - arg_buf[i] = cpu_to_le32(desc->args[i]); - - rsp = scm_legacy_command_to_response(cmd); - - cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE); - if (dma_mapping_error(dev, cmd_phys)) { - kfree(cmd); - return -ENOMEM; - } - - smc.args[0] = 1; - smc.args[1] = (unsigned long)&context_id; - smc.args[2] = cmd_phys; - - mutex_lock(&qcom_scm_lock); - __scm_legacy_do(&smc, &smc_res); - if (smc_res.a0) - ret = qcom_scm_remap_error(smc_res.a0); - mutex_unlock(&qcom_scm_lock); - if (ret) - goto out; - - do { - dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len, - sizeof(*rsp), DMA_FROM_DEVICE); - } while (!rsp->is_complete); - - dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len + - le32_to_cpu(rsp->buf_offset), - resp_len, DMA_FROM_DEVICE); - - if (res) { - res_buf = scm_legacy_get_response_buffer(rsp); - for (i = 0; i < MAX_QCOM_SCM_RETS; i++) - res->result[i] = le32_to_cpu(res_buf[i]); - } -out: - dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE); - kfree(cmd); - return ret; -} - -#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5 -#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2 -#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8) -#define SCM_LEGACY_MASK_IRQS BIT(5) -#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \ - ((SCM_LEGACY_FNID(svc, cmd) << 12) | \ - SCM_LEGACY_CLASS_REGISTER | \ - SCM_LEGACY_MASK_IRQS | \ - (n & 0xf)) - -/** - * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments - * and 3 return values - * @unused: device, 
legacy argument, not used, can be NULL
- * @desc: SCM call descriptor containing arguments
- * @res: SCM call return values
- *
- * This shall only be used with commands that are guaranteed to be
- * uninterruptible, atomic and SMP safe.
- */
-int scm_legacy_call_atomic(struct device *unused,
- const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res)
-{
- int context_id;
- struct arm_smccc_res smc_res;
- size_t arglen = desc->arginfo & 0xf;
-
- BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS);
-
- arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen),
- (unsigned long)&context_id,
- desc->args[0], desc->args[1], desc->args[2],
- desc->args[3], desc->args[4], 0, &smc_res);
-
- if (res) {
- res->result[0] = smc_res.a1;
- res->result[1] = smc_res.a2;
- res->result[2] = smc_res.a3;
- }
-
- return smc_res.a0;
-}
diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
deleted file mode 100644
index 16cf88acfa..0000000000
--- a/drivers/firmware/qcom_scm-smc.c
+++ /dev/null
@@ -1,225 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "qcom_scm.h"
-
-/**
- * struct arm_smccc_args
- * @args: The array of values used in registers in smc instruction
- */
-struct arm_smccc_args {
- unsigned long args[8];
-};
-
-static DEFINE_MUTEX(qcom_scm_lock);
-
-#define QCOM_SCM_EBUSY_WAIT_MS 30
-#define QCOM_SCM_EBUSY_MAX_RETRY 20
-
-#define SCM_SMC_N_REG_ARGS 4
-#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1)
-#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
-#define SCM_SMC_FIRST_REG_IDX 2
-#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)
-
-static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
- struct arm_smccc_res *res)
-{
- unsigned long a0 = smc->args[0];
- struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
-
- quirk.state.a6 = 0;
-
- do {
- arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
- smc->args[3], smc->args[4], smc->args[5],
- quirk.state.a6, smc->args[7], res, &quirk);
-
- if (res->a0 == QCOM_SCM_INTERRUPTED)
- a0 = res->a0;
-
- } while (res->a0 == QCOM_SCM_INTERRUPTED);
-}
-
-static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
-{
- memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args));
-
- resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
- ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
- SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME));
-
- resume->args[1] = QCOM_SCM_ARGS(1);
-
- resume->args[2] = smc_call_ctx;
-}
-
-int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
-{
- int ret;
- struct arm_smccc_res get_wq_res;
- struct arm_smccc_args get_wq_ctx = {0};
-
- get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
- ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
- SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
-
- /* Guaranteed to return only success or error, no WAITQ_* */
- __scm_smc_do_quirk(&get_wq_ctx, &get_wq_res);
- ret = get_wq_res.a0;
- if (ret)
- return ret;
-
- *wq_ctx = get_wq_res.a1;
- *flags = get_wq_res.a2;
- *more_pending = get_wq_res.a3;
-
- return 0;
-}
-
-static int __scm_smc_do_quirk_handle_waitq(struct device *dev, struct arm_smccc_args *waitq,
- struct arm_smccc_res *res)
-{
- int ret;
- u32 wq_ctx, smc_call_ctx;
- struct arm_smccc_args resume;
- struct arm_smccc_args *smc = waitq;
-
- do {
-
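- /* Fire the call; if the firmware parks it with QCOM_SCM_WAITQ_SLEEP,
- * sleep until qcom_scm_irq_handler() signals the wake-up, then retry
- * with the WAITQ_RESUME arguments built from the returned context. */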
__scm_smc_do_quirk(smc, res); - - if (res->a0 == QCOM_SCM_WAITQ_SLEEP) { - wq_ctx = res->a1; - smc_call_ctx = res->a2; - - ret = qcom_scm_wait_for_wq_completion(wq_ctx); - if (ret) - return ret; - - fill_wq_resume_args(&resume, smc_call_ctx); - smc = &resume; - } - } while (res->a0 == QCOM_SCM_WAITQ_SLEEP); - - return 0; -} - -static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc, - struct arm_smccc_res *res, bool atomic) -{ - int ret, retry_count = 0; - - if (atomic) { - __scm_smc_do_quirk(smc, res); - return 0; - } - - do { - mutex_lock(&qcom_scm_lock); - - ret = __scm_smc_do_quirk_handle_waitq(dev, smc, res); - - mutex_unlock(&qcom_scm_lock); - - if (ret) - return ret; - - if (res->a0 == QCOM_SCM_V2_EBUSY) { - if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY) - break; - msleep(QCOM_SCM_EBUSY_WAIT_MS); - } - } while (res->a0 == QCOM_SCM_V2_EBUSY); - - return 0; -} - - -int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, - enum qcom_scm_convention qcom_convention, - struct qcom_scm_res *res, bool atomic) -{ - int arglen = desc->arginfo & 0xf; - int i, ret; - dma_addr_t args_phys = 0; - void *args_virt = NULL; - size_t alloc_len; - gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; - u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL; - u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ? - ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64; - struct arm_smccc_res smc_res; - struct arm_smccc_args smc = {0}; - - smc.args[0] = ARM_SMCCC_CALL_VAL( - smccc_call_type, - qcom_smccc_convention, - desc->owner, - SCM_SMC_FNID(desc->svc, desc->cmd)); - smc.args[1] = desc->arginfo; - for (i = 0; i < SCM_SMC_N_REG_ARGS; i++) - smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i]; - - if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) { - alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64); - args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); - - if (!args_virt) - return -ENOMEM; - - if (qcom_smccc_convention == ARM_SMCCC_SMC_32) { - __le32 *args = args_virt; - - for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++) - args[i] = cpu_to_le32(desc->args[i + - SCM_SMC_FIRST_EXT_IDX]); - } else { - __le64 *args = args_virt; - - for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++) - args[i] = cpu_to_le64(desc->args[i + - SCM_SMC_FIRST_EXT_IDX]); - } - - args_phys = dma_map_single(dev, args_virt, alloc_len, - DMA_TO_DEVICE); - - if (dma_mapping_error(dev, args_phys)) { - kfree(args_virt); - return -ENOMEM; - } - - smc.args[SCM_SMC_LAST_REG_IDX] = args_phys; - } - - /* ret error check follows after args_virt cleanup*/ - ret = __scm_smc_do(dev, &smc, &smc_res, atomic); - - if (args_virt) { - dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); - kfree(args_virt); - } - - if (ret) - return ret; - - if (res) { - res->result[0] = smc_res.a1; - res->result[1] = smc_res.a2; - res->result[2] = smc_res.a3; - } - - return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0; - -} diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c deleted file mode 100644 index 69831f1d91..0000000000 --- a/drivers/firmware/qcom_scm.c +++ /dev/null @@ -1,1518 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. - * Copyright (C) 2015 Linaro Ltd. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "qcom_scm.h" - -static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); -module_param(download_mode, bool, 0); - -struct qcom_scm { - struct device *dev; - struct clk *core_clk; - struct clk *iface_clk; - struct clk *bus_clk; - struct icc_path *path; - struct completion waitq_comp; - struct reset_controller_dev reset; - - /* control access to the interconnect path */ - struct mutex scm_bw_lock; - int scm_vote_count; - - u64 dload_mode_addr; -}; - -struct qcom_scm_current_perm_info { - __le32 vmid; - __le32 perm; - __le64 ctx; - __le32 ctx_size; - __le32 unused; -}; - -struct qcom_scm_mem_map_info { - __le64 mem_addr; - __le64 mem_size; -}; - -/* Each bit configures cold/warm boot address for one of the 4 CPUs */ -static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = { - 0, BIT(0), BIT(3), BIT(5) -}; -static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { - BIT(2), BIT(1), BIT(4), BIT(6) -}; - -#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0) -#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1) - -static const char * const qcom_scm_convention_names[] = { - [SMC_CONVENTION_UNKNOWN] = "unknown", - [SMC_CONVENTION_ARM_32] = "smc arm 32", - [SMC_CONVENTION_ARM_64] = "smc arm 64", - [SMC_CONVENTION_LEGACY] = "smc legacy", -}; - -static struct qcom_scm *__scm; - -static int qcom_scm_clk_enable(void) -{ - int ret; - - ret = clk_prepare_enable(__scm->core_clk); - if (ret) - goto bail; - - ret = clk_prepare_enable(__scm->iface_clk); - if (ret) - goto disable_core; - - ret = clk_prepare_enable(__scm->bus_clk); - if (ret) - goto disable_iface; - - return 0; - -disable_iface: - clk_disable_unprepare(__scm->iface_clk); -disable_core: - clk_disable_unprepare(__scm->core_clk); -bail: - return ret; -} - -static void qcom_scm_clk_disable(void) -{ - clk_disable_unprepare(__scm->core_clk); - clk_disable_unprepare(__scm->iface_clk); - clk_disable_unprepare(__scm->bus_clk); -} - -static int qcom_scm_bw_enable(void) -{ - int ret = 0; - - if (!__scm->path) - return 0; - - if (IS_ERR(__scm->path)) - return -EINVAL; - - mutex_lock(&__scm->scm_bw_lock); - if (!__scm->scm_vote_count) { - ret = icc_set_bw(__scm->path, 0, UINT_MAX); - if (ret < 0) { - dev_err(__scm->dev, "failed to set bandwidth request\n"); - goto err_bw; - } - } - __scm->scm_vote_count++; -err_bw: - mutex_unlock(&__scm->scm_bw_lock); - - return ret; -} - -static void qcom_scm_bw_disable(void) -{ - if (IS_ERR_OR_NULL(__scm->path)) - return; - - mutex_lock(&__scm->scm_bw_lock); - if (__scm->scm_vote_count-- == 1) - icc_set_bw(__scm->path, 0, 0); - mutex_unlock(&__scm->scm_bw_lock); -} - -enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; -static DEFINE_SPINLOCK(scm_query_lock); - -static enum qcom_scm_convention __get_convention(void) -{ - unsigned long flags; - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_INFO, - .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, - .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO, - QCOM_SCM_INFO_IS_CALL_AVAIL) | - (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT), - .arginfo = QCOM_SCM_ARGS(1), - .owner = ARM_SMCCC_OWNER_SIP, - }; - struct qcom_scm_res res; - enum qcom_scm_convention probed_convention; - int ret; - bool forced = false; - - if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN)) - return qcom_scm_convention; - - /* - * Per the "SMC calling convention specification", the 
64-bit calling
- * convention can only be used when the client is 64-bit, otherwise the
- * system will encounter undefined behaviour.
- */
-#if IS_ENABLED(CONFIG_ARM64)
- /*
- * Device isn't required as there is only one argument - no device
- * needed to dma_map_single to secure world
- */
- probed_convention = SMC_CONVENTION_ARM_64;
- ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
- if (!ret && res.result[0] == 1)
- goto found;
-
- /*
- * Some SC7180 firmwares didn't implement the
- * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
- * ARM_64 calling convention on these firmwares. Luckily we don't make
- * any early calls into the firmware on these SoCs so the device
- * pointer will be valid here to check if the compatible matches.
- */
- if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
- forced = true;
- goto found;
- }
-#endif
-
- probed_convention = SMC_CONVENTION_ARM_32;
- ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
- if (!ret && res.result[0] == 1)
- goto found;
-
- probed_convention = SMC_CONVENTION_LEGACY;
-found:
- spin_lock_irqsave(&scm_query_lock, flags);
- if (probed_convention != qcom_scm_convention) {
- qcom_scm_convention = probed_convention;
- pr_info("qcom_scm: convention: %s%s\n",
- qcom_scm_convention_names[qcom_scm_convention],
- forced ? " (forced)" : "");
- }
- spin_unlock_irqrestore(&scm_query_lock, flags);
-
- return qcom_scm_convention;
-}
-
-/**
- * qcom_scm_call() - Invoke a syscall in the secure world
- * @dev: device
- * @desc: Descriptor structure containing arguments and return values
- * @res: Structure containing results from SMC/HVC call
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This should *only* be called in pre-emptible context.
- */
-static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res)
-{
- might_sleep();
- switch (__get_convention()) {
- case SMC_CONVENTION_ARM_32:
- case SMC_CONVENTION_ARM_64:
- return scm_smc_call(dev, desc, res, false);
- case SMC_CONVENTION_LEGACY:
- return scm_legacy_call(dev, desc, res);
- default:
- pr_err("Unknown current SCM calling convention.\n");
- return -EINVAL;
- }
-}
-
-/**
- * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
- * @dev: device
- * @desc: Descriptor structure containing arguments and return values
- * @res: Structure containing results from SMC/HVC call
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This can be called in atomic context.
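- * For example, qcom_scm_io_readl() and qcom_scm_io_writel() below use
- * this variant so that secure register accesses work from atomic
- * sections.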
- */ -static int qcom_scm_call_atomic(struct device *dev, - const struct qcom_scm_desc *desc, - struct qcom_scm_res *res) -{ - switch (__get_convention()) { - case SMC_CONVENTION_ARM_32: - case SMC_CONVENTION_ARM_64: - return scm_smc_call(dev, desc, res, true); - case SMC_CONVENTION_LEGACY: - return scm_legacy_call_atomic(dev, desc, res); - default: - pr_err("Unknown current SCM calling convention.\n"); - return -EINVAL; - } -} - -static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, - u32 cmd_id) -{ - int ret; - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_INFO, - .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, - .owner = ARM_SMCCC_OWNER_SIP, - }; - struct qcom_scm_res res; - - desc.arginfo = QCOM_SCM_ARGS(1); - switch (__get_convention()) { - case SMC_CONVENTION_ARM_32: - case SMC_CONVENTION_ARM_64: - desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) | - (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT); - break; - case SMC_CONVENTION_LEGACY: - desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id); - break; - default: - pr_err("Unknown SMC convention being used\n"); - return false; - } - - ret = qcom_scm_call(dev, &desc, &res); - - return ret ? false : !!res.result[0]; -} - -static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits) -{ - int cpu; - unsigned int flags = 0; - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_BOOT, - .cmd = QCOM_SCM_BOOT_SET_ADDR, - .arginfo = QCOM_SCM_ARGS(2), - .owner = ARM_SMCCC_OWNER_SIP, - }; - - for_each_present_cpu(cpu) { - if (cpu >= QCOM_SCM_BOOT_MAX_CPUS) - return -EINVAL; - flags |= cpu_bits[cpu]; - } - - desc.args[0] = flags; - desc.args[1] = virt_to_phys(entry); - - return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); -} - -static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags) -{ - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_BOOT, - .cmd = QCOM_SCM_BOOT_SET_ADDR_MC, - .owner = ARM_SMCCC_OWNER_SIP, - .arginfo = QCOM_SCM_ARGS(6), - .args = { - virt_to_phys(entry), - /* Apply to all CPUs in all affinity levels */ - ~0ULL, ~0ULL, ~0ULL, ~0ULL, - flags, - }, - }; - - /* Need a device for DMA of the additional arguments */ - if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY) - return -EOPNOTSUPP; - - return qcom_scm_call(__scm->dev, &desc, NULL); -} - -/** - * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus - * @entry: Entry point function for the cpus - * - * Set the Linux entry point for the SCM to transfer control to when coming - * out of a power down. CPU power down may be executed on cpuidle or hotplug. - */ -int qcom_scm_set_warm_boot_addr(void *entry) -{ - if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT)) - /* Fallback to old SCM call */ - return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits); - return 0; -} -EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr); - -/** - * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus - * @entry: Entry point function for the cpus - */ -int qcom_scm_set_cold_boot_addr(void *entry) -{ - if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT)) - /* Fallback to old SCM call */ - return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits); - return 0; -} -EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr); - -/** - * qcom_scm_cpu_power_down() - Power down the cpu - * @flags: Flags to flush cache - * - * This is an end point to power down cpu. 
If there is a pending interrupt,
- * control returns from this function; otherwise, the cpu jumps to the
- * warm boot entry point set for this cpu upon reset.
- */
-void qcom_scm_cpu_power_down(u32 flags)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_BOOT,
- .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
- .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
- .arginfo = QCOM_SCM_ARGS(1),
- .owner = ARM_SMCCC_OWNER_SIP,
- };
-
- qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
-}
-EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
-
-int qcom_scm_set_remote_state(u32 state, u32 id)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_BOOT,
- .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
- .arginfo = QCOM_SCM_ARGS(2),
- .args[0] = state,
- .args[1] = id,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
- int ret;
-
- ret = qcom_scm_call(__scm->dev, &desc, &res);
-
- return ret ? : res.result[0];
-}
-EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
-
-static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_BOOT,
- .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
- .arginfo = QCOM_SCM_ARGS(2),
- .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
-
- desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
-
- return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
-}
-
-static void qcom_scm_set_download_mode(bool enable)
-{
- bool avail;
- int ret = 0;
-
- avail = __qcom_scm_is_call_available(__scm->dev,
- QCOM_SCM_SVC_BOOT,
- QCOM_SCM_BOOT_SET_DLOAD_MODE);
- if (avail) {
- ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
- } else if (__scm->dload_mode_addr) {
- ret = qcom_scm_io_writel(__scm->dload_mode_addr,
- enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
- } else {
- dev_err(__scm->dev,
- "No available mechanism for setting download mode\n");
- }
-
- if (ret)
- dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
-}
-
-/**
- * qcom_scm_pas_init_image() - Initialize peripheral authentication service
- * state machine for a given peripheral, using the
- * metadata
- * @peripheral: peripheral id
- * @metadata: pointer to memory containing ELF header, program header table
- * and optional blob of data used for authenticating the metadata
- * and the rest of the firmware
- * @size: size of the metadata
- * @ctx: optional metadata context
- *
- * Return: 0 on success.
- *
- * Upon successful return, the PAS metadata context (@ctx) will be used to
- * track the metadata allocation, this needs to be released by invoking
- * qcom_scm_pas_metadata_release() by the caller.
- */
-int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
- struct qcom_scm_pas_metadata *ctx)
-{
- dma_addr_t mdata_phys;
- void *mdata_buf;
- int ret;
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_PIL,
- .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
- .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
- .args[0] = peripheral,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
-
- /*
- * During the scm call memory protection will be enabled for the
- * metadata blob, so make sure it's physically contiguous, 4K aligned
- * and non-cacheable to avoid XPU violations.
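- * dma_alloc_coherent() below satisfies those constraints: the buffer
- * is physically contiguous, at least page-aligned and, on these SoCs,
- * mapped non-cacheable.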
- */ - mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, - GFP_KERNEL); - if (!mdata_buf) { - dev_err(__scm->dev, "Allocation of metadata buffer failed.\n"); - return -ENOMEM; - } - memcpy(mdata_buf, metadata, size); - - ret = qcom_scm_clk_enable(); - if (ret) - goto out; - - ret = qcom_scm_bw_enable(); - if (ret) - return ret; - - desc.args[1] = mdata_phys; - - ret = qcom_scm_call(__scm->dev, &desc, &res); - - qcom_scm_bw_disable(); - qcom_scm_clk_disable(); - -out: - if (ret < 0 || !ctx) { - dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); - } else if (ctx) { - ctx->ptr = mdata_buf; - ctx->phys = mdata_phys; - ctx->size = size; - } - - return ret ? : res.result[0]; -} -EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image); - -/** - * qcom_scm_pas_metadata_release() - release metadata context - * @ctx: metadata context - */ -void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx) -{ - if (!ctx->ptr) - return; - - dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys); - - ctx->ptr = NULL; - ctx->phys = 0; - ctx->size = 0; -} -EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release); - -/** - * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral - * for firmware loading - * @peripheral: peripheral id - * @addr: start address of memory area to prepare - * @size: size of the memory area to prepare - * - * Returns 0 on success. - */ -int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) -{ - int ret; - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_PIL, - .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP, - .arginfo = QCOM_SCM_ARGS(3), - .args[0] = peripheral, - .args[1] = addr, - .args[2] = size, - .owner = ARM_SMCCC_OWNER_SIP, - }; - struct qcom_scm_res res; - - ret = qcom_scm_clk_enable(); - if (ret) - return ret; - - ret = qcom_scm_bw_enable(); - if (ret) - return ret; - - ret = qcom_scm_call(__scm->dev, &desc, &res); - qcom_scm_bw_disable(); - qcom_scm_clk_disable(); - - return ret ? : res.result[0]; -} -EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup); - -/** - * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware - * and reset the remote processor - * @peripheral: peripheral id - * - * Return 0 on success. - */ -int qcom_scm_pas_auth_and_reset(u32 peripheral) -{ - int ret; - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_PIL, - .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET, - .arginfo = QCOM_SCM_ARGS(1), - .args[0] = peripheral, - .owner = ARM_SMCCC_OWNER_SIP, - }; - struct qcom_scm_res res; - - ret = qcom_scm_clk_enable(); - if (ret) - return ret; - - ret = qcom_scm_bw_enable(); - if (ret) - return ret; - - ret = qcom_scm_call(__scm->dev, &desc, &res); - qcom_scm_bw_disable(); - qcom_scm_clk_disable(); - - return ret ? : res.result[0]; -} -EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset); - -/** - * qcom_scm_pas_shutdown() - Shut down the remote processor - * @peripheral: peripheral id - * - * Returns 0 on success. - */ -int qcom_scm_pas_shutdown(u32 peripheral) -{ - int ret; - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_PIL, - .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN, - .arginfo = QCOM_SCM_ARGS(1), - .args[0] = peripheral, - .owner = ARM_SMCCC_OWNER_SIP, - }; - struct qcom_scm_res res; - - ret = qcom_scm_clk_enable(); - if (ret) - return ret; - - ret = qcom_scm_bw_enable(); - if (ret) - return ret; - - ret = qcom_scm_call(__scm->dev, &desc, &res); - - qcom_scm_bw_disable(); - qcom_scm_clk_disable(); - - return ret ? 
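/* transport/convention error from the SMC layer, else the PAS status word */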
: res.result[0];
-}
-EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
-
-/**
- * qcom_scm_pas_supported() - Check if the peripheral authentication service is
- * available for the given peripheral
- * @peripheral: peripheral id
- *
- * Returns true if PAS is supported for this peripheral, otherwise false.
- */
-bool qcom_scm_pas_supported(u32 peripheral)
-{
- int ret;
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_PIL,
- .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
- .arginfo = QCOM_SCM_ARGS(1),
- .args[0] = peripheral,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
-
- if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PIL_PAS_IS_SUPPORTED))
- return false;
-
- ret = qcom_scm_call(__scm->dev, &desc, &res);
-
- return ret ? false : !!res.result[0];
-}
-EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
-
-static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_PIL,
- .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
- .arginfo = QCOM_SCM_ARGS(2),
- .args[0] = reset,
- .args[1] = 0,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
- int ret;
-
- ret = qcom_scm_call(__scm->dev, &desc, &res);
-
- return ret ? : res.result[0];
-}
-
-static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long idx)
-{
- if (idx != 0)
- return -EINVAL;
-
- return __qcom_scm_pas_mss_reset(__scm->dev, 1);
-}
-
-static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long idx)
-{
- if (idx != 0)
- return -EINVAL;
-
- return __qcom_scm_pas_mss_reset(__scm->dev, 0);
-}
-
-static const struct reset_control_ops qcom_scm_pas_reset_ops = {
- .assert = qcom_scm_pas_reset_assert,
- .deassert = qcom_scm_pas_reset_deassert,
-};
-
-int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_IO,
- .cmd = QCOM_SCM_IO_READ,
- .arginfo = QCOM_SCM_ARGS(1),
- .args[0] = addr,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
- int ret;
-
-
- ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
- if (ret >= 0)
- *val = res.result[0];
-
- return ret < 0 ? ret : 0;
-}
-EXPORT_SYMBOL_GPL(qcom_scm_io_readl);
-
-int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_IO,
- .cmd = QCOM_SCM_IO_WRITE,
- .arginfo = QCOM_SCM_ARGS(2),
- .args[0] = addr,
- .args[1] = val,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
-
- return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
-}
-EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
-
-/**
- * qcom_scm_restore_sec_cfg_available() - Check if secure environment
- * supports restore security config interface.
- *
- * Return true if restore-cfg interface is supported, false if not.
- */
-bool qcom_scm_restore_sec_cfg_available(void)
-{
- return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
- QCOM_SCM_MP_RESTORE_SEC_CFG);
-}
-EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
-
-int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_MP,
- .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
- .arginfo = QCOM_SCM_ARGS(2),
- .args[0] = device_id,
- .args[1] = spare,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
- int ret;
-
- ret = qcom_scm_call(__scm->dev, &desc, &res);
-
- return ret ?
: res.result[0];
-}
-EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
-
-int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_MP,
- .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
- .arginfo = QCOM_SCM_ARGS(1),
- .args[0] = spare,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
- int ret;
-
- ret = qcom_scm_call(__scm->dev, &desc, &res);
-
- if (size)
- *size = res.result[0];
-
- return ret ? : res.result[1];
-}
-EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
-
-int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_MP,
- .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
- .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
- QCOM_SCM_VAL),
- .args[0] = addr,
- .args[1] = size,
- .args[2] = spare,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- int ret;
-
- ret = qcom_scm_call(__scm->dev, &desc, NULL);
-
- /* the pg table has been initialized already, ignore the error */
- if (ret == -EPERM)
- ret = 0;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
-
-int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_MP,
- .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
- .arginfo = QCOM_SCM_ARGS(2),
- .args[0] = size,
- .args[1] = spare,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
-
- return qcom_scm_call(__scm->dev, &desc, NULL);
-}
-EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
-
-int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
- u32 cp_nonpixel_start,
- u32 cp_nonpixel_size)
-{
- int ret;
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_MP,
- .cmd = QCOM_SCM_MP_VIDEO_VAR,
- .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
- QCOM_SCM_VAL, QCOM_SCM_VAL),
- .args[0] = cp_start,
- .args[1] = cp_size,
- .args[2] = cp_nonpixel_start,
- .args[3] = cp_nonpixel_size,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
-
- ret = qcom_scm_call(__scm->dev, &desc, &res);
-
- return ret ? : res.result[0];
-}
-EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
-
-static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
- size_t mem_sz, phys_addr_t src, size_t src_sz,
- phys_addr_t dest, size_t dest_sz)
-{
- int ret;
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_MP,
- .cmd = QCOM_SCM_MP_ASSIGN,
- .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
- QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
- QCOM_SCM_VAL, QCOM_SCM_VAL),
- .args[0] = mem_region,
- .args[1] = mem_sz,
- .args[2] = src,
- .args[3] = src_sz,
- .args[4] = dest,
- .args[5] = dest_sz,
- .args[6] = 0,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
-
- ret = qcom_scm_call(dev, &desc, &res);
-
- return ret ? : res.result[0];
-}
-
-/**
- * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
- * @mem_addr: mem region whose ownership needs to be reassigned
- * @mem_sz: size of the region.
- * @srcvm: vmid for the current set of owners, each set bit in
- * the flag indicates a unique owner
- * @newvm: array having new owners and corresponding permission
- * flags
- * @dest_cnt: number of owners in the next set.
- *
- * Return negative errno on failure or 0 on success with @srcvm updated.
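- *
- * A minimal usage sketch (the vmid/perm constants come from the public
- * qcom_scm.h header; phys and size are illustrative placeholders):
- *
- * u64 owners = BIT(QCOM_SCM_VMID_HLOS);
- * struct qcom_scm_vmperm next = {
- * .vmid = QCOM_SCM_VMID_MSS_MSA,
- * .perm = QCOM_SCM_PERM_RW,
- * };
- *
- * ret = qcom_scm_assign_mem(phys, size, &owners, &next, 1);
- *
- * On success, owners then reads back as BIT(QCOM_SCM_VMID_MSS_MSA).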
- */
-int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- u64 *srcvm,
- const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt)
-{
- struct qcom_scm_current_perm_info *destvm;
- struct qcom_scm_mem_map_info *mem_to_map;
- phys_addr_t mem_to_map_phys;
- phys_addr_t dest_phys;
- dma_addr_t ptr_phys;
- size_t mem_to_map_sz;
- size_t dest_sz;
- size_t src_sz;
- size_t ptr_sz;
- int next_vm;
- __le32 *src;
- void *ptr;
- int ret, i, b;
- u64 srcvm_bits = *srcvm;
-
- src_sz = hweight64(srcvm_bits) * sizeof(*src);
- mem_to_map_sz = sizeof(*mem_to_map);
- dest_sz = dest_cnt * sizeof(*destvm);
- ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
- ALIGN(dest_sz, SZ_64);
-
- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- /* Fill in the source vmid details */
- src = ptr;
- i = 0;
- for (b = 0; b < BITS_PER_TYPE(u64); b++) {
- if (srcvm_bits & BIT(b))
- src[i++] = cpu_to_le32(b);
- }
-
- /* Fill in details of the memory buffer to map */
- mem_to_map = ptr + ALIGN(src_sz, SZ_64);
- mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
- mem_to_map->mem_addr = cpu_to_le64(mem_addr);
- mem_to_map->mem_size = cpu_to_le64(mem_sz);
-
- next_vm = 0;
- /* Fill in the destination vmid details */
- destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
- dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
- for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
- destvm->vmid = cpu_to_le32(newvm->vmid);
- destvm->perm = cpu_to_le32(newvm->perm);
- destvm->ctx = 0;
- destvm->ctx_size = 0;
- next_vm |= BIT(newvm->vmid);
- }
-
- ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
- ptr_phys, src_sz, dest_phys, dest_sz);
- dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
- if (ret) {
- dev_err(__scm->dev,
- "Assign memory protection call failed %d\n", ret);
- return -EINVAL;
- }
-
- *srcvm = next_vm;
- return 0;
-}
-EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
-
-/**
- * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
- */
-bool qcom_scm_ocmem_lock_available(void)
-{
- return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
- QCOM_SCM_OCMEM_LOCK_CMD);
-}
-EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
-
-/**
- * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
- * region to the specified initiator
- *
- * @id: tz initiator id
- * @offset: OCMEM offset
- * @size: OCMEM size
- * @mode: access mode (WIDE/NARROW)
- */
-int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
- u32 mode)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_OCMEM,
- .cmd = QCOM_SCM_OCMEM_LOCK_CMD,
- .args[0] = id,
- .args[1] = offset,
- .args[2] = size,
- .args[3] = mode,
- .arginfo = QCOM_SCM_ARGS(4),
- };
-
- return qcom_scm_call(__scm->dev, &desc, NULL);
-}
-EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
-
-/**
- * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
- * region from the specified initiator
- *
- * @id: tz initiator id
- * @offset: OCMEM offset
- * @size: OCMEM size
- */
-int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_OCMEM,
- .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
- .args[0] = id,
- .args[1] = offset,
- .args[2] = size,
- .arginfo = QCOM_SCM_ARGS(3),
- };
-
- return qcom_scm_call(__scm->dev, &desc, NULL);
-}
-EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
-
-/**
- * qcom_scm_ice_available() - Is the ICE key programming interface
available? - * - * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and - * qcom_scm_ice_set_key() are available. - */ -bool qcom_scm_ice_available(void) -{ - return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, - QCOM_SCM_ES_INVALIDATE_ICE_KEY) && - __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, - QCOM_SCM_ES_CONFIG_SET_ICE_KEY); -} -EXPORT_SYMBOL_GPL(qcom_scm_ice_available); - -/** - * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key - * @index: the keyslot to invalidate - * - * The UFSHCI and eMMC standards define a standard way to do this, but it - * doesn't work on these SoCs; only this SCM call does. - * - * It is assumed that the SoC has only one ICE instance being used, as this SCM - * call doesn't specify which ICE instance the keyslot belongs to. - * - * Return: 0 on success; -errno on failure. - */ -int qcom_scm_ice_invalidate_key(u32 index) -{ - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_ES, - .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY, - .arginfo = QCOM_SCM_ARGS(1), - .args[0] = index, - .owner = ARM_SMCCC_OWNER_SIP, - }; - - return qcom_scm_call(__scm->dev, &desc, NULL); -} -EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key); - -/** - * qcom_scm_ice_set_key() - Set an inline encryption key - * @index: the keyslot into which to set the key - * @key: the key to program - * @key_size: the size of the key in bytes - * @cipher: the encryption algorithm the key is for - * @data_unit_size: the encryption data unit size, i.e. the size of each - * individual plaintext and ciphertext. Given in 512-byte - * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc. - * - * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it - * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline. - * - * The UFSHCI and eMMC standards define a standard way to do this, but it - * doesn't work on these SoCs; only this SCM call does. - * - * It is assumed that the SoC has only one ICE instance being used, as this SCM - * call doesn't specify which ICE instance the keyslot belongs to. - * - * Return: 0 on success; -errno on failure. - */ -int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, - enum qcom_scm_ice_cipher cipher, u32 data_unit_size) -{ - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_ES, - .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY, - .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW, - QCOM_SCM_VAL, QCOM_SCM_VAL, - QCOM_SCM_VAL), - .args[0] = index, - .args[2] = key_size, - .args[3] = cipher, - .args[4] = data_unit_size, - .owner = ARM_SMCCC_OWNER_SIP, - }; - void *keybuf; - dma_addr_t key_phys; - int ret; - - /* - * 'key' may point to vmalloc()'ed memory, but we need to pass a - * physical address that's been properly flushed. The sanctioned way to - * do this is by using the DMA API. But as is best practice for crypto - * keys, we also must wipe the key after use. This makes kmemdup() + - * dma_map_single() not clearly correct, since the DMA API can use - * bounce buffers. Instead, just use dma_alloc_coherent(). Programming - * keys is normally rare and thus not performance-critical. 
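- * The expected callers are the inline-crypto paths of the Qualcomm UFS
- * and eMMC host drivers, which (re)program a keyslot whenever the block
- * layer's keyslot manager selects it.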
- */ - - keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys, - GFP_KERNEL); - if (!keybuf) - return -ENOMEM; - memcpy(keybuf, key, key_size); - desc.args[1] = key_phys; - - ret = qcom_scm_call(__scm->dev, &desc, NULL); - - memzero_explicit(keybuf, key_size); - - dma_free_coherent(__scm->dev, key_size, keybuf, key_phys); - return ret; -} -EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key); - -/** - * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. - * - * Return true if HDCP is supported, false if not. - */ -bool qcom_scm_hdcp_available(void) -{ - bool avail; - int ret = qcom_scm_clk_enable(); - - if (ret) - return ret; - - avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, - QCOM_SCM_HDCP_INVOKE); - - qcom_scm_clk_disable(); - - return avail; -} -EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available); - -/** - * qcom_scm_hdcp_req() - Send HDCP request. - * @req: HDCP request array - * @req_cnt: HDCP request array count - * @resp: response buffer passed to SCM - * - * Write HDCP register(s) through SCM. - */ -int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) -{ - int ret; - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_HDCP, - .cmd = QCOM_SCM_HDCP_INVOKE, - .arginfo = QCOM_SCM_ARGS(10), - .args = { - req[0].addr, - req[0].val, - req[1].addr, - req[1].val, - req[2].addr, - req[2].val, - req[3].addr, - req[3].val, - req[4].addr, - req[4].val - }, - .owner = ARM_SMCCC_OWNER_SIP, - }; - struct qcom_scm_res res; - - if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) - return -ERANGE; - - ret = qcom_scm_clk_enable(); - if (ret) - return ret; - - ret = qcom_scm_call(__scm->dev, &desc, &res); - *resp = res.result[0]; - - qcom_scm_clk_disable(); - - return ret; -} -EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req); - -int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) -{ - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_SMMU_PROGRAM, - .cmd = QCOM_SCM_SMMU_PT_FORMAT, - .arginfo = QCOM_SCM_ARGS(3), - .args[0] = sec_id, - .args[1] = ctx_num, - .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */ - .owner = ARM_SMCCC_OWNER_SIP, - }; - - return qcom_scm_call(__scm->dev, &desc, NULL); -} -EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format); - -int qcom_scm_qsmmu500_wait_safe_toggle(bool en) -{ - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_SMMU_PROGRAM, - .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1, - .arginfo = QCOM_SCM_ARGS(2), - .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL, - .args[1] = en, - .owner = ARM_SMCCC_OWNER_SIP, - }; - - - return qcom_scm_call_atomic(__scm->dev, &desc, NULL); -} -EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle); - -bool qcom_scm_lmh_dcvsh_available(void) -{ - return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH); -} -EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available); - -int qcom_scm_lmh_profile_change(u32 profile_id) -{ - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_LMH, - .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE, - .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL), - .args[0] = profile_id, - .owner = ARM_SMCCC_OWNER_SIP, - }; - - return qcom_scm_call(__scm->dev, &desc, NULL); -} -EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change); - -int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, - u64 limit_node, u32 node_id, u64 version) -{ - dma_addr_t payload_phys; - u32 *payload_buf; - int ret, payload_size = 5 * sizeof(u32); - - struct qcom_scm_desc desc = { - .svc = QCOM_SCM_SVC_LMH, - .cmd = QCOM_SCM_LMH_LIMIT_DCVSH, - .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, 
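/* the QCOM_SCM_RO slot is args[0], the DMA address of the 5-word payload */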
QCOM_SCM_VAL, QCOM_SCM_VAL, - QCOM_SCM_VAL, QCOM_SCM_VAL), - .args[1] = payload_size, - .args[2] = limit_node, - .args[3] = node_id, - .args[4] = version, - .owner = ARM_SMCCC_OWNER_SIP, - }; - - payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL); - if (!payload_buf) - return -ENOMEM; - - payload_buf[0] = payload_fn; - payload_buf[1] = 0; - payload_buf[2] = payload_reg; - payload_buf[3] = 1; - payload_buf[4] = payload_val; - - desc.args[0] = payload_phys; - - ret = qcom_scm_call(__scm->dev, &desc, NULL); - - dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys); - return ret; -} -EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh); - -static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) -{ - struct device_node *tcsr; - struct device_node *np = dev->of_node; - struct resource res; - u32 offset; - int ret; - - tcsr = of_parse_phandle(np, "qcom,dload-mode", 0); - if (!tcsr) - return 0; - - ret = of_address_to_resource(tcsr, 0, &res); - of_node_put(tcsr); - if (ret) - return ret; - - ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset); - if (ret < 0) - return ret; - - *addr = res.start + offset; - - return 0; -} - -/** - * qcom_scm_is_available() - Checks if SCM is available - */ -bool qcom_scm_is_available(void) -{ - return !!__scm; -} -EXPORT_SYMBOL_GPL(qcom_scm_is_available); - -static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx) -{ - /* FW currently only supports a single wq_ctx (zero). - * TODO: Update this logic to include dynamic allocation and lookup of - * completion structs when FW supports more wq_ctx values. - */ - if (wq_ctx != 0) { - dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n"); - return -EINVAL; - } - - return 0; -} - -int qcom_scm_wait_for_wq_completion(u32 wq_ctx) -{ - int ret; - - ret = qcom_scm_assert_valid_wq_ctx(wq_ctx); - if (ret) - return ret; - - wait_for_completion(&__scm->waitq_comp); - - return 0; -} - -static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx) -{ - int ret; - - ret = qcom_scm_assert_valid_wq_ctx(wq_ctx); - if (ret) - return ret; - - complete(&__scm->waitq_comp); - - return 0; -} - -static irqreturn_t qcom_scm_irq_handler(int irq, void *data) -{ - int ret; - struct qcom_scm *scm = data; - u32 wq_ctx, flags, more_pending = 0; - - do { - ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending); - if (ret) { - dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret); - goto out; - } - - if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE && - flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) { - dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags); - goto out; - } - - ret = qcom_scm_waitq_wakeup(scm, wq_ctx); - if (ret) - goto out; - } while (more_pending); - -out: - return IRQ_HANDLED; -} - -static int qcom_scm_probe(struct platform_device *pdev) -{ - struct qcom_scm *scm; - int irq, ret; - - scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); - if (!scm) - return -ENOMEM; - - ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr); - if (ret < 0) - return ret; - - mutex_init(&scm->scm_bw_lock); - - scm->path = devm_of_icc_get(&pdev->dev, NULL); - if (IS_ERR(scm->path)) - return dev_err_probe(&pdev->dev, PTR_ERR(scm->path), - "failed to acquire interconnect path\n"); - - scm->core_clk = devm_clk_get_optional(&pdev->dev, "core"); - if (IS_ERR(scm->core_clk)) - return PTR_ERR(scm->core_clk); - - scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface"); - if (IS_ERR(scm->iface_clk)) - return PTR_ERR(scm->iface_clk); - - scm->bus_clk 
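/* optional, like core/iface above: devm_clk_get_optional() returns NULL when the clock is absent */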
-static int qcom_scm_probe(struct platform_device *pdev)
-{
-	struct qcom_scm *scm;
-	int irq, ret;
-
-	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
-	if (!scm)
-		return -ENOMEM;
-
-	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
-	if (ret < 0)
-		return ret;
-
-	mutex_init(&scm->scm_bw_lock);
-
-	scm->path = devm_of_icc_get(&pdev->dev, NULL);
-	if (IS_ERR(scm->path))
-		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
-				     "failed to acquire interconnect path\n");
-
-	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
-	if (IS_ERR(scm->core_clk))
-		return PTR_ERR(scm->core_clk);
-
-	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
-	if (IS_ERR(scm->iface_clk))
-		return PTR_ERR(scm->iface_clk);
-
-	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
-	if (IS_ERR(scm->bus_clk))
-		return PTR_ERR(scm->bus_clk);
-
-	scm->reset.ops = &qcom_scm_pas_reset_ops;
-	scm->reset.nr_resets = 1;
-	scm->reset.of_node = pdev->dev.of_node;
-	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
-	if (ret)
-		return ret;
-
-	/* vote for max clk rate for highest performance */
-	ret = clk_set_rate(scm->core_clk, INT_MAX);
-	if (ret)
-		return ret;
-
-	__scm = scm;
-	__scm->dev = &pdev->dev;
-
-	init_completion(&__scm->waitq_comp);
-
-	irq = platform_get_irq_optional(pdev, 0);
-	if (irq < 0) {
-		if (irq != -ENXIO)
-			return irq;
-	} else {
-		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
-						IRQF_ONESHOT, "qcom-scm", __scm);
-		if (ret < 0)
-			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
-	}
-
-	__get_convention();
-
-	/*
-	 * If requested enable "download mode", from this point on warmboot
-	 * will cause the boot stages to enter download mode, unless
-	 * disabled below by a clean shutdown/reboot.
-	 */
-	if (download_mode)
-		qcom_scm_set_download_mode(true);
-
-	return 0;
-}
-
-static void qcom_scm_shutdown(struct platform_device *pdev)
-{
-	/* Clean shutdown, disable download mode to allow normal restart */
-	qcom_scm_set_download_mode(false);
-}
-
-static const struct of_device_id qcom_scm_dt_match[] = {
-	{ .compatible = "qcom,scm" },
-
-	/* Legacy entries kept for backwards compatibility */
-	{ .compatible = "qcom,scm-apq8064" },
-	{ .compatible = "qcom,scm-apq8084" },
-	{ .compatible = "qcom,scm-ipq4019" },
-	{ .compatible = "qcom,scm-msm8953" },
-	{ .compatible = "qcom,scm-msm8974" },
-	{ .compatible = "qcom,scm-msm8996" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
-
-static struct platform_driver qcom_scm_driver = {
-	.driver = {
-		.name	= "qcom_scm",
-		.of_match_table = qcom_scm_dt_match,
-		.suppress_bind_attrs = true,
-	},
-	.probe = qcom_scm_probe,
-	.shutdown = qcom_scm_shutdown,
-};
-
-static int __init qcom_scm_init(void)
-{
-	return platform_driver_register(&qcom_scm_driver);
-}
-subsys_initcall(qcom_scm_init);
-
-MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
-MODULE_LICENSE("GPL v2");
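Reviewer note: the driver registers at subsys_initcall() so SCM services come up
before ordinary device drivers, but consumers must still tolerate it arriving
late. The usual consumer-side pattern is to defer probing until
qcom_scm_is_available() reports true; a hedged sketch with a hypothetical
example_probe(), not taken from this patch:

	static int example_probe(struct platform_device *pdev)
	{
		/* SCM may not have probed yet; ask the core to retry later. */
		if (!qcom_scm_is_available())
			return -EPROBE_DEFER;

		/* ... safe to issue qcom_scm_*() calls from here on ... */
		return 0;
	}
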
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
deleted file mode 100644
index e6e512bd57..0000000000
--- a/drivers/firmware/qcom_scm.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2010-2015,2019 The Linux Foundation. All rights reserved.
- */
-#ifndef __QCOM_SCM_INT_H
-#define __QCOM_SCM_INT_H
-
-enum qcom_scm_convention {
-	SMC_CONVENTION_UNKNOWN,
-	SMC_CONVENTION_LEGACY,
-	SMC_CONVENTION_ARM_32,
-	SMC_CONVENTION_ARM_64,
-};
-
-extern enum qcom_scm_convention qcom_scm_convention;
-
-#define MAX_QCOM_SCM_ARGS 10
-#define MAX_QCOM_SCM_RETS 3
-
-enum qcom_scm_arg_types {
-	QCOM_SCM_VAL,
-	QCOM_SCM_RO,
-	QCOM_SCM_RW,
-	QCOM_SCM_BUFVAL,
-};
-
-#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
-			   (((a) & 0x3) << 4) | \
-			   (((b) & 0x3) << 6) | \
-			   (((c) & 0x3) << 8) | \
-			   (((d) & 0x3) << 10) | \
-			   (((e) & 0x3) << 12) | \
-			   (((f) & 0x3) << 14) | \
-			   (((g) & 0x3) << 16) | \
-			   (((h) & 0x3) << 18) | \
-			   (((i) & 0x3) << 20) | \
-			   (((j) & 0x3) << 22) | \
-			   ((num) & 0xf))
-
-#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
-
-
-/**
- * struct qcom_scm_desc
- * @arginfo:	Metadata describing the arguments in args[]
- * @args:	The array of arguments for the secure syscall
- */
-struct qcom_scm_desc {
-	u32 svc;
-	u32 cmd;
-	u32 arginfo;
-	u64 args[MAX_QCOM_SCM_ARGS];
-	u32 owner;
-};
-
-/**
- * struct qcom_scm_res
- * @result:	The values returned by the secure syscall
- */
-struct qcom_scm_res {
-	u64 result[MAX_QCOM_SCM_RETS];
-};
-
-int qcom_scm_wait_for_wq_completion(u32 wq_ctx);
-int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending);
-
-#define SCM_SMC_FNID(s, c)	((((s) & 0xFF) << 8) | ((c) & 0xFF))
-extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
-			  enum qcom_scm_convention qcom_convention,
-			  struct qcom_scm_res *res, bool atomic);
-#define scm_smc_call(dev, desc, res, atomic) \
-	__scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
-
-#define SCM_LEGACY_FNID(s, c)	(((s) << 10) | ((c) & 0x3ff))
-extern int scm_legacy_call_atomic(struct device *dev,
-				  const struct qcom_scm_desc *desc,
-				  struct qcom_scm_res *res);
-extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
-			   struct qcom_scm_res *res);
-
-#define QCOM_SCM_SVC_BOOT		0x01
-#define QCOM_SCM_BOOT_SET_ADDR		0x01
-#define QCOM_SCM_BOOT_TERMINATE_PC	0x02
-#define QCOM_SCM_BOOT_SET_DLOAD_MODE	0x10
-#define QCOM_SCM_BOOT_SET_ADDR_MC	0x11
-#define QCOM_SCM_BOOT_SET_REMOTE_STATE	0x0a
-#define QCOM_SCM_FLUSH_FLAG_MASK	0x3
-#define QCOM_SCM_BOOT_MAX_CPUS		4
-#define QCOM_SCM_BOOT_MC_FLAG_AARCH64	BIT(0)
-#define QCOM_SCM_BOOT_MC_FLAG_COLDBOOT	BIT(1)
-#define QCOM_SCM_BOOT_MC_FLAG_WARMBOOT	BIT(2)
-
-#define QCOM_SCM_SVC_PIL		0x02
-#define QCOM_SCM_PIL_PAS_INIT_IMAGE	0x01
-#define QCOM_SCM_PIL_PAS_MEM_SETUP	0x02
-#define QCOM_SCM_PIL_PAS_AUTH_AND_RESET	0x05
-#define QCOM_SCM_PIL_PAS_SHUTDOWN	0x06
-#define QCOM_SCM_PIL_PAS_IS_SUPPORTED	0x07
-#define QCOM_SCM_PIL_PAS_MSS_RESET	0x0a
-
-#define QCOM_SCM_SVC_IO			0x05
-#define QCOM_SCM_IO_READ		0x01
-#define QCOM_SCM_IO_WRITE		0x02
-
-#define QCOM_SCM_SVC_INFO		0x06
-#define QCOM_SCM_INFO_IS_CALL_AVAIL	0x01
-
-#define QCOM_SCM_SVC_MP				0x0c
-#define QCOM_SCM_MP_RESTORE_SEC_CFG		0x02
-#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE	0x03
-#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT	0x04
-#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE	0x05
-#define QCOM_SCM_MP_VIDEO_VAR			0x08
-#define QCOM_SCM_MP_ASSIGN			0x16
-
-#define QCOM_SCM_SVC_OCMEM		0x0f
-#define QCOM_SCM_OCMEM_LOCK_CMD		0x01
-#define QCOM_SCM_OCMEM_UNLOCK_CMD	0x02
-
-#define QCOM_SCM_SVC_ES			0x10	/* Enterprise Security */
-#define QCOM_SCM_ES_INVALIDATE_ICE_KEY	0x03
-#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY	0x04
-
-#define QCOM_SCM_SVC_HDCP		0x11
-#define QCOM_SCM_HDCP_INVOKE		0x01
-
-#define QCOM_SCM_SVC_LMH			0x13
-#define QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE	0x01
-#define QCOM_SCM_LMH_LIMIT_DCVSH		0x10
-
-#define QCOM_SCM_SVC_SMMU_PROGRAM		0x15
-#define QCOM_SCM_SMMU_PT_FORMAT			0x01
-#define QCOM_SCM_SMMU_CONFIG_ERRATA1		0x03
-#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL	0x02
-
-#define QCOM_SCM_SVC_WAITQ			0x24
-#define QCOM_SCM_WAITQ_RESUME			0x02
-#define QCOM_SCM_WAITQ_GET_WQ_CTX		0x03
-
-/* common error codes */
-#define QCOM_SCM_V2_EBUSY	-12
-#define QCOM_SCM_ENOMEM		-5
-#define QCOM_SCM_EOPNOTSUPP	-4
-#define QCOM_SCM_EINVAL_ADDR	-3
-#define QCOM_SCM_EINVAL_ARG	-2
-#define QCOM_SCM_ERROR		-1
-#define QCOM_SCM_INTERRUPTED	1
-#define QCOM_SCM_WAITQ_SLEEP	2
-
-static inline int qcom_scm_remap_error(int err)
-{
-	switch (err) {
-	case QCOM_SCM_ERROR:
-		return -EIO;
-	case QCOM_SCM_EINVAL_ADDR:
-	case QCOM_SCM_EINVAL_ARG:
-		return -EINVAL;
-	case QCOM_SCM_EOPNOTSUPP:
-		return -EOPNOTSUPP;
-	case QCOM_SCM_ENOMEM:
-		return -ENOMEM;
-	case QCOM_SCM_V2_EBUSY:
-		return -EBUSY;
-	}
-	return -EINVAL;
-}
-
-#endif
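Reviewer note: the QCOM_SCM_ARGS_IMPL macro in the header above packs the
argument count into bits [3:0] of arginfo and two type bits per argument
starting at bit 4. A worked example, computed by hand from the macro itself
rather than taken from this patch:

	/* QCOM_SCM_ARGS(2, QCOM_SCM_RO, QCOM_SCM_VAL):
	 *   num = 2                -> bits [3:0] = 0x02
	 *   a = QCOM_SCM_RO  (0x1) -> bits [5:4] = 0x10
	 *   b = QCOM_SCM_VAL (0x0) -> bits [7:6] = 0x00
	 * so arginfo == 0x12.
	 */
	static_assert(QCOM_SCM_ARGS(2, QCOM_SCM_RO, QCOM_SCM_VAL) == 0x12);
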
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index a69399a6b7..1448f61173 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void)
 /* arch-specific ctrl & data register offsets are not available in ACPI, DT */
 #if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
-# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
+# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV))
 #  define FW_CFG_CTRL_OFF 0x08
 #  define FW_CFG_DATA_OFF 0x00
 #  define FW_CFG_DMA_OFF 0x10
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index f66efaa519..4cd290a60f 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -378,6 +378,7 @@ EXPORT_SYMBOL_GPL(rpi_firmware_get);
 
 /**
  * devm_rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @dev: The firmware device structure
  * @firmware_node: Pointer to the firmware Device Tree node.
  *
  * Returns NULL is the firmware device is not ready.
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 6dfe3d3410..bbcdd9fed3 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -610,7 +610,7 @@ static int debugfs_show(struct seq_file *m, void *p)
 	}
 
 	len = strlen(filename);
-	strncpy(namevirt, filename, namesize);
+	strscpy_pad(namevirt, filename, namesize);
 
 	err = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
 			       &nbytes);
@@ -661,7 +661,7 @@ static ssize_t debugfs_store(struct file *file, const char __user *buf,
 	}
 
 	len = strlen(filename);
-	strncpy(namevirt, filename, namesize);
+	strscpy_pad(namevirt, filename, namesize);
 
 	if (copy_from_user(datavirt, buf, count)) {
 		err = -EFAULT;
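Reviewer note: unlike strncpy(), strscpy_pad() guarantees NUL-termination and
zero-fills the rest of the destination, which matters when the buffer is later
handed to firmware. A stand-alone sketch of the difference, with illustrative
values rather than the driver's own buffers:

	char buf[8];

	/* strncpy: leaves buf WITHOUT a terminator when the source
	 * exactly fills or overflows the buffer. */
	strncpy(buf, "12345678", sizeof(buf));

	/* strscpy_pad: always terminates, zero-pads the tail, and
	 * returns -E2BIG when the source had to be truncated. */
	if (strscpy_pad(buf, "12345678", sizeof(buf)) == -E2BIG)
		;	/* source was truncated to "1234567" */
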
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 3b4c9355cb..8b9a2556de 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -16,7 +16,10 @@
 #include <linux/kernel.h>
 #include <linux/mailbox_client.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/soc/ti/ti-msgmgr.h>
@@ -472,7 +475,7 @@ static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
 	ver->abi_major = rev_info->abi_major;
 	ver->abi_minor = rev_info->abi_minor;
 	ver->firmware_revision = rev_info->firmware_revision;
-	strncpy(ver->firmware_description, rev_info->firmware_description,
+	strscpy(ver->firmware_description, rev_info->firmware_description,
 		sizeof(ver->firmware_description));
 
 fail:
@@ -2873,7 +2876,6 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
 {
 	struct device_node *ti_sci_np;
-	struct list_head *p;
 	struct ti_sci_handle *handle = NULL;
 	struct ti_sci_info *info;
 
@@ -2888,8 +2890,7 @@ const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
 	}
 
 	mutex_lock(&ti_sci_list_mutex);
-	list_for_each(p, &ti_sci_list) {
-		info = list_entry(p, struct ti_sci_info, node);
+	list_for_each_entry(info, &ti_sci_list, node) {
 		if (ti_sci_np == info->dev->of_node) {
 			handle = &info->handle;
 			info->users++;
@@ -2999,7 +3000,6 @@ const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
 	struct ti_sci_handle *handle = NULL;
 	struct device_node *ti_sci_np;
 	struct ti_sci_info *info;
-	struct list_head *p;
 
 	if (!np) {
 		pr_err("I need a device pointer\n");
@@ -3011,8 +3011,7 @@ const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
 		return ERR_PTR(-ENODEV);
 
 	mutex_lock(&ti_sci_list_mutex);
-	list_for_each(p, &ti_sci_list) {
-		info = list_entry(p, struct ti_sci_info, node);
+	list_for_each_entry(info, &ti_sci_list, node) {
 		if (ti_sci_np == info->dev->of_node) {
 			handle = &info->handle;
 			info->users++;
@@ -3297,7 +3296,6 @@ MODULE_DEVICE_TABLE(of, ti_sci_of_match);
 static int ti_sci_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	const struct of_device_id *of_id;
 	const struct ti_sci_desc *desc;
 	struct ti_sci_xfer *xfer;
 	struct ti_sci_info *info = NULL;
@@ -3308,12 +3306,7 @@ static int ti_sci_probe(struct platform_device *pdev)
 	int reboot = 0;
 	u32 h_id;
 
-	of_id = of_match_device(ti_sci_of_match, dev);
-	if (!of_id) {
-		dev_err(dev, "OF data missing\n");
-		return -EINVAL;
-	}
-	desc = of_id->data;
+	desc = device_get_match_data(dev);
 
 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
 	if (!info)
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 4cc1ac7f76..b0d22d4455 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -226,7 +226,6 @@ static int do_feature_check_call(const u32 api_id)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
 
 /**
  * zynqmp_pm_feature() - Check whether given feature is supported or not and
@@ -246,6 +245,7 @@ int zynqmp_pm_feature(const u32 api_id)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
 
 /**
  * zynqmp_pm_is_function_supported() - Check whether given IOCTL/QUERY function
-- 
cgit v1.2.3
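
Reviewer note on the ti_sci probe change above: device_get_match_data() looks up
the matched of_device_id and returns its .data pointer in one step, replacing
the of_match_device() boilerplate. Since probe only runs after a successful
match and every ti_sci_of_match entry carries a .data pointer, the old NULL
check becomes unnecessary. The shape of the conversion, reduced to a sketch
with a hypothetical my_of_match table:

	/* Before: */
	const struct of_device_id *of_id = of_match_device(my_of_match, dev);
	if (!of_id)
		return -EINVAL;
	desc = of_id->data;

	/* After (drivers whose tables may lack .data should still check): */
	desc = device_get_match_data(dev);
	if (!desc)
		return -EINVAL;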