Diffstat
25 files changed, 13711 insertions, 0 deletions
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
new file mode 100644
index 000000000..a14f65444
--- /dev/null
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -0,0 +1,164 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "ARM System Control and Management Interface Protocol"
+
+config ARM_SCMI_PROTOCOL
+	tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
+	depends on ARM || ARM64 || COMPILE_TEST
+	help
+	  The ARM System Control and Management Interface (SCMI) protocol is a
+	  set of operating system-independent software interfaces used in
+	  system management. SCMI is extensible and currently provides
+	  interfaces for: discovery and self-description of the interfaces it
+	  supports; power domain management, which is the ability to place a
+	  given device or domain into the various power-saving states that it
+	  supports; performance management, which is the ability to control
+	  the performance of a domain that is composed of compute engines such
+	  as application processors and other accelerators; clock management,
+	  which is the ability to set and inquire rates on platform managed
+	  clocks; and sensor management, which is the ability to read sensor
+	  data and be notified of sensor value changes.
+
+	  This protocol library provides an interface for all the client
+	  drivers making use of the features offered by SCMI.
+
+if ARM_SCMI_PROTOCOL
+
+config ARM_SCMI_HAVE_TRANSPORT
+	bool
+	help
+	  This declares whether at least one SCMI transport has been configured.
+	  Used to trigger a build bug when trying to build SCMI without any
+	  configured transport.
+
+config ARM_SCMI_HAVE_SHMEM
+	bool
+	help
+	  This declares whether a shared memory based transport for SCMI is
+	  available.
+
+config ARM_SCMI_HAVE_MSG
+	bool
+	help
+	  This declares whether a message passing based transport for SCMI is
+	  available.
+
+config ARM_SCMI_TRANSPORT_MAILBOX
+	bool "SCMI transport based on Mailbox"
+	depends on MAILBOX
+	select ARM_SCMI_HAVE_TRANSPORT
+	select ARM_SCMI_HAVE_SHMEM
+	default y
+	help
+	  Enable mailbox based transport for SCMI.
+
+	  If you want the ARM SCMI PROTOCOL stack to include support for a
+	  transport based on mailboxes, answer Y.
+
+config ARM_SCMI_TRANSPORT_OPTEE
+	bool "SCMI transport based on OP-TEE service"
+	depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL
+	select ARM_SCMI_HAVE_TRANSPORT
+	select ARM_SCMI_HAVE_SHMEM
+	select ARM_SCMI_HAVE_MSG
+	default y
+	help
+	  This enables the OP-TEE service based transport for SCMI.
+
+	  If you want the ARM SCMI PROTOCOL stack to include support for a
+	  transport based on the OP-TEE SCMI service, answer Y.
+
+config ARM_SCMI_TRANSPORT_SMC
+	bool "SCMI transport based on SMC"
+	depends on HAVE_ARM_SMCCC_DISCOVERY
+	select ARM_SCMI_HAVE_TRANSPORT
+	select ARM_SCMI_HAVE_SHMEM
+	default y
+	help
+	  Enable SMC based transport for SCMI.
+
+	  If you want the ARM SCMI PROTOCOL stack to include support for a
+	  transport based on SMC, answer Y.
+
+config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE
+	bool "Enable atomic mode support for SCMI SMC transport"
+	depends on ARM_SCMI_TRANSPORT_SMC
+	help
+	  Enable support for atomic operations on the SCMI SMC based transport.
+
+	  If you want the SCMI SMC based transport to operate in atomic mode,
+	  avoiding any kind of sleeping behaviour for selected transactions on
+	  the TX path, answer Y.
+
+	  Enabling atomic mode operations allows any SCMI driver using this
+	  transport to optionally ask for atomic SCMI transactions and operate
+	  in atomic context too, at the price of using a number of busy-waiting
+	  primitives all over instead. If unsure, say N.
+
+config ARM_SCMI_TRANSPORT_VIRTIO
+	bool "SCMI transport based on VirtIO"
+	depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
+	select ARM_SCMI_HAVE_TRANSPORT
+	select ARM_SCMI_HAVE_MSG
+	help
+	  This enables the virtio based transport for SCMI.
+
+	  If you want the ARM SCMI PROTOCOL stack to include support for a
+	  transport based on VirtIO, answer Y.
+
+config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
+	bool "SCMI VirtIO transport Version 1 compliance"
+	depends on ARM_SCMI_TRANSPORT_VIRTIO
+	default y
+	help
+	  This enforces strict compliance with the VirtIO Version 1
+	  specification.
+
+	  If you want the ARM SCMI VirtIO transport layer to refuse to work
+	  with Legacy VirtIO backends and instead support only VirtIO Version 1
+	  devices (or above), answer Y.
+
+	  If instead you also want to support old Legacy VirtIO backends (like
+	  the ones implemented by kvmtool) and let the core Kernel VirtIO layer
+	  take care of the needed conversions, say N.
+
+config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE
+	bool "Enable atomic mode for SCMI VirtIO transport"
+	depends on ARM_SCMI_TRANSPORT_VIRTIO
+	help
+	  Enable support for atomic operations on the SCMI VirtIO based
+	  transport.
+
+	  If you want the SCMI VirtIO based transport to operate in atomic
+	  mode, avoiding any kind of sleeping behaviour for selected
+	  transactions on the TX path, answer Y.
+
+	  Enabling atomic mode operations allows any SCMI driver using this
+	  transport to optionally ask for atomic SCMI transactions and operate
+	  in atomic context too, at the price of using a number of busy-waiting
+	  primitives all over instead. If unsure, say N.
+
+endif #ARM_SCMI_PROTOCOL
+
+config ARM_SCMI_POWER_DOMAIN
+	tristate "SCMI power domain driver"
+	depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+	default y
+	select PM_GENERIC_DOMAINS if PM
+	help
+	  This enables support for the SCMI power domains which can be
+	  enabled or disabled via the SCP firmware.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called scmi_pm_domain. Note this may be needed early in boot, before
+	  the rootfs is available.
+
+config ARM_SCMI_POWER_CONTROL
+	tristate "SCMI system power control driver"
+	depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+	help
+	  This enables the System Power control logic, which binds system
+	  shutdown or reboot actions to SCMI System Power notifications
+	  generated by SCP firmware.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called scmi_power_control. Note this may be needed early in boot to
+	  catch early shutdown/reboot SCMI requests.
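As a rough illustration of the atomic mode described in the help texts above, a client driver sitting on top of a transport built with one of the *_ATOMIC_ENABLE options can choose the non-sleeping variants of the protocol operations. The sketch below uses the clock protocol's enable/enable_atomic ops exactly as they are added later in this series; the wrapper function, its parameters and the calling context are hypothetical and only meant as an example.

    /*
     * Hypothetical consumer-side helper (not part of this patch): pick the
     * polling, non-sleeping clock operation when the caller cannot sleep,
     * and the completion-based one otherwise.
     */
    #include <linux/scmi_protocol.h>

    static int example_scmi_clk_ungate(const struct scmi_clk_proto_ops *clk_ops,
                                       const struct scmi_protocol_handle *ph,
                                       u32 clk_id, bool in_atomic)
    {
            if (in_atomic)
                    /* Busy-waits for completion, never sleeps */
                    return clk_ops->enable_atomic(ph, clk_id);

            /* May sleep while waiting for the SCMI platform to answer */
            return clk_ops->enable(ph, clk_id);
    }

In a real driver both pointers would be obtained through the SCMI handle's protocol acquisition interface at probe time, and whether atomic transactions are worth requesting is left to the individual driver, as the help text notes.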
+ +endmenu diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile new file mode 100644 index 000000000..9ea86f8cc --- /dev/null +++ b/drivers/firmware/arm_scmi/Makefile @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0-only +scmi-bus-y = bus.o +scmi-driver-y = driver.o notify.o +scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o +scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o +scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o +scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o +scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o +scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o +scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o +scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ + $(scmi-transport-y) +obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o +obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o +obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o + +ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) +# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame +# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling +# hooks are inserted via the -pg switch. +CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE) +endif diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c new file mode 100644 index 000000000..a52f084a6 --- /dev/null +++ b/drivers/firmware/arm_scmi/base.c @@ -0,0 +1,428 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Base Protocol + * + * Copyright (C) 2018-2021 ARM Ltd. + */ + +#define pr_fmt(fmt) "SCMI Notifications BASE - " fmt + +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include "common.h" +#include "notify.h" + +#define SCMI_BASE_NUM_SOURCES 1 +#define SCMI_BASE_MAX_CMD_ERR_COUNT 1024 + +enum scmi_base_protocol_cmd { + BASE_DISCOVER_VENDOR = 0x3, + BASE_DISCOVER_SUB_VENDOR = 0x4, + BASE_DISCOVER_IMPLEMENT_VERSION = 0x5, + BASE_DISCOVER_LIST_PROTOCOLS = 0x6, + BASE_DISCOVER_AGENT = 0x7, + BASE_NOTIFY_ERRORS = 0x8, + BASE_SET_DEVICE_PERMISSIONS = 0x9, + BASE_SET_PROTOCOL_PERMISSIONS = 0xa, + BASE_RESET_AGENT_CONFIGURATION = 0xb, +}; + +struct scmi_msg_resp_base_attributes { + u8 num_protocols; + u8 num_agents; + __le16 reserved; +}; + +struct scmi_msg_resp_base_discover_agent { + __le32 agent_id; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; +}; + + +struct scmi_msg_base_error_notify { + __le32 event_control; +#define BASE_TP_NOTIFY_ALL BIT(0) +}; + +struct scmi_base_error_notify_payld { + __le32 agent_id; + __le32 error_status; +#define IS_FATAL_ERROR(x) ((x) & BIT(31)) +#define ERROR_CMD_COUNT(x) FIELD_GET(GENMASK(9, 0), (x)) + __le64 msg_reports[SCMI_BASE_MAX_CMD_ERR_COUNT]; +}; + +/** + * scmi_base_attributes_get() - gets the implementation details + * that are associated with the base protocol. + * + * @ph: SCMI protocol handle + * + * Return: 0 on success, else appropriate SCMI error. 
+ */ +static int scmi_base_attributes_get(const struct scmi_protocol_handle *ph) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_base_attributes *attr_info; + struct scmi_revision_info *rev = ph->get_priv(ph); + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, + 0, sizeof(*attr_info), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + attr_info = t->rx.buf; + rev->num_protocols = attr_info->num_protocols; + rev->num_agents = attr_info->num_agents; + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +/** + * scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string. + * + * @ph: SCMI protocol handle + * @sub_vendor: specify true if sub-vendor ID is needed + * + * Return: 0 on success, else appropriate SCMI error. + */ +static int +scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor) +{ + u8 cmd; + int ret, size; + char *vendor_id; + struct scmi_xfer *t; + struct scmi_revision_info *rev = ph->get_priv(ph); + + + if (sub_vendor) { + cmd = BASE_DISCOVER_SUB_VENDOR; + vendor_id = rev->sub_vendor_id; + size = ARRAY_SIZE(rev->sub_vendor_id); + } else { + cmd = BASE_DISCOVER_VENDOR; + vendor_id = rev->vendor_id; + size = ARRAY_SIZE(rev->vendor_id); + } + + ret = ph->xops->xfer_get_init(ph, cmd, 0, size, &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + strscpy(vendor_id, t->rx.buf, size); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +/** + * scmi_base_implementation_version_get() - gets a vendor-specific + * implementation 32-bit version. The format of the version number is + * vendor-specific + * + * @ph: SCMI protocol handle + * + * Return: 0 on success, else appropriate SCMI error. + */ +static int +scmi_base_implementation_version_get(const struct scmi_protocol_handle *ph) +{ + int ret; + __le32 *impl_ver; + struct scmi_xfer *t; + struct scmi_revision_info *rev = ph->get_priv(ph); + + ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_IMPLEMENT_VERSION, + 0, sizeof(*impl_ver), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + impl_ver = t->rx.buf; + rev->impl_ver = le32_to_cpu(*impl_ver); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +/** + * scmi_base_implementation_list_get() - gets the list of protocols it is + * OSPM is allowed to access + * + * @ph: SCMI protocol handle + * @protocols_imp: pointer to hold the list of protocol identifiers + * + * Return: 0 on success, else appropriate SCMI error. + */ +static int +scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, + u8 *protocols_imp) +{ + u8 *list; + int ret, loop; + struct scmi_xfer *t; + __le32 *num_skip, *num_ret; + u32 tot_num_ret = 0, loop_num_ret; + struct device *dev = ph->dev; + struct scmi_revision_info *rev = ph->get_priv(ph); + + ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS, + sizeof(*num_skip), 0, &t); + if (ret) + return ret; + + num_skip = t->tx.buf; + num_ret = t->rx.buf; + list = t->rx.buf + sizeof(*num_ret); + + do { + size_t real_list_sz; + u32 calc_list_sz; + + /* Set the number of protocols to be skipped/already read */ + *num_skip = cpu_to_le32(tot_num_ret); + + ret = ph->xops->do_xfer(ph, t); + if (ret) + break; + + loop_num_ret = le32_to_cpu(*num_ret); + if (!loop_num_ret) + break; + + if (loop_num_ret > rev->num_protocols - tot_num_ret) { + dev_err(dev, + "No. 
Returned protocols > Total protocols.\n"); + break; + } + + if (t->rx.len < (sizeof(u32) * 2)) { + dev_err(dev, "Truncated reply - rx.len:%zd\n", + t->rx.len); + ret = -EPROTO; + break; + } + + real_list_sz = t->rx.len - sizeof(u32); + calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) * + sizeof(u32); + if (calc_list_sz != real_list_sz) { + dev_warn(dev, + "Malformed reply - real_sz:%zd calc_sz:%u (loop_num_ret:%d)\n", + real_list_sz, calc_list_sz, loop_num_ret); + /* + * Bail out if the expected list size is bigger than the + * total payload size of the received reply. + */ + if (calc_list_sz > real_list_sz) { + ret = -EPROTO; + break; + } + } + + for (loop = 0; loop < loop_num_ret; loop++) + protocols_imp[tot_num_ret + loop] = *(list + loop); + + tot_num_ret += loop_num_ret; + + ph->xops->reset_rx_to_maxsz(ph, t); + } while (tot_num_ret < rev->num_protocols); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +/** + * scmi_base_discover_agent_get() - discover the name of an agent + * + * @ph: SCMI protocol handle + * @id: Agent identifier + * @name: Agent identifier ASCII string + * + * An agent id of 0 is reserved to identify the platform itself. + * Generally operating system is represented as "OSPM" + * + * Return: 0 on success, else appropriate SCMI error. + */ +static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph, + int id, char *name) +{ + int ret; + struct scmi_msg_resp_base_discover_agent *agent_info; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT, + sizeof(__le32), sizeof(*agent_info), &t); + if (ret) + return ret; + + put_unaligned_le32(id, t->tx.buf); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + agent_info = t->rx.buf; + strscpy(name, agent_info->name, SCMI_SHORT_NAME_MAX_SIZE); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_base_error_notify(const struct scmi_protocol_handle *ph, + bool enable) +{ + int ret; + u32 evt_cntl = enable ? BASE_TP_NOTIFY_ALL : 0; + struct scmi_xfer *t; + struct scmi_msg_base_error_notify *cfg; + + ret = ph->xops->xfer_get_init(ph, BASE_NOTIFY_ERRORS, + sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->event_control = cpu_to_le32(evt_cntl); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_base_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + ret = scmi_base_error_notify(ph, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] ret:%d\n", evt_id, ret); + + return ret; +} + +static void *scmi_base_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + int i; + const struct scmi_base_error_notify_payld *p = payld; + struct scmi_base_error_report *r = report; + + /* + * BaseError notification payload is variable in size but + * up to a maximum length determined by the struct ponted by p. + * Instead payld_sz is the effective length of this notification + * payload so cannot be greater of the maximum allowed size as + * pointed by p. 
+ */ + if (evt_id != SCMI_EVENT_BASE_ERROR_EVENT || sizeof(*p) < payld_sz) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->fatal = IS_FATAL_ERROR(le32_to_cpu(p->error_status)); + r->cmd_count = ERROR_CMD_COUNT(le32_to_cpu(p->error_status)); + for (i = 0; i < r->cmd_count; i++) + r->reports[i] = le64_to_cpu(p->msg_reports[i]); + *src_id = 0; + + return r; +} + +static const struct scmi_event base_events[] = { + { + .id = SCMI_EVENT_BASE_ERROR_EVENT, + .max_payld_sz = sizeof(struct scmi_base_error_notify_payld), + .max_report_sz = sizeof(struct scmi_base_error_report) + + SCMI_BASE_MAX_CMD_ERR_COUNT * sizeof(u64), + }, +}; + +static const struct scmi_event_ops base_event_ops = { + .set_notify_enabled = scmi_base_set_notify_enabled, + .fill_custom_report = scmi_base_fill_custom_report, +}; + +static const struct scmi_protocol_events base_protocol_events = { + .queue_sz = 4 * SCMI_PROTO_QUEUE_SZ, + .ops = &base_event_ops, + .evts = base_events, + .num_events = ARRAY_SIZE(base_events), + .num_sources = SCMI_BASE_NUM_SOURCES, +}; + +static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph) +{ + int id, ret; + u8 *prot_imp; + u32 version; + char name[SCMI_SHORT_NAME_MAX_SIZE]; + struct device *dev = ph->dev; + struct scmi_revision_info *rev = scmi_revision_area_get(ph); + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + rev->major_ver = PROTOCOL_REV_MAJOR(version), + rev->minor_ver = PROTOCOL_REV_MINOR(version); + ph->set_priv(ph, rev); + + ret = scmi_base_attributes_get(ph); + if (ret) + return ret; + + prot_imp = devm_kcalloc(dev, rev->num_protocols, sizeof(u8), + GFP_KERNEL); + if (!prot_imp) + return -ENOMEM; + + scmi_base_vendor_id_get(ph, false); + scmi_base_vendor_id_get(ph, true); + scmi_base_implementation_version_get(ph); + scmi_base_implementation_list_get(ph, prot_imp); + + scmi_setup_protocol_implemented(ph, prot_imp); + + dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n", + rev->major_ver, rev->minor_ver, rev->vendor_id, + rev->sub_vendor_id, rev->impl_ver); + dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols, + rev->num_agents); + + for (id = 0; id < rev->num_agents; id++) { + scmi_base_discover_agent_get(ph, id, name); + dev_dbg(dev, "Agent %d: %s\n", id, name); + } + + return 0; +} + +static const struct scmi_protocol scmi_base = { + .id = SCMI_PROTOCOL_BASE, + .owner = NULL, + .instance_init = &scmi_base_protocol_init, + .ops = NULL, + .events = &base_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(base, scmi_base) diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c new file mode 100644 index 000000000..35bb70724 --- /dev/null +++ b/drivers/firmware/arm_scmi/bus.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message Protocol bus layer + * + * Copyright (C) 2018-2021 ARM Ltd. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/device.h> + +#include "common.h" + +static DEFINE_IDA(scmi_bus_id); +static DEFINE_IDR(scmi_protocols); +static DEFINE_SPINLOCK(protocol_lock); + +static const struct scmi_device_id * +scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv) +{ + const struct scmi_device_id *id = scmi_drv->id_table; + + if (!id) + return NULL; + + for (; id->protocol_id; id++) + if (id->protocol_id == scmi_dev->protocol_id) { + if (!id->name) + return id; + else if (!strcmp(id->name, scmi_dev->name)) + return id; + } + + return NULL; +} + +static int scmi_dev_match(struct device *dev, struct device_driver *drv) +{ + struct scmi_driver *scmi_drv = to_scmi_driver(drv); + struct scmi_device *scmi_dev = to_scmi_dev(dev); + const struct scmi_device_id *id; + + id = scmi_dev_match_id(scmi_dev, scmi_drv); + if (id) + return 1; + + return 0; +} + +static int scmi_match_by_id_table(struct device *dev, void *data) +{ + struct scmi_device *sdev = to_scmi_dev(dev); + struct scmi_device_id *id_table = data; + + return sdev->protocol_id == id_table->protocol_id && + !strcmp(sdev->name, id_table->name); +} + +struct scmi_device *scmi_child_dev_find(struct device *parent, + int prot_id, const char *name) +{ + struct scmi_device_id id_table; + struct device *dev; + + id_table.protocol_id = prot_id; + id_table.name = name; + + dev = device_find_child(parent, &id_table, scmi_match_by_id_table); + if (!dev) + return NULL; + + return to_scmi_dev(dev); +} + +const struct scmi_protocol *scmi_protocol_get(int protocol_id) +{ + const struct scmi_protocol *proto; + + proto = idr_find(&scmi_protocols, protocol_id); + if (!proto || !try_module_get(proto->owner)) { + pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id); + return NULL; + } + + pr_debug("Found SCMI Protocol 0x%x\n", protocol_id); + + return proto; +} + +void scmi_protocol_put(int protocol_id) +{ + const struct scmi_protocol *proto; + + proto = idr_find(&scmi_protocols, protocol_id); + if (proto) + module_put(proto->owner); +} + +static int scmi_dev_probe(struct device *dev) +{ + struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); + struct scmi_device *scmi_dev = to_scmi_dev(dev); + + if (!scmi_dev->handle) + return -EPROBE_DEFER; + + return scmi_drv->probe(scmi_dev); +} + +static void scmi_dev_remove(struct device *dev) +{ + struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); + struct scmi_device *scmi_dev = to_scmi_dev(dev); + + if (scmi_drv->remove) + scmi_drv->remove(scmi_dev); +} + +static struct bus_type scmi_bus_type = { + .name = "scmi_protocol", + .match = scmi_dev_match, + .probe = scmi_dev_probe, + .remove = scmi_dev_remove, +}; + +int scmi_driver_register(struct scmi_driver *driver, struct module *owner, + const char *mod_name) +{ + int retval; + + if (!driver->probe) + return -EINVAL; + + retval = scmi_protocol_device_request(driver->id_table); + if (retval) + return retval; + + driver->driver.bus = &scmi_bus_type; + driver->driver.name = driver->name; + driver->driver.owner = owner; + driver->driver.mod_name = mod_name; + + retval = driver_register(&driver->driver); + if (!retval) + pr_debug("registered new scmi driver %s\n", driver->name); + + return retval; +} +EXPORT_SYMBOL_GPL(scmi_driver_register); + +void scmi_driver_unregister(struct scmi_driver *driver) +{ + driver_unregister(&driver->driver); + 
scmi_protocol_device_unrequest(driver->id_table); +} +EXPORT_SYMBOL_GPL(scmi_driver_unregister); + +static void scmi_device_release(struct device *dev) +{ + kfree(to_scmi_dev(dev)); +} + +struct scmi_device * +scmi_device_create(struct device_node *np, struct device *parent, int protocol, + const char *name) +{ + int id, retval; + struct scmi_device *scmi_dev; + + scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL); + if (!scmi_dev) + return NULL; + + scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL); + if (!scmi_dev->name) { + kfree(scmi_dev); + return NULL; + } + + id = ida_alloc_min(&scmi_bus_id, 1, GFP_KERNEL); + if (id < 0) { + kfree_const(scmi_dev->name); + kfree(scmi_dev); + return NULL; + } + + scmi_dev->id = id; + scmi_dev->protocol_id = protocol; + scmi_dev->dev.parent = parent; + scmi_dev->dev.of_node = np; + scmi_dev->dev.bus = &scmi_bus_type; + scmi_dev->dev.release = scmi_device_release; + dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); + + retval = device_register(&scmi_dev->dev); + if (retval) + goto put_dev; + + return scmi_dev; +put_dev: + kfree_const(scmi_dev->name); + put_device(&scmi_dev->dev); + ida_free(&scmi_bus_id, id); + return NULL; +} + +void scmi_device_destroy(struct scmi_device *scmi_dev) +{ + kfree_const(scmi_dev->name); + scmi_handle_put(scmi_dev->handle); + ida_free(&scmi_bus_id, scmi_dev->id); + device_unregister(&scmi_dev->dev); +} + +void scmi_device_link_add(struct device *consumer, struct device *supplier) +{ + struct device_link *link; + + link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER); + + WARN_ON(!link); +} + +void scmi_set_handle(struct scmi_device *scmi_dev) +{ + scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); + if (scmi_dev->handle) + scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); +} + +int scmi_protocol_register(const struct scmi_protocol *proto) +{ + int ret; + + if (!proto) { + pr_err("invalid protocol\n"); + return -EINVAL; + } + + if (!proto->instance_init) { + pr_err("missing init for protocol 0x%x\n", proto->id); + return -EINVAL; + } + + spin_lock(&protocol_lock); + ret = idr_alloc(&scmi_protocols, (void *)proto, + proto->id, proto->id + 1, GFP_ATOMIC); + spin_unlock(&protocol_lock); + if (ret != proto->id) { + pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n", + proto->id, ret); + return ret; + } + + pr_debug("Registered SCMI Protocol 0x%x\n", proto->id); + + return 0; +} +EXPORT_SYMBOL_GPL(scmi_protocol_register); + +void scmi_protocol_unregister(const struct scmi_protocol *proto) +{ + spin_lock(&protocol_lock); + idr_remove(&scmi_protocols, proto->id); + spin_unlock(&protocol_lock); + + pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id); + + return; +} +EXPORT_SYMBOL_GPL(scmi_protocol_unregister); + +static int __scmi_devices_unregister(struct device *dev, void *data) +{ + struct scmi_device *scmi_dev = to_scmi_dev(dev); + + scmi_device_destroy(scmi_dev); + return 0; +} + +static void scmi_devices_unregister(void) +{ + bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister); +} + +int __init scmi_bus_init(void) +{ + int retval; + + retval = bus_register(&scmi_bus_type); + if (retval) + pr_err("scmi protocol bus register failed (%d)\n", retval); + + return retval; +} + +void __exit scmi_bus_exit(void) +{ + scmi_devices_unregister(); + bus_unregister(&scmi_bus_type); + ida_destroy(&scmi_bus_id); +} diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c new file mode 100644 index 000000000..96060bf90 --- /dev/null +++ 
b/drivers/firmware/arm_scmi/clock.c @@ -0,0 +1,619 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Clock Protocol + * + * Copyright (C) 2018-2022 ARM Ltd. + */ + +#include <linux/module.h> +#include <linux/limits.h> +#include <linux/sort.h> + +#include "protocols.h" +#include "notify.h" + +enum scmi_clock_protocol_cmd { + CLOCK_ATTRIBUTES = 0x3, + CLOCK_DESCRIBE_RATES = 0x4, + CLOCK_RATE_SET = 0x5, + CLOCK_RATE_GET = 0x6, + CLOCK_CONFIG_SET = 0x7, + CLOCK_NAME_GET = 0x8, + CLOCK_RATE_NOTIFY = 0x9, + CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA, +}; + +struct scmi_msg_resp_clock_protocol_attributes { + __le16 num_clocks; + u8 max_async_req; + u8 reserved; +}; + +struct scmi_msg_resp_clock_attributes { + __le32 attributes; +#define CLOCK_ENABLE BIT(0) +#define SUPPORTS_RATE_CHANGED_NOTIF(x) ((x) & BIT(31)) +#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29)) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; + __le32 clock_enable_latency; +}; + +struct scmi_clock_set_config { + __le32 id; + __le32 attributes; +}; + +struct scmi_msg_clock_describe_rates { + __le32 id; + __le32 rate_index; +}; + +struct scmi_msg_resp_clock_describe_rates { + __le32 num_rates_flags; +#define NUM_RETURNED(x) ((x) & 0xfff) +#define RATE_DISCRETE(x) !((x) & BIT(12)) +#define NUM_REMAINING(x) ((x) >> 16) + struct { + __le32 value_low; + __le32 value_high; + } rate[]; +#define RATE_TO_U64(X) \ +({ \ + typeof(X) x = (X); \ + le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \ +}) +}; + +struct scmi_clock_set_rate { + __le32 flags; +#define CLOCK_SET_ASYNC BIT(0) +#define CLOCK_SET_IGNORE_RESP BIT(1) +#define CLOCK_SET_ROUND_UP BIT(2) +#define CLOCK_SET_ROUND_AUTO BIT(3) + __le32 id; + __le32 value_low; + __le32 value_high; +}; + +struct scmi_msg_resp_set_rate_complete { + __le32 id; + __le32 rate_low; + __le32 rate_high; +}; + +struct scmi_msg_clock_rate_notify { + __le32 clk_id; + __le32 notify_enable; +}; + +struct scmi_clock_rate_notify_payld { + __le32 agent_id; + __le32 clock_id; + __le32 rate_low; + __le32 rate_high; +}; + +struct clock_info { + u32 version; + int num_clocks; + int max_async_req; + atomic_t cur_async_req; + struct scmi_clock_info *clk; +}; + +static enum scmi_clock_protocol_cmd evt_2_cmd[] = { + CLOCK_RATE_NOTIFY, + CLOCK_RATE_CHANGE_REQUESTED_NOTIFY, +}; + +static int +scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph, + struct clock_info *ci) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_clock_protocol_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, + 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + ci->num_clocks = le16_to_cpu(attr->num_clocks); + ci->max_async_req = attr->max_async_req; + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, + u32 clk_id, struct scmi_clock_info *clk, + u32 version) +{ + int ret; + u32 attributes; + struct scmi_xfer *t; + struct scmi_msg_resp_clock_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES, + sizeof(clk_id), sizeof(*attr), &t); + if (ret) + return ret; + + put_unaligned_le32(clk_id, t->tx.buf); + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + u32 latency = 0; + attributes = le32_to_cpu(attr->attributes); + strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); + /* 
clock_enable_latency field is present only since SCMI v3.1 */ + if (PROTOCOL_REV_MAJOR(version) >= 0x2) + latency = le32_to_cpu(attr->clock_enable_latency); + clk->enable_latency = latency ? : U32_MAX; + } + + ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. + */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) { + if (SUPPORTS_EXTENDED_NAMES(attributes)) + ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id, + clk->name, + SCMI_MAX_STR_SIZE); + + if (SUPPORTS_RATE_CHANGED_NOTIF(attributes)) + clk->rate_changed_notifications = true; + if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes)) + clk->rate_change_requested_notifications = true; + } + + return ret; +} + +static int rate_cmp_func(const void *_r1, const void *_r2) +{ + const u64 *r1 = _r1, *r2 = _r2; + + if (*r1 < *r2) + return -1; + else if (*r1 == *r2) + return 0; + else + return 1; +} + +struct scmi_clk_ipriv { + struct device *dev; + u32 clk_id; + struct scmi_clock_info *clk; +}; + +static void iter_clk_describe_prepare_message(void *message, + const unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_clock_describe_rates *msg = message; + const struct scmi_clk_ipriv *p = priv; + + msg->id = cpu_to_le32(p->clk_id); + /* Set the number of rates to be skipped/already read */ + msg->rate_index = cpu_to_le32(desc_index); +} + +static int +iter_clk_describe_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + u32 flags; + struct scmi_clk_ipriv *p = priv; + const struct scmi_msg_resp_clock_describe_rates *r = response; + + flags = le32_to_cpu(r->num_rates_flags); + st->num_remaining = NUM_REMAINING(flags); + st->num_returned = NUM_RETURNED(flags); + p->clk->rate_discrete = RATE_DISCRETE(flags); + + /* Warn about out of spec replies ... */ + if (!p->clk->rate_discrete && + (st->num_returned != 3 || st->num_remaining != 0)) { + dev_warn(p->dev, + "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n", + p->clk->name, st->num_returned, st->num_remaining, + st->rx_len); + + /* + * A known quirk: a triplet is returned but num_returned != 3 + * Check for a safe payload size and fix. 
+ */ + if (st->num_returned != 3 && st->num_remaining == 0 && + st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { + st->num_returned = 3; + st->num_remaining = 0; + } else { + dev_err(p->dev, + "Cannot fix out-of-spec reply !\n"); + return -EPROTO; + } + } + + return 0; +} + +static int +iter_clk_describe_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + int ret = 0; + struct scmi_clk_ipriv *p = priv; + const struct scmi_msg_resp_clock_describe_rates *r = response; + + if (!p->clk->rate_discrete) { + switch (st->desc_index + st->loop_idx) { + case 0: + p->clk->range.min_rate = RATE_TO_U64(r->rate[0]); + break; + case 1: + p->clk->range.max_rate = RATE_TO_U64(r->rate[1]); + break; + case 2: + p->clk->range.step_size = RATE_TO_U64(r->rate[2]); + break; + default: + ret = -EINVAL; + break; + } + } else { + u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx]; + + *rate = RATE_TO_U64(r->rate[st->loop_idx]); + p->clk->list.num_rates++; + } + + return ret; +} + +static int +scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, + struct scmi_clock_info *clk) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_clk_describe_prepare_message, + .update_state = iter_clk_describe_update_state, + .process_response = iter_clk_describe_process_response, + }; + struct scmi_clk_ipriv cpriv = { + .clk_id = clk_id, + .clk = clk, + .dev = ph->dev, + }; + + iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES, + CLOCK_DESCRIBE_RATES, + sizeof(struct scmi_msg_clock_describe_rates), + &cpriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + if (ret) + return ret; + + if (!clk->rate_discrete) { + dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n", + clk->range.min_rate, clk->range.max_rate, + clk->range.step_size); + } else if (clk->list.num_rates) { + sort(clk->list.rates, clk->list.num_rates, + sizeof(clk->list.rates[0]), rate_cmp_func, NULL); + } + + return ret; +} + +static int +scmi_clock_rate_get(const struct scmi_protocol_handle *ph, + u32 clk_id, u64 *value) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET, + sizeof(__le32), sizeof(u64), &t); + if (ret) + return ret; + + put_unaligned_le32(clk_id, t->tx.buf); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *value = get_unaligned_le64(t->rx.buf); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph, + u32 clk_id, u64 rate) +{ + int ret; + u32 flags = 0; + struct scmi_xfer *t; + struct scmi_clock_set_rate *cfg; + struct clock_info *ci = ph->get_priv(ph); + + ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t); + if (ret) + return ret; + + if (ci->max_async_req && + atomic_inc_return(&ci->cur_async_req) < ci->max_async_req) + flags |= CLOCK_SET_ASYNC; + + cfg = t->tx.buf; + cfg->flags = cpu_to_le32(flags); + cfg->id = cpu_to_le32(clk_id); + cfg->value_low = cpu_to_le32(rate & 0xffffffff); + cfg->value_high = cpu_to_le32(rate >> 32); + + if (flags & CLOCK_SET_ASYNC) { + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + struct scmi_msg_resp_set_rate_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->id) == clk_id) + dev_dbg(ph->dev, + "Clk ID %d set async to %llu\n", clk_id, + get_unaligned_le64(&resp->rate_low)); + else + ret = -EPROTO; + } + } else { + ret = ph->xops->do_xfer(ph, t); + } + + if 
(ci->max_async_req) + atomic_dec(&ci->cur_async_req); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, + u32 config, bool atomic) +{ + int ret; + struct scmi_xfer *t; + struct scmi_clock_set_config *cfg; + + ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET, + sizeof(*cfg), 0, &t); + if (ret) + return ret; + + t->hdr.poll_completion = atomic; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(clk_id); + cfg->attributes = cpu_to_le32(config); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id) +{ + return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false); +} + +static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id) +{ + return scmi_clock_config_set(ph, clk_id, 0, false); +} + +static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph, + u32 clk_id) +{ + return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true); +} + +static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph, + u32 clk_id) +{ + return scmi_clock_config_set(ph, clk_id, 0, true); +} + +static int scmi_clock_count_get(const struct scmi_protocol_handle *ph) +{ + struct clock_info *ci = ph->get_priv(ph); + + return ci->num_clocks; +} + +static const struct scmi_clock_info * +scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id) +{ + struct scmi_clock_info *clk; + struct clock_info *ci = ph->get_priv(ph); + + if (clk_id >= ci->num_clocks) + return NULL; + + clk = ci->clk + clk_id; + if (!clk->name[0]) + return NULL; + + return clk; +} + +static const struct scmi_clk_proto_ops clk_proto_ops = { + .count_get = scmi_clock_count_get, + .info_get = scmi_clock_info_get, + .rate_get = scmi_clock_rate_get, + .rate_set = scmi_clock_rate_set, + .enable = scmi_clock_enable, + .disable = scmi_clock_disable, + .enable_atomic = scmi_clock_enable_atomic, + .disable_atomic = scmi_clock_disable_atomic, +}; + +static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph, + u32 clk_id, int message_id, bool enable) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_clock_rate_notify *notify; + + ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->clk_id = cpu_to_le32(clk_id); + notify->notify_enable = enable ? 
cpu_to_le32(BIT(0)) : 0; + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + + if (evt_id >= ARRAY_SIZE(evt_2_cmd)) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_clock_rate_notify_payld *p = payld; + struct scmi_clock_rate_notif_report *r = report; + + if (sizeof(*p) != payld_sz || + (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED && + evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED)) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->clock_id = le32_to_cpu(p->clock_id); + r->rate = get_unaligned_le64(&p->rate_low); + *src_id = r->clock_id; + + return r; +} + +static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct clock_info *ci = ph->get_priv(ph); + + if (!ci) + return -EINVAL; + + return ci->num_clocks; +} + +static const struct scmi_event clk_events[] = { + { + .id = SCMI_EVENT_CLOCK_RATE_CHANGED, + .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld), + .max_report_sz = sizeof(struct scmi_clock_rate_notif_report), + }, + { + .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED, + .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld), + .max_report_sz = sizeof(struct scmi_clock_rate_notif_report), + }, +}; + +static const struct scmi_event_ops clk_event_ops = { + .get_num_sources = scmi_clk_get_num_sources, + .set_notify_enabled = scmi_clk_set_notify_enabled, + .fill_custom_report = scmi_clk_fill_custom_report, +}; + +static const struct scmi_protocol_events clk_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &clk_event_ops, + .evts = clk_events, + .num_events = ARRAY_SIZE(clk_events), +}; + +static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) +{ + u32 version; + int clkid, ret; + struct clock_info *cinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Clock Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL); + if (!cinfo) + return -ENOMEM; + + ret = scmi_clock_protocol_attributes_get(ph, cinfo); + if (ret) + return ret; + + cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks, + sizeof(*cinfo->clk), GFP_KERNEL); + if (!cinfo->clk) + return -ENOMEM; + + for (clkid = 0; clkid < cinfo->num_clocks; clkid++) { + struct scmi_clock_info *clk = cinfo->clk + clkid; + + ret = scmi_clock_attributes_get(ph, clkid, clk, version); + if (!ret) + scmi_clock_describe_rates_get(ph, clkid, clk); + } + + cinfo->version = version; + return ph->set_priv(ph, cinfo); +} + +static const struct scmi_protocol scmi_clock = { + .id = SCMI_PROTOCOL_CLOCK, + .owner = THIS_MODULE, + .instance_init = &scmi_clock_protocol_init, + .ops = &clk_proto_ops, + .events = &clk_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h new file mode 100644 index 000000000..4ac72754c --- /dev/null +++ 
b/drivers/firmware/arm_scmi/common.h @@ -0,0 +1,267 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * System Control and Management Interface (SCMI) Message Protocol + * driver common header file containing some definitions, structures + * and function prototypes used in all the different SCMI protocols. + * + * Copyright (C) 2018-2022 ARM Ltd. + */ +#ifndef _SCMI_COMMON_H +#define _SCMI_COMMON_H + +#include <linux/bitfield.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/hashtable.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/refcount.h> +#include <linux/scmi_protocol.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <asm/unaligned.h> + +#include "protocols.h" +#include "notify.h" + +#define MSG_ID_MASK GENMASK(7, 0) +#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) +#define MSG_TYPE_MASK GENMASK(9, 8) +#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr)) +#define MSG_TYPE_COMMAND 0 +#define MSG_TYPE_DELAYED_RESP 2 +#define MSG_TYPE_NOTIFICATION 3 +#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10) +#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr)) +#define MSG_TOKEN_ID_MASK GENMASK(27, 18) +#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr)) +#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1) + +/* + * Size of @pending_xfers hashtable included in @scmi_xfers_info; ideally, in + * order to minimize space and collisions, this should equal max_msg, i.e. the + * maximum number of in-flight messages on a specific platform, but such value + * is only available at runtime while kernel hashtables are statically sized: + * pick instead as a fixed static size the maximum number of entries that can + * fit the whole table into one 4k page. + */ +#define SCMI_PENDING_XFERS_HT_ORDER_SZ 9 + +/** + * pack_scmi_header() - packs and returns 32-bit header + * + * @hdr: pointer to header containing all the information on message id, + * protocol id, sequence id and type. + * + * Return: 32-bit packed message header to be sent to the platform. + */ +static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) +{ + return FIELD_PREP(MSG_ID_MASK, hdr->id) | + FIELD_PREP(MSG_TYPE_MASK, hdr->type) | + FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) | + FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id); +} + +/** + * unpack_scmi_header() - unpacks and records message and protocol id + * + * @msg_hdr: 32-bit packed message header sent from the platform + * @hdr: pointer to header to fetch message and protocol id. + */ +static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) +{ + hdr->id = MSG_XTRACT_ID(msg_hdr); + hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr); + hdr->type = MSG_XTRACT_TYPE(msg_hdr); +} + +/* + * An helper macro to lookup an xfer from the @pending_xfers hashtable + * using the message sequence number token as a key. 
+ */ +#define XFER_FIND(__ht, __k) \ +({ \ + typeof(__k) k_ = __k; \ + struct scmi_xfer *xfer_ = NULL; \ + \ + hash_for_each_possible((__ht), xfer_, node, k_) \ + if (xfer_->hdr.seq == k_) \ + break; \ + xfer_; \ +}) + +struct scmi_revision_info * +scmi_revision_area_get(const struct scmi_protocol_handle *ph); +int scmi_handle_put(const struct scmi_handle *handle); +void scmi_device_link_add(struct device *consumer, struct device *supplier); +struct scmi_handle *scmi_handle_get(struct device *dev); +void scmi_set_handle(struct scmi_device *scmi_dev); +void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph, + u8 *prot_imp); + +int __init scmi_bus_init(void); +void __exit scmi_bus_exit(void); + +const struct scmi_protocol *scmi_protocol_get(int protocol_id); +void scmi_protocol_put(int protocol_id); + +int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id); +void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); + +/* SCMI Transport */ +/** + * struct scmi_chan_info - Structure representing a SCMI channel information + * + * @dev: Reference to device in the SCMI hierarchy corresponding to this + * channel + * @rx_timeout_ms: The configured RX timeout in milliseconds. + * @handle: Pointer to SCMI entity handle + * @no_completion_irq: Flag to indicate that this channel has no completion + * interrupt mechanism for synchronous commands. + * This can be dynamically set by transports at run-time + * inside their provided .chan_setup(). + * @transport_info: Transport layer related information + */ +struct scmi_chan_info { + struct device *dev; + unsigned int rx_timeout_ms; + struct scmi_handle *handle; + bool no_completion_irq; + void *transport_info; +}; + +/** + * struct scmi_transport_ops - Structure representing a SCMI transport ops + * + * @link_supplier: Optional callback to add link to a supplier device + * @chan_available: Callback to check if channel is available or not + * @chan_setup: Callback to allocate and setup a channel + * @chan_free: Callback to free a channel + * @get_max_msg: Optional callback to provide max_msg dynamically + * Returns the maximum number of messages for the channel type + * (tx or rx) that can be pending simultaneously in the system + * @send_message: Callback to send a message + * @mark_txdone: Callback to mark tx as done + * @fetch_response: Callback to fetch response + * @fetch_notification: Callback to fetch notification + * @clear_channel: Callback to clear a channel + * @poll_done: Callback to poll transfer status + */ +struct scmi_transport_ops { + int (*link_supplier)(struct device *dev); + bool (*chan_available)(struct device *dev, int idx); + int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev, + bool tx); + int (*chan_free)(int id, void *p, void *data); + unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo); + int (*send_message)(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer); + void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *xfer); + void (*fetch_response)(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer); + void (*fetch_notification)(struct scmi_chan_info *cinfo, + size_t max_len, struct scmi_xfer *xfer); + void (*clear_channel)(struct scmi_chan_info *cinfo); + bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); +}; + +int scmi_protocol_device_request(const struct scmi_device_id *id_table); +void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table); +struct scmi_device 
*scmi_child_dev_find(struct device *parent, + int prot_id, const char *name); + +/** + * struct scmi_desc - Description of SoC integration + * + * @transport_init: An optional function that a transport can provide to + * initialize some transport-specific setup during SCMI core + * initialization, so ahead of SCMI core probing. + * @transport_exit: An optional function that a transport can provide to + * de-initialize some transport-specific setup during SCMI core + * de-initialization, so after SCMI core removal. + * @ops: Pointer to the transport specific ops structure + * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) + * @max_msg: Maximum number of messages for a channel type (tx or rx) that can + * be pending simultaneously in the system. May be overridden by the + * get_max_msg op. + * @max_msg_size: Maximum size of data per message that can be handled. + * @force_polling: Flag to force this whole transport to use SCMI core polling + * mechanism instead of completion interrupts even if available. + * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures + * synchronous-command messages are atomically + * completed on .send_message: no need to poll + * actively waiting for a response. + * Used by core internally only when polling is + * selected as a waiting for reply method: i.e. + * if a completion irq was found use that anyway. + * @atomic_enabled: Flag to indicate that this transport, which is assured not + * to sleep anywhere on the TX path, can be used in atomic mode + * when requested. + */ +struct scmi_desc { + int (*transport_init)(void); + void (*transport_exit)(void); + const struct scmi_transport_ops *ops; + int max_rx_timeout_ms; + int max_msg; + int max_msg_size; + const bool force_polling; + const bool sync_cmds_completed_on_ret; + const bool atomic_enabled; +}; + +#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX +extern const struct scmi_desc scmi_mailbox_desc; +#endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC +extern const struct scmi_desc scmi_smc_desc; +#endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO +extern const struct scmi_desc scmi_virtio_desc; +#endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE +extern const struct scmi_desc scmi_optee_desc; +#endif + +void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); +void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); + +/* shmem related declarations */ +struct scmi_shared_mem; + +void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer, struct scmi_chan_info *cinfo); +u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem); +void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer); +void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, + size_t max_len, struct scmi_xfer *xfer); +void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem); +bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer); +bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem); + +/* declarations for message passing transports */ +struct scmi_msg_payld; + +/* Maximum overhead of message w.r.t. 
struct scmi_desc.max_msg_size */ +#define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32)) + +size_t msg_response_size(struct scmi_xfer *xfer); +size_t msg_command_size(struct scmi_xfer *xfer); +void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer); +u32 msg_read_header(struct scmi_msg_payld *msg); +void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, + struct scmi_xfer *xfer); +void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, + size_t max_len, struct scmi_xfer *xfer); + +void scmi_notification_instance_data_set(const struct scmi_handle *handle, + void *priv); +void *scmi_notification_instance_data_get(const struct scmi_handle *handle); +#endif /* _SCMI_COMMON_H */ diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c new file mode 100644 index 000000000..fe06dc193 --- /dev/null +++ b/drivers/firmware/arm_scmi/driver.c @@ -0,0 +1,2698 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message Protocol driver + * + * SCMI Message Protocol is used between the System Control Processor(SCP) + * and the Application Processors(AP). The Message Handling Unit(MHU) + * provides a mechanism for inter-processor communication between SCP's + * Cortex M3 and AP. + * + * SCP offers control and management of the core/cluster power states, + * various power domain DVFS including the core/cluster, certain system + * clocks configuration, thermal sensors and many others. + * + * Copyright (C) 2018-2021 ARM Ltd. + */ + +#include <linux/bitmap.h> +#include <linux/device.h> +#include <linux/export.h> +#include <linux/idr.h> +#include <linux/io.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/kernel.h> +#include <linux/ktime.h> +#include <linux/hashtable.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/processor.h> +#include <linux/refcount.h> +#include <linux/slab.h> + +#include "common.h" +#include "notify.h" + +#define CREATE_TRACE_POINTS +#include <trace/events/scmi.h> + +enum scmi_error_codes { + SCMI_SUCCESS = 0, /* Success */ + SCMI_ERR_SUPPORT = -1, /* Not supported */ + SCMI_ERR_PARAMS = -2, /* Invalid Parameters */ + SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */ + SCMI_ERR_ENTRY = -4, /* Not found */ + SCMI_ERR_RANGE = -5, /* Value out of range */ + SCMI_ERR_BUSY = -6, /* Device busy */ + SCMI_ERR_COMMS = -7, /* Communication Error */ + SCMI_ERR_GENERIC = -8, /* Generic Error */ + SCMI_ERR_HARDWARE = -9, /* Hardware Error */ + SCMI_ERR_PROTOCOL = -10,/* Protocol Error */ +}; + +/* List of all SCMI devices active in system */ +static LIST_HEAD(scmi_list); +/* Protection for the entire list */ +static DEFINE_MUTEX(scmi_list_mutex); +/* Track the unique id for the transfers for debug & profiling purpose */ +static atomic_t transfer_last_id; + +static DEFINE_IDR(scmi_requested_devices); +static DEFINE_MUTEX(scmi_requested_devices_mtx); + +/* Track globally the creation of SCMI SystemPower related devices */ +static bool scmi_syspower_registered; +/* Protect access to scmi_syspower_registered */ +static DEFINE_MUTEX(scmi_syspower_mtx); + +struct scmi_requested_dev { + const struct scmi_device_id *id_table; + struct list_head node; +}; + +/** + * struct scmi_xfers_info - Structure to manage transfer information + * + * @xfer_alloc_table: Bitmap table for allocated messages. + * Index of this bitmap table is also used for message + * sequence identifier. 
+ * @xfer_lock: Protection for message allocation + * @max_msg: Maximum number of messages that can be pending + * @free_xfers: A free list for available to use xfers. It is initialized with + * a number of xfers equal to the maximum allowed in-flight + * messages. + * @pending_xfers: An hashtable, indexed by msg_hdr.seq, used to keep all the + * currently in-flight messages. + */ +struct scmi_xfers_info { + unsigned long *xfer_alloc_table; + spinlock_t xfer_lock; + int max_msg; + struct hlist_head free_xfers; + DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ); +}; + +/** + * struct scmi_protocol_instance - Describe an initialized protocol instance. + * @handle: Reference to the SCMI handle associated to this protocol instance. + * @proto: A reference to the protocol descriptor. + * @gid: A reference for per-protocol devres management. + * @users: A refcount to track effective users of this protocol. + * @priv: Reference for optional protocol private data. + * @ph: An embedded protocol handle that will be passed down to protocol + * initialization code to identify this instance. + * + * Each protocol is initialized independently once for each SCMI platform in + * which is defined by DT and implemented by the SCMI server fw. + */ +struct scmi_protocol_instance { + const struct scmi_handle *handle; + const struct scmi_protocol *proto; + void *gid; + refcount_t users; + void *priv; + struct scmi_protocol_handle ph; +}; + +#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph) + +/** + * struct scmi_info - Structure representing a SCMI instance + * + * @dev: Device pointer + * @desc: SoC description for this instance + * @version: SCMI revision information containing protocol version, + * implementation version and (sub-)vendor identification. + * @handle: Instance of SCMI handle to send to clients + * @tx_minfo: Universal Transmit Message management info + * @rx_minfo: Universal Receive Message management info + * @tx_idr: IDR object to map protocol id to Tx channel info pointer + * @rx_idr: IDR object to map protocol id to Rx channel info pointer + * @protocols: IDR for protocols' instance descriptors initialized for + * this SCMI instance: populated on protocol's first attempted + * usage. + * @protocols_mtx: A mutex to protect protocols instances initialization. + * @protocols_imp: List of protocols implemented, currently maximum of + * scmi_revision_info.num_protocols elements allocated by the + * base protocol + * @active_protocols: IDR storing device_nodes for protocols actually defined + * in the DT and confirmed as implemented by fw. + * @atomic_threshold: Optional system wide DT-configured threshold, expressed + * in microseconds, for atomic operations. + * Only SCMI synchronous commands reported by the platform + * to have an execution latency lesser-equal to the threshold + * should be considered for atomic mode operation: such + * decision is finally left up to the SCMI drivers. + * @notify_priv: Pointer to private data structure specific to notifications. 
+ * @node: List head + * @users: Number of users of this instance + */ +struct scmi_info { + struct device *dev; + const struct scmi_desc *desc; + struct scmi_revision_info version; + struct scmi_handle handle; + struct scmi_xfers_info tx_minfo; + struct scmi_xfers_info rx_minfo; + struct idr tx_idr; + struct idr rx_idr; + struct idr protocols; + /* Ensure mutual exclusive access to protocols instance array */ + struct mutex protocols_mtx; + u8 *protocols_imp; + struct idr active_protocols; + unsigned int atomic_threshold; + void *notify_priv; + struct list_head node; + int users; +}; + +#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle) + +static const int scmi_linux_errmap[] = { + /* better than switch case as long as return value is continuous */ + 0, /* SCMI_SUCCESS */ + -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */ + -EINVAL, /* SCMI_ERR_PARAM */ + -EACCES, /* SCMI_ERR_ACCESS */ + -ENOENT, /* SCMI_ERR_ENTRY */ + -ERANGE, /* SCMI_ERR_RANGE */ + -EBUSY, /* SCMI_ERR_BUSY */ + -ECOMM, /* SCMI_ERR_COMMS */ + -EIO, /* SCMI_ERR_GENERIC */ + -EREMOTEIO, /* SCMI_ERR_HARDWARE */ + -EPROTO, /* SCMI_ERR_PROTOCOL */ +}; + +static inline int scmi_to_linux_errno(int errno) +{ + int err_idx = -errno; + + if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap)) + return scmi_linux_errmap[err_idx]; + return -EIO; +} + +void scmi_notification_instance_data_set(const struct scmi_handle *handle, + void *priv) +{ + struct scmi_info *info = handle_to_scmi_info(handle); + + info->notify_priv = priv; + /* Ensure updated protocol private date are visible */ + smp_wmb(); +} + +void *scmi_notification_instance_data_get(const struct scmi_handle *handle) +{ + struct scmi_info *info = handle_to_scmi_info(handle); + + /* Ensure protocols_private_data has been updated */ + smp_rmb(); + return info->notify_priv; +} + +/** + * scmi_xfer_token_set - Reserve and set new token for the xfer at hand + * + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @xfer: The xfer to act upon + * + * Pick the next unused monotonically increasing token and set it into + * xfer->hdr.seq: picking a monotonically increasing value avoids immediate + * reuse of freshly completed or timed-out xfers, thus mitigating the risk + * of incorrect association of a late and expired xfer with a live in-flight + * transaction, both happening to re-use the same token identifier. + * + * Since platform is NOT required to answer our request in-order we should + * account for a few rare but possible scenarios: + * + * - exactly 'next_token' may be NOT available so pick xfer_id >= next_token + * using find_next_zero_bit() starting from candidate next_token bit + * + * - all tokens ahead upto (MSG_TOKEN_ID_MASK - 1) are used in-flight but we + * are plenty of free tokens at start, so try a second pass using + * find_next_zero_bit() and starting from 0. + * + * X = used in-flight + * + * Normal + * ------ + * + * |- xfer_id picked + * -----------+---------------------------------------------------------- + * | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X| + * ---------------------------------------------------------------------- + * ^ + * |- next_token + * + * Out-of-order pending at start + * ----------------------------- + * + * |- xfer_id picked, last_token fixed + * -----+---------------------------------------------------------------- + * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... 
...|X| | + * ---------------------------------------------------------------------- + * ^ + * |- next_token + * + * + * Out-of-order pending at end + * --------------------------- + * + * |- xfer_id picked, last_token fixed + * -----+---------------------------------------------------------------- + * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X| + * ---------------------------------------------------------------------- + * ^ + * |- next_token + * + * Context: Assumes to be called with @xfer_lock already acquired. + * + * Return: 0 on Success or error + */ +static int scmi_xfer_token_set(struct scmi_xfers_info *minfo, + struct scmi_xfer *xfer) +{ + unsigned long xfer_id, next_token; + + /* + * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1] + * using the pre-allocated transfer_id as a base. + * Note that the global transfer_id is shared across all message types + * so there could be holes in the allocated set of monotonic sequence + * numbers, but that is going to limit the effectiveness of the + * mitigation only in very rare limit conditions. + */ + next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1)); + + /* Pick the next available xfer_id >= next_token */ + xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, + MSG_TOKEN_MAX, next_token); + if (xfer_id == MSG_TOKEN_MAX) { + /* + * After heavily out-of-order responses, there are no free + * tokens ahead, but only at start of xfer_alloc_table so + * try again from the beginning. + */ + xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, + MSG_TOKEN_MAX, 0); + /* + * Something is wrong if we got here since there can be a + * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages + * but we have not found any free token [0, MSG_TOKEN_MAX - 1]. + */ + if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX)) + return -ENOMEM; + } + + /* Update +/- last_token accordingly if we skipped some hole */ + if (xfer_id != next_token) + atomic_add((int)(xfer_id - next_token), &transfer_last_id); + + /* Set in-flight */ + set_bit(xfer_id, minfo->xfer_alloc_table); + xfer->hdr.seq = (u16)xfer_id; + + return 0; +} + +/** + * scmi_xfer_token_clear - Release the token + * + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @xfer: The xfer to act upon + */ +static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo, + struct scmi_xfer *xfer) +{ + clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); +} + +/** + * scmi_xfer_get() - Allocate one message + * + * @handle: Pointer to SCMI entity handle + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @set_pending: If true a monotonic token is picked and the xfer is added to + * the pending hash table. + * + * Helper function which is used by various message functions that are + * exposed to clients of this driver for allocating a message traffic event. + * + * Picks an xfer from the free list @free_xfers (if any available) and, if + * required, sets a monotonically increasing token and stores the inflight xfer + * into the @pending_xfers hashtable for later retrieval. + * + * The successfully initialized xfer is refcounted. + * + * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and + * @free_xfers. + * + * Return: 0 if all went fine, else corresponding error. 
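+ *
+ * A minimal caller-side sketch of the allocate/release pairing described
+ * above (illustrative only, not a snippet from this driver; @minfo is
+ * assumed to have been set up beforehand by __scmi_xfer_info_init()):
+ *
+ *	xfer = scmi_xfer_get(handle, minfo, true);
+ *	if (IS_ERR(xfer))
+ *		return PTR_ERR(xfer);
+ *	... fill xfer->tx.buf, send and wait ...
+ *	__scmi_xfer_put(minfo, xfer);
+ *
+ * Note that the return value is in fact a pointer to be checked with
+ * IS_ERR()/PTR_ERR() rather than a plain integer error code.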
+ */ +static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle, + struct scmi_xfers_info *minfo, + bool set_pending) +{ + int ret; + unsigned long flags; + struct scmi_xfer *xfer; + + spin_lock_irqsave(&minfo->xfer_lock, flags); + if (hlist_empty(&minfo->free_xfers)) { + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + return ERR_PTR(-ENOMEM); + } + + /* grab an xfer from the free_list */ + xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node); + hlist_del_init(&xfer->node); + + /* + * Allocate transfer_id early so that can be used also as base for + * monotonic sequence number generation if needed. + */ + xfer->transfer_id = atomic_inc_return(&transfer_last_id); + + if (set_pending) { + /* Pick and set monotonic token */ + ret = scmi_xfer_token_set(minfo, xfer); + if (!ret) { + hash_add(minfo->pending_xfers, &xfer->node, + xfer->hdr.seq); + xfer->pending = true; + } else { + dev_err(handle->dev, + "Failed to get monotonic token %d\n", ret); + hlist_add_head(&xfer->node, &minfo->free_xfers); + xfer = ERR_PTR(ret); + } + } + + if (!IS_ERR(xfer)) { + refcount_set(&xfer->users, 1); + atomic_set(&xfer->busy, SCMI_XFER_FREE); + } + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + return xfer; +} + +/** + * __scmi_xfer_put() - Release a message + * + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @xfer: message that was reserved by scmi_xfer_get + * + * After refcount check, possibly release an xfer, clearing the token slot, + * removing xfer from @pending_xfers and putting it back into free_xfers. + * + * This holds a spinlock to maintain integrity of internal data structures. + */ +static void +__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) +{ + unsigned long flags; + + spin_lock_irqsave(&minfo->xfer_lock, flags); + if (refcount_dec_and_test(&xfer->users)) { + if (xfer->pending) { + scmi_xfer_token_clear(minfo, xfer); + hash_del(&xfer->node); + xfer->pending = false; + } + hlist_add_head(&xfer->node, &minfo->free_xfers); + } + spin_unlock_irqrestore(&minfo->xfer_lock, flags); +} + +/** + * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id + * + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @xfer_id: Token ID to lookup in @pending_xfers + * + * Refcounting is untouched. + * + * Context: Assumes to be called with @xfer_lock already acquired. + * + * Return: A valid xfer on Success or error otherwise + */ +static struct scmi_xfer * +scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id) +{ + struct scmi_xfer *xfer = NULL; + + if (test_bit(xfer_id, minfo->xfer_alloc_table)) + xfer = XFER_FIND(minfo->pending_xfers, xfer_id); + + return xfer ?: ERR_PTR(-EINVAL); +} + +/** + * scmi_msg_response_validate - Validate message type against state of related + * xfer + * + * @cinfo: A reference to the channel descriptor. + * @msg_type: Message type to check + * @xfer: A reference to the xfer to validate against @msg_type + * + * This function checks if @msg_type is congruent with the current state of + * a pending @xfer; if an asynchronous delayed response is received before the + * related synchronous response (Out-of-Order Delayed Response) the missing + * synchronous response is assumed to be OK and completed, carrying on with the + * Delayed Response: this is done to address the case in which the underlying + * SCMI transport can deliver such out-of-order responses. + * + * Context: Assumes to be called with xfer->lock already acquired. 
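+ *
+ * As a quick reference, the (xfer->state, msg_type) combinations accepted by
+ * the code below are (recap only, not an additional rule):
+ *
+ *	SCMI_XFER_SENT_OK  + MSG_TYPE_COMMAND       --> regular response
+ *	SCMI_XFER_SENT_OK  + MSG_TYPE_DELAYED_RESP  --> OoO delayed response
+ *	                                                (response assumed OK)
+ *	SCMI_XFER_RESP_OK  + MSG_TYPE_DELAYED_RESP  --> delayed response
+ *	SCMI_XFER_DRESP_OK + anything               --> rejected (-EINVAL)
+ *
+ * with any MSG_TYPE_DELAYED_RESP rejected up-front when no xfer->async_done
+ * completion was registered for the transaction.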
+ * + * Return: 0 on Success, error otherwise + */ +static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo, + u8 msg_type, + struct scmi_xfer *xfer) +{ + /* + * Even if a response was indeed expected on this slot at this point, + * a buggy platform could wrongly reply feeding us an unexpected + * delayed response we're not prepared to handle: bail-out safely + * blaming firmware. + */ + if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) { + dev_err(cinfo->dev, + "Delayed Response for %d not expected! Buggy F/W ?\n", + xfer->hdr.seq); + return -EINVAL; + } + + switch (xfer->state) { + case SCMI_XFER_SENT_OK: + if (msg_type == MSG_TYPE_DELAYED_RESP) { + /* + * Delayed Response expected but delivered earlier. + * Assume message RESPONSE was OK and skip state. + */ + xfer->hdr.status = SCMI_SUCCESS; + xfer->state = SCMI_XFER_RESP_OK; + complete(&xfer->done); + dev_warn(cinfo->dev, + "Received valid OoO Delayed Response for %d\n", + xfer->hdr.seq); + } + break; + case SCMI_XFER_RESP_OK: + if (msg_type != MSG_TYPE_DELAYED_RESP) + return -EINVAL; + break; + case SCMI_XFER_DRESP_OK: + /* No further message expected once in SCMI_XFER_DRESP_OK */ + return -EINVAL; + } + + return 0; +} + +/** + * scmi_xfer_state_update - Update xfer state + * + * @xfer: A reference to the xfer to update + * @msg_type: Type of message being processed. + * + * Note that this message is assumed to have been already successfully validated + * by @scmi_msg_response_validate(), so here we just update the state. + * + * Context: Assumes to be called on an xfer exclusively acquired using the + * busy flag. + */ +static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type) +{ + xfer->hdr.type = msg_type; + + /* Unknown command types were already discarded earlier */ + if (xfer->hdr.type == MSG_TYPE_COMMAND) + xfer->state = SCMI_XFER_RESP_OK; + else + xfer->state = SCMI_XFER_DRESP_OK; +} + +static bool scmi_xfer_acquired(struct scmi_xfer *xfer) +{ + int ret; + + ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY); + + return ret == SCMI_XFER_FREE; +} + +/** + * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer + * + * @cinfo: A reference to the channel descriptor. + * @msg_hdr: A message header to use as lookup key + * + * When a valid xfer is found for the sequence number embedded in the provided + * msg_hdr, reference counting is properly updated and exclusive access to this + * xfer is granted till released with @scmi_xfer_command_release. + * + * Return: A valid @xfer on Success or error otherwise. + */ +static inline struct scmi_xfer * +scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) +{ + int ret; + unsigned long flags; + struct scmi_xfer *xfer; + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct scmi_xfers_info *minfo = &info->tx_minfo; + u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); + u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr); + + /* Are we even expecting this? 
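+	 * i.e. is an xfer with this token still marked in the allocation
+	 * bitmap and tracked in the pending_xfers hashtable ?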
*/ + spin_lock_irqsave(&minfo->xfer_lock, flags); + xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id); + if (IS_ERR(xfer)) { + dev_err(cinfo->dev, + "Message for %d type %d is not expected!\n", + xfer_id, msg_type); + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + return xfer; + } + refcount_inc(&xfer->users); + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + spin_lock_irqsave(&xfer->lock, flags); + ret = scmi_msg_response_validate(cinfo, msg_type, xfer); + /* + * If a pending xfer was found which was also in a congruent state with + * the received message, acquire exclusive access to it setting the busy + * flag. + * Spins only on the rare limit condition of concurrent reception of + * RESP and DRESP for the same xfer. + */ + if (!ret) { + spin_until_cond(scmi_xfer_acquired(xfer)); + scmi_xfer_state_update(xfer, msg_type); + } + spin_unlock_irqrestore(&xfer->lock, flags); + + if (ret) { + dev_err(cinfo->dev, + "Invalid message type:%d for %d - HDR:0x%X state:%d\n", + msg_type, xfer_id, msg_hdr, xfer->state); + /* On error the refcount incremented above has to be dropped */ + __scmi_xfer_put(minfo, xfer); + xfer = ERR_PTR(-EINVAL); + } + + return xfer; +} + +static inline void scmi_xfer_command_release(struct scmi_info *info, + struct scmi_xfer *xfer) +{ + atomic_set(&xfer->busy, SCMI_XFER_FREE); + __scmi_xfer_put(&info->tx_minfo, xfer); +} + +static inline void scmi_clear_channel(struct scmi_info *info, + struct scmi_chan_info *cinfo) +{ + if (info->desc->ops->clear_channel) + info->desc->ops->clear_channel(cinfo); +} + +static inline bool is_polling_required(struct scmi_chan_info *cinfo, + struct scmi_info *info) +{ + return cinfo->no_completion_irq || info->desc->force_polling; +} + +static inline bool is_transport_polling_capable(struct scmi_info *info) +{ + return info->desc->ops->poll_done || + info->desc->sync_cmds_completed_on_ret; +} + +static inline bool is_polling_enabled(struct scmi_chan_info *cinfo, + struct scmi_info *info) +{ + return is_polling_required(cinfo, info) && + is_transport_polling_capable(info); +} + +static void scmi_handle_notification(struct scmi_chan_info *cinfo, + u32 msg_hdr, void *priv) +{ + struct scmi_xfer *xfer; + struct device *dev = cinfo->dev; + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct scmi_xfers_info *minfo = &info->rx_minfo; + ktime_t ts; + + ts = ktime_get_boottime(); + xfer = scmi_xfer_get(cinfo->handle, minfo, false); + if (IS_ERR(xfer)) { + dev_err(dev, "failed to get free message slot (%ld)\n", + PTR_ERR(xfer)); + scmi_clear_channel(info, cinfo); + return; + } + + unpack_scmi_header(msg_hdr, &xfer->hdr); + if (priv) + /* Ensure order between xfer->priv store and following ops */ + smp_store_mb(xfer->priv, priv); + info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, + xfer); + + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + + scmi_notify(cinfo->handle, xfer->hdr.protocol_id, + xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); + + trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + MSG_TYPE_NOTIFICATION); + + __scmi_xfer_put(minfo, xfer); + + scmi_clear_channel(info, cinfo); +} + +static void scmi_handle_response(struct scmi_chan_info *cinfo, + u32 msg_hdr, void *priv) +{ + struct scmi_xfer *xfer; + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + + xfer = scmi_xfer_command_acquire(cinfo, msg_hdr); + if (IS_ERR(xfer)) { + if (MSG_XTRACT_TYPE(msg_hdr) == 
MSG_TYPE_DELAYED_RESP) + scmi_clear_channel(info, cinfo); + return; + } + + /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */ + if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) + xfer->rx.len = info->desc->max_msg_size; + + if (priv) + /* Ensure order between xfer->priv store and following ops */ + smp_store_mb(xfer->priv, priv); + info->desc->ops->fetch_response(cinfo, xfer); + + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? + "DLYD" : "RESP", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + + trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + xfer->hdr.type); + + if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { + scmi_clear_channel(info, cinfo); + complete(xfer->async_done); + } else { + complete(&xfer->done); + } + + scmi_xfer_command_release(info, xfer); +} + +/** + * scmi_rx_callback() - callback for receiving messages + * + * @cinfo: SCMI channel info + * @msg_hdr: Message header + * @priv: Transport specific private data. + * + * Processes one received message to appropriate transfer information and + * signals completion of the transfer. + * + * NOTE: This function will be invoked in IRQ context, hence should be + * as optimal as possible. + */ +void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) +{ + u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); + + switch (msg_type) { + case MSG_TYPE_NOTIFICATION: + scmi_handle_notification(cinfo, msg_hdr, priv); + break; + case MSG_TYPE_COMMAND: + case MSG_TYPE_DELAYED_RESP: + scmi_handle_response(cinfo, msg_hdr, priv); + break; + default: + WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type); + break; + } +} + +/** + * xfer_put() - Release a transmit message + * + * @ph: Pointer to SCMI protocol handle + * @xfer: message that was reserved by xfer_get_init + */ +static void xfer_put(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer) +{ + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + struct scmi_info *info = handle_to_scmi_info(pi->handle); + + __scmi_xfer_put(&info->tx_minfo, xfer); +} + +static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer, ktime_t stop) +{ + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + + /* + * Poll also on xfer->done so that polling can be forcibly terminated + * in case of out-of-order receptions of delayed responses + */ + return info->desc->ops->poll_done(cinfo, xfer) || + try_wait_for_completion(&xfer->done) || + ktime_after(ktime_get(), stop); +} + +/** + * scmi_wait_for_message_response - An helper to group all the possible ways of + * waiting for a synchronous message response. + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being waited for. + * + * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on + * configuration flags like xfer->hdr.poll_completion. + * + * Return: 0 on Success, error otherwise. 
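+ *
+ * As a quick reference, the strategy selected below boils down to
+ * (recap of the code, for reference only):
+ *
+ *	poll_completion && !sync_cmds_completed_on_ret --> busy-wait on
+ *	                                                   .poll_done()
+ *	poll_completion && sync_cmds_completed_on_ret  --> reply is already
+ *	                                                   available, just
+ *	                                                   fetch_response()
+ *	!poll_completion                               --> sleep on &xfer->done
+ *	                                                   up to
+ *	                                                   max_rx_timeout_ms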
+ */ +static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct device *dev = info->dev; + int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms; + + trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + timeout_ms, + xfer->hdr.poll_completion); + + if (xfer->hdr.poll_completion) { + /* + * Real polling is needed only if transport has NOT declared + * itself to support synchronous commands replies. + */ + if (!info->desc->sync_cmds_completed_on_ret) { + /* + * Poll on xfer using transport provided .poll_done(); + * assumes no completion interrupt was available. + */ + ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); + + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, + xfer, stop)); + if (ktime_after(ktime_get(), stop)) { + dev_err(dev, + "timed out in resp(caller: %pS) - polling\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + } + + if (!ret) { + unsigned long flags; + + /* + * Do not fetch_response if an out-of-order delayed + * response is being processed. + */ + spin_lock_irqsave(&xfer->lock, flags); + if (xfer->state == SCMI_XFER_SENT_OK) { + info->desc->ops->fetch_response(cinfo, xfer); + xfer->state = SCMI_XFER_RESP_OK; + } + spin_unlock_irqrestore(&xfer->lock, flags); + + /* Trace polled replies. */ + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + "RESP", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + } + } else { + /* And we wait for the response. */ + if (!wait_for_completion_timeout(&xfer->done, + msecs_to_jiffies(timeout_ms))) { + dev_err(dev, "timed out in resp(caller: %pS)\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + } + + return ret; +} + +/** + * do_xfer() - Do one transfer + * + * @ph: Pointer to SCMI protocol handle + * @xfer: Transfer to initiate and wait for response + * + * Return: -ETIMEDOUT in case of no response, if transmit error, + * return corresponding error, else if all goes well, + * return 0. + */ +static int do_xfer(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer) +{ + int ret; + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + struct scmi_info *info = handle_to_scmi_info(pi->handle); + struct device *dev = info->dev; + struct scmi_chan_info *cinfo; + + /* Check for polling request on custom command xfers at first */ + if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) { + dev_warn_once(dev, + "Polling mode is not supported by transport.\n"); + return -EINVAL; + } + + cinfo = idr_find(&info->tx_idr, pi->proto->id); + if (unlikely(!cinfo)) + return -EINVAL; + + /* True ONLY if also supported by transport. */ + if (is_polling_enabled(cinfo, info)) + xfer->hdr.poll_completion = true; + + /* + * Initialise protocol id now from protocol handle to avoid it being + * overridden by mistake (or malice) by the protocol code mangling with + * the scmi_xfer structure prior to this. 
+ */ + xfer->hdr.protocol_id = pi->proto->id; + reinit_completion(&xfer->done); + + trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + xfer->hdr.poll_completion); + + /* Clear any stale status */ + xfer->hdr.status = SCMI_SUCCESS; + xfer->state = SCMI_XFER_SENT_OK; + /* + * Even though spinlocking is not needed here since no race is possible + * on xfer->state due to the monotonically increasing tokens allocation, + * we must anyway ensure xfer->state initialization is not re-ordered + * after the .send_message() to be sure that on the RX path an early + * ISR calling scmi_rx_callback() cannot see an old stale xfer->state. + */ + smp_mb(); + + ret = info->desc->ops->send_message(cinfo, xfer); + if (ret < 0) { + dev_dbg(dev, "Failed to send message %d\n", ret); + return ret; + } + + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND", + xfer->hdr.seq, xfer->hdr.status, + xfer->tx.buf, xfer->tx.len); + + ret = scmi_wait_for_message_response(cinfo, xfer); + if (!ret && xfer->hdr.status) + ret = scmi_to_linux_errno(xfer->hdr.status); + + if (info->desc->ops->mark_txdone) + info->desc->ops->mark_txdone(cinfo, ret, xfer); + + trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, ret); + + return ret; +} + +static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer) +{ + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + struct scmi_info *info = handle_to_scmi_info(pi->handle); + + xfer->rx.len = info->desc->max_msg_size; +} + +#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC) + +/** + * do_xfer_with_response() - Do one transfer and wait until the delayed + * response is received + * + * @ph: Pointer to SCMI protocol handle + * @xfer: Transfer to initiate and wait for response + * + * Using asynchronous commands in atomic/polling mode should be avoided since + * it could cause long busy-waiting here, so ignore polling for the delayed + * response and WARN if it was requested for this command transaction since + * upper layers should refrain from issuing such kind of requests. + * + * The only other option would have been to refrain from using any asynchronous + * command even if made available, when an atomic transport is detected, and + * instead forcibly use the synchronous version (thing that can be easily + * attained at the protocol layer), but this would also have led to longer + * stalls of the channel for synchronous commands and possibly timeouts. + * (in other words there is usually a good reason if a platform provides an + * asynchronous version of a command and we should prefer to use it...just not + * when using atomic/polling mode) + * + * Return: -ETIMEDOUT in case of no delayed response, if transmit error, + * return corresponding error, else if all goes well, return 0. + */ +static int do_xfer_with_response(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer) +{ + int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT); + DECLARE_COMPLETION_ONSTACK(async_response); + + xfer->async_done = &async_response; + + /* + * Delayed responses should not be polled, so an async command should + * not have been used when requiring an atomic/poll context; WARN and + * perform instead a sleeping wait. 
+ * (Note Async + IgnoreDelayedResponses are sent via do_xfer) + */ + WARN_ON_ONCE(xfer->hdr.poll_completion); + + ret = do_xfer(ph, xfer); + if (!ret) { + if (!wait_for_completion_timeout(xfer->async_done, timeout)) { + dev_err(ph->dev, + "timed out in delayed resp(caller: %pS)\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } else if (xfer->hdr.status) { + ret = scmi_to_linux_errno(xfer->hdr.status); + } + } + + xfer->async_done = NULL; + return ret; +} + +/** + * xfer_get_init() - Allocate and initialise one message for transmit + * + * @ph: Pointer to SCMI protocol handle + * @msg_id: Message identifier + * @tx_size: transmit message size + * @rx_size: receive message size + * @p: pointer to the allocated and initialised message + * + * This function allocates the message using @scmi_xfer_get and + * initialise the header. + * + * Return: 0 if all went fine with @p pointing to message, else + * corresponding error. + */ +static int xfer_get_init(const struct scmi_protocol_handle *ph, + u8 msg_id, size_t tx_size, size_t rx_size, + struct scmi_xfer **p) +{ + int ret; + struct scmi_xfer *xfer; + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + struct scmi_info *info = handle_to_scmi_info(pi->handle); + struct scmi_xfers_info *minfo = &info->tx_minfo; + struct device *dev = info->dev; + + /* Ensure we have sane transfer sizes */ + if (rx_size > info->desc->max_msg_size || + tx_size > info->desc->max_msg_size) + return -ERANGE; + + xfer = scmi_xfer_get(pi->handle, minfo, true); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "failed to get free message slot(%d)\n", ret); + return ret; + } + + xfer->tx.len = tx_size; + xfer->rx.len = rx_size ? : info->desc->max_msg_size; + xfer->hdr.type = MSG_TYPE_COMMAND; + xfer->hdr.id = msg_id; + xfer->hdr.poll_completion = false; + + *p = xfer; + + return 0; +} + +/** + * version_get() - command to get the revision of the SCMI entity + * + * @ph: Pointer to SCMI protocol handle + * @version: Holds returned version of protocol. + * + * Updates the SCMI information in the internal data structure. + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int version_get(const struct scmi_protocol_handle *ph, u32 *version) +{ + int ret; + __le32 *rev_info; + struct scmi_xfer *t; + + ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t); + if (ret) + return ret; + + ret = do_xfer(ph, t); + if (!ret) { + rev_info = t->rx.buf; + *version = le32_to_cpu(*rev_info); + } + + xfer_put(ph, t); + return ret; +} + +/** + * scmi_set_protocol_priv - Set protocol specific data at init time + * + * @ph: A reference to the protocol handle. + * @priv: The private data to set. + * + * Return: 0 on Success + */ +static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph, + void *priv) +{ + struct scmi_protocol_instance *pi = ph_to_pi(ph); + + pi->priv = priv; + + return 0; +} + +/** + * scmi_get_protocol_priv - Set protocol specific data at init time + * + * @ph: A reference to the protocol handle. + * + * Return: Protocol private data if any was set. 
+ */ +static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph) +{ + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + + return pi->priv; +} + +static const struct scmi_xfer_ops xfer_ops = { + .version_get = version_get, + .xfer_get_init = xfer_get_init, + .reset_rx_to_maxsz = reset_rx_to_maxsz, + .do_xfer = do_xfer, + .do_xfer_with_response = do_xfer_with_response, + .xfer_put = xfer_put, +}; + +struct scmi_msg_resp_domain_name_get { + __le32 flags; + u8 name[SCMI_MAX_STR_SIZE]; +}; + +/** + * scmi_common_extended_name_get - Common helper to get extended resources name + * @ph: A protocol handle reference. + * @cmd_id: The specific command ID to use. + * @res_id: The specific resource ID to use. + * @name: A pointer to the preallocated area where the retrieved name will be + * stored as a NULL terminated string. + * @len: The len in bytes of the @name char array. + * + * Return: 0 on Succcess + */ +static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph, + u8 cmd_id, u32 res_id, char *name, + size_t len) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_domain_name_get *resp; + + ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id), + sizeof(*resp), &t); + if (ret) + goto out; + + put_unaligned_le32(res_id, t->tx.buf); + resp = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + strscpy(name, resp->name, len); + + ph->xops->xfer_put(ph, t); +out: + if (ret) + dev_warn(ph->dev, + "Failed to get extended name - id:%u (ret:%d). Using %s\n", + res_id, ret, name); + return ret; +} + +/** + * struct scmi_iterator - Iterator descriptor + * @msg: A reference to the message TX buffer; filled by @prepare_message with + * a proper custom command payload for each multi-part command request. + * @resp: A reference to the response RX buffer; used by @update_state and + * @process_response to parse the multi-part replies. + * @t: A reference to the underlying xfer initialized and used transparently by + * the iterator internal routines. + * @ph: A reference to the associated protocol handle to be used. + * @ops: A reference to the custom provided iterator operations. + * @state: The current iterator state; used and updated in turn by the iterators + * internal routines and by the caller-provided @scmi_iterator_ops. + * @priv: A reference to optional private data as provided by the caller and + * passed back to the @@scmi_iterator_ops. 
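+ *
+ * Typical protocol-side usage sketch (illustrative only; my_iter_ops,
+ * MY_DESCRIBE_CMD and struct my_cmd are placeholders not defined in this
+ * file):
+ *
+ *	iter = ph->hops->iter_response_init(ph, &my_iter_ops, num_resources,
+ *					    MY_DESCRIBE_CMD,
+ *					    sizeof(struct my_cmd), priv);
+ *	if (IS_ERR(iter))
+ *		return PTR_ERR(iter);
+ *	ret = ph->hops->iter_response_run(iter);
+ *
+ * with my_iter_ops providing the .prepare_message/.update_state/
+ * .process_response callbacks invoked in turn by scmi_iterator_run() below.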
+ */ +struct scmi_iterator { + void *msg; + void *resp; + struct scmi_xfer *t; + const struct scmi_protocol_handle *ph; + struct scmi_iterator_ops *ops; + struct scmi_iterator_state state; + void *priv; +}; + +static void *scmi_iterator_init(const struct scmi_protocol_handle *ph, + struct scmi_iterator_ops *ops, + unsigned int max_resources, u8 msg_id, + size_t tx_size, void *priv) +{ + int ret; + struct scmi_iterator *i; + + i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL); + if (!i) + return ERR_PTR(-ENOMEM); + + i->ph = ph; + i->ops = ops; + i->priv = priv; + + ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t); + if (ret) { + devm_kfree(ph->dev, i); + return ERR_PTR(ret); + } + + i->state.max_resources = max_resources; + i->msg = i->t->tx.buf; + i->resp = i->t->rx.buf; + + return i; +} + +static int scmi_iterator_run(void *iter) +{ + int ret = -EINVAL; + struct scmi_iterator_ops *iops; + const struct scmi_protocol_handle *ph; + struct scmi_iterator_state *st; + struct scmi_iterator *i = iter; + + if (!i || !i->ops || !i->ph) + return ret; + + iops = i->ops; + ph = i->ph; + st = &i->state; + + do { + iops->prepare_message(i->msg, st->desc_index, i->priv); + ret = ph->xops->do_xfer(ph, i->t); + if (ret) + break; + + st->rx_len = i->t->rx.len; + ret = iops->update_state(st, i->resp, i->priv); + if (ret) + break; + + if (st->num_returned > st->max_resources - st->desc_index) { + dev_err(ph->dev, + "No. of resources can't exceed %d\n", + st->max_resources); + ret = -EINVAL; + break; + } + + for (st->loop_idx = 0; st->loop_idx < st->num_returned; + st->loop_idx++) { + ret = iops->process_response(ph, i->resp, st, i->priv); + if (ret) + goto out; + } + + st->desc_index += st->num_returned; + ph->xops->reset_rx_to_maxsz(ph, i->t); + /* + * check for both returned and remaining to avoid infinite + * loop due to buggy firmware + */ + } while (st->num_returned && st->num_remaining); + +out: + /* Finalize and destroy iterator */ + ph->xops->xfer_put(ph, i->t); + devm_kfree(ph->dev, i); + + return ret; +} + +struct scmi_msg_get_fc_info { + __le32 domain; + __le32 message_id; +}; + +struct scmi_msg_resp_desc_fc { + __le32 attr; +#define SUPPORTS_DOORBELL(x) ((x) & BIT(0)) +#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x)) + __le32 rate_limit; + __le32 chan_addr_low; + __le32 chan_addr_high; + __le32 chan_size; + __le32 db_addr_low; + __le32 db_addr_high; + __le32 db_set_lmask; + __le32 db_set_hmask; + __le32 db_preserve_lmask; + __le32 db_preserve_hmask; +}; + +static void +scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, + u8 describe_id, u32 message_id, u32 valid_size, + u32 domain, void __iomem **p_addr, + struct scmi_fc_db_info **p_db) +{ + int ret; + u32 flags; + u64 phys_addr; + u8 size; + void __iomem *addr; + struct scmi_xfer *t; + struct scmi_fc_db_info *db = NULL; + struct scmi_msg_get_fc_info *info; + struct scmi_msg_resp_desc_fc *resp; + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + + if (!p_addr) { + ret = -EINVAL; + goto err_out; + } + + ret = ph->xops->xfer_get_init(ph, describe_id, + sizeof(*info), sizeof(*resp), &t); + if (ret) + goto err_out; + + info = t->tx.buf; + info->domain = cpu_to_le32(domain); + info->message_id = cpu_to_le32(message_id); + + /* + * Bail out on error leaving fc_info addresses zeroed; this includes + * the case in which the requested domain/message_id does NOT support + * fastchannels at all. 
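+	 * On success, instead, the returned descriptor carries the channel
+	 * address/size and, optionally, the doorbell address and masks which
+	 * are parsed and mapped right below.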
+ */ + ret = ph->xops->do_xfer(ph, t); + if (ret) + goto err_xfer; + + resp = t->rx.buf; + flags = le32_to_cpu(resp->attr); + size = le32_to_cpu(resp->chan_size); + if (size != valid_size) { + ret = -EINVAL; + goto err_xfer; + } + + phys_addr = le32_to_cpu(resp->chan_addr_low); + phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; + addr = devm_ioremap(ph->dev, phys_addr, size); + if (!addr) { + ret = -EADDRNOTAVAIL; + goto err_xfer; + } + + *p_addr = addr; + + if (p_db && SUPPORTS_DOORBELL(flags)) { + db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); + if (!db) { + ret = -ENOMEM; + goto err_db; + } + + size = 1 << DOORBELL_REG_WIDTH(flags); + phys_addr = le32_to_cpu(resp->db_addr_low); + phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; + addr = devm_ioremap(ph->dev, phys_addr, size); + if (!addr) { + ret = -EADDRNOTAVAIL; + goto err_db_mem; + } + + db->addr = addr; + db->width = size; + db->set = le32_to_cpu(resp->db_set_lmask); + db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; + db->mask = le32_to_cpu(resp->db_preserve_lmask); + db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; + + *p_db = db; + } + + ph->xops->xfer_put(ph, t); + + dev_dbg(ph->dev, + "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n", + pi->proto->id, message_id, domain); + + return; + +err_db_mem: + devm_kfree(ph->dev, db); + +err_db: + *p_addr = NULL; + +err_xfer: + ph->xops->xfer_put(ph, t); + +err_out: + dev_warn(ph->dev, + "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n", + pi->proto->id, message_id, domain, ret); +} + +#define SCMI_PROTO_FC_RING_DB(w) \ +do { \ + u##w val = 0; \ + \ + if (db->mask) \ + val = ioread##w(db->addr) & db->mask; \ + iowrite##w((u##w)db->set | val, db->addr); \ +} while (0) + +static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) +{ + if (!db || !db->addr) + return; + + if (db->width == 1) + SCMI_PROTO_FC_RING_DB(8); + else if (db->width == 2) + SCMI_PROTO_FC_RING_DB(16); + else if (db->width == 4) + SCMI_PROTO_FC_RING_DB(32); + else /* db->width == 8 */ +#ifdef CONFIG_64BIT + SCMI_PROTO_FC_RING_DB(64); +#else + { + u64 val = 0; + + if (db->mask) + val = ioread64_hi_lo(db->addr) & db->mask; + iowrite64_hi_lo(db->set | val, db->addr); + } +#endif +} + +static const struct scmi_proto_helpers_ops helpers_ops = { + .extended_name_get = scmi_common_extended_name_get, + .iter_response_init = scmi_iterator_init, + .iter_response_run = scmi_iterator_run, + .fastchannel_init = scmi_common_fastchannel_init, + .fastchannel_db_ring = scmi_common_fastchannel_db_ring, +}; + +/** + * scmi_revision_area_get - Retrieve version memory area. + * + * @ph: A reference to the protocol handle. + * + * A helper to grab the version memory area reference during SCMI Base protocol + * initialization. + * + * Return: A reference to the version memory area associated to the SCMI + * instance underlying this protocol handle. + */ +struct scmi_revision_info * +scmi_revision_area_get(const struct scmi_protocol_handle *ph) +{ + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + + return pi->handle->version; +} + +/** + * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol + * instance descriptor. + * @info: The reference to the related SCMI instance. + * @proto: The protocol descriptor. 
+ * + * Allocate a new protocol instance descriptor, using the provided @proto + * description, against the specified SCMI instance @info, and initialize it; + * all resources management is handled via a dedicated per-protocol devres + * group. + * + * Context: Assumes to be called with @protocols_mtx already acquired. + * Return: A reference to a freshly allocated and initialized protocol instance + * or ERR_PTR on failure. On failure the @proto reference is at first + * put using @scmi_protocol_put() before releasing all the devres group. + */ +static struct scmi_protocol_instance * +scmi_alloc_init_protocol_instance(struct scmi_info *info, + const struct scmi_protocol *proto) +{ + int ret = -ENOMEM; + void *gid; + struct scmi_protocol_instance *pi; + const struct scmi_handle *handle = &info->handle; + + /* Protocol specific devres group */ + gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); + if (!gid) { + scmi_protocol_put(proto->id); + goto out; + } + + pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL); + if (!pi) + goto clean; + + pi->gid = gid; + pi->proto = proto; + pi->handle = handle; + pi->ph.dev = handle->dev; + pi->ph.xops = &xfer_ops; + pi->ph.hops = &helpers_ops; + pi->ph.set_priv = scmi_set_protocol_priv; + pi->ph.get_priv = scmi_get_protocol_priv; + refcount_set(&pi->users, 1); + /* proto->init is assured NON NULL by scmi_protocol_register */ + ret = pi->proto->instance_init(&pi->ph); + if (ret) + goto clean; + + ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1, + GFP_KERNEL); + if (ret != proto->id) + goto clean; + + /* + * Warn but ignore events registration errors since we do not want + * to skip whole protocols if their notifications are messed up. + */ + if (pi->proto->events) { + ret = scmi_register_protocol_events(handle, pi->proto->id, + &pi->ph, + pi->proto->events); + if (ret) + dev_warn(handle->dev, + "Protocol:%X - Events Registration Failed - err:%d\n", + pi->proto->id, ret); + } + + devres_close_group(handle->dev, pi->gid); + dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id); + + return pi; + +clean: + /* Take care to put the protocol module's owner before releasing all */ + scmi_protocol_put(proto->id); + devres_release_group(handle->dev, gid); +out: + return ERR_PTR(ret); +} + +/** + * scmi_get_protocol_instance - Protocol initialization helper. + * @handle: A reference to the SCMI platform instance. + * @protocol_id: The protocol being requested. + * + * In case the required protocol has never been requested before for this + * instance, allocate and initialize all the needed structures while handling + * resource allocation with a dedicated per-protocol devres subgroup. + * + * Return: A reference to an initialized protocol instance or error on failure: + * in particular returns -EPROBE_DEFER when the desired protocol could + * NOT be found. 
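+ *
+ * Note that each successful call increments the per-protocol users refcount
+ * and must eventually be balanced by a matching scmi_protocol_release() on
+ * the same handle/protocol_id pair (as the devres helpers below do).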
+ */ +static struct scmi_protocol_instance * __must_check +scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + struct scmi_info *info = handle_to_scmi_info(handle); + + mutex_lock(&info->protocols_mtx); + pi = idr_find(&info->protocols, protocol_id); + + if (pi) { + refcount_inc(&pi->users); + } else { + const struct scmi_protocol *proto; + + /* Fails if protocol not registered on bus */ + proto = scmi_protocol_get(protocol_id); + if (proto) + pi = scmi_alloc_init_protocol_instance(info, proto); + else + pi = ERR_PTR(-EPROBE_DEFER); + } + mutex_unlock(&info->protocols_mtx); + + return pi; +} + +/** + * scmi_protocol_acquire - Protocol acquire + * @handle: A reference to the SCMI platform instance. + * @protocol_id: The protocol being requested. + * + * Register a new user for the requested protocol on the specified SCMI + * platform instance, possibly triggering its initialization on first user. + * + * Return: 0 if protocol was acquired successfully. + */ +int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id) +{ + return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id)); +} + +/** + * scmi_protocol_release - Protocol de-initialization helper. + * @handle: A reference to the SCMI platform instance. + * @protocol_id: The protocol being requested. + * + * Remove one user for the specified protocol and triggers de-initialization + * and resources de-allocation once the last user has gone. + */ +void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id) +{ + struct scmi_info *info = handle_to_scmi_info(handle); + struct scmi_protocol_instance *pi; + + mutex_lock(&info->protocols_mtx); + pi = idr_find(&info->protocols, protocol_id); + if (WARN_ON(!pi)) + goto out; + + if (refcount_dec_and_test(&pi->users)) { + void *gid = pi->gid; + + if (pi->proto->events) + scmi_deregister_protocol_events(handle, protocol_id); + + if (pi->proto->instance_deinit) + pi->proto->instance_deinit(&pi->ph); + + idr_remove(&info->protocols, protocol_id); + + scmi_protocol_put(protocol_id); + + devres_release_group(handle->dev, gid); + dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n", + protocol_id); + } + +out: + mutex_unlock(&info->protocols_mtx); +} + +void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph, + u8 *prot_imp) +{ + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + struct scmi_info *info = handle_to_scmi_info(pi->handle); + + info->protocols_imp = prot_imp; +} + +static bool +scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id) +{ + int i; + struct scmi_info *info = handle_to_scmi_info(handle); + struct scmi_revision_info *rev = handle->version; + + if (!info->protocols_imp) + return false; + + for (i = 0; i < rev->num_protocols; i++) + if (info->protocols_imp[i] == prot_id) + return true; + return false; +} + +struct scmi_protocol_devres { + const struct scmi_handle *handle; + u8 protocol_id; +}; + +static void scmi_devm_release_protocol(struct device *dev, void *res) +{ + struct scmi_protocol_devres *dres = res; + + scmi_protocol_release(dres->handle, dres->protocol_id); +} + +static struct scmi_protocol_instance __must_check * +scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + struct scmi_protocol_devres *dres; + + dres = devres_alloc(scmi_devm_release_protocol, + sizeof(*dres), GFP_KERNEL); + if (!dres) + return ERR_PTR(-ENOMEM); + + pi = 
scmi_get_protocol_instance(sdev->handle, protocol_id); + if (IS_ERR(pi)) { + devres_free(dres); + return pi; + } + + dres->handle = sdev->handle; + dres->protocol_id = protocol_id; + devres_add(&sdev->dev, dres); + + return pi; +} + +/** + * scmi_devm_protocol_get - Devres managed get protocol operations and handle + * @sdev: A reference to an scmi_device whose embedded struct device is to + * be used for devres accounting. + * @protocol_id: The protocol being requested. + * @ph: A pointer reference used to pass back the associated protocol handle. + * + * Get hold of a protocol accounting for its usage, eventually triggering its + * initialization, and returning the protocol specific operations and related + * protocol handle which will be used as first argument in most of the + * protocols operations methods. + * Being a devres based managed method, protocol hold will be automatically + * released, and possibly de-initialized on last user, once the SCMI driver + * owning the scmi_device is unbound from it. + * + * Return: A reference to the requested protocol operations or error. + * Must be checked for errors by caller. + */ +static const void __must_check * +scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id, + struct scmi_protocol_handle **ph) +{ + struct scmi_protocol_instance *pi; + + if (!ph) + return ERR_PTR(-EINVAL); + + pi = scmi_devres_protocol_instance_get(sdev, protocol_id); + if (IS_ERR(pi)) + return pi; + + *ph = &pi->ph; + + return pi->proto->ops; +} + +/** + * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol + * @sdev: A reference to an scmi_device whose embedded struct device is to + * be used for devres accounting. + * @protocol_id: The protocol being requested. + * + * Get hold of a protocol accounting for its usage, possibly triggering its + * initialization but without getting access to its protocol specific operations + * and handle. + * + * Being a devres based managed method, protocol hold will be automatically + * released, and possibly de-initialized on last user, once the SCMI driver + * owning the scmi_device is unbound from it. + * + * Return: 0 on SUCCESS + */ +static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev, + u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + + pi = scmi_devres_protocol_instance_get(sdev, protocol_id); + if (IS_ERR(pi)) + return PTR_ERR(pi); + + return 0; +} + +static int scmi_devm_protocol_match(struct device *dev, void *res, void *data) +{ + struct scmi_protocol_devres *dres = res; + + if (WARN_ON(!dres || !data)) + return 0; + + return dres->protocol_id == *((u8 *)data); +} + +/** + * scmi_devm_protocol_put - Devres managed put protocol operations and handle + * @sdev: A reference to an scmi_device whose embedded struct device is to + * be used for devres accounting. + * @protocol_id: The protocol being requested. + * + * Explicitly release a protocol hold previously obtained calling the above + * @scmi_devm_protocol_get. + */ +static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id) +{ + int ret; + + ret = devres_release(&sdev->dev, scmi_devm_release_protocol, + scmi_devm_protocol_match, &protocol_id); + WARN_ON(ret); +} + +/** + * scmi_is_transport_atomic - Method to check if underlying transport for an + * SCMI instance is configured as atomic. + * + * @handle: A reference to the SCMI platform instance. + * @atomic_threshold: An optional return value for the system wide currently + * configured threshold for atomic operations. 
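+ *
+ * Caller-side usage sketch (illustrative only; latency_us is a placeholder
+ * for a driver-specific expected command latency, in microseconds):
+ *
+ *	if (scmi_is_transport_atomic(handle, &threshold) &&
+ *	    latency_us <= threshold)
+ *		... atomic/polled transactions can be requested ...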
+ * + * Return: True if transport is configured as atomic + */ +static bool scmi_is_transport_atomic(const struct scmi_handle *handle, + unsigned int *atomic_threshold) +{ + bool ret; + struct scmi_info *info = handle_to_scmi_info(handle); + + ret = info->desc->atomic_enabled && is_transport_polling_capable(info); + if (ret && atomic_threshold) + *atomic_threshold = info->atomic_threshold; + + return ret; +} + +static inline +struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info) +{ + info->users++; + return &info->handle; +} + +/** + * scmi_handle_get() - Get the SCMI handle for a device + * + * @dev: pointer to device for which we want SCMI handle + * + * NOTE: The function does not track individual clients of the framework + * and is expected to be maintained by caller of SCMI protocol library. + * scmi_handle_put must be balanced with successful scmi_handle_get + * + * Return: pointer to handle if successful, NULL on error + */ +struct scmi_handle *scmi_handle_get(struct device *dev) +{ + struct list_head *p; + struct scmi_info *info; + struct scmi_handle *handle = NULL; + + mutex_lock(&scmi_list_mutex); + list_for_each(p, &scmi_list) { + info = list_entry(p, struct scmi_info, node); + if (dev->parent == info->dev) { + handle = scmi_handle_get_from_info_unlocked(info); + break; + } + } + mutex_unlock(&scmi_list_mutex); + + return handle; +} + +/** + * scmi_handle_put() - Release the handle acquired by scmi_handle_get + * + * @handle: handle acquired by scmi_handle_get + * + * NOTE: The function does not track individual clients of the framework + * and is expected to be maintained by caller of SCMI protocol library. + * scmi_handle_put must be balanced with successful scmi_handle_get + * + * Return: 0 is successfully released + * if null was passed, it returns -EINVAL; + */ +int scmi_handle_put(const struct scmi_handle *handle) +{ + struct scmi_info *info; + + if (!handle) + return -EINVAL; + + info = handle_to_scmi_info(handle); + mutex_lock(&scmi_list_mutex); + if (!WARN_ON(!info->users)) + info->users--; + mutex_unlock(&scmi_list_mutex); + + return 0; +} + +static int __scmi_xfer_info_init(struct scmi_info *sinfo, + struct scmi_xfers_info *info) +{ + int i; + struct scmi_xfer *xfer; + struct device *dev = sinfo->dev; + const struct scmi_desc *desc = sinfo->desc; + + /* Pre-allocated messages, no more than what hdr.seq can support */ + if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) { + dev_err(dev, + "Invalid maximum messages %d, not in range [1 - %lu]\n", + info->max_msg, MSG_TOKEN_MAX); + return -EINVAL; + } + + hash_init(info->pending_xfers); + + /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */ + info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX), + sizeof(long), GFP_KERNEL); + if (!info->xfer_alloc_table) + return -ENOMEM; + + /* + * Preallocate a number of xfers equal to max inflight messages, + * pre-initialize the buffer pointer to pre-allocated buffers and + * attach all of them to the free list + */ + INIT_HLIST_HEAD(&info->free_xfers); + for (i = 0; i < info->max_msg; i++) { + xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL); + if (!xfer) + return -ENOMEM; + + xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, + GFP_KERNEL); + if (!xfer->rx.buf) + return -ENOMEM; + + xfer->tx.buf = xfer->rx.buf; + init_completion(&xfer->done); + spin_lock_init(&xfer->lock); + + /* Add initialized xfer to the free list */ + hlist_add_head(&xfer->node, &info->free_xfers); + } + + 
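+	/* xfer_lock guards the token bitmap and the free/pending xfer lists */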
spin_lock_init(&info->xfer_lock); + + return 0; +} + +static int scmi_channels_max_msg_configure(struct scmi_info *sinfo) +{ + const struct scmi_desc *desc = sinfo->desc; + + if (!desc->ops->get_max_msg) { + sinfo->tx_minfo.max_msg = desc->max_msg; + sinfo->rx_minfo.max_msg = desc->max_msg; + } else { + struct scmi_chan_info *base_cinfo; + + base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE); + if (!base_cinfo) + return -EINVAL; + sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo); + + /* RX channel is optional so can be skipped */ + base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE); + if (base_cinfo) + sinfo->rx_minfo.max_msg = + desc->ops->get_max_msg(base_cinfo); + } + + return 0; +} + +static int scmi_xfer_info_init(struct scmi_info *sinfo) +{ + int ret; + + ret = scmi_channels_max_msg_configure(sinfo); + if (ret) + return ret; + + ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); + if (!ret && !idr_is_empty(&sinfo->rx_idr)) + ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo); + + return ret; +} + +static int scmi_chan_setup(struct scmi_info *info, struct device *dev, + int prot_id, bool tx) +{ + int ret, idx; + struct scmi_chan_info *cinfo; + struct idr *idr; + + /* Transmit channel is first entry i.e. index 0 */ + idx = tx ? 0 : 1; + idr = tx ? &info->tx_idr : &info->rx_idr; + + /* check if already allocated, used for multiple device per protocol */ + cinfo = idr_find(idr, prot_id); + if (cinfo) + return 0; + + if (!info->desc->ops->chan_available(dev, idx)) { + cinfo = idr_find(idr, SCMI_PROTOCOL_BASE); + if (unlikely(!cinfo)) /* Possible only if platform has no Rx */ + return -EINVAL; + goto idr_alloc; + } + + cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL); + if (!cinfo) + return -ENOMEM; + + cinfo->dev = dev; + cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; + + ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); + if (ret) + return ret; + + if (tx && is_polling_required(cinfo, info)) { + if (is_transport_polling_capable(info)) + dev_info(dev, + "Enabled polling mode TX channel - prot_id:%d\n", + prot_id); + else + dev_warn(dev, + "Polling mode NOT supported by transport.\n"); + } + +idr_alloc: + ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); + if (ret != prot_id) { + dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret); + return ret; + } + + cinfo->handle = &info->handle; + return 0; +} + +static inline int +scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) +{ + int ret = scmi_chan_setup(info, dev, prot_id, true); + + if (!ret) { + /* Rx is optional, report only memory errors */ + ret = scmi_chan_setup(info, dev, prot_id, false); + if (ret && ret != -ENOMEM) + ret = 0; + } + + return ret; +} + +/** + * scmi_get_protocol_device - Helper to get/create an SCMI device. + * + * @np: A device node representing a valid active protocols for the referred + * SCMI instance. + * @info: The referred SCMI instance for which we are getting/creating this + * device. + * @prot_id: The protocol ID. + * @name: The device name. + * + * Referring to the specific SCMI instance identified by @info, this helper + * takes care to return a properly initialized device matching the requested + * @proto_id and @name: if device was still not existent it is created as a + * child of the specified SCMI instance @info and its transport properly + * initialized as usual. + * + * Return: A properly initialized scmi device, NULL otherwise. 
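+ *
+ * Note that a SystemPower (SCMI_PROTOCOL_SYSTEM) device is additionally
+ * required to be unique across all SCMI instances: any further creation
+ * attempt is rejected under scmi_syspower_mtx and NULL is returned.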
+ */ +static inline struct scmi_device * +scmi_get_protocol_device(struct device_node *np, struct scmi_info *info, + int prot_id, const char *name) +{ + struct scmi_device *sdev; + + /* Already created for this parent SCMI instance ? */ + sdev = scmi_child_dev_find(info->dev, prot_id, name); + if (sdev) + return sdev; + + mutex_lock(&scmi_syspower_mtx); + if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) { + dev_warn(info->dev, + "SCMI SystemPower protocol device must be unique !\n"); + mutex_unlock(&scmi_syspower_mtx); + + return NULL; + } + + pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id); + + sdev = scmi_device_create(np, info->dev, prot_id, name); + if (!sdev) { + dev_err(info->dev, "failed to create %d protocol device\n", + prot_id); + mutex_unlock(&scmi_syspower_mtx); + + return NULL; + } + + if (scmi_txrx_setup(info, &sdev->dev, prot_id)) { + dev_err(&sdev->dev, "failed to setup transport\n"); + scmi_device_destroy(sdev); + mutex_unlock(&scmi_syspower_mtx); + + return NULL; + } + + if (prot_id == SCMI_PROTOCOL_SYSTEM) + scmi_syspower_registered = true; + + mutex_unlock(&scmi_syspower_mtx); + + return sdev; +} + +static inline void +scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, + int prot_id, const char *name) +{ + struct scmi_device *sdev; + + sdev = scmi_get_protocol_device(np, info, prot_id, name); + if (!sdev) + return; + + /* setup handle now as the transport is ready */ + scmi_set_handle(sdev); +} + +/** + * scmi_create_protocol_devices - Create devices for all pending requests for + * this SCMI instance. + * + * @np: The device node describing the protocol + * @info: The SCMI instance descriptor + * @prot_id: The protocol ID + * + * All devices previously requested for this instance (if any) are found and + * created by scanning the proper @&scmi_requested_devices entry. + */ +static void scmi_create_protocol_devices(struct device_node *np, + struct scmi_info *info, int prot_id) +{ + struct list_head *phead; + + mutex_lock(&scmi_requested_devices_mtx); + phead = idr_find(&scmi_requested_devices, prot_id); + if (phead) { + struct scmi_requested_dev *rdev; + + list_for_each_entry(rdev, phead, node) + scmi_create_protocol_device(np, info, prot_id, + rdev->id_table->name); + } + mutex_unlock(&scmi_requested_devices_mtx); +} + +/** + * scmi_protocol_device_request - Helper to request a device + * + * @id_table: A protocol/name pair descriptor for the device to be created. + * + * This helper let an SCMI driver request specific devices identified by the + * @id_table to be created for each active SCMI instance. + * + * The requested device name MUST NOT be already existent for any protocol; + * at first the freshly requested @id_table is annotated in the IDR table + * @scmi_requested_devices, then a matching device is created for each already + * active SCMI instance. 
(if any) + * + * This way the requested device is created straight-away for all the already + * initialized(probed) SCMI instances (handles) and it remains also annotated + * as pending creation if the requesting SCMI driver was loaded before some + * SCMI instance and related transports were available: when such late instance + * is probed, its probe will take care to scan the list of pending requested + * devices and create those on its own (see @scmi_create_protocol_devices and + * its enclosing loop) + * + * Return: 0 on Success + */ +int scmi_protocol_device_request(const struct scmi_device_id *id_table) +{ + int ret = 0; + unsigned int id = 0; + struct list_head *head, *phead = NULL; + struct scmi_requested_dev *rdev; + struct scmi_info *info; + + pr_debug("Requesting SCMI device (%s) for protocol %x\n", + id_table->name, id_table->protocol_id); + + /* + * Search for the matching protocol rdev list and then search + * of any existent equally named device...fails if any duplicate found. + */ + mutex_lock(&scmi_requested_devices_mtx); + idr_for_each_entry(&scmi_requested_devices, head, id) { + if (!phead) { + /* A list found registered in the IDR is never empty */ + rdev = list_first_entry(head, struct scmi_requested_dev, + node); + if (rdev->id_table->protocol_id == + id_table->protocol_id) + phead = head; + } + list_for_each_entry(rdev, head, node) { + if (!strcmp(rdev->id_table->name, id_table->name)) { + pr_err("Ignoring duplicate request [%d] %s\n", + rdev->id_table->protocol_id, + rdev->id_table->name); + ret = -EINVAL; + goto out; + } + } + } + + /* + * No duplicate found for requested id_table, so let's create a new + * requested device entry for this new valid request. + */ + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) { + ret = -ENOMEM; + goto out; + } + rdev->id_table = id_table; + + /* + * Append the new requested device table descriptor to the head of the + * related protocol list, eventually creating such head if not already + * there. + */ + if (!phead) { + phead = kzalloc(sizeof(*phead), GFP_KERNEL); + if (!phead) { + kfree(rdev); + ret = -ENOMEM; + goto out; + } + INIT_LIST_HEAD(phead); + + ret = idr_alloc(&scmi_requested_devices, (void *)phead, + id_table->protocol_id, + id_table->protocol_id + 1, GFP_KERNEL); + if (ret != id_table->protocol_id) { + pr_err("Failed to save SCMI device - ret:%d\n", ret); + kfree(rdev); + kfree(phead); + ret = -EINVAL; + goto out; + } + ret = 0; + } + list_add(&rdev->node, phead); + + /* + * Now effectively create and initialize the requested device for every + * already initialized SCMI instance which has registered the requested + * protocol as a valid active one: i.e. defined in DT and supported by + * current platform FW. + */ + mutex_lock(&scmi_list_mutex); + list_for_each_entry(info, &scmi_list, node) { + struct device_node *child; + + child = idr_find(&info->active_protocols, + id_table->protocol_id); + if (child) { + struct scmi_device *sdev; + + sdev = scmi_get_protocol_device(child, info, + id_table->protocol_id, + id_table->name); + if (sdev) { + /* Set handle if not already set: device existed */ + if (!sdev->handle) + sdev->handle = + scmi_handle_get_from_info_unlocked(info); + /* Relink consumer and suppliers */ + if (sdev->handle) + scmi_device_link_add(&sdev->dev, + sdev->handle->dev); + } + } else { + dev_err(info->dev, + "Failed. 
SCMI protocol %d not active.\n", + id_table->protocol_id); + } + } + mutex_unlock(&scmi_list_mutex); + +out: + mutex_unlock(&scmi_requested_devices_mtx); + + return ret; +} + +/** + * scmi_protocol_device_unrequest - Helper to unrequest a device + * + * @id_table: A protocol/name pair descriptor for the device to be unrequested. + * + * An helper to let an SCMI driver release its request about devices; note that + * devices are created and initialized once the first SCMI driver request them + * but they destroyed only on SCMI core unloading/unbinding. + * + * The current SCMI transport layer uses such devices as internal references and + * as such they could be shared as same transport between multiple drivers so + * that cannot be safely destroyed till the whole SCMI stack is removed. + * (unless adding further burden of refcounting.) + */ +void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table) +{ + struct list_head *phead; + + pr_debug("Unrequesting SCMI device (%s) for protocol %x\n", + id_table->name, id_table->protocol_id); + + mutex_lock(&scmi_requested_devices_mtx); + phead = idr_find(&scmi_requested_devices, id_table->protocol_id); + if (phead) { + struct scmi_requested_dev *victim, *tmp; + + list_for_each_entry_safe(victim, tmp, phead, node) { + if (!strcmp(victim->id_table->name, id_table->name)) { + list_del(&victim->node); + kfree(victim); + break; + } + } + + if (list_empty(phead)) { + idr_remove(&scmi_requested_devices, + id_table->protocol_id); + kfree(phead); + } + } + mutex_unlock(&scmi_requested_devices_mtx); +} + +static int scmi_cleanup_txrx_channels(struct scmi_info *info) +{ + int ret; + struct idr *idr = &info->tx_idr; + + ret = idr_for_each(idr, info->desc->ops->chan_free, idr); + idr_destroy(&info->tx_idr); + + idr = &info->rx_idr; + ret = idr_for_each(idr, info->desc->ops->chan_free, idr); + idr_destroy(&info->rx_idr); + + return ret; +} + +static int scmi_probe(struct platform_device *pdev) +{ + int ret; + struct scmi_handle *handle; + const struct scmi_desc *desc; + struct scmi_info *info; + struct device *dev = &pdev->dev; + struct device_node *child, *np = dev->of_node; + + desc = of_device_get_match_data(dev); + if (!desc) + return -EINVAL; + + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->dev = dev; + info->desc = desc; + INIT_LIST_HEAD(&info->node); + idr_init(&info->protocols); + mutex_init(&info->protocols_mtx); + idr_init(&info->active_protocols); + + platform_set_drvdata(pdev, info); + idr_init(&info->tx_idr); + idr_init(&info->rx_idr); + + handle = &info->handle; + handle->dev = info->dev; + handle->version = &info->version; + handle->devm_protocol_acquire = scmi_devm_protocol_acquire; + handle->devm_protocol_get = scmi_devm_protocol_get; + handle->devm_protocol_put = scmi_devm_protocol_put; + + /* System wide atomic threshold for atomic ops .. 
if any */ + if (!of_property_read_u32(np, "atomic-threshold-us", + &info->atomic_threshold)) + dev_info(dev, + "SCMI System wide atomic threshold set to %d us\n", + info->atomic_threshold); + handle->is_transport_atomic = scmi_is_transport_atomic; + + if (desc->ops->link_supplier) { + ret = desc->ops->link_supplier(dev); + if (ret) + return ret; + } + + ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE); + if (ret) + return ret; + + ret = scmi_xfer_info_init(info); + if (ret) + goto clear_txrx_setup; + + if (scmi_notification_init(handle)) + dev_err(dev, "SCMI Notifications NOT available.\n"); + + if (info->desc->atomic_enabled && !is_transport_polling_capable(info)) + dev_err(dev, + "Transport is not polling capable. Atomic mode not supported.\n"); + + /* + * Trigger SCMI Base protocol initialization. + * It's mandatory and won't be ever released/deinit until the + * SCMI stack is shutdown/unloaded as a whole. + */ + ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE); + if (ret) { + dev_err(dev, "unable to communicate with SCMI\n"); + goto notification_exit; + } + + mutex_lock(&scmi_list_mutex); + list_add_tail(&info->node, &scmi_list); + mutex_unlock(&scmi_list_mutex); + + for_each_available_child_of_node(np, child) { + u32 prot_id; + + if (of_property_read_u32(child, "reg", &prot_id)) + continue; + + if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id)) + dev_err(dev, "Out of range protocol %d\n", prot_id); + + if (!scmi_is_protocol_implemented(handle, prot_id)) { + dev_err(dev, "SCMI protocol %d not implemented\n", + prot_id); + continue; + } + + /* + * Save this valid DT protocol descriptor amongst + * @active_protocols for this SCMI instance/ + */ + ret = idr_alloc(&info->active_protocols, child, + prot_id, prot_id + 1, GFP_KERNEL); + if (ret != prot_id) { + dev_err(dev, "SCMI protocol %d already activated. 
Skip\n", + prot_id); + continue; + } + + of_node_get(child); + scmi_create_protocol_devices(child, info, prot_id); + } + + return 0; + +notification_exit: + scmi_notification_exit(&info->handle); +clear_txrx_setup: + scmi_cleanup_txrx_channels(info); + return ret; +} + +void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id) +{ + idr_remove(idr, id); +} + +static int scmi_remove(struct platform_device *pdev) +{ + int ret, id; + struct scmi_info *info = platform_get_drvdata(pdev); + struct device_node *child; + + mutex_lock(&scmi_list_mutex); + if (info->users) + dev_warn(&pdev->dev, + "Still active SCMI users will be forcibly unbound.\n"); + list_del(&info->node); + mutex_unlock(&scmi_list_mutex); + + scmi_notification_exit(&info->handle); + + mutex_lock(&info->protocols_mtx); + idr_destroy(&info->protocols); + mutex_unlock(&info->protocols_mtx); + + idr_for_each_entry(&info->active_protocols, child, id) + of_node_put(child); + idr_destroy(&info->active_protocols); + + /* Safe to free channels since no more users */ + ret = scmi_cleanup_txrx_channels(info); + if (ret) + dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n"); + + return 0; +} + +static ssize_t protocol_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "%u.%u\n", info->version.major_ver, + info->version.minor_ver); +} +static DEVICE_ATTR_RO(protocol_version); + +static ssize_t firmware_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "0x%x\n", info->version.impl_ver); +} +static DEVICE_ATTR_RO(firmware_version); + +static ssize_t vendor_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", info->version.vendor_id); +} +static DEVICE_ATTR_RO(vendor_id); + +static ssize_t sub_vendor_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", info->version.sub_vendor_id); +} +static DEVICE_ATTR_RO(sub_vendor_id); + +static struct attribute *versions_attrs[] = { + &dev_attr_firmware_version.attr, + &dev_attr_protocol_version.attr, + &dev_attr_vendor_id.attr, + &dev_attr_sub_vendor_id.attr, + NULL, +}; +ATTRIBUTE_GROUPS(versions); + +/* Each compatible listed below must have descriptor associated with it */ +static const struct of_device_id scmi_of_match[] = { +#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX + { .compatible = "arm,scmi", .data = &scmi_mailbox_desc }, +#endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE + { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc }, +#endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC + { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc}, +#endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO + { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc}, +#endif + { /* Sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, scmi_of_match); + +static struct platform_driver scmi_driver = { + .driver = { + .name = "arm-scmi", + .suppress_bind_attrs = true, + .of_match_table = scmi_of_match, + .dev_groups = versions_groups, + }, + .probe = scmi_probe, + .remove = scmi_remove, +}; + +/** + * __scmi_transports_setup - Common helper to call transport-specific + * .init/.exit code if provided. + * + * @init: A flag to distinguish between init and exit. 
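Each compatible string in scmi_of_match above carries its transport descriptor in .data, and of_device_get_match_data() in scmi_probe() simply hands back the .data of the entry that matched the device node. A self-contained model of that table walk is sketched below; the descriptor contents and names are made up for illustration only.

#include <stdio.h>
#include <string.h>

/* Hypothetical descriptors standing in for scmi_mailbox_desc & co. */
struct desc { const char *name; int max_msg; };

static const struct desc mbox_desc = { "mailbox", 20 };
static const struct desc smc_desc  = { "smc", 20 };

struct of_match { const char *compatible; const void *data; };

static const struct of_match match_table[] = {
	{ "arm,scmi",     &mbox_desc },
	{ "arm,scmi-smc", &smc_desc  },
	{ /* sentinel */ },
};

/* Model of of_device_get_match_data(): return .data of the matching entry. */
static const void *get_match_data(const char *compatible)
{
	const struct of_match *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct desc *d = get_match_data("arm,scmi-smc");

	printf("%s: max_msg=%d\n", d ? d->name : "none", d ? d->max_msg : 0);
	return 0;
}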
+ * + * Note that, if provided, we invoke .init/.exit functions for all the + * transports currently compiled in. + * + * Return: 0 on Success. + */ +static inline int __scmi_transports_setup(bool init) +{ + int ret = 0; + const struct of_device_id *trans; + + for (trans = scmi_of_match; trans->data; trans++) { + const struct scmi_desc *tdesc = trans->data; + + if ((init && !tdesc->transport_init) || + (!init && !tdesc->transport_exit)) + continue; + + if (init) + ret = tdesc->transport_init(); + else + tdesc->transport_exit(); + + if (ret) { + pr_err("SCMI transport %s FAILED initialization!\n", + trans->compatible); + break; + } + } + + return ret; +} + +static int __init scmi_transports_init(void) +{ + return __scmi_transports_setup(true); +} + +static void __exit scmi_transports_exit(void) +{ + __scmi_transports_setup(false); +} + +static int __init scmi_driver_init(void) +{ + int ret; + + /* Bail out if no SCMI transport was configured */ + if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT))) + return -EINVAL; + + scmi_bus_init(); + + /* Initialize any compiled-in transport which provided an init/exit */ + ret = scmi_transports_init(); + if (ret) + return ret; + + scmi_base_register(); + + scmi_clock_register(); + scmi_perf_register(); + scmi_power_register(); + scmi_reset_register(); + scmi_sensors_register(); + scmi_voltage_register(); + scmi_system_register(); + scmi_powercap_register(); + + return platform_driver_register(&scmi_driver); +} +subsys_initcall(scmi_driver_init); + +static void __exit scmi_driver_exit(void) +{ + scmi_base_unregister(); + + scmi_clock_unregister(); + scmi_perf_unregister(); + scmi_power_unregister(); + scmi_reset_unregister(); + scmi_sensors_unregister(); + scmi_voltage_unregister(); + scmi_system_unregister(); + scmi_powercap_unregister(); + + scmi_bus_exit(); + + scmi_transports_exit(); + + platform_driver_unregister(&scmi_driver); +} +module_exit(scmi_driver_exit); + +MODULE_ALIAS("platform:arm-scmi"); +MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); +MODULE_DESCRIPTION("ARM SCMI protocol driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c new file mode 100644 index 000000000..81c902672 --- /dev/null +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message Mailbox Transport + * driver. + * + * Copyright (C) 2019 ARM Ltd. 
+ */ + +#include <linux/err.h> +#include <linux/device.h> +#include <linux/mailbox_client.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> + +#include "common.h" + +/** + * struct scmi_mailbox - Structure representing a SCMI mailbox transport + * + * @cl: Mailbox Client + * @chan: Transmit/Receive mailbox channel + * @cinfo: SCMI channel info + * @shmem: Transmit/Receive shared memory area + */ +struct scmi_mailbox { + struct mbox_client cl; + struct mbox_chan *chan; + struct scmi_chan_info *cinfo; + struct scmi_shared_mem __iomem *shmem; +}; + +#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl) + +static void tx_prepare(struct mbox_client *cl, void *m) +{ + struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); + + shmem_tx_prepare(smbox->shmem, m, smbox->cinfo); +} + +static void rx_callback(struct mbox_client *cl, void *m) +{ + struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); + + /* + * An A2P IRQ is NOT valid when received while the platform still has + * the ownership of the channel, because the platform at first releases + * the SMT channel and then sends the completion interrupt. + * + * This addresses a possible race condition in which a spurious IRQ from + * a previous timed-out reply which arrived late could be wrongly + * associated with the next pending transaction. + */ + if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) { + dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n"); + return; + } + + scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL); +} + +static bool mailbox_chan_available(struct device *dev, int idx) +{ + return !of_parse_phandle_with_args(dev->of_node, "mboxes", + "#mbox-cells", idx, NULL); +} + +static int mailbox_chan_validate(struct device *cdev) +{ + int num_mb, num_sh, ret = 0; + struct device_node *np = cdev->of_node; + + num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); + num_sh = of_count_phandle_with_args(np, "shmem", NULL); + /* Bail out if mboxes and shmem descriptors are inconsistent */ + if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) { + dev_warn(cdev, "Invalid channel descriptor for '%s'\n", + of_node_full_name(np)); + return -EINVAL; + } + + if (num_sh > 1) { + struct device_node *np_tx, *np_rx; + + np_tx = of_parse_phandle(np, "shmem", 0); + np_rx = of_parse_phandle(np, "shmem", 1); + /* SCMI Tx and Rx shared mem areas have to be distinct */ + if (!np_tx || !np_rx || np_tx == np_rx) { + dev_warn(cdev, "Invalid shmem descriptor for '%s'\n", + of_node_full_name(np)); + ret = -EINVAL; + } + + of_node_put(np_tx); + of_node_put(np_rx); + } + + return ret; +} + +static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, + bool tx) +{ + const char *desc = tx ? "Tx" : "Rx"; + struct device *cdev = cinfo->dev; + struct scmi_mailbox *smbox; + struct device_node *shmem; + int ret, idx = tx ? 
0 : 1; + struct mbox_client *cl; + resource_size_t size; + struct resource res; + + ret = mailbox_chan_validate(cdev); + if (ret) + return ret; + + smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL); + if (!smbox) + return -ENOMEM; + + shmem = of_parse_phandle(cdev->of_node, "shmem", idx); + if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) { + of_node_put(shmem); + return -ENXIO; + } + + ret = of_address_to_resource(shmem, 0, &res); + of_node_put(shmem); + if (ret) { + dev_err(cdev, "failed to get SCMI %s shared memory\n", desc); + return ret; + } + + size = resource_size(&res); + smbox->shmem = devm_ioremap(dev, res.start, size); + if (!smbox->shmem) { + dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc); + return -EADDRNOTAVAIL; + } + + cl = &smbox->cl; + cl->dev = cdev; + cl->tx_prepare = tx ? tx_prepare : NULL; + cl->rx_callback = rx_callback; + cl->tx_block = false; + cl->knows_txdone = tx; + + smbox->chan = mbox_request_channel(cl, tx ? 0 : 1); + if (IS_ERR(smbox->chan)) { + ret = PTR_ERR(smbox->chan); + if (ret != -EPROBE_DEFER) + dev_err(cdev, "failed to request SCMI %s mailbox\n", + tx ? "Tx" : "Rx"); + return ret; + } + + cinfo->transport_info = smbox; + smbox->cinfo = cinfo; + + return 0; +} + +static int mailbox_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_mailbox *smbox = cinfo->transport_info; + + if (smbox && !IS_ERR(smbox->chan)) { + mbox_free_channel(smbox->chan); + cinfo->transport_info = NULL; + smbox->chan = NULL; + smbox->cinfo = NULL; + } + + scmi_free_channel(cinfo, data, id); + + return 0; +} + +static int mailbox_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + int ret; + + ret = mbox_send_message(smbox->chan, xfer); + + /* mbox_send_message returns non-negative value on success, so reset */ + if (ret > 0) + ret = 0; + + return ret; +} + +static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + /* + * NOTE: we might prefer not to need the mailbox ticker to manage the + * transfer queueing since the protocol layer queues things by itself. + * Unfortunately, we have to kick the mailbox framework after we have + * received our message. 
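Because the client above sets knows_txdone, the mailbox framework will not advance its TX queue until the driver explicitly reports completion, which is what mailbox_mark_txdone() does further down via mbox_client_txdone(). The following is a bare-bones, hypothetical mailbox client sketch showing that contract with the generic mailbox client API; it is not part of this driver and all the demo_* names are invented.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>

/* Hypothetical client: acks TX completion itself once the reply is consumed. */
struct demo_client {
	struct mbox_client cl;
	struct mbox_chan *chan;
};

static void demo_rx_callback(struct mbox_client *cl, void *msg)
{
	struct demo_client *dc = container_of(cl, struct demo_client, cl);

	/* Consume the reply here, then let the framework advance its queue. */
	dev_dbg(cl->dev, "reply received\n");
	mbox_client_txdone(dc->chan, 0);
}

static int demo_client_init(struct device *dev, struct demo_client *dc)
{
	dc->cl.dev = dev;
	dc->cl.rx_callback = demo_rx_callback;
	dc->cl.tx_block = false;
	dc->cl.knows_txdone = true;	/* we will call mbox_client_txdone() */

	dc->chan = mbox_request_channel(&dc->cl, 0);
	return PTR_ERR_OR_ZERO(dc->chan);
}

static int demo_client_send(struct demo_client *dc, void *msg)
{
	int ret = mbox_send_message(dc->chan, msg);

	/* A non-negative return is success; completion is signalled later. */
	return ret < 0 ? ret : 0;
}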
+ */ + mbox_client_txdone(smbox->chan, ret); +} + +static void mailbox_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + shmem_fetch_response(smbox->shmem, xfer); +} + +static void mailbox_fetch_notification(struct scmi_chan_info *cinfo, + size_t max_len, struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + shmem_fetch_notification(smbox->shmem, max_len, xfer); +} + +static void mailbox_clear_channel(struct scmi_chan_info *cinfo) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + shmem_clear_channel(smbox->shmem); +} + +static bool +mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + return shmem_poll_done(smbox->shmem, xfer); +} + +static const struct scmi_transport_ops scmi_mailbox_ops = { + .chan_available = mailbox_chan_available, + .chan_setup = mailbox_chan_setup, + .chan_free = mailbox_chan_free, + .send_message = mailbox_send_message, + .mark_txdone = mailbox_mark_txdone, + .fetch_response = mailbox_fetch_response, + .fetch_notification = mailbox_fetch_notification, + .clear_channel = mailbox_clear_channel, + .poll_done = mailbox_poll_done, +}; + +const struct scmi_desc scmi_mailbox_desc = { + .ops = &scmi_mailbox_ops, + .max_rx_timeout_ms = 30, /* We may increase this if required */ + .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ + .max_msg_size = 128, +}; diff --git a/drivers/firmware/arm_scmi/msg.c b/drivers/firmware/arm_scmi/msg.c new file mode 100644 index 000000000..d33a704e5 --- /dev/null +++ b/drivers/firmware/arm_scmi/msg.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * For transports using message passing. + * + * Derived from shm.c. + * + * Copyright (C) 2019-2021 ARM Ltd. + * Copyright (C) 2020-2021 OpenSynergy GmbH + */ + +#include <linux/types.h> + +#include "common.h" + +/* + * struct scmi_msg_payld - Transport SDU layout + * + * The SCMI specification requires all parameters, message headers, return + * arguments or any protocol data to be expressed in little endian format only. + */ +struct scmi_msg_payld { + __le32 msg_header; + __le32 msg_payload[]; +}; + +/** + * msg_command_size() - Actual size of transport SDU for command. + * + * @xfer: message which core has prepared for sending + * + * Return: transport SDU size. + */ +size_t msg_command_size(struct scmi_xfer *xfer) +{ + return sizeof(struct scmi_msg_payld) + xfer->tx.len; +} + +/** + * msg_response_size() - Maximum size of transport SDU for response. + * + * @xfer: message which core has prepared for sending + * + * Return: transport SDU size. + */ +size_t msg_response_size(struct scmi_xfer *xfer) +{ + return sizeof(struct scmi_msg_payld) + sizeof(__le32) + xfer->rx.len; +} + +/** + * msg_tx_prepare() - Set up transport SDU for command. + * + * @msg: transport SDU for command + * @xfer: message which is being sent + */ +void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer) +{ + msg->msg_header = cpu_to_le32(pack_scmi_header(&xfer->hdr)); + if (xfer->tx.buf) + memcpy(msg->msg_payload, xfer->tx.buf, xfer->tx.len); +} + +/** + * msg_read_header() - Read SCMI header from transport SDU. + * + * @msg: transport SDU + * + * Return: SCMI header + */ +u32 msg_read_header(struct scmi_msg_payld *msg) +{ + return le32_to_cpu(msg->msg_header); +} + +/** + * msg_fetch_response() - Fetch response SCMI payload from transport SDU. 
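The sizing helpers above encode a simple layout rule for message-passing transports: a command SDU is the 32-bit header plus the TX payload, while a response additionally reserves a 32-bit status word ahead of the RX payload. A tiny standalone model of that arithmetic (hypothetical names, plain C types) follows.

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the message-passing SDU: LE32 header + payload. */
struct msg_payld {
	uint32_t msg_header;	/* little-endian on the wire */
	uint8_t  msg_payload[];	/* command or response payload follows */
};

static size_t command_size(size_t tx_len)
{
	/* Header plus the command payload prepared by the core. */
	return sizeof(struct msg_payld) + tx_len;
}

static size_t response_size(size_t rx_len)
{
	/* Header, then a 32-bit status word, then the reply payload. */
	return sizeof(struct msg_payld) + sizeof(uint32_t) + rx_len;
}

int main(void)
{
	printf("cmd(8)=%zu rsp(8)=%zu\n", command_size(8), response_size(8));
	return 0;
}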
+ * + * @msg: transport SDU with response + * @len: transport SDU size + * @xfer: message being responded to + */ +void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, + struct scmi_xfer *xfer) +{ + size_t prefix_len = sizeof(*msg) + sizeof(msg->msg_payload[0]); + + xfer->hdr.status = le32_to_cpu(msg->msg_payload[0]); + xfer->rx.len = min_t(size_t, xfer->rx.len, + len >= prefix_len ? len - prefix_len : 0); + + /* Take a copy to the rx buffer.. */ + memcpy(xfer->rx.buf, &msg->msg_payload[1], xfer->rx.len); +} + +/** + * msg_fetch_notification() - Fetch notification payload from transport SDU. + * + * @msg: transport SDU with notification + * @len: transport SDU size + * @max_len: maximum SCMI payload size to fetch + * @xfer: notification message + */ +void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, + size_t max_len, struct scmi_xfer *xfer) +{ + xfer->rx.len = min_t(size_t, max_len, + len >= sizeof(*msg) ? len - sizeof(*msg) : 0); + + /* Take a copy to the rx buffer.. */ + memcpy(xfer->rx.buf, msg->msg_payload, xfer->rx.len); +} diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c new file mode 100644 index 000000000..0efd20cd9 --- /dev/null +++ b/drivers/firmware/arm_scmi/notify.c @@ -0,0 +1,1712 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Notification support + * + * Copyright (C) 2020-2021 ARM Ltd. + */ +/** + * DOC: Theory of operation + * + * SCMI Protocol specification allows the platform to signal events to + * interested agents via notification messages: this is an implementation + * of the dispatch and delivery of such notifications to the interested users + * inside the Linux kernel. + * + * An SCMI Notification core instance is initialized for each active platform + * instance identified by the means of the usual &struct scmi_handle. + * + * Each SCMI Protocol implementation, during its initialization, registers with + * this core its set of supported events using scmi_register_protocol_events(): + * all the needed descriptors are stored in the &struct registered_protocols and + * &struct registered_events arrays. + * + * Kernel users interested in some specific event can register their callbacks + * providing the usual notifier_block descriptor, since this core implements + * events' delivery using the standard Kernel notification chains machinery. + * + * Given the number of possible events defined by SCMI and the extensibility + * of the SCMI Protocol itself, the underlying notification chains are created + * and destroyed dynamically on demand depending on the number of users + * effectively registered for an event, so that no support structures or chains + * are allocated until at least one user has registered a notifier_block for + * such event. Similarly, events' generation itself is enabled at the platform + * level only after at least one user has registered, and it is shutdown after + * the last user for that event has gone. + * + * All users provided callbacks and allocated notification-chains are stored in + * the @registered_events_handlers hashtable. Callbacks' registration requests + * for still to be registered events are instead kept in the dedicated common + * hashtable @pending_events_handlers. + * + * An event is identified univocally by the tuple (proto_id, evt_id, src_id) + * and is served by its own dedicated notification chain; information contained + * in such tuples is used, in a few different ways, to generate the needed + * hash-keys. 
+ * + * Here proto_id and evt_id are simply the protocol_id and message_id numbers + * as described in the SCMI Protocol specification, while src_id represents an + * optional, protocol dependent, source identifier (like domain_id, perf_id + * or sensor_id and so forth). + * + * Upon reception of a notification message from the platform the SCMI RX ISR + * passes the received message payload and some ancillary information (including + * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which + * pushes the event-data itself on a protocol-dedicated kfifo queue for further + * deferred processing as specified in @scmi_events_dispatcher(). + * + * Each protocol has it own dedicated work_struct and worker which, once kicked + * by the ISR, takes care to empty its own dedicated queue, deliverying the + * queued items into the proper notification-chain: notifications processing can + * proceed concurrently on distinct workers only between events belonging to + * different protocols while delivery of events within the same protocol is + * still strictly sequentially ordered by time of arrival. + * + * Events' information is then extracted from the SCMI Notification messages and + * conveyed, converted into a custom per-event report struct, as the void *data + * param to the user callback provided by the registered notifier_block, so that + * from the user perspective his callback will look invoked like: + * + * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report) + * + */ + +#define dev_fmt(fmt) "SCMI Notifications - " fmt +#define pr_fmt(fmt) "SCMI Notifications - " fmt + +#include <linux/bitfield.h> +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/hashtable.h> +#include <linux/kernel.h> +#include <linux/ktime.h> +#include <linux/kfifo.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/refcount.h> +#include <linux/scmi_protocol.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include "common.h" +#include "notify.h" + +#define SCMI_MAX_PROTO 256 + +#define PROTO_ID_MASK GENMASK(31, 24) +#define EVT_ID_MASK GENMASK(23, 16) +#define SRC_ID_MASK GENMASK(15, 0) + +/* + * Builds an unsigned 32bit key from the given input tuple to be used + * as a key in hashtables. + */ +#define MAKE_HASH_KEY(p, e, s) \ + (FIELD_PREP(PROTO_ID_MASK, (p)) | \ + FIELD_PREP(EVT_ID_MASK, (e)) | \ + FIELD_PREP(SRC_ID_MASK, (s))) + +#define MAKE_ALL_SRCS_KEY(p, e) MAKE_HASH_KEY((p), (e), SRC_ID_MASK) + +/* + * Assumes that the stored obj includes its own hash-key in a field named 'key': + * with this simplification this macro can be equally used for all the objects' + * types hashed by this implementation. 
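From an SCMI driver's point of view, the callback shape described above is an ordinary notifier_block registered against a (protocol, event, source) triple through the handle's notification operations. The sketch below assumes the devm_event_notifier_register() operation and its signature as exposed by include/linux/scmi_protocol.h in contemporary kernels; the names, the sample IDs and the exact prototype are assumptions for illustration, not something defined in this file.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/scmi_protocol.h>

/* Hypothetical callback: @report points at the event-specific report struct. */
static int demo_scmi_event_cb(struct notifier_block *nb, unsigned long evt_id,
			      void *report)
{
	pr_debug("SCMI event %lu delivered\n", evt_id);
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_scmi_event_cb,
};

/* Assumed registration helper, as an SCMI driver might call from probe(). */
static int demo_register(struct scmi_device *sdev, u8 proto_id, u8 evt_id,
			 u32 src_id)
{
	return sdev->handle->notify_ops->devm_event_notifier_register(sdev,
				proto_id, evt_id, &src_id, &demo_nb);
}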
+ * + * @__ht: The hashtable name + * @__obj: A pointer to the object type to be retrieved from the hashtable; + * it will be used as a cursor while scanning the hastable and it will + * be possibly left as NULL when @__k is not found + * @__k: The key to search for + */ +#define KEY_FIND(__ht, __obj, __k) \ +({ \ + typeof(__k) k_ = __k; \ + typeof(__obj) obj_; \ + \ + hash_for_each_possible((__ht), obj_, hash, k_) \ + if (obj_->key == k_) \ + break; \ + __obj = obj_; \ +}) + +#define KEY_XTRACT_PROTO_ID(key) FIELD_GET(PROTO_ID_MASK, (key)) +#define KEY_XTRACT_EVT_ID(key) FIELD_GET(EVT_ID_MASK, (key)) +#define KEY_XTRACT_SRC_ID(key) FIELD_GET(SRC_ID_MASK, (key)) + +/* + * A set of macros used to access safely @registered_protocols and + * @registered_events arrays; these are fixed in size and each entry is possibly + * populated at protocols' registration time and then only read but NEVER + * modified or removed. + */ +#define SCMI_GET_PROTO(__ni, __pid) \ +({ \ + typeof(__ni) ni_ = __ni; \ + struct scmi_registered_events_desc *__pd = NULL; \ + \ + if (ni_) \ + __pd = READ_ONCE(ni_->registered_protocols[(__pid)]); \ + __pd; \ +}) + +#define SCMI_GET_REVT_FROM_PD(__pd, __eid) \ +({ \ + typeof(__pd) pd_ = __pd; \ + typeof(__eid) eid_ = __eid; \ + struct scmi_registered_event *__revt = NULL; \ + \ + if (pd_ && eid_ < pd_->num_events) \ + __revt = READ_ONCE(pd_->registered_events[eid_]); \ + __revt; \ +}) + +#define SCMI_GET_REVT(__ni, __pid, __eid) \ +({ \ + struct scmi_registered_event *__revt; \ + struct scmi_registered_events_desc *__pd; \ + \ + __pd = SCMI_GET_PROTO((__ni), (__pid)); \ + __revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid)); \ + __revt; \ +}) + +/* A couple of utility macros to limit cruft when calling protocols' helpers */ +#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state) \ +({ \ + typeof(revt) r = revt; \ + r->proto->ops->set_notify_enabled(r->proto->ph, \ + (eid), (sid), (state)); \ +}) + +#define REVT_NOTIFY_ENABLE(revt, eid, sid) \ + REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true) + +#define REVT_NOTIFY_DISABLE(revt, eid, sid) \ + REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false) + +#define REVT_FILL_REPORT(revt, ...) \ +({ \ + typeof(revt) r = revt; \ + r->proto->ops->fill_custom_report(r->proto->ph, \ + __VA_ARGS__); \ +}) + +#define SCMI_PENDING_HASH_SZ 4 +#define SCMI_REGISTERED_HASH_SZ 6 + +struct scmi_registered_events_desc; + +/** + * struct scmi_notify_instance - Represents an instance of the notification + * core + * @gid: GroupID used for devres + * @handle: A reference to the platform instance + * @init_work: A work item to perform final initializations of pending handlers + * @notify_wq: A reference to the allocated Kernel cmwq + * @pending_mtx: A mutex to protect @pending_events_handlers + * @registered_protocols: A statically allocated array containing pointers to + * all the registered protocol-level specific information + * related to events' handling + * @pending_events_handlers: An hashtable containing all pending events' + * handlers descriptors + * + * Each platform instance, represented by a handle, has its own instance of + * the notification subsystem represented by this structure. 
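Seen in isolation, the key layout above packs proto_id, evt_id and src_id into a single u32 so that one integer both indexes the hashtables and, with the source field saturated, doubles as the "all sources" catch-all key. A standalone equivalent of MAKE_HASH_KEY()/MAKE_ALL_SRCS_KEY() using plain shifts is shown below; the sample IDs are arbitrary.

#include <stdio.h>

/* Same field split as PROTO_ID_MASK / EVT_ID_MASK / SRC_ID_MASK above. */
#define PROTO_SHIFT	24
#define EVT_SHIFT	16
#define SRC_MASK	0xffffu

static unsigned int make_key(unsigned int proto, unsigned int evt,
			     unsigned int src)
{
	return (proto << PROTO_SHIFT) | (evt << EVT_SHIFT) | (src & SRC_MASK);
}

/* A saturated source field is reserved for the "all sources" handlers. */
static unsigned int make_all_srcs_key(unsigned int proto, unsigned int evt)
{
	return make_key(proto, evt, SRC_MASK);
}

int main(void)
{
	unsigned int key = make_key(0x13, 0x0, 2);	/* arbitrary sample IDs */

	printf("key=%08x proto=%02x evt=%02x src=%04x all=%08x\n",
	       key, key >> PROTO_SHIFT, (key >> EVT_SHIFT) & 0xff,
	       key & SRC_MASK, make_all_srcs_key(0x13, 0x0));
	return 0;
}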
+ */ +struct scmi_notify_instance { + void *gid; + struct scmi_handle *handle; + struct work_struct init_work; + struct workqueue_struct *notify_wq; + /* lock to protect pending_events_handlers */ + struct mutex pending_mtx; + struct scmi_registered_events_desc **registered_protocols; + DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ); +}; + +/** + * struct events_queue - Describes a queue and its associated worker + * @sz: Size in bytes of the related kfifo + * @kfifo: A dedicated Kernel kfifo descriptor + * @notify_work: A custom work item bound to this queue + * @wq: A reference to the associated workqueue + * + * Each protocol has its own dedicated events_queue descriptor. + */ +struct events_queue { + size_t sz; + struct kfifo kfifo; + struct work_struct notify_work; + struct workqueue_struct *wq; +}; + +/** + * struct scmi_event_header - A utility header + * @timestamp: The timestamp, in nanoseconds (boottime), which was associated + * to this event as soon as it entered the SCMI RX ISR + * @payld_sz: Effective size of the embedded message payload which follows + * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol) + * @payld: A reference to the embedded event payload + * + * This header is prepended to each received event message payload before + * queueing it on the related &struct events_queue. + */ +struct scmi_event_header { + ktime_t timestamp; + size_t payld_sz; + unsigned char evt_id; + unsigned char payld[]; +}; + +struct scmi_registered_event; + +/** + * struct scmi_registered_events_desc - Protocol Specific information + * @id: Protocol ID + * @ops: Protocol specific and event-related operations + * @equeue: The embedded per-protocol events_queue + * @ni: A reference to the initialized instance descriptor + * @eh: A reference to pre-allocated buffer to be used as a scratch area by the + * deferred worker when fetching data from the kfifo + * @eh_sz: Size of the pre-allocated buffer @eh + * @in_flight: A reference to an in flight &struct scmi_registered_event + * @num_events: Number of events in @registered_events + * @registered_events: A dynamically allocated array holding all the registered + * events' descriptors, whose fixed-size is determined at + * compile time. + * @registered_mtx: A mutex to protect @registered_events_handlers + * @ph: SCMI protocol handle reference + * @registered_events_handlers: An hashtable containing all events' handlers + * descriptors registered for this protocol + * + * All protocols that register at least one event have their protocol-specific + * information stored here, together with the embedded allocated events_queue. + * These descriptors are stored in the @registered_protocols array at protocol + * registration time. + * + * Once these descriptors are successfully registered, they are NEVER again + * removed or modified since protocols do not unregister ever, so that, once + * we safely grab a NON-NULL reference from the array we can keep it and use it. 
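The scmi_event_header defined above is the record prepended to every payload pushed into a protocol queue, and the eh/eh_sz scratch buffer in the descriptor is pre-sized for the largest such record. A small standalone analogue of building one header-plus-payload record is given here; names and values are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Standalone analogue: fixed header followed by an in-line event payload. */
struct evt_header {
	uint64_t timestamp_ns;
	size_t payld_sz;
	unsigned char evt_id;
	unsigned char payld[];
};

int main(void)
{
	const unsigned char payload[] = { 1, 2, 3, 4 };
	/* One scratch buffer sized for the largest payload ever declared. */
	struct evt_header *eh = malloc(sizeof(*eh) + sizeof(payload));

	if (!eh)
		return 1;
	eh->timestamp_ns = 123456789ull;
	eh->evt_id = 0;
	eh->payld_sz = sizeof(payload);
	memcpy(eh->payld, payload, eh->payld_sz);
	printf("record: evt=%d size=%zu bytes\n",
	       eh->evt_id, sizeof(*eh) + eh->payld_sz);
	free(eh);
	return 0;
}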
+ */ +struct scmi_registered_events_desc { + u8 id; + const struct scmi_event_ops *ops; + struct events_queue equeue; + struct scmi_notify_instance *ni; + struct scmi_event_header *eh; + size_t eh_sz; + void *in_flight; + int num_events; + struct scmi_registered_event **registered_events; + /* mutex to protect registered_events_handlers */ + struct mutex registered_mtx; + const struct scmi_protocol_handle *ph; + DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ); +}; + +/** + * struct scmi_registered_event - Event Specific Information + * @proto: A reference to the associated protocol descriptor + * @evt: A reference to the associated event descriptor (as provided at + * registration time) + * @report: A pre-allocated buffer used by the deferred worker to fill a + * customized event report + * @num_sources: The number of possible sources for this event as stated at + * events' registration time + * @sources: A reference to a dynamically allocated array used to refcount the + * events' enable requests for all the existing sources + * @sources_mtx: A mutex to serialize the access to @sources + * + * All registered events are represented by one of these structures that are + * stored in the @registered_events array at protocol registration time. + * + * Once these descriptors are successfully registered, they are NEVER again + * removed or modified since protocols do not unregister ever, so that once we + * safely grab a NON-NULL reference from the table we can keep it and use it. + */ +struct scmi_registered_event { + struct scmi_registered_events_desc *proto; + const struct scmi_event *evt; + void *report; + u32 num_sources; + refcount_t *sources; + /* locking to serialize the access to sources */ + struct mutex sources_mtx; +}; + +/** + * struct scmi_event_handler - Event handler information + * @key: The used hashkey + * @users: A reference count for number of active users for this handler + * @r_evt: A reference to the associated registered event; when this is NULL + * this handler is pending, which means that identifies a set of + * callbacks intended to be attached to an event which is still not + * known nor registered by any protocol at that point in time + * @chain: The notification chain dedicated to this specific event tuple + * @hash: The hlist_node used for collision handling + * @enabled: A boolean which records if event's generation has been already + * enabled for this handler as a whole + * + * This structure collects all the information needed to process a received + * event identified by the tuple (proto_id, evt_id, src_id). + * These descriptors are stored in a per-protocol @registered_events_handlers + * table using as a key a value derived from that tuple. 
+ */ +struct scmi_event_handler { + u32 key; + refcount_t users; + struct scmi_registered_event *r_evt; + struct blocking_notifier_head chain; + struct hlist_node hash; + bool enabled; +}; + +#define IS_HNDL_PENDING(hndl) (!(hndl)->r_evt) + +static struct scmi_event_handler * +scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key); +static void scmi_put_active_handler(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl); +static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl); + +/** + * scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it + * @ni: A reference to the notification instance to use + * @evt_key: The key to use to lookup the related notification chain + * @report: The customized event-specific report to pass down to the callbacks + * as their *data parameter. + */ +static inline void +scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni, + u32 evt_key, void *report) +{ + int ret; + struct scmi_event_handler *hndl; + + /* + * Here ensure the event handler cannot vanish while using it. + * It is legitimate, though, for an handler not to be found at all here, + * e.g. when it has been unregistered by the user after some events had + * already been queued. + */ + hndl = scmi_get_active_handler(ni, evt_key); + if (!hndl) + return; + + ret = blocking_notifier_call_chain(&hndl->chain, + KEY_XTRACT_EVT_ID(evt_key), + report); + /* Notifiers are NOT supposed to cut the chain ... */ + WARN_ON_ONCE(ret & NOTIFY_STOP_MASK); + + scmi_put_active_handler(ni, hndl); +} + +/** + * scmi_process_event_header() - Dequeue and process an event header + * @eq: The queue to use + * @pd: The protocol descriptor to use + * + * Read an event header from the protocol queue into the dedicated scratch + * buffer and looks for a matching registered event; in case an anomalously + * sized read is detected just flush the queue. + * + * Return: + * * a reference to the matching registered event when found + * * ERR_PTR(-EINVAL) when NO registered event could be found + * * NULL when the queue is empty + */ +static inline struct scmi_registered_event * +scmi_process_event_header(struct events_queue *eq, + struct scmi_registered_events_desc *pd) +{ + unsigned int outs; + struct scmi_registered_event *r_evt; + + outs = kfifo_out(&eq->kfifo, pd->eh, + sizeof(struct scmi_event_header)); + if (!outs) + return NULL; + if (outs != sizeof(struct scmi_event_header)) { + dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n"); + kfifo_reset_out(&eq->kfifo); + return NULL; + } + + r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id); + if (!r_evt) + r_evt = ERR_PTR(-EINVAL); + + return r_evt; +} + +/** + * scmi_process_event_payload() - Dequeue and process an event payload + * @eq: The queue to use + * @pd: The protocol descriptor to use + * @r_evt: The registered event descriptor to use + * + * Read an event payload from the protocol queue into the dedicated scratch + * buffer, fills a custom report and then look for matching event handlers and + * call them; skip any unknown event (as marked by scmi_process_event_header()) + * and in case an anomalously sized read is detected just flush the queue. 
+ * + * Return: False when the queue is empty + */ +static inline bool +scmi_process_event_payload(struct events_queue *eq, + struct scmi_registered_events_desc *pd, + struct scmi_registered_event *r_evt) +{ + u32 src_id, key; + unsigned int outs; + void *report = NULL; + + outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz); + if (!outs) + return false; + + /* Any in-flight event has now been officially processed */ + pd->in_flight = NULL; + + if (outs != pd->eh->payld_sz) { + dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n"); + kfifo_reset_out(&eq->kfifo); + return false; + } + + if (IS_ERR(r_evt)) { + dev_warn(pd->ni->handle->dev, + "SKIP UNKNOWN EVT - proto:%X evt:%d\n", + pd->id, pd->eh->evt_id); + return true; + } + + report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp, + pd->eh->payld, pd->eh->payld_sz, + r_evt->report, &src_id); + if (!report) { + dev_err(pd->ni->handle->dev, + "report not available - proto:%X evt:%d\n", + pd->id, pd->eh->evt_id); + return true; + } + + /* At first search for a generic ALL src_ids handler... */ + key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id); + scmi_lookup_and_call_event_chain(pd->ni, key, report); + + /* ...then search for any specific src_id */ + key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id); + scmi_lookup_and_call_event_chain(pd->ni, key, report); + + return true; +} + +/** + * scmi_events_dispatcher() - Common worker logic for all work items. + * @work: The work item to use, which is associated to a dedicated events_queue + * + * Logic: + * 1. dequeue one pending RX notification (queued in SCMI RX ISR context) + * 2. generate a custom event report from the received event message + * 3. lookup for any registered ALL_SRC_IDs handler: + * - > call the related notification chain passing in the report + * 4. lookup for any registered specific SRC_ID handler: + * - > call the related notification chain passing in the report + * + * Note that: + * * a dedicated per-protocol kfifo queue is used: in this way an anomalous + * flood of events cannot saturate other protocols' queues. + * * each per-protocol queue is associated to a distinct work_item, which + * means, in turn, that: + * + all protocols can process their dedicated queues concurrently + * (since notify_wq:max_active != 1) + * + anyway at most one worker instance is allowed to run on the same queue + * concurrently: this ensures that we can have only one concurrent + * reader/writer on the associated kfifo, so that we can use it lock-less + * + * Context: Process context. + */ +static void scmi_events_dispatcher(struct work_struct *work) +{ + struct events_queue *eq; + struct scmi_registered_events_desc *pd; + struct scmi_registered_event *r_evt; + + eq = container_of(work, struct events_queue, notify_work); + pd = container_of(eq, struct scmi_registered_events_desc, equeue); + /* + * In order to keep the queue lock-less and the number of memcopies + * to the bare minimum needed, the dispatcher accounts for the + * possibility of per-protocol in-flight events: i.e. an event whose + * reception could end up being split across two subsequent runs of this + * worker, first the header, then the payload. 
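Since the producer pushes header and payload with two separate kfifo_in() calls, one run of the worker can legitimately find the header but not yet the payload; in_flight records the half-read event so the next pass completes it. The toy model below reproduces that two-pass consumption with a plain byte buffer instead of a kfifo; everything in it is hypothetical and deliberately ignores wrap-around and locking.

#include <stdio.h>
#include <string.h>

/* Toy byte FIFO standing in for the per-protocol kfifo (no wrap handling). */
static unsigned char fifo[256];
static size_t head, tail;			/* write / read positions */

static void fifo_in(const void *buf, size_t len)
{
	memcpy(&fifo[head], buf, len);
	head += len;
}

static size_t fifo_out(void *buf, size_t len)
{
	if (head - tail < len)
		return 0;			/* not enough queued yet */
	memcpy(buf, &fifo[tail], len);
	tail += len;
	return len;
}

struct evt_header { unsigned int evt_id; size_t payld_sz; };

int main(void)
{
	struct evt_header eh = { .evt_id = 3, .payld_sz = 4 }, cur;
	unsigned char payld[8];
	int in_flight = 0;

	fifo_in(&eh, sizeof(eh));		/* producer: header only, so far */

	/* Worker pass 1: header dequeued, payload not available yet. */
	if (fifo_out(&cur, sizeof(cur)))
		in_flight = 1;
	printf("pass1: in_flight=%d\n", in_flight);

	fifo_in("\xde\xad\xbe\xef", 4);		/* producer completes the event */

	/* Worker pass 2: payload for the in-flight header is now readable. */
	if (in_flight && fifo_out(payld, cur.payld_sz)) {
		printf("pass2: delivered evt %u (%zu bytes)\n",
		       cur.evt_id, cur.payld_sz);
		in_flight = 0;
	}
	return 0;
}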
+ */ + do { + if (!pd->in_flight) { + r_evt = scmi_process_event_header(eq, pd); + if (!r_evt) + break; + pd->in_flight = r_evt; + } else { + r_evt = pd->in_flight; + } + } while (scmi_process_event_payload(eq, pd, r_evt)); +} + +/** + * scmi_notify() - Queues a notification for further deferred processing + * @handle: The handle identifying the platform instance from which the + * dispatched event is generated + * @proto_id: Protocol ID + * @evt_id: Event ID (msgID) + * @buf: Event Message Payload (without the header) + * @len: Event Message Payload size + * @ts: RX Timestamp in nanoseconds (boottime) + * + * Context: Called in interrupt context to queue a received event for + * deferred processing. + * + * Return: 0 on Success + */ +int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id, + const void *buf, size_t len, ktime_t ts) +{ + struct scmi_registered_event *r_evt; + struct scmi_event_header eh; + struct scmi_notify_instance *ni; + + ni = scmi_notification_instance_data_get(handle); + if (!ni) + return 0; + + r_evt = SCMI_GET_REVT(ni, proto_id, evt_id); + if (!r_evt) + return -EINVAL; + + if (len > r_evt->evt->max_payld_sz) { + dev_err(handle->dev, "discard badly sized message\n"); + return -EINVAL; + } + if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) { + dev_warn(handle->dev, + "queue full, dropping proto_id:%d evt_id:%d ts:%lld\n", + proto_id, evt_id, ktime_to_ns(ts)); + return -ENOMEM; + } + + eh.timestamp = ts; + eh.evt_id = evt_id; + eh.payld_sz = len; + /* + * Header and payload are enqueued with two distinct kfifo_in() (so non + * atomic), but this situation is handled properly on the consumer side + * with in-flight events tracking. + */ + kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh)); + kfifo_in(&r_evt->proto->equeue.kfifo, buf, len); + /* + * Don't care about return value here since we just want to ensure that + * a work is queued all the times whenever some items have been pushed + * on the kfifo: + * - if work was already queued it will simply fail to queue a new one + * since it is not needed + * - if work was not queued already it will be now, even in case work + * was in fact already running: this behavior avoids any possible race + * when this function pushes new items onto the kfifos after the + * related executing worker had already determined the kfifo to be + * empty and it was terminating. + */ + queue_work(r_evt->proto->equeue.wq, + &r_evt->proto->equeue.notify_work); + + return 0; +} + +/** + * scmi_kfifo_free() - Devres action helper to free the kfifo + * @kfifo: The kfifo to free + */ +static void scmi_kfifo_free(void *kfifo) +{ + kfifo_free((struct kfifo *)kfifo); +} + +/** + * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer + * @ni: A reference to the notification instance to use + * @equeue: The events_queue to initialize + * @sz: Size of the kfifo buffer to allocate + * + * Allocate a buffer for the kfifo and initialize it. 
+ * + * Return: 0 on Success + */ +static int scmi_initialize_events_queue(struct scmi_notify_instance *ni, + struct events_queue *equeue, size_t sz) +{ + int ret; + + if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL)) + return -ENOMEM; + /* Size could have been roundup to power-of-two */ + equeue->sz = kfifo_size(&equeue->kfifo); + + ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free, + &equeue->kfifo); + if (ret) + return ret; + + INIT_WORK(&equeue->notify_work, scmi_events_dispatcher); + equeue->wq = ni->notify_wq; + + return ret; +} + +/** + * scmi_allocate_registered_events_desc() - Allocate a registered events' + * descriptor + * @ni: A reference to the &struct scmi_notify_instance notification instance + * to use + * @proto_id: Protocol ID + * @queue_sz: Size of the associated queue to allocate + * @eh_sz: Size of the event header scratch area to pre-allocate + * @num_events: Number of events to support (size of @registered_events) + * @ops: Pointer to a struct holding references to protocol specific helpers + * needed during events handling + * + * It is supposed to be called only once for each protocol at protocol + * initialization time, so it warns if the requested protocol is found already + * registered. + * + * Return: The allocated and registered descriptor on Success + */ +static struct scmi_registered_events_desc * +scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni, + u8 proto_id, size_t queue_sz, size_t eh_sz, + int num_events, + const struct scmi_event_ops *ops) +{ + int ret; + struct scmi_registered_events_desc *pd; + + /* Ensure protocols are up to date */ + smp_rmb(); + if (WARN_ON(ni->registered_protocols[proto_id])) + return ERR_PTR(-EINVAL); + + pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + pd->id = proto_id; + pd->ops = ops; + pd->ni = ni; + + ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz); + if (ret) + return ERR_PTR(ret); + + pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL); + if (!pd->eh) + return ERR_PTR(-ENOMEM); + pd->eh_sz = eh_sz; + + pd->registered_events = devm_kcalloc(ni->handle->dev, num_events, + sizeof(char *), GFP_KERNEL); + if (!pd->registered_events) + return ERR_PTR(-ENOMEM); + pd->num_events = num_events; + + /* Initialize per protocol handlers table */ + mutex_init(&pd->registered_mtx); + hash_init(pd->registered_events_handlers); + + return pd; +} + +/** + * scmi_register_protocol_events() - Register Protocol Events with the core + * @handle: The handle identifying the platform instance against which the + * protocol's events are registered + * @proto_id: Protocol ID + * @ph: SCMI protocol handle. + * @ee: A structure describing the events supported by this protocol. + * + * Used by SCMI Protocols initialization code to register with the notification + * core the list of supported events and their descriptors: takes care to + * pre-allocate and store all needed descriptors, scratch buffers and event + * queues. 
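To put the registration interface in context, a protocol hands the core a static table of event descriptors plus a small ops structure, and the core sizes queues and the scratch header from the largest declared payload. The sketch below reconstructs a minimal, hypothetical caller from the fields and callback invocations visible in this file (evt->id/max_payld_sz/max_report_sz, ee->ops/evts/num_events/num_sources/queue_sz, set_notify_enabled(), fill_custom_report()); the exact structure definitions live in the driver's notify.h, so treat layouts and include paths as assumptions rather than a verbatim protocol implementation.

#include <linux/kernel.h>
#include <linux/ktime.h>

#include "notify.h"	/* assumed home of scmi_event / scmi_protocol_events */

/* Hypothetical event and report for a single-source demo protocol. */
enum { DEMO_EVT_STATE_CHANGED = 0x0 };

struct demo_report {
	ktime_t timestamp;
	u32 src_id;
	u32 state;
};

static const struct scmi_event demo_events[] = {
	{
		.id = DEMO_EVT_STATE_CHANGED,
		.max_payld_sz = sizeof(u64),	/* raw notification payload */
		.max_report_sz = sizeof(struct demo_report),
	},
};

static int demo_set_notify_enabled(const struct scmi_protocol_handle *ph,
				   u8 evt_id, u32 src_id, bool enable)
{
	/* Would issue the protocol's xxx_NOTIFY command to the platform. */
	return 0;
}

static void *demo_fill_custom_report(const struct scmi_protocol_handle *ph,
				     u8 evt_id, ktime_t ts, const void *payld,
				     size_t payld_sz, void *report, u32 *src_id)
{
	struct demo_report *r = report;

	r->timestamp = ts;
	r->src_id = 0;			/* parse the real source from @payld */
	*src_id = r->src_id;
	return r;
}

static const struct scmi_event_ops demo_event_ops = {
	.set_notify_enabled = demo_set_notify_enabled,
	.fill_custom_report = demo_fill_custom_report,
};

/* The core ends up feeding this to scmi_register_protocol_events(). */
static const struct scmi_protocol_events demo_protocol_events = {
	.queue_sz = 16 * sizeof(struct demo_report),	/* arbitrary */
	.ops = &demo_event_ops,
	.evts = demo_events,
	.num_events = ARRAY_SIZE(demo_events),
	.num_sources = 1,
};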
+ * + * Return: 0 on Success + */ +int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id, + const struct scmi_protocol_handle *ph, + const struct scmi_protocol_events *ee) +{ + int i; + unsigned int num_sources; + size_t payld_sz = 0; + struct scmi_registered_events_desc *pd; + struct scmi_notify_instance *ni; + const struct scmi_event *evt; + + if (!ee || !ee->ops || !ee->evts || !ph || + (!ee->num_sources && !ee->ops->get_num_sources)) + return -EINVAL; + + ni = scmi_notification_instance_data_get(handle); + if (!ni) + return -ENOMEM; + + /* num_sources cannot be <= 0 */ + if (ee->num_sources) { + num_sources = ee->num_sources; + } else { + int nsrc = ee->ops->get_num_sources(ph); + + if (nsrc <= 0) + return -EINVAL; + num_sources = nsrc; + } + + evt = ee->evts; + for (i = 0; i < ee->num_events; i++) + payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz); + payld_sz += sizeof(struct scmi_event_header); + + pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz, + payld_sz, ee->num_events, + ee->ops); + if (IS_ERR(pd)) + return PTR_ERR(pd); + + pd->ph = ph; + for (i = 0; i < ee->num_events; i++, evt++) { + struct scmi_registered_event *r_evt; + + r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt), + GFP_KERNEL); + if (!r_evt) + return -ENOMEM; + r_evt->proto = pd; + r_evt->evt = evt; + + r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources, + sizeof(refcount_t), GFP_KERNEL); + if (!r_evt->sources) + return -ENOMEM; + r_evt->num_sources = num_sources; + mutex_init(&r_evt->sources_mtx); + + r_evt->report = devm_kzalloc(ni->handle->dev, + evt->max_report_sz, GFP_KERNEL); + if (!r_evt->report) + return -ENOMEM; + + pd->registered_events[i] = r_evt; + /* Ensure events are updated */ + smp_wmb(); + dev_dbg(handle->dev, "registered event - %lX\n", + MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id)); + } + + /* Register protocol and events...it will never be removed */ + ni->registered_protocols[proto_id] = pd; + /* Ensure protocols are updated */ + smp_wmb(); + + /* + * Finalize any pending events' handler which could have been waiting + * for this protocol's events registration. + */ + schedule_work(&ni->init_work); + + return 0; +} + +/** + * scmi_deregister_protocol_events - Deregister protocol events with the core + * @handle: The handle identifying the platform instance against which the + * protocol's events are registered + * @proto_id: Protocol ID + */ +void scmi_deregister_protocol_events(const struct scmi_handle *handle, + u8 proto_id) +{ + struct scmi_notify_instance *ni; + struct scmi_registered_events_desc *pd; + + ni = scmi_notification_instance_data_get(handle); + if (!ni) + return; + + pd = ni->registered_protocols[proto_id]; + if (!pd) + return; + + ni->registered_protocols[proto_id] = NULL; + /* Ensure protocols are updated */ + smp_wmb(); + + cancel_work_sync(&pd->equeue.notify_work); +} + +/** + * scmi_allocate_event_handler() - Allocate Event handler + * @ni: A reference to the notification instance to use + * @evt_key: 32bit key uniquely bind to the event identified by the tuple + * (proto_id, evt_id, src_id) + * + * Allocate an event handler and related notification chain associated with + * the provided event handler key. + * Note that, at this point, a related registered_event is still to be + * associated to this handler descriptor (hndl->r_evt == NULL), so the handler + * is initialized as pending. + * + * Context: Assumes to be called with @pending_mtx already acquired. 
+ * Return: the freshly allocated structure on Success + */ +static struct scmi_event_handler * +scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key) +{ + struct scmi_event_handler *hndl; + + hndl = kzalloc(sizeof(*hndl), GFP_KERNEL); + if (!hndl) + return NULL; + hndl->key = evt_key; + BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain); + refcount_set(&hndl->users, 1); + /* New handlers are created pending */ + hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key); + + return hndl; +} + +/** + * scmi_free_event_handler() - Free the provided Event handler + * @hndl: The event handler structure to free + * + * Context: Assumes to be called with proper locking acquired depending + * on the situation. + */ +static void scmi_free_event_handler(struct scmi_event_handler *hndl) +{ + hash_del(&hndl->hash); + kfree(hndl); +} + +/** + * scmi_bind_event_handler() - Helper to attempt binding an handler to an event + * @ni: A reference to the notification instance to use + * @hndl: The event handler to bind + * + * If an associated registered event is found, move the handler from the pending + * into the registered table. + * + * Context: Assumes to be called with @pending_mtx already acquired. + * + * Return: 0 on Success + */ +static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + struct scmi_registered_event *r_evt; + + r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key), + KEY_XTRACT_EVT_ID(hndl->key)); + if (!r_evt) + return -EINVAL; + + /* + * Remove from pending and insert into registered while getting hold + * of protocol instance. + */ + hash_del(&hndl->hash); + /* + * Acquire protocols only for NON pending handlers, so as NOT to trigger + * protocol initialization when a notifier is registered against a still + * not registered protocol, since it would make little sense to force init + * protocols for which still no SCMI driver user exists: they wouldn't + * emit any event anyway till some SCMI driver starts using it. + */ + scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key)); + hndl->r_evt = r_evt; + + mutex_lock(&r_evt->proto->registered_mtx); + hash_add(r_evt->proto->registered_events_handlers, + &hndl->hash, hndl->key); + mutex_unlock(&r_evt->proto->registered_mtx); + + return 0; +} + +/** + * scmi_valid_pending_handler() - Helper to check pending status of handlers + * @ni: A reference to the notification instance to use + * @hndl: The event handler to check + * + * An handler is considered pending when its r_evt == NULL, because the related + * event was still unknown at handler's registration time; anyway, since all + * protocols register their supported events once for all at protocols' + * initialization time, a pending handler cannot be considered valid anymore if + * the underlying event (which it is waiting for), belongs to an already + * initialized and registered protocol. 
+ * + * Return: 0 on Success + */ +static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + struct scmi_registered_events_desc *pd; + + if (!IS_HNDL_PENDING(hndl)) + return -EINVAL; + + pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key)); + if (pd) + return -EINVAL; + + return 0; +} + +/** + * scmi_register_event_handler() - Register whenever possible an Event handler + * @ni: A reference to the notification instance to use + * @hndl: The event handler to register + * + * At first try to bind an event handler to its associated event, then check if + * it was at least a valid pending handler: if it was not bound nor valid return + * false. + * + * Valid pending incomplete bindings will be periodically retried by a dedicated + * worker which is kicked each time a new protocol completes its own + * registration phase. + * + * Context: Assumes to be called with @pending_mtx acquired. + * + * Return: 0 on Success + */ +static int scmi_register_event_handler(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + int ret; + + ret = scmi_bind_event_handler(ni, hndl); + if (!ret) { + dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n", + hndl->key); + } else { + ret = scmi_valid_pending_handler(ni, hndl); + if (!ret) + dev_dbg(ni->handle->dev, + "registered PENDING handler - key:%X\n", + hndl->key); + } + + return ret; +} + +/** + * __scmi_event_handler_get_ops() - Utility to get or create an event handler + * @ni: A reference to the notification instance to use + * @evt_key: The event key to use + * @create: A boolean flag to specify if a handler must be created when + * not already existent + * + * Search for the desired handler matching the key in both the per-protocol + * registered table and the common pending table: + * * if found adjust users refcount + * * if not found and @create is true, create and register the new handler: + * handler could end up being registered as pending if no matching event + * could be found. + * + * An handler is guaranteed to reside in one and only one of the tables at + * any one time; to ensure this the whole search and create is performed + * holding the @pending_mtx lock, with @registered_mtx additionally acquired + * if needed. + * + * Note that when a nested acquisition of these mutexes is needed the locking + * order is always (same as in @init_work): + * 1. pending_mtx + * 2. registered_mtx + * + * Events generation is NOT enabled right after creation within this routine + * since at creation time we usually want to have all setup and ready before + * events really start flowing. + * + * Return: A properly refcounted handler on Success, NULL on Failure + */ +static inline struct scmi_event_handler * +__scmi_event_handler_get_ops(struct scmi_notify_instance *ni, + u32 evt_key, bool create) +{ + struct scmi_registered_event *r_evt; + struct scmi_event_handler *hndl = NULL; + + r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key), + KEY_XTRACT_EVT_ID(evt_key)); + + mutex_lock(&ni->pending_mtx); + /* Search registered events at first ... if possible at all */ + if (r_evt) { + mutex_lock(&r_evt->proto->registered_mtx); + hndl = KEY_FIND(r_evt->proto->registered_events_handlers, + hndl, evt_key); + if (hndl) + refcount_inc(&hndl->users); + mutex_unlock(&r_evt->proto->registered_mtx); + } + + /* ...then amongst pending. 
*/ + if (!hndl) { + hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key); + if (hndl) + refcount_inc(&hndl->users); + } + + /* Create if still not found and required */ + if (!hndl && create) { + hndl = scmi_allocate_event_handler(ni, evt_key); + if (hndl && scmi_register_event_handler(ni, hndl)) { + dev_dbg(ni->handle->dev, + "purging UNKNOWN handler - key:%X\n", + hndl->key); + /* this hndl can be only a pending one */ + scmi_put_handler_unlocked(ni, hndl); + hndl = NULL; + } + } + mutex_unlock(&ni->pending_mtx); + + return hndl; +} + +static struct scmi_event_handler * +scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key) +{ + return __scmi_event_handler_get_ops(ni, evt_key, false); +} + +static struct scmi_event_handler * +scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key) +{ + return __scmi_event_handler_get_ops(ni, evt_key, true); +} + +/** + * scmi_get_active_handler() - Helper to get active handlers only + * @ni: A reference to the notification instance to use + * @evt_key: The event key to use + * + * Search for the desired handler matching the key only in the per-protocol + * table of registered handlers: this is called only from the dispatching path + * so want to be as quick as possible and do not care about pending. + * + * Return: A properly refcounted active handler + */ +static struct scmi_event_handler * +scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key) +{ + struct scmi_registered_event *r_evt; + struct scmi_event_handler *hndl = NULL; + + r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key), + KEY_XTRACT_EVT_ID(evt_key)); + if (r_evt) { + mutex_lock(&r_evt->proto->registered_mtx); + hndl = KEY_FIND(r_evt->proto->registered_events_handlers, + hndl, evt_key); + if (hndl) + refcount_inc(&hndl->users); + mutex_unlock(&r_evt->proto->registered_mtx); + } + + return hndl; +} + +/** + * __scmi_enable_evt() - Enable/disable events generation + * @r_evt: The registered event to act upon + * @src_id: The src_id to act upon + * @enable: The action to perform: true->Enable, false->Disable + * + * Takes care of proper refcounting while performing enable/disable: handles + * the special case of ALL sources requests by itself. + * Returns successfully if at least one of the required src_id has been + * successfully enabled/disabled. + * + * Return: 0 on Success + */ +static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt, + u32 src_id, bool enable) +{ + int retvals = 0; + u32 num_sources; + refcount_t *sid; + + if (src_id == SRC_ID_MASK) { + src_id = 0; + num_sources = r_evt->num_sources; + } else if (src_id < r_evt->num_sources) { + num_sources = 1; + } else { + return -EINVAL; + } + + mutex_lock(&r_evt->sources_mtx); + if (enable) { + for (; num_sources; src_id++, num_sources--) { + int ret = 0; + + sid = &r_evt->sources[src_id]; + if (refcount_read(sid) == 0) { + ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id, + src_id); + if (!ret) + refcount_set(sid, 1); + } else { + refcount_inc(sid); + } + retvals += !ret; + } + } else { + for (; num_sources; src_id++, num_sources--) { + sid = &r_evt->sources[src_id]; + if (refcount_dec_and_test(sid)) + REVT_NOTIFY_DISABLE(r_evt, + r_evt->evt->id, src_id); + } + retvals = 1; + } + mutex_unlock(&r_evt->sources_mtx); + + return retvals ? 
0 : -EINVAL; +} + +static int scmi_enable_events(struct scmi_event_handler *hndl) +{ + int ret = 0; + + if (!hndl->enabled) { + ret = __scmi_enable_evt(hndl->r_evt, + KEY_XTRACT_SRC_ID(hndl->key), true); + if (!ret) + hndl->enabled = true; + } + + return ret; +} + +static int scmi_disable_events(struct scmi_event_handler *hndl) +{ + int ret = 0; + + if (hndl->enabled) { + ret = __scmi_enable_evt(hndl->r_evt, + KEY_XTRACT_SRC_ID(hndl->key), false); + if (!ret) + hndl->enabled = false; + } + + return ret; +} + +/** + * scmi_put_handler_unlocked() - Put an event handler + * @ni: A reference to the notification instance to use + * @hndl: The event handler to act upon + * + * After having got exclusive access to the registered handlers hashtable, + * update the refcount and if @hndl is no more in use by anyone: + * * ask for events' generation disabling + * * unregister and free the handler itself + * + * Context: Assumes all the proper locking has been managed by the caller. + * + * Return: True if handler was freed (users dropped to zero) + */ +static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + bool freed = false; + + if (refcount_dec_and_test(&hndl->users)) { + if (!IS_HNDL_PENDING(hndl)) + scmi_disable_events(hndl); + scmi_free_event_handler(hndl); + freed = true; + } + + return freed; +} + +static void scmi_put_handler(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + bool freed; + u8 protocol_id; + struct scmi_registered_event *r_evt = hndl->r_evt; + + mutex_lock(&ni->pending_mtx); + if (r_evt) { + protocol_id = r_evt->proto->id; + mutex_lock(&r_evt->proto->registered_mtx); + } + + freed = scmi_put_handler_unlocked(ni, hndl); + + if (r_evt) { + mutex_unlock(&r_evt->proto->registered_mtx); + /* + * Only registered handler acquired protocol; must be here + * released only AFTER unlocking registered_mtx, since + * releasing a protocol can trigger its de-initialization + * (ie. including r_evt and registered_mtx) + */ + if (freed) + scmi_protocol_release(ni->handle, protocol_id); + } + mutex_unlock(&ni->pending_mtx); +} + +static void scmi_put_active_handler(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + bool freed; + struct scmi_registered_event *r_evt = hndl->r_evt; + u8 protocol_id = r_evt->proto->id; + + mutex_lock(&r_evt->proto->registered_mtx); + freed = scmi_put_handler_unlocked(ni, hndl); + mutex_unlock(&r_evt->proto->registered_mtx); + if (freed) + scmi_protocol_release(ni->handle, protocol_id); +} + +/** + * scmi_event_handler_enable_events() - Enable events associated to an handler + * @hndl: The Event handler to act upon + * + * Return: 0 on Success + */ +static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl) +{ + if (scmi_enable_events(hndl)) { + pr_err("Failed to ENABLE events for key:%X !\n", hndl->key); + return -EINVAL; + } + + return 0; +} + +/** + * scmi_notifier_register() - Register a notifier_block for an event + * @handle: The handle identifying the platform instance against which the + * callback is registered + * @proto_id: Protocol ID + * @evt_id: Event ID + * @src_id: Source ID, when NULL register for events coming form ALL possible + * sources + * @nb: A standard notifier block to register for the specified event + * + * Generic helper to register a notifier_block against a protocol event. 
+ * + * A notifier_block @nb will be registered for each distinct event identified + * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain + * so that: + * + * (proto_X, evt_Y, src_Z) --> chain_X_Y_Z + * + * @src_id meaning is protocol specific and identifies the origin of the event + * (like domain_id, sensor_id and so forth). + * + * @src_id can be NULL to signify that the caller is interested in receiving + * notifications from ALL the available sources for that protocol OR simply that + * the protocol does not support distinct sources. + * + * As soon as one user for the specified tuple appears, an handler is created, + * and that specific event's generation is enabled at the platform level, unless + * an associated registered event is found missing, meaning that the needed + * protocol is still to be initialized and the handler has just been registered + * as still pending. + * + * Return: 0 on Success + */ +static int scmi_notifier_register(const struct scmi_handle *handle, + u8 proto_id, u8 evt_id, const u32 *src_id, + struct notifier_block *nb) +{ + int ret = 0; + u32 evt_key; + struct scmi_event_handler *hndl; + struct scmi_notify_instance *ni; + + ni = scmi_notification_instance_data_get(handle); + if (!ni) + return -ENODEV; + + evt_key = MAKE_HASH_KEY(proto_id, evt_id, + src_id ? *src_id : SRC_ID_MASK); + hndl = scmi_get_or_create_handler(ni, evt_key); + if (!hndl) + return -EINVAL; + + blocking_notifier_chain_register(&hndl->chain, nb); + + /* Enable events for not pending handlers */ + if (!IS_HNDL_PENDING(hndl)) { + ret = scmi_event_handler_enable_events(hndl); + if (ret) + scmi_put_handler(ni, hndl); + } + + return ret; +} + +/** + * scmi_notifier_unregister() - Unregister a notifier_block for an event + * @handle: The handle identifying the platform instance against which the + * callback is unregistered + * @proto_id: Protocol ID + * @evt_id: Event ID + * @src_id: Source ID + * @nb: The notifier_block to unregister + * + * Takes care to unregister the provided @nb from the notification chain + * associated to the specified event and, if there are no more users for the + * event handler, frees also the associated event handler structures. + * (this could possibly cause disabling of event's generation at platform level) + * + * Return: 0 on Success + */ +static int scmi_notifier_unregister(const struct scmi_handle *handle, + u8 proto_id, u8 evt_id, const u32 *src_id, + struct notifier_block *nb) +{ + u32 evt_key; + struct scmi_event_handler *hndl; + struct scmi_notify_instance *ni; + + ni = scmi_notification_instance_data_get(handle); + if (!ni) + return -ENODEV; + + evt_key = MAKE_HASH_KEY(proto_id, evt_id, + src_id ? *src_id : SRC_ID_MASK); + hndl = scmi_get_handler(ni, evt_key); + if (!hndl) + return -EINVAL; + + /* + * Note that this chain unregistration call is safe on its own + * being internally protected by an rwsem. + */ + blocking_notifier_chain_unregister(&hndl->chain, nb); + scmi_put_handler(ni, hndl); + + /* + * This balances the initial get issued in @scmi_notifier_register. + * If this notifier_block happened to be the last known user callback + * for this event, the handler is here freed and the event's generation + * stopped. + * + * Note that, an ongoing concurrent lookup on the delivery workqueue + * path could still hold the refcount to 1 even after this routine + * completes: in such a case it will be the final put on the delivery + * path which will finally free this unused handler. 
+ */ + scmi_put_handler(ni, hndl); + + return 0; +} + +struct scmi_notifier_devres { + const struct scmi_handle *handle; + u8 proto_id; + u8 evt_id; + u32 __src_id; + u32 *src_id; + struct notifier_block *nb; +}; + +static void scmi_devm_release_notifier(struct device *dev, void *res) +{ + struct scmi_notifier_devres *dres = res; + + scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id, + dres->src_id, dres->nb); +} + +/** + * scmi_devm_notifier_register() - Managed registration of a notifier_block + * for an event + * @sdev: A reference to an scmi_device whose embedded struct device is to + * be used for devres accounting. + * @proto_id: Protocol ID + * @evt_id: Event ID + * @src_id: Source ID, when NULL register for events coming form ALL possible + * sources + * @nb: A standard notifier block to register for the specified event + * + * Generic devres managed helper to register a notifier_block against a + * protocol event. + * + * Return: 0 on Success + */ +static int scmi_devm_notifier_register(struct scmi_device *sdev, + u8 proto_id, u8 evt_id, + const u32 *src_id, + struct notifier_block *nb) +{ + int ret; + struct scmi_notifier_devres *dres; + + dres = devres_alloc(scmi_devm_release_notifier, + sizeof(*dres), GFP_KERNEL); + if (!dres) + return -ENOMEM; + + ret = scmi_notifier_register(sdev->handle, proto_id, + evt_id, src_id, nb); + if (ret) { + devres_free(dres); + return ret; + } + + dres->handle = sdev->handle; + dres->proto_id = proto_id; + dres->evt_id = evt_id; + dres->nb = nb; + if (src_id) { + dres->__src_id = *src_id; + dres->src_id = &dres->__src_id; + } else { + dres->src_id = NULL; + } + devres_add(&sdev->dev, dres); + + return ret; +} + +static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) +{ + struct scmi_notifier_devres *dres = res; + struct scmi_notifier_devres *xres = data; + + if (WARN_ON(!dres || !xres)) + return 0; + + return dres->proto_id == xres->proto_id && + dres->evt_id == xres->evt_id && + dres->nb == xres->nb && + ((!dres->src_id && !xres->src_id) || + (dres->src_id && xres->src_id && + dres->__src_id == xres->__src_id)); +} + +/** + * scmi_devm_notifier_unregister() - Managed un-registration of a + * notifier_block for an event + * @sdev: A reference to an scmi_device whose embedded struct device is to + * be used for devres accounting. + * @proto_id: Protocol ID + * @evt_id: Event ID + * @src_id: Source ID, when NULL register for events coming form ALL possible + * sources + * @nb: A standard notifier block to register for the specified event + * + * Generic devres managed helper to explicitly un-register a notifier_block + * against a protocol event, which was previously registered using the above + * @scmi_devm_notifier_register. 
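+ *
+ * As a usage sketch only (the domain variable and notifier_block names
+ * below are hypothetical), an SCMI driver is expected to pair this helper
+ * with the registration one through the ops exposed on its handle:
+ *
+ *   sdev->handle->notify_ops->devm_event_notifier_register(sdev,
+ *           SCMI_PROTOCOL_PERF, SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
+ *           &domain_id, &my_nb);
+ *   ...
+ *   sdev->handle->notify_ops->devm_event_notifier_unregister(sdev,
+ *           SCMI_PROTOCOL_PERF, SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
+ *           &domain_id, &my_nb);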
+ * + * Return: 0 on Success + */ +static int scmi_devm_notifier_unregister(struct scmi_device *sdev, + u8 proto_id, u8 evt_id, + const u32 *src_id, + struct notifier_block *nb) +{ + int ret; + struct scmi_notifier_devres dres; + + dres.handle = sdev->handle; + dres.proto_id = proto_id; + dres.evt_id = evt_id; + if (src_id) { + dres.__src_id = *src_id; + dres.src_id = &dres.__src_id; + } else { + dres.src_id = NULL; + } + + ret = devres_release(&sdev->dev, scmi_devm_release_notifier, + scmi_devm_notifier_match, &dres); + + WARN_ON(ret); + + return ret; +} + +/** + * scmi_protocols_late_init() - Worker for late initialization + * @work: The work item to use associated to the proper SCMI instance + * + * This kicks in whenever a new protocol has completed its own registration via + * scmi_register_protocol_events(): it is in charge of scanning the table of + * pending handlers (registered by users while the related protocol was still + * not initialized) and finalizing their initialization whenever possible; + * invalid pending handlers are purged at this point in time. + */ +static void scmi_protocols_late_init(struct work_struct *work) +{ + int bkt; + struct scmi_event_handler *hndl; + struct scmi_notify_instance *ni; + struct hlist_node *tmp; + + ni = container_of(work, struct scmi_notify_instance, init_work); + + /* Ensure protocols and events are up to date */ + smp_rmb(); + + mutex_lock(&ni->pending_mtx); + hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) { + int ret; + + ret = scmi_bind_event_handler(ni, hndl); + if (!ret) { + dev_dbg(ni->handle->dev, + "finalized PENDING handler - key:%X\n", + hndl->key); + ret = scmi_event_handler_enable_events(hndl); + if (ret) { + dev_dbg(ni->handle->dev, + "purging INVALID handler - key:%X\n", + hndl->key); + scmi_put_active_handler(ni, hndl); + } + } else { + ret = scmi_valid_pending_handler(ni, hndl); + if (ret) { + dev_dbg(ni->handle->dev, + "purging PENDING handler - key:%X\n", + hndl->key); + /* this hndl can be only a pending one */ + scmi_put_handler_unlocked(ni, hndl); + } + } + } + mutex_unlock(&ni->pending_mtx); +} + +/* + * notify_ops are attached to the handle so that can be accessed + * directly from an scmi_driver to register its own notifiers. + */ +static const struct scmi_notify_ops notify_ops = { + .devm_event_notifier_register = scmi_devm_notifier_register, + .devm_event_notifier_unregister = scmi_devm_notifier_unregister, + .event_notifier_register = scmi_notifier_register, + .event_notifier_unregister = scmi_notifier_unregister, +}; + +/** + * scmi_notification_init() - Initializes Notification Core Support + * @handle: The handle identifying the platform instance to initialize + * + * This function lays out all the basic resources needed by the notification + * core instance identified by the provided handle: once done, all of the + * SCMI Protocols can register their events with the core during their own + * initializations. + * + * Note that failing to initialize the core notifications support does not + * cause the whole SCMI Protocols stack to fail its initialization. 
+ * + * SCMI Notification Initialization happens in 2 steps: + * * initialization: basic common allocations (this function) + * * registration: protocols asynchronously come into life and registers their + * own supported list of events with the core; this causes + * further per-protocol allocations + * + * Any user's callback registration attempt, referring a still not registered + * event, will be registered as pending and finalized later (if possible) + * by scmi_protocols_late_init() work. + * This allows for lazy initialization of SCMI Protocols due to late (or + * missing) SCMI drivers' modules loading. + * + * Return: 0 on Success + */ +int scmi_notification_init(struct scmi_handle *handle) +{ + void *gid; + struct scmi_notify_instance *ni; + + gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); + if (!gid) + return -ENOMEM; + + ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL); + if (!ni) + goto err; + + ni->gid = gid; + ni->handle = handle; + + ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO, + sizeof(char *), GFP_KERNEL); + if (!ni->registered_protocols) + goto err; + + ni->notify_wq = alloc_workqueue(dev_name(handle->dev), + WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, + 0); + if (!ni->notify_wq) + goto err; + + mutex_init(&ni->pending_mtx); + hash_init(ni->pending_events_handlers); + + INIT_WORK(&ni->init_work, scmi_protocols_late_init); + + scmi_notification_instance_data_set(handle, ni); + handle->notify_ops = ¬ify_ops; + /* Ensure handle is up to date */ + smp_wmb(); + + dev_info(handle->dev, "Core Enabled.\n"); + + devres_close_group(handle->dev, ni->gid); + + return 0; + +err: + dev_warn(handle->dev, "Initialization Failed.\n"); + devres_release_group(handle->dev, gid); + return -ENOMEM; +} + +/** + * scmi_notification_exit() - Shutdown and clean Notification core + * @handle: The handle identifying the platform instance to shutdown + */ +void scmi_notification_exit(struct scmi_handle *handle) +{ + struct scmi_notify_instance *ni; + + ni = scmi_notification_instance_data_get(handle); + if (!ni) + return; + scmi_notification_instance_data_set(handle, NULL); + + /* Destroy while letting pending work complete */ + destroy_workqueue(ni->notify_wq); + + devres_release_group(ni->handle->dev, ni->gid); +} diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h new file mode 100644 index 000000000..4e9b627ed --- /dev/null +++ b/drivers/firmware/arm_scmi/notify.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * System Control and Management Interface (SCMI) Message Protocol + * notification header file containing some definitions, structures + * and function prototypes related to SCMI Notification handling. + * + * Copyright (C) 2020-2021 ARM Ltd. + */ +#ifndef _SCMI_NOTIFY_H +#define _SCMI_NOTIFY_H + +#include <linux/device.h> +#include <linux/ktime.h> +#include <linux/types.h> + +#define SCMI_PROTO_QUEUE_SZ 4096 + +/** + * struct scmi_event - Describes an event to be supported + * @id: Event ID + * @max_payld_sz: Max possible size for the payload of a notification message + * @max_report_sz: Max possible size for the report of a notification message + * + * Each SCMI protocol, during its initialization phase, can describe the events + * it wishes to support in a few struct scmi_event and pass them to the core + * using scmi_register_protocol_events(). 
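+ *
+ * As an illustration only (the struct and event names below are
+ * hypothetical, not taken from any real protocol), such a description
+ * typically looks like:
+ *
+ *   static const struct scmi_event example_events[] = {
+ *           {
+ *                   .id = EXAMPLE_EVENT_ID,
+ *                   .max_payld_sz = sizeof(struct example_notify_payld),
+ *                   .max_report_sz = sizeof(struct example_report),
+ *           },
+ *   };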
+ */ +struct scmi_event { + u8 id; + size_t max_payld_sz; + size_t max_report_sz; +}; + +struct scmi_protocol_handle; + +/** + * struct scmi_event_ops - Protocol helpers called by the notification core. + * @get_num_sources: Returns the number of possible events' sources for this + * protocol + * @set_notify_enabled: Enable/disable the required evt_id/src_id notifications + * using the proper custom protocol commands. + * Return 0 on Success + * @fill_custom_report: fills a custom event report from the provided + * event message payld identifying the event + * specific src_id. + * Return NULL on failure otherwise @report now fully + * populated + * + * Context: Helpers described in &struct scmi_event_ops are called only in + * process context. + */ +struct scmi_event_ops { + int (*get_num_sources)(const struct scmi_protocol_handle *ph); + int (*set_notify_enabled)(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enabled); + void *(*fill_custom_report)(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id); +}; + +/** + * struct scmi_protocol_events - Per-protocol description of available events + * @queue_sz: Size in bytes of the per-protocol queue to use. + * @ops: Array of protocol-specific events operations. + * @evts: Array of supported protocol's events. + * @num_events: Number of supported protocol's events described in @evts. + * @num_sources: Number of protocol's sources, should be greater than 0; if not + * available at compile time, it will be provided at run-time via + * @get_num_sources. + */ +struct scmi_protocol_events { + size_t queue_sz; + const struct scmi_event_ops *ops; + const struct scmi_event *evts; + unsigned int num_events; + unsigned int num_sources; +}; + +int scmi_notification_init(struct scmi_handle *handle); +void scmi_notification_exit(struct scmi_handle *handle); +int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id, + const struct scmi_protocol_handle *ph, + const struct scmi_protocol_events *ee); +void scmi_deregister_protocol_events(const struct scmi_handle *handle, + u8 proto_id); +int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id, + const void *buf, size_t len, ktime_t ts); + +#endif /* _SCMI_NOTIFY_H */ diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c new file mode 100644 index 000000000..2a7aeab40 --- /dev/null +++ b/drivers/firmware/arm_scmi/optee.c @@ -0,0 +1,645 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019-2021 Linaro Ltd. + */ + +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include <linux/uuid.h> +#include <uapi/linux/tee.h> + +#include "common.h" + +#define SCMI_OPTEE_MAX_MSG_SIZE 128 + +enum scmi_optee_pta_cmd { + /* + * PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities + * + * [out] value[0].a: Capability bit mask (enum pta_scmi_caps) + * [out] value[0].b: Extended capabilities or 0 + */ + PTA_SCMI_CMD_CAPABILITIES = 0, + + /* + * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer + * + * [in] value[0].a: Channel handle + * + * Shared memory used for SCMI message/response exhange is expected + * already identified and bound to channel handle in both SCMI agent + * and SCMI server (OP-TEE) parts. 
+ * The memory uses SMT header to carry SCMI meta-data (protocol ID and
+ * protocol message ID).
+ */
+ PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1,
+
+ /*
+ * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message
+ *
+ * [in] value[0].a: Channel handle
+ * [in/out] memref[1]: Message/response buffer (SMT and SCMI payload)
+ *
+ * Shared memory used for SCMI message/response is an SMT buffer
+ * referenced by param[1]. It shall be 128 bytes large to fit the
+ * response payload whatever the message payload size.
+ * The memory uses SMT header to carry SCMI meta-data (protocol ID and
+ * protocol message ID).
+ */
+ PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2,
+
+ /*
+ * PTA_SCMI_CMD_GET_CHANNEL - Get channel handle
+ *
+ * SCMI shm information is 0 if the agent expects to use OP-TEE regular SHM
+ *
+ * [in] value[0].a: Channel identifier
+ * [out] value[0].a: Returned channel handle
+ * [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps)
+ */
+ PTA_SCMI_CMD_GET_CHANNEL = 3,
+
+ /*
+ * PTA_SCMI_CMD_PROCESS_MSG_CHANNEL - Process SCMI message in a MSG
+ * buffer pointed to by memref parameters
+ *
+ * [in] value[0].a: Channel handle
+ * [in] memref[1]: Message buffer (MSG and SCMI payload)
+ * [out] memref[2]: Response buffer (MSG and SCMI payload)
+ *
+ * Shared memories used for SCMI message/response are MSG buffers
+ * referenced by param[1] and param[2]. The MSG transport protocol
+ * uses a 32bit header to carry SCMI meta-data (protocol ID and
+ * protocol message ID) followed by the effective SCMI message
+ * payload.
+ */
+ PTA_SCMI_CMD_PROCESS_MSG_CHANNEL = 4,
+};
+
+/*
+ * OP-TEE SCMI service capabilities bit flags (32bit)
+ *
+ * PTA_SCMI_CAPS_SMT_HEADER
+ * When set, OP-TEE supports commands using the SMT header protocol (SCMI
+ * shmem) in shared memory buffers to carry SCMI protocol synchronisation
+ * information.
+ *
+ * PTA_SCMI_CAPS_MSG_HEADER
+ * When set, OP-TEE supports commands using the MSG header protocol in an
+ * OP-TEE shared memory to carry SCMI protocol synchronisation information
+ * and SCMI message payload.
+ */ +#define PTA_SCMI_CAPS_NONE 0 +#define PTA_SCMI_CAPS_SMT_HEADER BIT(0) +#define PTA_SCMI_CAPS_MSG_HEADER BIT(1) +#define PTA_SCMI_CAPS_MASK (PTA_SCMI_CAPS_SMT_HEADER | \ + PTA_SCMI_CAPS_MSG_HEADER) + +/** + * struct scmi_optee_channel - Description of an OP-TEE SCMI channel + * + * @channel_id: OP-TEE channel ID used for this transport + * @tee_session: TEE session identifier + * @caps: OP-TEE SCMI channel capabilities + * @rx_len: Response size + * @mu: Mutex protection on channel access + * @cinfo: SCMI channel information + * @shmem: Virtual base address of the shared memory + * @req: Shared memory protocol handle for SCMI request and synchronous response + * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem + * @link: Reference in agent's channel list + */ +struct scmi_optee_channel { + u32 channel_id; + u32 tee_session; + u32 caps; + u32 rx_len; + struct mutex mu; + struct scmi_chan_info *cinfo; + union { + struct scmi_shared_mem __iomem *shmem; + struct scmi_msg_payld *msg; + } req; + struct tee_shm *tee_shm; + struct list_head link; +}; + +/** + * struct scmi_optee_agent - OP-TEE transport private data + * + * @dev: Device used for communication with TEE + * @tee_ctx: TEE context used for communication + * @caps: Supported channel capabilities + * @mu: Mutex for protection of @channel_list + * @channel_list: List of all created channels for the agent + */ +struct scmi_optee_agent { + struct device *dev; + struct tee_context *tee_ctx; + u32 caps; + struct mutex mu; + struct list_head channel_list; +}; + +/* There can be only 1 SCMI service in OP-TEE we connect to */ +static struct scmi_optee_agent *scmi_optee_private; + +/* Forward reference to scmi_optee transport initialization */ +static int scmi_optee_init(void); + +/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */ +static int open_session(struct scmi_optee_agent *agent, u32 *tee_session) +{ + struct device *dev = agent->dev; + struct tee_client_device *scmi_pta = to_tee_client_device(dev); + struct tee_ioctl_open_session_arg arg = { }; + int ret; + + memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN); + arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL; + + ret = tee_client_open_session(agent->tee_ctx, &arg, NULL); + if (ret < 0 || arg.ret) { + dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret); + return -EOPNOTSUPP; + } + + *tee_session = arg.session; + + return 0; +} + +static void close_session(struct scmi_optee_agent *agent, u32 tee_session) +{ + tee_client_close_session(agent->tee_ctx, tee_session); +} + +static int get_capabilities(struct scmi_optee_agent *agent) +{ + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[1] = { }; + u32 caps; + u32 tee_session; + int ret; + + ret = open_session(agent, &tee_session); + if (ret) + return ret; + + arg.func = PTA_SCMI_CMD_CAPABILITIES; + arg.session = tee_session; + arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT; + + ret = tee_client_invoke_func(agent->tee_ctx, &arg, param); + + close_session(agent, tee_session); + + if (ret < 0 || arg.ret) { + dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret); + return -EOPNOTSUPP; + } + + caps = param[0].u.value.a; + + if (!(caps & (PTA_SCMI_CAPS_SMT_HEADER | PTA_SCMI_CAPS_MSG_HEADER))) { + dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT and MSG\n"); + return -EOPNOTSUPP; + } + + agent->caps = caps; + + return 0; +} + +static int get_channel(struct scmi_optee_channel *channel) +{ + struct device *dev = 
scmi_optee_private->dev; + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[1] = { }; + unsigned int caps = 0; + int ret; + + if (channel->tee_shm) + caps = PTA_SCMI_CAPS_MSG_HEADER; + else + caps = PTA_SCMI_CAPS_SMT_HEADER; + + arg.func = PTA_SCMI_CMD_GET_CHANNEL; + arg.session = channel->tee_session; + arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param[0].u.value.a = channel->channel_id; + param[0].u.value.b = caps; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + + if (ret || arg.ret) { + dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret); + return -EOPNOTSUPP; + } + + /* From now on use channel identifer provided by OP-TEE SCMI service */ + channel->channel_id = param[0].u.value.a; + channel->caps = caps; + + return 0; +} + +static int invoke_process_smt_channel(struct scmi_optee_channel *channel) +{ + struct tee_ioctl_invoke_arg arg = { + .func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL, + .session = channel->tee_session, + .num_params = 1, + }; + struct tee_param param[1] = { }; + int ret; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = channel->channel_id; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + if (ret < 0 || arg.ret) { + dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", + channel->channel_id, ret, arg.ret); + return -EIO; + } + + return 0; +} + +static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size) +{ + struct tee_ioctl_invoke_arg arg = { + .func = PTA_SCMI_CMD_PROCESS_MSG_CHANNEL, + .session = channel->tee_session, + .num_params = 3, + }; + struct tee_param param[3] = { }; + int ret; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = channel->channel_id; + + param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + param[1].u.memref.shm = channel->tee_shm; + param[1].u.memref.size = msg_size; + + param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT; + param[2].u.memref.shm = channel->tee_shm; + param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + if (ret < 0 || arg.ret) { + dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", + channel->channel_id, ret, arg.ret); + return -EIO; + } + + /* Save response size */ + channel->rx_len = param[2].u.memref.size; + + return 0; +} + +static int scmi_optee_link_supplier(struct device *dev) +{ + if (!scmi_optee_private) { + if (scmi_optee_init()) + dev_dbg(dev, "Optee bus not yet ready\n"); + + /* Wait for optee bus */ + return -EPROBE_DEFER; + } + + if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) { + dev_err(dev, "Adding link to supplier optee device failed\n"); + return -ECANCELED; + } + + return 0; +} + +static bool scmi_optee_chan_available(struct device *dev, int idx) +{ + u32 channel_id; + + return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id", + idx, &channel_id); +} + +static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + if (!channel->tee_shm) + shmem_clear_channel(channel->req.shmem); +} + +static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) +{ + const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; + void *shbuf; + + channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); + if 
(IS_ERR(channel->tee_shm)) { + dev_err(channel->cinfo->dev, "shmem allocation failed\n"); + return -ENOMEM; + } + + shbuf = tee_shm_get_va(channel->tee_shm, 0); + memset(shbuf, 0, msg_size); + channel->req.msg = shbuf; + channel->rx_len = msg_size; + + return 0; +} + +static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, + struct scmi_optee_channel *channel) +{ + struct device_node *np; + resource_size_t size; + struct resource res; + int ret; + + np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0); + if (!of_device_is_compatible(np, "arm,scmi-shmem")) { + ret = -ENXIO; + goto out; + } + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(dev, "Failed to get SCMI Tx shared memory\n"); + goto out; + } + + size = resource_size(&res); + + channel->req.shmem = devm_ioremap(dev, res.start, size); + if (!channel->req.shmem) { + dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n"); + ret = -EADDRNOTAVAIL; + goto out; + } + + ret = 0; + +out: + of_node_put(np); + + return ret; +} + +static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo, + struct scmi_optee_channel *channel) +{ + if (of_find_property(cinfo->dev->of_node, "shmem", NULL)) + return setup_static_shmem(dev, cinfo, channel); + else + return setup_dynamic_shmem(dev, channel); +} + +static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) +{ + struct scmi_optee_channel *channel; + uint32_t channel_id; + int ret; + + if (!tx) + return -ENODEV; + + channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL); + if (!channel) + return -ENOMEM; + + ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id", + 0, &channel_id); + if (ret) + return ret; + + cinfo->transport_info = channel; + channel->cinfo = cinfo; + channel->channel_id = channel_id; + mutex_init(&channel->mu); + + ret = setup_shmem(dev, cinfo, channel); + if (ret) + return ret; + + ret = open_session(scmi_optee_private, &channel->tee_session); + if (ret) + goto err_free_shm; + + ret = get_channel(channel); + if (ret) + goto err_close_sess; + + /* Enable polling */ + cinfo->no_completion_irq = true; + + mutex_lock(&scmi_optee_private->mu); + list_add(&channel->link, &scmi_optee_private->channel_list); + mutex_unlock(&scmi_optee_private->mu); + + return 0; + +err_close_sess: + close_session(scmi_optee_private, channel->tee_session); +err_free_shm: + if (channel->tee_shm) + tee_shm_free(channel->tee_shm); + + return ret; +} + +static int scmi_optee_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_optee_channel *channel = cinfo->transport_info; + + mutex_lock(&scmi_optee_private->mu); + list_del(&channel->link); + mutex_unlock(&scmi_optee_private->mu); + + close_session(scmi_optee_private, channel->tee_session); + + if (channel->tee_shm) { + tee_shm_free(channel->tee_shm); + channel->tee_shm = NULL; + } + + cinfo->transport_info = NULL; + channel->cinfo = NULL; + + scmi_free_channel(cinfo, data, id); + + return 0; +} + +static int scmi_optee_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + int ret; + + mutex_lock(&channel->mu); + + if (channel->tee_shm) { + msg_tx_prepare(channel->req.msg, xfer); + ret = invoke_process_msg_channel(channel, msg_command_size(xfer)); + } else { + shmem_tx_prepare(channel->req.shmem, xfer, cinfo); + ret = invoke_process_smt_channel(channel); + } + + if (ret) + mutex_unlock(&channel->mu); + + return ret; +} 
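+
+/*
+ * Note on locking: scmi_optee_send_message() returns with channel->mu
+ * still held on success and the core transport layer is assumed to
+ * release it through the mark_txdone() hook once the transaction is
+ * complete. A rough, illustrative view of that assumed core-side
+ * sequence (not actual core code):
+ *
+ *   ret = send_message(cinfo, xfer);      // takes channel->mu
+ *   if (!ret) {
+ *           // synchronous answer is ready on return for this transport
+ *           fetch_response(cinfo, xfer);  // reads req.msg or req.shmem
+ *           mark_txdone(cinfo, ret, xfer);  // releases channel->mu
+ *   }
+ */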
+ +static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + if (channel->tee_shm) + msg_fetch_response(channel->req.msg, channel->rx_len, xfer); + else + shmem_fetch_response(channel->req.shmem, xfer); +} + +static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + mutex_unlock(&channel->mu); +} + +static struct scmi_transport_ops scmi_optee_ops = { + .link_supplier = scmi_optee_link_supplier, + .chan_available = scmi_optee_chan_available, + .chan_setup = scmi_optee_chan_setup, + .chan_free = scmi_optee_chan_free, + .send_message = scmi_optee_send_message, + .mark_txdone = scmi_optee_mark_txdone, + .fetch_response = scmi_optee_fetch_response, + .clear_channel = scmi_optee_clear_channel, +}; + +static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) +{ + return ver->impl_id == TEE_IMPL_ID_OPTEE; +} + +static int scmi_optee_service_probe(struct device *dev) +{ + struct scmi_optee_agent *agent; + struct tee_context *tee_ctx; + int ret; + + /* Only one SCMI OP-TEE device allowed */ + if (scmi_optee_private) { + dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n"); + return -EBUSY; + } + + tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL); + if (IS_ERR(tee_ctx)) + return -ENODEV; + + agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL); + if (!agent) { + ret = -ENOMEM; + goto err; + } + + agent->dev = dev; + agent->tee_ctx = tee_ctx; + INIT_LIST_HEAD(&agent->channel_list); + mutex_init(&agent->mu); + + ret = get_capabilities(agent); + if (ret) + goto err; + + /* Ensure agent resources are all visible before scmi_optee_private is */ + smp_mb(); + scmi_optee_private = agent; + + return 0; + +err: + tee_client_close_context(tee_ctx); + + return ret; +} + +static int scmi_optee_service_remove(struct device *dev) +{ + struct scmi_optee_agent *agent = scmi_optee_private; + + if (!scmi_optee_private) + return -EINVAL; + + if (!list_empty(&scmi_optee_private->channel_list)) + return -EBUSY; + + /* Ensure cleared reference is visible before resources are released */ + smp_store_mb(scmi_optee_private, NULL); + + tee_client_close_context(agent->tee_ctx); + + return 0; +} + +static const struct tee_client_device_id scmi_optee_service_id[] = { + { + UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e, + 0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99) + }, + { } +}; + +MODULE_DEVICE_TABLE(tee, scmi_optee_service_id); + +static struct tee_client_driver scmi_optee_driver = { + .id_table = scmi_optee_service_id, + .driver = { + .name = "scmi-optee", + .bus = &tee_bus_type, + .probe = scmi_optee_service_probe, + .remove = scmi_optee_service_remove, + }, +}; + +static int scmi_optee_init(void) +{ + return driver_register(&scmi_optee_driver.driver); +} + +static void scmi_optee_exit(void) +{ + if (scmi_optee_private) + driver_unregister(&scmi_optee_driver.driver); +} + +const struct scmi_desc scmi_optee_desc = { + .transport_exit = scmi_optee_exit, + .ops = &scmi_optee_ops, + .max_rx_timeout_ms = 30, + .max_msg = 20, + .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE, + .sync_cmds_completed_on_ret = true, +}; diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c new file mode 100644 index 000000000..2775bcafe --- /dev/null +++ b/drivers/firmware/arm_scmi/perf.c @@ -0,0 +1,893 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* + * System Control and Management Interface (SCMI) Performance Protocol + * + * Copyright (C) 2018-2022 ARM Ltd. + */ + +#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt + +#include <linux/bits.h> +#include <linux/of.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/scmi_protocol.h> +#include <linux/sort.h> + +#include <trace/events/scmi.h> + +#include "protocols.h" +#include "notify.h" + +#define MAX_OPPS 16 + +enum scmi_performance_protocol_cmd { + PERF_DOMAIN_ATTRIBUTES = 0x3, + PERF_DESCRIBE_LEVELS = 0x4, + PERF_LIMITS_SET = 0x5, + PERF_LIMITS_GET = 0x6, + PERF_LEVEL_SET = 0x7, + PERF_LEVEL_GET = 0x8, + PERF_NOTIFY_LIMITS = 0x9, + PERF_NOTIFY_LEVEL = 0xa, + PERF_DESCRIBE_FASTCHANNEL = 0xb, + PERF_DOMAIN_NAME_GET = 0xc, +}; + +enum { + PERF_FC_LEVEL, + PERF_FC_LIMIT, + PERF_FC_MAX, +}; + +struct scmi_opp { + u32 perf; + u32 power; + u32 trans_latency_us; +}; + +struct scmi_msg_resp_perf_attributes { + __le16 num_domains; + __le16 flags; +#define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0)) +#define POWER_SCALE_IN_MICROWATT(x) ((x) & BIT(1)) + __le32 stats_addr_low; + __le32 stats_addr_high; + __le32 stats_size; +}; + +struct scmi_msg_resp_perf_domain_attributes { + __le32 flags; +#define SUPPORTS_SET_LIMITS(x) ((x) & BIT(31)) +#define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30)) +#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29)) +#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28)) +#define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26)) + __le32 rate_limit_us; + __le32 sustained_freq_khz; + __le32 sustained_perf_level; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; +}; + +struct scmi_msg_perf_describe_levels { + __le32 domain; + __le32 level_index; +}; + +struct scmi_perf_set_limits { + __le32 domain; + __le32 max_level; + __le32 min_level; +}; + +struct scmi_perf_get_limits { + __le32 max_level; + __le32 min_level; +}; + +struct scmi_perf_set_level { + __le32 domain; + __le32 level; +}; + +struct scmi_perf_notify_level_or_limits { + __le32 domain; + __le32 notify_enable; +}; + +struct scmi_perf_limits_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 range_max; + __le32 range_min; +}; + +struct scmi_perf_level_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 performance_level; +}; + +struct scmi_msg_resp_perf_describe_levels { + __le16 num_returned; + __le16 num_remaining; + struct { + __le32 perf_val; + __le32 power; + __le16 transition_latency_us; + __le16 reserved; + } opp[]; +}; + +struct perf_dom_info { + bool set_limits; + bool set_perf; + bool perf_limit_notify; + bool perf_level_notify; + bool perf_fastchannels; + u32 opp_count; + u32 sustained_freq_khz; + u32 sustained_perf_level; + unsigned long mult_factor; + char name[SCMI_MAX_STR_SIZE]; + struct scmi_opp opp[MAX_OPPS]; + struct scmi_fc_info *fc_info; +}; + +struct scmi_perf_info { + u32 version; + u16 num_domains; + enum scmi_power_scale power_scale; + u64 stats_addr; + u32 stats_size; + struct perf_dom_info *dom_info; +}; + +static enum scmi_performance_protocol_cmd evt_2_cmd[] = { + PERF_NOTIFY_LIMITS, + PERF_NOTIFY_LEVEL, +}; + +static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_perf_info *pi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_perf_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = 
ph->xops->do_xfer(ph, t); + if (!ret) { + u16 flags = le16_to_cpu(attr->flags); + + pi->num_domains = le16_to_cpu(attr->num_domains); + + if (POWER_SCALE_IN_MILLIWATT(flags)) + pi->power_scale = SCMI_POWER_MILLIWATTS; + if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3) + if (POWER_SCALE_IN_MICROWATT(flags)) + pi->power_scale = SCMI_POWER_MICROWATTS; + + pi->stats_addr = le32_to_cpu(attr->stats_addr_low) | + (u64)le32_to_cpu(attr->stats_addr_high) << 32; + pi->stats_size = le32_to_cpu(attr->stats_size); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, + u32 domain, struct perf_dom_info *dom_info, + u32 version) +{ + int ret; + u32 flags; + struct scmi_xfer *t; + struct scmi_msg_resp_perf_domain_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES, + sizeof(domain), sizeof(*attr), &t); + if (ret) + return ret; + + put_unaligned_le32(domain, t->tx.buf); + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + flags = le32_to_cpu(attr->flags); + + dom_info->set_limits = SUPPORTS_SET_LIMITS(flags); + dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags); + dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags); + dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags); + dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags); + dom_info->sustained_freq_khz = + le32_to_cpu(attr->sustained_freq_khz); + dom_info->sustained_perf_level = + le32_to_cpu(attr->sustained_perf_level); + if (!dom_info->sustained_freq_khz || + !dom_info->sustained_perf_level) + /* CPUFreq converts to kHz, hence default 1000 */ + dom_info->mult_factor = 1000; + else + dom_info->mult_factor = + (dom_info->sustained_freq_khz * 1000UL) + / dom_info->sustained_perf_level; + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); + } + + ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(flags)) + ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, domain, + dom_info->name, SCMI_MAX_STR_SIZE); + + return ret; +} + +static int opp_cmp_func(const void *opp1, const void *opp2) +{ + const struct scmi_opp *t1 = opp1, *t2 = opp2; + + return t1->perf - t2->perf; +} + +struct scmi_perf_ipriv { + u32 domain; + struct perf_dom_info *perf_dom; +}; + +static void iter_perf_levels_prepare_message(void *message, + unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_perf_describe_levels *msg = message; + const struct scmi_perf_ipriv *p = priv; + + msg->domain = cpu_to_le32(p->domain); + /* Set the number of OPPs to be skipped/already read */ + msg->level_index = cpu_to_le32(desc_index); +} + +static int iter_perf_levels_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_msg_resp_perf_describe_levels *r = response; + + st->num_returned = le16_to_cpu(r->num_returned); + st->num_remaining = le16_to_cpu(r->num_remaining); + + return 0; +} + +static int +iter_perf_levels_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + struct scmi_opp *opp; + const struct scmi_msg_resp_perf_describe_levels *r = response; + struct scmi_perf_ipriv *p = priv; + + opp = &p->perf_dom->opp[st->desc_index + st->loop_idx]; + opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val); + opp->power = le32_to_cpu(r->opp[st->loop_idx].power); + opp->trans_latency_us = + le16_to_cpu(r->opp[st->loop_idx].transition_latency_us); + p->perf_dom->opp_count++; + + dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n", + opp->perf, opp->power, opp->trans_latency_us); + + return 0; +} + +static int +scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, + struct perf_dom_info *perf_dom) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_perf_levels_prepare_message, + .update_state = iter_perf_levels_update_state, + .process_response = iter_perf_levels_process_response, + }; + struct scmi_perf_ipriv ppriv = { + .domain = domain, + .perf_dom = perf_dom, + }; + + iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS, + PERF_DESCRIBE_LEVELS, + sizeof(struct scmi_msg_perf_describe_levels), + &ppriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + if (ret) + return ret; + + if (perf_dom->opp_count) + sort(perf_dom->opp, perf_dom->opp_count, + sizeof(struct scmi_opp), opp_cmp_func, NULL); + + return ret; +} + +static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph, + u32 domain, u32 max_perf, u32 min_perf) +{ + int ret; + struct scmi_xfer *t; + struct scmi_perf_set_limits *limits; + + ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET, + sizeof(*limits), 0, &t); + if (ret) + return ret; + + limits = t->tx.buf; + limits->domain = cpu_to_le32(domain); + limits->max_level = cpu_to_le32(max_perf); + limits->min_level = cpu_to_le32(min_perf); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static inline struct perf_dom_info * +scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain) +{ + struct scmi_perf_info *pi = ph->get_priv(ph); + + if (domain >= pi->num_domains) + return ERR_PTR(-EINVAL); + + return pi->dom_info + domain; +} + +static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph, + u32 domain, u32 max_perf, u32 
min_perf) +{ + struct scmi_perf_info *pi = ph->get_priv(ph); + struct perf_dom_info *dom; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf) + return -EINVAL; + + if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; + + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET, + domain, min_perf, max_perf); + iowrite32(max_perf, fci->set_addr); + iowrite32(min_perf, fci->set_addr + 4); + ph->hops->fastchannel_db_ring(fci->set_db); + return 0; + } + + return scmi_perf_mb_limits_set(ph, domain, max_perf, min_perf); +} + +static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph, + u32 domain, u32 *max_perf, u32 *min_perf) +{ + int ret; + struct scmi_xfer *t; + struct scmi_perf_get_limits *limits; + + ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET, + sizeof(__le32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(domain, t->tx.buf); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + limits = t->rx.buf; + + *max_perf = le32_to_cpu(limits->max_level); + *min_perf = le32_to_cpu(limits->min_level); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph, + u32 domain, u32 *max_perf, u32 *min_perf) +{ + struct perf_dom_info *dom; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; + + *max_perf = ioread32(fci->get_addr); + *min_perf = ioread32(fci->get_addr + 4); + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET, + domain, *min_perf, *max_perf); + return 0; + } + + return scmi_perf_mb_limits_get(ph, domain, max_perf, min_perf); +} + +static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph, + u32 domain, u32 level, bool poll) +{ + int ret; + struct scmi_xfer *t; + struct scmi_perf_set_level *lvl; + + ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t); + if (ret) + return ret; + + t->hdr.poll_completion = poll; + lvl = t->tx.buf; + lvl->domain = cpu_to_le32(domain); + lvl->level = cpu_to_le32(level); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_perf_level_set(const struct scmi_protocol_handle *ph, + u32 domain, u32 level, bool poll) +{ + struct perf_dom_info *dom; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL]; + + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET, + domain, level, 0); + iowrite32(level, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + return 0; + } + + return scmi_perf_mb_level_set(ph, domain, level, poll); +} + +static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph, + u32 domain, u32 *level, bool poll) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET, + sizeof(u32), sizeof(u32), &t); + if (ret) + return ret; + + t->hdr.poll_completion = poll; + put_unaligned_le32(domain, t->tx.buf); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *level = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_perf_level_get(const struct scmi_protocol_handle *ph, + u32 domain, u32 *level, bool 
poll) +{ + struct perf_dom_info *dom; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) { + *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET, + domain, *level, 0); + return 0; + } + + return scmi_perf_mb_level_get(ph, domain, level, poll); +} + +static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph, + u32 domain, int message_id, + bool enable) +{ + int ret; + struct scmi_xfer *t; + struct scmi_perf_notify_level_or_limits *notify; + + ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0; + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph, + u32 domain, struct scmi_fc_info **p_fc) +{ + struct scmi_fc_info *fc; + + fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL); + if (!fc) + return; + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LEVEL_SET, 4, domain, + &fc[PERF_FC_LEVEL].set_addr, + &fc[PERF_FC_LEVEL].set_db); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LEVEL_GET, 4, domain, + &fc[PERF_FC_LEVEL].get_addr, NULL); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LIMITS_SET, 8, domain, + &fc[PERF_FC_LIMIT].set_addr, + &fc[PERF_FC_LIMIT].set_db); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LIMITS_GET, 8, domain, + &fc[PERF_FC_LIMIT].get_addr, NULL); + + *p_fc = fc; +} + +/* Device specific ops */ +static int scmi_dev_domain_id(struct device *dev) +{ + struct of_phandle_args clkspec; + + if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells", + 0, &clkspec)) + return -EINVAL; + + return clkspec.args[0]; +} + +static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph, + struct device *dev) +{ + int idx, ret, domain; + unsigned long freq; + struct scmi_opp *opp; + struct perf_dom_info *dom; + + domain = scmi_dev_domain_id(dev); + if (domain < 0) + return -EINVAL; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) { + freq = opp->perf * dom->mult_factor; + + ret = dev_pm_opp_add(dev, freq, 0); + if (ret) { + dev_warn(dev, "failed to add opp %luHz\n", freq); + + while (idx-- > 0) { + freq = (--opp)->perf * dom->mult_factor; + dev_pm_opp_remove(dev, freq); + } + return ret; + } + } + return 0; +} + +static int +scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph, + struct device *dev) +{ + int domain; + struct perf_dom_info *dom; + + domain = scmi_dev_domain_id(dev); + if (domain < 0) + return -EINVAL; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + /* uS to nS */ + return dom->opp[dom->opp_count - 1].trans_latency_us * 1000; +} + +static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain, + unsigned long freq, bool poll) +{ + struct perf_dom_info *dom; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + return scmi_perf_level_set(ph, domain, freq / dom->mult_factor, poll); +} + +static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain, + unsigned long 
*freq, bool poll) +{ + int ret; + u32 level; + struct scmi_perf_info *pi = ph->get_priv(ph); + + ret = scmi_perf_level_get(ph, domain, &level, poll); + if (!ret) { + struct perf_dom_info *dom = pi->dom_info + domain; + + /* Note domain is validated implicitly by scmi_perf_level_get */ + *freq = level * dom->mult_factor; + } + + return ret; +} + +static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph, + u32 domain, unsigned long *freq, + unsigned long *power) +{ + struct perf_dom_info *dom; + unsigned long opp_freq; + int idx, ret = -EINVAL; + struct scmi_opp *opp; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) { + opp_freq = opp->perf * dom->mult_factor; + if (opp_freq < *freq) + continue; + + *freq = opp_freq; + *power = opp->power; + ret = 0; + break; + } + + return ret; +} + +static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph, + struct device *dev) +{ + int domain; + struct perf_dom_info *dom; + + domain = scmi_dev_domain_id(dev); + if (domain < 0) + return false; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return false; + + return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr; +} + +static enum scmi_power_scale +scmi_power_scale_get(const struct scmi_protocol_handle *ph) +{ + struct scmi_perf_info *pi = ph->get_priv(ph); + + return pi->power_scale; +} + +static const struct scmi_perf_proto_ops perf_proto_ops = { + .limits_set = scmi_perf_limits_set, + .limits_get = scmi_perf_limits_get, + .level_set = scmi_perf_level_set, + .level_get = scmi_perf_level_get, + .device_domain_id = scmi_dev_domain_id, + .transition_latency_get = scmi_dvfs_transition_latency_get, + .device_opps_add = scmi_dvfs_device_opps_add, + .freq_set = scmi_dvfs_freq_set, + .freq_get = scmi_dvfs_freq_get, + .est_power_get = scmi_dvfs_est_power_get, + .fast_switch_possible = scmi_fast_switch_possible, + .power_scale_get = scmi_power_scale_get, +}; + +static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + + if (evt_id >= ARRAY_SIZE(evt_2_cmd)) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + void *rep = NULL; + + switch (evt_id) { + case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED: + { + const struct scmi_perf_limits_notify_payld *p = payld; + struct scmi_perf_limits_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->range_max = le32_to_cpu(p->range_max); + r->range_min = le32_to_cpu(p->range_min); + *src_id = r->domain_id; + rep = r; + break; + } + case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED: + { + const struct scmi_perf_level_notify_payld *p = payld; + struct scmi_perf_level_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->performance_level = le32_to_cpu(p->performance_level); + *src_id = r->domain_id; + rep = r; + break; + } + default: + 
break; + } + + return rep; +} + +static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct scmi_perf_info *pi = ph->get_priv(ph); + + if (!pi) + return -EINVAL; + + return pi->num_domains; +} + +static const struct scmi_event perf_events[] = { + { + .id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED, + .max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld), + .max_report_sz = sizeof(struct scmi_perf_limits_report), + }, + { + .id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED, + .max_payld_sz = sizeof(struct scmi_perf_level_notify_payld), + .max_report_sz = sizeof(struct scmi_perf_level_report), + }, +}; + +static const struct scmi_event_ops perf_event_ops = { + .get_num_sources = scmi_perf_get_num_sources, + .set_notify_enabled = scmi_perf_set_notify_enabled, + .fill_custom_report = scmi_perf_fill_custom_report, +}; + +static const struct scmi_protocol_events perf_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &perf_event_ops, + .evts = perf_events, + .num_events = ARRAY_SIZE(perf_events), +}; + +static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph) +{ + int domain, ret; + u32 version; + struct scmi_perf_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Performance Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + pinfo->version = version; + + ret = scmi_perf_attributes_get(ph, pinfo); + if (ret) + return ret; + + pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->dom_info), GFP_KERNEL); + if (!pinfo->dom_info) + return -ENOMEM; + + for (domain = 0; domain < pinfo->num_domains; domain++) { + struct perf_dom_info *dom = pinfo->dom_info + domain; + + scmi_perf_domain_attributes_get(ph, domain, dom, version); + scmi_perf_describe_levels_get(ph, domain, dom); + + if (dom->perf_fastchannels) + scmi_perf_domain_init_fc(ph, domain, &dom->fc_info); + } + + return ph->set_priv(ph, pinfo); +} + +static const struct scmi_protocol scmi_perf = { + .id = SCMI_PROTOCOL_PERF, + .owner = THIS_MODULE, + .instance_init = &scmi_perf_protocol_init, + .ops = &perf_proto_ops, + .events = &perf_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf) diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c new file mode 100644 index 000000000..356e83631 --- /dev/null +++ b/drivers/firmware/arm_scmi/power.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Power Protocol + * + * Copyright (C) 2018-2022 ARM Ltd. 
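+ *
+ * Note that power state transitions are always requested synchronously in
+ * this implementation: the STATE_SET_ASYNC flag is defined below but never
+ * set when issuing POWER_STATE_SET.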
+ */ + +#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt + +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include "protocols.h" +#include "notify.h" + +enum scmi_power_protocol_cmd { + POWER_DOMAIN_ATTRIBUTES = 0x3, + POWER_STATE_SET = 0x4, + POWER_STATE_GET = 0x5, + POWER_STATE_NOTIFY = 0x6, + POWER_DOMAIN_NAME_GET = 0x8, +}; + +struct scmi_msg_resp_power_attributes { + __le16 num_domains; + __le16 reserved; + __le32 stats_addr_low; + __le32 stats_addr_high; + __le32 stats_size; +}; + +struct scmi_msg_resp_power_domain_attributes { + __le32 flags; +#define SUPPORTS_STATE_SET_NOTIFY(x) ((x) & BIT(31)) +#define SUPPORTS_STATE_SET_ASYNC(x) ((x) & BIT(30)) +#define SUPPORTS_STATE_SET_SYNC(x) ((x) & BIT(29)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(27)) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; +}; + +struct scmi_power_set_state { + __le32 flags; +#define STATE_SET_ASYNC BIT(0) + __le32 domain; + __le32 state; +}; + +struct scmi_power_state_notify { + __le32 domain; + __le32 notify_enable; +}; + +struct scmi_power_state_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power_state; +}; + +struct power_dom_info { + bool state_set_sync; + bool state_set_async; + bool state_set_notify; + char name[SCMI_MAX_STR_SIZE]; +}; + +struct scmi_power_info { + u32 version; + int num_domains; + u64 stats_addr; + u32 stats_size; + struct power_dom_info *dom_info; +}; + +static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_power_info *pi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_power_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, + 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + pi->num_domains = le16_to_cpu(attr->num_domains); + pi->stats_addr = le32_to_cpu(attr->stats_addr_low) | + (u64)le32_to_cpu(attr->stats_addr_high) << 32; + pi->stats_size = le32_to_cpu(attr->stats_size); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph, + u32 domain, struct power_dom_info *dom_info, + u32 version) +{ + int ret; + u32 flags; + struct scmi_xfer *t; + struct scmi_msg_resp_power_domain_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, POWER_DOMAIN_ATTRIBUTES, + sizeof(domain), sizeof(*attr), &t); + if (ret) + return ret; + + put_unaligned_le32(domain, t->tx.buf); + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + flags = le32_to_cpu(attr->flags); + + dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags); + dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags); + dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags); + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); + } + ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
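+ * (Extended domain names are only available from platforms implementing
+ * protocol version 3.0 or later, hence the major version check below.)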
+ */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(flags)) { + ph->hops->extended_name_get(ph, POWER_DOMAIN_NAME_GET, + domain, dom_info->name, + SCMI_MAX_STR_SIZE); + } + + return ret; +} + +static int scmi_power_state_set(const struct scmi_protocol_handle *ph, + u32 domain, u32 state) +{ + int ret; + struct scmi_xfer *t; + struct scmi_power_set_state *st; + + ret = ph->xops->xfer_get_init(ph, POWER_STATE_SET, sizeof(*st), 0, &t); + if (ret) + return ret; + + st = t->tx.buf; + st->flags = cpu_to_le32(0); + st->domain = cpu_to_le32(domain); + st->state = cpu_to_le32(state); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_power_state_get(const struct scmi_protocol_handle *ph, + u32 domain, u32 *state) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWER_STATE_GET, sizeof(u32), sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain, t->tx.buf); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *state = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_power_num_domains_get(const struct scmi_protocol_handle *ph) +{ + struct scmi_power_info *pi = ph->get_priv(ph); + + return pi->num_domains; +} + +static const char * +scmi_power_name_get(const struct scmi_protocol_handle *ph, + u32 domain) +{ + struct scmi_power_info *pi = ph->get_priv(ph); + struct power_dom_info *dom = pi->dom_info + domain; + + return dom->name; +} + +static const struct scmi_power_proto_ops power_proto_ops = { + .num_domains_get = scmi_power_num_domains_get, + .name_get = scmi_power_name_get, + .state_set = scmi_power_state_set, + .state_get = scmi_power_state_get, +}; + +static int scmi_power_request_notify(const struct scmi_protocol_handle *ph, + u32 domain, bool enable) +{ + int ret; + struct scmi_xfer *t; + struct scmi_power_state_notify *notify; + + ret = ph->xops->xfer_get_init(ph, POWER_STATE_NOTIFY, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = enable ? 
cpu_to_le32(BIT(0)) : 0; + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_power_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + ret = scmi_power_request_notify(ph, src_id, enable); + if (ret) + pr_debug("FAIL_ENABLE - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void * +scmi_power_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_power_state_notify_payld *p = payld; + struct scmi_power_state_changed_report *r = report; + + if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED || sizeof(*p) != payld_sz) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->power_state = le32_to_cpu(p->power_state); + *src_id = r->domain_id; + + return r; +} + +static int scmi_power_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct scmi_power_info *pinfo = ph->get_priv(ph); + + if (!pinfo) + return -EINVAL; + + return pinfo->num_domains; +} + +static const struct scmi_event power_events[] = { + { + .id = SCMI_EVENT_POWER_STATE_CHANGED, + .max_payld_sz = sizeof(struct scmi_power_state_notify_payld), + .max_report_sz = + sizeof(struct scmi_power_state_changed_report), + }, +}; + +static const struct scmi_event_ops power_event_ops = { + .get_num_sources = scmi_power_get_num_sources, + .set_notify_enabled = scmi_power_set_notify_enabled, + .fill_custom_report = scmi_power_fill_custom_report, +}; + +static const struct scmi_protocol_events power_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &power_event_ops, + .evts = power_events, + .num_events = ARRAY_SIZE(power_events), +}; + +static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph) +{ + int domain, ret; + u32 version; + struct scmi_power_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Power Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + ret = scmi_power_attributes_get(ph, pinfo); + if (ret) + return ret; + + pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->dom_info), GFP_KERNEL); + if (!pinfo->dom_info) + return -ENOMEM; + + for (domain = 0; domain < pinfo->num_domains; domain++) { + struct power_dom_info *dom = pinfo->dom_info + domain; + + scmi_power_domain_attributes_get(ph, domain, dom, version); + } + + pinfo->version = version; + + return ph->set_priv(ph, pinfo); +} + +static const struct scmi_protocol scmi_power = { + .id = SCMI_PROTOCOL_POWER, + .owner = THIS_MODULE, + .instance_init = &scmi_power_protocol_init, + .ops = &power_proto_ops, + .events = &power_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(power, scmi_power) diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c new file mode 100644 index 000000000..83b90bde7 --- /dev/null +++ b/drivers/firmware/arm_scmi/powercap.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Powercap Protocol + * + * Copyright (C) 2022 ARM Ltd. 
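+ *
+ * Where the platform advertises a fastchannel for a given cap or PAI get/set
+ * operation, the value is accessed directly through the memory-mapped
+ * fastchannel (ringing the doorbell on writes, when one is provided) instead
+ * of a full SCMI message exchange; the scmi_powercap_xfer_*() helpers below
+ * implement the message-based fallback paths.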
+ */ + +#define pr_fmt(fmt) "SCMI Notifications POWERCAP - " fmt + +#include <linux/bitfield.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include <trace/events/scmi.h> + +#include "protocols.h" +#include "notify.h" + +enum scmi_powercap_protocol_cmd { + POWERCAP_DOMAIN_ATTRIBUTES = 0x3, + POWERCAP_CAP_GET = 0x4, + POWERCAP_CAP_SET = 0x5, + POWERCAP_PAI_GET = 0x6, + POWERCAP_PAI_SET = 0x7, + POWERCAP_DOMAIN_NAME_GET = 0x8, + POWERCAP_MEASUREMENTS_GET = 0x9, + POWERCAP_CAP_NOTIFY = 0xa, + POWERCAP_MEASUREMENTS_NOTIFY = 0xb, + POWERCAP_DESCRIBE_FASTCHANNEL = 0xc, +}; + +enum { + POWERCAP_FC_CAP, + POWERCAP_FC_PAI, + POWERCAP_FC_MAX, +}; + +struct scmi_msg_resp_powercap_domain_attributes { + __le32 attributes; +#define SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(x) ((x) & BIT(31)) +#define SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(x) ((x) & BIT(30)) +#define SUPPORTS_ASYNC_POWERCAP_CAP_SET(x) ((x) & BIT(29)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(28)) +#define SUPPORTS_POWERCAP_CAP_CONFIGURATION(x) ((x) & BIT(27)) +#define SUPPORTS_POWERCAP_MONITORING(x) ((x) & BIT(26)) +#define SUPPORTS_POWERCAP_PAI_CONFIGURATION(x) ((x) & BIT(25)) +#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & BIT(22)) +#define POWERCAP_POWER_UNIT(x) \ + (FIELD_GET(GENMASK(24, 23), (x))) +#define SUPPORTS_POWER_UNITS_MW(x) \ + (POWERCAP_POWER_UNIT(x) == 0x2) +#define SUPPORTS_POWER_UNITS_UW(x) \ + (POWERCAP_POWER_UNIT(x) == 0x1) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; + __le32 min_pai; + __le32 max_pai; + __le32 pai_step; + __le32 min_power_cap; + __le32 max_power_cap; + __le32 power_cap_step; + __le32 sustainable_power; + __le32 accuracy; + __le32 parent_id; +}; + +struct scmi_msg_powercap_set_cap_or_pai { + __le32 domain; + __le32 flags; +#define CAP_SET_ASYNC BIT(1) +#define CAP_SET_IGNORE_DRESP BIT(0) + __le32 value; +}; + +struct scmi_msg_resp_powercap_cap_set_complete { + __le32 domain; + __le32 power_cap; +}; + +struct scmi_msg_resp_powercap_meas_get { + __le32 power; + __le32 pai; +}; + +struct scmi_msg_powercap_notify_cap { + __le32 domain; + __le32 notify_enable; +}; + +struct scmi_msg_powercap_notify_thresh { + __le32 domain; + __le32 notify_enable; + __le32 power_thresh_low; + __le32 power_thresh_high; +}; + +struct scmi_powercap_cap_changed_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power_cap; + __le32 pai; +}; + +struct scmi_powercap_meas_changed_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power; +}; + +struct scmi_powercap_state { + bool meas_notif_enabled; + u64 thresholds; +#define THRESH_LOW(p, id) \ + (lower_32_bits((p)->states[(id)].thresholds)) +#define THRESH_HIGH(p, id) \ + (upper_32_bits((p)->states[(id)].thresholds)) +}; + +struct powercap_info { + u32 version; + int num_domains; + struct scmi_powercap_state *states; + struct scmi_powercap_info *powercaps; +}; + +static enum scmi_powercap_protocol_cmd evt_2_cmd[] = { + POWERCAP_CAP_NOTIFY, + POWERCAP_MEASUREMENTS_NOTIFY, +}; + +static int scmi_powercap_notify(const struct scmi_protocol_handle *ph, + u32 domain, int message_id, bool enable); + +static int +scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph, + struct powercap_info *pi) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(u32), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + u32 attributes; + + attributes = get_unaligned_le32(t->rx.buf); + pi->num_domains = FIELD_GET(GENMASK(15, 0), attributes); + 
} + + ph->xops->xfer_put(ph, t); + return ret; +} + +static inline int +scmi_powercap_validate(unsigned int min_val, unsigned int max_val, + unsigned int step_val, bool configurable) +{ + if (!min_val || !max_val) + return -EPROTO; + + if ((configurable && min_val == max_val) || + (!configurable && min_val != max_val)) + return -EPROTO; + + if (min_val != max_val && !step_val) + return -EPROTO; + + return 0; +} + +static int +scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph, + struct powercap_info *pinfo, u32 domain) +{ + int ret; + u32 flags; + struct scmi_xfer *t; + struct scmi_powercap_info *dom_info = pinfo->powercaps + domain; + struct scmi_msg_resp_powercap_domain_attributes *resp; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_DOMAIN_ATTRIBUTES, + sizeof(domain), sizeof(*resp), &t); + if (ret) + return ret; + + put_unaligned_le32(domain, t->tx.buf); + resp = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + flags = le32_to_cpu(resp->attributes); + + dom_info->id = domain; + dom_info->notify_powercap_cap_change = + SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags); + dom_info->notify_powercap_measurement_change = + SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags); + dom_info->async_powercap_cap_set = + SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags); + dom_info->powercap_cap_config = + SUPPORTS_POWERCAP_CAP_CONFIGURATION(flags); + dom_info->powercap_monitoring = + SUPPORTS_POWERCAP_MONITORING(flags); + dom_info->powercap_pai_config = + SUPPORTS_POWERCAP_PAI_CONFIGURATION(flags); + dom_info->powercap_scale_mw = + SUPPORTS_POWER_UNITS_MW(flags); + dom_info->powercap_scale_uw = + SUPPORTS_POWER_UNITS_UW(flags); + dom_info->fastchannels = + SUPPORTS_POWERCAP_FASTCHANNELS(flags); + + strscpy(dom_info->name, resp->name, SCMI_SHORT_NAME_MAX_SIZE); + + dom_info->min_pai = le32_to_cpu(resp->min_pai); + dom_info->max_pai = le32_to_cpu(resp->max_pai); + dom_info->pai_step = le32_to_cpu(resp->pai_step); + ret = scmi_powercap_validate(dom_info->min_pai, + dom_info->max_pai, + dom_info->pai_step, + dom_info->powercap_pai_config); + if (ret) { + dev_err(ph->dev, + "Platform reported inconsistent PAI config for domain %d - %s\n", + dom_info->id, dom_info->name); + goto clean; + } + + dom_info->min_power_cap = le32_to_cpu(resp->min_power_cap); + dom_info->max_power_cap = le32_to_cpu(resp->max_power_cap); + dom_info->power_cap_step = le32_to_cpu(resp->power_cap_step); + ret = scmi_powercap_validate(dom_info->min_power_cap, + dom_info->max_power_cap, + dom_info->power_cap_step, + dom_info->powercap_cap_config); + if (ret) { + dev_err(ph->dev, + "Platform reported inconsistent CAP config for domain %d - %s\n", + dom_info->id, dom_info->name); + goto clean; + } + + dom_info->sustainable_power = + le32_to_cpu(resp->sustainable_power); + dom_info->accuracy = le32_to_cpu(resp->accuracy); + + dom_info->parent_id = le32_to_cpu(resp->parent_id); + if (dom_info->parent_id != SCMI_POWERCAP_ROOT_ZONE_ID && + (dom_info->parent_id >= pinfo->num_domains || + dom_info->parent_id == dom_info->id)) { + dev_err(ph->dev, + "Platform reported inconsistent parent ID for domain %d - %s\n", + dom_info->id, dom_info->name); + ret = -ENODEV; + } + } + +clean: + ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && SUPPORTS_EXTENDED_NAMES(flags)) + ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET, + domain, dom_info->name, + SCMI_MAX_STR_SIZE); + + return ret; +} + +static int scmi_powercap_num_domains_get(const struct scmi_protocol_handle *ph) +{ + struct powercap_info *pi = ph->get_priv(ph); + + return pi->num_domains; +} + +static const struct scmi_powercap_info * +scmi_powercap_dom_info_get(const struct scmi_protocol_handle *ph, u32 domain_id) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (domain_id >= pi->num_domains) + return NULL; + + return pi->powercaps + domain_id; +} + +static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_cap) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_GET, sizeof(u32), + sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *power_cap = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_cap) +{ + struct scmi_powercap_info *dom; + struct powercap_info *pi = ph->get_priv(ph); + + if (!power_cap || domain_id >= pi->num_domains) + return -EINVAL; + + dom = pi->powercaps + domain_id; + if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) { + *power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET, + domain_id, *power_cap, 0); + return 0; + } + + return scmi_powercap_xfer_cap_get(ph, domain_id, power_cap); +} + +static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph, + const struct scmi_powercap_info *pc, + u32 power_cap, bool ignore_dresp) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_powercap_set_cap_or_pai *msg; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->domain = cpu_to_le32(pc->id); + msg->flags = + cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) | + FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp)); + msg->value = cpu_to_le32(power_cap); + + if (!pc->async_powercap_cap_set || ignore_dresp) { + ret = ph->xops->do_xfer(ph, t); + } else { + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + struct scmi_msg_resp_powercap_cap_set_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->domain) == pc->id) + dev_dbg(ph->dev, + "Powercap ID %d CAP set async to %u\n", + pc->id, + get_unaligned_le32(&resp->power_cap)); + else + ret = -EPROTO; + } + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_cap, + bool ignore_dresp) +{ + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_cap_config || !power_cap || + power_cap < pc->min_power_cap || + power_cap > pc->max_power_cap) + return -EINVAL; + + if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) { + struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP]; + + iowrite32(power_cap, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET, + domain_id, power_cap, 0); + return 0; + } + + return scmi_powercap_xfer_cap_set(ph, pc, power_cap, ignore_dresp); +} + +static int scmi_powercap_xfer_pai_get(const struct 
scmi_protocol_handle *ph, + u32 domain_id, u32 *pai) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_GET, sizeof(u32), + sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *pai = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_powercap_pai_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *pai) +{ + struct scmi_powercap_info *dom; + struct powercap_info *pi = ph->get_priv(ph); + + if (!pai || domain_id >= pi->num_domains) + return -EINVAL; + + dom = pi->powercaps + domain_id; + if (dom->fc_info && dom->fc_info[POWERCAP_FC_PAI].get_addr) { + *pai = ioread32(dom->fc_info[POWERCAP_FC_PAI].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_GET, + domain_id, *pai, 0); + return 0; + } + + return scmi_powercap_xfer_pai_get(ph, domain_id, pai); +} + +static int scmi_powercap_xfer_pai_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 pai) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_powercap_set_cap_or_pai *msg; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->domain = cpu_to_le32(domain_id); + msg->flags = cpu_to_le32(0); + msg->value = cpu_to_le32(pai); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_powercap_pai_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 pai) +{ + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_pai_config || !pai || + pai < pc->min_pai || pai > pc->max_pai) + return -EINVAL; + + if (pc->fc_info && pc->fc_info[POWERCAP_FC_PAI].set_addr) { + struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_PAI]; + + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_SET, + domain_id, pai, 0); + iowrite32(pai, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + return 0; + } + + return scmi_powercap_xfer_pai_set(ph, domain_id, pai); +} + +static int scmi_powercap_measurements_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *average_power, + u32 *pai) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_powercap_meas_get *resp; + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_monitoring || !pai || !average_power) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_MEASUREMENTS_GET, + sizeof(u32), sizeof(*resp), &t); + if (ret) + return ret; + + resp = t->rx.buf; + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + *average_power = le32_to_cpu(resp->power); + *pai = le32_to_cpu(resp->pai); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_powercap_measurements_threshold_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_thresh_low, + u32 *power_thresh_high) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (!power_thresh_low || !power_thresh_high || + domain_id >= pi->num_domains) + return -EINVAL; + + *power_thresh_low = THRESH_LOW(pi, domain_id); + *power_thresh_high = THRESH_HIGH(pi, domain_id); + + return 0; +} + +static int +scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_thresh_low, + u32 power_thresh_high) +{ + int ret = 0; + struct powercap_info *pi = ph->get_priv(ph); + 
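+ /*
+ * Per-domain thresholds are cached as a single 64-bit value: bits[31:0]
+ * hold the low threshold and bits[63:32] the high one, as packed and
+ * unpacked by the THRESH_LOW/THRESH_HIGH macros above.
+ */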
+ if (domain_id >= pi->num_domains || + power_thresh_low > power_thresh_high) + return -EINVAL; + + /* Anything to do ? */ + if (THRESH_LOW(pi, domain_id) == power_thresh_low && + THRESH_HIGH(pi, domain_id) == power_thresh_high) + return ret; + + pi->states[domain_id].thresholds = + (FIELD_PREP(GENMASK_ULL(31, 0), power_thresh_low) | + FIELD_PREP(GENMASK_ULL(63, 32), power_thresh_high)); + + /* Update thresholds if notification already enabled */ + if (pi->states[domain_id].meas_notif_enabled) + ret = scmi_powercap_notify(ph, domain_id, + POWERCAP_MEASUREMENTS_NOTIFY, + true); + + return ret; +} + +static const struct scmi_powercap_proto_ops powercap_proto_ops = { + .num_domains_get = scmi_powercap_num_domains_get, + .info_get = scmi_powercap_dom_info_get, + .cap_get = scmi_powercap_cap_get, + .cap_set = scmi_powercap_cap_set, + .pai_get = scmi_powercap_pai_get, + .pai_set = scmi_powercap_pai_set, + .measurements_get = scmi_powercap_measurements_get, + .measurements_threshold_set = scmi_powercap_measurements_threshold_set, + .measurements_threshold_get = scmi_powercap_measurements_threshold_get, +}; + +static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph, + u32 domain, struct scmi_fc_info **p_fc) +{ + struct scmi_fc_info *fc; + + fc = devm_kcalloc(ph->dev, POWERCAP_FC_MAX, sizeof(*fc), GFP_KERNEL); + if (!fc) + return; + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_CAP_SET, 4, domain, + &fc[POWERCAP_FC_CAP].set_addr, + &fc[POWERCAP_FC_CAP].set_db); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_CAP_GET, 4, domain, + &fc[POWERCAP_FC_CAP].get_addr, NULL); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_PAI_SET, 4, domain, + &fc[POWERCAP_FC_PAI].set_addr, + &fc[POWERCAP_FC_PAI].set_db); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_PAI_GET, 4, domain, + &fc[POWERCAP_FC_PAI].get_addr, NULL); + + *p_fc = fc; +} + +static int scmi_powercap_notify(const struct scmi_protocol_handle *ph, + u32 domain, int message_id, bool enable) +{ + int ret; + struct scmi_xfer *t; + + switch (message_id) { + case POWERCAP_CAP_NOTIFY: + { + struct scmi_msg_powercap_notify_cap *notify; + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0); + break; + } + case POWERCAP_MEASUREMENTS_NOTIFY: + { + u32 low, high; + struct scmi_msg_powercap_notify_thresh *notify; + + /* + * Note that we have to pick the most recently configured + * thresholds to build a proper POWERCAP_MEASUREMENTS_NOTIFY + * enable request and we fail, complaining, if no thresholds + * were ever set, since this is an indication the API has been + * used wrongly. + */ + ret = scmi_powercap_measurements_threshold_get(ph, domain, + &low, &high); + if (ret) + return ret; + + if (enable && !low && !high) { + dev_err(ph->dev, + "Invalid Measurements Notify thresholds: %u/%u\n", + low, high); + return -EINVAL; + } + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = cpu_to_le32(enable ? 
BIT(0) : 0); + notify->power_thresh_low = cpu_to_le32(low); + notify->power_thresh_high = cpu_to_le32(high); + break; + } + default: + return -EINVAL; + } + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + struct powercap_info *pi = ph->get_priv(ph); + + if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_powercap_notify(ph, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + else if (cmd_id == POWERCAP_MEASUREMENTS_NOTIFY) + /* + * On success save the current notification enabled state, so + * as to be able to properly update the notification thresholds + * when they are modified on a domain for which measurement + * notifications were currently enabled. + * + * This is needed because the SCMI Notification core machinery + * and API does not support passing per-notification custom + * arguments at callback registration time. + * + * Note that this can be done here with a simple flag since the + * SCMI core Notifications code takes care of keeping proper + * per-domain enables refcounting, so that this helper function + * will be called only once (for enables) when the first user + * registers a callback on this domain and once more (disable) + * when the last user de-registers its callback. + */ + pi->states[src_id].meas_notif_enabled = enable; + + return ret; +} + +static void * +scmi_powercap_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + void *rep = NULL; + + switch (evt_id) { + case SCMI_EVENT_POWERCAP_CAP_CHANGED: + { + const struct scmi_powercap_cap_changed_notify_payld *p = payld; + struct scmi_powercap_cap_changed_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->power_cap = le32_to_cpu(p->power_cap); + r->pai = le32_to_cpu(p->pai); + *src_id = r->domain_id; + rep = r; + break; + } + case SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED: + { + const struct scmi_powercap_meas_changed_notify_payld *p = payld; + struct scmi_powercap_meas_changed_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->power = le32_to_cpu(p->power); + *src_id = r->domain_id; + rep = r; + break; + } + default: + break; + } + + return rep; +} + +static int +scmi_powercap_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (!pi) + return -EINVAL; + + return pi->num_domains; +} + +static const struct scmi_event powercap_events[] = { + { + .id = SCMI_EVENT_POWERCAP_CAP_CHANGED, + .max_payld_sz = + sizeof(struct scmi_powercap_cap_changed_notify_payld), + .max_report_sz = + sizeof(struct scmi_powercap_cap_changed_report), + }, + { + .id = SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED, + .max_payld_sz = + sizeof(struct scmi_powercap_meas_changed_notify_payld), + .max_report_sz = + sizeof(struct scmi_powercap_meas_changed_report), + }, +}; + +static const struct scmi_event_ops powercap_event_ops = { + .get_num_sources = scmi_powercap_get_num_sources, + .set_notify_enabled = 
scmi_powercap_set_notify_enabled, + .fill_custom_report = scmi_powercap_fill_custom_report, +}; + +static const struct scmi_protocol_events powercap_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &powercap_event_ops, + .evts = powercap_events, + .num_events = ARRAY_SIZE(powercap_events), +}; + +static int +scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph) +{ + int domain, ret; + u32 version; + struct powercap_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Powercap Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + ret = scmi_powercap_attributes_get(ph, pinfo); + if (ret) + return ret; + + pinfo->powercaps = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->powercaps), + GFP_KERNEL); + if (!pinfo->powercaps) + return -ENOMEM; + + /* + * Note that any failure in retrieving any domain attribute leads to + * the whole Powercap protocol initialization failure: this way the + * reported Powercap domains are all assured, when accessed, to be well + * formed and correlated by sane parent-child relationship (if any). + */ + for (domain = 0; domain < pinfo->num_domains; domain++) { + ret = scmi_powercap_domain_attributes_get(ph, pinfo, domain); + if (ret) + return ret; + + if (pinfo->powercaps[domain].fastchannels) + scmi_powercap_domain_init_fc(ph, domain, + &pinfo->powercaps[domain].fc_info); + } + + pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->states), GFP_KERNEL); + if (!pinfo->states) + return -ENOMEM; + + pinfo->version = version; + + return ph->set_priv(ph, pinfo); +} + +static const struct scmi_protocol scmi_powercap = { + .id = SCMI_PROTOCOL_POWERCAP, + .owner = THIS_MODULE, + .instance_init = &scmi_powercap_protocol_init, + .ops = &powercap_proto_ops, + .events = &powercap_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap) diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h new file mode 100644 index 000000000..2f3bf691d --- /dev/null +++ b/drivers/firmware/arm_scmi/protocols.h @@ -0,0 +1,342 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * System Control and Management Interface (SCMI) Message Protocol + * protocols common header file containing some definitions, structures + * and function prototypes used in all the different SCMI protocols. + * + * Copyright (C) 2022 ARM Ltd. 
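+ *
+ * As a rough sketch, a protocol command built on top of the xfer operations
+ * declared below typically follows this pattern (MSG_ID being a placeholder
+ * for the protocol-specific message identifier):
+ *
+ *	ret = ph->xops->xfer_get_init(ph, MSG_ID, tx_size, rx_size, &t);
+ *	if (ret)
+ *		return ret;
+ *	... fill t->tx.buf with little-endian request parameters ...
+ *	ret = ph->xops->do_xfer(ph, t);
+ *	if (!ret)
+ *		... parse t->rx.buf ...
+ *	ph->xops->xfer_put(ph, t);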
+ */ +#ifndef _SCMI_PROTOCOLS_H +#define _SCMI_PROTOCOLS_H + +#include <linux/bitfield.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/hashtable.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/refcount.h> +#include <linux/scmi_protocol.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <asm/unaligned.h> + +#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) +#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) +#define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))) +#define PROTOCOL_REV_MINOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))) + +enum scmi_common_cmd { + PROTOCOL_VERSION = 0x0, + PROTOCOL_ATTRIBUTES = 0x1, + PROTOCOL_MESSAGE_ATTRIBUTES = 0x2, +}; + +/** + * struct scmi_msg_resp_prot_version - Response for a message + * + * @minor_version: Minor version of the ABI that firmware supports + * @major_version: Major version of the ABI that firmware supports + * + * In general, ABI version changes follow the rule that minor version increments + * are backward compatible. Major revision changes in ABI may not be + * backward compatible. + * + * Response to a generic message with message type SCMI_MSG_VERSION + */ +struct scmi_msg_resp_prot_version { + __le16 minor_version; + __le16 major_version; +}; + +/** + * struct scmi_msg - Message(Tx/Rx) structure + * + * @buf: Buffer pointer + * @len: Length of data in the Buffer + */ +struct scmi_msg { + void *buf; + size_t len; +}; + +/** + * struct scmi_msg_hdr - Message(Tx/Rx) header + * + * @id: The identifier of the message being sent + * @protocol_id: The identifier of the protocol used to send @id message + * @type: The SCMI type for this message + * @seq: The token to identify the message. When a message returns, the + * platform returns the whole message header unmodified including the + * token + * @status: Status of the transfer once it's complete + * @poll_completion: Indicate if the transfer needs to be polled for + * completion or interrupt mode is used + */ +struct scmi_msg_hdr { + u8 id; + u8 protocol_id; + u8 type; + u16 seq; + u32 status; + bool poll_completion; +}; + +/** + * struct scmi_xfer - Structure representing a message flow + * + * @transfer_id: Unique ID for debug & profiling purpose + * @hdr: Transmit message header + * @tx: Transmit message + * @rx: Receive message, the buffer should be pre-allocated to store + * message. If request-ACK protocol is used, we can reuse the same + * buffer for the rx path as we use for the tx path. + * @done: command message transmit completion event + * @async_done: pointer to delayed response message received event completion + * @pending: True for xfers added to @pending_xfers hashtable + * @node: An hlist_node reference used to store this xfer, alternatively, on + * the free list @free_xfers or in the @pending_xfers hashtable + * @users: A refcount to track the active users for this xfer. 
+ * This is meant to protect against the possibility that, when a command
+ * transaction times out concurrently with the reception of a valid
+ * response message, the xfer could be finally put on the TX path, and
+ * so vanish, while on the RX path scmi_rx_callback() is still
+ * processing it: in such a case this refcounting will ensure that, even
+ * though the timed-out transaction will anyway cause the command
+ * request to be reported as failed by time-out, the underlying xfer
+ * cannot be discarded and possibly reused until the last user on
+ * the RX path has released it.
+ * @busy: An atomic flag to ensure exclusive write access to this xfer
+ * @state: The current state of this transfer, with state transitions deemed
+ * valid being:
+ * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
+ * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
+ * (Missing synchronous response is assumed OK and ignored)
+ * @lock: A spinlock to protect state and busy fields.
+ * @priv: A pointer for transport private usage.
+ */
+struct scmi_xfer {
+ int transfer_id;
+ struct scmi_msg_hdr hdr;
+ struct scmi_msg tx;
+ struct scmi_msg rx;
+ struct completion done;
+ struct completion *async_done;
+ bool pending;
+ struct hlist_node node;
+ refcount_t users;
+#define SCMI_XFER_FREE 0
+#define SCMI_XFER_BUSY 1
+ atomic_t busy;
+#define SCMI_XFER_SENT_OK 0
+#define SCMI_XFER_RESP_OK 1
+#define SCMI_XFER_DRESP_OK 2
+ int state;
+ /* A lock to protect state and busy fields */
+ spinlock_t lock;
+ void *priv;
+};
+
+struct scmi_xfer_ops;
+struct scmi_proto_helpers_ops;
+
+/**
+ * struct scmi_protocol_handle - Reference to an initialized protocol instance
+ *
+ * @dev: A reference to the associated SCMI instance device (handle->dev).
+ * @xops: A reference to a struct holding refs to the core xfer operations that
+ * can be used by the protocol implementation to generate SCMI messages.
+ * @set_priv: A method to set protocol private data for this instance.
+ * @get_priv: A method to get protocol private data previously set.
+ *
+ * This structure represents a protocol initialized against a specific SCMI
+ * instance and it will be used as follows:
+ * - as a parameter fed from the core to the protocol initialization code so
+ * that it can access the core xfer operations to build and generate SCMI
+ * messages exclusively for the specific underlying protocol instance.
+ * - as an opaque handle fed by an SCMI driver user when it tries to access
+ * this protocol through its own protocol operations.
+ * In this case this handle will be returned as an opaque object together
+ * with the related protocol operations when the SCMI driver tries to access
+ * the protocol.
+ */
+struct scmi_protocol_handle {
+ struct device *dev;
+ const struct scmi_xfer_ops *xops;
+ const struct scmi_proto_helpers_ops *hops;
+ int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
+ void *(*get_priv)(const struct scmi_protocol_handle *ph);
+};
+
+/**
+ * struct scmi_iterator_state - Iterator current state descriptor
+ * @desc_index: Starting index for the current multi-part request.
+ * @num_returned: Number of returned items in the last multi-part reply.
+ * @num_remaining: Number of remaining items in the multi-part message.
+ * @max_resources: Maximum acceptable number of items, configured by the caller
+ * depending on the underlying resources that it is querying.
+ * @loop_idx: The iterator loop index in the current multi-part reply.
+ * @rx_len: Size in bytes of the currently processed message; it can be used by
+ * the user of the iterator to verify a reply size.
+ * @priv: Optional pointer to some additional state-related private data set up
+ * by the caller during the iterations.
+ */
+struct scmi_iterator_state {
+ unsigned int desc_index;
+ unsigned int num_returned;
+ unsigned int num_remaining;
+ unsigned int max_resources;
+ unsigned int loop_idx;
+ size_t rx_len;
+ void *priv;
+};
+
+/**
+ * struct scmi_iterator_ops - Custom iterator operations
+ * @prepare_message: An operation to provide the custom logic to fill in the
+ * SCMI command request pointed to by @message. @desc_index is
+ * a reference to the next index to use in the multi-part
+ * request.
+ * @update_state: An operation to provide the custom logic to update the
+ * iterator state from the actual message response.
+ * @process_response: An operation to provide the custom logic needed to process
+ * each chunk of the multi-part message.
+ */
+struct scmi_iterator_ops {
+ void (*prepare_message)(void *message, unsigned int desc_index,
+ const void *priv);
+ int (*update_state)(struct scmi_iterator_state *st,
+ const void *response, void *priv);
+ int (*process_response)(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv);
+};
+
+struct scmi_fc_db_info {
+ int width;
+ u64 set;
+ u64 mask;
+ void __iomem *addr;
+};
+
+struct scmi_fc_info {
+ void __iomem *set_addr;
+ void __iomem *get_addr;
+ struct scmi_fc_db_info *set_db;
+};
+
+/**
+ * struct scmi_proto_helpers_ops - References to common protocol helpers
+ * @extended_name_get: A common helper function to retrieve extended naming
+ * for the specified resource using the specified command.
+ * Result is returned as a NULL terminated string in the
+ * pre-allocated area pointed to by @name with maximum
+ * capacity of @len bytes.
+ * @iter_response_init: A common helper to initialize a generic iterator to
+ * parse multi-message responses: when run the iterator
+ * will take care to send the initial command request as
+ * specified by @msg_id and @tx_size and then to parse the
+ * multi-part responses using the custom operations
+ * provided in @ops.
+ * @iter_response_run: A common helper to trigger the run of a previously
+ * initialized iterator.
+ * @fastchannel_init: A common helper used to initialize FC descriptors by
+ * gathering FC descriptions from the SCMI platform server.
+ * @fastchannel_db_ring: A common helper to ring a FC doorbell.
+ */
+struct scmi_proto_helpers_ops {
+ int (*extended_name_get)(const struct scmi_protocol_handle *ph,
+ u8 cmd_id, u32 res_id, char *name, size_t len);
+ void *(*iter_response_init)(const struct scmi_protocol_handle *ph,
+ struct scmi_iterator_ops *ops,
+ unsigned int max_resources, u8 msg_id,
+ size_t tx_size, void *priv);
+ int (*iter_response_run)(void *iter);
+ void (*fastchannel_init)(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id,
+ u32 valid_size, u32 domain,
+ void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db);
+ void (*fastchannel_db_ring)(struct scmi_fc_db_info *db);
+};
+
+/**
+ * struct scmi_xfer_ops - References to the core SCMI xfer operations.
+ * @version_get: Get the version of this protocol.
+ * @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
+ * @reset_rx_to_maxsz: Reset rx size to max transport size.
+ * @do_xfer: Do the SCMI transfer.
+ * @do_xfer_with_response: Do the SCMI transfer waiting for a response.
+ * @xfer_put: Free the xfer slot.
+ *
+ * Note that all these operations expect a protocol handle as the first
+ * parameter; they then internally use it to infer the underlying protocol
+ * number: this way it is not possible for a protocol implementation to forge
+ * messages for another protocol.
+ */
+struct scmi_xfer_ops {
+ int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
+ int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
+ size_t tx_size, size_t rx_size,
+ struct scmi_xfer **p);
+ void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ void (*xfer_put)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+};
+
+typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
+
+/**
+ * struct scmi_protocol - Protocol descriptor
+ * @id: Protocol ID.
+ * @owner: Module reference if any.
+ * @instance_init: Mandatory protocol initialization function.
+ * @instance_deinit: Optional protocol de-initialization function.
+ * @ops: Optional reference to the operations provided by the protocol and
+ * exposed in scmi_protocol.h.
+ * @events: An optional reference to the events supported by this protocol.
+ */
+struct scmi_protocol {
+ const u8 id;
+ struct module *owner;
+ const scmi_prot_init_ph_fn_t instance_init;
+ const scmi_prot_init_ph_fn_t instance_deinit;
+ const void *ops;
+ const struct scmi_protocol_events *events;
+};
+
+#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \
+static const struct scmi_protocol *__this_proto = &(proto); \
+ \
+int __init scmi_##name##_register(void) \
+{ \
+ return scmi_protocol_register(__this_proto); \
+} \
+ \
+void __exit scmi_##name##_unregister(void) \
+{ \
+ scmi_protocol_unregister(__this_proto); \
+}
+
+#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \
+ int __init scmi_##func##_register(void); \
+ void __exit scmi_##func##_unregister(void)
+DECLARE_SCMI_REGISTER_UNREGISTER(base);
+DECLARE_SCMI_REGISTER_UNREGISTER(clock);
+DECLARE_SCMI_REGISTER_UNREGISTER(perf);
+DECLARE_SCMI_REGISTER_UNREGISTER(power);
+DECLARE_SCMI_REGISTER_UNREGISTER(reset);
+DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
+DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
+DECLARE_SCMI_REGISTER_UNREGISTER(system);
+DECLARE_SCMI_REGISTER_UNREGISTER(powercap);
+
+#endif /* _SCMI_PROTOCOLS_H */
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
new file mode 100644
index 000000000..e9afa8cab
--- /dev/null
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Reset Protocol
+ *
+ * Copyright (C) 2019-2022 ARM Ltd.
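+ *
+ * Note that an autonomous reset request is transparently turned into an
+ * asynchronous one (completed by a delayed response) whenever the target
+ * reset domain advertises async reset support; see scmi_domain_reset() below.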
+ */ + +#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt + +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include "protocols.h" +#include "notify.h" + +enum scmi_reset_protocol_cmd { + RESET_DOMAIN_ATTRIBUTES = 0x3, + RESET = 0x4, + RESET_NOTIFY = 0x5, + RESET_DOMAIN_NAME_GET = 0x6, +}; + +#define NUM_RESET_DOMAIN_MASK 0xffff +#define RESET_NOTIFY_ENABLE BIT(0) + +struct scmi_msg_resp_reset_domain_attributes { + __le32 attributes; +#define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31)) +#define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29)) + __le32 latency; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; +}; + +struct scmi_msg_reset_domain_reset { + __le32 domain_id; + __le32 flags; +#define AUTONOMOUS_RESET BIT(0) +#define EXPLICIT_RESET_ASSERT BIT(1) +#define ASYNCHRONOUS_RESET BIT(2) + __le32 reset_state; +#define ARCH_COLD_RESET 0 +}; + +struct scmi_msg_reset_notify { + __le32 id; + __le32 event_control; +#define RESET_TP_NOTIFY_ALL BIT(0) +}; + +struct scmi_reset_issued_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 reset_state; +}; + +struct reset_dom_info { + bool async_reset; + bool reset_notify; + u32 latency_us; + char name[SCMI_MAX_STR_SIZE]; +}; + +struct scmi_reset_info { + u32 version; + int num_domains; + struct reset_dom_info *dom_info; +}; + +static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_reset_info *pi) +{ + int ret; + struct scmi_xfer *t; + u32 attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, + 0, sizeof(attr), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + attr = get_unaligned_le32(t->rx.buf); + pi->num_domains = attr & NUM_RESET_DOMAIN_MASK; + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph, + u32 domain, struct reset_dom_info *dom_info, + u32 version) +{ + int ret; + u32 attributes; + struct scmi_xfer *t; + struct scmi_msg_resp_reset_domain_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, RESET_DOMAIN_ATTRIBUTES, + sizeof(domain), sizeof(*attr), &t); + if (ret) + return ret; + + put_unaligned_le32(domain, t->tx.buf); + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + attributes = le32_to_cpu(attr->attributes); + + dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes); + dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes); + dom_info->latency_us = le32_to_cpu(attr->latency); + if (dom_info->latency_us == U32_MAX) + dom_info->latency_us = 0; + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); + } + + ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(attributes)) + ph->hops->extended_name_get(ph, RESET_DOMAIN_NAME_GET, domain, + dom_info->name, SCMI_MAX_STR_SIZE); + + return ret; +} + +static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph) +{ + struct scmi_reset_info *pi = ph->get_priv(ph); + + return pi->num_domains; +} + +static const char * +scmi_reset_name_get(const struct scmi_protocol_handle *ph, u32 domain) +{ + struct scmi_reset_info *pi = ph->get_priv(ph); + + struct reset_dom_info *dom = pi->dom_info + domain; + + return dom->name; +} + +static int scmi_reset_latency_get(const struct scmi_protocol_handle *ph, + u32 domain) +{ + struct scmi_reset_info *pi = ph->get_priv(ph); + struct reset_dom_info *dom = pi->dom_info + domain; + + return dom->latency_us; +} + +static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain, + u32 flags, u32 state) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_reset_domain_reset *dom; + struct scmi_reset_info *pi = ph->get_priv(ph); + struct reset_dom_info *rdom; + + if (domain >= pi->num_domains) + return -EINVAL; + + rdom = pi->dom_info + domain; + if (rdom->async_reset && flags & AUTONOMOUS_RESET) + flags |= ASYNCHRONOUS_RESET; + + ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t); + if (ret) + return ret; + + dom = t->tx.buf; + dom->domain_id = cpu_to_le32(domain); + dom->flags = cpu_to_le32(flags); + dom->reset_state = cpu_to_le32(state); + + if (flags & ASYNCHRONOUS_RESET) + ret = ph->xops->do_xfer_with_response(ph, t); + else + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_reset_domain_reset(const struct scmi_protocol_handle *ph, + u32 domain) +{ + return scmi_domain_reset(ph, domain, AUTONOMOUS_RESET, + ARCH_COLD_RESET); +} + +static int +scmi_reset_domain_assert(const struct scmi_protocol_handle *ph, u32 domain) +{ + return scmi_domain_reset(ph, domain, EXPLICIT_RESET_ASSERT, + ARCH_COLD_RESET); +} + +static int +scmi_reset_domain_deassert(const struct scmi_protocol_handle *ph, u32 domain) +{ + return scmi_domain_reset(ph, domain, 0, ARCH_COLD_RESET); +} + +static const struct scmi_reset_proto_ops reset_proto_ops = { + .num_domains_get = scmi_reset_num_domains_get, + .name_get = scmi_reset_name_get, + .latency_get = scmi_reset_latency_get, + .reset = scmi_reset_domain_reset, + .assert = scmi_reset_domain_assert, + .deassert = scmi_reset_domain_deassert, +}; + +static int scmi_reset_notify(const struct scmi_protocol_handle *ph, + u32 domain_id, bool enable) +{ + int ret; + u32 evt_cntl = enable ? 
RESET_TP_NOTIFY_ALL : 0; + struct scmi_xfer *t; + struct scmi_msg_reset_notify *cfg; + + ret = ph->xops->xfer_get_init(ph, RESET_NOTIFY, sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(domain_id); + cfg->event_control = cpu_to_le32(evt_cntl); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_reset_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + ret = scmi_reset_notify(ph, src_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void * +scmi_reset_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_reset_issued_notify_payld *p = payld; + struct scmi_reset_issued_report *r = report; + + if (evt_id != SCMI_EVENT_RESET_ISSUED || sizeof(*p) != payld_sz) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->reset_state = le32_to_cpu(p->reset_state); + *src_id = r->domain_id; + + return r; +} + +static int scmi_reset_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct scmi_reset_info *pinfo = ph->get_priv(ph); + + if (!pinfo) + return -EINVAL; + + return pinfo->num_domains; +} + +static const struct scmi_event reset_events[] = { + { + .id = SCMI_EVENT_RESET_ISSUED, + .max_payld_sz = sizeof(struct scmi_reset_issued_notify_payld), + .max_report_sz = sizeof(struct scmi_reset_issued_report), + }, +}; + +static const struct scmi_event_ops reset_event_ops = { + .get_num_sources = scmi_reset_get_num_sources, + .set_notify_enabled = scmi_reset_set_notify_enabled, + .fill_custom_report = scmi_reset_fill_custom_report, +}; + +static const struct scmi_protocol_events reset_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &reset_event_ops, + .evts = reset_events, + .num_events = ARRAY_SIZE(reset_events), +}; + +static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph) +{ + int domain, ret; + u32 version; + struct scmi_reset_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Reset Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + ret = scmi_reset_attributes_get(ph, pinfo); + if (ret) + return ret; + + pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->dom_info), GFP_KERNEL); + if (!pinfo->dom_info) + return -ENOMEM; + + for (domain = 0; domain < pinfo->num_domains; domain++) { + struct reset_dom_info *dom = pinfo->dom_info + domain; + + scmi_reset_domain_attributes_get(ph, domain, dom, version); + } + + pinfo->version = version; + return ph->set_priv(ph, pinfo); +} + +static const struct scmi_protocol scmi_reset = { + .id = SCMI_PROTOCOL_RESET, + .owner = THIS_MODULE, + .instance_init = &scmi_reset_protocol_init, + .ops = &reset_proto_ops, + .events = &reset_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(reset, scmi_reset) diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c new file mode 100644 index 000000000..0e05a79de --- /dev/null +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 
SCMI Generic power domain support. + * + * Copyright (C) 2018-2021 ARM Ltd. + */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/pm_domain.h> +#include <linux/scmi_protocol.h> + +static const struct scmi_power_proto_ops *power_ops; + +struct scmi_pm_domain { + struct generic_pm_domain genpd; + const struct scmi_protocol_handle *ph; + const char *name; + u32 domain; +}; + +#define to_scmi_pd(gpd) container_of(gpd, struct scmi_pm_domain, genpd) + +static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on) +{ + int ret; + u32 state, ret_state; + struct scmi_pm_domain *pd = to_scmi_pd(domain); + + if (power_on) + state = SCMI_POWER_STATE_GENERIC_ON; + else + state = SCMI_POWER_STATE_GENERIC_OFF; + + ret = power_ops->state_set(pd->ph, pd->domain, state); + if (!ret) + ret = power_ops->state_get(pd->ph, pd->domain, &ret_state); + if (!ret && state != ret_state) + return -EIO; + + return ret; +} + +static int scmi_pd_power_on(struct generic_pm_domain *domain) +{ + return scmi_pd_power(domain, true); +} + +static int scmi_pd_power_off(struct generic_pm_domain *domain) +{ + return scmi_pd_power(domain, false); +} + +static int scmi_pm_domain_probe(struct scmi_device *sdev) +{ + int num_domains, i; + struct device *dev = &sdev->dev; + struct device_node *np = dev->of_node; + struct scmi_pm_domain *scmi_pd; + struct genpd_onecell_data *scmi_pd_data; + struct generic_pm_domain **domains; + const struct scmi_handle *handle = sdev->handle; + struct scmi_protocol_handle *ph; + + if (!handle) + return -ENODEV; + + power_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWER, &ph); + if (IS_ERR(power_ops)) + return PTR_ERR(power_ops); + + num_domains = power_ops->num_domains_get(ph); + if (num_domains < 0) { + dev_err(dev, "number of domains not found\n"); + return num_domains; + } + + scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL); + if (!scmi_pd) + return -ENOMEM; + + scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL); + if (!scmi_pd_data) + return -ENOMEM; + + domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL); + if (!domains) + return -ENOMEM; + + for (i = 0; i < num_domains; i++, scmi_pd++) { + u32 state; + + if (power_ops->state_get(ph, i, &state)) { + dev_warn(dev, "failed to get state for domain %d\n", i); + continue; + } + + scmi_pd->domain = i; + scmi_pd->ph = ph; + scmi_pd->name = power_ops->name_get(ph, i); + scmi_pd->genpd.name = scmi_pd->name; + scmi_pd->genpd.power_off = scmi_pd_power_off; + scmi_pd->genpd.power_on = scmi_pd_power_on; + + pm_genpd_init(&scmi_pd->genpd, NULL, + state == SCMI_POWER_STATE_GENERIC_OFF); + + domains[i] = &scmi_pd->genpd; + } + + scmi_pd_data->domains = domains; + scmi_pd_data->num_domains = num_domains; + + dev_set_drvdata(dev, scmi_pd_data); + + return of_genpd_add_provider_onecell(np, scmi_pd_data); +} + +static void scmi_pm_domain_remove(struct scmi_device *sdev) +{ + int i; + struct genpd_onecell_data *scmi_pd_data; + struct device *dev = &sdev->dev; + struct device_node *np = dev->of_node; + + of_genpd_del_provider(np); + + scmi_pd_data = dev_get_drvdata(dev); + for (i = 0; i < scmi_pd_data->num_domains; i++) { + if (!scmi_pd_data->domains[i]) + continue; + pm_genpd_remove(scmi_pd_data->domains[i]); + } +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_POWER, "genpd" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_power_domain_driver = { + .name = 
"scmi-power-domain", + .probe = scmi_pm_domain_probe, + .remove = scmi_pm_domain_remove, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_power_domain_driver); + +MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); +MODULE_DESCRIPTION("ARM SCMI power domain driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c new file mode 100644 index 000000000..6eb7d2a4b --- /dev/null +++ b/drivers/firmware/arm_scmi/scmi_power_control.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCMI Generic SystemPower Control driver. + * + * Copyright (C) 2020-2022 ARM Ltd. + */ +/* + * In order to handle platform originated SCMI SystemPower requests (like + * shutdowns or cold/warm resets) we register an SCMI Notification notifier + * block to react when such SCMI SystemPower events are emitted by platform. + * + * Once such a notification is received we act accordingly to perform the + * required system transition depending on the kind of request. + * + * Graceful requests are routed to userspace through the same API methods + * (orderly_poweroff/reboot()) used by ACPI when handling ACPI Shutdown bus + * events. + * + * Direct forceful requests are not supported since are not meant to be sent + * by the SCMI platform to an OSPM like Linux. + * + * Additionally, graceful request notifications can carry an optional timeout + * field stating the maximum amount of time allowed by the platform for + * completion after which they are converted to forceful ones: the assumption + * here is that even graceful requests can be upper-bound by a maximum final + * timeout strictly enforced by the platform itself which can ultimately cut + * the power off at will anytime; in order to avoid such extreme scenario, we + * track progress of graceful requests through the means of a reboot notifier + * converting timed-out graceful requests to forceful ones, so at least we + * try to perform a clean sync and shutdown/restart before the power is cut. + * + * Given the peculiar nature of SCMI SystemPower protocol, that is being in + * charge of triggering system wide shutdown/reboot events, there should be + * only one SCMI platform actively emitting SystemPower events. + * For this reason the SCMI core takes care to enforce the creation of one + * single unique device associated to the SCMI System Power protocol; no matter + * how many SCMI platforms are defined on the system, only one can be designated + * to support System Power: as a consequence this driver will never be probed + * more than once. + * + * For similar reasons as soon as the first valid SystemPower is received by + * this driver and the shutdown/reboot is started, any further notification + * possibly emitted by the platform will be ignored. 
+ */ + +#include <linux/math.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/printk.h> +#include <linux/reboot.h> +#include <linux/scmi_protocol.h> +#include <linux/slab.h> +#include <linux/time64.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#ifndef MODULE +#include <linux/fs.h> +#endif + +enum scmi_syspower_state { + SCMI_SYSPOWER_IDLE, + SCMI_SYSPOWER_IN_PROGRESS, + SCMI_SYSPOWER_REBOOTING +}; + +/** + * struct scmi_syspower_conf - Common configuration + * + * @dev: A reference device + * @state: Current SystemPower state + * @state_mtx: @state related mutex + * @required_transition: The requested transition as decribed in the received + * SCMI SystemPower notification + * @userspace_nb: The notifier_block registered against the SCMI SystemPower + * notification to start the needed userspace interactions. + * @reboot_nb: A notifier_block optionally used to track reboot progress + * @forceful_work: A worker used to trigger a forceful transition once a + * graceful has timed out. + */ +struct scmi_syspower_conf { + struct device *dev; + enum scmi_syspower_state state; + /* Protect access to state */ + struct mutex state_mtx; + enum scmi_system_events required_transition; + + struct notifier_block userspace_nb; + struct notifier_block reboot_nb; + + struct delayed_work forceful_work; +}; + +#define userspace_nb_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, userspace_nb) + +#define reboot_nb_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, reboot_nb) + +#define dwork_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, forceful_work) + +/** + * scmi_reboot_notifier - A reboot notifier to catch an ongoing successful + * system transition + * @nb: Reference to the related notifier block + * @reason: The reason for the ongoing reboot + * @__unused: The cmd being executed on a restart request (unused) + * + * When an ongoing system transition is detected, compatible with the one + * requested by SCMI, cancel the delayed work. + * + * Return: NOTIFY_OK in any case + */ +static int scmi_reboot_notifier(struct notifier_block *nb, + unsigned long reason, void *__unused) +{ + struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb); + + mutex_lock(&sc->state_mtx); + switch (reason) { + case SYS_HALT: + case SYS_POWER_OFF: + if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN) + sc->state = SCMI_SYSPOWER_REBOOTING; + break; + case SYS_RESTART: + if (sc->required_transition == SCMI_SYSTEM_COLDRESET || + sc->required_transition == SCMI_SYSTEM_WARMRESET) + sc->state = SCMI_SYSPOWER_REBOOTING; + break; + default: + break; + } + + if (sc->state == SCMI_SYSPOWER_REBOOTING) { + dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n"); + cancel_delayed_work_sync(&sc->forceful_work); + } + mutex_unlock(&sc->state_mtx); + + return NOTIFY_OK; +} + +/** + * scmi_request_forceful_transition - Request forceful SystemPower transition + * @sc: A reference to the configuration data + * + * Initiates the required SystemPower transition without involving userspace: + * just trigger the action at the kernel level after issuing an emergency + * sync. 
(if possible at all) + */ +static inline void +scmi_request_forceful_transition(struct scmi_syspower_conf *sc) +{ + dev_dbg(sc->dev, "Serving forceful request:%d\n", + sc->required_transition); + +#ifndef MODULE + emergency_sync(); +#endif + switch (sc->required_transition) { + case SCMI_SYSTEM_SHUTDOWN: + kernel_power_off(); + break; + case SCMI_SYSTEM_COLDRESET: + case SCMI_SYSTEM_WARMRESET: + kernel_restart(NULL); + break; + default: + break; + } +} + +static void scmi_forceful_work_func(struct work_struct *work) +{ + struct scmi_syspower_conf *sc; + struct delayed_work *dwork; + + if (system_state > SYSTEM_RUNNING) + return; + + dwork = to_delayed_work(work); + sc = dwork_to_sconf(dwork); + + dev_dbg(sc->dev, "Graceful request timed out...forcing !\n"); + mutex_lock(&sc->state_mtx); + /* avoid deadlock by unregistering reboot notifier first */ + unregister_reboot_notifier(&sc->reboot_nb); + if (sc->state == SCMI_SYSPOWER_IN_PROGRESS) + scmi_request_forceful_transition(sc); + mutex_unlock(&sc->state_mtx); +} + +/** + * scmi_request_graceful_transition - Request graceful SystemPower transition + * @sc: A reference to the configuration data + * @timeout_ms: The desired timeout to wait for the shutdown to complete before + * system is forcibly shutdown. + * + * Initiates the required SystemPower transition, requesting userspace + * co-operation: it uses the same orderly_ methods used by ACPI Shutdown event + * processing. + * + * Takes care also to register a reboot notifier and to schedule a delayed work + * in order to detect if userspace actions are taking too long and in such a + * case to trigger a forceful transition. + */ +static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc, + unsigned int timeout_ms) +{ + unsigned int adj_timeout_ms = 0; + + if (timeout_ms) { + int ret; + + sc->reboot_nb.notifier_call = &scmi_reboot_notifier; + ret = register_reboot_notifier(&sc->reboot_nb); + if (!ret) { + /* Wait only up to 75% of the advertised timeout */ + adj_timeout_ms = mult_frac(timeout_ms, 3, 4); + INIT_DELAYED_WORK(&sc->forceful_work, + scmi_forceful_work_func); + schedule_delayed_work(&sc->forceful_work, + msecs_to_jiffies(adj_timeout_ms)); + } else { + /* Carry on best effort even without a reboot notifier */ + dev_warn(sc->dev, + "Cannot register reboot notifier !\n"); + } + } + + dev_dbg(sc->dev, + "Serving graceful req:%d (timeout_ms:%u adj_timeout_ms:%u)\n", + sc->required_transition, timeout_ms, adj_timeout_ms); + + switch (sc->required_transition) { + case SCMI_SYSTEM_SHUTDOWN: + /* + * When triggered early at boot-time the 'orderly' call will + * partially fail due to the lack of userspace itself, but + * the force=true argument will start anyway a successful + * forced shutdown. + */ + orderly_poweroff(true); + break; + case SCMI_SYSTEM_COLDRESET: + case SCMI_SYSTEM_WARMRESET: + orderly_reboot(); + break; + default: + break; + } +} + +/** + * scmi_userspace_notifier - Notifier callback to act on SystemPower + * Notifications + * @nb: Reference to the related notifier block + * @event: The SystemPower notification event id + * @data: The SystemPower event report + * + * This callback is in charge of decoding the received SystemPower report + * and act accordingly triggering a graceful or forceful system transition. + * + * Note that once a valid SCMI SystemPower event starts being served, any + * other following SystemPower notification received from the same SCMI + * instance (handle) will be ignored. 
+ * + * Return: NOTIFY_OK once a valid SystemPower event has been successfully + * processed. + */ +static int scmi_userspace_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct scmi_system_power_state_notifier_report *er = data; + struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb); + + if (er->system_state >= SCMI_SYSTEM_POWERUP) { + dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n", + er->system_state); + return NOTIFY_DONE; + } + + if (!SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(er->flags)) { + dev_err(sc->dev, "Ignoring forceful notification.\n"); + return NOTIFY_DONE; + } + + /* + * Bail out if the system is already shutting down or an SCMI SystemPower + * request is already being served. + */ + if (system_state > SYSTEM_RUNNING) + return NOTIFY_DONE; + mutex_lock(&sc->state_mtx); + if (sc->state != SCMI_SYSPOWER_IDLE) { + dev_dbg(sc->dev, + "Transition already in progress...ignore.\n"); + mutex_unlock(&sc->state_mtx); + return NOTIFY_DONE; + } + sc->state = SCMI_SYSPOWER_IN_PROGRESS; + mutex_unlock(&sc->state_mtx); + + sc->required_transition = er->system_state; + + /* Leaving a trace in logs of who triggered the shutdown/reboot. */ + dev_info(sc->dev, "Serving shutdown/reboot request: %d\n", + sc->required_transition); + + scmi_request_graceful_transition(sc, er->timeout); + + return NOTIFY_OK; +} + +static int scmi_syspower_probe(struct scmi_device *sdev) +{ + int ret; + struct scmi_syspower_conf *sc; + struct scmi_handle *handle = sdev->handle; + + if (!handle) + return -ENODEV; + + ret = handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM); + if (ret) + return ret; + + sc = devm_kzalloc(&sdev->dev, sizeof(*sc), GFP_KERNEL); + if (!sc) + return -ENOMEM; + + sc->state = SCMI_SYSPOWER_IDLE; + mutex_init(&sc->state_mtx); + sc->required_transition = SCMI_SYSTEM_MAX; + sc->userspace_nb.notifier_call = &scmi_userspace_notifier; + sc->dev = &sdev->dev; + + return handle->notify_ops->devm_event_notifier_register(sdev, + SCMI_PROTOCOL_SYSTEM, + SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER, + NULL, &sc->userspace_nb); +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_SYSTEM, "syspower" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_system_power_driver = { + .name = "scmi-system-power", + .probe = scmi_syspower_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_system_power_driver); + +MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>"); +MODULE_DESCRIPTION("ARM SCMI SystemPower Control driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c new file mode 100644 index 000000000..0b5853fa9 --- /dev/null +++ b/drivers/firmware/arm_scmi/sensors.c @@ -0,0 +1,1152 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Sensor Protocol + * + * Copyright (C) 2018-2022 ARM Ltd. 
+ */ + +#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt + +#include <linux/bitfield.h> +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include "protocols.h" +#include "notify.h" + +#define SCMI_MAX_NUM_SENSOR_AXIS 63 +#define SCMIv2_SENSOR_PROTOCOL 0x10000 + +enum scmi_sensor_protocol_cmd { + SENSOR_DESCRIPTION_GET = 0x3, + SENSOR_TRIP_POINT_NOTIFY = 0x4, + SENSOR_TRIP_POINT_CONFIG = 0x5, + SENSOR_READING_GET = 0x6, + SENSOR_AXIS_DESCRIPTION_GET = 0x7, + SENSOR_LIST_UPDATE_INTERVALS = 0x8, + SENSOR_CONFIG_GET = 0x9, + SENSOR_CONFIG_SET = 0xA, + SENSOR_CONTINUOUS_UPDATE_NOTIFY = 0xB, + SENSOR_NAME_GET = 0xC, + SENSOR_AXIS_NAME_GET = 0xD, +}; + +struct scmi_msg_resp_sensor_attributes { + __le16 num_sensors; + u8 max_requests; + u8 reserved; + __le32 reg_addr_low; + __le32 reg_addr_high; + __le32 reg_size; +}; + +/* v3 attributes_low macros */ +#define SUPPORTS_UPDATE_NOTIFY(x) FIELD_GET(BIT(30), (x)) +#define SENSOR_TSTAMP_EXP(x) FIELD_GET(GENMASK(14, 10), (x)) +#define SUPPORTS_TIMESTAMP(x) FIELD_GET(BIT(9), (x)) +#define SUPPORTS_EXTEND_ATTRS(x) FIELD_GET(BIT(8), (x)) + +/* v2 attributes_high macros */ +#define SENSOR_UPDATE_BASE(x) FIELD_GET(GENMASK(31, 27), (x)) +#define SENSOR_UPDATE_SCALE(x) FIELD_GET(GENMASK(26, 22), (x)) + +/* v3 attributes_high macros */ +#define SENSOR_AXIS_NUMBER(x) FIELD_GET(GENMASK(21, 16), (x)) +#define SUPPORTS_AXIS(x) FIELD_GET(BIT(8), (x)) + +/* v3 resolution macros */ +#define SENSOR_RES(x) FIELD_GET(GENMASK(26, 0), (x)) +#define SENSOR_RES_EXP(x) FIELD_GET(GENMASK(31, 27), (x)) + +struct scmi_msg_resp_attrs { + __le32 min_range_low; + __le32 min_range_high; + __le32 max_range_low; + __le32 max_range_high; +}; + +struct scmi_msg_sensor_description { + __le32 desc_index; +}; + +struct scmi_msg_resp_sensor_description { + __le16 num_returned; + __le16 num_remaining; + struct scmi_sensor_descriptor { + __le32 id; + __le32 attributes_low; +/* Common attributes_low macros */ +#define SUPPORTS_ASYNC_READ(x) FIELD_GET(BIT(31), (x)) +#define SUPPORTS_EXTENDED_NAMES(x) FIELD_GET(BIT(29), (x)) +#define NUM_TRIP_POINTS(x) FIELD_GET(GENMASK(7, 0), (x)) + __le32 attributes_high; +/* Common attributes_high macros */ +#define SENSOR_SCALE(x) FIELD_GET(GENMASK(15, 11), (x)) +#define SENSOR_SCALE_SIGN BIT(4) +#define SENSOR_SCALE_EXTEND GENMASK(31, 5) +#define SENSOR_TYPE(x) FIELD_GET(GENMASK(7, 0), (x)) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; + /* only for version > 2.0 */ + __le32 power; + __le32 resolution; + struct scmi_msg_resp_attrs scalar_attrs; + } desc[]; +}; + +/* Base scmi_sensor_descriptor size excluding extended attrs after name */ +#define SCMI_MSG_RESP_SENS_DESCR_BASE_SZ 28 + +/* Sign extend to a full s32 */ +#define S32_EXT(v) \ + ({ \ + int __v = (v); \ + \ + if (__v & SENSOR_SCALE_SIGN) \ + __v |= SENSOR_SCALE_EXTEND; \ + __v; \ + }) + +struct scmi_msg_sensor_axis_description_get { + __le32 id; + __le32 axis_desc_index; +}; + +struct scmi_msg_resp_sensor_axis_description { + __le32 num_axis_flags; +#define NUM_AXIS_RETURNED(x) FIELD_GET(GENMASK(5, 0), (x)) +#define NUM_AXIS_REMAINING(x) FIELD_GET(GENMASK(31, 26), (x)) + struct scmi_axis_descriptor { + __le32 id; + __le32 attributes_low; +#define SUPPORTS_EXTENDED_AXIS_NAMES(x) FIELD_GET(BIT(9), (x)) + __le32 attributes_high; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; + __le32 resolution; + struct scmi_msg_resp_attrs attrs; + } desc[]; +}; + +struct scmi_msg_resp_sensor_axis_names_description { + __le32 num_axis_flags; + struct scmi_sensor_axis_name_descriptor { + __le32 axis_id; + u8 
name[SCMI_MAX_STR_SIZE]; + } desc[]; +}; + +/* Base scmi_axis_descriptor size excluding extended attrs after name */ +#define SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ 28 + +struct scmi_msg_sensor_list_update_intervals { + __le32 id; + __le32 index; +}; + +struct scmi_msg_resp_sensor_list_update_intervals { + __le32 num_intervals_flags; +#define NUM_INTERVALS_RETURNED(x) FIELD_GET(GENMASK(11, 0), (x)) +#define SEGMENTED_INTVL_FORMAT(x) FIELD_GET(BIT(12), (x)) +#define NUM_INTERVALS_REMAINING(x) FIELD_GET(GENMASK(31, 16), (x)) + __le32 intervals[]; +}; + +struct scmi_msg_sensor_request_notify { + __le32 id; + __le32 event_control; +#define SENSOR_NOTIFY_ALL BIT(0) +}; + +struct scmi_msg_set_sensor_trip_point { + __le32 id; + __le32 event_control; +#define SENSOR_TP_EVENT_MASK (0x3) +#define SENSOR_TP_DISABLED 0x0 +#define SENSOR_TP_POSITIVE 0x1 +#define SENSOR_TP_NEGATIVE 0x2 +#define SENSOR_TP_BOTH 0x3 +#define SENSOR_TP_ID(x) (((x) & 0xff) << 4) + __le32 value_low; + __le32 value_high; +}; + +struct scmi_msg_sensor_config_set { + __le32 id; + __le32 sensor_config; +}; + +struct scmi_msg_sensor_reading_get { + __le32 id; + __le32 flags; +#define SENSOR_READ_ASYNC BIT(0) +}; + +struct scmi_resp_sensor_reading_complete { + __le32 id; + __le32 readings_low; + __le32 readings_high; +}; + +struct scmi_sensor_reading_resp { + __le32 sensor_value_low; + __le32 sensor_value_high; + __le32 timestamp_low; + __le32 timestamp_high; +}; + +struct scmi_resp_sensor_reading_complete_v3 { + __le32 id; + struct scmi_sensor_reading_resp readings[]; +}; + +struct scmi_sensor_trip_notify_payld { + __le32 agent_id; + __le32 sensor_id; + __le32 trip_point_desc; +}; + +struct scmi_sensor_update_notify_payld { + __le32 agent_id; + __le32 sensor_id; + struct scmi_sensor_reading_resp readings[]; +}; + +struct sensors_info { + u32 version; + int num_sensors; + int max_requests; + u64 reg_addr; + u32 reg_size; + struct scmi_sensor_info *sensors; +}; + +static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph, + struct sensors_info *si) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_sensor_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, + 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + si->num_sensors = le16_to_cpu(attr->num_sensors); + si->max_requests = attr->max_requests; + si->reg_addr = le32_to_cpu(attr->reg_addr_low) | + (u64)le32_to_cpu(attr->reg_addr_high) << 32; + si->reg_size = le32_to_cpu(attr->reg_size); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out, + const struct scmi_msg_resp_attrs *in) +{ + out->min_range = get_unaligned_le64((void *)&in->min_range_low); + out->max_range = get_unaligned_le64((void *)&in->max_range_low); +} + +struct scmi_sens_ipriv { + void *priv; + struct device *dev; +}; + +static void iter_intervals_prepare_message(void *message, + unsigned int desc_index, + const void *p) +{ + struct scmi_msg_sensor_list_update_intervals *msg = message; + const struct scmi_sensor_info *s; + + s = ((const struct scmi_sens_ipriv *)p)->priv; + /* Set the number of sensors to be skipped/already read */ + msg->id = cpu_to_le32(s->id); + msg->index = cpu_to_le32(desc_index); +} + +static int iter_intervals_update_state(struct scmi_iterator_state *st, + const void *response, void *p) +{ + u32 flags; + struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv; + struct device *dev = ((struct 
scmi_sens_ipriv *)p)->dev; + const struct scmi_msg_resp_sensor_list_update_intervals *r = response; + + flags = le32_to_cpu(r->num_intervals_flags); + st->num_returned = NUM_INTERVALS_RETURNED(flags); + st->num_remaining = NUM_INTERVALS_REMAINING(flags); + + /* + * Max intervals is not declared previously anywhere so we + * assume it's returned+remaining on first call. + */ + if (!st->max_resources) { + s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags); + s->intervals.count = st->num_returned + st->num_remaining; + /* segmented intervals are reported in one triplet */ + if (s->intervals.segmented && + (st->num_remaining || st->num_returned != 3)) { + dev_err(dev, + "Sensor ID:%d advertises an invalid segmented interval (%d)\n", + s->id, s->intervals.count); + s->intervals.segmented = false; + s->intervals.count = 0; + return -EINVAL; + } + /* Direct allocation when exceeding pre-allocated */ + if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) { + s->intervals.desc = + devm_kcalloc(dev, + s->intervals.count, + sizeof(*s->intervals.desc), + GFP_KERNEL); + if (!s->intervals.desc) { + s->intervals.segmented = false; + s->intervals.count = 0; + return -ENOMEM; + } + } + + st->max_resources = s->intervals.count; + } + + return 0; +} + +static int +iter_intervals_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *p) +{ + const struct scmi_msg_resp_sensor_list_update_intervals *r = response; + struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv; + + s->intervals.desc[st->desc_index + st->loop_idx] = + le32_to_cpu(r->intervals[st->loop_idx]); + + return 0; +} + +static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph, + struct scmi_sensor_info *s) +{ + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_intervals_prepare_message, + .update_state = iter_intervals_update_state, + .process_response = iter_intervals_process_response, + }; + struct scmi_sens_ipriv upriv = { + .priv = s, + .dev = ph->dev, + }; + + iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count, + SENSOR_LIST_UPDATE_INTERVALS, + sizeof(struct scmi_msg_sensor_list_update_intervals), + &upriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} + +struct scmi_apriv { + bool any_axes_support_extended_names; + struct scmi_sensor_info *s; +}; + +static void iter_axes_desc_prepare_message(void *message, + const unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_sensor_axis_description_get *msg = message; + const struct scmi_apriv *apriv = priv; + + /* Set the number of sensors to be skipped/already read */ + msg->id = cpu_to_le32(apriv->s->id); + msg->axis_desc_index = cpu_to_le32(desc_index); +} + +static int +iter_axes_desc_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + u32 flags; + const struct scmi_msg_resp_sensor_axis_description *r = response; + + flags = le32_to_cpu(r->num_axis_flags); + st->num_returned = NUM_AXIS_RETURNED(flags); + st->num_remaining = NUM_AXIS_REMAINING(flags); + st->priv = (void *)&r->desc[0]; + + return 0; +} + +static int +iter_axes_desc_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + u32 attrh, attrl; + struct scmi_sensor_axis_info *a; + size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ; + struct scmi_apriv *apriv = priv; + const struct scmi_axis_descriptor *adesc = st->priv; + + attrl = 
le32_to_cpu(adesc->attributes_low); + if (SUPPORTS_EXTENDED_AXIS_NAMES(attrl)) + apriv->any_axes_support_extended_names = true; + + a = &apriv->s->axis[st->desc_index + st->loop_idx]; + a->id = le32_to_cpu(adesc->id); + a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl); + + attrh = le32_to_cpu(adesc->attributes_high); + a->scale = S32_EXT(SENSOR_SCALE(attrh)); + a->type = SENSOR_TYPE(attrh); + strscpy(a->name, adesc->name, SCMI_SHORT_NAME_MAX_SIZE); + + if (a->extended_attrs) { + unsigned int ares = le32_to_cpu(adesc->resolution); + + a->resolution = SENSOR_RES(ares); + a->exponent = S32_EXT(SENSOR_RES_EXP(ares)); + dsize += sizeof(adesc->resolution); + + scmi_parse_range_attrs(&a->attrs, &adesc->attrs); + dsize += sizeof(adesc->attrs); + } + st->priv = ((u8 *)adesc + dsize); + + return 0; +} + +static int +iter_axes_extended_name_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + u32 flags; + const struct scmi_msg_resp_sensor_axis_names_description *r = response; + + flags = le32_to_cpu(r->num_axis_flags); + st->num_returned = NUM_AXIS_RETURNED(flags); + st->num_remaining = NUM_AXIS_REMAINING(flags); + st->priv = (void *)&r->desc[0]; + + return 0; +} + +static int +iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, + void *priv) +{ + struct scmi_sensor_axis_info *a; + const struct scmi_apriv *apriv = priv; + struct scmi_sensor_axis_name_descriptor *adesc = st->priv; + u32 axis_id = le32_to_cpu(adesc->axis_id); + + if (axis_id >= st->max_resources) + return -EPROTO; + + /* + * Pick the corresponding descriptor based on the axis_id embedded + * in the reply since the list of axes supporting extended names + * can be a subset of all the axes. + */ + a = &apriv->s->axis[axis_id]; + strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE); + st->priv = ++adesc; + + return 0; +} + +static int +scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph, + struct scmi_sensor_info *s) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_axes_desc_prepare_message, + .update_state = iter_axes_extended_name_update_state, + .process_response = iter_axes_extended_name_process_response, + }; + struct scmi_apriv apriv = { + .any_axes_support_extended_names = false, + .s = s, + }; + + iter = ph->hops->iter_response_init(ph, &ops, s->num_axis, + SENSOR_AXIS_NAME_GET, + sizeof(struct scmi_msg_sensor_axis_description_get), + &apriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + /* + * Do not cause whole protocol initialization failure when failing to + * get extended names for axes. 
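+ * In that case the axes simply keep the short names already parsed from the standard axis descriptors above. 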
+ */ + ret = ph->hops->iter_response_run(iter); + if (ret) + dev_warn(ph->dev, + "Failed to get axes extended names for %s (ret:%d).\n", + s->name, ret); + + return 0; +} + +static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, + struct scmi_sensor_info *s, + u32 version) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_axes_desc_prepare_message, + .update_state = iter_axes_desc_update_state, + .process_response = iter_axes_desc_process_response, + }; + struct scmi_apriv apriv = { + .any_axes_support_extended_names = false, + .s = s, + }; + + s->axis = devm_kcalloc(ph->dev, s->num_axis, + sizeof(*s->axis), GFP_KERNEL); + if (!s->axis) + return -ENOMEM; + + iter = ph->hops->iter_response_init(ph, &ops, s->num_axis, + SENSOR_AXIS_DESCRIPTION_GET, + sizeof(struct scmi_msg_sensor_axis_description_get), + &apriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + if (ret) + return ret; + + if (PROTOCOL_REV_MAJOR(version) >= 0x3 && + apriv.any_axes_support_extended_names) + ret = scmi_sensor_axis_extended_names_get(ph, s); + + return ret; +} + +static void iter_sens_descr_prepare_message(void *message, + unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_sensor_description *msg = message; + + msg->desc_index = cpu_to_le32(desc_index); +} + +static int iter_sens_descr_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_msg_resp_sensor_description *r = response; + + st->num_returned = le16_to_cpu(r->num_returned); + st->num_remaining = le16_to_cpu(r->num_remaining); + st->priv = (void *)&r->desc[0]; + + return 0; +} + +static int +iter_sens_descr_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) + +{ + int ret = 0; + u32 attrh, attrl; + size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ; + struct scmi_sensor_info *s; + struct sensors_info *si = priv; + const struct scmi_sensor_descriptor *sdesc = st->priv; + + s = &si->sensors[st->desc_index + st->loop_idx]; + s->id = le32_to_cpu(sdesc->id); + + attrl = le32_to_cpu(sdesc->attributes_low); + /* common bitfields parsing */ + s->async = SUPPORTS_ASYNC_READ(attrl); + s->num_trip_points = NUM_TRIP_POINTS(attrl); + /** + * only SCMIv3.0 specific bitfield below. + * Such bitfields are assumed to be zeroed on non + * relevant fw versions...assuming fw not buggy ! + */ + s->update = SUPPORTS_UPDATE_NOTIFY(attrl); + s->timestamped = SUPPORTS_TIMESTAMP(attrl); + if (s->timestamped) + s->tstamp_scale = S32_EXT(SENSOR_TSTAMP_EXP(attrl)); + s->extended_scalar_attrs = SUPPORTS_EXTEND_ATTRS(attrl); + + attrh = le32_to_cpu(sdesc->attributes_high); + /* common bitfields parsing */ + s->scale = S32_EXT(SENSOR_SCALE(attrh)); + s->type = SENSOR_TYPE(attrh); + /* Use pre-allocated pool wherever possible */ + s->intervals.desc = s->intervals.prealloc_pool; + if (si->version == SCMIv2_SENSOR_PROTOCOL) { + s->intervals.segmented = false; + s->intervals.count = 1; + /* + * Convert SCMIv2.0 update interval format to + * SCMIv3.0 to be used as the common exposed + * descriptor, accessible via common macros. + */ + s->intervals.desc[0] = (SENSOR_UPDATE_BASE(attrh) << 5) | + SENSOR_UPDATE_SCALE(attrh); + } else { + /* + * From SCMIv3.0 update intervals are retrieved + * via a dedicated (optional) command. + * Since the command is optional, on error carry + * on without any update interval. 
+ */ + if (scmi_sensor_update_intervals(ph, s)) + dev_dbg(ph->dev, + "Update Intervals not available for sensor ID:%d\n", + s->id); + } + /** + * only > SCMIv2.0 specific bitfield below. + * Such bitfields are assumed to be zeroed on non + * relevant fw versions...assuming fw not buggy ! + */ + s->num_axis = min_t(unsigned int, + SUPPORTS_AXIS(attrh) ? + SENSOR_AXIS_NUMBER(attrh) : 0, + SCMI_MAX_NUM_SENSOR_AXIS); + strscpy(s->name, sdesc->name, SCMI_SHORT_NAME_MAX_SIZE); + + /* + * If supported overwrite short name with the extended + * one; on error just carry on and use already provided + * short name. + */ + if (PROTOCOL_REV_MAJOR(si->version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(attrl)) + ph->hops->extended_name_get(ph, SENSOR_NAME_GET, s->id, + s->name, SCMI_MAX_STR_SIZE); + + if (s->extended_scalar_attrs) { + s->sensor_power = le32_to_cpu(sdesc->power); + dsize += sizeof(sdesc->power); + + /* Only for sensors reporting scalar values */ + if (s->num_axis == 0) { + unsigned int sres = le32_to_cpu(sdesc->resolution); + + s->resolution = SENSOR_RES(sres); + s->exponent = S32_EXT(SENSOR_RES_EXP(sres)); + dsize += sizeof(sdesc->resolution); + + scmi_parse_range_attrs(&s->scalar_attrs, + &sdesc->scalar_attrs); + dsize += sizeof(sdesc->scalar_attrs); + } + } + + if (s->num_axis > 0) + ret = scmi_sensor_axis_description(ph, s, si->version); + + st->priv = ((u8 *)sdesc + dsize); + + return ret; +} + +static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph, + struct sensors_info *si) +{ + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_sens_descr_prepare_message, + .update_state = iter_sens_descr_update_state, + .process_response = iter_sens_descr_process_response, + }; + + iter = ph->hops->iter_response_init(ph, &ops, si->num_sensors, + SENSOR_DESCRIPTION_GET, + sizeof(__le32), si); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} + +static inline int +scmi_sensor_request_notify(const struct scmi_protocol_handle *ph, u32 sensor_id, + u8 message_id, bool enable) +{ + int ret; + u32 evt_cntl = enable ? 
SENSOR_NOTIFY_ALL : 0; + struct scmi_xfer *t; + struct scmi_msg_sensor_request_notify *cfg; + + ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(sensor_id); + cfg->event_control = cpu_to_le32(evt_cntl); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_sensor_trip_point_notify(const struct scmi_protocol_handle *ph, + u32 sensor_id, bool enable) +{ + return scmi_sensor_request_notify(ph, sensor_id, + SENSOR_TRIP_POINT_NOTIFY, + enable); +} + +static int +scmi_sensor_continuous_update_notify(const struct scmi_protocol_handle *ph, + u32 sensor_id, bool enable) +{ + return scmi_sensor_request_notify(ph, sensor_id, + SENSOR_CONTINUOUS_UPDATE_NOTIFY, + enable); +} + +static int +scmi_sensor_trip_point_config(const struct scmi_protocol_handle *ph, + u32 sensor_id, u8 trip_id, u64 trip_value) +{ + int ret; + u32 evt_cntl = SENSOR_TP_BOTH; + struct scmi_xfer *t; + struct scmi_msg_set_sensor_trip_point *trip; + + ret = ph->xops->xfer_get_init(ph, SENSOR_TRIP_POINT_CONFIG, + sizeof(*trip), 0, &t); + if (ret) + return ret; + + trip = t->tx.buf; + trip->id = cpu_to_le32(sensor_id); + trip->event_control = cpu_to_le32(evt_cntl | SENSOR_TP_ID(trip_id)); + trip->value_low = cpu_to_le32(trip_value & 0xffffffff); + trip->value_high = cpu_to_le32(trip_value >> 32); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph, + u32 sensor_id, u32 *sensor_config) +{ + int ret; + struct scmi_xfer *t; + struct sensors_info *si = ph->get_priv(ph); + + if (sensor_id >= si->num_sensors) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET, + sizeof(__le32), sizeof(__le32), &t); + if (ret) + return ret; + + put_unaligned_le32(sensor_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + struct scmi_sensor_info *s = si->sensors + sensor_id; + + *sensor_config = get_unaligned_le64(t->rx.buf); + s->sensor_config = *sensor_config; + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph, + u32 sensor_id, u32 sensor_config) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_sensor_config_set *msg; + struct sensors_info *si = ph->get_priv(ph); + + if (sensor_id >= si->num_sensors) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->id = cpu_to_le32(sensor_id); + msg->sensor_config = cpu_to_le32(sensor_config); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + struct scmi_sensor_info *s = si->sensors + sensor_id; + + s->sensor_config = sensor_config; + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +/** + * scmi_sensor_reading_get - Read scalar sensor value + * @ph: Protocol handle + * @sensor_id: Sensor ID + * @value: The 64bit value sensor reading + * + * This function returns a single 64 bit reading value representing the sensor + * value; if the platform SCMI Protocol implementation and the sensor support + * multiple axis and timestamped-reads, this just returns the first axis while + * dropping the timestamp value. + * Use instead the @scmi_sensor_reading_get_timestamped to retrieve the array of + * timestamped multi-axis values. 
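+ * + * A minimal illustrative sketch (hypothetical names: sensor_ops, sensor_id, dev) of a client driver reading a scalar value through the ops exposed below, assuming sensor_ops was obtained via handle->devm_protocol_get(sdev, SCMI_PROTOCOL_SENSOR, &ph): + * + *	u64 value; + * + *	if (!sensor_ops->reading_get(ph, sensor_id, &value)) + *		dev_dbg(dev, "sensor %u reads %llu\n", sensor_id, (unsigned long long)value); 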
+ * + * Return: 0 on Success + */ +static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph, + u32 sensor_id, u64 *value) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_sensor_reading_get *sensor; + struct scmi_sensor_info *s; + struct sensors_info *si = ph->get_priv(ph); + + if (sensor_id >= si->num_sensors) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET, + sizeof(*sensor), 0, &t); + if (ret) + return ret; + + sensor = t->tx.buf; + sensor->id = cpu_to_le32(sensor_id); + s = si->sensors + sensor_id; + if (s->async) { + sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC); + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + struct scmi_resp_sensor_reading_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->id) == sensor_id) + *value = + get_unaligned_le64(&resp->readings_low); + else + ret = -EPROTO; + } + } else { + sensor->flags = cpu_to_le32(0); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *value = get_unaligned_le64(t->rx.buf); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static inline void +scmi_parse_sensor_readings(struct scmi_sensor_reading *out, + const struct scmi_sensor_reading_resp *in) +{ + out->value = get_unaligned_le64((void *)&in->sensor_value_low); + out->timestamp = get_unaligned_le64((void *)&in->timestamp_low); +} + +/** + * scmi_sensor_reading_get_timestamped - Read multiple-axis timestamped values + * @ph: Protocol handle + * @sensor_id: Sensor ID + * @count: The length of the provided @readings array + * @readings: An array of elements each representing a timestamped per-axis + * reading of type @struct scmi_sensor_reading. + * Returned readings are ordered as the @axis descriptors array + * included in @struct scmi_sensor_info and the max number of + * returned elements is min(@count, @num_axis); ideally the provided + * array should be of length @count equal to @num_axis. 
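+ * For example, for a sensor advertising three axes, a call with @count == 3 fills readings[0..2] in the same order as the axis descriptors. 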
+ * + * Return: 0 on Success + */ +static int +scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph, + u32 sensor_id, u8 count, + struct scmi_sensor_reading *readings) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_sensor_reading_get *sensor; + struct scmi_sensor_info *s; + struct sensors_info *si = ph->get_priv(ph); + + if (sensor_id >= si->num_sensors) + return -EINVAL; + + s = si->sensors + sensor_id; + if (!count || !readings || + (!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis)) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET, + sizeof(*sensor), 0, &t); + if (ret) + return ret; + + sensor = t->tx.buf; + sensor->id = cpu_to_le32(sensor_id); + if (s->async) { + sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC); + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + int i; + struct scmi_resp_sensor_reading_complete_v3 *resp; + + resp = t->rx.buf; + /* Retrieve only the number of requested axis anyway */ + if (le32_to_cpu(resp->id) == sensor_id) + for (i = 0; i < count; i++) + scmi_parse_sensor_readings(&readings[i], + &resp->readings[i]); + else + ret = -EPROTO; + } + } else { + sensor->flags = cpu_to_le32(0); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + int i; + struct scmi_sensor_reading_resp *resp_readings; + + resp_readings = t->rx.buf; + for (i = 0; i < count; i++) + scmi_parse_sensor_readings(&readings[i], + &resp_readings[i]); + } + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static const struct scmi_sensor_info * +scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id) +{ + struct sensors_info *si = ph->get_priv(ph); + + if (sensor_id >= si->num_sensors) + return NULL; + + return si->sensors + sensor_id; +} + +static int scmi_sensor_count_get(const struct scmi_protocol_handle *ph) +{ + struct sensors_info *si = ph->get_priv(ph); + + return si->num_sensors; +} + +static const struct scmi_sensor_proto_ops sensor_proto_ops = { + .count_get = scmi_sensor_count_get, + .info_get = scmi_sensor_info_get, + .trip_point_config = scmi_sensor_trip_point_config, + .reading_get = scmi_sensor_reading_get, + .reading_get_timestamped = scmi_sensor_reading_get_timestamped, + .config_get = scmi_sensor_config_get, + .config_set = scmi_sensor_config_set, +}; + +static int scmi_sensor_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + switch (evt_id) { + case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT: + ret = scmi_sensor_trip_point_notify(ph, src_id, enable); + break; + case SCMI_EVENT_SENSOR_UPDATE: + ret = scmi_sensor_continuous_update_notify(ph, src_id, enable); + break; + default: + ret = -EINVAL; + break; + } + + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void * +scmi_sensor_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + void *rep = NULL; + + switch (evt_id) { + case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT: + { + const struct scmi_sensor_trip_notify_payld *p = payld; + struct scmi_sensor_trip_point_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->sensor_id = le32_to_cpu(p->sensor_id); + r->trip_point_desc = le32_to_cpu(p->trip_point_desc); + *src_id = r->sensor_id; + rep = r; + break; + } + case SCMI_EVENT_SENSOR_UPDATE: + { + int i; + struct scmi_sensor_info 
*s; + const struct scmi_sensor_update_notify_payld *p = payld; + struct scmi_sensor_update_report *r = report; + struct sensors_info *sinfo = ph->get_priv(ph); + + /* payld_sz is variable for this event */ + r->sensor_id = le32_to_cpu(p->sensor_id); + if (r->sensor_id >= sinfo->num_sensors) + break; + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + s = &sinfo->sensors[r->sensor_id]; + /* + * The generated report r (@struct scmi_sensor_update_report) + * was pre-allocated to contain up to SCMI_MAX_NUM_SENSOR_AXIS + * readings: here it is filled with the effective @num_axis + * readings defined for this sensor or 1 for scalar sensors. + */ + r->readings_count = s->num_axis ?: 1; + for (i = 0; i < r->readings_count; i++) + scmi_parse_sensor_readings(&r->readings[i], + &p->readings[i]); + *src_id = r->sensor_id; + rep = r; + break; + } + default: + break; + } + + return rep; +} + +static int scmi_sensor_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct sensors_info *si = ph->get_priv(ph); + + return si->num_sensors; +} + +static const struct scmi_event sensor_events[] = { + { + .id = SCMI_EVENT_SENSOR_TRIP_POINT_EVENT, + .max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld), + .max_report_sz = sizeof(struct scmi_sensor_trip_point_report), + }, + { + .id = SCMI_EVENT_SENSOR_UPDATE, + .max_payld_sz = + sizeof(struct scmi_sensor_update_notify_payld) + + SCMI_MAX_NUM_SENSOR_AXIS * + sizeof(struct scmi_sensor_reading_resp), + .max_report_sz = sizeof(struct scmi_sensor_update_report) + + SCMI_MAX_NUM_SENSOR_AXIS * + sizeof(struct scmi_sensor_reading), + }, +}; + +static const struct scmi_event_ops sensor_event_ops = { + .get_num_sources = scmi_sensor_get_num_sources, + .set_notify_enabled = scmi_sensor_set_notify_enabled, + .fill_custom_report = scmi_sensor_fill_custom_report, +}; + +static const struct scmi_protocol_events sensor_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &sensor_event_ops, + .evts = sensor_events, + .num_events = ARRAY_SIZE(sensor_events), +}; + +static int scmi_sensors_protocol_init(const struct scmi_protocol_handle *ph) +{ + u32 version; + int ret; + struct sensors_info *sinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Sensor Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + sinfo = devm_kzalloc(ph->dev, sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; + sinfo->version = version; + + ret = scmi_sensor_attributes_get(ph, sinfo); + if (ret) + return ret; + sinfo->sensors = devm_kcalloc(ph->dev, sinfo->num_sensors, + sizeof(*sinfo->sensors), GFP_KERNEL); + if (!sinfo->sensors) + return -ENOMEM; + + ret = scmi_sensor_description_get(ph, sinfo); + if (ret) + return ret; + + return ph->set_priv(ph, sinfo); +} + +static const struct scmi_protocol scmi_sensors = { + .id = SCMI_PROTOCOL_SENSOR, + .owner = THIS_MODULE, + .instance_init = &scmi_sensors_protocol_init, + .ops = &sensor_proto_ops, + .events = &sensor_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(sensors, scmi_sensors) diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c new file mode 100644 index 000000000..517d52fb3 --- /dev/null +++ b/drivers/firmware/arm_scmi/shmem.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * For transport using shared mem structure. + * + * Copyright (C) 2019 ARM Ltd. 
+ */ + +#include <linux/ktime.h> +#include <linux/io.h> +#include <linux/processor.h> +#include <linux/types.h> + +#include <asm-generic/bug.h> + +#include "common.h" + +/* + * The SCMI specification requires all parameters, message headers, return + * arguments or any protocol data to be expressed in little endian + * format only. + */ +struct scmi_shared_mem { + __le32 reserved; + __le32 channel_status; +#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1) +#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0) + __le32 reserved1[2]; + __le32 flags; +#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0) + __le32 length; + __le32 msg_header; + u8 msg_payload[]; +}; + +void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer, struct scmi_chan_info *cinfo) +{ + ktime_t stop; + + /* + * Ideally the channel must be free by now, unless the OS timed out the + * last request and the platform continued to process it; wait + * until it releases the shared memory, otherwise we may end up + * overwriting its response with a new message payload or vice-versa. + * Giving up anyway after twice the expected channel timeout so as + * not to bail-out on intermittent issues where the platform is + * occasionally a bit slower to answer. + * + * Note that after a timeout is detected we bail-out and carry on but + * the transport functionality is probably permanently compromised: + * this is just to ease debugging and avoid complete hangs on boot + * due to a misbehaving SCMI firmware. + */ + stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms); + spin_until_cond((ioread32(&shmem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) || + ktime_after(ktime_get(), stop)); + if (!(ioread32(&shmem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) { + WARN_ON_ONCE(1); + dev_err(cinfo->dev, + "Timeout waiting for a free TX channel !\n"); + return; + } + + /* Mark channel busy + clear error */ + iowrite32(0x0, &shmem->channel_status); + iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED, + &shmem->flags); + iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length); + iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header); + if (xfer->tx.buf) + memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); +} + +u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) +{ + return ioread32(&shmem->msg_header); +} + +void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer) +{ + size_t len = ioread32(&shmem->length); + + xfer->hdr.status = ioread32(shmem->msg_payload); + /* Skip the length of header and status in shmem area i.e. 8 bytes */ + xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0); + + /* Take a copy to the rx buffer.. */ + memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); +} + +void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, + size_t max_len, struct scmi_xfer *xfer) +{ + size_t len = ioread32(&shmem->length); + + /* Skip only the length of header in shmem area i.e. 4 bytes */ + xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0); + + /* Take a copy to the rx buffer.. 
*/ + memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); +} + +void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem) +{ + iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status); +} + +bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer) +{ + u16 xfer_id; + + xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header)); + + if (xfer->hdr.seq != xfer_id) + return false; + + return ioread32(&shmem->channel_status) & + (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR | + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); +} + +bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem) +{ + return (ioread32(&shmem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); +} diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c new file mode 100644 index 000000000..ac0bd51ef --- /dev/null +++ b/drivers/firmware/arm_scmi/smc.c @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message SMC/HVC + * Transport driver + * + * Copyright 2020 NXP + */ + +#include <linux/arm-smccc.h> +#include <linux/atomic.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/processor.h> +#include <linux/slab.h> + +#include "common.h" + +/** + * struct scmi_smc - Structure representing a SCMI smc transport + * + * @irq: An optional IRQ for completion + * @cinfo: SCMI channel info + * @shmem: Transmit/Receive shared memory area + * @shmem_lock: Lock to protect access to Tx/Rx shared memory area. + * Used when NOT operating in atomic mode. + * @inflight: Atomic flag to protect access to Tx/Rx shared memory area. + * Used when operating in atomic mode. 
+ * @func_id: smc/hvc call function id + */ + +struct scmi_smc { + int irq; + struct scmi_chan_info *cinfo; + struct scmi_shared_mem __iomem *shmem; + /* Protect access to shmem area */ + struct mutex shmem_lock; +#define INFLIGHT_NONE MSG_TOKEN_MAX + atomic_t inflight; + u32 func_id; +}; + +static irqreturn_t smc_msg_done_isr(int irq, void *data) +{ + struct scmi_smc *scmi_info = data; + + scmi_rx_callback(scmi_info->cinfo, + shmem_read_header(scmi_info->shmem), NULL); + + return IRQ_HANDLED; +} + +static bool smc_chan_available(struct device *dev, int idx) +{ + struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0); + if (!np) + return false; + + of_node_put(np); + return true; +} + +static inline void smc_channel_lock_init(struct scmi_smc *scmi_info) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + atomic_set(&scmi_info->inflight, INFLIGHT_NONE); + else + mutex_init(&scmi_info->shmem_lock); +} + +static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight) +{ + int ret; + + ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq); + + return ret == INFLIGHT_NONE; +} + +static inline void +smc_channel_lock_acquire(struct scmi_smc *scmi_info, + struct scmi_xfer *xfer __maybe_unused) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight)); + else + mutex_lock(&scmi_info->shmem_lock); +} + +static inline void smc_channel_lock_release(struct scmi_smc *scmi_info) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + atomic_set(&scmi_info->inflight, INFLIGHT_NONE); + else + mutex_unlock(&scmi_info->shmem_lock); +} + +static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, + bool tx) +{ + struct device *cdev = cinfo->dev; + struct scmi_smc *scmi_info; + resource_size_t size; + struct resource res; + struct device_node *np; + u32 func_id; + int ret; + + if (!tx) + return -ENODEV; + + scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL); + if (!scmi_info) + return -ENOMEM; + + np = of_parse_phandle(cdev->of_node, "shmem", 0); + if (!of_device_is_compatible(np, "arm,scmi-shmem")) { + of_node_put(np); + return -ENXIO; + } + + ret = of_address_to_resource(np, 0, &res); + of_node_put(np); + if (ret) { + dev_err(cdev, "failed to get SCMI Tx shared memory\n"); + return ret; + } + + size = resource_size(&res); + scmi_info->shmem = devm_ioremap(dev, res.start, size); + if (!scmi_info->shmem) { + dev_err(dev, "failed to ioremap SCMI Tx shared memory\n"); + return -EADDRNOTAVAIL; + } + + ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id); + if (ret < 0) + return ret; + + /* + * If there is an interrupt named "a2p", then the service and + * completion of a message is signaled by an interrupt rather than by + * the return of the SMC call. 
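+ * When no such interrupt is provided the channel is simply marked as lacking a completion interrupt (see cinfo->no_completion_irq below); in any case synchronous command completion is assumed on return from the SMC/HVC call, as described by the .sync_cmds_completed_on_ret flag in scmi_smc_desc. 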
+ */ + scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p"); + if (scmi_info->irq > 0) { + ret = request_irq(scmi_info->irq, smc_msg_done_isr, + IRQF_NO_SUSPEND, dev_name(dev), scmi_info); + if (ret) { + dev_err(dev, "failed to setup SCMI smc irq\n"); + return ret; + } + } else { + cinfo->no_completion_irq = true; + } + + scmi_info->func_id = func_id; + scmi_info->cinfo = cinfo; + smc_channel_lock_init(scmi_info); + cinfo->transport_info = scmi_info; + + return 0; +} + +static int smc_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_smc *scmi_info = cinfo->transport_info; + + /* Ignore any possible further reception on the IRQ path */ + if (scmi_info->irq > 0) + free_irq(scmi_info->irq, scmi_info); + + cinfo->transport_info = NULL; + scmi_info->cinfo = NULL; + + scmi_free_channel(cinfo, data, id); + + return 0; +} + +static int smc_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_smc *scmi_info = cinfo->transport_info; + struct arm_smccc_res res; + + /* + * Channel will be released only once response has been + * surely fully retrieved, so after .mark_txdone() + */ + smc_channel_lock_acquire(scmi_info, xfer); + + shmem_tx_prepare(scmi_info->shmem, xfer, cinfo); + + arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res); + + /* Only SMCCC_RET_NOT_SUPPORTED is a valid error code */ + if (res.a0) { + smc_channel_lock_release(scmi_info); + return -EOPNOTSUPP; + } + + return 0; +} + +static void smc_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_smc *scmi_info = cinfo->transport_info; + + shmem_fetch_response(scmi_info->shmem, xfer); +} + +static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) +{ + struct scmi_smc *scmi_info = cinfo->transport_info; + + smc_channel_lock_release(scmi_info); +} + +static const struct scmi_transport_ops scmi_smc_ops = { + .chan_available = smc_chan_available, + .chan_setup = smc_chan_setup, + .chan_free = smc_chan_free, + .send_message = smc_send_message, + .mark_txdone = smc_mark_txdone, + .fetch_response = smc_fetch_response, +}; + +const struct scmi_desc scmi_smc_desc = { + .ops = &scmi_smc_ops, + .max_rx_timeout_ms = 30, + .max_msg = 20, + .max_msg_size = 128, + /* + * Setting .sync_cmds_completed_on_ret to true for SMC assumes that, + * once the SMC instruction has completed successfully, the issued + * SCMI command will have already been fully processed by the SCMI + * platform firmware and so any possible response value expected + * for the issued command will be immediately ready to be fetched + * from the shared memory area. + */ + .sync_cmds_completed_on_ret = true, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE), +}; diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c new file mode 100644 index 000000000..9383d7584 --- /dev/null +++ b/drivers/firmware/arm_scmi/system.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) System Power Protocol + * + * Copyright (C) 2020-2022 ARM Ltd. 
+ */ + +#define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt + +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include "protocols.h" +#include "notify.h" + +#define SCMI_SYSTEM_NUM_SOURCES 1 + +enum scmi_system_protocol_cmd { + SYSTEM_POWER_STATE_NOTIFY = 0x5, +}; + +struct scmi_system_power_state_notify { + __le32 notify_enable; +}; + +struct scmi_system_power_state_notifier_payld { + __le32 agent_id; + __le32 flags; + __le32 system_state; + __le32 timeout; +}; + +struct scmi_system_info { + u32 version; + bool graceful_timeout_supported; +}; + +static int scmi_system_request_notify(const struct scmi_protocol_handle *ph, + bool enable) +{ + int ret; + struct scmi_xfer *t; + struct scmi_system_power_state_notify *notify; + + ret = ph->xops->xfer_get_init(ph, SYSTEM_POWER_STATE_NOTIFY, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0; + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_system_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + ret = scmi_system_request_notify(ph, enable); + if (ret) + pr_debug("FAIL_ENABLE - evt[%X] - ret:%d\n", evt_id, ret); + + return ret; +} + +static void * +scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + size_t expected_sz; + const struct scmi_system_power_state_notifier_payld *p = payld; + struct scmi_system_power_state_notifier_report *r = report; + struct scmi_system_info *pinfo = ph->get_priv(ph); + + expected_sz = pinfo->graceful_timeout_supported ? + sizeof(*p) : sizeof(*p) - sizeof(__le32); + if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER || + payld_sz != expected_sz) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->flags = le32_to_cpu(p->flags); + r->system_state = le32_to_cpu(p->system_state); + if (pinfo->graceful_timeout_supported && + r->system_state == SCMI_SYSTEM_SHUTDOWN && + SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(r->flags)) + r->timeout = le32_to_cpu(p->timeout); + else + r->timeout = 0x00; + *src_id = 0; + + return r; +} + +static const struct scmi_event system_events[] = { + { + .id = SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER, + .max_payld_sz = + sizeof(struct scmi_system_power_state_notifier_payld), + .max_report_sz = + sizeof(struct scmi_system_power_state_notifier_report), + }, +}; + +static const struct scmi_event_ops system_event_ops = { + .set_notify_enabled = scmi_system_set_notify_enabled, + .fill_custom_report = scmi_system_fill_custom_report, +}; + +static const struct scmi_protocol_events system_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &system_event_ops, + .evts = system_events, + .num_events = ARRAY_SIZE(system_events), + .num_sources = SCMI_SYSTEM_NUM_SOURCES, +}; + +static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph) +{ + int ret; + u32 version; + struct scmi_system_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "System Power Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + pinfo->version = version; + if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2) + pinfo->graceful_timeout_supported = true; + + return 
ph->set_priv(ph, pinfo); +} + +static const struct scmi_protocol scmi_system = { + .id = SCMI_PROTOCOL_SYSTEM, + .owner = THIS_MODULE, + .instance_init = &scmi_system_protocol_init, + .ops = NULL, + .events = &system_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(system, scmi_system) diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c new file mode 100644 index 000000000..1db975c08 --- /dev/null +++ b/drivers/firmware/arm_scmi/virtio.c @@ -0,0 +1,941 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Virtio Transport driver for Arm System Control and Management Interface + * (SCMI). + * + * Copyright (C) 2020-2022 OpenSynergy. + * Copyright (C) 2021-2022 ARM Ltd. + */ + +/** + * DOC: Theory of Operation + * + * The scmi-virtio transport implements a driver for the virtio SCMI device. + * + * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx + * channel (virtio eventq, P2A channel). Each channel is implemented through a + * virtqueue. Access to each virtqueue is protected by spinlocks. + */ + +#include <linux/completion.h> +#include <linux/errno.h> +#include <linux/refcount.h> +#include <linux/slab.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> + +#include <uapi/linux/virtio_ids.h> +#include <uapi/linux/virtio_scmi.h> + +#include "common.h" + +#define VIRTIO_MAX_RX_TIMEOUT_MS 60000 +#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */ +#define VIRTIO_SCMI_MAX_PDU_SIZE \ + (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD) +#define DESCRIPTORS_PER_TX_MSG 2 + +/** + * struct scmi_vio_channel - Transport channel information + * + * @vqueue: Associated virtqueue + * @cinfo: SCMI Tx or Rx channel + * @free_lock: Protects access to the @free_list. + * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only + * @deferred_tx_work: Worker for TX deferred replies processing + * @deferred_tx_wq: Workqueue for TX deferred replies + * @pending_lock: Protects access to the @pending_cmds_list. + * @pending_cmds_list: List of pre-fetched commands queued for later processing + * @is_rx: Whether channel is an Rx channel + * @max_msg: Maximum number of pending messages for this channel. + * @lock: Protects access to all members except users, free_list and + * pending_cmds_list. + * @shutdown_done: A reference to a completion used when freeing this channel. + * @users: A reference count to currently active users of this channel. + */ +struct scmi_vio_channel { + struct virtqueue *vqueue; + struct scmi_chan_info *cinfo; + /* lock to protect access to the free list. */ + spinlock_t free_lock; + struct list_head free_list; + /* lock to protect access to the pending list.
*/ + spinlock_t pending_lock; + struct list_head pending_cmds_list; + struct work_struct deferred_tx_work; + struct workqueue_struct *deferred_tx_wq; + bool is_rx; + unsigned int max_msg; + /* + * Lock to protect access to all members except users, free_list and + * pending_cmds_list + */ + spinlock_t lock; + struct completion *shutdown_done; + refcount_t users; +}; + +enum poll_states { + VIO_MSG_NOT_POLLED, + VIO_MSG_POLL_TIMEOUT, + VIO_MSG_POLLING, + VIO_MSG_POLL_DONE, +}; + +/** + * struct scmi_vio_msg - Transport PDU information + * + * @request: SDU used for commands + * @input: SDU used for (delayed) responses and notifications + * @list: List which scmi_vio_msg may be part of + * @rx_len: Input SDU size in bytes, once input has been received + * @poll_idx: Last used index registered for polling purposes if this message + * transaction reply was configured for polling. + * @poll_status: Polling state for this message. + * @poll_lock: A lock to protect @poll_status + * @users: A reference count to track this message users and avoid premature + * freeing (and reuse) when polling and IRQ execution paths interleave. + */ +struct scmi_vio_msg { + struct scmi_msg_payld *request; + struct scmi_msg_payld *input; + struct list_head list; + unsigned int rx_len; + unsigned int poll_idx; + enum poll_states poll_status; + /* Lock to protect access to poll_status */ + spinlock_t poll_lock; + refcount_t users; +}; + +/* Only one SCMI VirtIO device can possibly exist */ +static struct virtio_device *scmi_vdev; + +static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch, + struct scmi_chan_info *cinfo) +{ + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + cinfo->transport_info = vioch; + /* Indirectly setting channel not available any more */ + vioch->cinfo = cinfo; + spin_unlock_irqrestore(&vioch->lock, flags); + + refcount_set(&vioch->users, 1); +} + +static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch) +{ + return refcount_inc_not_zero(&vioch->users); +} + +static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch) +{ + if (refcount_dec_and_test(&vioch->users)) { + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + if (vioch->shutdown_done) { + vioch->cinfo = NULL; + complete(vioch->shutdown_done); + } + spin_unlock_irqrestore(&vioch->lock, flags); + } +} + +static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) +{ + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); + + /* + * Prepare to wait for the last release if not already released + * or in progress. 
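For illustration, scmi_vio_channel_cleanup_sync() above pairs the users reference count with a completion so that the last active user, rather than the teardown path itself, is the one that finally signals that shutdown may complete. A minimal standalone sketch of that pattern follows; it is not kernel code, the names are invented here, and C11 atomics plus a polled flag stand in for refcount_t and struct completion.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int users = 1;		/* like vioch->users, set to 1 when the channel is readied */
static atomic_bool shutting_down;	/* like "vioch->shutdown_done != NULL" */
static atomic_bool shutdown_done;	/* like completing the on-stack completion */

/* Mirror of refcount_inc_not_zero(): fail once the count has already hit zero. */
static bool channel_acquire(void)
{
	int old = atomic_load(&users);

	while (old != 0)
		if (atomic_compare_exchange_weak(&users, &old, old + 1))
			return true;
	return false;
}

/* Whoever drops the last reference signals the waiting cleanup path. */
static void channel_release(void)
{
	if (atomic_fetch_sub(&users, 1) == 1 && atomic_load(&shutting_down))
		atomic_store(&shutdown_done, true);
}

/* Teardown: flag the shutdown, drop the initial reference, wait for the last user. */
static void channel_cleanup_sync(void)
{
	atomic_store(&shutting_down, true);
	channel_release();
	while (!atomic_load(&shutdown_done))
		;	/* wait_for_completion() analogue */
}

int main(void)
{
	if (channel_acquire())	/* a concurrent user would normally run on another CPU */
		channel_release();
	channel_cleanup_sync();
	return 0;
}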
+ */ + spin_lock_irqsave(&vioch->lock, flags); + if (!vioch->cinfo || vioch->shutdown_done) { + spin_unlock_irqrestore(&vioch->lock, flags); + return; + } + + vioch->shutdown_done = &vioch_shutdown_done; + if (!vioch->is_rx && vioch->deferred_tx_wq) + /* Cannot be kicked anymore after this...*/ + vioch->deferred_tx_wq = NULL; + spin_unlock_irqrestore(&vioch->lock, flags); + + scmi_vio_channel_release(vioch); + + /* Let any possibly concurrent RX path release the channel */ + wait_for_completion(vioch->shutdown_done); +} + +/* Assumes to be called with vio channel acquired already */ +static struct scmi_vio_msg * +scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch) +{ + unsigned long flags; + struct scmi_vio_msg *msg; + + spin_lock_irqsave(&vioch->free_lock, flags); + if (list_empty(&vioch->free_list)) { + spin_unlock_irqrestore(&vioch->free_lock, flags); + return NULL; + } + + msg = list_first_entry(&vioch->free_list, typeof(*msg), list); + list_del_init(&msg->list); + spin_unlock_irqrestore(&vioch->free_lock, flags); + + /* Still no users, no need to acquire poll_lock */ + msg->poll_status = VIO_MSG_NOT_POLLED; + refcount_set(&msg->users, 1); + + return msg; +} + +static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg) +{ + return refcount_inc_not_zero(&msg->users); +} + +/* Assumes to be called with vio channel acquired already */ +static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + bool ret; + + ret = refcount_dec_and_test(&msg->users); + if (ret) { + unsigned long flags; + + spin_lock_irqsave(&vioch->free_lock, flags); + list_add_tail(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->free_lock, flags); + } + + return ret; +} + +static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) +{ + return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); +} + +static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + struct scatterlist sg_in; + int rc; + unsigned long flags; + struct device *dev = &vioch->vqueue->vdev->dev; + + sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); + + spin_lock_irqsave(&vioch->lock, flags); + + rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); + if (rc) + dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc); + else + virtqueue_kick(vioch->vqueue); + + spin_unlock_irqrestore(&vioch->lock, flags); + + return rc; +} + +/* + * Assume to be called with channel already acquired or not ready at all; + * vioch->lock MUST NOT have been already acquired. 
+ */ +static void scmi_finalize_message(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + if (vioch->is_rx) + scmi_vio_feed_vq_rx(vioch, msg); + else + scmi_vio_msg_release(vioch, msg); +} + +static void scmi_vio_complete_cb(struct virtqueue *vqueue) +{ + unsigned long flags; + unsigned int length; + struct scmi_vio_channel *vioch; + struct scmi_vio_msg *msg; + bool cb_enabled = true; + + if (WARN_ON_ONCE(!vqueue->vdev->priv)) + return; + vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; + + for (;;) { + if (!scmi_vio_channel_acquire(vioch)) + return; + + spin_lock_irqsave(&vioch->lock, flags); + if (cb_enabled) { + virtqueue_disable_cb(vqueue); + cb_enabled = false; + } + + msg = virtqueue_get_buf(vqueue, &length); + if (!msg) { + if (virtqueue_enable_cb(vqueue)) { + spin_unlock_irqrestore(&vioch->lock, flags); + scmi_vio_channel_release(vioch); + return; + } + cb_enabled = true; + } + spin_unlock_irqrestore(&vioch->lock, flags); + + if (msg) { + msg->rx_len = length; + scmi_rx_callback(vioch->cinfo, + msg_read_header(msg->input), msg); + + scmi_finalize_message(vioch, msg); + } + + /* + * Release vio channel between loop iterations to allow + * virtio_chan_free() to eventually fully release it when + * shutting down; in such a case, any outstanding message will + * be ignored since this loop will bail out at the next + * iteration. + */ + scmi_vio_channel_release(vioch); + } +} + +static void scmi_vio_deferred_tx_worker(struct work_struct *work) +{ + unsigned long flags; + struct scmi_vio_channel *vioch; + struct scmi_vio_msg *msg, *tmp; + + vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work); + + if (!scmi_vio_channel_acquire(vioch)) + return; + + /* + * Process pre-fetched messages: these could be non-polled messages or + * late timed-out replies to polled messages dequeued by chance while + * polling for some other messages: this worker is in charge to process + * the valid non-expired messages and anyway finally free all of them. + */ + spin_lock_irqsave(&vioch->pending_lock, flags); + + /* Scan the list of possibly pre-fetched messages during polling. */ + list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) { + list_del(&msg->list); + + /* + * Channel is acquired here (cannot vanish) and this message + * is no more processed elsewhere so no poll_lock needed. 
+ */ + if (msg->poll_status == VIO_MSG_NOT_POLLED) + scmi_rx_callback(vioch->cinfo, + msg_read_header(msg->input), msg); + + /* Free the processed message once done */ + scmi_vio_msg_release(vioch, msg); + } + + spin_unlock_irqrestore(&vioch->pending_lock, flags); + + /* Process possibly still pending messages */ + scmi_vio_complete_cb(vioch->vqueue); + + scmi_vio_channel_release(vioch); +} + +static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" }; + +static vq_callback_t *scmi_vio_complete_callbacks[] = { + scmi_vio_complete_cb, + scmi_vio_complete_cb +}; + +static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) +{ + struct scmi_vio_channel *vioch = base_cinfo->transport_info; + + return vioch->max_msg; +} + +static int virtio_link_supplier(struct device *dev) +{ + if (!scmi_vdev) { + dev_notice(dev, + "Deferring probe after not finding a bound scmi-virtio device\n"); + return -EPROBE_DEFER; + } + + if (!device_link_add(dev, &scmi_vdev->dev, + DL_FLAG_AUTOREMOVE_CONSUMER)) { + dev_err(dev, "Adding link to supplier virtio device failed\n"); + return -ECANCELED; + } + + return 0; +} + +static bool virtio_chan_available(struct device *dev, int idx) +{ + struct scmi_vio_channel *channels, *vioch = NULL; + + if (WARN_ON_ONCE(!scmi_vdev)) + return false; + + channels = (struct scmi_vio_channel *)scmi_vdev->priv; + + switch (idx) { + case VIRTIO_SCMI_VQ_TX: + vioch = &channels[VIRTIO_SCMI_VQ_TX]; + break; + case VIRTIO_SCMI_VQ_RX: + if (scmi_vio_have_vq_rx(scmi_vdev)) + vioch = &channels[VIRTIO_SCMI_VQ_RX]; + break; + default: + return false; + } + + return vioch && !vioch->cinfo; +} + +static void scmi_destroy_tx_workqueue(void *deferred_tx_wq) +{ + destroy_workqueue(deferred_tx_wq); +} + +static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, + bool tx) +{ + struct scmi_vio_channel *vioch; + int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX; + int i; + + if (!scmi_vdev) + return -EPROBE_DEFER; + + vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; + + /* Setup a deferred worker for polling. */ + if (tx && !vioch->deferred_tx_wq) { + int ret; + + vioch->deferred_tx_wq = + alloc_workqueue(dev_name(&scmi_vdev->dev), + WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, + 0); + if (!vioch->deferred_tx_wq) + return -ENOMEM; + + ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue, + vioch->deferred_tx_wq); + if (ret) + return ret; + + INIT_WORK(&vioch->deferred_tx_work, + scmi_vio_deferred_tx_worker); + } + + for (i = 0; i < vioch->max_msg; i++) { + struct scmi_vio_msg *msg; + + msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (tx) { + msg->request = devm_kzalloc(dev, + VIRTIO_SCMI_MAX_PDU_SIZE, + GFP_KERNEL); + if (!msg->request) + return -ENOMEM; + spin_lock_init(&msg->poll_lock); + refcount_set(&msg->users, 1); + } + + msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, + GFP_KERNEL); + if (!msg->input) + return -ENOMEM; + + scmi_finalize_message(vioch, msg); + } + + scmi_vio_channel_ready(vioch, cinfo); + + return 0; +} + +static int virtio_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_vio_channel *vioch = cinfo->transport_info; + + /* + * Break device to inhibit further traffic flowing while shutting down + * the channels: doing it later holding vioch->lock creates unsafe + * locking dependency chains as reported by LOCKDEP. 
+ */ + virtio_break_device(vioch->vqueue->vdev); + scmi_vio_channel_cleanup_sync(vioch); + + scmi_free_channel(cinfo, data, id); + + return 0; +} + +static int virtio_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_vio_channel *vioch = cinfo->transport_info; + struct scatterlist sg_out; + struct scatterlist sg_in; + struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in }; + unsigned long flags; + int rc; + struct scmi_vio_msg *msg; + + if (!scmi_vio_channel_acquire(vioch)) + return -EINVAL; + + msg = scmi_virtio_get_free_msg(vioch); + if (!msg) { + scmi_vio_channel_release(vioch); + return -EBUSY; + } + + msg_tx_prepare(msg->request, xfer); + + sg_init_one(&sg_out, msg->request, msg_command_size(xfer)); + sg_init_one(&sg_in, msg->input, msg_response_size(xfer)); + + spin_lock_irqsave(&vioch->lock, flags); + + /* + * If polling was requested for this transaction: + * - retrieve last used index (will be used as polling reference) + * - bind the polled message to the xfer via .priv + * - grab an additional msg refcount for the poll-path + */ + if (xfer->hdr.poll_completion) { + msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); + /* Still no users, no need to acquire poll_lock */ + msg->poll_status = VIO_MSG_POLLING; + scmi_vio_msg_acquire(msg); + /* Ensure initialized msg is visibly bound to xfer */ + smp_store_mb(xfer->priv, msg); + } + + rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); + if (rc) + dev_err(vioch->cinfo->dev, + "failed to add to TX virtqueue (%d)\n", rc); + else + virtqueue_kick(vioch->vqueue); + + spin_unlock_irqrestore(&vioch->lock, flags); + + if (rc) { + /* Ensure order between xfer->priv clear and vq feeding */ + smp_store_mb(xfer->priv, NULL); + if (xfer->hdr.poll_completion) + scmi_vio_msg_release(vioch, msg); + scmi_vio_msg_release(vioch, msg); + } + + scmi_vio_channel_release(vioch); + + return rc; +} + +static void virtio_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_vio_msg *msg = xfer->priv; + + if (msg) + msg_fetch_response(msg->input, msg->rx_len, xfer); +} + +static void virtio_fetch_notification(struct scmi_chan_info *cinfo, + size_t max_len, struct scmi_xfer *xfer) +{ + struct scmi_vio_msg *msg = xfer->priv; + + if (msg) + msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer); +} + +/** + * virtio_mark_txdone - Mark transmission done + * + * Free only completed polling transfer messages. + * + * Note that in the SCMI VirtIO transport we never explicitly release still + * outstanding but timed-out messages by forcibly re-adding them to the + * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the + * TX deferred worker, eventually clean up such messages once, finally, a late + * reply is received and discarded (if ever). + * + * This approach was deemed preferable since those pending timed-out buffers are + * still effectively owned by the SCMI platform VirtIO device even after timeout + * expiration: forcibly freeing and reusing them before they had been returned + * explicitly by the SCMI platform could lead to subtle bugs due to message + * corruption. + * An SCMI platform VirtIO device which never returns message buffers is + * anyway broken and it will quickly lead to exhaustion of available messages. 
+ * + * For this same reason, here, we take care to free only the polled messages + * that had been somehow replied (only if not by chance already processed on the + * IRQ path - the initial scmi_vio_msg_release() takes care of this) and also + * any timed-out polled message if that indeed appears to have been at least + * dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since such + * messages won't be freed elsewhere. Any other polled message is marked as + * VIO_MSG_POLL_TIMEOUT. + * + * Possible late replies to timed-out polled messages will be eventually freed + * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if + * dequeued on some other polling path. + * + * @cinfo: SCMI channel info + * @ret: Transmission return code + * @xfer: Transfer descriptor + */ +static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *xfer) +{ + unsigned long flags; + struct scmi_vio_channel *vioch = cinfo->transport_info; + struct scmi_vio_msg *msg = xfer->priv; + + if (!msg || !scmi_vio_channel_acquire(vioch)) + return; + + /* Ensure msg is unbound from xfer anyway at this point */ + smp_store_mb(xfer->priv, NULL); + + /* Must be a polled xfer and not already freed on the IRQ path */ + if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) { + scmi_vio_channel_release(vioch); + return; + } + + spin_lock_irqsave(&msg->poll_lock, flags); + /* Free the message unless it timed out while still inflight */ + if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE) + scmi_vio_msg_release(vioch, msg); + else if (msg->poll_status == VIO_MSG_POLLING) + msg->poll_status = VIO_MSG_POLL_TIMEOUT; + spin_unlock_irqrestore(&msg->poll_lock, flags); + + scmi_vio_channel_release(vioch); +} + +/** + * virtio_poll_done - Provide polling support for VirtIO transport + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being polled for. + * + * VirtIO core provides a polling mechanism based only on last used indexes: + * this means that it is possible to poll the virtqueues waiting for something + * new to arrive from the host side, but the only way to check if the freshly + * arrived buffer was indeed what we were waiting for is to compare the newly + * arrived message descriptor with the one we are polling on. + * + * As a consequence we may dequeue something different from the buffer we were + * poll-waiting for: if that is the case such early fetched buffers are + * then added to the @pending_cmds_list list for later processing by a + * dedicated deferred worker. + * + * So, basically, once something new is spotted we proceed to de-queue all the + * freshly received used buffers until we find the one we were polling on, or, + * we have 'seemingly' emptied the virtqueue; if some buffers are still pending + * in the vqueue at the end of the polling loop (possible due to inherent races + * in virtqueues handling mechanisms), we similarly kick the deferred worker + * and let it process those, to avoid indefinitely looping in the .poll_done + * busy-waiting helper. + * + * Finally, we delegate to the deferred worker also the final free of any + * timed-out reply to a polled message that we happen to dequeue.
+ * + * Note that, since we do NOT have a per-message suppress notification mechanism, + * the message we are polling for could alternatively be delivered via the usual + * IRQ callbacks on another core which happened to have IRQs enabled while we + * are actively polling for it here: in such a case it will be handled as such + * by scmi_rx_callback() and the polling loop in the SCMI Core TX path will be + * transparently terminated anyway. + * + * Return: True once polling has successfully completed. + */ +static bool virtio_poll_done(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + bool pending, found = false; + unsigned int length, any_prefetched = 0; + unsigned long flags; + struct scmi_vio_msg *next_msg, *msg = xfer->priv; + struct scmi_vio_channel *vioch = cinfo->transport_info; + + if (!msg) + return true; + + /* + * Already processed by some other polling loop on another CPU? + * + * Note that this message is acquired on the poll path so cannot vanish + * while inside this loop iteration even if concurrently processed on + * the IRQ path. + * + * Avoid acquiring the poll_lock since poll_status can change + * in a relevant manner only later in this same thread of execution: + * any other possible changes made concurrently by other polling loops + * or by a reply delivered on the IRQ path have no meaningful impact on + * this loop iteration: in other words it is harmless to allow this + * possible race, but it lets us avoid spinlocking with IRQs off in this + * initial part of the polling loop. + */ + if (msg->poll_status == VIO_MSG_POLL_DONE) + return true; + + if (!scmi_vio_channel_acquire(vioch)) + return true; + + /* Has the cmdq index moved at all? */ + pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); + if (!pending) { + scmi_vio_channel_release(vioch); + return false; + } + + spin_lock_irqsave(&vioch->lock, flags); + virtqueue_disable_cb(vioch->vqueue); + + /* + * Process all new messages till the polled-for message is found OR + * the vqueue is empty. + */ + while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) { + bool next_msg_done = false; + + /* + * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so + * that it can be properly freed even on timeout in mark_txdone. + */ + spin_lock(&next_msg->poll_lock); + if (next_msg->poll_status == VIO_MSG_POLLING) { + next_msg->poll_status = VIO_MSG_POLL_DONE; + next_msg_done = true; + } + spin_unlock(&next_msg->poll_lock); + + next_msg->rx_len = length; + /* Is this the message we were polling for? */ + if (next_msg == msg) { + found = true; + break; + } else if (next_msg_done) { + /* Skip the rest if this was another polled msg */ + continue; + } + + /* + * Enqueue for later processing any non-polled message and any + * timed-out polled one that we happen to have dequeued. + */ + spin_lock(&next_msg->poll_lock); + if (next_msg->poll_status == VIO_MSG_NOT_POLLED || + next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) { + spin_unlock(&next_msg->poll_lock); + + any_prefetched++; + spin_lock(&vioch->pending_lock); + list_add_tail(&next_msg->list, + &vioch->pending_cmds_list); + spin_unlock(&vioch->pending_lock); + } else { + spin_unlock(&next_msg->poll_lock); + } + } + + /* + * When the polling loop has successfully terminated, anything else that + * was queued in the meantime will be served by a deferred + * worker OR by the normal IRQ/callback OR by other poll loops. + * + * If we are still looking for the polled reply, the polling index has + * to be updated to the current vqueue last used index.
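For reference, the poll_status handling described above and implemented across virtio_send_message(), virtio_poll_done() and virtio_mark_txdone() amounts to a small per-message state machine. The summary below is illustrative only, not kernel code.

/* Per-message polling states, mirroring enum poll_states above. */
enum poll_state { NOT_POLLED, POLLING, POLL_DONE, POLL_TIMEOUT };

/*
 * Who moves a message between states:
 *  - send path:        NOT_POLLED -> POLLING      (polled transmissions only)
 *  - any polling loop: POLLING    -> POLL_DONE    (buffer dequeued from the vqueue)
 *  - mark_txdone:      POLLING    -> POLL_TIMEOUT (timed out while still queued)
 *  - POLL_DONE and POLL_TIMEOUT end the message's life cycle: it is freed either
 *    right away (DONE) or later by the IRQ/deferred-worker path (TIMEOUT), and
 *    its state is reset to NOT_POLLED when it returns to the free list.
 */
static int poll_state_is_final(enum poll_state s)
{
	return s == POLL_DONE || s == POLL_TIMEOUT;
}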
+ */ + if (found) { + pending = !virtqueue_enable_cb(vioch->vqueue); + } else { + msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); + pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); + } + + if (vioch->deferred_tx_wq && (any_prefetched || pending)) + queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work); + + spin_unlock_irqrestore(&vioch->lock, flags); + + scmi_vio_channel_release(vioch); + + return found; +} + +static const struct scmi_transport_ops scmi_virtio_ops = { + .link_supplier = virtio_link_supplier, + .chan_available = virtio_chan_available, + .chan_setup = virtio_chan_setup, + .chan_free = virtio_chan_free, + .get_max_msg = virtio_get_max_msg, + .send_message = virtio_send_message, + .fetch_response = virtio_fetch_response, + .fetch_notification = virtio_fetch_notification, + .mark_txdone = virtio_mark_txdone, + .poll_done = virtio_poll_done, +}; + +static int scmi_vio_probe(struct virtio_device *vdev) +{ + struct device *dev = &vdev->dev; + struct scmi_vio_channel *channels; + bool have_vq_rx; + int vq_cnt; + int i; + int ret; + struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT]; + + /* Only one SCMI VirtiO device allowed */ + if (scmi_vdev) { + dev_err(dev, + "One SCMI Virtio device was already initialized: only one allowed.\n"); + return -EBUSY; + } + + have_vq_rx = scmi_vio_have_vq_rx(vdev); + vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1; + + channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL); + if (!channels) + return -ENOMEM; + + if (have_vq_rx) + channels[VIRTIO_SCMI_VQ_RX].is_rx = true; + + ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks, + scmi_vio_vqueue_names, NULL); + if (ret) { + dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt); + return ret; + } + + for (i = 0; i < vq_cnt; i++) { + unsigned int sz; + + spin_lock_init(&channels[i].lock); + spin_lock_init(&channels[i].free_lock); + INIT_LIST_HEAD(&channels[i].free_list); + spin_lock_init(&channels[i].pending_lock); + INIT_LIST_HEAD(&channels[i].pending_cmds_list); + channels[i].vqueue = vqs[i]; + + sz = virtqueue_get_vring_size(channels[i].vqueue); + /* Tx messages need multiple descriptors. */ + if (!channels[i].is_rx) + sz /= DESCRIPTORS_PER_TX_MSG; + + if (sz > MSG_TOKEN_MAX) { + dev_info(dev, + "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", + channels[i].is_rx ? "rx" : "tx", + sz, MSG_TOKEN_MAX); + sz = MSG_TOKEN_MAX; + } + channels[i].max_msg = sz; + } + + vdev->priv = channels; + /* Ensure initialized scmi_vdev is visible */ + smp_store_mb(scmi_vdev, vdev); + + return 0; +} + +static void scmi_vio_remove(struct virtio_device *vdev) +{ + /* + * Once we get here, virtio_chan_free() will have already been called by + * the SCMI core for any existing channel and, as a consequence, all the + * virtio channels will have been already marked NOT ready, causing any + * outstanding message on any vqueue to be ignored by complete_cb: now + * we can just stop processing buffers and destroy the vqueues. 
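As a side note on the sizing logic in scmi_vio_probe() above: each Tx message occupies two descriptors, and the pending-message budget is clamped to the SCMI token space. The standalone sketch below reproduces that arithmetic; it is not kernel code, and the MSG_TOKEN_MAX value of 1024 is an assumption based on the 10-bit SCMI sequence-number field.

#include <stdio.h>

#define DESCRIPTORS_PER_TX_MSG	2
#define MSG_TOKEN_MAX		1024	/* assumption: 10-bit SCMI token space */

/* How many messages a channel may keep pending for a given vring size. */
static unsigned int vio_max_msg(unsigned int vring_size, int is_rx)
{
	unsigned int sz = vring_size;

	/* A Tx message needs one request and one response descriptor. */
	if (!is_rx)
		sz /= DESCRIPTORS_PER_TX_MSG;

	/* Never allow more pending messages than available sequence numbers. */
	return sz > MSG_TOKEN_MAX ? MSG_TOKEN_MAX : sz;
}

int main(void)
{
	/* e.g. a 256-entry cmdq allows at most 128 pending Tx messages */
	printf("tx max_msg = %u\n", vio_max_msg(256, 0));
	printf("rx max_msg = %u\n", vio_max_msg(256, 1));
	return 0;
}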
+ */ + virtio_reset_device(vdev); + vdev->config->del_vqs(vdev); + /* Ensure scmi_vdev is visible as NULL */ + smp_store_mb(scmi_vdev, NULL); +} + +static int scmi_vio_validate(struct virtio_device *vdev) +{ +#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE + if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { + dev_err(&vdev->dev, + "device does not comply with spec version 1.x\n"); + return -EINVAL; + } +#endif + return 0; +} + +static unsigned int features[] = { + VIRTIO_SCMI_F_P2A_CHANNELS, +}; + +static const struct virtio_device_id id_table[] = { + { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID }, + { 0 } +}; + +static struct virtio_driver virtio_scmi_driver = { + .driver.name = "scmi-virtio", + .driver.owner = THIS_MODULE, + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .id_table = id_table, + .probe = scmi_vio_probe, + .remove = scmi_vio_remove, + .validate = scmi_vio_validate, +}; + +static int __init virtio_scmi_init(void) +{ + return register_virtio_driver(&virtio_scmi_driver); +} + +static void virtio_scmi_exit(void) +{ + unregister_virtio_driver(&virtio_scmi_driver); +} + +const struct scmi_desc scmi_virtio_desc = { + .transport_init = virtio_scmi_init, + .transport_exit = virtio_scmi_exit, + .ops = &scmi_virtio_ops, + /* for non-realtime virtio devices */ + .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS, + .max_msg = 0, /* overridden by virtio_get_max_msg() */ + .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE), +}; diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c new file mode 100644 index 000000000..eaa8d9449 --- /dev/null +++ b/drivers/firmware/arm_scmi/voltage.c @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Voltage Protocol + * + * Copyright (C) 2020-2022 ARM Ltd. 
+ */ + +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include "protocols.h" + +#define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0) +#define REMAINING_LEVELS_MASK GENMASK(31, 16) +#define RETURNED_LEVELS_MASK GENMASK(11, 0) + +enum scmi_voltage_protocol_cmd { + VOLTAGE_DOMAIN_ATTRIBUTES = 0x3, + VOLTAGE_DESCRIBE_LEVELS = 0x4, + VOLTAGE_CONFIG_SET = 0x5, + VOLTAGE_CONFIG_GET = 0x6, + VOLTAGE_LEVEL_SET = 0x7, + VOLTAGE_LEVEL_GET = 0x8, + VOLTAGE_DOMAIN_NAME_GET = 0x09, +}; + +#define NUM_VOLTAGE_DOMAINS(x) ((u16)(FIELD_GET(VOLTAGE_DOMS_NUM_MASK, (x)))) + +struct scmi_msg_resp_domain_attributes { + __le32 attr; +#define SUPPORTS_ASYNC_LEVEL_SET(x) ((x) & BIT(31)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(30)) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; +}; + +struct scmi_msg_cmd_describe_levels { + __le32 domain_id; + __le32 level_index; +}; + +struct scmi_msg_resp_describe_levels { + __le32 flags; +#define NUM_REMAINING_LEVELS(f) ((u16)(FIELD_GET(REMAINING_LEVELS_MASK, (f)))) +#define NUM_RETURNED_LEVELS(f) ((u16)(FIELD_GET(RETURNED_LEVELS_MASK, (f)))) +#define SUPPORTS_SEGMENTED_LEVELS(f) ((f) & BIT(12)) + __le32 voltage[]; +}; + +struct scmi_msg_cmd_config_set { + __le32 domain_id; + __le32 config; +}; + +struct scmi_msg_cmd_level_set { + __le32 domain_id; + __le32 flags; + __le32 voltage_level; +}; + +struct scmi_resp_voltage_level_set_complete { + __le32 domain_id; + __le32 voltage_level; +}; + +struct voltage_info { + unsigned int version; + unsigned int num_domains; + struct scmi_voltage_info *domains; +}; + +static int scmi_protocol_attributes_get(const struct scmi_protocol_handle *ph, + struct voltage_info *vinfo) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(__le32), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + vinfo->num_domains = + NUM_VOLTAGE_DOMAINS(get_unaligned_le32(t->rx.buf)); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_init_voltage_levels(struct device *dev, + struct scmi_voltage_info *v, + u32 num_returned, u32 num_remaining, + bool segmented) +{ + u32 num_levels; + + num_levels = num_returned + num_remaining; + /* + * segmented levels entries are represented by a single triplet + * returned all in one go. 
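To make the segmented case concrete: the triplet describes the lowest level, the highest level and the step, all in microvolts, and a consumer derives the discrete levels from it. The standalone sketch below is illustrative only; it is not kernel code, and the triplet ordering follows the SCMI specification as understood here.

#include <stdio.h>

/*
 * A segmented voltage domain is described by a single triplet:
 * lowest level, highest level and step, all in microvolts.
 */
struct volt_segment {
	int low_uV;
	int high_uV;
	int step_uV;
};

/* Print every discrete level the triplet describes. */
static void expand_segment(const struct volt_segment *seg)
{
	int uV;

	if (seg->step_uV <= 0)
		return;

	for (uV = seg->low_uV; uV <= seg->high_uV; uV += seg->step_uV)
		printf("%d uV\n", uV);
}

int main(void)
{
	/* e.g. 700000/900000/50000 describes 700, 750, ..., 900 mV */
	struct volt_segment seg = { 700000, 900000, 50000 };

	expand_segment(&seg);
	return 0;
}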
+ */ + if (!num_levels || + (segmented && (num_remaining || num_returned != 3))) { + dev_err(dev, + "Invalid level descriptor(%d/%d/%d) for voltage dom %d\n", + num_levels, num_returned, num_remaining, v->id); + return -EINVAL; + } + + v->levels_uv = devm_kcalloc(dev, num_levels, sizeof(u32), GFP_KERNEL); + if (!v->levels_uv) + return -ENOMEM; + + v->num_levels = num_levels; + v->segmented = segmented; + + return 0; +} + +struct scmi_volt_ipriv { + struct device *dev; + struct scmi_voltage_info *v; +}; + +static void iter_volt_levels_prepare_message(void *message, + unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_cmd_describe_levels *msg = message; + const struct scmi_volt_ipriv *p = priv; + + msg->domain_id = cpu_to_le32(p->v->id); + msg->level_index = cpu_to_le32(desc_index); +} + +static int iter_volt_levels_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + int ret = 0; + u32 flags; + const struct scmi_msg_resp_describe_levels *r = response; + struct scmi_volt_ipriv *p = priv; + + flags = le32_to_cpu(r->flags); + st->num_returned = NUM_RETURNED_LEVELS(flags); + st->num_remaining = NUM_REMAINING_LEVELS(flags); + + /* Allocate space for num_levels if not already done */ + if (!p->v->num_levels) { + ret = scmi_init_voltage_levels(p->dev, p->v, st->num_returned, + st->num_remaining, + SUPPORTS_SEGMENTED_LEVELS(flags)); + if (!ret) + st->max_resources = p->v->num_levels; + } + + return ret; +} + +static int +iter_volt_levels_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + s32 val; + const struct scmi_msg_resp_describe_levels *r = response; + struct scmi_volt_ipriv *p = priv; + + val = (s32)le32_to_cpu(r->voltage[st->loop_idx]); + p->v->levels_uv[st->desc_index + st->loop_idx] = val; + if (val < 0) + p->v->negative_volts_allowed = true; + + return 0; +} + +static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph, + struct scmi_voltage_info *v) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_volt_levels_prepare_message, + .update_state = iter_volt_levels_update_state, + .process_response = iter_volt_levels_process_response, + }; + struct scmi_volt_ipriv vpriv = { + .dev = ph->dev, + .v = v, + }; + + iter = ph->hops->iter_response_init(ph, &ops, v->num_levels, + VOLTAGE_DESCRIBE_LEVELS, + sizeof(struct scmi_msg_cmd_describe_levels), + &vpriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + if (ret) { + v->num_levels = 0; + devm_kfree(ph->dev, v->levels_uv); + } + + return ret; +} + +static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, + struct voltage_info *vinfo) +{ + int ret, dom; + struct scmi_xfer *td; + struct scmi_msg_resp_domain_attributes *resp_dom; + + ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES, + sizeof(__le32), sizeof(*resp_dom), &td); + if (ret) + return ret; + resp_dom = td->rx.buf; + + for (dom = 0; dom < vinfo->num_domains; dom++) { + u32 attributes; + struct scmi_voltage_info *v; + + /* Retrieve domain attributes at first ... 
*/ + put_unaligned_le32(dom, td->tx.buf); + /* Skip domain on comms error */ + if (ph->xops->do_xfer(ph, td)) + continue; + + v = vinfo->domains + dom; + v->id = dom; + attributes = le32_to_cpu(resp_dom->attr); + strscpy(v->name, resp_dom->name, SCMI_SHORT_NAME_MAX_SIZE); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. + */ + if (PROTOCOL_REV_MAJOR(vinfo->version) >= 0x2) { + if (SUPPORTS_EXTENDED_NAMES(attributes)) + ph->hops->extended_name_get(ph, + VOLTAGE_DOMAIN_NAME_GET, + v->id, v->name, + SCMI_MAX_STR_SIZE); + if (SUPPORTS_ASYNC_LEVEL_SET(attributes)) + v->async_level_set = true; + } + + /* Skip invalid voltage descriptors */ + scmi_voltage_levels_get(ph, v); + } + + ph->xops->xfer_put(ph, td); + + return ret; +} + +static int __scmi_voltage_get_u32(const struct scmi_protocol_handle *ph, + u8 cmd_id, u32 domain_id, u32 *value) +{ + int ret; + struct scmi_xfer *t; + struct voltage_info *vinfo = ph->get_priv(ph); + + if (domain_id >= vinfo->num_domains) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(__le32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *value = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_voltage_config_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 config) +{ + int ret; + struct scmi_xfer *t; + struct voltage_info *vinfo = ph->get_priv(ph); + struct scmi_msg_cmd_config_set *cmd; + + if (domain_id >= vinfo->num_domains) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, VOLTAGE_CONFIG_SET, + sizeof(*cmd), 0, &t); + if (ret) + return ret; + + cmd = t->tx.buf; + cmd->domain_id = cpu_to_le32(domain_id); + cmd->config = cpu_to_le32(config & GENMASK(3, 0)); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *config) +{ + return __scmi_voltage_get_u32(ph, VOLTAGE_CONFIG_GET, + domain_id, config); +} + +static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph, + u32 domain_id, + enum scmi_voltage_level_mode mode, + s32 volt_uV) +{ + int ret; + struct scmi_xfer *t; + struct voltage_info *vinfo = ph->get_priv(ph); + struct scmi_msg_cmd_level_set *cmd; + struct scmi_voltage_info *v; + + if (domain_id >= vinfo->num_domains) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, VOLTAGE_LEVEL_SET, + sizeof(*cmd), 0, &t); + if (ret) + return ret; + + v = vinfo->domains + domain_id; + + cmd = t->tx.buf; + cmd->domain_id = cpu_to_le32(domain_id); + cmd->voltage_level = cpu_to_le32(volt_uV); + + if (!v->async_level_set || mode != SCMI_VOLTAGE_LEVEL_SET_AUTO) { + cmd->flags = cpu_to_le32(0x0); + ret = ph->xops->do_xfer(ph, t); + } else { + cmd->flags = cpu_to_le32(0x1); + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + struct scmi_resp_voltage_level_set_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->domain_id) == domain_id) + dev_dbg(ph->dev, + "Voltage domain %d set async to %d\n", + v->id, + le32_to_cpu(resp->voltage_level)); + else + ret = -EPROTO; + } + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_voltage_level_get(const struct scmi_protocol_handle *ph, + u32 domain_id, s32 *volt_uV) +{ + return __scmi_voltage_get_u32(ph, VOLTAGE_LEVEL_GET, + domain_id, (u32 *)volt_uV); +} + +static const struct scmi_voltage_info * 
__must_check +scmi_voltage_info_get(const struct scmi_protocol_handle *ph, u32 domain_id) +{ + struct voltage_info *vinfo = ph->get_priv(ph); + + if (domain_id >= vinfo->num_domains || + !vinfo->domains[domain_id].num_levels) + return NULL; + + return vinfo->domains + domain_id; +} + +static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph) +{ + struct voltage_info *vinfo = ph->get_priv(ph); + + return vinfo->num_domains; +} + +static struct scmi_voltage_proto_ops voltage_proto_ops = { + .num_domains_get = scmi_voltage_domains_num_get, + .info_get = scmi_voltage_info_get, + .config_set = scmi_voltage_config_set, + .config_get = scmi_voltage_config_get, + .level_set = scmi_voltage_level_set, + .level_get = scmi_voltage_level_get, +}; + +static int scmi_voltage_protocol_init(const struct scmi_protocol_handle *ph) +{ + int ret; + u32 version; + struct voltage_info *vinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Voltage Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + vinfo = devm_kzalloc(ph->dev, sizeof(*vinfo), GFP_KERNEL); + if (!vinfo) + return -ENOMEM; + vinfo->version = version; + + ret = scmi_protocol_attributes_get(ph, vinfo); + if (ret) + return ret; + + if (vinfo->num_domains) { + vinfo->domains = devm_kcalloc(ph->dev, vinfo->num_domains, + sizeof(*vinfo->domains), + GFP_KERNEL); + if (!vinfo->domains) + return -ENOMEM; + ret = scmi_voltage_descriptors_get(ph, vinfo); + if (ret) + return ret; + } else { + dev_warn(ph->dev, "No Voltage domains found.\n"); + } + + return ph->set_priv(ph, vinfo); +} + +static const struct scmi_protocol scmi_voltage = { + .id = SCMI_PROTOCOL_VOLTAGE, + .owner = THIS_MODULE, + .instance_init = &scmi_voltage_protocol_init, + .ops = &voltage_proto_ops, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(voltage, scmi_voltage)
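To make the wire layout of the voltage commands above concrete, the standalone sketch below packs a VOLTAGE_LEVEL_SET payload the way the __le32 fields of struct scmi_msg_cmd_level_set are laid out: domain_id, then flags (bit 0 requesting an asynchronous level set), then the signed level in microvolts. It is illustrative only, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Store a 32-bit value little-endian, as the __le32 message fields are. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Payload layout of VOLTAGE_LEVEL_SET: domain_id, flags, voltage_level. */
static unsigned int pack_level_set(uint8_t *buf, uint32_t domain_id,
				   int async, int32_t volt_uV)
{
	put_le32(buf + 0, domain_id);
	put_le32(buf + 4, async ? 0x1 : 0x0);	/* bit 0: async level set */
	put_le32(buf + 8, (uint32_t)volt_uV);	/* negative levels are allowed */
	return 12;
}

int main(void)
{
	uint8_t payload[12];
	unsigned int i, len = pack_level_set(payload, 2, 0, 850000);

	for (i = 0; i < len; i++)
		printf("%02x ", payload[i]);
	printf("\n");
	return 0;
}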