commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/platform/surface
parent     Initial commit.
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip

Adding upstream version 6.6.15.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>

Diffstat (limited to 'drivers/platform/surface'):
 29 files changed, 16216 insertions, 0 deletions
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
new file mode 100644
index 0000000000..b629e82af9
--- /dev/null
+++ b/drivers/platform/surface/Kconfig
@@ -0,0 +1,234 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microsoft Surface Platform-Specific Drivers
+#
+
+menuconfig SURFACE_PLATFORMS
+	bool "Microsoft Surface Platform-Specific Device Drivers"
+	depends on ARM64 || X86 || COMPILE_TEST
+	default y
+	help
+	  Say Y here to get to see options for platform-specific device drivers
+	  for Microsoft Surface devices. This option alone does not add any
+	  kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
+
+if SURFACE_PLATFORMS
+
+config SURFACE3_WMI
+	tristate "Surface 3 WMI Driver"
+	depends on ACPI_WMI
+	depends on DMI
+	depends on INPUT
+	depends on SPI
+	help
+	  Say Y here if you have a Surface 3.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called surface3-wmi.
+
+config SURFACE_3_POWER_OPREGION
+	tristate "Surface 3 battery platform operation region support"
+	depends on ACPI
+	depends on I2C
+	help
+	  This driver provides support for the ACPI operation region of the
+	  Surface 3 battery platform driver.
+
+config SURFACE_ACPI_NOTIFY
+	tristate "Surface ACPI Notify Driver"
+	depends on SURFACE_AGGREGATOR
+	help
+	  Surface ACPI Notify (SAN) driver for Microsoft Surface devices.
+
+	  This driver provides support for the ACPI interface (called SAN) of
+	  the Surface System Aggregator Module (SSAM) EC. This interface is used
+	  on 5th- and 6th-generation Microsoft Surface devices (including
+	  Surface Pro 5 and 6, Surface Book 2, Surface Laptops 1 and 2, and in
+	  reduced functionality on the Surface Laptop 3) to execute SSAM
+	  requests directly from ACPI code, as well as receive SSAM events and
+	  turn them into ACPI notifications. It essentially acts as a
+	  translation layer between the SSAM controller and ACPI.
+
+	  Specifically, this driver may be needed for battery status reporting,
+	  thermal sensor access, and real-time clock information, depending on
+	  the Surface device in question.
+
+config SURFACE_AGGREGATOR_CDEV
+	tristate "Surface System Aggregator Module User-Space Interface"
+	depends on SURFACE_AGGREGATOR
+	help
+	  Provides a misc-device interface to the Surface System Aggregator
+	  Module (SSAM) controller.
+
+	  This option provides a module (called surface_aggregator_cdev), that,
+	  when loaded, will add a client device (and its respective driver) to
+	  the SSAM controller. Said client device manages a misc-device
+	  interface (/dev/surface/aggregator), which can be used by user-space
+	  tools to directly communicate with the SSAM EC by sending requests and
+	  receiving the corresponding responses.
+
+	  The provided interface is intended for debugging and development only,
+	  and should not be used otherwise.
+
+config SURFACE_AGGREGATOR_HUB
+	tristate "Surface System Aggregator Module Subsystem Device Hubs"
+	depends on SURFACE_AGGREGATOR
+	depends on SURFACE_AGGREGATOR_BUS
+	help
+	  Device-hub drivers for Surface System Aggregator Module (SSAM)
+	  subsystem devices.
+
+	  Provides subsystem hub drivers which manage client devices on various
+	  SSAM subsystems. In some subsystems, notably the BAS subsystem
+	  managing devices contained in the base of the Surface Book 3 and the
+	  KIP subsystem managing type-cover devices in the Surface Pro 8 and
+	  Surface Pro X, devices can be (hot-)removed. Hub devices and drivers
+	  are required to manage these subdevices.
+
+	  Devices managed via these hubs are:
+	  - Battery/AC devices (Surface Book 3).
+	  - HID input devices (7th-generation and later models with detachable
+	    input devices).
+
+	  Select M (recommended) or Y here if you want support for the above
+	  mentioned devices on the corresponding Surface models. Without this
+	  module, the respective devices mentioned above will not be instantiated
+	  and thus any functionality provided by them will be missing, even when
+	  drivers for these devices are present. This module only provides the
+	  respective subsystem hubs. Both drivers and device specification (e.g.
+	  via the Surface Aggregator Registry) for these devices still need to be
+	  selected via other options.
+
+config SURFACE_AGGREGATOR_REGISTRY
+	tristate "Surface System Aggregator Module Device Registry"
+	depends on SURFACE_AGGREGATOR
+	depends on SURFACE_AGGREGATOR_BUS
+	help
+	  Device-registry for Surface System Aggregator Module (SSAM) devices.
+
+	  Provides a module and driver which act as a device-registry for SSAM
+	  client devices that cannot be detected automatically, e.g. via ACPI.
+	  Such devices are instead provided and managed via this registry.
+
+	  Devices provided via this registry are:
+	  - Platform profile (performance-/cooling-mode) device (5th- and later
+	    generations).
+	  - Battery/AC devices (7th-generation).
+	  - HID input devices (7th-generation).
+
+	  Select M (recommended) or Y here if you want support for the above
+	  mentioned devices on the corresponding Surface models. Without this
+	  module, the respective devices will not be instantiated and thus any
+	  functionality provided by them will be missing, even when drivers for
+	  these devices are present. In other words, this module only provides
+	  the respective client devices. Drivers for these devices still need to
+	  be selected via the other options.
+
+config SURFACE_AGGREGATOR_TABLET_SWITCH
+	tristate "Surface Aggregator Generic Tablet-Mode Switch Driver"
+	depends on SURFACE_AGGREGATOR
+	depends on SURFACE_AGGREGATOR_BUS
+	depends on INPUT
+	help
+	  Provides a tablet-mode switch input device on Microsoft Surface models
+	  using the KIP subsystem for detachable keyboards (e.g. keyboard covers)
+	  or the POS subsystem for device/screen posture changes.
+
+	  The KIP subsystem is used on newer Surface generations to handle
+	  detachable input peripherals, specifically the keyboard cover
+	  (containing keyboard and touchpad) on the Surface Pro 8 and Surface
+	  Pro X. The POS subsystem is used for device posture change
+	  notifications on the Surface Laptop Studio. This module provides a
+	  driver to let user-space know when the device should be considered in
+	  tablet-mode due to the keyboard cover being detached or folded back
+	  (essentially signaling when the keyboard is not available for input).
+	  It does so by creating a tablet-mode switch input device, sending the
+	  standard SW_TABLET_MODE event on mode change.
+
+	  Select M or Y here, if you want to provide tablet-mode switch input
+	  events on the Surface Pro 8, Surface Pro X, and Surface Laptop Studio.
+
+config SURFACE_DTX
+	tristate "Surface DTX (Detachment System) Driver"
+	depends on SURFACE_AGGREGATOR
+	depends on INPUT
+	help
+	  Driver for the Surface Book clipboard detachment system (DTX).
+
+	  On the Surface Book series devices, the display part containing the
+	  CPU (called the clipboard) can be detached from the base (containing a
+	  battery, the keyboard, and, optionally, a discrete GPU) by (if
+	  necessary) unlocking and opening the latch connecting both parts.
+
+	  This driver provides a user-space interface that can influence the
+	  behavior of this process, which includes the option to abort it in
+	  case the base is still in use or speed it up in case it is not.
+
+	  Note that this module can be built without support for the Surface
+	  Aggregator Bus (i.e. CONFIG_SURFACE_AGGREGATOR_BUS=n). In that case,
+	  some devices, specifically the Surface Book 3, will not be supported.
+
+config SURFACE_GPE
+	tristate "Surface GPE/Lid Support Driver"
+	depends on ACPI
+	depends on DMI
+	help
+	  This driver marks the GPEs related to the ACPI lid device found on
+	  Microsoft Surface devices as wakeup sources and prepares them
+	  accordingly. It is required on those devices to allow wake-ups from
+	  suspend by opening the lid.
+
+config SURFACE_HOTPLUG
+	tristate "Surface Hot-Plug Driver"
+	depends on ACPI
+	depends on GPIOLIB
+	help
+	  Driver for out-of-band hot-plug event signaling on Microsoft Surface
+	  devices with hot-pluggable PCIe cards.
+
+	  This driver is used on Surface Book (2 and 3) devices with a
+	  hot-pluggable discrete GPU (dGPU). When not in use, the dGPU on those
+	  devices can enter D3cold, which prevents in-band (standard) PCIe
+	  hot-plug signaling. Thus, without this driver, detaching the base
+	  containing the dGPU will not correctly update the state of the
+	  corresponding PCIe device if it is in D3cold. This driver adds support
+	  for out-of-band hot-plug notifications, ensuring that the device state
+	  is properly updated even when the device in question is in D3cold.
+
+	  Select M or Y here, if you want to (fully) support hot-plugging of
+	  dGPU devices on the Surface Book 2 and/or 3 during D3cold.
+
+config SURFACE_PLATFORM_PROFILE
+	tristate "Surface Platform Profile Driver"
+	depends on ACPI
+	depends on SURFACE_AGGREGATOR_REGISTRY
+	select ACPI_PLATFORM_PROFILE
+	help
+	  Provides support for the ACPI platform profile on 5th- and later
+	  generation Microsoft Surface devices.
+
+	  More specifically, this driver provides ACPI platform profile support
+	  on Microsoft Surface devices with a Surface System Aggregator Module
+	  (SSAM) connected via the Surface Serial Hub (SSH / SAM-over-SSH). In
+	  other words, this driver provides platform profile support on the
+	  Surface Pro 5, Surface Book 2, Surface Laptop, Surface Laptop Go and
+	  later. On those devices, the platform profile can significantly
+	  influence cooling behavior, e.g. setting it to 'quiet' (default) or
+	  'low-power' can significantly limit performance of the discrete GPU on
+	  Surface Books, while in turn leading to lower power consumption and/or
+	  less fan noise.
+
+	  Select M or Y here, if you want to include ACPI platform profile
+	  support on the above mentioned devices.
+
+config SURFACE_PRO3_BUTTON
+	tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
+	depends on ACPI
+	depends on INPUT
+	help
+	  This driver handles the power/home/volume buttons on the Microsoft
+	  Surface Pro 3/4 tablet.
+
+source "drivers/platform/surface/aggregator/Kconfig"
+
+endif # SURFACE_PLATFORMS
diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
new file mode 100644
index 0000000000..5334433093
--- /dev/null
+++ b/drivers/platform/surface/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/surface
+# Microsoft Surface Platform-Specific Drivers
+#
+
+obj-$(CONFIG_SURFACE3_WMI)			+= surface3-wmi.o
+obj-$(CONFIG_SURFACE_3_POWER_OPREGION)		+= surface3_power.o
+obj-$(CONFIG_SURFACE_ACPI_NOTIFY)		+= surface_acpi_notify.o
+obj-$(CONFIG_SURFACE_AGGREGATOR)		+= aggregator/
+obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV)		+= surface_aggregator_cdev.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_HUB)		+= surface_aggregator_hub.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY)	+= surface_aggregator_registry.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_TABLET_SWITCH)	+= surface_aggregator_tabletsw.o
+obj-$(CONFIG_SURFACE_DTX)			+= surface_dtx.o
+obj-$(CONFIG_SURFACE_GPE)			+= surface_gpe.o
+obj-$(CONFIG_SURFACE_HOTPLUG)			+= surface_hotplug.o
+obj-$(CONFIG_SURFACE_PLATFORM_PROFILE)		+= surface_platform_profile.o
+obj-$(CONFIG_SURFACE_PRO3_BUTTON)		+= surfacepro3_button.o
diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
new file mode 100644
index 0000000000..88afc38ffd
--- /dev/null
+++ b/drivers/platform/surface/aggregator/Kconfig
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+
+menuconfig SURFACE_AGGREGATOR
+	tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
+	depends on SERIAL_DEV_BUS
+	depends on ACPI && !RISCV
+	select CRC_CCITT
+	help
+	  The Surface System Aggregator Module (Surface SAM or SSAM) is an
+	  embedded controller (EC) found on 5th- and later-generation Microsoft
+	  Surface devices (i.e. Surface Pro 5, Surface Book 2, Surface Laptop,
+	  and newer, with the exception of Surface Go series devices).
+
+	  Depending on the device in question, this EC provides varying
+	  functionality, including:
+	  - EC access from ACPI via Surface ACPI Notify (5th- and 6th-generation)
+	  - battery status information (all devices)
+	  - thermal sensor access (all devices)
+	  - performance mode / cooling mode control (all devices)
+	  - clipboard detachment system control (Surface Book 2 and 3)
+	  - HID / keyboard input (Surface Laptops, Surface Book 3)
+
+	  This option controls whether the Surface SAM subsystem core will be
+	  built. This includes a driver for the Surface Serial Hub (SSH), which
+	  is the device responsible for the communication with the EC, and a
+	  basic kernel interface exposing the EC functionality to other client
+	  drivers, i.e. allowing them to make requests to the EC and receive
+	  events from it. Selecting this option alone will not provide any
+	  client drivers and therefore no functionality beyond the in-kernel
+	  interface. Said functionality is the responsibility of the respective
+	  client drivers.
+
+	  Note: While 4th-generation Surface devices also make use of a SAM EC,
+	  due to a difference in the communication interface of the controller,
+	  only 5th and later generations are currently supported. Specifically,
+	  devices using SAM-over-SSH are supported, whereas devices using
+	  SAM-over-HID, which is used on the 4th generation, are currently not
+	  supported.
+
+	  Choose m if you want to build the SAM subsystem core and SSH driver as
+	  a module, y if you want to build it into the kernel and n if you don't
+	  want it at all.
+
+config SURFACE_AGGREGATOR_BUS
+	bool "Surface System Aggregator Module Bus"
+	depends on SURFACE_AGGREGATOR
+	default y
+	help
+	  Expands the Surface System Aggregator Module (SSAM) core driver by
+	  providing a dedicated bus and client-device type.
+
+	  This bus and device type are intended to provide and simplify support
+	  for non-platform and non-ACPI SSAM devices, i.e. SSAM devices that are
+	  not auto-detectable via the conventional means (e.g. ACPI).
+
+config SURFACE_AGGREGATOR_ERROR_INJECTION
+	bool "Surface System Aggregator Module Error Injection Capabilities"
+	depends on SURFACE_AGGREGATOR
+	depends on FUNCTION_ERROR_INJECTION
+	help
+	  Provides error-injection capabilities for the Surface System
+	  Aggregator Module subsystem and Surface Serial Hub driver.
+
+	  Specifically, exports error injection hooks to be used with the
+	  kernel's function error injection capabilities to simulate underlying
+	  transport and communication problems, such as invalid data sent to or
+	  received from the EC, dropped data, and communication timeouts.
+	  Intended for development and debugging.
diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
new file mode 100644
index 0000000000..fdf664a217
--- /dev/null
+++ b/drivers/platform/surface/aggregator/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+
+# For include/trace/define_trace.h to include trace.h
+CFLAGS_core.o = -I$(src)
+
+obj-$(CONFIG_SURFACE_AGGREGATOR) += surface_aggregator.o
+
+surface_aggregator-y := core.o
+surface_aggregator-y += ssh_parser.o
+surface_aggregator-y += ssh_packet_layer.o
+surface_aggregator-y += ssh_request_layer.o
+surface_aggregator-$(CONFIG_SURFACE_AGGREGATOR_BUS) += bus.o
+surface_aggregator-y += controller.o
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
new file mode 100644
index 0000000000..42ccd7f1c9
--- /dev/null
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module bus and device integration.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+#include "bus.h"
+#include "controller.h"
+
+
+/* -- Device and bus functions. --------------------------------------------- */
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ssam_device *sdev = to_ssam_device(dev);
+
+	return sysfs_emit(buf, "ssam:d%02Xc%02Xt%02Xi%02Xf%02X\n",
+			  sdev->uid.domain, sdev->uid.category, sdev->uid.target,
+			  sdev->uid.instance, sdev->uid.function);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *ssam_device_attrs[] = {
+	&dev_attr_modalias.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(ssam_device);
+
+static int ssam_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+	const struct ssam_device *sdev = to_ssam_device(dev);
+
+	return add_uevent_var(env, "MODALIAS=ssam:d%02Xc%02Xt%02Xi%02Xf%02X",
+			      sdev->uid.domain, sdev->uid.category,
+			      sdev->uid.target, sdev->uid.instance,
+			      sdev->uid.function);
+}
+
+static void ssam_device_release(struct device *dev)
+{
+	struct ssam_device *sdev = to_ssam_device(dev);
+
+	ssam_controller_put(sdev->ctrl);
+	fwnode_handle_put(sdev->dev.fwnode);
+	kfree(sdev);
+}
+
+const struct device_type ssam_device_type = {
+	.name    = "surface_aggregator_device",
+	.groups  = ssam_device_groups,
+	.uevent  = ssam_device_uevent,
+	.release = ssam_device_release,
+};
+EXPORT_SYMBOL_GPL(ssam_device_type);
+
+/**
+ * ssam_device_alloc() - Allocate and initialize a SSAM client device.
+ * @ctrl: The controller under which the device should be added.
+ * @uid: The UID of the device to be added.
+ *
+ * Allocates and initializes a new client device. The parent of the device
+ * will be set to the controller device and the name will be set based on the
+ * UID. Note that the device still has to be added via ssam_device_add().
+ * Refer to that function for more details.
+ *
+ * Return: Returns the newly allocated and initialized SSAM client device, or
+ * %NULL if it could not be allocated.
+ */
+struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
+				      struct ssam_device_uid uid)
+{
+	struct ssam_device *sdev;
+
+	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+	if (!sdev)
+		return NULL;
+
+	device_initialize(&sdev->dev);
+	sdev->dev.bus = &ssam_bus_type;
+	sdev->dev.type = &ssam_device_type;
+	sdev->dev.parent = ssam_controller_device(ctrl);
+	sdev->ctrl = ssam_controller_get(ctrl);
+	sdev->uid = uid;
+
+	dev_set_name(&sdev->dev, "%02x:%02x:%02x:%02x:%02x",
+		     sdev->uid.domain, sdev->uid.category, sdev->uid.target,
+		     sdev->uid.instance, sdev->uid.function);
+
+	return sdev;
+}
+EXPORT_SYMBOL_GPL(ssam_device_alloc);
+
+/**
+ * ssam_device_add() - Add a SSAM client device.
+ * @sdev: The SSAM client device to be added.
+ *
+ * Added client devices must be guaranteed to always have a valid and active
+ * controller. Thus, this function will fail with %-ENODEV if the controller
+ * of the device has not been initialized yet, has been suspended, or has been
+ * shut down.
+ *
+ * The caller of this function should ensure that the corresponding call to
+ * ssam_device_remove() is issued before the controller is shut down. If the
+ * added device is a direct child of the controller device (default), it will
+ * be automatically removed when the controller is shut down.
+ *
+ * By default, the controller device will become the parent of the newly
+ * created client device. The parent may be changed before ssam_device_add is
+ * called, but care must be taken that a) the correct suspend/resume ordering
+ * is guaranteed and b) the client device does not outlive the controller,
+ * i.e. that the device is removed before the controller is being shut down.
+ * In case these guarantees have to be manually enforced, please refer to the
+ * ssam_client_link() and ssam_client_bind() functions, which are intended to
+ * set up device-links for this purpose.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssam_device_add(struct ssam_device *sdev)
+{
+	int status;
+
+	/*
+	 * Ensure that we can only add new devices to a controller if it has
+	 * been started and is not going away soon. This works in combination
+	 * with ssam_controller_remove_clients to ensure driver presence for the
+	 * controller device, i.e. it ensures that the controller (sdev->ctrl)
+	 * is always valid and can be used for requests as long as the client
+	 * device we add here is registered as child under it. This essentially
+	 * guarantees that the client driver can always expect the preconditions
+	 * for functions like ssam_request_do_sync() (controller has to be
+	 * started and is not suspended) to hold and thus does not have to check
+	 * for them.
+	 *
+	 * Note that for this to work, the controller has to be a parent device.
+	 * If it is not a direct parent, care has to be taken that the device is
+	 * removed via ssam_device_remove(), as device_unregister does not
+	 * remove child devices recursively.
+	 */
+	ssam_controller_statelock(sdev->ctrl);
+
+	if (sdev->ctrl->state != SSAM_CONTROLLER_STARTED) {
+		ssam_controller_stateunlock(sdev->ctrl);
+		return -ENODEV;
+	}
+
+	status = device_add(&sdev->dev);
+
+	ssam_controller_stateunlock(sdev->ctrl);
+	return status;
+}
+EXPORT_SYMBOL_GPL(ssam_device_add);
+
+/**
+ * ssam_device_remove() - Remove a SSAM client device.
+ * @sdev: The device to remove.
+ *
+ * Removes and unregisters the provided SSAM client device.
+ */
+void ssam_device_remove(struct ssam_device *sdev)
+{
+	device_unregister(&sdev->dev);
+}
+EXPORT_SYMBOL_GPL(ssam_device_remove);
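A usage sketch of the two functions above (not part of this patch; the UID
values and function name are hypothetical): a hub or registry driver would
allocate a client device, add it, and drop the allocation reference on
failure, exactly as documented in the alloc/add kernel-doc.

static int example_add_client(struct ssam_controller *ctrl)
{
	/* Hypothetical UID; real values depend on the device in question. */
	struct ssam_device_uid uid = {
		.domain = 0x01,
		.category = 0x15,
		.target = 0x02,
		.instance = 0x00,
		.function = 0x00,
	};
	struct ssam_device *sdev;
	int status;

	sdev = ssam_device_alloc(ctrl, uid);
	if (!sdev)
		return -ENOMEM;

	status = ssam_device_add(sdev);
	if (status)
		ssam_device_put(sdev);	/* drop the reference from allocation */

	return status;
}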
+
+/**
+ * ssam_device_id_compatible() - Check if a device ID matches a UID.
+ * @id: The device ID as potential match.
+ * @uid: The device UID matching against.
+ *
+ * Check if the given ID is a match for the given UID, i.e. if a device with
+ * the provided UID is compatible to the given ID following the match rules
+ * described in its &ssam_device_id.match_flags member.
+ *
+ * Return: Returns %true if the given UID is compatible to the match rule
+ * described by the given ID, %false otherwise.
+ */
+static bool ssam_device_id_compatible(const struct ssam_device_id *id,
+				      struct ssam_device_uid uid)
+{
+	if (id->domain != uid.domain || id->category != uid.category)
+		return false;
+
+	if ((id->match_flags & SSAM_MATCH_TARGET) && id->target != uid.target)
+		return false;
+
+	if ((id->match_flags & SSAM_MATCH_INSTANCE) && id->instance != uid.instance)
+		return false;
+
+	if ((id->match_flags & SSAM_MATCH_FUNCTION) && id->function != uid.function)
+		return false;
+
+	return true;
+}
+
+/**
+ * ssam_device_id_is_null() - Check if a device ID is null.
+ * @id: The device ID to check.
+ *
+ * Check if a given device ID is null, i.e. all zeros. Used to check for the
+ * end of ``MODULE_DEVICE_TABLE(ssam, ...)`` or similar lists.
+ *
+ * Return: Returns %true if the given ID represents a null ID, %false
+ * otherwise.
+ */
+static bool ssam_device_id_is_null(const struct ssam_device_id *id)
+{
+	return id->match_flags == 0 &&
+		id->domain == 0 &&
+		id->category == 0 &&
+		id->target == 0 &&
+		id->instance == 0 &&
+		id->function == 0 &&
+		id->driver_data == 0;
+}
+
+/**
+ * ssam_device_id_match() - Find the matching ID table entry for the given UID.
+ * @table: The table to search in.
+ * @uid: The UID to matched against the individual table entries.
+ *
+ * Find the first match for the provided device UID in the provided ID table
+ * and return it. Returns %NULL if no match could be found.
+ */
+const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
+						  const struct ssam_device_uid uid)
+{
+	const struct ssam_device_id *id;
+
+	for (id = table; !ssam_device_id_is_null(id); ++id)
+		if (ssam_device_id_compatible(id, uid))
+			return id;
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(ssam_device_id_match);
+
+/**
+ * ssam_device_get_match() - Find and return the ID matching the device in the
+ * ID table of the bound driver.
+ * @dev: The device for which to get the matching ID table entry.
+ *
+ * Find the first match for the UID of the device in the ID table of the
+ * currently bound driver and return it. Returns %NULL if the device does not
+ * have a driver bound to it, the driver does not have match_table (i.e. it is
+ * %NULL), or there is no match in the driver's match_table.
+ *
+ * This function essentially calls ssam_device_id_match() with the ID table of
+ * the bound device driver and the UID of the device.
+ *
+ * Return: Returns the first match for the UID of the device in the device
+ * driver's match table, or %NULL if no such match could be found.
+ */
+const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev)
+{
+	const struct ssam_device_driver *sdrv;
+
+	sdrv = to_ssam_device_driver(dev->dev.driver);
+	if (!sdrv)
+		return NULL;
+
+	if (!sdrv->match_table)
+		return NULL;
+
+	return ssam_device_id_match(sdrv->match_table, dev->uid);
+}
+EXPORT_SYMBOL_GPL(ssam_device_get_match);
+
+/**
+ * ssam_device_get_match_data() - Find the ID matching the device in the
+ * ID table of the bound driver and return its ``driver_data`` member.
+ * @dev: The device for which to get the match data.
+ *
+ * Find the first match for the UID of the device in the ID table of the
+ * corresponding driver and return its driver_data. Returns %NULL if the
+ * device does not have a driver bound to it, the driver does not have
+ * match_table (i.e. it is %NULL), there is no match in the driver's
+ * match_table, or the match does not have any driver_data.
+ *
+ * This function essentially calls ssam_device_get_match() and, if any match
+ * could be found, returns its ``struct ssam_device_id.driver_data`` member.
+ *
+ * Return: Returns the driver data associated with the first match for the UID
+ * of the device in the device driver's match table, or %NULL if no such match
+ * could be found.
+ */
+const void *ssam_device_get_match_data(const struct ssam_device *dev)
+{
+	const struct ssam_device_id *id;
+
+	id = ssam_device_get_match(dev);
+	if (!id)
+		return NULL;
+
+	return (const void *)id->driver_data;
+}
+EXPORT_SYMBOL_GPL(ssam_device_get_match_data);
+
+static int ssam_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct ssam_device_driver *sdrv = to_ssam_device_driver(drv);
+	struct ssam_device *sdev = to_ssam_device(dev);
+
+	if (!is_ssam_device(dev))
+		return 0;
+
+	return !!ssam_device_id_match(sdrv->match_table, sdev->uid);
+}
+
+static int ssam_bus_probe(struct device *dev)
+{
+	return to_ssam_device_driver(dev->driver)
+		->probe(to_ssam_device(dev));
+}
+
+static void ssam_bus_remove(struct device *dev)
+{
+	struct ssam_device_driver *sdrv = to_ssam_device_driver(dev->driver);
+
+	if (sdrv->remove)
+		sdrv->remove(to_ssam_device(dev));
+}
+
+struct bus_type ssam_bus_type = {
+	.name   = "surface_aggregator",
+	.match  = ssam_bus_match,
+	.probe  = ssam_bus_probe,
+	.remove = ssam_bus_remove,
+};
+EXPORT_SYMBOL_GPL(ssam_bus_type);
+
+/**
+ * __ssam_device_driver_register() - Register a SSAM client device driver.
+ * @sdrv: The driver to register.
+ * @owner: The module owning the provided driver.
+ *
+ * Please refer to the ssam_device_driver_register() macro for the normal way
+ * to register a driver from inside its owning module.
+ */
+int __ssam_device_driver_register(struct ssam_device_driver *sdrv,
+				  struct module *owner)
+{
+	sdrv->driver.owner = owner;
+	sdrv->driver.bus = &ssam_bus_type;
+
+	/* force drivers to async probe so I/O is possible in probe */
+	sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
+
+	return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__ssam_device_driver_register);
+
+/**
+ * ssam_device_driver_unregister - Unregister a SSAM device driver.
+ * @sdrv: The driver to unregister.
+ */
+void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
+{
+	driver_unregister(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
+
+
+/* -- Bus registration. ----------------------------------------------------- */
+
+/**
+ * ssam_bus_register() - Register and set-up the SSAM client device bus.
+ */
+int ssam_bus_register(void)
+{
+	return bus_register(&ssam_bus_type);
+}
+
+/**
+ * ssam_bus_unregister() - Unregister the SSAM client device bus.
+ */
+void ssam_bus_unregister(void)
+{
+	return bus_unregister(&ssam_bus_type);
+}
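On the client side, a minimal driver skeleton for this bus could look as
follows. This is a sketch, not part of the patch: the names and UID values
are hypothetical, the ID-table fields and SSAM_MATCH_TARGET flag are taken
from the matching code above, and registration goes through the
ssam_device_driver_register()/ssam_device_driver_unregister() pair referenced
in the kernel-doc.

#include <linux/module.h>
#include <linux/surface_aggregator/device.h>

static const struct ssam_device_id example_match_table[] = {
	{
		/* Match on domain/category/target; any instance/function. */
		.match_flags = SSAM_MATCH_TARGET,
		.domain = 0x01,		/* hypothetical values */
		.category = 0x15,
		.target = 0x02,
	},
	{ /* null entry terminates the table, see ssam_device_id_is_null() */ },
};
MODULE_DEVICE_TABLE(ssam, example_match_table);

static int example_probe(struct ssam_device *sdev)
{
	/* May be NULL if the matched entry carries no driver_data. */
	const void *data = ssam_device_get_match_data(sdev);

	dev_info(&sdev->dev, "bound, match data: %p\n", data);
	return 0;
}

static struct ssam_device_driver example_driver = {
	.probe = example_probe,
	.match_table = example_match_table,
	.driver = {
		.name = "example_ssam_client",
	},
};
module_driver(example_driver, ssam_device_driver_register,
	      ssam_device_driver_unregister);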
+
+
+/* -- Helpers for controller and hub devices. ------------------------------- */
+
+static int ssam_device_uid_from_string(const char *str, struct ssam_device_uid *uid)
+{
+	u8 d, tc, tid, iid, fn;
+	int n;
+
+	n = sscanf(str, "%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
+	if (n != 5)
+		return -EINVAL;
+
+	uid->domain = d;
+	uid->category = tc;
+	uid->target = tid;
+	uid->instance = iid;
+	uid->function = fn;
+
+	return 0;
+}
+
+static int ssam_get_uid_for_node(struct fwnode_handle *node, struct ssam_device_uid *uid)
+{
+	const char *str = fwnode_get_name(node);
+
+	/*
+	 * To simplify definitions of firmware nodes, we set the device name
+	 * based on the UID of the device, prefixed with "ssam:".
+	 */
+	if (strncmp(str, "ssam:", strlen("ssam:")) != 0)
+		return -ENODEV;
+
+	str += strlen("ssam:");
+	return ssam_device_uid_from_string(str, uid);
+}
+
+static int ssam_add_client_device(struct device *parent, struct ssam_controller *ctrl,
+				  struct fwnode_handle *node)
+{
+	struct ssam_device_uid uid;
+	struct ssam_device *sdev;
+	int status;
+
+	status = ssam_get_uid_for_node(node, &uid);
+	if (status)
+		return status;
+
+	sdev = ssam_device_alloc(ctrl, uid);
+	if (!sdev)
+		return -ENOMEM;
+
+	sdev->dev.parent = parent;
+	sdev->dev.fwnode = fwnode_handle_get(node);
+
+	status = ssam_device_add(sdev);
+	if (status)
+		ssam_device_put(sdev);
+
+	return status;
+}
+
+/**
+ * __ssam_register_clients() - Register client devices defined under the
+ * given firmware node as children of the given device.
+ * @parent: The parent device under which clients should be registered.
+ * @ctrl: The controller with which the clients should be registered.
+ * @node: The firmware node holding definitions of the devices to be added.
+ *
+ * Register all clients that have been defined as children of the given root
+ * firmware node as children of the given parent device. The respective child
+ * firmware nodes will be associated with the correspondingly created child
+ * devices.
+ *
+ * The given controller will be used to instantiate the new devices. See
+ * ssam_device_add() for details.
+ *
+ * Note that, generally, the use of either ssam_device_register_clients() or
+ * ssam_register_clients() should be preferred as they directly use the
+ * firmware node and/or controller associated with the given device. This
+ * function is only intended for use when different device specifications (e.g.
+ * ACPI and firmware nodes) need to be combined (as is done in the platform hub
+ * of the device registry).
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+			    struct fwnode_handle *node)
+{
+	struct fwnode_handle *child;
+	int status;
+
+	fwnode_for_each_child_node(node, child) {
+		/*
+		 * Try to add the device specified in the firmware node. If
+		 * this fails with -ENODEV, the node does not specify any SSAM
+		 * device, so ignore it and continue with the next one.
+		 */
+		status = ssam_add_client_device(parent, ctrl, child);
+		if (status && status != -ENODEV) {
+			fwnode_handle_put(child);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	ssam_remove_clients(parent);
+	return status;
+}
+EXPORT_SYMBOL_GPL(__ssam_register_clients);
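For illustration, a hub could describe such a client via the generic
software-node API (assumed here; the node names and UID are purely
hypothetical), which __ssam_register_clients() would then pick up through
fwnode_get_name():

static const struct software_node example_hub_node = {
	.name = "ssam_platform_hub",
};

static const struct software_node example_client_node = {
	/* "ssam:" prefix, then <domain>:<category>:<target>:<instance>:<function>. */
	.name = "ssam:01:15:02:00:00",
	.parent = &example_hub_node,
};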
+
+static int ssam_remove_device(struct device *dev, void *_data)
+{
+	struct ssam_device *sdev = to_ssam_device(dev);
+
+	if (is_ssam_device(dev))
+		ssam_device_remove(sdev);
+
+	return 0;
+}
+
+/**
+ * ssam_remove_clients() - Remove SSAM client devices registered as direct
+ * children under the given parent device.
+ * @dev: The (parent) device to remove all direct clients for.
+ *
+ * Remove all SSAM client devices registered as direct children under the given
+ * device. Note that this only accounts for direct children of the device.
+ * Refer to ssam_device_add()/ssam_device_remove() for more details.
+ */
+void ssam_remove_clients(struct device *dev)
+{
+	device_for_each_child_reverse(dev, NULL, ssam_remove_device);
+}
+EXPORT_SYMBOL_GPL(ssam_remove_clients);
diff --git a/drivers/platform/surface/aggregator/bus.h b/drivers/platform/surface/aggregator/bus.h
new file mode 100644
index 0000000000..5b4dbf2190
--- /dev/null
+++ b/drivers/platform/surface/aggregator/bus.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface System Aggregator Module bus and device integration.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_BUS_H
+#define _SURFACE_AGGREGATOR_BUS_H
+
+#include <linux/surface_aggregator/controller.h>
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+int ssam_bus_register(void);
+void ssam_bus_unregister(void);
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline int ssam_bus_register(void) { return 0; }
+static inline void ssam_bus_unregister(void) {}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+#endif /* _SURFACE_AGGREGATOR_BUS_H */
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
new file mode 100644
index 0000000000..7fc602e014
--- /dev/null
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -0,0 +1,2807 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Main SSAM/SSH controller structure and functionality.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/serial_hub.h>
+
+#include "controller.h"
+#include "ssh_msgb.h"
+#include "ssh_request_layer.h"
+
+#include "trace.h"
+
+
+/* -- Safe counters. -------------------------------------------------------- */
+
+/**
+ * ssh_seq_reset() - Reset/initialize sequence ID counter.
+ * @c: The counter to reset.
+ */
+static void ssh_seq_reset(struct ssh_seq_counter *c)
+{
+	WRITE_ONCE(c->value, 0);
+}
+
+/**
+ * ssh_seq_next() - Get next sequence ID.
+ * @c: The counter providing the sequence IDs.
+ *
+ * Return: Returns the next sequence ID of the counter.
+ */
+static u8 ssh_seq_next(struct ssh_seq_counter *c)
+{
+	u8 old = READ_ONCE(c->value);
+	u8 new = old + 1;
+	u8 ret;
+
+	while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
+		old = ret;
+		new = old + 1;
+	}
+
+	return old;
+}
+
+/**
+ * ssh_rqid_reset() - Reset/initialize request ID counter.
+ * @c: The counter to reset.
+ */
+static void ssh_rqid_reset(struct ssh_rqid_counter *c)
+{
+	WRITE_ONCE(c->value, 0);
+}
+
+/**
+ * ssh_rqid_next() - Get next request ID.
+ * @c: The counter providing the request IDs.
+ *
+ * Return: Returns the next request ID of the counter, skipping any reserved
+ * request IDs.
+ */
+static u16 ssh_rqid_next(struct ssh_rqid_counter *c)
+{
+	u16 old = READ_ONCE(c->value);
+	u16 new = ssh_rqid_next_valid(old);
+	u16 ret;
+
+	while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
+		old = ret;
+		new = ssh_rqid_next_valid(old);
+	}
+
+	return old;
+}
+
+
+/* -- Event notifier/callbacks. --------------------------------------------- */
+/*
+ * The notifier system is based on linux/notifier.h, specifically the SRCU
+ * implementation. The difference to that is, that some bits of the notifier
+ * call return value can be tracked across multiple calls. This is done so
+ * that handling of events can be tracked and a warning can be issued in case
+ * an event goes unhandled. The idea of that warning is that it should help
+ * discover and identify new/currently unimplemented features.
+ */
+
+/**
+ * ssam_event_matches_notifier() - Test if an event matches a notifier.
+ * @n: The event notifier to test against.
+ * @event: The event to test.
+ *
+ * Return: Returns %true if the given event matches the given notifier
+ * according to the rules set in the notifier's event mask, %false otherwise.
+ */
+static bool ssam_event_matches_notifier(const struct ssam_event_notifier *n,
+					const struct ssam_event *event)
+{
+	bool match = n->event.id.target_category == event->target_category;
+
+	if (n->event.mask & SSAM_EVENT_MASK_TARGET)
+		match &= n->event.reg.target_id == event->target_id;
+
+	if (n->event.mask & SSAM_EVENT_MASK_INSTANCE)
+		match &= n->event.id.instance == event->instance_id;
+
+	return match;
+}
+
+/**
+ * ssam_nfblk_call_chain() - Call event notifier callbacks of the given chain.
+ * @nh: The notifier head for which the notifier callbacks should be called.
+ * @event: The event data provided to the callbacks.
+ *
+ * Call all registered notifier callbacks in order of their priority until
+ * either no notifier is left or a notifier returns a value with the
+ * %SSAM_NOTIF_STOP bit set. Note that this bit is automatically set via
+ * ssam_notifier_from_errno() on any non-zero error value.
+ *
+ * Return: Returns the notifier status value, which contains the notifier
+ * status bits (%SSAM_NOTIF_HANDLED and %SSAM_NOTIF_STOP) as well as a
+ * potential error value returned from the last executed notifier callback.
+ * Use ssam_notifier_to_errno() to convert this value to the original error
+ * value.
+ */
+static int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
+{
+	struct ssam_event_notifier *nf;
+	int ret = 0, idx;
+
+	idx = srcu_read_lock(&nh->srcu);
+
+	list_for_each_entry_rcu(nf, &nh->head, base.node,
+				srcu_read_lock_held(&nh->srcu)) {
+		if (ssam_event_matches_notifier(nf, event)) {
+			ret = (ret & SSAM_NOTIF_STATE_MASK) | nf->base.fn(nf, event);
+			if (ret & SSAM_NOTIF_STOP)
+				break;
+		}
+	}
+
+	srcu_read_unlock(&nh->srcu, idx);
+	return ret;
+}
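A notifier callback driven by this chain would follow the sketch below; the
callback signature is inferred from the nf->base.fn() invocation above, and
the command ID is hypothetical:

static u32 example_event_notify(struct ssam_event_notifier *nf,
				const struct ssam_event *event)
{
	if (event->command_id != 0x2a)	/* hypothetical command ID */
		return 0;	/* neither handled nor stopped, chain continues */

	/* ... process event->data, which holds event->length bytes ... */

	return SSAM_NOTIF_HANDLED;
}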
+
+/**
+ * ssam_nfblk_insert() - Insert a new notifier block into the given notifier
+ * list.
+ * @nh: The notifier head into which the block should be inserted.
+ * @nb: The notifier block to add.
+ *
+ * Note: This function must be synchronized by the caller with respect to other
+ * insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
+ *
+ * Return: Returns zero on success, %-EEXIST if the notifier block has already
+ * been registered.
+ */
+static int ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
+{
+	struct ssam_notifier_block *p;
+	struct list_head *h;
+
+	/* Runs under lock, no need for RCU variant. */
+	list_for_each(h, &nh->head) {
+		p = list_entry(h, struct ssam_notifier_block, node);
+
+		if (unlikely(p == nb)) {
+			WARN(1, "double register detected");
+			return -EEXIST;
+		}
+
+		if (nb->priority > p->priority)
+			break;
+	}
+
+	list_add_tail_rcu(&nb->node, h);
+	return 0;
+}
+
+/**
+ * ssam_nfblk_find() - Check if a notifier block is registered on the given
+ * notifier head.
+ * @nh: The notifier head on which to search.
+ * @nb: The notifier block to search for.
+ *
+ * Note: This function must be synchronized by the caller with respect to other
+ * insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
+ *
+ * Return: Returns true if the given notifier block is registered on the given
+ * notifier head, false otherwise.
+ */
+static bool ssam_nfblk_find(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
+{
+	struct ssam_notifier_block *p;
+
+	/* Runs under lock, no need for RCU variant. */
+	list_for_each_entry(p, &nh->head, node) {
+		if (p == nb)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * ssam_nfblk_remove() - Remove a notifier block from its notifier list.
+ * @nb: The notifier block to be removed.
+ *
+ * Note: This function must be synchronized by the caller with respect to
+ * other insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
+ * Furthermore, the caller _must_ ensure SRCU synchronization by calling
+ * synchronize_srcu() with ``nh->srcu`` after leaving the critical section, to
+ * ensure that the removed notifier block is not in use any more.
+ */
+static void ssam_nfblk_remove(struct ssam_notifier_block *nb)
+{
+	list_del_rcu(&nb->node);
+}
+
+/**
+ * ssam_nf_head_init() - Initialize the given notifier head.
+ * @nh: The notifier head to initialize.
+ */
+static int ssam_nf_head_init(struct ssam_nf_head *nh)
+{
+	int status;
+
+	status = init_srcu_struct(&nh->srcu);
+	if (status)
+		return status;
+
+	INIT_LIST_HEAD(&nh->head);
+	return 0;
+}
+
+/**
+ * ssam_nf_head_destroy() - Deinitialize the given notifier head.
+ * @nh: The notifier head to deinitialize.
+ */
+static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
+{
+	cleanup_srcu_struct(&nh->srcu);
+}
+
+
+/* -- Event/notification registry. ------------------------------------------ */
+
+/**
+ * struct ssam_nf_refcount_key - Key used for event activation reference
+ * counting.
+ * @reg: The registry via which the event is enabled/disabled.
+ * @id: The ID uniquely describing the event.
+ */
+struct ssam_nf_refcount_key {
+	struct ssam_event_registry reg;
+	struct ssam_event_id id;
+};
+
+/**
+ * struct ssam_nf_refcount_entry - RB-tree entry for reference counting event
+ * activations.
+ * @node: The node of this entry in the rb-tree.
+ * @key: The key of the event.
+ * @refcount: The reference-count of the event.
+ * @flags: The flags used when enabling the event.
+ */
+struct ssam_nf_refcount_entry {
+	struct rb_node node;
+	struct ssam_nf_refcount_key key;
+	int refcount;
+	u8 flags;
+};
+
+/**
+ * ssam_nf_refcount_inc() - Increment reference-/activation-count of the given
+ * event.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Increments the reference-/activation-count associated with the specified
+ * event type/ID, allocating a new entry for this event ID if necessary. A
+ * newly allocated entry will have a refcount of one.
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns the refcount entry on success. Returns an error pointer
+ * with %-ENOSPC if there have already been %INT_MAX events of the specified
+ * ID and type registered, or %-ENOMEM if the entry could not be allocated.
+ */
+static struct ssam_nf_refcount_entry *
+ssam_nf_refcount_inc(struct ssam_nf *nf, struct ssam_event_registry reg,
+		     struct ssam_event_id id)
+{
+	struct ssam_nf_refcount_entry *entry;
+	struct ssam_nf_refcount_key key;
+	struct rb_node **link = &nf->refcount.rb_node;
+	struct rb_node *parent = NULL;
+	int cmp;
+
+	lockdep_assert_held(&nf->lock);
+
+	key.reg = reg;
+	key.id = id;
+
+	while (*link) {
+		entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
+		parent = *link;
+
+		cmp = memcmp(&key, &entry->key, sizeof(key));
+		if (cmp < 0) {
+			link = &(*link)->rb_left;
+		} else if (cmp > 0) {
+			link = &(*link)->rb_right;
+		} else if (entry->refcount < INT_MAX) {
+			entry->refcount++;
+			return entry;
+		} else {
+			WARN_ON(1);
+			return ERR_PTR(-ENOSPC);
+		}
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return ERR_PTR(-ENOMEM);
+
+	entry->key = key;
+	entry->refcount = 1;
+
+	rb_link_node(&entry->node, parent, link);
+	rb_insert_color(&entry->node, &nf->refcount);
+
+	return entry;
+}
+
+/**
+ * ssam_nf_refcount_dec() - Decrement reference-/activation-count of the given
+ * event.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Decrements the reference-/activation-count of the specified event,
+ * returning its entry. If the returned entry has a refcount of zero, the
+ * caller is responsible for freeing it using kfree().
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns the refcount entry on success or %NULL if the entry has not
+ * been found.
+ */
+static struct ssam_nf_refcount_entry *
+ssam_nf_refcount_dec(struct ssam_nf *nf, struct ssam_event_registry reg,
+		     struct ssam_event_id id)
+{
+	struct ssam_nf_refcount_entry *entry;
+	struct ssam_nf_refcount_key key;
+	struct rb_node *node = nf->refcount.rb_node;
+	int cmp;
+
+	lockdep_assert_held(&nf->lock);
+
+	key.reg = reg;
+	key.id = id;
+
+	while (node) {
+		entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
+
+		cmp = memcmp(&key, &entry->key, sizeof(key));
+		if (cmp < 0) {
+			node = node->rb_left;
+		} else if (cmp > 0) {
+			node = node->rb_right;
+		} else {
+			entry->refcount--;
+			if (entry->refcount == 0)
+				rb_erase(&entry->node, &nf->refcount);
+
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * ssam_nf_refcount_dec_free() - Decrement reference-/activation-count of the
+ * given event and free its entry if the reference count reaches zero.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Decrements the reference-/activation-count of the specified event, freeing
+ * its entry if it reaches zero.
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ */
+static void ssam_nf_refcount_dec_free(struct ssam_nf *nf,
+				      struct ssam_event_registry reg,
+				      struct ssam_event_id id)
+{
+	struct ssam_nf_refcount_entry *entry;
+
+	lockdep_assert_held(&nf->lock);
+
+	entry = ssam_nf_refcount_dec(nf, reg, id);
+	if (entry && entry->refcount == 0)
+		kfree(entry);
+}
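The intended pairing of these helpers, sketched under the documented locking
rules (``nf->lock`` held; the first reference is the point where the event
would actually be enabled on the EC side — the enable request itself is left
out here and is hypothetical):

static int example_event_enable(struct ssam_nf *nf,
				struct ssam_event_registry reg,
				struct ssam_event_id id)
{
	struct ssam_nf_refcount_entry *entry;

	mutex_lock(&nf->lock);

	entry = ssam_nf_refcount_inc(nf, reg, id);
	if (IS_ERR(entry)) {
		mutex_unlock(&nf->lock);
		return PTR_ERR(entry);
	}

	if (entry->refcount == 1) {
		/* First user: send the EC request enabling the event here. */
	}

	mutex_unlock(&nf->lock);
	return 0;
}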
+
+/**
+ * ssam_nf_refcount_empty() - Test if the notification system has any
+ * enabled/active events.
+ * @nf: The notification system.
+ */
+static bool ssam_nf_refcount_empty(struct ssam_nf *nf)
+{
+	return RB_EMPTY_ROOT(&nf->refcount);
+}
+
+/**
+ * ssam_nf_call() - Call notification callbacks for the provided event.
+ * @nf: The notifier system
+ * @dev: The associated device, only used for logging.
+ * @rqid: The request ID of the event.
+ * @event: The event provided to the callbacks.
+ *
+ * Execute registered callbacks in order of their priority until either no
+ * callback is left or a callback returns a value with the %SSAM_NOTIF_STOP
+ * bit set. Note that this bit is set automatically when converting non-zero
+ * error values via ssam_notifier_from_errno() to notifier values.
+ *
+ * Also note that any callback that could handle an event should return a value
+ * with bit %SSAM_NOTIF_HANDLED set, indicating that the event does not go
+ * unhandled/ignored. In case no registered callback could handle an event,
+ * this function will emit a warning.
+ *
+ * In case a callback failed, this function will emit an error message.
+ */
+static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid,
+			 struct ssam_event *event)
+{
+	struct ssam_nf_head *nf_head;
+	int status, nf_ret;
+
+	if (!ssh_rqid_is_event(rqid)) {
+		dev_warn(dev, "event: unsupported rqid: %#06x\n", rqid);
+		return;
+	}
+
+	nf_head = &nf->head[ssh_rqid_to_event(rqid)];
+	nf_ret = ssam_nfblk_call_chain(nf_head, event);
+	status = ssam_notifier_to_errno(nf_ret);
+
+	if (status < 0) {
+		dev_err(dev,
+			"event: error handling event: %d (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
+			status, event->target_category, event->target_id,
+			event->command_id, event->instance_id);
+	} else if (!(nf_ret & SSAM_NOTIF_HANDLED)) {
+		dev_warn(dev,
+			 "event: unhandled event (rqid: %#04x, tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
+			 rqid, event->target_category, event->target_id,
+			 event->command_id, event->instance_id);
+	}
+}
+
+/**
+ * ssam_nf_init() - Initialize the notifier system.
+ * @nf: The notifier system to initialize.
+ */
+static int ssam_nf_init(struct ssam_nf *nf)
+{
+	int i, status;
+
+	for (i = 0; i < SSH_NUM_EVENTS; i++) {
+		status = ssam_nf_head_init(&nf->head[i]);
+		if (status)
+			break;
+	}
+
+	if (status) {
+		while (i--)
+			ssam_nf_head_destroy(&nf->head[i]);
+
+		return status;
+	}
+
+	mutex_init(&nf->lock);
+	return 0;
+}
+
+/**
+ * ssam_nf_destroy() - Deinitialize the notifier system.
+ * @nf: The notifier system to deinitialize.
+ */
+static void ssam_nf_destroy(struct ssam_nf *nf)
+{
+	int i;
+
+	for (i = 0; i < SSH_NUM_EVENTS; i++)
+		ssam_nf_head_destroy(&nf->head[i]);
+
+	mutex_destroy(&nf->lock);
+}
+
+
+/* -- Event/async request completion system. -------------------------------- */
+
+#define SSAM_CPLT_WQ_NAME	"ssam_cpltq"
+
+/*
+ * SSAM_CPLT_WQ_BATCH - Maximum number of event item completions executed per
+ * work execution. Used to prevent livelocking of the workqueue. Value chosen
+ * via educated guess, may be adjusted.
+ */
+#define SSAM_CPLT_WQ_BATCH	10
+
+/*
+ * SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN - Maximum payload length for a cached
+ * &struct ssam_event_item.
+ *
+ * This length has been chosen to accommodate standard touchpad and keyboard
+ * input events. Events with larger payloads will be allocated separately.
+ */
+#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN	32
+
+static struct kmem_cache *ssam_event_item_cache;
+
+/**
+ * ssam_event_item_cache_init() - Initialize the event item cache.
+ */
+int ssam_event_item_cache_init(void)
+{
+	const unsigned int size = sizeof(struct ssam_event_item)
+				  + SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
+	const unsigned int align = __alignof__(struct ssam_event_item);
+	struct kmem_cache *cache;
+
+	cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
+	if (!cache)
+		return -ENOMEM;
+
+	ssam_event_item_cache = cache;
+	return 0;
+}
+
+/**
+ * ssam_event_item_cache_destroy() - Deinitialize the event item cache.
+ */
+void ssam_event_item_cache_destroy(void)
+{
+	kmem_cache_destroy(ssam_event_item_cache);
+	ssam_event_item_cache = NULL;
+}
+
+static void __ssam_event_item_free_cached(struct ssam_event_item *item)
+{
+	kmem_cache_free(ssam_event_item_cache, item);
+}
+
+static void __ssam_event_item_free_generic(struct ssam_event_item *item)
+{
+	kfree(item);
+}
+
+/**
+ * ssam_event_item_free() - Free the provided event item.
+ * @item: The event item to free.
+ */
+static void ssam_event_item_free(struct ssam_event_item *item)
+{
+	trace_ssam_event_item_free(item);
+	item->ops.free(item);
+}
+
+/**
+ * ssam_event_item_alloc() - Allocate an event item with the given payload size.
+ * @len: The event payload length.
+ * @flags: The flags used for allocation.
+ *
+ * Allocate an event item with the given payload size, preferring allocation
+ * from the event item cache if the payload is small enough (i.e. smaller than
+ * %SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN). Sets the item operations and payload
+ * length values. The item free callback (``ops.free``) should not be
+ * overwritten after this call.
+ *
+ * Return: Returns the newly allocated event item.
+ */
+static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
+{
+	struct ssam_event_item *item;
+
+	if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
+		item = kmem_cache_alloc(ssam_event_item_cache, flags);
+		if (!item)
+			return NULL;
+
+		item->ops.free = __ssam_event_item_free_cached;
+	} else {
+		item = kzalloc(struct_size(item, event.data, len), flags);
+		if (!item)
+			return NULL;
+
+		item->ops.free = __ssam_event_item_free_generic;
+	}
+
+	item->event.length = len;
+
+	trace_ssam_event_item_alloc(item, len);
+	return item;
+}
+
+/**
+ * ssam_event_queue_push() - Push an event item to the event queue.
+ * @q: The event queue.
+ * @item: The item to add.
+ */
+static void ssam_event_queue_push(struct ssam_event_queue *q,
+				  struct ssam_event_item *item)
+{
+	spin_lock(&q->lock);
+	list_add_tail(&item->node, &q->head);
+	spin_unlock(&q->lock);
+}
+
+/**
+ * ssam_event_queue_pop() - Pop the next event item from the event queue.
+ * @q: The event queue.
+ *
+ * Returns and removes the next event item from the queue. Returns %NULL if
+ * there is no event item left.
+ */
+static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
+{
+	struct ssam_event_item *item;
+
+	spin_lock(&q->lock);
+	item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
+	if (item)
+		list_del(&item->node);
+	spin_unlock(&q->lock);
+
+	return item;
+}
+
+/**
+ * ssam_event_queue_is_empty() - Check if the event queue is empty.
+ * @q: The event queue.
+ */
+static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
+{
+	bool empty;
+
+	spin_lock(&q->lock);
+	empty = list_empty(&q->head);
+	spin_unlock(&q->lock);
+
+	return empty;
+}
+
+/**
+ * ssam_cplt_get_event_queue() - Get the event queue for the given parameters.
+ * @cplt: The completion system on which to look for the queue.
+ * @tid: The target ID of the queue.
+ * @rqid: The request ID representing the event ID for which to get the queue.
+ *
+ * Return: Returns the event queue corresponding to the event type described
+ * by the given parameters. If the request ID does not represent an event,
+ * this function returns %NULL. If the target ID is not supported, this
+ * function will fall back to the default target ID (``tid = 1``).
+ */
+static
+struct ssam_event_queue *ssam_cplt_get_event_queue(struct ssam_cplt *cplt,
+						   u8 tid, u16 rqid)
+{
+	u16 event = ssh_rqid_to_event(rqid);
+	u16 tidx = ssh_tid_to_index(tid);
+
+	if (!ssh_rqid_is_event(rqid)) {
+		dev_err(cplt->dev, "event: unsupported request ID: %#06x\n", rqid);
+		return NULL;
+	}
+
+	if (!ssh_tid_is_valid(tid)) {
+		dev_warn(cplt->dev, "event: unsupported target ID: %u\n", tid);
+		tidx = 0;
+	}
+
+	return &cplt->event.target[tidx].queue[event];
+}
+
+/**
+ * ssam_cplt_submit() - Submit a work item to the completion system workqueue.
+ * @cplt: The completion system.
+ * @work: The work item to submit.
+ */
+static bool ssam_cplt_submit(struct ssam_cplt *cplt, struct work_struct *work)
+{
+	return queue_work(cplt->wq, work);
+}
+
+/**
+ * ssam_cplt_submit_event() - Submit an event to the completion system.
+ * @cplt: The completion system.
+ * @item: The event item to submit.
+ *
+ * Submits the event to the completion system by queuing it on the event item
+ * queue and queuing the respective event queue work item on the completion
+ * workqueue, which will eventually complete the event.
+ *
+ * Return: Returns zero on success, %-EINVAL if there is no event queue that
+ * can handle the given event item.
+ */
+static int ssam_cplt_submit_event(struct ssam_cplt *cplt,
+				  struct ssam_event_item *item)
+{
+	struct ssam_event_queue *evq;
+
+	evq = ssam_cplt_get_event_queue(cplt, item->event.target_id, item->rqid);
+	if (!evq)
+		return -EINVAL;
+
+	ssam_event_queue_push(evq, item);
+	ssam_cplt_submit(cplt, &evq->work);
+	return 0;
+}
+
+/**
+ * ssam_cplt_flush() - Flush the completion system.
+ * @cplt: The completion system.
+ *
+ * Flush the completion system by waiting until all currently submitted work
+ * items have been completed.
+ *
+ * Note: This function does not guarantee that all events will have been
+ * handled once this call terminates. In case of a larger number of
+ * to-be-completed events, the event queue work function may re-schedule its
+ * work item, which this flush operation will ignore.
+ *
+ * This operation is only intended to, during normal operation prior to
+ * shutdown, try to complete most events and requests to get them out of the
+ * system while the system is still fully operational. It does not aim to
+ * provide any guarantee that all of them have been handled.
+ */
+static void ssam_cplt_flush(struct ssam_cplt *cplt)
+{
+	flush_workqueue(cplt->wq);
+}
+
+static void ssam_event_queue_work_fn(struct work_struct *work)
+{
+	struct ssam_event_queue *queue;
+	struct ssam_event_item *item;
+	struct ssam_nf *nf;
+	struct device *dev;
+	unsigned int iterations = SSAM_CPLT_WQ_BATCH;
+
+	queue = container_of(work, struct ssam_event_queue, work);
+	nf = &queue->cplt->event.notif;
+	dev = queue->cplt->dev;
+
+	/* Limit number of processed events to avoid livelocking. */
+	do {
+		item = ssam_event_queue_pop(queue);
+		if (!item)
+			return;
+
+		ssam_nf_call(nf, dev, item->rqid, &item->event);
+		ssam_event_item_free(item);
+	} while (--iterations);
+
+	if (!ssam_event_queue_is_empty(queue))
+		ssam_cplt_submit(queue->cplt, &queue->work);
+}
+
+/**
+ * ssam_event_queue_init() - Initialize an event queue.
+ * @cplt: The completion system on which the queue resides.
+ * @evq: The event queue to initialize.
+ */
+static void ssam_event_queue_init(struct ssam_cplt *cplt,
+				  struct ssam_event_queue *evq)
+{
+	evq->cplt = cplt;
+	spin_lock_init(&evq->lock);
+	INIT_LIST_HEAD(&evq->head);
+	INIT_WORK(&evq->work, ssam_event_queue_work_fn);
+}
+
+/**
+ * ssam_cplt_init() - Initialize completion system.
+ * @cplt: The completion system to initialize.
+ * @dev: The device used for logging.
+ */
+static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
+{
+	struct ssam_event_target *target;
+	int status, c, i;
+
+	cplt->dev = dev;
+
+	cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+	if (!cplt->wq)
+		return -ENOMEM;
+
+	for (c = 0; c < ARRAY_SIZE(cplt->event.target); c++) {
+		target = &cplt->event.target[c];
+
+		for (i = 0; i < ARRAY_SIZE(target->queue); i++)
+			ssam_event_queue_init(cplt, &target->queue[i]);
+	}
+
+	status = ssam_nf_init(&cplt->event.notif);
+	if (status)
+		destroy_workqueue(cplt->wq);
+
+	return status;
+}
+
+/**
+ * ssam_cplt_destroy() - Deinitialize the completion system.
+ * @cplt: The completion system to deinitialize.
+ *
+ * Deinitialize the given completion system and ensure that all pending, i.e.
+ * yet-to-be-completed, event items and requests have been handled.
+ */
+static void ssam_cplt_destroy(struct ssam_cplt *cplt)
+{
+	/*
+	 * Note: destroy_workqueue ensures that all currently queued work will
+	 * be fully completed and the workqueue drained. This means that this
+	 * call will inherently also free any queued ssam_event_items, thus we
+	 * don't have to take care of that here explicitly.
+	 */
+	destroy_workqueue(cplt->wq);
+	ssam_nf_destroy(&cplt->event.notif);
+}
+
+
+/* -- Main SSAM device structures. ------------------------------------------ */
+
+/**
+ * ssam_controller_device() - Get the &struct device associated with this
+ * controller.
+ * @c: The controller for which to get the device.
+ *
+ * Return: Returns the &struct device associated with this controller,
+ * providing its lower-level transport.
+ */
+struct device *ssam_controller_device(struct ssam_controller *c)
+{
+	return ssh_rtl_get_device(&c->rtl);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_device);
+
+static void __ssam_controller_release(struct kref *kref)
+{
+	struct ssam_controller *ctrl = to_ssam_controller(kref, kref);
+
+	/*
+	 * The lock-call here is to satisfy lockdep. At this point we really
+	 * expect this to be the last remaining reference to the controller.
+	 * Anything else is a bug.
+	 */
+	ssam_controller_lock(ctrl);
+	ssam_controller_destroy(ctrl);
+	ssam_controller_unlock(ctrl);
+
+	kfree(ctrl);
+}
+
+/**
+ * ssam_controller_get() - Increment reference count of controller.
+ * @c: The controller.
+ *
+ * Return: Returns the controller provided as input.
+ */
+struct ssam_controller *ssam_controller_get(struct ssam_controller *c)
+{
+	if (c)
+		kref_get(&c->kref);
+	return c;
+}
+EXPORT_SYMBOL_GPL(ssam_controller_get);
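A sketch of the matching get/put discipline for code that caches a controller
pointer beyond its own probe/remove window (the context structure is
hypothetical; ssam_controller_put() is defined right below):

struct example_ctx {
	struct ssam_controller *ctrl;
};

static void example_ctx_init(struct example_ctx *ctx, struct ssam_controller *c)
{
	ctx->ctrl = ssam_controller_get(c);	/* take a reference */
}

static void example_ctx_destroy(struct example_ctx *ctx)
{
	ssam_controller_put(ctx->ctrl);		/* release it again */
}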
+ */ +void ssam_controller_put(struct ssam_controller *c) +{ + if (c) + kref_put(&c->kref, __ssam_controller_release); +} +EXPORT_SYMBOL_GPL(ssam_controller_put); + +/** + * ssam_controller_statelock() - Lock the controller against state transitions. + * @c: The controller to lock. + * + * Lock the controller against state transitions. Holding this lock guarantees + * that the controller will not transition between states, i.e. if the + * controller is in state "started", when this lock has been acquired, it will + * remain in this state at least until the lock has been released. + * + * Multiple clients may concurrently hold this lock. In other words: The + * ``statelock`` functions represent the read-lock part of a r/w-semaphore. + * Actions causing state transitions of the controller must be executed while + * holding the write-part of this r/w-semaphore (see ssam_controller_lock() + * and ssam_controller_unlock() for that). + * + * See ssam_controller_stateunlock() for the corresponding unlock function. + */ +void ssam_controller_statelock(struct ssam_controller *c) +{ + down_read(&c->lock); +} +EXPORT_SYMBOL_GPL(ssam_controller_statelock); + +/** + * ssam_controller_stateunlock() - Unlock controller state transitions. + * @c: The controller to unlock. + * + * See ssam_controller_statelock() for the corresponding lock function. + */ +void ssam_controller_stateunlock(struct ssam_controller *c) +{ + up_read(&c->lock); +} +EXPORT_SYMBOL_GPL(ssam_controller_stateunlock); + +/** + * ssam_controller_lock() - Acquire the main controller lock. + * @c: The controller to lock. + * + * This lock must be held for any state transitions, including transition to + * suspend/resumed states and during shutdown. See ssam_controller_statelock() + * for more details on controller locking. + * + * See ssam_controller_unlock() for the corresponding unlock function. + */ +void ssam_controller_lock(struct ssam_controller *c) +{ + down_write(&c->lock); +} + +/* + * ssam_controller_unlock() - Release the main controller lock. + * @c: The controller to unlock. + * + * See ssam_controller_lock() for the corresponding lock function. 
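+ *
+ * As a rough sketch (error handling omitted), a state transition is
+ * typically wrapped as follows::
+ *
+ *	ssam_controller_lock(ctrl);
+ *	status = ssam_controller_start(ctrl);
+ *	ssam_controller_unlock(ctrl);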
+ */ +void ssam_controller_unlock(struct ssam_controller *c) +{ + up_write(&c->lock); +} + +static void ssam_handle_event(struct ssh_rtl *rtl, + const struct ssh_command *cmd, + const struct ssam_span *data) +{ + struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl); + struct ssam_event_item *item; + + item = ssam_event_item_alloc(data->len, GFP_KERNEL); + if (!item) + return; + + item->rqid = get_unaligned_le16(&cmd->rqid); + item->event.target_category = cmd->tc; + item->event.target_id = cmd->sid; + item->event.command_id = cmd->cid; + item->event.instance_id = cmd->iid; + memcpy(&item->event.data[0], data->ptr, data->len); + + if (WARN_ON(ssam_cplt_submit_event(&ctrl->cplt, item))) + ssam_event_item_free(item); +} + +static const struct ssh_rtl_ops ssam_rtl_ops = { + .handle_event = ssam_handle_event, +}; + +static bool ssam_notifier_is_empty(struct ssam_controller *ctrl); +static void ssam_notifier_unregister_all(struct ssam_controller *ctrl); + +#define SSAM_SSH_DSM_REVISION 0 + +/* d5e383e1-d892-4a76-89fc-f6aaae7ed5b5 */ +static const guid_t SSAM_SSH_DSM_GUID = + GUID_INIT(0xd5e383e1, 0xd892, 0x4a76, + 0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5); + +enum ssh_dsm_fn { + SSH_DSM_FN_SSH_POWER_PROFILE = 0x05, + SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT = 0x06, + SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT = 0x07, + SSH_DSM_FN_D3_CLOSES_HANDLE = 0x08, + SSH_DSM_FN_SSH_BUFFER_SIZE = 0x09, +}; + +static int ssam_dsm_get_functions(acpi_handle handle, u64 *funcs) +{ + union acpi_object *obj; + u64 mask = 0; + int i; + + *funcs = 0; + + /* + * The _DSM function is only present on newer models. It is not + * present on 5th and 6th generation devices (i.e. up to and including + * Surface Pro 6, Surface Laptop 2, Surface Book 2). + * + * If the _DSM is not present, indicate that no function is supported. + * This will result in default values being set. + */ + if (!acpi_has_method(handle, "_DSM")) + return 0; + + obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID, + SSAM_SSH_DSM_REVISION, 0, NULL, + ACPI_TYPE_BUFFER); + if (!obj) + return -EIO; + + for (i = 0; i < obj->buffer.length && i < 8; i++) + mask |= (((u64)obj->buffer.pointer[i]) << (i * 8)); + + if (mask & BIT(0)) + *funcs = mask; + + ACPI_FREE(obj); + return 0; +} + +static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret) +{ + union acpi_object *obj; + u64 val; + + if (!(funcs & BIT_ULL(func))) + return 0; /* Not supported, leave *ret at its default value */ + + obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID, + SSAM_SSH_DSM_REVISION, func, NULL, + ACPI_TYPE_INTEGER); + if (!obj) + return -EIO; + + val = obj->integer.value; + ACPI_FREE(obj); + + if (val > U32_MAX) + return -ERANGE; + + *ret = val; + return 0; +} + +/** + * ssam_controller_caps_load_from_acpi() - Load controller capabilities from + * ACPI _DSM. + * @handle: The handle of the ACPI controller/SSH device. + * @caps: Where to store the capabilities in. + * + * Initializes the given controller capabilities with default values, then + * checks and, if the respective _DSM functions are available, loads the + * actual capabilities from the _DSM. + * + * Return: Returns zero on success, a negative error code on failure. + */ +static +int ssam_controller_caps_load_from_acpi(acpi_handle handle, + struct ssam_controller_caps *caps) +{ + u32 d3_closes_handle = false; + u64 funcs; + int status; + + /* Set defaults. 
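These defaults are
+	 * kept if the corresponding _DSM function is not available.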
+	 */
+	caps->ssh_power_profile = U32_MAX;
+	caps->screen_on_sleep_idle_timeout = U32_MAX;
+	caps->screen_off_sleep_idle_timeout = U32_MAX;
+	caps->d3_closes_handle = false;
+	caps->ssh_buffer_size = U32_MAX;
+
+	/* Pre-load supported DSM functions. */
+	status = ssam_dsm_get_functions(handle, &funcs);
+	if (status)
+		return status;
+
+	/* Load actual values from ACPI, if present. */
+	status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_POWER_PROFILE,
+				   &caps->ssh_power_profile);
+	if (status)
+		return status;
+
+	status = ssam_dsm_load_u32(handle, funcs,
+				   SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT,
+				   &caps->screen_on_sleep_idle_timeout);
+	if (status)
+		return status;
+
+	status = ssam_dsm_load_u32(handle, funcs,
+				   SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT,
+				   &caps->screen_off_sleep_idle_timeout);
+	if (status)
+		return status;
+
+	status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_D3_CLOSES_HANDLE,
+				   &d3_closes_handle);
+	if (status)
+		return status;
+
+	caps->d3_closes_handle = !!d3_closes_handle;
+
+	status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_BUFFER_SIZE,
+				   &caps->ssh_buffer_size);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+/**
+ * ssam_controller_init() - Initialize SSAM controller.
+ * @ctrl: The controller to initialize.
+ * @serdev: The serial device representing the underlying data transport.
+ *
+ * Initializes the given controller. Does not start the receiver or
+ * transmitter threads. After this call, the controller has to be hooked up to
+ * the serdev core separately via &struct serdev_device_ops, relaying calls to
+ * ssam_controller_receive_buf() and ssam_controller_write_wakeup(). Once the
+ * controller has been hooked up, transmitter and receiver threads may be
+ * started via ssam_controller_start(). These setup steps need to be completed
+ * before the controller can be used for requests.
+ */
+int ssam_controller_init(struct ssam_controller *ctrl,
+			 struct serdev_device *serdev)
+{
+	acpi_handle handle = ACPI_HANDLE(&serdev->dev);
+	int status;
+
+	init_rwsem(&ctrl->lock);
+	kref_init(&ctrl->kref);
+
+	status = ssam_controller_caps_load_from_acpi(handle, &ctrl->caps);
+	if (status)
+		return status;
+
+	dev_dbg(&serdev->dev,
+		"device capabilities:\n"
+		"  ssh_power_profile:             %u\n"
+		"  ssh_buffer_size:               %u\n"
+		"  screen_on_sleep_idle_timeout:  %u\n"
+		"  screen_off_sleep_idle_timeout: %u\n"
+		"  d3_closes_handle:              %u\n",
+		ctrl->caps.ssh_power_profile,
+		ctrl->caps.ssh_buffer_size,
+		ctrl->caps.screen_on_sleep_idle_timeout,
+		ctrl->caps.screen_off_sleep_idle_timeout,
+		ctrl->caps.d3_closes_handle);
+
+	ssh_seq_reset(&ctrl->counter.seq);
+	ssh_rqid_reset(&ctrl->counter.rqid);
+
+	/* Initialize event/request completion system. */
+	status = ssam_cplt_init(&ctrl->cplt, &serdev->dev);
+	if (status)
+		return status;
+
+	/* Initialize request and packet transport layers. */
+	status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops);
+	if (status) {
+		ssam_cplt_destroy(&ctrl->cplt);
+		return status;
+	}
+
+	/*
+	 * Set state via write_once even though we expect to be in an
+	 * exclusive context, due to smoke-testing in
+	 * ssam_request_sync_submit().
+	 */
+	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_INITIALIZED);
+	return 0;
+}
+
+/**
+ * ssam_controller_start() - Start the receiver and transmitter threads of the
+ * controller.
+ * @ctrl: The controller.
+ *
+ * Note: When this function is called, the controller should be properly
+ * hooked up to the serdev core via &struct serdev_device_ops.
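+ * As a rough sketch (the wrapper names are hypothetical; the relayed calls
+ * are the ones named in ssam_controller_init()), such a hookup might look
+ * like::
+ *
+ *	static int ssam_receive_buf(struct serdev_device *dev,
+ *				    const unsigned char *buf, size_t n)
+ *	{
+ *		struct ssam_controller *ctrl = serdev_device_get_drvdata(dev);
+ *
+ *		return ssam_controller_receive_buf(ctrl, buf, n);
+ *	}
+ *
+ *	static void ssam_write_wakeup(struct serdev_device *dev)
+ *	{
+ *		ssam_controller_write_wakeup(serdev_device_get_drvdata(dev));
+ *	}
+ *
+ *	static const struct serdev_device_ops ssam_serdev_ops = {
+ *		.receive_buf = ssam_receive_buf,
+ *		.write_wakeup = ssam_write_wakeup,
+ *	};
+ *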
+ * Please refer to ssam_controller_init() for more details on controller
+ * initialization.
+ *
+ * This function must be called with the main controller lock held (i.e. by
+ * calling ssam_controller_lock()).
+ */
+int ssam_controller_start(struct ssam_controller *ctrl)
+{
+	int status;
+
+	lockdep_assert_held_write(&ctrl->lock);
+
+	if (ctrl->state != SSAM_CONTROLLER_INITIALIZED)
+		return -EINVAL;
+
+	status = ssh_rtl_start(&ctrl->rtl);
+	if (status)
+		return status;
+
+	/*
+	 * Set state via write_once even though we expect to be locked/in an
+	 * exclusive context, due to smoke-testing in
+	 * ssam_request_sync_submit().
+	 */
+	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
+	return 0;
+}
+
+/*
+ * SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT - Timeout for flushing requests during
+ * shutdown.
+ *
+ * Chosen to be larger than one full request timeout, including packets timing
+ * out. This value should give ample time to complete any outstanding requests
+ * during normal operation and account for the odd packet timeout.
+ */
+#define SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT	msecs_to_jiffies(5000)
+
+/**
+ * ssam_controller_shutdown() - Shut down the controller.
+ * @ctrl: The controller.
+ *
+ * Shuts down the controller by flushing all pending requests and stopping the
+ * transmitter and receiver threads. All requests submitted after this call
+ * will fail with %-ESHUTDOWN. While it is discouraged to do so, this function
+ * is safe to use in parallel with ongoing request submission.
+ *
+ * In the course of this shutdown procedure, all currently registered
+ * notifiers will be unregistered. It is, however, strongly recommended to not
+ * rely on this behavior, and instead the party registering the notifier
+ * should unregister it before the controller gets shut down, e.g. via the
+ * SSAM bus which guarantees client devices to be removed before a shutdown.
+ *
+ * Note that events may still be pending after this call, but, due to the
+ * notifiers being unregistered, these events will be dropped when the
+ * controller is subsequently destroyed via ssam_controller_destroy().
+ *
+ * This function must be called with the main controller lock held (i.e. by
+ * calling ssam_controller_lock()).
+ */
+void ssam_controller_shutdown(struct ssam_controller *ctrl)
+{
+	enum ssam_controller_state s = ctrl->state;
+	int status;
+
+	lockdep_assert_held_write(&ctrl->lock);
+
+	if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED)
+		return;
+
+	/*
+	 * Try to flush pending events and requests while everything still
+	 * works. Note: There may still be packets and/or requests in the
+	 * system after this call (e.g. via control packets submitted by the
+	 * packet transport layer or flush timeout / failure, ...). Those will
+	 * be handled with the ssh_rtl_shutdown() call below.
+	 */
+	status = ssh_rtl_flush(&ctrl->rtl, SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT);
+	if (status) {
+		ssam_err(ctrl, "failed to flush request transport layer: %d\n",
+			 status);
+	}
+
+	/* Try to flush all currently completing requests and events. */
+	ssam_cplt_flush(&ctrl->cplt);
+
+	/*
+	 * We expect all notifiers to have been removed by the respective client
+	 * driver that set them up at this point. If this warning occurs, some
+	 * client driver has not done that...
+	 */
+	WARN_ON(!ssam_notifier_is_empty(ctrl));
+
+	/*
+	 * Nevertheless, we should still take care of drivers that don't behave
+	 * well. Thus disable all enabled events, unregister all notifiers.
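+	 * This also disables the events on the EC and frees their
+	 * reference count entries.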
+ */ + ssam_notifier_unregister_all(ctrl); + + /* + * Cancel remaining requests. Ensure no new ones can be queued and stop + * threads. + */ + ssh_rtl_shutdown(&ctrl->rtl); + + /* + * Set state via write_once even though we expect to be locked/in an + * exclusive context, due to smoke-testing in + * ssam_request_sync_submit(). + */ + WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STOPPED); + ctrl->rtl.ptl.serdev = NULL; +} + +/** + * ssam_controller_destroy() - Destroy the controller and free its resources. + * @ctrl: The controller. + * + * Ensures that all resources associated with the controller get freed. This + * function should only be called after the controller has been stopped via + * ssam_controller_shutdown(). In general, this function should not be called + * directly. The only valid place to call this function directly is during + * initialization, before the controller has been fully initialized and passed + * to other processes. This function is called automatically when the + * reference count of the controller reaches zero. + * + * This function must be called with the main controller lock held (i.e. by + * calling ssam_controller_lock()). + */ +void ssam_controller_destroy(struct ssam_controller *ctrl) +{ + lockdep_assert_held_write(&ctrl->lock); + + if (ctrl->state == SSAM_CONTROLLER_UNINITIALIZED) + return; + + WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED); + + /* + * Note: New events could still have been received after the previous + * flush in ssam_controller_shutdown, before the request transport layer + * has been shut down. At this point, after the shutdown, we can be sure + * that no new events will be queued. The call to ssam_cplt_destroy will + * ensure that those remaining are being completed and freed. + */ + + /* Actually free resources. */ + ssam_cplt_destroy(&ctrl->cplt); + ssh_rtl_destroy(&ctrl->rtl); + + /* + * Set state via write_once even though we expect to be locked/in an + * exclusive context, due to smoke-testing in + * ssam_request_sync_submit(). + */ + WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_UNINITIALIZED); +} + +/** + * ssam_controller_suspend() - Suspend the controller. + * @ctrl: The controller to suspend. + * + * Marks the controller as suspended. Note that display-off and D0-exit + * notifications have to be sent manually before transitioning the controller + * into the suspended state via this function. + * + * See ssam_controller_resume() for the corresponding resume function. + * + * Return: Returns %-EINVAL if the controller is currently not in the + * "started" state. + */ +int ssam_controller_suspend(struct ssam_controller *ctrl) +{ + ssam_controller_lock(ctrl); + + if (ctrl->state != SSAM_CONTROLLER_STARTED) { + ssam_controller_unlock(ctrl); + return -EINVAL; + } + + ssam_dbg(ctrl, "pm: suspending controller\n"); + + /* + * Set state via write_once even though we're locked, due to + * smoke-testing in ssam_request_sync_submit(). + */ + WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_SUSPENDED); + + ssam_controller_unlock(ctrl); + return 0; +} + +/** + * ssam_controller_resume() - Resume the controller from suspend. + * @ctrl: The controller to resume. + * + * Resume the controller from the suspended state it was put into via + * ssam_controller_suspend(). This function does not issue display-on and + * D0-entry notifications. If required, those have to be sent manually after + * this call. + * + * Return: Returns %-EINVAL if the controller is currently not suspended. 
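+ *
+ * As a rough sketch (error handling omitted), the suspend and resume paths
+ * might thus look like::
+ *
+ *	ssam_ctrl_notif_display_off(ctrl);
+ *	ssam_ctrl_notif_d0_exit(ctrl);
+ *	ssam_controller_suspend(ctrl);
+ *
+ *	ssam_controller_resume(ctrl);
+ *	ssam_ctrl_notif_d0_entry(ctrl);
+ *	ssam_ctrl_notif_display_on(ctrl);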
+ */ +int ssam_controller_resume(struct ssam_controller *ctrl) +{ + ssam_controller_lock(ctrl); + + if (ctrl->state != SSAM_CONTROLLER_SUSPENDED) { + ssam_controller_unlock(ctrl); + return -EINVAL; + } + + ssam_dbg(ctrl, "pm: resuming controller\n"); + + /* + * Set state via write_once even though we're locked, due to + * smoke-testing in ssam_request_sync_submit(). + */ + WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED); + + ssam_controller_unlock(ctrl); + return 0; +} + + +/* -- Top-level request interface ------------------------------------------- */ + +/** + * ssam_request_write_data() - Construct and write SAM request message to + * buffer. + * @buf: The buffer to write the data to. + * @ctrl: The controller via which the request will be sent. + * @spec: The request data and specification. + * + * Constructs a SAM/SSH request message and writes it to the provided buffer. + * The request and transport counters, specifically RQID and SEQ, will be set + * in this call. These counters are obtained from the controller. It is thus + * only valid to send the resulting message via the controller specified here. + * + * For calculation of the required buffer size, refer to the + * SSH_COMMAND_MESSAGE_LENGTH() macro. + * + * Return: Returns the number of bytes used in the buffer on success. Returns + * %-EINVAL if the payload length provided in the request specification is too + * large (larger than %SSH_COMMAND_MAX_PAYLOAD_SIZE) or if the provided buffer + * is too small. + */ +ssize_t ssam_request_write_data(struct ssam_span *buf, + struct ssam_controller *ctrl, + const struct ssam_request *spec) +{ + struct msgbuf msgb; + u16 rqid; + u8 seq; + + if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE) + return -EINVAL; + + if (SSH_COMMAND_MESSAGE_LENGTH(spec->length) > buf->len) + return -EINVAL; + + msgb_init(&msgb, buf->ptr, buf->len); + seq = ssh_seq_next(&ctrl->counter.seq); + rqid = ssh_rqid_next(&ctrl->counter.rqid); + msgb_push_cmd(&msgb, seq, rqid, spec); + + return msgb_bytes_used(&msgb); +} +EXPORT_SYMBOL_GPL(ssam_request_write_data); + +static void ssam_request_sync_complete(struct ssh_request *rqst, + const struct ssh_command *cmd, + const struct ssam_span *data, int status) +{ + struct ssh_rtl *rtl = ssh_request_rtl(rqst); + struct ssam_request_sync *r; + + r = container_of(rqst, struct ssam_request_sync, base); + r->status = status; + + if (r->resp) + r->resp->length = 0; + + if (status) { + rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status); + return; + } + + if (!data) /* Handle requests without a response. */ + return; + + if (!r->resp || !r->resp->pointer) { + if (data->len) + rtl_warn(rtl, "rsp: no response buffer provided, dropping data\n"); + return; + } + + if (data->len > r->resp->capacity) { + rtl_err(rtl, + "rsp: response buffer too small, capacity: %zu bytes, got: %zu bytes\n", + r->resp->capacity, data->len); + r->status = -ENOSPC; + return; + } + + r->resp->length = data->len; + memcpy(r->resp->pointer, data->ptr, data->len); +} + +static void ssam_request_sync_release(struct ssh_request *rqst) +{ + complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp); +} + +static const struct ssh_request_ops ssam_request_sync_ops = { + .release = ssam_request_sync_release, + .complete = ssam_request_sync_complete, +}; + +/** + * ssam_request_sync_alloc() - Allocate a synchronous request. + * @payload_len: The length of the request payload. + * @flags: Flags used for allocation. + * @rqst: Where to store the pointer to the allocated request. 
+ * @buffer: Where to store the buffer descriptor for the message buffer of
+ * the request.
+ *
+ * Allocates a synchronous request with corresponding message buffer. The
+ * request still needs to be initialized via ssam_request_sync_init() before
+ * it can be submitted, and the message buffer data must still be set to the
+ * returned buffer via ssam_request_sync_set_data() after it has been filled,
+ * if need be with adjusted message length.
+ *
+ * After use, the request and its corresponding message buffer should be freed
+ * via ssam_request_sync_free(). The buffer must not be freed separately.
+ *
+ * Return: Returns zero on success, %-ENOMEM if the request could not be
+ * allocated.
+ */
+int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
+			    struct ssam_request_sync **rqst,
+			    struct ssam_span *buffer)
+{
+	size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len);
+
+	*rqst = kzalloc(sizeof(**rqst) + msglen, flags);
+	if (!*rqst)
+		return -ENOMEM;
+
+	buffer->ptr = (u8 *)(*rqst + 1);
+	buffer->len = msglen;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_alloc);
+
+/**
+ * ssam_request_sync_free() - Free a synchronous request.
+ * @rqst: The request to be freed.
+ *
+ * Free a synchronous request and its corresponding buffer allocated with
+ * ssam_request_sync_alloc(). Do not use for requests allocated on the stack
+ * or via any other function.
+ *
+ * Warning: The caller must ensure that the request is not in use any more.
+ * I.e. the caller must ensure that it has the only reference to the request
+ * and the request is not currently pending. This means that the caller has
+ * either never submitted the request, request submission has failed, or the
+ * caller has waited until the submitted request has been completed via
+ * ssam_request_sync_wait().
+ */
+void ssam_request_sync_free(struct ssam_request_sync *rqst)
+{
+	kfree(rqst);
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_free);
+
+/**
+ * ssam_request_sync_init() - Initialize a synchronous request struct.
+ * @rqst: The request to initialize.
+ * @flags: The request flags.
+ *
+ * Initializes the given request struct. Does not initialize the request
+ * message data. This has to be done explicitly after this call via
+ * ssam_request_sync_set_data() and the actual message data has to be written
+ * via ssam_request_write_data().
+ *
+ * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
+ */
+int ssam_request_sync_init(struct ssam_request_sync *rqst,
+			   enum ssam_request_flags flags)
+{
+	int status;
+
+	status = ssh_request_init(&rqst->base, flags, &ssam_request_sync_ops);
+	if (status)
+		return status;
+
+	init_completion(&rqst->comp);
+	rqst->resp = NULL;
+	rqst->status = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_init);
+
+/**
+ * ssam_request_sync_submit() - Submit a synchronous request.
+ * @ctrl: The controller with which to submit the request.
+ * @rqst: The request to submit.
+ *
+ * Submit a synchronous request. The request has to be initialized and
+ * properly set up, including response buffer (may be %NULL if no response is
+ * expected) and command message data. This function does not wait for the
+ * request to be completed.
+ *
+ * If this function succeeds, ssam_request_sync_wait() must be used to ensure
+ * that the request has been completed before the response data can be
+ * accessed and/or the request can be freed. On failure, the request may
+ * immediately be freed.
+ *
+ * This function may only be used if the controller is active, i.e.
+ * has been initialized and not suspended.
+ */
+int ssam_request_sync_submit(struct ssam_controller *ctrl,
+			     struct ssam_request_sync *rqst)
+{
+	int status;
+
+	/*
+	 * This is only a superficial check. In general, the caller needs to
+	 * ensure that the controller is initialized and is not (and does not
+	 * get) suspended during use, i.e. until the request has been completed
+	 * (if _absolutely_ necessary, by use of ssam_controller_statelock/
+	 * ssam_controller_stateunlock, but something like ssam_client_link
+	 * should be preferred as this needs to last until the request has been
+	 * completed).
+	 *
+	 * Note that it is actually safe to use this function while the
+	 * controller is in the process of being shut down (as ssh_rtl_submit
+	 * is safe with regards to this), but it is generally discouraged to do
+	 * so.
+	 */
+	if (WARN_ON(READ_ONCE(ctrl->state) != SSAM_CONTROLLER_STARTED)) {
+		ssh_request_put(&rqst->base);
+		return -ENODEV;
+	}
+
+	status = ssh_rtl_submit(&ctrl->rtl, &rqst->base);
+	ssh_request_put(&rqst->base);
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
+
+/**
+ * ssam_request_do_sync() - Execute a synchronous request.
+ * @ctrl: The controller via which the request will be submitted.
+ * @spec: The request specification and payload.
+ * @rsp: The response buffer.
+ *
+ * Allocates a synchronous request with its message data buffer on the heap
+ * via ssam_request_sync_alloc(), fully initializes it via the provided
+ * request specification, submits it, and finally waits for its completion
+ * before freeing it and returning its status.
+ *
+ * Return: Returns the status of the request or any failure during setup.
+ */
+int ssam_request_do_sync(struct ssam_controller *ctrl,
+			 const struct ssam_request *spec,
+			 struct ssam_response *rsp)
+{
+	struct ssam_request_sync *rqst;
+	struct ssam_span buf;
+	ssize_t len;
+	int status;
+
+	status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
+	if (status)
+		return status;
+
+	status = ssam_request_sync_init(rqst, spec->flags);
+	if (status) {
+		ssam_request_sync_free(rqst);
+		return status;
+	}
+
+	ssam_request_sync_set_resp(rqst, rsp);
+
+	len = ssam_request_write_data(&buf, ctrl, spec);
+	if (len < 0) {
+		ssam_request_sync_free(rqst);
+		return len;
+	}
+
+	ssam_request_sync_set_data(rqst, buf.ptr, len);
+
+	status = ssam_request_sync_submit(ctrl, rqst);
+	if (!status)
+		status = ssam_request_sync_wait(rqst);
+
+	ssam_request_sync_free(rqst);
+	return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_do_sync);
+
+/**
+ * ssam_request_do_sync_with_buffer() - Execute a synchronous request with the
+ * provided buffer as back-end for the message buffer.
+ * @ctrl: The controller via which the request will be submitted.
+ * @spec: The request specification and payload.
+ * @rsp: The response buffer.
+ * @buf: The buffer for the request message data.
+ *
+ * Allocates a synchronous request struct on the stack, fully initializes it
+ * using the provided buffer as message data buffer, submits it, and then
+ * waits for its completion before returning its status. The
+ * SSH_COMMAND_MESSAGE_LENGTH() macro can be used to compute the required
+ * message buffer size.
+ *
+ * This function does essentially the same as ssam_request_do_sync(), but
+ * instead of dynamically allocating the request and message data buffer, it
+ * uses the provided message data buffer and stores the (small) request struct
+ * on the stack.
+ *
+ * Return: Returns the status of the request or any failure during setup.
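+ *
+ * As a rough sketch (``spec`` and ``rsp`` are assumed to be set up
+ * beforehand, ``payload`` being a hypothetical payload struct with a size
+ * known at compile time), a call with an on-stack message buffer might look
+ * like::
+ *
+ *	u8 buf_data[SSH_COMMAND_MESSAGE_LENGTH(sizeof(payload))];
+ *	struct ssam_span buf = { .ptr = buf_data, .len = sizeof(buf_data) };
+ *
+ *	status = ssam_request_do_sync_with_buffer(ctrl, &spec, &rsp, &buf);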
+ */ +int ssam_request_do_sync_with_buffer(struct ssam_controller *ctrl, + const struct ssam_request *spec, + struct ssam_response *rsp, + struct ssam_span *buf) +{ + struct ssam_request_sync rqst; + ssize_t len; + int status; + + status = ssam_request_sync_init(&rqst, spec->flags); + if (status) + return status; + + ssam_request_sync_set_resp(&rqst, rsp); + + len = ssam_request_write_data(buf, ctrl, spec); + if (len < 0) + return len; + + ssam_request_sync_set_data(&rqst, buf->ptr, len); + + status = ssam_request_sync_submit(ctrl, &rqst); + if (!status) + status = ssam_request_sync_wait(&rqst); + + return status; +} +EXPORT_SYMBOL_GPL(ssam_request_do_sync_with_buffer); + + +/* -- Internal SAM requests. ------------------------------------------------ */ + +SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, { + .target_category = SSAM_SSH_TC_SAM, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x13, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, { + .target_category = SSAM_SSH_TC_SAM, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x15, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, { + .target_category = SSAM_SSH_TC_SAM, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x16, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, { + .target_category = SSAM_SSH_TC_SAM, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x33, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, { + .target_category = SSAM_SSH_TC_SAM, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x34, + .instance_id = 0x00, +}); + +/** + * struct ssh_notification_params - Command payload to enable/disable SSH + * notifications. + * @target_category: The target category for which notifications should be + * enabled/disabled. + * @flags: Flags determining how notifications are being sent. + * @request_id: The request ID that is used to send these notifications. + * @instance_id: The specific instance in the given target category for + * which notifications should be enabled. + */ +struct ssh_notification_params { + u8 target_category; + u8 flags; + __le16 request_id; + u8 instance_id; +} __packed; + +static_assert(sizeof(struct ssh_notification_params) == 5); + +static int __ssam_ssh_event_request(struct ssam_controller *ctrl, + struct ssam_event_registry reg, u8 cid, + struct ssam_event_id id, u8 flags) +{ + struct ssh_notification_params params; + struct ssam_request rqst; + struct ssam_response result; + int status; + + u16 rqid = ssh_tc_to_rqid(id.target_category); + u8 buf = 0; + + /* Only allow RQIDs that lie within the event spectrum. */ + if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + + params.target_category = id.target_category; + params.instance_id = id.instance; + params.flags = flags; + put_unaligned_le16(rqid, ¶ms.request_id); + + rqst.target_category = reg.target_category; + rqst.target_id = reg.target_id; + rqst.command_id = cid; + rqst.instance_id = 0x00; + rqst.flags = SSAM_REQUEST_HAS_RESPONSE; + rqst.length = sizeof(params); + rqst.payload = (u8 *)¶ms; + + result.capacity = sizeof(buf); + result.length = 0; + result.pointer = &buf; + + status = ssam_retry(ssam_request_do_sync_onstack, ctrl, &rqst, &result, + sizeof(params)); + + return status < 0 ? status : buf; +} + +/** + * ssam_ssh_event_enable() - Enable SSH event. + * @ctrl: The controller for which to enable the event. 
+ * @reg: The event registry describing what request to use for enabling and + * disabling the event. + * @id: The event identifier. + * @flags: The event flags. + * + * Enables the specified event on the EC. This function does not manage + * reference counting of enabled events and is basically only a wrapper for + * the raw EC request. If the specified event is already enabled, the EC will + * ignore this request. + * + * Return: Returns the status of the executed SAM request (zero on success and + * negative on direct failure) or %-EPROTO if the request response indicates a + * failure. + */ +static int ssam_ssh_event_enable(struct ssam_controller *ctrl, + struct ssam_event_registry reg, + struct ssam_event_id id, u8 flags) +{ + int status; + + status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags); + + if (status < 0 && status != -EINVAL) { + ssam_err(ctrl, + "failed to enable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n", + id.target_category, id.instance, reg.target_category); + } + + if (status > 0) { + ssam_err(ctrl, + "unexpected result while enabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n", + status, id.target_category, id.instance, reg.target_category); + return -EPROTO; + } + + return status; +} + +/** + * ssam_ssh_event_disable() - Disable SSH event. + * @ctrl: The controller for which to disable the event. + * @reg: The event registry describing what request to use for enabling and + * disabling the event (must be same as used when enabling the event). + * @id: The event identifier. + * @flags: The event flags (likely ignored for disabling of events). + * + * Disables the specified event on the EC. This function does not manage + * reference counting of enabled events and is basically only a wrapper for + * the raw EC request. If the specified event is already disabled, the EC will + * ignore this request. + * + * Return: Returns the status of the executed SAM request (zero on success and + * negative on direct failure) or %-EPROTO if the request response indicates a + * failure. + */ +static int ssam_ssh_event_disable(struct ssam_controller *ctrl, + struct ssam_event_registry reg, + struct ssam_event_id id, u8 flags) +{ + int status; + + status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags); + + if (status < 0 && status != -EINVAL) { + ssam_err(ctrl, + "failed to disable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n", + id.target_category, id.instance, reg.target_category); + } + + if (status > 0) { + ssam_err(ctrl, + "unexpected result while disabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n", + status, id.target_category, id.instance, reg.target_category); + return -EPROTO; + } + + return status; +} + + +/* -- Wrappers for internal SAM requests. ----------------------------------- */ + +/** + * ssam_get_firmware_version() - Get the SAM/EC firmware version. + * @ctrl: The controller. + * @version: Where to store the version number. + * + * Return: Returns zero on success or the status of the executed SAM request + * if that request failed. + */ +int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version) +{ + __le32 __version; + int status; + + status = ssam_retry(ssam_ssh_get_firmware_version, ctrl, &__version); + if (status) + return status; + + *version = le32_to_cpu(__version); + return 0; +} + +/** + * ssam_ctrl_notif_display_off() - Notify EC that the display has been turned + * off. + * @ctrl: The controller. 
+ * + * Notify the EC that the display has been turned off and the driver may enter + * a lower-power state. This will prevent events from being sent directly. + * Rather, the EC signals an event by pulling the wakeup GPIO high for as long + * as there are pending events. The events then need to be manually released, + * one by one, via the GPIO callback request. All pending events accumulated + * during this state can also be released by issuing the display-on + * notification, e.g. via ssam_ctrl_notif_display_on(), which will also reset + * the GPIO. + * + * On some devices, specifically ones with an integrated keyboard, the keyboard + * backlight will be turned off by this call. + * + * This function will only send the display-off notification command if + * display notifications are supported by the EC. Currently all known devices + * support these notifications. + * + * Use ssam_ctrl_notif_display_on() to reverse the effects of this function. + * + * Return: Returns zero on success or if no request has been executed, the + * status of the executed SAM request if that request failed, or %-EPROTO if + * an unexpected response has been received. + */ +int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl) +{ + int status; + u8 response; + + ssam_dbg(ctrl, "pm: notifying display off\n"); + + status = ssam_retry(ssam_ssh_notif_display_off, ctrl, &response); + if (status) + return status; + + if (response != 0) { + ssam_err(ctrl, "unexpected response from display-off notification: %#04x\n", + response); + return -EPROTO; + } + + return 0; +} + +/** + * ssam_ctrl_notif_display_on() - Notify EC that the display has been turned on. + * @ctrl: The controller. + * + * Notify the EC that the display has been turned back on and the driver has + * exited its lower-power state. This notification is the counterpart to the + * display-off notification sent via ssam_ctrl_notif_display_off() and will + * reverse its effects, including resetting events to their default behavior. + * + * This function will only send the display-on notification command if display + * notifications are supported by the EC. Currently all known devices support + * these notifications. + * + * See ssam_ctrl_notif_display_off() for more details. + * + * Return: Returns zero on success or if no request has been executed, the + * status of the executed SAM request if that request failed, or %-EPROTO if + * an unexpected response has been received. + */ +int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl) +{ + int status; + u8 response; + + ssam_dbg(ctrl, "pm: notifying display on\n"); + + status = ssam_retry(ssam_ssh_notif_display_on, ctrl, &response); + if (status) + return status; + + if (response != 0) { + ssam_err(ctrl, "unexpected response from display-on notification: %#04x\n", + response); + return -EPROTO; + } + + return 0; +} + +/** + * ssam_ctrl_notif_d0_exit() - Notify EC that the driver/device exits the D0 + * power state. + * @ctrl: The controller + * + * Notifies the EC that the driver prepares to exit the D0 power state in + * favor of a lower-power state. Exact effects of this function related to the + * EC are currently unknown. + * + * This function will only send the D0-exit notification command if D0-state + * notifications are supported by the EC. Only newer Surface generations + * support these notifications. + * + * Use ssam_ctrl_notif_d0_entry() to reverse the effects of this function. 
+ * + * Return: Returns zero on success or if no request has been executed, the + * status of the executed SAM request if that request failed, or %-EPROTO if + * an unexpected response has been received. + */ +int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl) +{ + int status; + u8 response; + + if (!ctrl->caps.d3_closes_handle) + return 0; + + ssam_dbg(ctrl, "pm: notifying D0 exit\n"); + + status = ssam_retry(ssam_ssh_notif_d0_exit, ctrl, &response); + if (status) + return status; + + if (response != 0) { + ssam_err(ctrl, "unexpected response from D0-exit notification: %#04x\n", + response); + return -EPROTO; + } + + return 0; +} + +/** + * ssam_ctrl_notif_d0_entry() - Notify EC that the driver/device enters the D0 + * power state. + * @ctrl: The controller + * + * Notifies the EC that the driver has exited a lower-power state and entered + * the D0 power state. Exact effects of this function related to the EC are + * currently unknown. + * + * This function will only send the D0-entry notification command if D0-state + * notifications are supported by the EC. Only newer Surface generations + * support these notifications. + * + * See ssam_ctrl_notif_d0_exit() for more details. + * + * Return: Returns zero on success or if no request has been executed, the + * status of the executed SAM request if that request failed, or %-EPROTO if + * an unexpected response has been received. + */ +int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl) +{ + int status; + u8 response; + + if (!ctrl->caps.d3_closes_handle) + return 0; + + ssam_dbg(ctrl, "pm: notifying D0 entry\n"); + + status = ssam_retry(ssam_ssh_notif_d0_entry, ctrl, &response); + if (status) + return status; + + if (response != 0) { + ssam_err(ctrl, "unexpected response from D0-entry notification: %#04x\n", + response); + return -EPROTO; + } + + return 0; +} + + +/* -- Top-level event registry interface. ----------------------------------- */ + +/** + * ssam_nf_refcount_enable() - Enable event for reference count entry if it has + * not already been enabled. + * @ctrl: The controller to enable the event on. + * @entry: The reference count entry for the event to be enabled. + * @flags: The flags used for enabling the event on the EC. + * + * Enable the event associated with the given reference count entry if the + * reference count equals one, i.e. the event has not previously been enabled. + * If the event has already been enabled (i.e. reference count not equal to + * one), check that the flags used for enabling match and warn about this if + * they do not. + * + * This does not modify the reference count itself, which is done with + * ssam_nf_refcount_inc() / ssam_nf_refcount_dec(). + * + * Note: ``nf->lock`` must be held when calling this function. + * + * Return: Returns zero on success. If the event is enabled by this call, + * returns the status of the event-enable EC command. 
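+ *
+ * Note that if the event has already been enabled with different flags, the
+ * stored flags remain unchanged; the event stays enabled with the flags it
+ * was originally enabled with and only a warning is emitted.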
+ */
+static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
+				   struct ssam_nf_refcount_entry *entry, u8 flags)
+{
+	const struct ssam_event_registry reg = entry->key.reg;
+	const struct ssam_event_id id = entry->key.id;
+	struct ssam_nf *nf = &ctrl->cplt.event.notif;
+	int status;
+
+	lockdep_assert_held(&nf->lock);
+
+	ssam_dbg(ctrl, "enabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+		 reg.target_category, id.target_category, id.instance, entry->refcount);
+
+	if (entry->refcount == 1) {
+		status = ssam_ssh_event_enable(ctrl, reg, id, flags);
+		if (status)
+			return status;
+
+		entry->flags = flags;
+
+	} else if (entry->flags != flags) {
+		ssam_warn(ctrl,
+			  "inconsistent flags when enabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
+			  flags, entry->flags, reg.target_category, id.target_category,
+			  id.instance);
+	}
+
+	return 0;
+}
+
+/**
+ * ssam_nf_refcount_disable_free() - Disable event for reference count entry if
+ * it is no longer in use and free the corresponding entry.
+ * @ctrl: The controller to disable the event on.
+ * @entry: The reference count entry for the event to be disabled.
+ * @flags: The flags used for enabling the event on the EC.
+ * @ec: Flag specifying if the event should actually be disabled on the EC.
+ *
+ * If ``ec`` equals ``true`` and the reference count equals zero (i.e. the
+ * event is no longer requested by any client), the specified event will be
+ * disabled on the EC via the corresponding request.
+ *
+ * If ``ec`` equals ``false``, no request will be sent to the EC and the event
+ * can be considered in a detached state (i.e. no longer used but still
+ * enabled). Disabling an event via this method may be required for
+ * hot-removable devices, where event disable requests may time out after the
+ * device has been physically removed.
+ *
+ * In both cases, if the reference count equals zero, the corresponding
+ * reference count entry will be freed. The reference count entry must not be
+ * used any more after a call to this function.
+ *
+ * Also checks if the flags used for disabling the event match the flags used
+ * for enabling the event and warns if they do not (regardless of reference
+ * count).
+ *
+ * This does not modify the reference count itself, which is done with
+ * ssam_nf_refcount_inc() / ssam_nf_refcount_dec().
+ *
+ * Note: ``nf->lock`` must be held when calling this function.
+ *
+ * Return: Returns zero on success. If the event is disabled by this call,
+ * returns the status of the event-disable EC command.
+ */
+static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
+					 struct ssam_nf_refcount_entry *entry, u8 flags, bool ec)
+{
+	const struct ssam_event_registry reg = entry->key.reg;
+	const struct ssam_event_id id = entry->key.id;
+	struct ssam_nf *nf = &ctrl->cplt.event.notif;
+	int status = 0;
+
+	lockdep_assert_held(&nf->lock);
+
+	ssam_dbg(ctrl, "%s event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+		 ec ? "disabling" : "detaching", reg.target_category, id.target_category,
+		 id.instance, entry->refcount);
+
+	if (entry->flags != flags) {
+		ssam_warn(ctrl,
+			  "inconsistent flags when disabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
+			  flags, entry->flags, reg.target_category, id.target_category,
+			  id.instance);
+	}
+
+	if (ec && entry->refcount == 0) {
+		status = ssam_ssh_event_disable(ctrl, reg, id, flags);
+		kfree(entry);
+	}
+
+	return status;
+}
+
+/**
+ * ssam_notifier_register() - Register an event notifier.
+ * @ctrl: The controller to register the notifier on. + * @n: The event notifier to register. + * + * Register an event notifier. Increment the usage counter of the associated + * SAM event if the notifier is not marked as an observer. If the event is not + * marked as an observer and is currently not enabled, it will be enabled + * during this call. If the notifier is marked as an observer, no attempt will + * be made at enabling any event and no reference count will be modified. + * + * Notifiers marked as observers do not need to be associated with one specific + * event, i.e. as long as no event matching is performed, only the event target + * category needs to be set. + * + * Return: Returns zero on success, %-ENOSPC if there have already been + * %INT_MAX notifiers for the event ID/type associated with the notifier block + * registered, %-ENOMEM if the corresponding event entry could not be + * allocated. If this is the first time that a notifier block is registered + * for the specific associated event, returns the status of the event-enable + * EC-command. + */ +int ssam_notifier_register(struct ssam_controller *ctrl, struct ssam_event_notifier *n) +{ + u16 rqid = ssh_tc_to_rqid(n->event.id.target_category); + struct ssam_nf_refcount_entry *entry = NULL; + struct ssam_nf_head *nf_head; + struct ssam_nf *nf; + int status; + + if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + + nf = &ctrl->cplt.event.notif; + nf_head = &nf->head[ssh_rqid_to_event(rqid)]; + + mutex_lock(&nf->lock); + + if (!(n->flags & SSAM_EVENT_NOTIFIER_OBSERVER)) { + entry = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id); + if (IS_ERR(entry)) { + mutex_unlock(&nf->lock); + return PTR_ERR(entry); + } + } + + status = ssam_nfblk_insert(nf_head, &n->base); + if (status) { + if (entry) + ssam_nf_refcount_dec_free(nf, n->event.reg, n->event.id); + + mutex_unlock(&nf->lock); + return status; + } + + if (entry) { + status = ssam_nf_refcount_enable(ctrl, entry, n->event.flags); + if (status) { + ssam_nfblk_remove(&n->base); + ssam_nf_refcount_dec_free(nf, n->event.reg, n->event.id); + mutex_unlock(&nf->lock); + synchronize_srcu(&nf_head->srcu); + return status; + } + } + + mutex_unlock(&nf->lock); + return 0; +} +EXPORT_SYMBOL_GPL(ssam_notifier_register); + +/** + * __ssam_notifier_unregister() - Unregister an event notifier. + * @ctrl: The controller the notifier has been registered on. + * @n: The event notifier to unregister. + * @disable: Whether to disable the corresponding event on the EC. + * + * Unregister an event notifier. Decrement the usage counter of the associated + * SAM event if the notifier is not marked as an observer. If the usage counter + * reaches zero and ``disable`` equals ``true``, the event will be disabled. + * + * Useful for hot-removable devices, where communication may fail once the + * device has been physically removed. In that case, specifying ``disable`` as + * ``false`` avoids communication with the EC. + * + * Return: Returns zero on success, %-ENOENT if the given notifier block has + * not been registered on the controller. If the given notifier block was the + * last one associated with its specific event, returns the status of the + * event-disable EC-command. 
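+ *
+ * As a rough sketch, a register/unregister pairing might look as follows;
+ * the registry, target category, and event-flag constants below are
+ * placeholders that depend on the event in question, and the callback
+ * signature is assumed from the public SSAM controller API::
+ *
+ *	static u32 my_notif_fn(struct ssam_event_notifier *nf,
+ *			       const struct ssam_event *event)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	struct ssam_event_notifier n = {
+ *		.base.fn = my_notif_fn,
+ *		.event.reg = SSAM_EVENT_REGISTRY_SAM,
+ *		.event.id.target_category = SSAM_SSH_TC_SAM,
+ *		.event.id.instance = 0x00,
+ *		.event.flags = SSAM_EVENT_SEQUENCED,
+ *	};
+ *
+ *	status = ssam_notifier_register(ctrl, &n);
+ *	...
+ *	status = __ssam_notifier_unregister(ctrl, &n, true);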
+ */ +int __ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n, + bool disable) +{ + u16 rqid = ssh_tc_to_rqid(n->event.id.target_category); + struct ssam_nf_refcount_entry *entry; + struct ssam_nf_head *nf_head; + struct ssam_nf *nf; + int status = 0; + + if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + + nf = &ctrl->cplt.event.notif; + nf_head = &nf->head[ssh_rqid_to_event(rqid)]; + + mutex_lock(&nf->lock); + + if (!ssam_nfblk_find(nf_head, &n->base)) { + mutex_unlock(&nf->lock); + return -ENOENT; + } + + /* + * If this is an observer notifier, do not attempt to disable the + * event, just remove it. + */ + if (!(n->flags & SSAM_EVENT_NOTIFIER_OBSERVER)) { + entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id); + if (WARN_ON(!entry)) { + /* + * If this does not return an entry, there's a logic + * error somewhere: The notifier block is registered, + * but the event refcount entry is not there. Remove + * the notifier block anyways. + */ + status = -ENOENT; + goto remove; + } + + status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags, disable); + } + +remove: + ssam_nfblk_remove(&n->base); + mutex_unlock(&nf->lock); + synchronize_srcu(&nf_head->srcu); + + return status; +} +EXPORT_SYMBOL_GPL(__ssam_notifier_unregister); + +/** + * ssam_controller_event_enable() - Enable the specified event. + * @ctrl: The controller to enable the event for. + * @reg: The event registry to use for enabling the event. + * @id: The event ID specifying the event to be enabled. + * @flags: The SAM event flags used for enabling the event. + * + * Increment the event reference count of the specified event. If the event has + * not been enabled previously, it will be enabled by this call. + * + * Note: In general, ssam_notifier_register() with a non-observer notifier + * should be preferred for enabling/disabling events, as this will guarantee + * proper ordering and event forwarding in case of errors during event + * enabling/disabling. + * + * Return: Returns zero on success, %-ENOSPC if the reference count for the + * specified event has reached its maximum, %-ENOMEM if the corresponding event + * entry could not be allocated. If this is the first time that this event has + * been enabled (i.e. the reference count was incremented from zero to one by + * this call), returns the status of the event-enable EC-command. + */ +int ssam_controller_event_enable(struct ssam_controller *ctrl, + struct ssam_event_registry reg, + struct ssam_event_id id, u8 flags) +{ + u16 rqid = ssh_tc_to_rqid(id.target_category); + struct ssam_nf *nf = &ctrl->cplt.event.notif; + struct ssam_nf_refcount_entry *entry; + int status; + + if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + + mutex_lock(&nf->lock); + + entry = ssam_nf_refcount_inc(nf, reg, id); + if (IS_ERR(entry)) { + mutex_unlock(&nf->lock); + return PTR_ERR(entry); + } + + status = ssam_nf_refcount_enable(ctrl, entry, flags); + if (status) { + ssam_nf_refcount_dec_free(nf, reg, id); + mutex_unlock(&nf->lock); + return status; + } + + mutex_unlock(&nf->lock); + return 0; +} +EXPORT_SYMBOL_GPL(ssam_controller_event_enable); + +/** + * ssam_controller_event_disable() - Disable the specified event. + * @ctrl: The controller to disable the event for. + * @reg: The event registry to use for disabling the event. + * @id: The event ID specifying the event to be disabled. + * @flags: The flags used when enabling the event. + * + * Decrement the reference count of the specified event. 
If the reference count + * reaches zero, the event will be disabled. + * + * Note: In general, ssam_notifier_register()/ssam_notifier_unregister() with a + * non-observer notifier should be preferred for enabling/disabling events, as + * this will guarantee proper ordering and event forwarding in case of errors + * during event enabling/disabling. + * + * Return: Returns zero on success, %-ENOENT if the given event has not been + * enabled on the controller. If the reference count of the event reaches zero + * during this call, returns the status of the event-disable EC-command. + */ +int ssam_controller_event_disable(struct ssam_controller *ctrl, + struct ssam_event_registry reg, + struct ssam_event_id id, u8 flags) +{ + u16 rqid = ssh_tc_to_rqid(id.target_category); + struct ssam_nf *nf = &ctrl->cplt.event.notif; + struct ssam_nf_refcount_entry *entry; + int status; + + if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + + mutex_lock(&nf->lock); + + entry = ssam_nf_refcount_dec(nf, reg, id); + if (!entry) { + mutex_unlock(&nf->lock); + return -ENOENT; + } + + status = ssam_nf_refcount_disable_free(ctrl, entry, flags, true); + + mutex_unlock(&nf->lock); + return status; +} +EXPORT_SYMBOL_GPL(ssam_controller_event_disable); + +/** + * ssam_notifier_disable_registered() - Disable events for all registered + * notifiers. + * @ctrl: The controller for which to disable the notifiers/events. + * + * Disables events for all currently registered notifiers. In case of an error + * (EC command failing), all previously disabled events will be restored and + * the error code returned. + * + * This function is intended to disable all events prior to hibernation entry. + * See ssam_notifier_restore_registered() to restore/re-enable all events + * disabled with this function. + * + * Note that this function will not disable events for notifiers registered + * after calling this function. It should thus be made sure that no new + * notifiers are going to be added after this call and before the corresponding + * call to ssam_notifier_restore_registered(). + * + * Return: Returns zero on success. In case of failure returns the error code + * returned by the failed EC command to disable an event. + */ +int ssam_notifier_disable_registered(struct ssam_controller *ctrl) +{ + struct ssam_nf *nf = &ctrl->cplt.event.notif; + struct rb_node *n; + int status; + + mutex_lock(&nf->lock); + for (n = rb_first(&nf->refcount); n; n = rb_next(n)) { + struct ssam_nf_refcount_entry *e; + + e = rb_entry(n, struct ssam_nf_refcount_entry, node); + status = ssam_ssh_event_disable(ctrl, e->key.reg, + e->key.id, e->flags); + if (status) + goto err; + } + mutex_unlock(&nf->lock); + + return 0; + +err: + for (n = rb_prev(n); n; n = rb_prev(n)) { + struct ssam_nf_refcount_entry *e; + + e = rb_entry(n, struct ssam_nf_refcount_entry, node); + ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags); + } + mutex_unlock(&nf->lock); + + return status; +} + +/** + * ssam_notifier_restore_registered() - Restore/re-enable events for all + * registered notifiers. + * @ctrl: The controller for which to restore the notifiers/events. + * + * Restores/re-enables all events for which notifiers have been registered on + * the given controller. In case of a failure, the error is logged and the + * function continues to try and enable the remaining events. + * + * This function is intended to restore/re-enable all registered events after + * hibernation. 
See ssam_notifier_disable_registered() for the counterpart
+ * disabling the events and more details.
+ */
+void ssam_notifier_restore_registered(struct ssam_controller *ctrl)
+{
+	struct ssam_nf *nf = &ctrl->cplt.event.notif;
+	struct rb_node *n;
+
+	mutex_lock(&nf->lock);
+	for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
+		struct ssam_nf_refcount_entry *e;
+
+		e = rb_entry(n, struct ssam_nf_refcount_entry, node);
+
+		/* Ignore errors, will get logged in call. */
+		ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
+	}
+	mutex_unlock(&nf->lock);
+}
+
+/**
+ * ssam_notifier_is_empty() - Check if there are any registered notifiers.
+ * @ctrl: The controller to check on.
+ *
+ * Return: Returns %true if there are currently no notifiers registered on the
+ * controller, %false otherwise.
+ */
+static bool ssam_notifier_is_empty(struct ssam_controller *ctrl)
+{
+	struct ssam_nf *nf = &ctrl->cplt.event.notif;
+	bool result;
+
+	mutex_lock(&nf->lock);
+	result = ssam_nf_refcount_empty(nf);
+	mutex_unlock(&nf->lock);
+
+	return result;
+}
+
+/**
+ * ssam_notifier_unregister_all() - Unregister all currently registered
+ * notifiers.
+ * @ctrl: The controller to unregister the notifiers on.
+ *
+ * Unregisters all currently registered notifiers. This function is used to
+ * ensure that all notifiers will be unregistered and associated
+ * entries/resources freed when the controller is being shut down.
+ */
+static void ssam_notifier_unregister_all(struct ssam_controller *ctrl)
+{
+	struct ssam_nf *nf = &ctrl->cplt.event.notif;
+	struct ssam_nf_refcount_entry *e, *n;
+
+	mutex_lock(&nf->lock);
+	rbtree_postorder_for_each_entry_safe(e, n, &nf->refcount, node) {
+		/* Ignore errors, will get logged in call. */
+		ssam_ssh_event_disable(ctrl, e->key.reg, e->key.id, e->flags);
+		kfree(e);
+	}
+	nf->refcount = RB_ROOT;
+	mutex_unlock(&nf->lock);
+}
+
+
+/* -- Wakeup IRQ. ----------------------------------------------------------- */
+
+static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
+{
+	struct ssam_controller *ctrl = dev_id;
+
+	ssam_dbg(ctrl, "pm: wake irq triggered\n");
+
+	/*
+	 * Note: Proper wakeup detection is currently unimplemented.
+	 * When the EC is in display-off or any other non-D0 state, it
+	 * does not send events/notifications to the host. Instead it
+	 * signals that there are events available via the wakeup IRQ.
+	 * This driver is responsible for calling back to the EC to
+	 * release these events one-by-one.
+	 *
+	 * This IRQ should not cause a full system resume on its own.
+	 * Instead, events should be handled by their respective subsystem
+	 * drivers, which in turn should signal whether a full system
+	 * resume should be performed.
+	 *
+	 * TODO: Send GPIO callback command repeatedly to EC until callback
+	 * returns 0x00. Return flag of callback is "has more events".
+	 * Each time the command is sent, one event is "released". Once
+	 * all events have been released (return = 0x00), the GPIO is
+	 * re-armed. Detect wakeup events during this process, go back to
+	 * sleep if no wakeup event has been received.
+	 */
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ssam_irq_setup() - Set up SAM EC wakeup-GPIO interrupt.
+ * @ctrl: The controller for which the IRQ should be set up.
+ *
+ * Set up an IRQ for the wakeup-GPIO pin of the SAM EC. This IRQ can be used
+ * to wake the device from a low power state.
+ *
+ * Note that this IRQ can only be triggered while the EC is in the display-off
+ * state. In this state, events are not sent to the host in the usual way.
+ * Instead the wakeup-GPIO gets pulled to "high" as long as there are pending
+ * events and these events need to be released one-by-one via the GPIO
+ * callback request, either until there are no events left and the GPIO is
+ * reset, or all at once by transitioning the EC out of the display-off state,
+ * which will also clear the GPIO.
+ *
+ * Not all events, however, should trigger a full system wakeup. Instead the
+ * driver should, if necessary, inspect and forward each event to the
+ * corresponding subsystem, which in turn should decide if the system needs to
+ * be woken up. This logic has not been implemented yet, thus wakeup by this
+ * IRQ should be disabled by default to avoid spurious wake-ups, caused, for
+ * example, by the remaining battery percentage changing. Refer to comments in
+ * this function and comments in the corresponding IRQ handler for more
+ * details on how this should be implemented.
+ *
+ * See also ssam_ctrl_notif_display_off() and ssam_ctrl_notif_display_on()
+ * for functions to transition the EC into and out of the display-off state as
+ * well as more details on it.
+ *
+ * The IRQ is disabled by default and has to be enabled before it can wake up
+ * the device from suspend via ssam_irq_arm_for_wakeup(). On teardown, the IRQ
+ * should be freed via ssam_irq_free().
+ */
+int ssam_irq_setup(struct ssam_controller *ctrl)
+{
+	struct device *dev = ssam_controller_device(ctrl);
+	struct gpio_desc *gpiod;
+	int irq;
+	int status;
+
+	/*
+	 * The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
+	 * However, the GPIO line only gets reset by sending the GPIO callback
+	 * command to SAM (or alternatively the display-on notification). As
+	 * proper handling for this interrupt is not implemented yet, leaving
+	 * the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
+	 * never gets sent and thus the line never gets reset). To avoid this,
+	 * mark the IRQ as TRIGGER_RISING for now, only creating a single
+	 * interrupt, and let the SAM resume callback during the controller
+	 * resume process clear it.
+	 */
+	const int irqf = IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
+
+	gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
+	if (IS_ERR(gpiod))
+		return PTR_ERR(gpiod);
+
+	irq = gpiod_to_irq(gpiod);
+	gpiod_put(gpiod);
+
+	if (irq < 0)
+		return irq;
+
+	status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
+				      "ssam_wakeup", ctrl);
+	if (status)
+		return status;
+
+	ctrl->irq.num = irq;
+	return 0;
+}
+
+/**
+ * ssam_irq_free() - Free SAM EC wakeup-GPIO interrupt.
+ * @ctrl: The controller for which the IRQ should be freed.
+ *
+ * Free the wakeup-GPIO IRQ previously set up via ssam_irq_setup().
+ */
+void ssam_irq_free(struct ssam_controller *ctrl)
+{
+	free_irq(ctrl->irq.num, ctrl);
+	ctrl->irq.num = -1;
+}
+
+/**
+ * ssam_irq_arm_for_wakeup() - Arm the EC IRQ for wakeup, if enabled.
+ * @ctrl: The controller for which the IRQ should be armed.
+ *
+ * Sets up the IRQ so that it can be used to wake the device. Specifically,
+ * this function enables the irq and then, if the device is allowed to wake up
+ * the system, calls enable_irq_wake(). See ssam_irq_disarm_wakeup() for the
+ * corresponding function to disable the IRQ.
+ *
+ * This function is intended to arm the IRQ before entering S2idle suspend.
+ *
+ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
+ * be balanced.
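+ *
+ * As a rough sketch, a hypothetical S2idle suspend path would call
+ *
+ *	status = ssam_irq_arm_for_wakeup(ctrl);
+ *
+ * before entering suspend, and the corresponding resume path would call
+ *
+ *	ssam_irq_disarm_wakeup(ctrl);
+ *
+ * afterwards, keeping both calls balanced.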
+ */ +int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl) +{ + struct device *dev = ssam_controller_device(ctrl); + int status; + + enable_irq(ctrl->irq.num); + if (device_may_wakeup(dev)) { + status = enable_irq_wake(ctrl->irq.num); + if (status) { + ssam_err(ctrl, "failed to enable wake IRQ: %d\n", status); + disable_irq(ctrl->irq.num); + return status; + } + + ctrl->irq.wakeup_enabled = true; + } else { + ctrl->irq.wakeup_enabled = false; + } + + return 0; +} + +/** + * ssam_irq_disarm_wakeup() - Disarm the wakeup IRQ. + * @ctrl: The controller for which the IRQ should be disarmed. + * + * Disarm the IRQ previously set up for wake via ssam_irq_arm_for_wakeup(). + * + * This function is intended to disarm the IRQ after exiting S2idle suspend. + * + * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must + * be balanced. + */ +void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl) +{ + int status; + + if (ctrl->irq.wakeup_enabled) { + status = disable_irq_wake(ctrl->irq.num); + if (status) + ssam_err(ctrl, "failed to disable wake IRQ: %d\n", status); + + ctrl->irq.wakeup_enabled = false; + } + disable_irq(ctrl->irq.num); +} diff --git a/drivers/platform/surface/aggregator/controller.h b/drivers/platform/surface/aggregator/controller.h new file mode 100644 index 0000000000..f0d987abc5 --- /dev/null +++ b/drivers/platform/surface/aggregator/controller.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Main SSAM/SSH controller structure and functionality. + * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef _SURFACE_AGGREGATOR_CONTROLLER_H +#define _SURFACE_AGGREGATOR_CONTROLLER_H + +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/rwsem.h> +#include <linux/serdev.h> +#include <linux/spinlock.h> +#include <linux/srcu.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include <linux/surface_aggregator/controller.h> +#include <linux/surface_aggregator/serial_hub.h> + +#include "ssh_request_layer.h" + + +/* -- Safe counters. -------------------------------------------------------- */ + +/** + * struct ssh_seq_counter - Safe counter for SSH sequence IDs. + * @value: The current counter value. + */ +struct ssh_seq_counter { + u8 value; +}; + +/** + * struct ssh_rqid_counter - Safe counter for SSH request IDs. + * @value: The current counter value. + */ +struct ssh_rqid_counter { + u16 value; +}; + + +/* -- Event/notification system. -------------------------------------------- */ + +/** + * struct ssam_nf_head - Notifier head for SSAM events. + * @srcu: The SRCU struct for synchronization. + * @head: List-head for notifier blocks registered under this head. + */ +struct ssam_nf_head { + struct srcu_struct srcu; + struct list_head head; +}; + +/** + * struct ssam_nf - Notifier callback- and activation-registry for SSAM events. + * @lock: Lock guarding (de-)registration of notifier blocks. Note: This + * lock does not need to be held for notifier calls, only + * registration and deregistration. + * @refcount: The root of the RB-tree used for reference-counting enabled + * events/notifications. + * @head: The list of notifier heads for event/notification callbacks. + */ +struct ssam_nf { + struct mutex lock; + struct rb_root refcount; + struct ssam_nf_head head[SSH_NUM_EVENTS]; +}; + + +/* -- Event/async request completion system. 
-------------------------------- */ + +struct ssam_cplt; + +/** + * struct ssam_event_item - Struct for event queuing and completion. + * @node: The node in the queue. + * @rqid: The request ID of the event. + * @ops: Instance specific functions. + * @ops.free: Callback for freeing this event item. + * @event: Actual event data. + */ +struct ssam_event_item { + struct list_head node; + u16 rqid; + + struct { + void (*free)(struct ssam_event_item *event); + } ops; + + struct ssam_event event; /* must be last */ +}; + +/** + * struct ssam_event_queue - Queue for completing received events. + * @cplt: Reference to the completion system on which this queue is active. + * @lock: The lock for any operation on the queue. + * @head: The list-head of the queue. + * @work: The &struct work_struct performing completion work for this queue. + */ +struct ssam_event_queue { + struct ssam_cplt *cplt; + + spinlock_t lock; + struct list_head head; + struct work_struct work; +}; + +/** + * struct ssam_event_target - Set of queues for a single SSH target ID. + * @queue: The array of queues, one queue per event ID. + */ +struct ssam_event_target { + struct ssam_event_queue queue[SSH_NUM_EVENTS]; +}; + +/** + * struct ssam_cplt - SSAM event/async request completion system. + * @dev: The device with which this system is associated. Only used + * for logging. + * @wq: The &struct workqueue_struct on which all completion work + * items are queued. + * @event: Event completion management. + * @event.target: Array of &struct ssam_event_target, one for each target. + * @event.notif: Notifier callbacks and event activation reference counting. + */ +struct ssam_cplt { + struct device *dev; + struct workqueue_struct *wq; + + struct { + struct ssam_event_target target[SSH_NUM_TARGETS]; + struct ssam_nf notif; + } event; +}; + + +/* -- Main SSAM device structures. ------------------------------------------ */ + +/** + * enum ssam_controller_state - State values for &struct ssam_controller. + * @SSAM_CONTROLLER_UNINITIALIZED: + * The controller has not been initialized yet or has been deinitialized. + * @SSAM_CONTROLLER_INITIALIZED: + * The controller is initialized, but has not been started yet. + * @SSAM_CONTROLLER_STARTED: + * The controller has been started and is ready to use. + * @SSAM_CONTROLLER_STOPPED: + * The controller has been stopped. + * @SSAM_CONTROLLER_SUSPENDED: + * The controller has been suspended. + */ +enum ssam_controller_state { + SSAM_CONTROLLER_UNINITIALIZED, + SSAM_CONTROLLER_INITIALIZED, + SSAM_CONTROLLER_STARTED, + SSAM_CONTROLLER_STOPPED, + SSAM_CONTROLLER_SUSPENDED, +}; + +/** + * struct ssam_controller_caps - Controller device capabilities. + * @ssh_power_profile: SSH power profile. + * @ssh_buffer_size: SSH driver UART buffer size. + * @screen_on_sleep_idle_timeout: SAM UART screen-on sleep idle timeout. + * @screen_off_sleep_idle_timeout: SAM UART screen-off sleep idle timeout. + * @d3_closes_handle: SAM closes UART handle in D3. + * + * Controller and SSH device capabilities found in ACPI. + */ +struct ssam_controller_caps { + u32 ssh_power_profile; + u32 ssh_buffer_size; + u32 screen_on_sleep_idle_timeout; + u32 screen_off_sleep_idle_timeout; + u32 d3_closes_handle:1; +}; + +/** + * struct ssam_controller - SSAM controller device. + * @kref: Reference count of the controller. + * @lock: Main lock for the controller, used to guard state changes. + * @state: Controller state. + * @rtl: Request transport layer for SSH I/O. 
+ * @cplt: Completion system for SSH/SSAM events and asynchronous requests. + * @counter: Safe SSH message ID counters. + * @counter.seq: Sequence ID counter. + * @counter.rqid: Request ID counter. + * @irq: Wakeup IRQ resources. + * @irq.num: The wakeup IRQ number. + * @irq.wakeup_enabled: Whether wakeup by IRQ is enabled during suspend. + * @caps: The controller device capabilities. + */ +struct ssam_controller { + struct kref kref; + + struct rw_semaphore lock; + enum ssam_controller_state state; + + struct ssh_rtl rtl; + struct ssam_cplt cplt; + + struct { + struct ssh_seq_counter seq; + struct ssh_rqid_counter rqid; + } counter; + + struct { + int num; + bool wakeup_enabled; + } irq; + + struct ssam_controller_caps caps; +}; + +#define to_ssam_controller(ptr, member) \ + container_of(ptr, struct ssam_controller, member) + +#define ssam_dbg(ctrl, fmt, ...) rtl_dbg(&(ctrl)->rtl, fmt, ##__VA_ARGS__) +#define ssam_info(ctrl, fmt, ...) rtl_info(&(ctrl)->rtl, fmt, ##__VA_ARGS__) +#define ssam_warn(ctrl, fmt, ...) rtl_warn(&(ctrl)->rtl, fmt, ##__VA_ARGS__) +#define ssam_err(ctrl, fmt, ...) rtl_err(&(ctrl)->rtl, fmt, ##__VA_ARGS__) + +/** + * ssam_controller_receive_buf() - Provide input-data to the controller. + * @ctrl: The controller. + * @buf: The input buffer. + * @n: The number of bytes in the input buffer. + * + * Provide input data to be evaluated by the controller, which has been + * received via the lower-level transport. + * + * Return: Returns the number of bytes consumed, or, if the packet transport + * layer of the controller has been shut down, %-ESHUTDOWN. + */ +static inline +int ssam_controller_receive_buf(struct ssam_controller *ctrl, + const unsigned char *buf, size_t n) +{ + return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n); +} + +/** + * ssam_controller_write_wakeup() - Notify the controller that the underlying + * device has space available for data to be written. + * @ctrl: The controller. 
+ */ +static inline void ssam_controller_write_wakeup(struct ssam_controller *ctrl) +{ + ssh_ptl_tx_wakeup_transfer(&ctrl->rtl.ptl); +} + +int ssam_controller_init(struct ssam_controller *ctrl, struct serdev_device *s); +int ssam_controller_start(struct ssam_controller *ctrl); +void ssam_controller_shutdown(struct ssam_controller *ctrl); +void ssam_controller_destroy(struct ssam_controller *ctrl); + +int ssam_notifier_disable_registered(struct ssam_controller *ctrl); +void ssam_notifier_restore_registered(struct ssam_controller *ctrl); + +int ssam_irq_setup(struct ssam_controller *ctrl); +void ssam_irq_free(struct ssam_controller *ctrl); +int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl); +void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl); + +void ssam_controller_lock(struct ssam_controller *c); +void ssam_controller_unlock(struct ssam_controller *c); + +int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version); +int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl); +int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl); +int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl); +int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl); + +int ssam_controller_suspend(struct ssam_controller *ctrl); +int ssam_controller_resume(struct ssam_controller *ctrl); + +int ssam_event_item_cache_init(void); +void ssam_event_item_cache_destroy(void); + +#endif /* _SURFACE_AGGREGATOR_CONTROLLER_H */ diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c new file mode 100644 index 0000000000..6152be3839 --- /dev/null +++ b/drivers/platform/surface/aggregator/core.c @@ -0,0 +1,836 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Surface Serial Hub (SSH) driver for communication with the Surface/System + * Aggregator Module (SSAM/SAM). + * + * Provides access to a SAM-over-SSH connected EC via a controller device. + * Handles communication via requests as well as enabling, disabling, and + * relaying of events. + * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <linux/acpi.h> +#include <linux/atomic.h> +#include <linux/completion.h> +#include <linux/gpio/consumer.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/module.h> +#include <linux/pm.h> +#include <linux/serdev.h> +#include <linux/sysfs.h> + +#include <linux/surface_aggregator/controller.h> +#include <linux/surface_aggregator/device.h> + +#include "bus.h" +#include "controller.h" + +#define CREATE_TRACE_POINTS +#include "trace.h" + + +/* -- Static controller reference. ------------------------------------------ */ + +/* + * Main controller reference. The corresponding lock must be held while + * accessing (reading/writing) the reference. + */ +static struct ssam_controller *__ssam_controller; +static DEFINE_SPINLOCK(__ssam_controller_lock); + +/** + * ssam_get_controller() - Get reference to SSAM controller. + * + * Returns a reference to the SSAM controller of the system or %NULL if there + * is none, it hasn't been set up yet, or it has already been unregistered. + * This function automatically increments the reference count of the + * controller, thus the calling party must ensure that ssam_controller_put() + * is called when it doesn't need the controller any more. 
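+ *
+ * As a rough sketch, hypothetical client code would use this as:
+ *
+ *	struct ssam_controller *ctrl;
+ *
+ *	ctrl = ssam_get_controller();
+ *	if (!ctrl)
+ *		return -ENODEV;
+ *
+ *	... use ctrl ...
+ *
+ *	ssam_controller_put(ctrl);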
+ */ +struct ssam_controller *ssam_get_controller(void) +{ + struct ssam_controller *ctrl; + + spin_lock(&__ssam_controller_lock); + + ctrl = __ssam_controller; + if (!ctrl) + goto out; + + if (WARN_ON(!kref_get_unless_zero(&ctrl->kref))) + ctrl = NULL; + +out: + spin_unlock(&__ssam_controller_lock); + return ctrl; +} +EXPORT_SYMBOL_GPL(ssam_get_controller); + +/** + * ssam_try_set_controller() - Try to set the main controller reference. + * @ctrl: The controller to which the reference should point. + * + * Set the main controller reference to the given pointer if the reference + * hasn't been set already. + * + * Return: Returns zero on success or %-EEXIST if the reference has already + * been set. + */ +static int ssam_try_set_controller(struct ssam_controller *ctrl) +{ + int status = 0; + + spin_lock(&__ssam_controller_lock); + if (!__ssam_controller) + __ssam_controller = ctrl; + else + status = -EEXIST; + spin_unlock(&__ssam_controller_lock); + + return status; +} + +/** + * ssam_clear_controller() - Remove/clear the main controller reference. + * + * Clears the main controller reference, i.e. sets it to %NULL. This function + * should be called before the controller is shut down. + */ +static void ssam_clear_controller(void) +{ + spin_lock(&__ssam_controller_lock); + __ssam_controller = NULL; + spin_unlock(&__ssam_controller_lock); +} + +/** + * ssam_client_link() - Link an arbitrary client device to the controller. + * @c: The controller to link to. + * @client: The client device. + * + * Link an arbitrary client device to the controller by creating a device link + * between it as consumer and the controller device as provider. This function + * can be used for non-SSAM devices (or SSAM devices not registered as child + * under the controller) to guarantee that the controller is valid for as long + * as the driver of the client device is bound, and that proper suspend and + * resume ordering is guaranteed. + * + * The device link does not have to be destructed manually. It is removed + * automatically once the driver of the client device unbinds. + * + * Return: Returns zero on success, %-ENODEV if the controller is not ready or + * going to be removed soon, or %-ENOMEM if the device link could not be + * created for other reasons. + */ +int ssam_client_link(struct ssam_controller *c, struct device *client) +{ + const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER; + struct device_link *link; + struct device *ctrldev; + + ssam_controller_statelock(c); + + if (c->state != SSAM_CONTROLLER_STARTED) { + ssam_controller_stateunlock(c); + return -ENODEV; + } + + ctrldev = ssam_controller_device(c); + if (!ctrldev) { + ssam_controller_stateunlock(c); + return -ENODEV; + } + + link = device_link_add(client, ctrldev, flags); + if (!link) { + ssam_controller_stateunlock(c); + return -ENOMEM; + } + + /* + * Return -ENODEV if supplier driver is on its way to be removed. In + * this case, the controller won't be around for much longer and the + * device link is not going to save us any more, as unbinding is + * already in progress. + */ + if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) { + ssam_controller_stateunlock(c); + return -ENODEV; + } + + ssam_controller_stateunlock(c); + return 0; +} +EXPORT_SYMBOL_GPL(ssam_client_link); + +/** + * ssam_client_bind() - Bind an arbitrary client device to the controller. + * @client: The client device. 
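+ *
+ * As a rough sketch (hypothetical client driver code; "pdev" stands in
+ * for the client's platform device), a probe routine might do:
+ *
+ *	struct ssam_controller *ctrl;
+ *
+ *	ctrl = ssam_client_bind(&pdev->dev);
+ *	if (IS_ERR(ctrl))
+ *		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);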
+ *
+ * Link an arbitrary client device to the controller by creating a device link
+ * between it as consumer and the main controller device as provider. This
+ * function can be used for non-SSAM devices to guarantee that the controller
+ * returned by this function is valid for as long as the driver of the client
+ * device is bound, and that proper suspend and resume ordering is guaranteed.
+ *
+ * This function does essentially the same as ssam_client_link(), except that
+ * it first fetches the main controller reference, then creates the link, and
+ * finally returns this reference. Note that this function does not increment
+ * the reference counter of the controller, as, due to the link, the
+ * controller lifetime is assured as long as the driver of the client device
+ * is bound.
+ *
+ * It is not valid to use the controller reference obtained by this method
+ * outside of the driver bound to the client device at the time of calling
+ * this function, without first incrementing the reference count of the
+ * controller via ssam_controller_get(). Even after doing this, care must be
+ * taken that requests are only submitted and notifiers are only
+ * (un-)registered when the controller is active and not suspended. In other
+ * words: The device link only lives as long as the client driver is bound and
+ * any guarantees enforced by this link (e.g. active controller state) can
+ * only be relied upon as long as this link exists and may need to be enforced
+ * in other ways afterwards.
+ *
+ * The created device link does not have to be destructed manually. It is
+ * removed automatically once the driver of the client device unbinds.
+ *
+ * Return: Returns the controller on success, an error pointer with %-ENODEV
+ * if the controller is not present, not ready or going to be removed soon, or
+ * %-ENOMEM if the device link could not be created for other reasons.
+ */
+struct ssam_controller *ssam_client_bind(struct device *client)
+{
+	struct ssam_controller *c;
+	int status;
+
+	c = ssam_get_controller();
+	if (!c)
+		return ERR_PTR(-ENODEV);
+
+	status = ssam_client_link(c, client);
+
+	/*
+	 * Note that we can drop our controller reference in both success and
+	 * failure cases: On success, we have bound the controller lifetime
+	 * inherently to the client driver lifetime, i.e. the controller is
+	 * now guaranteed to outlive the client driver. On failure, we're not
+	 * going to use the controller any more.
+	 */
+	ssam_controller_put(c);
+
+	return status >= 0 ? c : ERR_PTR(status);
+}
+EXPORT_SYMBOL_GPL(ssam_client_bind);
+
+
+/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
+
+static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
+			    size_t n)
+{
+	struct ssam_controller *ctrl;
+	int ret;
+
+	ctrl = serdev_device_get_drvdata(dev);
+	ret = ssam_controller_receive_buf(ctrl, buf, n);
+
+	return ret < 0 ? 0 : ret;
+}
+
+static void ssam_write_wakeup(struct serdev_device *dev)
+{
+	ssam_controller_write_wakeup(serdev_device_get_drvdata(dev));
+}
+
+static const struct serdev_device_ops ssam_serdev_ops = {
+	.receive_buf = ssam_receive_buf,
+	.write_wakeup = ssam_write_wakeup,
+};
+
+
+/* -- SysFS and misc.
------------------------------------------------------- */ + +static int ssam_log_firmware_version(struct ssam_controller *ctrl) +{ + u32 version, a, b, c; + int status; + + status = ssam_get_firmware_version(ctrl, &version); + if (status) + return status; + + a = (version >> 24) & 0xff; + b = ((version >> 8) & 0xffff); + c = version & 0xff; + + ssam_info(ctrl, "SAM firmware version: %u.%u.%u\n", a, b, c); + return 0; +} + +static ssize_t firmware_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ssam_controller *ctrl = dev_get_drvdata(dev); + u32 version, a, b, c; + int status; + + status = ssam_get_firmware_version(ctrl, &version); + if (status < 0) + return status; + + a = (version >> 24) & 0xff; + b = ((version >> 8) & 0xffff); + c = version & 0xff; + + return sysfs_emit(buf, "%u.%u.%u\n", a, b, c); +} +static DEVICE_ATTR_RO(firmware_version); + +static struct attribute *ssam_sam_attrs[] = { + &dev_attr_firmware_version.attr, + NULL +}; + +static const struct attribute_group ssam_sam_group = { + .name = "sam", + .attrs = ssam_sam_attrs, +}; + + +/* -- ACPI based device setup. ---------------------------------------------- */ + +static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc, + void *ctx) +{ + struct serdev_device *serdev = ctx; + struct acpi_resource_uart_serialbus *uart; + bool flow_control; + int status = 0; + + if (!serdev_acpi_get_uart_resource(rsc, &uart)) + return AE_OK; + + /* Set up serdev device. */ + serdev_device_set_baudrate(serdev, uart->default_baud_rate); + + /* serdev currently only supports RTSCTS flow control. */ + if (uart->flow_control & (~((u8)ACPI_UART_FLOW_CONTROL_HW))) { + dev_warn(&serdev->dev, "setup: unsupported flow control (value: %#04x)\n", + uart->flow_control); + } + + /* Set RTSCTS flow control. */ + flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW; + serdev_device_set_flow_control(serdev, flow_control); + + /* serdev currently only supports EVEN/ODD parity. */ + switch (uart->parity) { + case ACPI_UART_PARITY_NONE: + status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE); + break; + case ACPI_UART_PARITY_EVEN: + status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN); + break; + case ACPI_UART_PARITY_ODD: + status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD); + break; + default: + dev_warn(&serdev->dev, "setup: unsupported parity (value: %#04x)\n", + uart->parity); + break; + } + + if (status) { + dev_err(&serdev->dev, "setup: failed to set parity (value: %#04x, error: %d)\n", + uart->parity, status); + return AE_ERROR; + } + + /* We've found the resource and are done. */ + return AE_CTRL_TERMINATE; +} + +static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle, + struct serdev_device *serdev) +{ + return acpi_walk_resources(handle, METHOD_NAME__CRS, + ssam_serdev_setup_via_acpi_crs, serdev); +} + + +/* -- Power management. ----------------------------------------------------- */ + +static void ssam_serial_hub_shutdown(struct device *dev) +{ + struct ssam_controller *c = dev_get_drvdata(dev); + int status; + + /* + * Try to disable notifiers, signal display-off and D0-exit, ignore any + * errors. + * + * Note: It has not been established yet if this is actually + * necessary/useful for shutdown. 
+ */
+
+	status = ssam_notifier_disable_registered(c);
+	if (status) {
+		ssam_err(c, "pm: failed to disable notifiers for shutdown: %d\n",
+			 status);
+	}
+
+	status = ssam_ctrl_notif_display_off(c);
+	if (status)
+		ssam_err(c, "pm: display-off notification failed: %d\n", status);
+
+	status = ssam_ctrl_notif_d0_exit(c);
+	if (status)
+		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int ssam_serial_hub_pm_prepare(struct device *dev)
+{
+	struct ssam_controller *c = dev_get_drvdata(dev);
+	int status;
+
+	/*
+	 * Try to signal display-off. This will quiesce events.
+	 *
+	 * Note: Signaling display-off/display-on should normally be done from
+	 * some sort of display state notifier. As that is not available,
+	 * signal it here.
+	 */
+
+	status = ssam_ctrl_notif_display_off(c);
+	if (status)
+		ssam_err(c, "pm: display-off notification failed: %d\n", status);
+
+	return status;
+}
+
+static void ssam_serial_hub_pm_complete(struct device *dev)
+{
+	struct ssam_controller *c = dev_get_drvdata(dev);
+	int status;
+
+	/*
+	 * Try to signal display-on. This will restore events.
+	 *
+	 * Note: Signaling display-off/display-on should normally be done from
+	 * some sort of display state notifier. As that is not available,
+	 * signal it here.
+	 */
+
+	status = ssam_ctrl_notif_display_on(c);
+	if (status)
+		ssam_err(c, "pm: display-on notification failed: %d\n", status);
+}
+
+static int ssam_serial_hub_pm_suspend(struct device *dev)
+{
+	struct ssam_controller *c = dev_get_drvdata(dev);
+	int status;
+
+	/*
+	 * Try to signal D0-exit, enable IRQ wakeup if specified. Abort on
+	 * error.
+	 */
+
+	status = ssam_ctrl_notif_d0_exit(c);
+	if (status) {
+		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+		goto err_notif;
+	}
+
+	status = ssam_irq_arm_for_wakeup(c);
+	if (status)
+		goto err_irq;
+
+	WARN_ON(ssam_controller_suspend(c));
+	return 0;
+
+err_irq:
+	ssam_ctrl_notif_d0_entry(c);
+err_notif:
+	ssam_ctrl_notif_display_on(c);
+	return status;
+}
+
+static int ssam_serial_hub_pm_resume(struct device *dev)
+{
+	struct ssam_controller *c = dev_get_drvdata(dev);
+	int status;
+
+	WARN_ON(ssam_controller_resume(c));
+
+	/*
+	 * Try to disable IRQ wakeup (if specified) and signal D0-entry. In
+	 * case of errors, log them and try to restore normal operation state
+	 * as far as possible.
+	 *
+	 * Note: Signaling display-off/display-on should normally be done from
+	 * some sort of display state notifier. As that is not available,
+	 * signal it here.
+	 */
+
+	ssam_irq_disarm_wakeup(c);
+
+	status = ssam_ctrl_notif_d0_entry(c);
+	if (status)
+		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+	return 0;
+}
+
+static int ssam_serial_hub_pm_freeze(struct device *dev)
+{
+	struct ssam_controller *c = dev_get_drvdata(dev);
+	int status;
+
+	/*
+	 * During hibernation image creation, we only have to ensure that the
+	 * EC doesn't send us any events. This is done via the display-off
+	 * and D0-exit notifications. Note that this sets up the wakeup IRQ
+	 * on the EC side, however, we have disabled it by default on our side
+	 * and won't enable it here.
+	 *
+	 * See ssam_serial_hub_pm_poweroff() for more details on the
+	 * hibernation process.
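+ *
+ * For orientation: in the standard kernel hibernation flow, freeze() runs
+ * before the in-memory snapshot is created, thaw() runs afterwards so that
+ * the image can be written out, poweroff() runs just before the system
+ * powers down, and restore() runs after the boot kernel has loaded the
+ * image again.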
+ */
+
+	status = ssam_ctrl_notif_d0_exit(c);
+	if (status) {
+		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+		ssam_ctrl_notif_display_on(c);
+		return status;
+	}
+
+	WARN_ON(ssam_controller_suspend(c));
+	return 0;
+}
+
+static int ssam_serial_hub_pm_thaw(struct device *dev)
+{
+	struct ssam_controller *c = dev_get_drvdata(dev);
+	int status;
+
+	WARN_ON(ssam_controller_resume(c));
+
+	status = ssam_ctrl_notif_d0_entry(c);
+	if (status)
+		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+	return status;
+}
+
+static int ssam_serial_hub_pm_poweroff(struct device *dev)
+{
+	struct ssam_controller *c = dev_get_drvdata(dev);
+	int status;
+
+	/*
+	 * When entering hibernation and powering off the system, the EC, at
+	 * least on some models, may disable events. Without us taking care of
+	 * that, this leads to events not being enabled/restored when the
+	 * system resumes from hibernation, resulting in SAM-HID subsystem
+	 * devices (i.e. keyboard, touchpad) not working, AC-plug/AC-unplug
+	 * events being gone, etc.
+	 *
+	 * To avoid these issues, we disable all registered events here (this is
+	 * likely not actually required) and restore them during the driver's PM
+	 * restore callback.
+	 *
+	 * Wakeup from the EC interrupt is not supported during hibernation,
+	 * so don't arm the IRQ here.
+	 */
+
+	status = ssam_notifier_disable_registered(c);
+	if (status) {
+		ssam_err(c, "pm: failed to disable notifiers for hibernation: %d\n",
+			 status);
+		return status;
+	}
+
+	status = ssam_ctrl_notif_d0_exit(c);
+	if (status) {
+		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+		ssam_notifier_restore_registered(c);
+		return status;
+	}
+
+	WARN_ON(ssam_controller_suspend(c));
+	return 0;
+}
+
+static int ssam_serial_hub_pm_restore(struct device *dev)
+{
+	struct ssam_controller *c = dev_get_drvdata(dev);
+	int status;
+
+	/*
+	 * Ignore but log errors, try to restore state as much as possible in
+	 * case of failures. See ssam_serial_hub_pm_poweroff() for more details
+	 * on the hibernation process.
+	 */
+
+	WARN_ON(ssam_controller_resume(c));
+
+	status = ssam_ctrl_notif_d0_entry(c);
+	if (status)
+		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+	ssam_notifier_restore_registered(c);
+	return 0;
+}
+
+static const struct dev_pm_ops ssam_serial_hub_pm_ops = {
+	.prepare = ssam_serial_hub_pm_prepare,
+	.complete = ssam_serial_hub_pm_complete,
+	.suspend = ssam_serial_hub_pm_suspend,
+	.resume = ssam_serial_hub_pm_resume,
+	.freeze = ssam_serial_hub_pm_freeze,
+	.thaw = ssam_serial_hub_pm_thaw,
+	.poweroff = ssam_serial_hub_pm_poweroff,
+	.restore = ssam_serial_hub_pm_restore,
+};
+
+#else /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ssam_serial_hub_pm_ops = { };
+
+#endif /* CONFIG_PM_SLEEP */
+
+
+/* -- Device/driver setup.
-------------------------------------------------- */ + +static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false }; +static const struct acpi_gpio_params gpio_ssam_wakeup = { 1, 0, false }; + +static const struct acpi_gpio_mapping ssam_acpi_gpios[] = { + { "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 }, + { "ssam_wakeup-gpio", &gpio_ssam_wakeup, 1 }, + { }, +}; + +static int ssam_serial_hub_probe(struct serdev_device *serdev) +{ + struct acpi_device *ssh = ACPI_COMPANION(&serdev->dev); + struct ssam_controller *ctrl; + acpi_status astatus; + int status; + + if (gpiod_count(&serdev->dev, NULL) < 0) + return -ENODEV; + + status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios); + if (status) + return status; + + /* Allocate controller. */ + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + /* Initialize controller. */ + status = ssam_controller_init(ctrl, serdev); + if (status) + goto err_ctrl_init; + + ssam_controller_lock(ctrl); + + /* Set up serdev device. */ + serdev_device_set_drvdata(serdev, ctrl); + serdev_device_set_client_ops(serdev, &ssam_serdev_ops); + status = serdev_device_open(serdev); + if (status) + goto err_devopen; + + astatus = ssam_serdev_setup_via_acpi(ssh->handle, serdev); + if (ACPI_FAILURE(astatus)) { + status = -ENXIO; + goto err_devinit; + } + + /* Start controller. */ + status = ssam_controller_start(ctrl); + if (status) + goto err_devinit; + + ssam_controller_unlock(ctrl); + + /* + * Initial SAM requests: Log version and notify default/init power + * states. + */ + status = ssam_log_firmware_version(ctrl); + if (status) + goto err_initrq; + + status = ssam_ctrl_notif_d0_entry(ctrl); + if (status) + goto err_initrq; + + status = ssam_ctrl_notif_display_on(ctrl); + if (status) + goto err_initrq; + + status = sysfs_create_group(&serdev->dev.kobj, &ssam_sam_group); + if (status) + goto err_initrq; + + /* Set up IRQ. */ + status = ssam_irq_setup(ctrl); + if (status) + goto err_irq; + + /* Finally, set main controller reference. */ + status = ssam_try_set_controller(ctrl); + if (WARN_ON(status)) /* Currently, we're the only provider. */ + goto err_mainref; + + /* + * TODO: The EC can wake up the system via the associated GPIO interrupt + * in multiple situations. One of which is the remaining battery + * capacity falling below a certain threshold. Normally, we should + * use the device_init_wakeup function, however, the EC also seems + * to have other reasons for waking up the system and it seems + * that Windows has additional checks whether the system should be + * resumed. In short, this causes some spurious unwanted wake-ups. + * For now let's thus default power/wakeup to false. + */ + device_set_wakeup_capable(&serdev->dev, true); + acpi_dev_clear_dependencies(ssh); + + return 0; + +err_mainref: + ssam_irq_free(ctrl); +err_irq: + sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group); +err_initrq: + ssam_controller_lock(ctrl); + ssam_controller_shutdown(ctrl); +err_devinit: + serdev_device_close(serdev); +err_devopen: + ssam_controller_destroy(ctrl); + ssam_controller_unlock(ctrl); +err_ctrl_init: + kfree(ctrl); + return status; +} + +static void ssam_serial_hub_remove(struct serdev_device *serdev) +{ + struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev); + int status; + + /* Clear static reference so that no one else can get a new one. */ + ssam_clear_controller(); + + /* Disable and free IRQ. 
*/ + ssam_irq_free(ctrl); + + sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group); + ssam_controller_lock(ctrl); + + /* Remove all client devices. */ + ssam_remove_clients(&serdev->dev); + + /* Act as if suspending to silence events. */ + status = ssam_ctrl_notif_display_off(ctrl); + if (status) { + dev_err(&serdev->dev, "display-off notification failed: %d\n", + status); + } + + status = ssam_ctrl_notif_d0_exit(ctrl); + if (status) { + dev_err(&serdev->dev, "D0-exit notification failed: %d\n", + status); + } + + /* Shut down controller and remove serdev device reference from it. */ + ssam_controller_shutdown(ctrl); + + /* Shut down actual transport. */ + serdev_device_wait_until_sent(serdev, 0); + serdev_device_close(serdev); + + /* Drop our controller reference. */ + ssam_controller_unlock(ctrl); + ssam_controller_put(ctrl); + + device_set_wakeup_capable(&serdev->dev, false); +} + +static const struct acpi_device_id ssam_serial_hub_match[] = { + { "MSHW0084", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_match); + +static struct serdev_device_driver ssam_serial_hub = { + .probe = ssam_serial_hub_probe, + .remove = ssam_serial_hub_remove, + .driver = { + .name = "surface_serial_hub", + .acpi_match_table = ssam_serial_hub_match, + .pm = &ssam_serial_hub_pm_ops, + .shutdown = ssam_serial_hub_shutdown, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + + +/* -- Module setup. --------------------------------------------------------- */ + +static int __init ssam_core_init(void) +{ + int status; + + status = ssam_bus_register(); + if (status) + goto err_bus; + + status = ssh_ctrl_packet_cache_init(); + if (status) + goto err_cpkg; + + status = ssam_event_item_cache_init(); + if (status) + goto err_evitem; + + status = serdev_device_driver_register(&ssam_serial_hub); + if (status) + goto err_register; + + return 0; + +err_register: + ssam_event_item_cache_destroy(); +err_evitem: + ssh_ctrl_packet_cache_destroy(); +err_cpkg: + ssam_bus_unregister(); +err_bus: + return status; +} +subsys_initcall(ssam_core_init); + +static void __exit ssam_core_exit(void) +{ + serdev_device_driver_unregister(&ssam_serial_hub); + ssam_event_item_cache_destroy(); + ssh_ctrl_packet_cache_destroy(); + ssam_bus_unregister(); +} +module_exit(ssam_core_exit); + +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>"); +MODULE_DESCRIPTION("Subsystem and Surface Serial Hub driver for Surface System Aggregator Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/surface/aggregator/ssh_msgb.h b/drivers/platform/surface/aggregator/ssh_msgb.h new file mode 100644 index 0000000000..438873e060 --- /dev/null +++ b/drivers/platform/surface/aggregator/ssh_msgb.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * SSH message builder functions. + * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H +#define _SURFACE_AGGREGATOR_SSH_MSGB_H + +#include <asm/unaligned.h> +#include <linux/types.h> + +#include <linux/surface_aggregator/controller.h> +#include <linux/surface_aggregator/serial_hub.h> + +/** + * struct msgbuf - Buffer struct to construct SSH messages. + * @begin: Pointer to the beginning of the allocated buffer space. + * @end: Pointer to the end (one past last element) of the allocated buffer + * space. + * @ptr: Pointer to the first free element in the buffer. + */ +struct msgbuf { + u8 *begin; + u8 *end; + u8 *ptr; +}; + +/** + * msgb_init() - Initialize the given message buffer struct. 
+ * @msgb: The buffer struct to initialize + * @ptr: Pointer to the underlying memory by which the buffer will be backed. + * @cap: Size of the underlying memory. + * + * Initialize the given message buffer struct using the provided memory as + * backing. + */ +static inline void msgb_init(struct msgbuf *msgb, u8 *ptr, size_t cap) +{ + msgb->begin = ptr; + msgb->end = ptr + cap; + msgb->ptr = ptr; +} + +/** + * msgb_bytes_used() - Return the current number of bytes used in the buffer. + * @msgb: The message buffer. + */ +static inline size_t msgb_bytes_used(const struct msgbuf *msgb) +{ + return msgb->ptr - msgb->begin; +} + +static inline void __msgb_push_u8(struct msgbuf *msgb, u8 value) +{ + *msgb->ptr = value; + msgb->ptr += sizeof(u8); +} + +static inline void __msgb_push_u16(struct msgbuf *msgb, u16 value) +{ + put_unaligned_le16(value, msgb->ptr); + msgb->ptr += sizeof(u16); +} + +/** + * msgb_push_u16() - Push a u16 value to the buffer. + * @msgb: The message buffer. + * @value: The value to push to the buffer. + */ +static inline void msgb_push_u16(struct msgbuf *msgb, u16 value) +{ + if (WARN_ON(msgb->ptr + sizeof(u16) > msgb->end)) + return; + + __msgb_push_u16(msgb, value); +} + +/** + * msgb_push_syn() - Push SSH SYN bytes to the buffer. + * @msgb: The message buffer. + */ +static inline void msgb_push_syn(struct msgbuf *msgb) +{ + msgb_push_u16(msgb, SSH_MSG_SYN); +} + +/** + * msgb_push_buf() - Push raw data to the buffer. + * @msgb: The message buffer. + * @buf: The data to push to the buffer. + * @len: The length of the data to push to the buffer. + */ +static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len) +{ + msgb->ptr = memcpy(msgb->ptr, buf, len) + len; +} + +/** + * msgb_push_crc() - Compute CRC and push it to the buffer. + * @msgb: The message buffer. + * @buf: The data for which the CRC should be computed. + * @len: The length of the data for which the CRC should be computed. + */ +static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len) +{ + msgb_push_u16(msgb, ssh_crc(buf, len)); +} + +/** + * msgb_push_frame() - Push a SSH message frame header to the buffer. + * @msgb: The message buffer + * @ty: The type of the frame. + * @len: The length of the payload of the frame. + * @seq: The sequence ID of the frame/packet. + */ +static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq) +{ + u8 *const begin = msgb->ptr; + + if (WARN_ON(msgb->ptr + sizeof(struct ssh_frame) > msgb->end)) + return; + + __msgb_push_u8(msgb, ty); /* Frame type. */ + __msgb_push_u16(msgb, len); /* Frame payload length. */ + __msgb_push_u8(msgb, seq); /* Frame sequence ID. */ + + msgb_push_crc(msgb, begin, msgb->ptr - begin); +} + +/** + * msgb_push_ack() - Push a SSH ACK frame to the buffer. + * @msgb: The message buffer + * @seq: The sequence ID of the frame/packet to be ACKed. + */ +static inline void msgb_push_ack(struct msgbuf *msgb, u8 seq) +{ + /* SYN. */ + msgb_push_syn(msgb); + + /* ACK-type frame + CRC. */ + msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq); + + /* Payload CRC (ACK-type frames do not have a payload). */ + msgb_push_crc(msgb, msgb->ptr, 0); +} + +/** + * msgb_push_nak() - Push a SSH NAK frame to the buffer. + * @msgb: The message buffer + */ +static inline void msgb_push_nak(struct msgbuf *msgb) +{ + /* SYN. */ + msgb_push_syn(msgb); + + /* NAK-type frame + CRC. */ + msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00); + + /* Payload CRC (ACK-type frames do not have a payload). 
*/ + msgb_push_crc(msgb, msgb->ptr, 0); +} + +/** + * msgb_push_cmd() - Push a SSH command frame with payload to the buffer. + * @msgb: The message buffer. + * @seq: The sequence ID (SEQ) of the frame/packet. + * @rqid: The request ID (RQID) of the request contained in the frame. + * @rqst: The request to wrap in the frame. + */ +static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid, + const struct ssam_request *rqst) +{ + const u8 type = SSH_FRAME_TYPE_DATA_SEQ; + u8 *cmd; + + /* SYN. */ + msgb_push_syn(msgb); + + /* Command frame + CRC. */ + msgb_push_frame(msgb, type, sizeof(struct ssh_command) + rqst->length, seq); + + /* Frame payload: Command struct + payload. */ + if (WARN_ON(msgb->ptr + sizeof(struct ssh_command) > msgb->end)) + return; + + cmd = msgb->ptr; + + __msgb_push_u8(msgb, SSH_PLD_TYPE_CMD); /* Payload type. */ + __msgb_push_u8(msgb, rqst->target_category); /* Target category. */ + __msgb_push_u8(msgb, rqst->target_id); /* Target ID. */ + __msgb_push_u8(msgb, SSAM_SSH_TID_HOST); /* Source ID. */ + __msgb_push_u8(msgb, rqst->instance_id); /* Instance ID. */ + __msgb_push_u16(msgb, rqid); /* Request ID. */ + __msgb_push_u8(msgb, rqst->command_id); /* Command ID. */ + + /* Command payload. */ + msgb_push_buf(msgb, rqst->payload, rqst->length); + + /* CRC for command struct + payload. */ + msgb_push_crc(msgb, cmd, msgb->ptr - cmd); +} + +#endif /* _SURFACE_AGGREGATOR_SSH_MSGB_H */ diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c new file mode 100644 index 0000000000..def8d7ac54 --- /dev/null +++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c @@ -0,0 +1,2086 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * SSH packet transport layer. + * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <asm/unaligned.h> +#include <linux/atomic.h> +#include <linux/error-injection.h> +#include <linux/jiffies.h> +#include <linux/kfifo.h> +#include <linux/kref.h> +#include <linux/kthread.h> +#include <linux/ktime.h> +#include <linux/limits.h> +#include <linux/list.h> +#include <linux/lockdep.h> +#include <linux/serdev.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#include <linux/surface_aggregator/serial_hub.h> + +#include "ssh_msgb.h" +#include "ssh_packet_layer.h" +#include "ssh_parser.h" + +#include "trace.h" + +/* + * To simplify reasoning about the code below, we define a few concepts. The + * system below is similar to a state-machine for packets, however, there are + * too many states to explicitly write them down. To (somewhat) manage the + * states and packets we rely on flags, reference counting, and some simple + * concepts. State transitions are triggered by actions. + * + * >> Actions << + * + * - submit + * - transmission start (process next item in queue) + * - transmission finished (guaranteed to never be parallel to transmission + * start) + * - ACK received + * - NAK received (this is equivalent to issuing re-submit for all pending + * packets) + * - timeout (this is equivalent to re-issuing a submit or canceling) + * - cancel (non-pending and pending) + * + * >> Data Structures, Packet Ownership, General Overview << + * + * The code below employs two main data structures: The packet queue, + * containing all packets scheduled for transmission, and the set of pending + * packets, containing all packets awaiting an ACK. + * + * Shared ownership of a packet is controlled via reference counting. 
Inside
+ * the transport system are a total of five packet owners:
+ *
+ * - the packet queue,
+ * - the pending set,
+ * - the transmitter thread,
+ * - the receiver thread (via ACKing), and
+ * - the timeout work item.
+ *
+ * Normal operation is as follows: The initial reference of the packet is
+ * obtained by submitting the packet and queuing it. The transmitter thread
+ * takes packets from the queue. By doing this, it does not increment the
+ * refcount but takes over the reference (removing it from the queue). If the
+ * packet is sequenced (i.e. needs to be ACKed by the client), the
+ * transmitter thread sets up the timeout and adds the packet to the pending
+ * set before starting to transmit it. As the timeout is handled by a reaper
+ * task, no additional reference for it is needed. After the transmit is
+ * done, the reference held by the transmitter thread is dropped. If the
+ * packet is unsequenced (i.e. does not need an ACK), the packet is completed
+ * by the transmitter thread before dropping that reference.
+ *
+ * On receival of an ACK, the receiver thread removes and obtains the
+ * reference to the packet from the pending set. The receiver thread will then
+ * complete the packet and drop its reference.
+ *
+ * On receival of a NAK, the receiver thread re-submits all currently pending
+ * packets.
+ *
+ * Packet timeouts are detected by the timeout reaper. This is a task,
+ * scheduled depending on the earliest packet timeout expiration date,
+ * checking all currently pending packets for whether their timeout has
+ * expired. If the timeout of a packet has expired, it is re-submitted and the
+ * number of tries of this packet is incremented. If this number reaches its
+ * limit, the packet will be completed with a failure.
+ *
+ * On transmission failure (such as repeated packet timeouts), the completion
+ * callback is immediately run on the thread on which the error was detected.
+ *
+ * To ensure that a packet eventually leaves the system, it is marked as
+ * "locked" directly before it is going to be completed or when it is
+ * canceled. Marking a packet as "locked" has the effect that passing and
+ * creating new references of the packet is disallowed. This means that the
+ * packet cannot be added to the queue, the pending set, and the timeout, or
+ * be picked up by the transmitter thread or receiver thread. To remove a
+ * packet from the system it has to be marked as locked and subsequently all
+ * references from the data structures (queue, pending) have to be removed.
+ * References held by threads will eventually be dropped automatically as
+ * their execution progresses.
+ *
+ * Note that the packet completion callback is, in case of success and for a
+ * sequenced packet, guaranteed to run on the receiver thread, thus providing
+ * a way to reliably identify responses to the packet. The packet completion
+ * callback is only run once and it does not indicate that the packet has
+ * fully left the system (for this, one should rely on the release method,
+ * triggered when the reference count of the packet reaches zero). In case of
+ * re-submission (and with somewhat unlikely timing), it may be possible that
+ * the packet is being re-transmitted while the completion callback runs.
+ * Completion will occur both on success and internal error, as well as when
+ * the packet is canceled.
+ *
+ * >> Flags <<
+ *
+ * Flags are used to indicate the state and progression of a packet.
Some flags + * have stricter guarantees than other: + * + * - locked + * Indicates if the packet is locked. If the packet is locked, passing and/or + * creating additional references to the packet is forbidden. The packet thus + * may not be queued, dequeued, or removed or added to the pending set. Note + * that the packet state flags may still change (e.g. it may be marked as + * ACKed, transmitted, ...). + * + * - completed + * Indicates if the packet completion callback has been executed or is about + * to be executed. This flag is used to ensure that the packet completion + * callback is only run once. + * + * - queued + * Indicates if a packet is present in the submission queue or not. This flag + * must only be modified with the queue lock held, and must be coherent to the + * presence of the packet in the queue. + * + * - pending + * Indicates if a packet is present in the set of pending packets or not. + * This flag must only be modified with the pending lock held, and must be + * coherent to the presence of the packet in the pending set. + * + * - transmitting + * Indicates if the packet is currently transmitting. In case of + * re-transmissions, it is only safe to wait on the "transmitted" completion + * after this flag has been set. The completion will be set both in success + * and error case. + * + * - transmitted + * Indicates if the packet has been transmitted. This flag is not cleared by + * the system, thus it indicates the first transmission only. + * + * - acked + * Indicates if the packet has been acknowledged by the client. There are no + * other guarantees given. For example, the packet may still be canceled + * and/or the completion may be triggered an error even though this bit is + * set. Rely on the status provided to the completion callback instead. + * + * - canceled + * Indicates if the packet has been canceled from the outside. There are no + * other guarantees given. Specifically, the packet may be completed by + * another part of the system before the cancellation attempts to complete it. + * + * >> General Notes << + * + * - To avoid deadlocks, if both queue and pending locks are required, the + * pending lock must be acquired before the queue lock. + * + * - The packet priority must be accessed only while holding the queue lock. + * + * - The packet timestamp must be accessed only while holding the pending + * lock. + */ + +/* + * SSH_PTL_MAX_PACKET_TRIES - Maximum transmission attempts for packet. + * + * Maximum number of transmission attempts per sequenced packet in case of + * time-outs. Must be smaller than 16. If the packet times out after this + * amount of tries, the packet will be completed with %-ETIMEDOUT as status + * code. + */ +#define SSH_PTL_MAX_PACKET_TRIES 3 + +/* + * SSH_PTL_TX_TIMEOUT - Packet transmission timeout. + * + * Timeout in jiffies for packet transmission via the underlying serial + * device. If transmitting the packet takes longer than this timeout, the + * packet will be completed with -ETIMEDOUT. It will not be re-submitted. + */ +#define SSH_PTL_TX_TIMEOUT HZ + +/* + * SSH_PTL_PACKET_TIMEOUT - Packet response timeout. + * + * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this + * time-frame after starting transmission, the packet will be re-submitted. + */ +#define SSH_PTL_PACKET_TIMEOUT ms_to_ktime(1000) + +/* + * SSH_PTL_PACKET_TIMEOUT_RESOLUTION - Packet timeout granularity. + * + * Time-resolution for timeouts. 
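+ * The value below works out to two jiffies (2000 / HZ milliseconds) or
+ * 50 ms, whichever is larger; at HZ=250, for example, this is
+ * max(8, 50) = 50 ms.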
Should be larger than one jiffy to avoid + * direct re-scheduling of reaper work_struct. + */ +#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50)) + +/* + * SSH_PTL_MAX_PENDING - Maximum number of pending packets. + * + * Maximum number of sequenced packets concurrently waiting for an ACK. + * Packets marked as blocking will not be transmitted while this limit is + * reached. + */ +#define SSH_PTL_MAX_PENDING 1 + +/* + * SSH_PTL_RX_BUF_LEN - Evaluation-buffer size in bytes. + */ +#define SSH_PTL_RX_BUF_LEN 4096 + +/* + * SSH_PTL_RX_FIFO_LEN - Fifo input-buffer size in bytes. + */ +#define SSH_PTL_RX_FIFO_LEN 4096 + +#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION + +/** + * ssh_ptl_should_drop_ack_packet() - Error injection hook to drop ACK packets. + * + * Useful to test detection and handling of automated re-transmits by the EC. + * Specifically of packets that the EC considers not-ACKed but the driver + * already considers ACKed (due to dropped ACK). In this case, the EC + * re-transmits the packet-to-be-ACKed and the driver should detect it as + * duplicate/already handled. Note that the driver should still send an ACK + * for the re-transmitted packet. + */ +static noinline bool ssh_ptl_should_drop_ack_packet(void) +{ + return false; +} +ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE); + +/** + * ssh_ptl_should_drop_nak_packet() - Error injection hook to drop NAK packets. + * + * Useful to test/force automated (timeout-based) re-transmit by the EC. + * Specifically, packets that have not reached the driver completely/with valid + * checksums. Only useful in combination with receival of (injected) bad data. + */ +static noinline bool ssh_ptl_should_drop_nak_packet(void) +{ + return false; +} +ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE); + +/** + * ssh_ptl_should_drop_dsq_packet() - Error injection hook to drop sequenced + * data packet. + * + * Useful to test re-transmit timeout of the driver. If the data packet has not + * been ACKed after a certain time, the driver should re-transmit the packet up + * to limited number of times defined in SSH_PTL_MAX_PACKET_TRIES. + */ +static noinline bool ssh_ptl_should_drop_dsq_packet(void) +{ + return false; +} +ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE); + +/** + * ssh_ptl_should_fail_write() - Error injection hook to make + * serdev_device_write() fail. + * + * Hook to simulate errors in serdev_device_write when transmitting packets. + */ +static noinline int ssh_ptl_should_fail_write(void) +{ + return 0; +} +ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO); + +/** + * ssh_ptl_should_corrupt_tx_data() - Error injection hook to simulate invalid + * data being sent to the EC. + * + * Hook to simulate corrupt/invalid data being sent from host (driver) to EC. + * Causes the packet data to be actively corrupted by overwriting it with + * pre-defined values, such that it becomes invalid, causing the EC to respond + * with a NAK packet. Useful to test handling of NAK packets received by the + * driver. + */ +static noinline bool ssh_ptl_should_corrupt_tx_data(void) +{ + return false; +} +ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE); + +/** + * ssh_ptl_should_corrupt_rx_syn() - Error injection hook to simulate invalid + * data being sent by the EC. + * + * Hook to simulate invalid SYN bytes, i.e. an invalid start of messages and + * test handling thereof in the driver. 
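+ *
+ * As a rough sketch, such a hook could be activated at runtime via the
+ * fail_function error-injection debugfs interface (assuming
+ * CONFIG_FUNCTION_ERROR_INJECTION and a mounted debugfs; exact paths and
+ * attributes may vary):
+ *
+ *	cd /sys/kernel/debug/fail_function
+ *	echo ssh_ptl_should_corrupt_rx_syn > inject
+ *	echo 1 > ssh_ptl_should_corrupt_rx_syn/retval
+ *	echo 100 > probability
+ *	echo -1 > times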
+ */ +static noinline bool ssh_ptl_should_corrupt_rx_syn(void) +{ + return false; +} +ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE); + +/** + * ssh_ptl_should_corrupt_rx_data() - Error injection hook to simulate invalid + * data being sent by the EC. + * + * Hook to simulate invalid data/checksum of the message frame and test handling + * thereof in the driver. + */ +static noinline bool ssh_ptl_should_corrupt_rx_data(void) +{ + return false; +} +ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE); + +static bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet) +{ + if (likely(!ssh_ptl_should_drop_ack_packet())) + return false; + + trace_ssam_ei_tx_drop_ack_packet(packet); + ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n", + packet); + + return true; +} + +static bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet) +{ + if (likely(!ssh_ptl_should_drop_nak_packet())) + return false; + + trace_ssam_ei_tx_drop_nak_packet(packet); + ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n", + packet); + + return true; +} + +static bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet) +{ + if (likely(!ssh_ptl_should_drop_dsq_packet())) + return false; + + trace_ssam_ei_tx_drop_dsq_packet(packet); + ptl_info(packet->ptl, + "packet error injection: dropping sequenced data packet %p\n", + packet); + + return true; +} + +static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet) +{ + /* Ignore packets that don't carry any data (i.e. flush). */ + if (!packet->data.ptr || !packet->data.len) + return false; + + switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) { + case SSH_FRAME_TYPE_ACK: + return __ssh_ptl_should_drop_ack_packet(packet); + + case SSH_FRAME_TYPE_NAK: + return __ssh_ptl_should_drop_nak_packet(packet); + + case SSH_FRAME_TYPE_DATA_SEQ: + return __ssh_ptl_should_drop_dsq_packet(packet); + + default: + return false; + } +} + +static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet, + const unsigned char *buf, size_t count) +{ + int status; + + status = ssh_ptl_should_fail_write(); + if (unlikely(status)) { + trace_ssam_ei_tx_fail_write(packet, status); + ptl_info(packet->ptl, + "packet error injection: simulating transmit error %d, packet %p\n", + status, packet); + + return status; + } + + return serdev_device_write_buf(ptl->serdev, buf, count); +} + +static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet) +{ + /* Ignore packets that don't carry any data (i.e. flush). */ + if (!packet->data.ptr || !packet->data.len) + return; + + /* Only allow sequenced data packets to be modified. */ + if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ) + return; + + if (likely(!ssh_ptl_should_corrupt_tx_data())) + return; + + trace_ssam_ei_tx_corrupt_data(packet); + ptl_info(packet->ptl, + "packet error injection: simulating invalid transmit data on packet %p\n", + packet); + + /* + * NB: The value 0xb3 has been chosen more or less randomly so that it + * doesn't have any (major) overlap with the SYN bytes (aa 55) and is + * non-trivial (i.e. non-zero, non-0xff). + */ + memset(packet->data.ptr, 0xb3, packet->data.len); +} + +static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl, + struct ssam_span *data) +{ + struct ssam_span frame; + + /* Check if there actually is something to corrupt. 
*/ + if (!sshp_find_syn(data, &frame)) + return; + + if (likely(!ssh_ptl_should_corrupt_rx_syn())) + return; + + trace_ssam_ei_rx_corrupt_syn(data->len); + + data->ptr[1] = 0xb3; /* Set second byte of SYN to "random" value. */ +} + +static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl, + struct ssam_span *frame) +{ + size_t payload_len, message_len; + struct ssh_frame *sshf; + + /* Ignore incomplete messages, will get handled once it's complete. */ + if (frame->len < SSH_MESSAGE_LENGTH(0)) + return; + + /* Ignore incomplete messages, part 2. */ + payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]); + message_len = SSH_MESSAGE_LENGTH(payload_len); + if (frame->len < message_len) + return; + + if (likely(!ssh_ptl_should_corrupt_rx_data())) + return; + + sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)]; + trace_ssam_ei_rx_corrupt_data(sshf); + + /* + * Flip bits in first byte of payload checksum. This is basically + * equivalent to a payload/frame data error without us having to worry + * about (the, arguably pretty small, probability of) accidental + * checksum collisions. + */ + frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2]; +} + +#else /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */ + +static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet) +{ + return false; +} + +static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl, + struct ssh_packet *packet, + const unsigned char *buf, + size_t count) +{ + return serdev_device_write_buf(ptl->serdev, buf, count); +} + +static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet) +{ +} + +static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl, + struct ssam_span *data) +{ +} + +static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl, + struct ssam_span *frame) +{ +} + +#endif /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */ + +static void __ssh_ptl_packet_release(struct kref *kref) +{ + struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt); + + trace_ssam_packet_release(p); + + ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p); + p->ops->release(p); +} + +/** + * ssh_packet_get() - Increment reference count of packet. + * @packet: The packet to increment the reference count of. + * + * Increments the reference count of the given packet. See ssh_packet_put() + * for the counter-part of this function. + * + * Return: Returns the packet provided as input. + */ +struct ssh_packet *ssh_packet_get(struct ssh_packet *packet) +{ + if (packet) + kref_get(&packet->refcnt); + return packet; +} +EXPORT_SYMBOL_GPL(ssh_packet_get); + +/** + * ssh_packet_put() - Decrement reference count of packet. + * @packet: The packet to decrement the reference count of. + * + * If the reference count reaches zero, the ``release`` callback specified in + * the packet's &struct ssh_packet_ops, i.e. ``packet->ops->release``, will be + * called. + * + * See ssh_packet_get() for the counter-part of this function. + */ +void ssh_packet_put(struct ssh_packet *packet) +{ + if (packet) + kref_put(&packet->refcnt, __ssh_ptl_packet_release); +} +EXPORT_SYMBOL_GPL(ssh_packet_put); + +static u8 ssh_packet_get_seq(struct ssh_packet *packet) +{ + return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)]; +} + +/** + * ssh_packet_init() - Initialize SSH packet. + * @packet: The packet to initialize. + * @type: Type-flags of the packet. + * @priority: Priority of the packet. See SSH_PACKET_PRIORITY() for details. + * @ops: Packet operations. 
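+ *
+ * As an illustration, the ACK control-packet path below (see
+ * ssh_ptl_send_ack()) uses this function roughly as follows::
+ *
+ *	ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(ACK, 0),
+ *			&ssh_ptl_ctrl_packet_ops);
+ *
+ *	msgb_init(&msgb, buf.ptr, buf.len);
+ *	msgb_push_ack(&msgb, seq);
+ *	ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
+ *
+ *	ssh_ptl_submit(ptl, packet);
+ *	ssh_packet_put(packet);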
+ * + * Initializes the given SSH packet. Sets the transmission buffer pointer to + * %NULL and the transmission buffer length to zero. For data-type packets, + * this buffer has to be set separately via ssh_packet_set_data() before + * submission, and must contain a valid SSH message, i.e. frame with optional + * payload of any type. + */ +void ssh_packet_init(struct ssh_packet *packet, unsigned long type, + u8 priority, const struct ssh_packet_ops *ops) +{ + kref_init(&packet->refcnt); + + packet->ptl = NULL; + INIT_LIST_HEAD(&packet->queue_node); + INIT_LIST_HEAD(&packet->pending_node); + + packet->state = type & SSH_PACKET_FLAGS_TY_MASK; + packet->priority = priority; + packet->timestamp = KTIME_MAX; + + packet->data.ptr = NULL; + packet->data.len = 0; + + packet->ops = ops; +} + +static struct kmem_cache *ssh_ctrl_packet_cache; + +/** + * ssh_ctrl_packet_cache_init() - Initialize the control packet cache. + */ +int ssh_ctrl_packet_cache_init(void) +{ + const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL; + const unsigned int align = __alignof__(struct ssh_packet); + struct kmem_cache *cache; + + cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL); + if (!cache) + return -ENOMEM; + + ssh_ctrl_packet_cache = cache; + return 0; +} + +/** + * ssh_ctrl_packet_cache_destroy() - Deinitialize the control packet cache. + */ +void ssh_ctrl_packet_cache_destroy(void) +{ + kmem_cache_destroy(ssh_ctrl_packet_cache); + ssh_ctrl_packet_cache = NULL; +} + +/** + * ssh_ctrl_packet_alloc() - Allocate packet from control packet cache. + * @packet: Where the pointer to the newly allocated packet should be stored. + * @buffer: The buffer corresponding to this packet. + * @flags: Flags used for allocation. + * + * Allocates a packet and corresponding transport buffer from the control + * packet cache. Sets the packet's buffer reference to the allocated buffer. + * The packet must be freed via ssh_ctrl_packet_free(), which will also free + * the corresponding buffer. The corresponding buffer must not be freed + * separately. Intended to be used with %ssh_ptl_ctrl_packet_ops as packet + * operations. + * + * Return: Returns zero on success, %-ENOMEM if the allocation failed. + */ +static int ssh_ctrl_packet_alloc(struct ssh_packet **packet, + struct ssam_span *buffer, gfp_t flags) +{ + *packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags); + if (!*packet) + return -ENOMEM; + + buffer->ptr = (u8 *)(*packet + 1); + buffer->len = SSH_MSG_LEN_CTRL; + + trace_ssam_ctrl_packet_alloc(*packet, buffer->len); + return 0; +} + +/** + * ssh_ctrl_packet_free() - Free packet allocated from control packet cache. + * @p: The packet to free. + */ +static void ssh_ctrl_packet_free(struct ssh_packet *p) +{ + trace_ssam_ctrl_packet_free(p); + kmem_cache_free(ssh_ctrl_packet_cache, p); +} + +static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = { + .complete = NULL, + .release = ssh_ctrl_packet_free, +}; + +static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now, + ktime_t expires) +{ + unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now)); + ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION); + + spin_lock(&ptl->rtx_timeout.lock); + + /* Re-adjust / schedule reaper only if it is above resolution delta. 
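+	 * That is, we only re-arm the reaper if the new expiration time is
+	 * more than SSH_PTL_PACKET_TIMEOUT_RESOLUTION before the currently
+	 * scheduled expiration; anything closer is handled by the already
+	 * armed reaper within its resolution bound.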
 */
+	if (ktime_before(aexp, ptl->rtx_timeout.expires)) {
+		ptl->rtx_timeout.expires = expires;
+		mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
+	}
+
+	spin_unlock(&ptl->rtx_timeout.lock);
+}
+
+/* Must be called with queue lock held. */
+static void ssh_packet_next_try(struct ssh_packet *p)
+{
+	u8 base = ssh_packet_priority_get_base(p->priority);
+	u8 try = ssh_packet_priority_get_try(p->priority);
+
+	lockdep_assert_held(&p->ptl->queue.lock);
+
+	/*
+	 * Ensure that we write the priority in one go via WRITE_ONCE() so we
+	 * can access it via READ_ONCE() for tracing. Note that other access
+	 * is guarded by the queue lock, so no need to use READ_ONCE() there.
+	 */
+	WRITE_ONCE(p->priority, __SSH_PACKET_PRIORITY(base, try + 1));
+}
+
+/* Must be called with queue lock held. */
+static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
+{
+	struct list_head *head;
+	struct ssh_packet *q;
+
+	lockdep_assert_held(&p->ptl->queue.lock);
+
+	/*
+	 * We generally assume that there are fewer control (ACK/NAK) packets
+	 * and re-submitted data packets than there are normal data packets
+	 * (at least in situations in which many packets are queued; if there
+	 * aren't many packets queued, the decision on how to iterate should
+	 * be basically irrelevant; the number of control/data packets is more
+	 * or less limited via the maximum number of pending packets). Thus,
+	 * when inserting a control or re-submitted data packet (determined by
+	 * their priority), we search from front to back. Normal data packets
+	 * are usually queued directly at the tail of the queue, so for those
+	 * we search from back to front.
+	 */
+
+	if (p->priority > SSH_PACKET_PRIORITY(DATA, 0)) {
+		list_for_each(head, &p->ptl->queue.head) {
+			q = list_entry(head, struct ssh_packet, queue_node);
+
+			if (q->priority < p->priority)
+				break;
+		}
+	} else {
+		list_for_each_prev(head, &p->ptl->queue.head) {
+			q = list_entry(head, struct ssh_packet, queue_node);
+
+			if (q->priority >= p->priority) {
+				head = head->next;
+				break;
+			}
+		}
+	}
+
+	return head;
+}
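+
+/*
+ * Illustration of the resulting queue order (assuming the priority encoding
+ * in serial_hub.h, where the base priority occupies the high bits and the
+ * try counter the low bits): ACK and NAK packets have a higher base priority
+ * than data packets and therefore sort towards the front, a re-submitted
+ * data packet (try > 0, see ssh_packet_next_try()) sorts before freshly
+ * submitted data packets (try == 0), and fresh data packets are appended at
+ * the tail.
+ */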
+/* Must be called with queue lock held. */
+static int __ssh_ptl_queue_push(struct ssh_packet *packet)
+{
+	struct ssh_ptl *ptl = packet->ptl;
+	struct list_head *head;
+
+	lockdep_assert_held(&ptl->queue.lock);
+
+	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+		return -ESHUTDOWN;
+
+	/* Avoid further transitions when canceling/completing. */
+	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
+		return -EINVAL;
+
+	/* If this packet has already been queued, do not add it. */
+	if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state))
+		return -EALREADY;
+
+	head = __ssh_ptl_queue_find_entrypoint(packet);
+
+	list_add_tail(&ssh_packet_get(packet)->queue_node, head);
+	return 0;
+}
+
+static int ssh_ptl_queue_push(struct ssh_packet *packet)
+{
+	int status;
+
+	spin_lock(&packet->ptl->queue.lock);
+	status = __ssh_ptl_queue_push(packet);
+	spin_unlock(&packet->ptl->queue.lock);
+
+	return status;
+}
+
+static void ssh_ptl_queue_remove(struct ssh_packet *packet)
+{
+	struct ssh_ptl *ptl = packet->ptl;
+
+	spin_lock(&ptl->queue.lock);
+
+	if (!test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
+		spin_unlock(&ptl->queue.lock);
+		return;
+	}
+
+	list_del(&packet->queue_node);
+
+	spin_unlock(&ptl->queue.lock);
+	ssh_packet_put(packet);
+}
+
+static void ssh_ptl_pending_push(struct ssh_packet *p)
+{
+	struct ssh_ptl *ptl = p->ptl;
+	const ktime_t timestamp = ktime_get_coarse_boottime();
+	const ktime_t timeout = ptl->rtx_timeout.timeout;
+
+	/*
+	 * Note: We can get the time for the timestamp before acquiring the
+	 * lock as this is the only place we're setting it and this function
+	 * is called only from the transmitter thread. Thus it is not possible
+	 * to overwrite the timestamp with an outdated value below.
+	 */
+
+	spin_lock(&ptl->pending.lock);
+
+	/* If we are canceling/completing this packet, do not add it. */
+	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) {
+		spin_unlock(&ptl->pending.lock);
+		return;
+	}
+
+	/*
+	 * On re-submission, the packet has already been added to the pending
+	 * set. We still need to update the timestamp as the packet timeout is
+	 * reset for each (re-)submission.
+	 */
+	p->timestamp = timestamp;
+
+	/* In case it is already pending (e.g. re-submission), do not add it. */
+	if (!test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &p->state)) {
+		atomic_inc(&ptl->pending.count);
+		list_add_tail(&ssh_packet_get(p)->pending_node, &ptl->pending.head);
+	}
+
+	spin_unlock(&ptl->pending.lock);
+
+	/* Arm/update timeout reaper. */
+	ssh_ptl_timeout_reaper_mod(ptl, timestamp, timestamp + timeout);
+}
+
+static void ssh_ptl_pending_remove(struct ssh_packet *packet)
+{
+	struct ssh_ptl *ptl = packet->ptl;
+
+	spin_lock(&ptl->pending.lock);
+
+	if (!test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
+		spin_unlock(&ptl->pending.lock);
+		return;
+	}
+
+	list_del(&packet->pending_node);
+	atomic_dec(&ptl->pending.count);
+
+	spin_unlock(&ptl->pending.lock);
+
+	ssh_packet_put(packet);
+}
+
+/* Warning: Does not check/set "completed" bit. */
+static void __ssh_ptl_complete(struct ssh_packet *p, int status)
+{
+	struct ssh_ptl *ptl = READ_ONCE(p->ptl);
+
+	trace_ssam_packet_complete(p, status);
+	ptl_dbg_cond(ptl, "ptl: completing packet %p (status: %d)\n", p, status);
+
+	if (p->ops->complete)
+		p->ops->complete(p, status);
+}
+
+static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
+{
+	/*
+	 * A call to this function should in general be preceded by
+	 * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state) to avoid re-adding the
+	 * packet to the structures it's going to be removed from.
+	 *
+	 * The set_bit call does not need explicit memory barriers as the
+	 * implicit barrier of the test_and_set_bit() call below ensures that
+	 * the flag is visible before we actually attempt to remove the packet.
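+	 * (Atomic RMW operations that return a value, such as
+	 * test_and_set_bit(), are fully ordered and thus act as the required
+	 * barrier here.)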
+ */ + + if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) + return; + + ssh_ptl_queue_remove(p); + ssh_ptl_pending_remove(p); + + __ssh_ptl_complete(p, status); +} + +static bool ssh_ptl_tx_can_process(struct ssh_packet *packet) +{ + struct ssh_ptl *ptl = packet->ptl; + + if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state)) + return !atomic_read(&ptl->pending.count); + + /* We can always process non-blocking packets. */ + if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state)) + return true; + + /* If we are already waiting for this packet, send it again. */ + if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) + return true; + + /* Otherwise: Check if we have the capacity to send. */ + return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING; +} + +static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl) +{ + struct ssh_packet *packet = ERR_PTR(-ENOENT); + struct ssh_packet *p, *n; + + spin_lock(&ptl->queue.lock); + list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) { + /* + * If we are canceling or completing this packet, ignore it. + * It's going to be removed from this queue shortly. + */ + if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) + continue; + + /* + * Packets should be ordered non-blocking/to-be-resent first. + * If we cannot process this packet, assume that we can't + * process any following packet either and abort. + */ + if (!ssh_ptl_tx_can_process(p)) { + packet = ERR_PTR(-EBUSY); + break; + } + + /* + * We are allowed to change the state now. Remove it from the + * queue and mark it as being transmitted. + */ + + list_del(&p->queue_node); + + set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state); + /* Ensure that state never gets zero. */ + smp_mb__before_atomic(); + clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state); + + /* + * Update number of tries. This directly influences the + * priority in case the packet is re-submitted (e.g. via + * timeout/NAK). Note that all reads and writes to the + * priority after the first submission are guarded by the + * queue lock. + */ + ssh_packet_next_try(p); + + packet = p; + break; + } + spin_unlock(&ptl->queue.lock); + + return packet; +} + +static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl) +{ + struct ssh_packet *p; + + p = ssh_ptl_tx_pop(ptl); + if (IS_ERR(p)) + return p; + + if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) { + ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p); + ssh_ptl_pending_push(p); + } else { + ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p); + } + + return p; +} + +static void ssh_ptl_tx_compl_success(struct ssh_packet *packet) +{ + struct ssh_ptl *ptl = packet->ptl; + + ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet); + + /* Transition state to "transmitted". */ + set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state); + /* Ensure that state never gets zero. */ + smp_mb__before_atomic(); + clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state); + + /* If the packet is unsequenced, we're done: Lock and complete. */ + if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) { + set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state); + ssh_ptl_remove_and_complete(packet, 0); + } + + /* + * Notify that a packet transmission has finished. In general we're only + * waiting for one packet (if any), so wake_up_all should be fine. 
+ */ + wake_up_all(&ptl->tx.packet_wq); +} + +static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status) +{ + /* Transmission failure: Lock the packet and try to complete it. */ + set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state); + /* Ensure that state never gets zero. */ + smp_mb__before_atomic(); + clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state); + + ptl_err(packet->ptl, "ptl: transmission error: %d\n", status); + ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet); + + ssh_ptl_remove_and_complete(packet, status); + + /* + * Notify that a packet transmission has finished. In general we're only + * waiting for one packet (if any), so wake_up_all should be fine. + */ + wake_up_all(&packet->ptl->tx.packet_wq); +} + +static long ssh_ptl_tx_wait_packet(struct ssh_ptl *ptl) +{ + int status; + + status = wait_for_completion_interruptible(&ptl->tx.thread_cplt_pkt); + reinit_completion(&ptl->tx.thread_cplt_pkt); + + /* + * Ensure completion is cleared before continuing to avoid lost update + * problems. + */ + smp_mb__after_atomic(); + + return status; +} + +static long ssh_ptl_tx_wait_transfer(struct ssh_ptl *ptl, long timeout) +{ + long status; + + status = wait_for_completion_interruptible_timeout(&ptl->tx.thread_cplt_tx, + timeout); + reinit_completion(&ptl->tx.thread_cplt_tx); + + /* + * Ensure completion is cleared before continuing to avoid lost update + * problems. + */ + smp_mb__after_atomic(); + + return status; +} + +static int ssh_ptl_tx_packet(struct ssh_ptl *ptl, struct ssh_packet *packet) +{ + long timeout = SSH_PTL_TX_TIMEOUT; + size_t offset = 0; + + /* Note: Flush-packets don't have any data. */ + if (unlikely(!packet->data.ptr)) + return 0; + + /* Error injection: drop packet to simulate transmission problem. */ + if (ssh_ptl_should_drop_packet(packet)) + return 0; + + /* Error injection: simulate invalid packet data. */ + ssh_ptl_tx_inject_invalid_data(packet); + + ptl_dbg(ptl, "tx: sending data (length: %zu)\n", packet->data.len); + print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1, + packet->data.ptr, packet->data.len, false); + + do { + ssize_t status, len; + u8 *buf; + + buf = packet->data.ptr + offset; + len = packet->data.len - offset; + + status = ssh_ptl_write_buf(ptl, packet, buf, len); + if (status < 0) + return status; + + if (status == len) + return 0; + + offset += status; + + timeout = ssh_ptl_tx_wait_transfer(ptl, timeout); + if (kthread_should_stop() || !atomic_read(&ptl->tx.running)) + return -ESHUTDOWN; + + if (timeout < 0) + return -EINTR; + + if (timeout == 0) + return -ETIMEDOUT; + } while (true); +} + +static int ssh_ptl_tx_threadfn(void *data) +{ + struct ssh_ptl *ptl = data; + + while (!kthread_should_stop() && atomic_read(&ptl->tx.running)) { + struct ssh_packet *packet; + int status; + + /* Try to get the next packet. */ + packet = ssh_ptl_tx_next(ptl); + + /* If no packet can be processed, we are done. */ + if (IS_ERR(packet)) { + ssh_ptl_tx_wait_packet(ptl); + continue; + } + + /* Transfer and complete packet. */ + status = ssh_ptl_tx_packet(ptl, packet); + if (status) + ssh_ptl_tx_compl_error(packet, status); + else + ssh_ptl_tx_compl_success(packet); + + ssh_packet_put(packet); + } + + return 0; +} + +/** + * ssh_ptl_tx_wakeup_packet() - Wake up packet transmitter thread for new + * packet. + * @ptl: The packet transport layer. + * + * Wakes up the packet transmitter thread, notifying it that a new packet has + * arrived and is ready for transfer. 
If the packet transport layer has been + * shut down, calls to this function will be ignored. + */ +static void ssh_ptl_tx_wakeup_packet(struct ssh_ptl *ptl) +{ + if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state)) + return; + + complete(&ptl->tx.thread_cplt_pkt); +} + +/** + * ssh_ptl_tx_start() - Start packet transmitter thread. + * @ptl: The packet transport layer. + * + * Return: Returns zero on success, a negative error code on failure. + */ +int ssh_ptl_tx_start(struct ssh_ptl *ptl) +{ + atomic_set_release(&ptl->tx.running, 1); + + ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "ssam_serial_hub-tx"); + if (IS_ERR(ptl->tx.thread)) + return PTR_ERR(ptl->tx.thread); + + return 0; +} + +/** + * ssh_ptl_tx_stop() - Stop packet transmitter thread. + * @ptl: The packet transport layer. + * + * Return: Returns zero on success, a negative error code on failure. + */ +int ssh_ptl_tx_stop(struct ssh_ptl *ptl) +{ + int status = 0; + + if (!IS_ERR_OR_NULL(ptl->tx.thread)) { + /* Tell thread to stop. */ + atomic_set_release(&ptl->tx.running, 0); + + /* + * Wake up thread in case it is paused. Do not use wakeup + * helpers as this may be called when the shutdown bit has + * already been set. + */ + complete(&ptl->tx.thread_cplt_pkt); + complete(&ptl->tx.thread_cplt_tx); + + /* Finally, wait for thread to stop. */ + status = kthread_stop(ptl->tx.thread); + ptl->tx.thread = NULL; + } + + return status; +} + +static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id) +{ + struct ssh_packet *packet = ERR_PTR(-ENOENT); + struct ssh_packet *p, *n; + + spin_lock(&ptl->pending.lock); + list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) { + /* + * We generally expect packets to be in order, so first packet + * to be added to pending is first to be sent, is first to be + * ACKed. + */ + if (unlikely(ssh_packet_get_seq(p) != seq_id)) + continue; + + /* + * In case we receive an ACK while handling a transmission + * error completion. The packet will be removed shortly. + */ + if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) { + packet = ERR_PTR(-EPERM); + break; + } + + /* + * Mark the packet as ACKed and remove it from pending by + * removing its node and decrementing the pending counter. + */ + set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state); + /* Ensure that state never gets zero. */ + smp_mb__before_atomic(); + clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state); + + atomic_dec(&ptl->pending.count); + list_del(&p->pending_node); + packet = p; + + break; + } + spin_unlock(&ptl->pending.lock); + + return packet; +} + +static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet) +{ + wait_event(packet->ptl->tx.packet_wq, + test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state) || + test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)); +} + +static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq) +{ + struct ssh_packet *p; + + p = ssh_ptl_ack_pop(ptl, seq); + if (IS_ERR(p)) { + if (PTR_ERR(p) == -ENOENT) { + /* + * The packet has not been found in the set of pending + * packets. + */ + ptl_warn(ptl, "ptl: received ACK for non-pending packet\n"); + } else { + /* + * The packet is pending, but we are not allowed to take + * it because it has been locked. + */ + WARN_ON(PTR_ERR(p) != -EPERM); + } + return; + } + + ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p); + + /* + * It is possible that the packet has been transmitted, but the state + * has not been updated from "transmitting" to "transmitted" yet. 
+ * In that case, we need to wait for this transition to occur in order to
+ * distinguish between success and failure.
+ *
+ * On transmission failure, the packet will be locked after this call.
+ * On success, the transmitted bit will be set.
+ */
+	ssh_ptl_wait_until_transmitted(p);
+
+	/*
+	 * The packet will already be locked in case of a transmission error or
+	 * cancellation. Let the transmitter or cancellation issuer complete the
+	 * packet.
+	 */
+	if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
+		if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state)))
+			ptl_err(ptl, "ptl: received ACK before packet had been fully transmitted\n");
+
+		ssh_packet_put(p);
+		return;
+	}
+
+	ssh_ptl_remove_and_complete(p, 0);
+	ssh_packet_put(p);
+
+	if (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING)
+		ssh_ptl_tx_wakeup_packet(ptl);
+}
+
+/**
+ * ssh_ptl_submit() - Submit a packet to the transport layer.
+ * @ptl: The packet transport layer to submit the packet to.
+ * @p: The packet to submit.
+ *
+ * Submits a new packet to the transport layer, queuing it to be sent. This
+ * function should not be used for re-submission.
+ *
+ * Return: Returns zero on success, %-EINVAL if a packet field is invalid or
+ * the packet has been canceled prior to submission, %-EALREADY if the packet
+ * has already been submitted, or %-ESHUTDOWN if the packet transport layer
+ * has been shut down.
+ */
+int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
+{
+	struct ssh_ptl *ptl_old;
+	int status;
+
+	trace_ssam_packet_submit(p);
+
+	/* Validate packet fields. */
+	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
+		if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
+			return -EINVAL;
+	} else if (!p->data.ptr) {
+		return -EINVAL;
+	}
+
+	/*
+	 * The ptl reference only gets set on or before the first submission.
+	 * After the first submission, it has to be read-only.
+	 *
+	 * Note that ptl may already be set from upper-layer request
+	 * submission, thus we cannot expect it to be NULL.
+	 */
+	ptl_old = READ_ONCE(p->ptl);
+	if (!ptl_old)
+		WRITE_ONCE(p->ptl, ptl);
+	else if (WARN_ON(ptl_old != ptl))
+		return -EALREADY;	/* Submitted on different PTL. */
+
+	status = ssh_ptl_queue_push(p);
+	if (status)
+		return status;
+
+	if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state) ||
+	    (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING))
+		ssh_ptl_tx_wakeup_packet(ptl);
+
+	return 0;
+}
+
+/*
+ * __ssh_ptl_resubmit() - Re-submit a packet to the transport layer.
+ * @packet: The packet to re-submit.
+ *
+ * Re-submits the given packet: Checks if it can be re-submitted and queues it
+ * if it can, resetting the packet timestamp in the process. Must be called
+ * with the pending lock held.
+ *
+ * Return: Returns %-ECANCELED if the packet has exceeded its number of tries,
+ * %-EINVAL if the packet has been locked, %-EALREADY if the packet is already
+ * on the queue, and %-ESHUTDOWN if the transmission layer has been shut down.
+ */
+static int __ssh_ptl_resubmit(struct ssh_packet *packet)
+{
+	int status;
+	u8 try;
+
+	lockdep_assert_held(&packet->ptl->pending.lock);
+
+	trace_ssam_packet_resubmit(packet);
+
+	spin_lock(&packet->ptl->queue.lock);
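+	/*
+	 * Note: Both locks are held at this point: the pending lock by the
+	 * caller (see the lockdep assertion above) and the queue lock taken
+	 * right here, as re-submission needs to modify the queue.
+	 */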
+	/* Check if the packet is out of tries. */
+	try = ssh_packet_priority_get_try(packet->priority);
+	if (try >= SSH_PTL_MAX_PACKET_TRIES) {
+		spin_unlock(&packet->ptl->queue.lock);
+		return -ECANCELED;
+	}
+
+	status = __ssh_ptl_queue_push(packet);
+	if (status) {
+		/*
+		 * An error here indicates that the packet has either already
+		 * been queued, been locked, or the transport layer is being
+		 * shut down. In all cases: Ignore the error.
+		 */
+		spin_unlock(&packet->ptl->queue.lock);
+		return status;
+	}
+
+	packet->timestamp = KTIME_MAX;
+
+	spin_unlock(&packet->ptl->queue.lock);
+	return 0;
+}
+
+static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
+{
+	struct ssh_packet *p;
+	bool resub = false;
+
+	/*
+	 * Note: We deliberately do not remove/attempt to cancel and complete
+	 * packets that are out of tries in this function. The packet will
+	 * eventually be canceled and completed by the timeout. Removing the
+	 * packet here could lead to overly eager cancellation if the packet
+	 * has not been re-transmitted yet but the tries-counter has already
+	 * been updated (i.e. ssh_ptl_tx_next() removed the packet from the
+	 * queue and updated the counter, but re-transmission for the last
+	 * try has not actually started yet).
+	 */
+
+	spin_lock(&ptl->pending.lock);
+
+	/* Re-queue all pending packets. */
+	list_for_each_entry(p, &ptl->pending.head, pending_node) {
+		/*
+		 * Re-submission fails if the packet is out of tries, has been
+		 * locked, is already queued, or the layer is being shut down.
+		 * No need to re-schedule the tx-thread in those cases.
+		 */
+		if (!__ssh_ptl_resubmit(p))
+			resub = true;
+	}
+
+	spin_unlock(&ptl->pending.lock);
+
+	if (resub)
+		ssh_ptl_tx_wakeup_packet(ptl);
+}
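+
+/*
+ * Example timeline (illustrative numbers only): a sequenced packet is sent
+ * and added to the pending set with try == 1 after its first transmission.
+ * If no ACK arrives within the retransmission timeout, the timeout reaper
+ * calls __ssh_ptl_resubmit(), re-queuing the packet and resetting its
+ * timestamp. After SSH_PTL_MAX_PACKET_TRIES unsuccessful attempts,
+ * __ssh_ptl_resubmit() returns %-ECANCELED and the reaper completes the
+ * packet with %-ETIMEDOUT.
+ */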
+
+/**
+ * ssh_ptl_cancel() - Cancel a packet.
+ * @p: The packet to cancel.
+ *
+ * Cancels a packet. There are no guarantees on when completion and release
+ * callbacks will be called. This may occur during execution of this function
+ * or may occur at any point later.
+ *
+ * Note that it is not guaranteed that the packet will actually be canceled if
+ * the packet is concurrently completed by another process. The only guarantee
+ * of this function is that the packet will be completed (with success,
+ * failure, or cancellation) and released from the transport layer in a
+ * reasonable time-frame.
+ *
+ * May be called before the packet has been submitted, in which case any later
+ * packet submission fails.
+ */
+void ssh_ptl_cancel(struct ssh_packet *p)
+{
+	if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
+		return;
+
+	trace_ssam_packet_cancel(p);
+
+	/*
+	 * Lock packet and commit with memory barrier. If this packet has
+	 * already been locked, it's going to be removed and completed by
+	 * another party, which should have precedence.
+	 */
+	if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+		return;
+
+	/*
+	 * By marking the packet as locked and employing the implicit memory
+	 * barrier of test_and_set_bit, we have guaranteed that, at this point,
+	 * the packet cannot be added to the queue any more.
+	 *
+	 * In case the packet has never been submitted, packet->ptl is NULL. If
+	 * the packet is currently being submitted, packet->ptl may be NULL or
+	 * non-NULL. Due to marking the packet as locked above and committing
+	 * with the memory barrier, we have guaranteed that, if packet->ptl is
+	 * NULL, the packet will never be added to the queue. If packet->ptl is
+	 * non-NULL, we don't have any guarantees.
+	 */
+
+	if (READ_ONCE(p->ptl)) {
+		ssh_ptl_remove_and_complete(p, -ECANCELED);
+
+		if (atomic_read(&p->ptl->pending.count) < SSH_PTL_MAX_PENDING)
+			ssh_ptl_tx_wakeup_packet(p->ptl);
+
+	} else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
+		__ssh_ptl_complete(p, -ECANCELED);
+	}
+}
+
+/* Must be called with pending lock held. */
+static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
+{
+	lockdep_assert_held(&p->ptl->pending.lock);
+
+	if (p->timestamp != KTIME_MAX)
+		return ktime_add(p->timestamp, timeout);
+	else
+		return KTIME_MAX;
+}
+
+static void ssh_ptl_timeout_reap(struct work_struct *work)
+{
+	struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
+	struct ssh_packet *p, *n;
+	LIST_HEAD(claimed);
+	ktime_t now = ktime_get_coarse_boottime();
+	ktime_t timeout = ptl->rtx_timeout.timeout;
+	ktime_t next = KTIME_MAX;
+	bool resub = false;
+	int status;
+
+	trace_ssam_ptl_timeout_reap(atomic_read(&ptl->pending.count));
+
+	/*
+	 * Mark reaper as "not pending". This is done before checking any
+	 * packets to avoid lost-update type problems.
+	 */
+	spin_lock(&ptl->rtx_timeout.lock);
+	ptl->rtx_timeout.expires = KTIME_MAX;
+	spin_unlock(&ptl->rtx_timeout.lock);
+
+	spin_lock(&ptl->pending.lock);
+
+	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+		ktime_t expires = ssh_packet_get_expiration(p, timeout);
+
+		/*
+		 * Check if the timeout hasn't expired yet. Find out next
+		 * expiration date to be handled after this run.
+		 */
+		if (ktime_after(expires, now)) {
+			next = ktime_before(expires, next) ? expires : next;
+			continue;
+		}
+
+		trace_ssam_packet_timeout(p);
+
+		status = __ssh_ptl_resubmit(p);
+
+		/*
+		 * Re-submission fails if the packet is out of tries, has been
+		 * locked, is already queued, or the layer is being shut down.
+		 * No need to re-schedule the tx-thread in those cases.
+		 */
+		if (!status)
+			resub = true;
+
+		/* Go to next packet if this packet is not out of tries. */
+		if (status != -ECANCELED)
+			continue;
+
+		/* No more tries left: Cancel the packet. */
+
+		/*
+		 * If someone else has locked the packet already, don't use it
+		 * and let the other party complete it.
+		 */
+		if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+			continue;
+
+		/*
+		 * We have now marked the packet as locked. Thus it cannot be
+		 * added to the pending list again after we've removed it here.
+		 * We can therefore re-use the pending_node of this packet
+		 * temporarily.
+		 */
+
+		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+		atomic_dec(&ptl->pending.count);
+		list_move_tail(&p->pending_node, &claimed);
+	}
+
+	spin_unlock(&ptl->pending.lock);
+
+	/* Cancel and complete the claimed packets. */
+	list_for_each_entry_safe(p, n, &claimed, pending_node) {
+		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
+			ssh_ptl_queue_remove(p);
+			__ssh_ptl_complete(p, -ETIMEDOUT);
+		}
+
+		/*
+		 * Drop the reference we've obtained by removing it from
+		 * the pending set.
+		 */
+		list_del(&p->pending_node);
+		ssh_packet_put(p);
+	}
+
+	/* Ensure that the reaper doesn't run again immediately. */
+	next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
+	if (next != KTIME_MAX)
+		ssh_ptl_timeout_reaper_mod(ptl, now, next);
+
+	if (resub)
+		ssh_ptl_tx_wakeup_packet(ptl);
+}
+
+static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, const struct ssh_frame *frame)
+{
+	int i;
+
+	/*
+	 * Ignore unsequenced packets. On some devices (notably Surface Pro 9),
+	 * unsequenced events will always be sent with SEQ=0x00.
Attempting to + * detect retransmission would thus just block all events. + * + * While sequence numbers would also allow detection of retransmitted + * packets in unsequenced communication, they have only ever been used + * to cover edge-cases in sequenced transmission. In particular, the + * only instance of packets being retransmitted (that we are aware of) + * is due to an ACK timeout. As this does not happen in unsequenced + * communication, skip the retransmission check for those packets + * entirely. + */ + if (frame->type == SSH_FRAME_TYPE_DATA_NSQ) + return false; + + /* + * Check if SEQ has been seen recently (i.e. packet was + * re-transmitted and we should ignore it). + */ + for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) { + if (likely(ptl->rx.blocked.seqs[i] != frame->seq)) + continue; + + ptl_dbg(ptl, "ptl: ignoring repeated data packet\n"); + return true; + } + + /* Update list of blocked sequence IDs. */ + ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = frame->seq; + ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1) + % ARRAY_SIZE(ptl->rx.blocked.seqs); + + return false; +} + +static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl, + const struct ssh_frame *frame, + const struct ssam_span *payload) +{ + if (ssh_ptl_rx_retransmit_check(ptl, frame)) + return; + + ptl->ops.data_received(ptl, payload); +} + +static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq) +{ + struct ssh_packet *packet; + struct ssam_span buf; + struct msgbuf msgb; + int status; + + status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL); + if (status) { + ptl_err(ptl, "ptl: failed to allocate ACK packet\n"); + return; + } + + ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(ACK, 0), + &ssh_ptl_ctrl_packet_ops); + + msgb_init(&msgb, buf.ptr, buf.len); + msgb_push_ack(&msgb, seq); + ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb)); + + ssh_ptl_submit(ptl, packet); + ssh_packet_put(packet); +} + +static void ssh_ptl_send_nak(struct ssh_ptl *ptl) +{ + struct ssh_packet *packet; + struct ssam_span buf; + struct msgbuf msgb; + int status; + + status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL); + if (status) { + ptl_err(ptl, "ptl: failed to allocate NAK packet\n"); + return; + } + + ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(NAK, 0), + &ssh_ptl_ctrl_packet_ops); + + msgb_init(&msgb, buf.ptr, buf.len); + msgb_push_nak(&msgb); + ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb)); + + ssh_ptl_submit(ptl, packet); + ssh_packet_put(packet); +} + +static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source) +{ + struct ssh_frame *frame; + struct ssam_span payload; + struct ssam_span aligned; + bool syn_found; + int status; + + /* Error injection: Modify data to simulate corrupt SYN bytes. */ + ssh_ptl_rx_inject_invalid_syn(ptl, source); + + /* Find SYN. */ + syn_found = sshp_find_syn(source, &aligned); + + if (unlikely(aligned.ptr != source->ptr)) { + /* + * We expect aligned.ptr == source->ptr. If this is not the + * case, then aligned.ptr > source->ptr and we've encountered + * some unexpected data where we'd expect the start of a new + * message (i.e. the SYN sequence). + * + * This can happen when a CRC check for the previous message + * failed and we start actively searching for the next one + * (via the call to sshp_find_syn() above), or the first bytes + * of a message got dropped or corrupted. 
+ * + * In any case, we issue a warning, send a NAK to the EC to + * request re-transmission of any data we haven't acknowledged + * yet, and finally, skip everything up to the next SYN + * sequence. + */ + + ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n"); + + /* + * Notes: + * - This might send multiple NAKs in case the communication + * starts with an invalid SYN and is broken down into multiple + * pieces. This should generally be handled fine, we just + * might receive duplicate data in this case, which is + * detected when handling data frames. + * - This path will also be executed on invalid CRCs: When an + * invalid CRC is encountered, the code below will skip data + * until directly after the SYN. This causes the search for + * the next SYN, which is generally not placed directly after + * the last one. + * + * Open question: Should we send this in case of invalid + * payload CRCs if the frame-type is non-sequential (current + * implementation) or should we drop that frame without + * telling the EC? + */ + ssh_ptl_send_nak(ptl); + } + + if (unlikely(!syn_found)) + return aligned.ptr - source->ptr; + + /* Error injection: Modify data to simulate corruption. */ + ssh_ptl_rx_inject_invalid_data(ptl, &aligned); + + /* Parse and validate frame. */ + status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload, + SSH_PTL_RX_BUF_LEN); + if (status) /* Invalid frame: skip to next SYN. */ + return aligned.ptr - source->ptr + sizeof(u16); + if (!frame) /* Not enough data. */ + return aligned.ptr - source->ptr; + + trace_ssam_rx_frame_received(frame); + + switch (frame->type) { + case SSH_FRAME_TYPE_ACK: + ssh_ptl_acknowledge(ptl, frame->seq); + break; + + case SSH_FRAME_TYPE_NAK: + ssh_ptl_resubmit_pending(ptl); + break; + + case SSH_FRAME_TYPE_DATA_SEQ: + ssh_ptl_send_ack(ptl, frame->seq); + fallthrough; + + case SSH_FRAME_TYPE_DATA_NSQ: + ssh_ptl_rx_dataframe(ptl, frame, &payload); + break; + + default: + ptl_warn(ptl, "ptl: received frame with unknown type %#04x\n", + frame->type); + break; + } + + return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(payload.len); +} + +static int ssh_ptl_rx_threadfn(void *data) +{ + struct ssh_ptl *ptl = data; + + while (true) { + struct ssam_span span; + size_t offs = 0; + size_t n; + + wait_event_interruptible(ptl->rx.wq, + !kfifo_is_empty(&ptl->rx.fifo) || + kthread_should_stop()); + if (kthread_should_stop()) + break; + + /* Copy from fifo to evaluation buffer. */ + n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo); + + ptl_dbg(ptl, "rx: received data (size: %zu)\n", n); + print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1, + ptl->rx.buf.ptr + ptl->rx.buf.len - n, + n, false); + + /* Parse until we need more bytes or buffer is empty. */ + while (offs < ptl->rx.buf.len) { + sshp_buf_span_from(&ptl->rx.buf, offs, &span); + n = ssh_ptl_rx_eval(ptl, &span); + if (n == 0) + break; /* Need more bytes. */ + + offs += n; + } + + /* Throw away the evaluated parts. */ + sshp_buf_drop(&ptl->rx.buf, offs); + } + + return 0; +} + +static void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl) +{ + wake_up(&ptl->rx.wq); +} + +/** + * ssh_ptl_rx_start() - Start packet transport layer receiver thread. + * @ptl: The packet transport layer. + * + * Return: Returns zero on success, a negative error code on failure. 
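+ *
+ * Calling this function while the receiver thread is already running is a
+ * no-op: the existing thread is kept and zero is returned (see the check at
+ * the top of the function).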
+ */
+int ssh_ptl_rx_start(struct ssh_ptl *ptl)
+{
+	if (ptl->rx.thread)
+		return 0;
+
+	ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl,
+				     "ssam_serial_hub-rx");
+	if (IS_ERR(ptl->rx.thread))
+		return PTR_ERR(ptl->rx.thread);
+
+	return 0;
+}
+
+/**
+ * ssh_ptl_rx_stop() - Stop packet transport layer receiver thread.
+ * @ptl: The packet transport layer.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
+{
+	int status = 0;
+
+	if (ptl->rx.thread) {
+		status = kthread_stop(ptl->rx.thread);
+		ptl->rx.thread = NULL;
+	}
+
+	return status;
+}
+
+/**
+ * ssh_ptl_rx_rcvbuf() - Push data from lower-layer transport to the packet
+ * layer.
+ * @ptl: The packet transport layer.
+ * @buf: Pointer to the data to push to the layer.
+ * @n: Size of the data to push to the layer, in bytes.
+ *
+ * Pushes data from a lower-layer transport to the receiver fifo buffer of the
+ * packet layer and notifies the receiver thread. Calls to this function are
+ * ignored once the packet layer has been shut down.
+ *
+ * Return: Returns the number of bytes transferred (positive or zero) on
+ * success. Returns %-ESHUTDOWN if the packet layer has been shut down.
+ */
+int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
+{
+	int used;
+
+	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+		return -ESHUTDOWN;
+
+	used = kfifo_in(&ptl->rx.fifo, buf, n);
+	if (used)
+		ssh_ptl_rx_wakeup(ptl);
+
+	return used;
+}
+
+/**
+ * ssh_ptl_shutdown() - Shut down the packet transport layer.
+ * @ptl: The packet transport layer.
+ *
+ * Shuts down the packet transport layer, removing and canceling all queued
+ * and pending packets. Packets canceled by this operation will be completed
+ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
+ * stopped.
+ *
+ * As a result of this function, the transport layer will be marked as shut
+ * down. Submission of packets after the transport layer has been shut down
+ * will fail with %-ESHUTDOWN.
+ */
+void ssh_ptl_shutdown(struct ssh_ptl *ptl)
+{
+	LIST_HEAD(complete_q);
+	LIST_HEAD(complete_p);
+	struct ssh_packet *p, *n;
+	int status;
+
+	/* Ensure that no new packets (including ACK/NAK) can be submitted. */
+	set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
+	/*
+	 * Ensure that the layer gets marked as shut-down before actually
+	 * stopping it. In combination with the check in ssh_ptl_queue_push(),
+	 * this guarantees that no new packets can be added and all already
+	 * queued packets are properly canceled. In combination with the check
+	 * in ssh_ptl_rx_rcvbuf(), this guarantees that received data is
+	 * properly cut off.
+	 */
+	smp_mb__after_atomic();
+
+	status = ssh_ptl_rx_stop(ptl);
+	if (status)
+		ptl_err(ptl, "ptl: failed to stop receiver thread\n");
+
+	status = ssh_ptl_tx_stop(ptl);
+	if (status)
+		ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
+
+	cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
+
+	/*
+	 * At this point, all threads have been stopped. This means that the
+	 * only references to packets from inside the system are in the queue
+	 * and pending set.
+	 *
+	 * Note: We still need locks here because someone could still be
+	 * canceling packets.
+	 *
+	 * Note 2: We can re-use queue_node (or pending_node) if we mark the
+	 * packet as locked and then remove it from the queue (or pending set,
+	 * respectively). Marking the packet as locked avoids re-queuing
+	 * (which should already be prevented by having stopped the threads...)
+	 * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
+	 * new list via other threads (e.g. cancellation).
+	 *
+	 * Note 3: There may be overlap between complete_p and complete_q.
+	 * This is handled via test_and_set_bit() on the "completed" flag
+	 * (which also handles cancellation).
+	 */
+
+	/* Mark queued packets as locked and move them to complete_q. */
+	spin_lock(&ptl->queue.lock);
+	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
+		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
+		/* Ensure that state does not get zero. */
+		smp_mb__before_atomic();
+		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
+
+		list_move_tail(&p->queue_node, &complete_q);
+	}
+	spin_unlock(&ptl->queue.lock);
+
+	/* Mark pending packets as locked and move them to complete_p. */
+	spin_lock(&ptl->pending.lock);
+	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
+		/* Ensure that state does not get zero. */
+		smp_mb__before_atomic();
+		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+		list_move_tail(&p->pending_node, &complete_p);
+	}
+	atomic_set(&ptl->pending.count, 0);
+	spin_unlock(&ptl->pending.lock);
+
+	/* Complete and drop packets on complete_q. */
+	list_for_each_entry(p, &complete_q, queue_node) {
+		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+			__ssh_ptl_complete(p, -ESHUTDOWN);
+
+		ssh_packet_put(p);
+	}
+
+	/* Complete and drop packets on complete_p. */
+	list_for_each_entry(p, &complete_p, pending_node) {
+		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+			__ssh_ptl_complete(p, -ESHUTDOWN);
+
+		ssh_packet_put(p);
+	}
+
+	/*
+	 * At this point we have guaranteed that the system doesn't reference
+	 * any packets any more.
+	 */
+}
+
+/**
+ * ssh_ptl_init() - Initialize packet transport layer.
+ * @ptl: The packet transport layer to initialize.
+ * @serdev: The underlying serial device, i.e. the lower-level transport.
+ * @ops: Packet layer operations.
+ *
+ * Initializes the given packet transport layer. Transmitter and receiver
+ * threads must be started separately via ssh_ptl_tx_start() and
+ * ssh_ptl_rx_start(), after the packet-layer has been initialized and the
+ * lower-level transport layer has been set up.
+ *
+ * Return: Returns zero on success and a nonzero error code on failure.
+ */
+int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
+		 struct ssh_ptl_ops *ops)
+{
+	int i, status;
+
+	ptl->serdev = serdev;
+	ptl->state = 0;
+
+	spin_lock_init(&ptl->queue.lock);
+	INIT_LIST_HEAD(&ptl->queue.head);
+
+	spin_lock_init(&ptl->pending.lock);
+	INIT_LIST_HEAD(&ptl->pending.head);
+	atomic_set_release(&ptl->pending.count, 0);
+
+	ptl->tx.thread = NULL;
+	atomic_set(&ptl->tx.running, 0);
+	init_completion(&ptl->tx.thread_cplt_pkt);
+	init_completion(&ptl->tx.thread_cplt_tx);
+	init_waitqueue_head(&ptl->tx.packet_wq);
+
+	ptl->rx.thread = NULL;
+	init_waitqueue_head(&ptl->rx.wq);
+
+	spin_lock_init(&ptl->rtx_timeout.lock);
+	ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
+	ptl->rtx_timeout.expires = KTIME_MAX;
+	INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
+
+	ptl->ops = *ops;
+
+	/* Initialize list of recent/blocked SEQs with invalid sequence IDs.
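+	 * Sequence IDs on the wire are u8 values, while the array stores u16
+	 * values, so U16_MAX can never collide with a real (recently seen)
+	 * SEQ.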
*/ + for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) + ptl->rx.blocked.seqs[i] = U16_MAX; + ptl->rx.blocked.offset = 0; + + status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL); + if (status) + return status; + + status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL); + if (status) + kfifo_free(&ptl->rx.fifo); + + return status; +} + +/** + * ssh_ptl_destroy() - Deinitialize packet transport layer. + * @ptl: The packet transport layer to deinitialize. + * + * Deinitializes the given packet transport layer and frees resources + * associated with it. If receiver and/or transmitter threads have been + * started, the layer must first be shut down via ssh_ptl_shutdown() before + * this function can be called. + */ +void ssh_ptl_destroy(struct ssh_ptl *ptl) +{ + kfifo_free(&ptl->rx.fifo); + sshp_buf_free(&ptl->rx.buf); +} diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.h b/drivers/platform/surface/aggregator/ssh_packet_layer.h new file mode 100644 index 0000000000..64633522f9 --- /dev/null +++ b/drivers/platform/surface/aggregator/ssh_packet_layer.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * SSH packet transport layer. + * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H +#define _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H + +#include <linux/atomic.h> +#include <linux/kfifo.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/serdev.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +#include <linux/surface_aggregator/serial_hub.h> +#include "ssh_parser.h" + +/** + * enum ssh_ptl_state_flags - State-flags for &struct ssh_ptl. + * + * @SSH_PTL_SF_SHUTDOWN_BIT: + * Indicates that the packet transport layer has been shut down or is + * being shut down and should not accept any new packets/data. + */ +enum ssh_ptl_state_flags { + SSH_PTL_SF_SHUTDOWN_BIT, +}; + +/** + * struct ssh_ptl_ops - Callback operations for packet transport layer. + * @data_received: Function called when a data-packet has been received. Both, + * the packet layer on which the packet has been received and + * the packet's payload data are provided to this function. + */ +struct ssh_ptl_ops { + void (*data_received)(struct ssh_ptl *p, const struct ssam_span *data); +}; + +/** + * struct ssh_ptl - SSH packet transport layer. + * @serdev: Serial device providing the underlying data transport. + * @state: State(-flags) of the transport layer. + * @queue: Packet submission queue. + * @queue.lock: Lock for modifying the packet submission queue. + * @queue.head: List-head of the packet submission queue. + * @pending: Set/list of pending packets. + * @pending.lock: Lock for modifying the pending set. + * @pending.head: List-head of the pending set/list. + * @pending.count: Number of currently pending packets. + * @tx: Transmitter subsystem. + * @tx.running: Flag indicating (desired) transmitter thread state. + * @tx.thread: Transmitter thread. + * @tx.thread_cplt_tx: Completion for transmitter thread waiting on transfer. + * @tx.thread_cplt_pkt: Completion for transmitter thread waiting on packets. + * @tx.packet_wq: Waitqueue-head for packet transmit completion. + * @rx: Receiver subsystem. + * @rx.thread: Receiver thread. + * @rx.wq: Waitqueue-head for receiver thread. + * @rx.fifo: Buffer for receiving data/pushing data to receiver thread. + * @rx.buf: Buffer for evaluating data on receiver thread. 
+ * @rx.blocked: List of recent/blocked sequence IDs to detect retransmission. + * @rx.blocked.seqs: Array of blocked sequence IDs. + * @rx.blocked.offset: Offset indicating where a new ID should be inserted. + * @rtx_timeout: Retransmission timeout subsystem. + * @rtx_timeout.lock: Lock for modifying the retransmission timeout reaper. + * @rtx_timeout.timeout: Timeout interval for retransmission. + * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled. + * @rtx_timeout.reaper: Work performing timeout checks and subsequent actions. + * @ops: Packet layer operations. + */ +struct ssh_ptl { + struct serdev_device *serdev; + unsigned long state; + + struct { + spinlock_t lock; + struct list_head head; + } queue; + + struct { + spinlock_t lock; + struct list_head head; + atomic_t count; + } pending; + + struct { + atomic_t running; + struct task_struct *thread; + struct completion thread_cplt_tx; + struct completion thread_cplt_pkt; + struct wait_queue_head packet_wq; + } tx; + + struct { + struct task_struct *thread; + struct wait_queue_head wq; + struct kfifo fifo; + struct sshp_buf buf; + + struct { + u16 seqs[8]; + u16 offset; + } blocked; + } rx; + + struct { + spinlock_t lock; + ktime_t timeout; + ktime_t expires; + struct delayed_work reaper; + } rtx_timeout; + + struct ssh_ptl_ops ops; +}; + +#define __ssam_prcond(func, p, fmt, ...) \ + do { \ + typeof(p) __p = (p); \ + \ + if (__p) \ + func(__p, fmt, ##__VA_ARGS__); \ + } while (0) + +#define ptl_dbg(p, fmt, ...) dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__) +#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__) +#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__) +#define ptl_err(p, fmt, ...) dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__) +#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__) + +#define to_ssh_ptl(ptr, member) \ + container_of(ptr, struct ssh_ptl, member) + +int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev, + struct ssh_ptl_ops *ops); + +void ssh_ptl_destroy(struct ssh_ptl *ptl); + +/** + * ssh_ptl_get_device() - Get device associated with packet transport layer. + * @ptl: The packet transport layer. + * + * Return: Returns the device on which the given packet transport layer builds + * upon. + */ +static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl) +{ + return ptl->serdev ? &ptl->serdev->dev : NULL; +} + +int ssh_ptl_tx_start(struct ssh_ptl *ptl); +int ssh_ptl_tx_stop(struct ssh_ptl *ptl); +int ssh_ptl_rx_start(struct ssh_ptl *ptl); +int ssh_ptl_rx_stop(struct ssh_ptl *ptl); +void ssh_ptl_shutdown(struct ssh_ptl *ptl); + +int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p); +void ssh_ptl_cancel(struct ssh_packet *p); + +int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n); + +/** + * ssh_ptl_tx_wakeup_transfer() - Wake up packet transmitter thread for + * transfer. + * @ptl: The packet transport layer. + * + * Wakes up the packet transmitter thread, notifying it that the underlying + * transport has more space for data to be transmitted. If the packet + * transport layer has been shut down, calls to this function will be ignored. 
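+ *
+ * A sketch of the intended call site (illustrative only; the actual wiring
+ * lives in the controller/core code): the serdev client's ->write_wakeup()
+ * callback forwards to this function once the underlying serial driver can
+ * accept more data::
+ *
+ *	static void example_write_wakeup(struct serdev_device *serdev)
+ *	{
+ *		struct ssh_ptl *ptl = serdev_device_get_drvdata(serdev);
+ *
+ *		ssh_ptl_tx_wakeup_transfer(ptl);
+ *	}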
+ */ +static inline void ssh_ptl_tx_wakeup_transfer(struct ssh_ptl *ptl) +{ + if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state)) + return; + + complete(&ptl->tx.thread_cplt_tx); +} + +void ssh_packet_init(struct ssh_packet *packet, unsigned long type, + u8 priority, const struct ssh_packet_ops *ops); + +int ssh_ctrl_packet_cache_init(void); +void ssh_ctrl_packet_cache_destroy(void); + +#endif /* _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H */ diff --git a/drivers/platform/surface/aggregator/ssh_parser.c b/drivers/platform/surface/aggregator/ssh_parser.c new file mode 100644 index 0000000000..a6f6686943 --- /dev/null +++ b/drivers/platform/surface/aggregator/ssh_parser.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * SSH message parser. + * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <asm/unaligned.h> +#include <linux/compiler.h> +#include <linux/device.h> +#include <linux/types.h> + +#include <linux/surface_aggregator/serial_hub.h> +#include "ssh_parser.h" + +/** + * sshp_validate_crc() - Validate a CRC in raw message data. + * @src: The span of data over which the CRC should be computed. + * @crc: The pointer to the expected u16 CRC value. + * + * Computes the CRC of the provided data span (@src), compares it to the CRC + * stored at the given address (@crc), and returns the result of this + * comparison, i.e. %true if equal. This function is intended to run on raw + * input/message data. + * + * Return: Returns %true if the computed CRC matches the stored CRC, %false + * otherwise. + */ +static bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc) +{ + u16 actual = ssh_crc(src->ptr, src->len); + u16 expected = get_unaligned_le16(crc); + + return actual == expected; +} + +/** + * sshp_starts_with_syn() - Check if the given data starts with SSH SYN bytes. + * @src: The data span to check the start of. + */ +static bool sshp_starts_with_syn(const struct ssam_span *src) +{ + return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN; +} + +/** + * sshp_find_syn() - Find SSH SYN bytes in the given data span. + * @src: The data span to search in. + * @rem: The span (output) indicating the remaining data, starting with SSH + * SYN bytes, if found. + * + * Search for SSH SYN bytes in the given source span. If found, set the @rem + * span to the remaining data, starting with the first SYN bytes and capped by + * the source span length, and return %true. This function does not copy any + * data, but rather only sets pointers to the respective start addresses and + * length values. + * + * If no SSH SYN bytes could be found, set the @rem span to the zero-length + * span at the end of the source span and return %false. + * + * If partial SSH SYN bytes could be found at the end of the source span, set + * the @rem span to cover these partial SYN bytes, capped by the end of the + * source span, and return %false. This function should then be re-run once + * more data is available. + * + * Return: Returns %true if a complete SSH SYN sequence could be found, + * %false otherwise. 
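+ *
+ * For illustration, with the SYN bytes ``aa 55`` on the wire: for input
+ * ``01 02 aa 55 03``, @rem is set to ``aa 55 03`` and %true is returned;
+ * for input ``01 02 aa``, @rem is set to the trailing ``aa`` and %false is
+ * returned (partial SYN, wait for more data); for input ``01 02``, @rem
+ * becomes the empty span at the end of the input and %false is returned.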
+ */ +bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem) +{ + size_t i; + + for (i = 0; i < src->len - 1; i++) { + if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) { + rem->ptr = src->ptr + i; + rem->len = src->len - i; + return true; + } + } + + if (unlikely(src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) { + rem->ptr = src->ptr + src->len - 1; + rem->len = 1; + return false; + } + + rem->ptr = src->ptr + src->len; + rem->len = 0; + return false; +} + +/** + * sshp_parse_frame() - Parse SSH frame. + * @dev: The device used for logging. + * @source: The source to parse from. + * @frame: The parsed frame (output). + * @payload: The parsed payload (output). + * @maxlen: The maximum supported message length. + * + * Parses and validates a SSH frame, including its payload, from the given + * source. Sets the provided @frame pointer to the start of the frame and + * writes the limits of the frame payload to the provided @payload span + * pointer. + * + * This function does not copy any data, but rather only validates the message + * data and sets pointers (and length values) to indicate the respective parts. + * + * If no complete SSH frame could be found, the frame pointer will be set to + * the %NULL pointer and the payload span will be set to the null span (start + * pointer %NULL, size zero). + * + * Return: Returns zero on success or if the frame is incomplete, %-ENOMSG if + * the start of the message is invalid, %-EBADMSG if any (frame-header or + * payload) CRC is invalid, or %-EMSGSIZE if the SSH message is bigger than + * the maximum message length specified in the @maxlen parameter. + */ +int sshp_parse_frame(const struct device *dev, const struct ssam_span *source, + struct ssh_frame **frame, struct ssam_span *payload, + size_t maxlen) +{ + struct ssam_span sf; + struct ssam_span sp; + + /* Initialize output. */ + *frame = NULL; + payload->ptr = NULL; + payload->len = 0; + + if (!sshp_starts_with_syn(source)) { + dev_warn(dev, "rx: parser: invalid start of frame\n"); + return -ENOMSG; + } + + /* Check for minimum packet length. */ + if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) { + dev_dbg(dev, "rx: parser: not enough data for frame\n"); + return 0; + } + + /* Pin down frame. */ + sf.ptr = source->ptr + sizeof(u16); + sf.len = sizeof(struct ssh_frame); + + /* Validate frame CRC. */ + if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) { + dev_warn(dev, "rx: parser: invalid frame CRC\n"); + return -EBADMSG; + } + + /* Ensure packet does not exceed maximum length. */ + sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len); + if (unlikely(SSH_MESSAGE_LENGTH(sp.len) > maxlen)) { + dev_warn(dev, "rx: parser: frame too large: %llu bytes\n", + SSH_MESSAGE_LENGTH(sp.len)); + return -EMSGSIZE; + } + + /* Pin down payload. */ + sp.ptr = sf.ptr + sf.len + sizeof(u16); + + /* Check for frame + payload length. */ + if (source->len < SSH_MESSAGE_LENGTH(sp.len)) { + dev_dbg(dev, "rx: parser: not enough data for payload\n"); + return 0; + } + + /* Validate payload CRC. */ + if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) { + dev_warn(dev, "rx: parser: invalid payload CRC\n"); + return -EBADMSG; + } + + *frame = (struct ssh_frame *)sf.ptr; + *payload = sp; + + dev_dbg(dev, "rx: parser: valid frame found (type: %#04x, len: %u)\n", + (*frame)->type, (*frame)->len); + + return 0; +} + +/** + * sshp_parse_command() - Parse SSH command frame payload. + * @dev: The device used for logging. + * @source: The source to parse from. 
+ * @command: The parsed command (output).
+ * @command_data: The parsed command data/payload (output).
+ *
+ * Parses and validates a SSH command frame payload. Sets the @command pointer
+ * to the command header and the @command_data span to the command data (i.e.
+ * payload of the command). This will result in a zero-length span if the
+ * command does not have any associated data/payload. This function does not
+ * check the frame-payload-type field, which should be checked by the caller
+ * before calling this function.
+ *
+ * The @source parameter should be the complete frame payload, e.g. as
+ * returned by the sshp_parse_frame() function.
+ *
+ * This function does not copy any data, but rather only validates the frame
+ * payload data and sets pointers (and length values) to indicate the
+ * respective parts.
+ *
+ * Return: Returns zero on success or %-ENOMSG if @source does not represent a
+ * valid command-type frame payload, i.e. is too short.
+ */
+int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
+		       struct ssh_command **command,
+		       struct ssam_span *command_data)
+{
+	/* Check for minimum length. */
+	if (unlikely(source->len < sizeof(struct ssh_command))) {
+		*command = NULL;
+		command_data->ptr = NULL;
+		command_data->len = 0;
+
+		dev_err(dev, "rx: parser: command payload is too short\n");
+		return -ENOMSG;
+	}
+
+	*command = (struct ssh_command *)source->ptr;
+	command_data->ptr = source->ptr + sizeof(struct ssh_command);
+	command_data->len = source->len - sizeof(struct ssh_command);
+
+	dev_dbg(dev, "rx: parser: valid command found (tc: %#04x, cid: %#04x)\n",
+		(*command)->tc, (*command)->cid);
+
+	return 0;
+}
diff --git a/drivers/platform/surface/aggregator/ssh_parser.h b/drivers/platform/surface/aggregator/ssh_parser.h
new file mode 100644
index 0000000000..801d8fa69f
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_parser.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SSH message parser.
+ *
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _SURFACE_AGGREGATOR_SSH_PARSER_H
+#define _SURFACE_AGGREGATOR_SSH_PARSER_H
+
+#include <linux/device.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+
+/**
+ * struct sshp_buf - Parser buffer for SSH messages.
+ * @ptr: Pointer to the beginning of the buffer.
+ * @len: Number of bytes used in the buffer.
+ * @cap: Maximum capacity of the buffer.
+ */
+struct sshp_buf {
+	u8 *ptr;
+	size_t len;
+	size_t cap;
+};
+
+/**
+ * sshp_buf_init() - Initialize a SSH parser buffer.
+ * @buf: The buffer to initialize.
+ * @ptr: The memory backing the buffer.
+ * @cap: The length of the memory backing the buffer, i.e. its capacity.
+ *
+ * Initializes the buffer with the given memory as backing and sets its used
+ * length to zero.
+ */
+static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap)
+{
+	buf->ptr = ptr;
+	buf->len = 0;
+	buf->cap = cap;
+}
+
+/**
+ * sshp_buf_alloc() - Allocate and initialize a SSH parser buffer.
+ * @buf: The buffer to initialize/allocate to.
+ * @cap: The desired capacity of the buffer.
+ * @flags: The flags used for allocating the memory.
+ *
+ * Allocates @cap bytes and initializes the provided buffer struct with the
+ * allocated memory.
+ *
+ * Return: Returns zero on success and %-ENOMEM if allocation failed.
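+ *
+ * A minimal usage sketch (illustrative only, not part of the original
+ * sources; the capacity of 4096 bytes is an arbitrary assumption):
+ *
+ *	struct sshp_buf buf;
+ *	int status;
+ *
+ *	status = sshp_buf_alloc(&buf, 4096, GFP_KERNEL);
+ *	if (status)
+ *		return status;
+ *
+ *	(fill the buffer via sshp_buf_read_from_fifo(), consume data and
+ *	re-align it via sshp_buf_drop(), ...)
+ *
+ *	sshp_buf_free(&buf);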
+ */
+static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags)
+{
+	u8 *ptr;
+
+	ptr = kzalloc(cap, flags);
+	if (!ptr)
+		return -ENOMEM;
+
+	sshp_buf_init(buf, ptr, cap);
+	return 0;
+}
+
+/**
+ * sshp_buf_free() - Free a SSH parser buffer.
+ * @buf: The buffer to free.
+ *
+ * Frees a SSH parser buffer by freeing the memory backing it and then
+ * resetting its pointer to %NULL and length and capacity to zero. Intended to
+ * free a buffer previously allocated with sshp_buf_alloc().
+ */
+static inline void sshp_buf_free(struct sshp_buf *buf)
+{
+	kfree(buf->ptr);
+	buf->ptr = NULL;
+	buf->len = 0;
+	buf->cap = 0;
+}
+
+/**
+ * sshp_buf_drop() - Drop data from the beginning of the buffer.
+ * @buf: The buffer to drop data from.
+ * @n: The number of bytes to drop.
+ *
+ * Drops the first @n bytes from the buffer. Re-aligns any remaining data to
+ * the beginning of the buffer.
+ */
+static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n)
+{
+	memmove(buf->ptr, buf->ptr + n, buf->len - n);
+	buf->len -= n;
+}
+
+/**
+ * sshp_buf_read_from_fifo() - Transfer data from a fifo to the buffer.
+ * @buf: The buffer to write the data into.
+ * @fifo: The fifo to read the data from.
+ *
+ * Transfers the data contained in the fifo to the buffer, removing it from
+ * the fifo. This function will try to transfer as much data as possible,
+ * limited either by the remaining space in the buffer or by the number of
+ * bytes available in the fifo.
+ *
+ * Return: Returns the number of bytes transferred.
+ */
+static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf,
+					     struct kfifo *fifo)
+{
+	size_t n;
+
+	n = kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len);
+	buf->len += n;
+
+	return n;
+}
+
+/**
+ * sshp_buf_span_from() - Initialize a span from the given buffer and offset.
+ * @buf: The buffer to create the span from.
+ * @offset: The offset in the buffer at which the span should start.
+ * @span: The span to initialize (output).
+ *
+ * Initializes the provided span to point to the memory at the given offset in
+ * the buffer, with the length of the span being capped by the number of bytes
+ * used in the buffer after the offset (i.e. bytes remaining after the
+ * offset).
+ *
+ * Warning: This function does not validate that @offset is less than or equal
+ * to the number of bytes used in the buffer or the buffer capacity. This must
+ * be guaranteed by the caller.
+ */
+static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset,
+				      struct ssam_span *span)
+{
+	span->ptr = buf->ptr + offset;
+	span->len = buf->len - offset;
+}
+
+bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem);
+
+int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
+		     struct ssh_frame **frame, struct ssam_span *payload,
+		     size_t maxlen);
+
+int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
+		       struct ssh_command **command,
+		       struct ssam_span *command_data);
+
+#endif /* _SURFACE_AGGREGATOR_SSH_PARSER_H */
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
new file mode 100644
index 0000000000..90634dcaca
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
@@ -0,0 +1,1274 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SSH request transport layer.
+ * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <asm/unaligned.h> +#include <linux/atomic.h> +#include <linux/completion.h> +#include <linux/error-injection.h> +#include <linux/ktime.h> +#include <linux/limits.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include <linux/surface_aggregator/serial_hub.h> +#include <linux/surface_aggregator/controller.h> + +#include "ssh_packet_layer.h" +#include "ssh_request_layer.h" + +#include "trace.h" + +/* + * SSH_RTL_REQUEST_TIMEOUT - Request timeout. + * + * Timeout as ktime_t delta for request responses. If we have not received a + * response in this time-frame after finishing the underlying packet + * transmission, the request will be completed with %-ETIMEDOUT as status + * code. + */ +#define SSH_RTL_REQUEST_TIMEOUT ms_to_ktime(3000) + +/* + * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity. + * + * Time-resolution for timeouts. Should be larger than one jiffy to avoid + * direct re-scheduling of reaper work_struct. + */ +#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50)) + +/* + * SSH_RTL_MAX_PENDING - Maximum number of pending requests. + * + * Maximum number of requests concurrently waiting to be completed (i.e. + * waiting for the corresponding packet transmission to finish if they don't + * have a response or waiting for a response if they have one). + */ +#define SSH_RTL_MAX_PENDING 3 + +/* + * SSH_RTL_TX_BATCH - Maximum number of requests processed per work execution. + * Used to prevent livelocking of the workqueue. Value chosen via educated + * guess, may be adjusted. + */ +#define SSH_RTL_TX_BATCH 10 + +#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION + +/** + * ssh_rtl_should_drop_response() - Error injection hook to drop request + * responses. + * + * Useful to cause request transmission timeouts in the driver by dropping the + * response to a request. 
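+ *
+ * With CONFIG_FUNCTION_ERROR_INJECTION enabled, the return value of this
+ * hook can be overridden at runtime, for example through the kernel's
+ * function-level fault-injection facilities (see
+ * Documentation/fault-injection/) or via bpf_override_return(), causing
+ * it to return %true and thus drop the response.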
+ */ +static noinline bool ssh_rtl_should_drop_response(void) +{ + return false; +} +ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE); + +#else + +static inline bool ssh_rtl_should_drop_response(void) +{ + return false; +} + +#endif + +static u16 ssh_request_get_rqid(struct ssh_request *rqst) +{ + return get_unaligned_le16(rqst->packet.data.ptr + + SSH_MSGOFFSET_COMMAND(rqid)); +} + +static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst) +{ + if (!rqst->packet.data.ptr) + return U32_MAX; + + return ssh_request_get_rqid(rqst); +} + +static void ssh_rtl_queue_remove(struct ssh_request *rqst) +{ + struct ssh_rtl *rtl = ssh_request_rtl(rqst); + + spin_lock(&rtl->queue.lock); + + if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) { + spin_unlock(&rtl->queue.lock); + return; + } + + list_del(&rqst->node); + + spin_unlock(&rtl->queue.lock); + ssh_request_put(rqst); +} + +static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl) +{ + bool empty; + + spin_lock(&rtl->queue.lock); + empty = list_empty(&rtl->queue.head); + spin_unlock(&rtl->queue.lock); + + return empty; +} + +static void ssh_rtl_pending_remove(struct ssh_request *rqst) +{ + struct ssh_rtl *rtl = ssh_request_rtl(rqst); + + spin_lock(&rtl->pending.lock); + + if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) { + spin_unlock(&rtl->pending.lock); + return; + } + + atomic_dec(&rtl->pending.count); + list_del(&rqst->node); + + spin_unlock(&rtl->pending.lock); + + ssh_request_put(rqst); +} + +static int ssh_rtl_tx_pending_push(struct ssh_request *rqst) +{ + struct ssh_rtl *rtl = ssh_request_rtl(rqst); + + spin_lock(&rtl->pending.lock); + + if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) { + spin_unlock(&rtl->pending.lock); + return -EINVAL; + } + + if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) { + spin_unlock(&rtl->pending.lock); + return -EALREADY; + } + + atomic_inc(&rtl->pending.count); + list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head); + + spin_unlock(&rtl->pending.lock); + return 0; +} + +static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status) +{ + struct ssh_rtl *rtl = ssh_request_rtl(rqst); + + trace_ssam_request_complete(rqst, status); + + /* rtl/ptl may not be set if we're canceling before submitting. */ + rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n", + ssh_request_get_rqid_safe(rqst), status); + + rqst->ops->complete(rqst, NULL, NULL, status); +} + +static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst, + const struct ssh_command *cmd, + const struct ssam_span *data) +{ + struct ssh_rtl *rtl = ssh_request_rtl(rqst); + + trace_ssam_request_complete(rqst, 0); + + rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n", + ssh_request_get_rqid(rqst)); + + rqst->ops->complete(rqst, cmd, data, 0); +} + +static bool ssh_rtl_tx_can_process(struct ssh_request *rqst) +{ + struct ssh_rtl *rtl = ssh_request_rtl(rqst); + + if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state)) + return !atomic_read(&rtl->pending.count); + + return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING; +} + +static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl) +{ + struct ssh_request *rqst = ERR_PTR(-ENOENT); + struct ssh_request *p, *n; + + spin_lock(&rtl->queue.lock); + + /* Find first non-locked request and remove it. 
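+	 * Locked requests are in the process of being canceled and must not
+	 * be started; flush requests may only proceed once no other request
+	 * is pending, and regular requests only while fewer than
+	 * SSH_RTL_MAX_PENDING requests are in flight (see
+	 * ssh_rtl_tx_can_process()).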
+	 */
+	list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
+		if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
+			continue;
+
+		if (!ssh_rtl_tx_can_process(p)) {
+			rqst = ERR_PTR(-EBUSY);
+			break;
+		}
+
+		/* Remove from queue and mark as transmitting. */
+		set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
+		/* Ensure state never gets zero. */
+		smp_mb__before_atomic();
+		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
+
+		list_del(&p->node);
+
+		rqst = p;
+		break;
+	}
+
+	spin_unlock(&rtl->queue.lock);
+	return rqst;
+}
+
+static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
+{
+	struct ssh_request *rqst;
+	int status;
+
+	/* Get and prepare next request for transmit. */
+	rqst = ssh_rtl_tx_next(rtl);
+	if (IS_ERR(rqst))
+		return PTR_ERR(rqst);
+
+	/* Add it to/mark it as pending. */
+	status = ssh_rtl_tx_pending_push(rqst);
+	if (status) {
+		ssh_request_put(rqst);
+		return -EAGAIN;
+	}
+
+	/* Submit packet. */
+	status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
+	if (status == -ESHUTDOWN) {
+		/*
+		 * Packet has been refused due to the packet layer shutting
+		 * down. Complete it here.
+		 */
+		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
+		/*
+		 * Note: A barrier is not required here, as there are only two
+		 * references in the system at this point: The one that we have,
+		 * and the other one that belongs to the pending set. Due to the
+		 * request being marked as "transmitting", our process is the
+		 * only one allowed to remove the pending node and change the
+		 * state. Normally, the task would fall to the packet callback,
+		 * but as this is a path where submission failed, this callback
+		 * will never be executed.
+		 */
+
+		ssh_rtl_pending_remove(rqst);
+		ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
+
+		ssh_request_put(rqst);
+		return -ESHUTDOWN;
+
+	} else if (status) {
+		/*
+		 * If submitting the packet failed and the packet layer isn't
+		 * shutting down, the packet has either been submitted/queued
+		 * before (-EALREADY, which cannot happen as we have
+		 * guaranteed that requests cannot be re-submitted), or the
+		 * packet was marked as locked (-EINVAL). To mark the packet
+		 * locked at this stage, the request, and thus the packet
+		 * itself, had to have been canceled. Simply drop the
+		 * reference. Cancellation itself will remove it from the set
+		 * of pending requests.
+		 */
+
+		WARN_ON(status != -EINVAL);
+
+		ssh_request_put(rqst);
+		return -EAGAIN;
+	}
+
+	ssh_request_put(rqst);
+	return 0;
+}
+
+static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
+{
+	if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
+		return false;
+
+	if (ssh_rtl_queue_empty(rtl))
+		return false;
+
+	return schedule_work(&rtl->tx.work);
+}
+
+static void ssh_rtl_tx_work_fn(struct work_struct *work)
+{
+	struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
+	unsigned int iterations = SSH_RTL_TX_BATCH;
+	int status;
+
+	/*
+	 * Try to be nice and not block/live-lock the workqueue: Run a maximum
+	 * of 10 tries, then re-submit if necessary. This should not be
+	 * necessary for normal execution, but guarantee it anyway.
+	 */
+	do {
+		status = ssh_rtl_tx_try_process_one(rtl);
+		if (status == -ENOENT || status == -EBUSY)
+			return; /* No more requests to process. */
+
+		if (status == -ESHUTDOWN) {
+			/*
+			 * Packet system shutting down. No new packets can be
+			 * transmitted. Return silently, the party initiating
+			 * the shutdown should handle the rest.
+			 */
+			return;
+		}
+
+		WARN_ON(status != 0 && status != -EAGAIN);
+	} while (--iterations);
+
+	/* Out of tries, reschedule.
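+	 * ssh_rtl_tx_schedule() re-queues this work item, so any requests
+	 * still on the queue are processed in a later batch, giving other
+	 * work items a chance to run in between.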
*/ + ssh_rtl_tx_schedule(rtl); +} + +/** + * ssh_rtl_submit() - Submit a request to the transport layer. + * @rtl: The request transport layer. + * @rqst: The request to submit. + * + * Submits a request to the transport layer. A single request may not be + * submitted multiple times without reinitializing it. + * + * Return: Returns zero on success, %-EINVAL if the request type is invalid or + * the request has been canceled prior to submission, %-EALREADY if the + * request has already been submitted, or %-ESHUTDOWN in case the request + * transport layer has been shut down. + */ +int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst) +{ + trace_ssam_request_submit(rqst); + + /* + * Ensure that requests expecting a response are sequenced. If this + * invariant ever changes, see the comment in ssh_rtl_complete() on what + * is required to be changed in the code. + */ + if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state)) + if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state)) + return -EINVAL; + + spin_lock(&rtl->queue.lock); + + /* + * Try to set ptl and check if this request has already been submitted. + * + * Must be inside lock as we might run into a lost update problem + * otherwise: If this were outside of the lock, cancellation in + * ssh_rtl_cancel_nonpending() may run after we've set the ptl + * reference but before we enter the lock. In that case, we'd detect + * that the request is being added to the queue and would try to remove + * it from that, but removal might fail because it hasn't actually been + * added yet. By putting this cmpxchg in the critical section, we + * ensure that the queuing detection only triggers when we are already + * in the critical section and the remove process will wait until the + * push operation has been completed (via lock) due to that. Only then, + * we can safely try to remove it. + */ + if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) { + spin_unlock(&rtl->queue.lock); + return -EALREADY; + } + + /* + * Ensure that we set ptl reference before we continue modifying state. + * This is required for non-pending cancellation. This barrier is paired + * with the one in ssh_rtl_cancel_nonpending(). + * + * By setting the ptl reference before we test for "locked", we can + * check if the "locked" test may have already run. See comments in + * ssh_rtl_cancel_nonpending() for more detail. + */ + smp_mb__after_atomic(); + + if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) { + spin_unlock(&rtl->queue.lock); + return -ESHUTDOWN; + } + + if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) { + spin_unlock(&rtl->queue.lock); + return -EINVAL; + } + + set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state); + list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head); + + spin_unlock(&rtl->queue.lock); + + ssh_rtl_tx_schedule(rtl); + return 0; +} + +static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now, + ktime_t expires) +{ + unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now)); + ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION); + + spin_lock(&rtl->rtx_timeout.lock); + + /* Re-adjust / schedule reaper only if it is above resolution delta. 
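+	 * That is, only reschedule if the new expiration is more than one
+	 * resolution step earlier than the currently scheduled one.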
*/ + if (ktime_before(aexp, rtl->rtx_timeout.expires)) { + rtl->rtx_timeout.expires = expires; + mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta); + } + + spin_unlock(&rtl->rtx_timeout.lock); +} + +static void ssh_rtl_timeout_start(struct ssh_request *rqst) +{ + struct ssh_rtl *rtl = ssh_request_rtl(rqst); + ktime_t timestamp = ktime_get_coarse_boottime(); + ktime_t timeout = rtl->rtx_timeout.timeout; + + if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) + return; + + /* + * Note: The timestamp gets set only once. This happens on the packet + * callback. All other access to it is read-only. + */ + WRITE_ONCE(rqst->timestamp, timestamp); + /* + * Ensure timestamp is set before starting the reaper. Paired with + * implicit barrier following check on ssh_request_get_expiration() in + * ssh_rtl_timeout_reap. + */ + smp_mb__after_atomic(); + + ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout); +} + +static void ssh_rtl_complete(struct ssh_rtl *rtl, + const struct ssh_command *command, + const struct ssam_span *command_data) +{ + struct ssh_request *r = NULL; + struct ssh_request *p, *n; + u16 rqid = get_unaligned_le16(&command->rqid); + + trace_ssam_rx_response_received(command, command_data->len); + + /* + * Get request from pending based on request ID and mark it as response + * received and locked. + */ + spin_lock(&rtl->pending.lock); + list_for_each_entry_safe(p, n, &rtl->pending.head, node) { + /* We generally expect requests to be processed in order. */ + if (unlikely(ssh_request_get_rqid(p) != rqid)) + continue; + + /* Simulate response timeout. */ + if (ssh_rtl_should_drop_response()) { + spin_unlock(&rtl->pending.lock); + + trace_ssam_ei_rx_drop_response(p); + rtl_info(rtl, "request error injection: dropping response for request %p\n", + &p->packet); + return; + } + + /* + * Mark as "response received" and "locked" as we're going to + * complete it. + */ + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state); + set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state); + /* Ensure state never gets zero. */ + smp_mb__before_atomic(); + clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state); + + atomic_dec(&rtl->pending.count); + list_del(&p->node); + + r = p; + break; + } + spin_unlock(&rtl->pending.lock); + + if (!r) { + rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n", + rqid); + return; + } + + /* If the request hasn't been completed yet, we will do this now. */ + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) { + ssh_request_put(r); + ssh_rtl_tx_schedule(rtl); + return; + } + + /* + * Make sure the request has been transmitted. In case of a sequenced + * request, we are guaranteed that the completion callback will run on + * the receiver thread directly when the ACK for the packet has been + * received. Similarly, this function is guaranteed to run on the + * receiver thread. Thus we are guaranteed that if the packet has been + * successfully transmitted and received an ACK, the transmitted flag + * has been set and is visible here. + * + * We are currently not handling unsequenced packets here, as those + * should never expect a response as ensured in ssh_rtl_submit. If this + * ever changes, one would have to test for + * + * (r->state & (transmitting | transmitted)) + * + * on unsequenced packets to determine if they could have been + * transmitted. There are no synchronization guarantees as in the + * sequenced case, since, in this case, the callback function will not + * run on the same thread. 
Thus an exact determination is impossible.
+	 */
+	if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
+		rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
+			rqid);
+
+		/*
+		 * NB: The timeout has already been canceled and the request
+		 * has already been removed from pending and marked as locked
+		 * and completed. As we receive a "false" response, the packet
+		 * might still be queued though.
+		 */
+		ssh_rtl_queue_remove(r);
+
+		ssh_rtl_complete_with_status(r, -EREMOTEIO);
+		ssh_request_put(r);
+
+		ssh_rtl_tx_schedule(rtl);
+		return;
+	}
+
+	/*
+	 * NB: The timeout has already been canceled and the request has
+	 * already been removed from pending and marked as locked and
+	 * completed. The request can no longer be queued either, as it has
+	 * been marked as transmitting and later transmitted. Thus there is no
+	 * need to remove it from anywhere.
+	 */
+
+	ssh_rtl_complete_with_rsp(r, command, command_data);
+	ssh_request_put(r);
+
+	ssh_rtl_tx_schedule(rtl);
+}
+
+static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
+{
+	struct ssh_rtl *rtl;
+	unsigned long flags, fixed;
+	bool remove;
+
+	/*
+	 * Handle unsubmitted request: Try to mark the packet as locked,
+	 * expecting the state to be zero (i.e. unsubmitted). Note that, if
+	 * setting the state worked, we might still be adding the packet to the
+	 * queue in a currently executing submit call. In that case, however,
+	 * ptl reference must have been set previously, as locked is checked
+	 * after setting ptl. Furthermore, when the ptl reference is set, the
+	 * submission process is guaranteed to have entered the critical
+	 * section. Thus only if we successfully locked this request and ptl is
+	 * NULL, we have successfully removed the request, i.e. we are
+	 * guaranteed that, due to the "locked" check in ssh_rtl_submit(), the
+	 * packet will never be added. Otherwise, we need to try and grab it
+	 * from the queue, where we are now guaranteed that the packet is or
+	 * has been queued, due to the critical section.
+	 *
+	 * Note that if the cmpxchg() fails, we are guaranteed that ptl has
+	 * been set and is non-NULL, as states can only be nonzero after this
+	 * has been set. Also note that we need to fetch the static (type)
+	 * flags to ensure that they don't cause the cmpxchg() to fail.
+	 */
+	fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
+	flags = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
+
+	/*
+	 * Force correct ordering with regard to state and ptl reference
+	 * access, to safeguard cancellation against a lost-update problem
+	 * with concurrent submission. First try to exchange state, then also
+	 * check ptl if that worked. This barrier is paired with the one in
+	 * ssh_rtl_submit().
+	 */
+	smp_mb__after_atomic();
+
+	if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
+		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+			return true;
+
+		ssh_rtl_complete_with_status(r, -ECANCELED);
+		return true;
+	}
+
+	rtl = ssh_request_rtl(r);
+	spin_lock(&rtl->queue.lock);
+
+	/*
+	 * Note: 1) Requests cannot be re-submitted. 2) If a request is
+	 * queued, it cannot be "transmitting"/"pending" yet. Thus, if we
+	 * successfully remove the request here, we have removed all its
+	 * occurrences in the system.
+	 */
+
+	remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
+	if (!remove) {
+		spin_unlock(&rtl->queue.lock);
+		return false;
+	}
+
+	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+	list_del(&r->node);
+
+	spin_unlock(&rtl->queue.lock);
+
+	ssh_request_put(r); /* Drop reference obtained from queue.
*/ + + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) + return true; + + ssh_rtl_complete_with_status(r, -ECANCELED); + return true; +} + +static bool ssh_rtl_cancel_pending(struct ssh_request *r) +{ + /* If the packet is already locked, it's going to be removed shortly. */ + if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state)) + return true; + + /* + * Now that we have locked the packet, we have guaranteed that it can't + * be added to the system any more. If ptl is NULL, the locked + * check in ssh_rtl_submit() has not been run and any submission, + * currently in progress or called later, won't add the packet. Thus we + * can directly complete it. + * + * The implicit memory barrier of test_and_set_bit() should be enough + * to ensure that the correct order (first lock, then check ptl) is + * ensured. This is paired with the barrier in ssh_rtl_submit(). + */ + if (!READ_ONCE(r->packet.ptl)) { + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) + return true; + + ssh_rtl_complete_with_status(r, -ECANCELED); + return true; + } + + /* + * Try to cancel the packet. If the packet has not been completed yet, + * this will subsequently (and synchronously) call the completion + * callback of the packet, which will complete the request. + */ + ssh_ptl_cancel(&r->packet); + + /* + * If the packet has been completed with success, i.e. has not been + * canceled by the above call, the request may not have been completed + * yet (may be waiting for a response). Check if we need to do this + * here. + */ + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) + return true; + + ssh_rtl_queue_remove(r); + ssh_rtl_pending_remove(r); + ssh_rtl_complete_with_status(r, -ECANCELED); + + return true; +} + +/** + * ssh_rtl_cancel() - Cancel request. + * @rqst: The request to cancel. + * @pending: Whether to also cancel pending requests. + * + * Cancels the given request. If @pending is %false, this will not cancel + * pending requests, i.e. requests that have already been submitted to the + * packet layer but not been completed yet. If @pending is %true, this will + * cancel the given request regardless of the state it is in. + * + * If the request has been canceled by calling this function, both completion + * and release callbacks of the request will be executed in a reasonable + * time-frame. This may happen during execution of this function, however, + * there is no guarantee for this. For example, a request currently + * transmitting will be canceled/completed only after transmission has + * completed, and the respective callbacks will be executed on the transmitter + * thread, which may happen during, but also some time after execution of the + * cancel function. + * + * Return: Returns %true if the given request has been canceled or completed, + * either by this function or prior to calling this function, %false + * otherwise. If @pending is %true, this function will always return %true. + */ +bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending) +{ + struct ssh_rtl *rtl; + bool canceled; + + if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state)) + return true; + + trace_ssam_request_cancel(rqst); + + if (pending) + canceled = ssh_rtl_cancel_pending(rqst); + else + canceled = ssh_rtl_cancel_nonpending(rqst); + + /* Note: rtl may be NULL if request has not been submitted yet. 
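+	 * In that case, ssh_request_rtl() resolves to %NULL, as the ptl
+	 * reference of the underlying packet is only set during submission.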
*/ + rtl = ssh_request_rtl(rqst); + if (canceled && rtl) + ssh_rtl_tx_schedule(rtl); + + return canceled; +} + +static void ssh_rtl_packet_callback(struct ssh_packet *p, int status) +{ + struct ssh_request *r = to_ssh_request(p); + + if (unlikely(status)) { + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state); + + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) + return; + + /* + * The packet may get canceled even though it has not been + * submitted yet. The request may still be queued. Check the + * queue and remove it if necessary. As the timeout would have + * been started in this function on success, there's no need + * to cancel it here. + */ + ssh_rtl_queue_remove(r); + ssh_rtl_pending_remove(r); + ssh_rtl_complete_with_status(r, status); + + ssh_rtl_tx_schedule(ssh_request_rtl(r)); + return; + } + + /* Update state: Mark as transmitted and clear transmitting. */ + set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state); + /* Ensure state never gets zero. */ + smp_mb__before_atomic(); + clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state); + + /* If we expect a response, we just need to start the timeout. */ + if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) { + /* + * Note: This is the only place where the timestamp gets set, + * all other access to it is read-only. + */ + ssh_rtl_timeout_start(r); + return; + } + + /* + * If we don't expect a response, lock, remove, and complete the + * request. Note that, at this point, the request is guaranteed to have + * left the queue and no timeout has been started. Thus we only need to + * remove it from pending. If the request has already been completed (it + * may have been canceled) return. + */ + + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state); + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) + return; + + ssh_rtl_pending_remove(r); + ssh_rtl_complete_with_status(r, 0); + + ssh_rtl_tx_schedule(ssh_request_rtl(r)); +} + +static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout) +{ + ktime_t timestamp = READ_ONCE(r->timestamp); + + if (timestamp != KTIME_MAX) + return ktime_add(timestamp, timeout); + else + return KTIME_MAX; +} + +static void ssh_rtl_timeout_reap(struct work_struct *work) +{ + struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work); + struct ssh_request *r, *n; + LIST_HEAD(claimed); + ktime_t now = ktime_get_coarse_boottime(); + ktime_t timeout = rtl->rtx_timeout.timeout; + ktime_t next = KTIME_MAX; + + trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count)); + + /* + * Mark reaper as "not pending". This is done before checking any + * requests to avoid lost-update type problems. + */ + spin_lock(&rtl->rtx_timeout.lock); + rtl->rtx_timeout.expires = KTIME_MAX; + spin_unlock(&rtl->rtx_timeout.lock); + + spin_lock(&rtl->pending.lock); + list_for_each_entry_safe(r, n, &rtl->pending.head, node) { + ktime_t expires = ssh_request_get_expiration(r, timeout); + + /* + * Check if the timeout hasn't expired yet. Find out next + * expiration date to be handled after this run. + */ + if (ktime_after(expires, now)) { + next = ktime_before(expires, next) ? expires : next; + continue; + } + + /* Avoid further transitions if locked. */ + if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state)) + continue; + + /* + * We have now marked the packet as locked. Thus it cannot be + * added to the pending or queued lists again after we've + * removed it here. We can therefore re-use the node of this + * packet temporarily. 
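+	 * The request is moved onto the local claimed list below and
+	 * canceled/completed only after the pending lock has been dropped.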
+ */ + + clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state); + + atomic_dec(&rtl->pending.count); + list_move_tail(&r->node, &claimed); + } + spin_unlock(&rtl->pending.lock); + + /* Cancel and complete the request. */ + list_for_each_entry_safe(r, n, &claimed, node) { + trace_ssam_request_timeout(r); + + /* + * At this point we've removed the packet from pending. This + * means that we've obtained the last (only) reference of the + * system to it. Thus we can just complete it. + */ + if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) + ssh_rtl_complete_with_status(r, -ETIMEDOUT); + + /* + * Drop the reference we've obtained by removing it from the + * pending set. + */ + list_del(&r->node); + ssh_request_put(r); + } + + /* Ensure that the reaper doesn't run again immediately. */ + next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION)); + if (next != KTIME_MAX) + ssh_rtl_timeout_reaper_mod(rtl, now, next); + + ssh_rtl_tx_schedule(rtl); +} + +static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd, + const struct ssam_span *data) +{ + trace_ssam_rx_event_received(cmd, data->len); + + rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n", + get_unaligned_le16(&cmd->rqid)); + + rtl->ops.handle_event(rtl, cmd, data); +} + +static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data) +{ + struct ssh_rtl *rtl = to_ssh_rtl(p, ptl); + struct device *dev = &p->serdev->dev; + struct ssh_command *command; + struct ssam_span command_data; + + if (sshp_parse_command(dev, data, &command, &command_data)) + return; + + /* + * Check if the message was intended for us. If not, drop it. + * + * Note: We will need to change this to handle debug messages. On newer + * generation devices, these seem to be sent to SSAM_SSH_TID_DEBUG. We + * as host can still receive them as they can be forwarded via an + * override option on SAM, but doing so does not change the target ID + * to SSAM_SSH_TID_HOST. + */ + if (command->tid != SSAM_SSH_TID_HOST) { + rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n", + command->tid); + return; + } + + if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid))) + ssh_rtl_rx_event(rtl, command, &command_data); + else + ssh_rtl_complete(rtl, command, &command_data); +} + +static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data) +{ + if (!data->len) { + ptl_err(p, "rtl: rx: no data frame payload\n"); + return; + } + + switch (data->ptr[0]) { + case SSH_PLD_TYPE_CMD: + ssh_rtl_rx_command(p, data); + break; + + default: + ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n", + data->ptr[0]); + break; + } +} + +static void ssh_rtl_packet_release(struct ssh_packet *p) +{ + struct ssh_request *rqst; + + rqst = to_ssh_request(p); + rqst->ops->release(rqst); +} + +static const struct ssh_packet_ops ssh_rtl_packet_ops = { + .complete = ssh_rtl_packet_callback, + .release = ssh_rtl_packet_release, +}; + +/** + * ssh_request_init() - Initialize SSH request. + * @rqst: The request to initialize. + * @flags: Request flags, determining the type of the request. + * @ops: Request operations. + * + * Initializes the given SSH request and underlying packet. Sets the message + * buffer pointer to %NULL and the message buffer length to zero. This buffer + * has to be set separately via ssh_request_set_data() before submission and + * must contain a valid SSH request message. + * + * Return: Returns zero on success or %-EINVAL if the given flags are invalid. 
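+ *
+ * Illustrative sketch (not part of the original sources; rqst, ops, buf,
+ * len, and rtl are assumptions of this example, with buf/len describing
+ * a valid SSH request message as noted above):
+ *
+ *	status = ssh_request_init(rqst, SSAM_REQUEST_HAS_RESPONSE, &ops);
+ *	if (status)
+ *		return status;
+ *
+ *	ssh_request_set_data(rqst, buf, len);
+ *	status = ssh_rtl_submit(rtl, rqst);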
+ */ +int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags, + const struct ssh_request_ops *ops) +{ + unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT); + + /* Unsequenced requests cannot have a response. */ + if (flags & SSAM_REQUEST_UNSEQUENCED && flags & SSAM_REQUEST_HAS_RESPONSE) + return -EINVAL; + + if (!(flags & SSAM_REQUEST_UNSEQUENCED)) + type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT); + + ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0), + &ssh_rtl_packet_ops); + + INIT_LIST_HEAD(&rqst->node); + + rqst->state = 0; + if (flags & SSAM_REQUEST_HAS_RESPONSE) + rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT); + + rqst->timestamp = KTIME_MAX; + rqst->ops = ops; + + return 0; +} + +/** + * ssh_rtl_init() - Initialize request transport layer. + * @rtl: The request transport layer to initialize. + * @serdev: The underlying serial device, i.e. the lower-level transport. + * @ops: Request transport layer operations. + * + * Initializes the given request transport layer and associated packet + * transport layer. Transmitter and receiver threads must be started + * separately via ssh_rtl_start(), after the request-layer has been + * initialized and the lower-level serial device layer has been set up. + * + * Return: Returns zero on success and a nonzero error code on failure. + */ +int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev, + const struct ssh_rtl_ops *ops) +{ + struct ssh_ptl_ops ptl_ops; + int status; + + ptl_ops.data_received = ssh_rtl_rx_data; + + status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops); + if (status) + return status; + + spin_lock_init(&rtl->queue.lock); + INIT_LIST_HEAD(&rtl->queue.head); + + spin_lock_init(&rtl->pending.lock); + INIT_LIST_HEAD(&rtl->pending.head); + atomic_set_release(&rtl->pending.count, 0); + + INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn); + + spin_lock_init(&rtl->rtx_timeout.lock); + rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT; + rtl->rtx_timeout.expires = KTIME_MAX; + INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap); + + rtl->ops = *ops; + + return 0; +} + +/** + * ssh_rtl_destroy() - Deinitialize request transport layer. + * @rtl: The request transport layer to deinitialize. + * + * Deinitializes the given request transport layer and frees resources + * associated with it. If receiver and/or transmitter threads have been + * started, the layer must first be shut down via ssh_rtl_shutdown() before + * this function can be called. + */ +void ssh_rtl_destroy(struct ssh_rtl *rtl) +{ + ssh_ptl_destroy(&rtl->ptl); +} + +/** + * ssh_rtl_start() - Start request transmitter and receiver. + * @rtl: The request transport layer. + * + * Return: Returns zero on success, a negative error code on failure. 
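+ *
+ * A typical bring-up sequence (illustrative only, not part of the
+ * original sources; rtl, serdev, and ops are assumptions of this
+ * example) initializes the layer first and tears it down again if
+ * starting fails:
+ *
+ *	status = ssh_rtl_init(rtl, serdev, &ops);
+ *	if (status)
+ *		return status;
+ *
+ *	status = ssh_rtl_start(rtl);
+ *	if (status) {
+ *		ssh_rtl_destroy(rtl);
+ *		return status;
+ *	}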
+ */ +int ssh_rtl_start(struct ssh_rtl *rtl) +{ + int status; + + status = ssh_ptl_tx_start(&rtl->ptl); + if (status) + return status; + + ssh_rtl_tx_schedule(rtl); + + status = ssh_ptl_rx_start(&rtl->ptl); + if (status) { + ssh_rtl_flush(rtl, msecs_to_jiffies(5000)); + ssh_ptl_tx_stop(&rtl->ptl); + return status; + } + + return 0; +} + +struct ssh_flush_request { + struct ssh_request base; + struct completion completion; + int status; +}; + +static void ssh_rtl_flush_request_complete(struct ssh_request *r, + const struct ssh_command *cmd, + const struct ssam_span *data, + int status) +{ + struct ssh_flush_request *rqst; + + rqst = container_of(r, struct ssh_flush_request, base); + rqst->status = status; +} + +static void ssh_rtl_flush_request_release(struct ssh_request *r) +{ + struct ssh_flush_request *rqst; + + rqst = container_of(r, struct ssh_flush_request, base); + complete_all(&rqst->completion); +} + +static const struct ssh_request_ops ssh_rtl_flush_request_ops = { + .complete = ssh_rtl_flush_request_complete, + .release = ssh_rtl_flush_request_release, +}; + +/** + * ssh_rtl_flush() - Flush the request transport layer. + * @rtl: request transport layer + * @timeout: timeout for the flush operation in jiffies + * + * Queue a special flush request and wait for its completion. This request + * will be completed after all other currently queued and pending requests + * have been completed. Instead of a normal data packet, this request submits + * a special flush packet, meaning that upon completion, also the underlying + * packet transport layer has been flushed. + * + * Flushing the request layer guarantees that all previously submitted + * requests have been fully completed before this call returns. Additionally, + * flushing blocks execution of all later submitted requests until the flush + * has been completed. + * + * If the caller ensures that no new requests are submitted after a call to + * this function, the request transport layer is guaranteed to have no + * remaining requests when this call returns. The same guarantee does not hold + * for the packet layer, on which control packets may still be queued after + * this call. + * + * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has + * been canceled as a result of the timeout, or %-ESHUTDOWN if the packet + * and/or request transport layer has been shut down before this call. May + * also return %-EINTR if the underlying packet transmission has been + * interrupted. + */ +int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout) +{ + const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED; + struct ssh_flush_request rqst; + int status; + + ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops); + rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT); + rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0); + rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT); + + init_completion(&rqst.completion); + + status = ssh_rtl_submit(rtl, &rqst.base); + if (status) + return status; + + ssh_request_put(&rqst.base); + + if (!wait_for_completion_timeout(&rqst.completion, timeout)) { + ssh_rtl_cancel(&rqst.base, true); + wait_for_completion(&rqst.completion); + } + + WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED && + rqst.status != -ESHUTDOWN && rqst.status != -EINTR); + + return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status; +} + +/** + * ssh_rtl_shutdown() - Shut down request transport layer. + * @rtl: The request transport layer. 
+ *
+ * Shuts down the request transport layer, removing and canceling all queued
+ * and pending requests. Requests canceled by this operation will be completed
+ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
+ * stopped, and the lower-level packet layer will be shut down.
+ *
+ * As a result of this function, the transport layer will be marked as shut
+ * down. Submission of requests after the transport layer has been shut down
+ * will fail with %-ESHUTDOWN.
+ */
+void ssh_rtl_shutdown(struct ssh_rtl *rtl)
+{
+	struct ssh_request *r, *n;
+	LIST_HEAD(claimed);
+	int pending;
+
+	set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
+	/*
+	 * Ensure that the layer gets marked as shut-down before actually
+	 * stopping it. In combination with the check in ssh_rtl_submit(),
+	 * this guarantees that no new requests can be added and all already
+	 * queued requests are properly canceled.
+	 */
+	smp_mb__after_atomic();
+
+	/* Remove requests from queue. */
+	spin_lock(&rtl->queue.lock);
+	list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
+		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+		/* Ensure state never gets zero. */
+		smp_mb__before_atomic();
+		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
+
+		list_move_tail(&r->node, &claimed);
+	}
+	spin_unlock(&rtl->queue.lock);
+
+	/*
+	 * We have now guaranteed that the queue is empty and no more new
+	 * requests can be submitted (i.e. it will stay empty). This means that
+	 * calling ssh_rtl_tx_schedule() will not schedule tx.work any more. So
+	 * we can simply call cancel_work_sync() on tx.work here and when that
+	 * returns, we've locked it down. This also means that after this call,
+	 * we don't submit any more packets to the underlying packet layer, so
+	 * we can also shut that down.
+	 */
+
+	cancel_work_sync(&rtl->tx.work);
+	ssh_ptl_shutdown(&rtl->ptl);
+	cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
+
+	/*
+	 * Shutting down the packet layer should also have canceled all
+	 * requests. Thus the pending set should be empty. Attempt to handle
+	 * this gracefully anyway, even though this should be dead code.
+	 */
+
+	pending = atomic_read(&rtl->pending.count);
+	if (WARN_ON(pending)) {
+		spin_lock(&rtl->pending.lock);
+		list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
+			set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+			/* Ensure state never gets zero. */
+			smp_mb__before_atomic();
+			clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
+
+			list_move_tail(&r->node, &claimed);
+		}
+		spin_unlock(&rtl->pending.lock);
+	}
+
+	/* Finally, cancel and complete the requests we claimed before. */
+	list_for_each_entry_safe(r, n, &claimed, node) {
+		/*
+		 * We need test_and_set() because we still might compete with
+		 * cancellation.
+		 */
+		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+			ssh_rtl_complete_with_status(r, -ESHUTDOWN);
+
+		/*
+		 * Drop the reference we've obtained by removing it from the
+		 * lists.
+		 */
+		list_del(&r->node);
+		ssh_request_put(r);
+	}
+}
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.h b/drivers/platform/surface/aggregator/ssh_request_layer.h
new file mode 100644
index 0000000000..4e387a0313
--- /dev/null
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SSH request transport layer.
+ * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H +#define _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H + +#include <linux/atomic.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#include <linux/surface_aggregator/serial_hub.h> +#include <linux/surface_aggregator/controller.h> + +#include "ssh_packet_layer.h" + +/** + * enum ssh_rtl_state_flags - State-flags for &struct ssh_rtl. + * + * @SSH_RTL_SF_SHUTDOWN_BIT: + * Indicates that the request transport layer has been shut down or is + * being shut down and should not accept any new requests. + */ +enum ssh_rtl_state_flags { + SSH_RTL_SF_SHUTDOWN_BIT, +}; + +/** + * struct ssh_rtl_ops - Callback operations for request transport layer. + * @handle_event: Function called when a SSH event has been received. The + * specified function takes the request layer, received command + * struct, and corresponding payload as arguments. If the event + * has no payload, the payload span is empty (not %NULL). + */ +struct ssh_rtl_ops { + void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd, + const struct ssam_span *data); +}; + +/** + * struct ssh_rtl - SSH request transport layer. + * @ptl: Underlying packet transport layer. + * @state: State(-flags) of the transport layer. + * @queue: Request submission queue. + * @queue.lock: Lock for modifying the request submission queue. + * @queue.head: List-head of the request submission queue. + * @pending: Set/list of pending requests. + * @pending.lock: Lock for modifying the request set. + * @pending.head: List-head of the pending set/list. + * @pending.count: Number of currently pending requests. + * @tx: Transmitter subsystem. + * @tx.work: Transmitter work item. + * @rtx_timeout: Retransmission timeout subsystem. + * @rtx_timeout.lock: Lock for modifying the retransmission timeout reaper. + * @rtx_timeout.timeout: Timeout interval for retransmission. + * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled. + * @rtx_timeout.reaper: Work performing timeout checks and subsequent actions. + * @ops: Request layer operations. + */ +struct ssh_rtl { + struct ssh_ptl ptl; + unsigned long state; + + struct { + spinlock_t lock; + struct list_head head; + } queue; + + struct { + spinlock_t lock; + struct list_head head; + atomic_t count; + } pending; + + struct { + struct work_struct work; + } tx; + + struct { + spinlock_t lock; + ktime_t timeout; + ktime_t expires; + struct delayed_work reaper; + } rtx_timeout; + + struct ssh_rtl_ops ops; +}; + +#define rtl_dbg(r, fmt, ...) ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__) +#define rtl_info(p, fmt, ...) ptl_info(&(p)->ptl, fmt, ##__VA_ARGS__) +#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__) +#define rtl_err(r, fmt, ...) ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__) +#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__) + +#define to_ssh_rtl(ptr, member) \ + container_of(ptr, struct ssh_rtl, member) + +/** + * ssh_rtl_get_device() - Get device associated with request transport layer. + * @rtl: The request transport layer. + * + * Return: Returns the device on which the given request transport layer + * builds upon. + */ +static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl) +{ + return ssh_ptl_get_device(&rtl->ptl); +} + +/** + * ssh_request_rtl() - Get request transport layer associated with request. 
+ * @rqst: The request to get the request transport layer reference for. + * + * Return: Returns the &struct ssh_rtl associated with the given SSH request. + */ +static inline struct ssh_rtl *ssh_request_rtl(struct ssh_request *rqst) +{ + struct ssh_ptl *ptl; + + ptl = READ_ONCE(rqst->packet.ptl); + return likely(ptl) ? to_ssh_rtl(ptl, ptl) : NULL; +} + +int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst); +bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending); + +int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev, + const struct ssh_rtl_ops *ops); + +int ssh_rtl_start(struct ssh_rtl *rtl); +int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout); +void ssh_rtl_shutdown(struct ssh_rtl *rtl); +void ssh_rtl_destroy(struct ssh_rtl *rtl); + +int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags, + const struct ssh_request_ops *ops); + +#endif /* _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H */ diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h new file mode 100644 index 0000000000..55cc61bba1 --- /dev/null +++ b/drivers/platform/surface/aggregator/trace.h @@ -0,0 +1,703 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Trace points for SSAM/SSH. + * + * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM surface_aggregator + +#if !defined(_SURFACE_AGGREGATOR_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _SURFACE_AGGREGATOR_TRACE_H + +#include <linux/surface_aggregator/serial_hub.h> + +#include <asm/unaligned.h> +#include <linux/tracepoint.h> + +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ); +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ); +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK); +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK); + +TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT); +TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT); +TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT); +TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT); +TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT); +TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT); +TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT); +TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT); + +TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT); +TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT); +TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT); + +TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_SF_MASK); +TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_TY_MASK); + +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT); +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT); +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT); +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT); +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT); +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT); +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT); +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT); + +TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT); +TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT); + +TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK); +TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK); + +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP); 
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC0); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SPT); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SYS); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC1); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SHB); +TRACE_DEFINE_ENUM(SSAM_SSH_TC_POS); + +#define SSAM_PTR_UID_LEN 9 +#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1) +#define SSAM_SEQ_NOT_APPLICABLE ((u16)-1) +#define SSAM_RQID_NOT_APPLICABLE ((u32)-1) +#define SSAM_SSH_TC_NOT_APPLICABLE 0 +#define SSAM_SSH_TID_NOT_APPLICABLE ((u8)-1) + +#ifndef _SURFACE_AGGREGATOR_TRACE_HELPERS +#define _SURFACE_AGGREGATOR_TRACE_HELPERS + +/** + * ssam_trace_ptr_uid() - Convert the pointer to a non-pointer UID string. + * @ptr: The pointer to convert. + * @uid_str: A buffer of length SSAM_PTR_UID_LEN where the UID will be stored. + * + * Converts the given pointer into a UID string that is safe to be shared + * with userspace and logs, i.e. doesn't give away the real memory location. + */ +static inline void ssam_trace_ptr_uid(const void *ptr, char *uid_str) +{ + char buf[2 * sizeof(void *) + 1]; + + BUILD_BUG_ON(ARRAY_SIZE(buf) < SSAM_PTR_UID_LEN); + + snprintf(buf, ARRAY_SIZE(buf), "%p", ptr); + memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN], + SSAM_PTR_UID_LEN); +} + +/** + * ssam_trace_get_packet_seq() - Read the packet's sequence ID. + * @p: The packet. + * + * Return: Returns the packet's sequence ID (SEQ) field if present, or + * %SSAM_SEQ_NOT_APPLICABLE if not (e.g. flush packet). + */ +static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p) +{ + if (!p->data.ptr || p->data.len < SSH_MESSAGE_LENGTH(0)) + return SSAM_SEQ_NOT_APPLICABLE; + + return p->data.ptr[SSH_MSGOFFSET_FRAME(seq)]; +} + +/** + * ssam_trace_get_request_id() - Read the packet's request ID. + * @p: The packet. + * + * Return: Returns the packet's request ID (RQID) field if the packet + * represents a request with command data, or %SSAM_RQID_NOT_APPLICABLE if not + * (e.g. flush request, control packet). + */ +static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p) +{ + if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) + return SSAM_RQID_NOT_APPLICABLE; + + return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(rqid)]); +} + +/** + * ssam_trace_get_request_tid() - Read the packet's request target ID. + * @p: The packet. + * + * Return: Returns the packet's request target ID (TID) field if the packet + * represents a request with command data, or %SSAM_SSH_TID_NOT_APPLICABLE + * if not (e.g. flush request, control packet). + */ +static inline u32 ssam_trace_get_request_tid(const struct ssh_packet *p) +{ + if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) + return SSAM_SSH_TID_NOT_APPLICABLE; + + return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tid)]); +} + +/** + * ssam_trace_get_request_sid() - Read the packet's request source ID. + * @p: The packet. 
+ *
+ * Return: Returns the packet's request source ID (SID) field if the packet
+ * represents a request with command data, or %SSAM_SSH_TID_NOT_APPLICABLE
+ * if not (e.g. flush request, control packet).
+ */
+static inline u32 ssam_trace_get_request_sid(const struct ssh_packet *p)
+{
+	if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
+		return SSAM_SSH_TID_NOT_APPLICABLE;
+
+	return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(sid)]);
+}
+
+/**
+ * ssam_trace_get_request_tc() - Read the packet's request target category.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's request target category (TC) field if the
+ * packet represents a request with command data, or %SSAM_SSH_TC_NOT_APPLICABLE
+ * if not (e.g. flush request, control packet).
+ */
+static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
+{
+	if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
+		return SSAM_SSH_TC_NOT_APPLICABLE;
+
+	return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tc)]);
+}
+
+#endif /* _SURFACE_AGGREGATOR_TRACE_HELPERS */
+
+#define ssam_trace_get_command_field_u8(packet, field) \
+	((!(packet) || (packet)->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) \
+	 ? 0 : (packet)->data.ptr[SSH_MSGOFFSET_COMMAND(field)])
+
+#define ssam_show_generic_u8_field(value)				\
+	__print_symbolic(value,						\
+		{ SSAM_U8_FIELD_NOT_APPLICABLE,		"N/A" }		\
+	)
+
+#define ssam_show_frame_type(ty)					\
+	__print_symbolic(ty,						\
+		{ SSH_FRAME_TYPE_DATA_SEQ,		"DSEQ" },	\
+		{ SSH_FRAME_TYPE_DATA_NSQ,		"DNSQ" },	\
+		{ SSH_FRAME_TYPE_ACK,			"ACK" },	\
+		{ SSH_FRAME_TYPE_NAK,			"NAK" }		\
+	)
+
+#define ssam_show_packet_type(type)					\
+	__print_flags((type) & SSH_PACKET_FLAGS_TY_MASK, "",		\
+		{ BIT(SSH_PACKET_TY_FLUSH_BIT),		"F" },		\
+		{ BIT(SSH_PACKET_TY_SEQUENCED_BIT),	"S" },		\
+		{ BIT(SSH_PACKET_TY_BLOCKING_BIT),	"B" }		\
+	)
+
+#define ssam_show_packet_state(state)					\
+	__print_flags((state) & SSH_PACKET_FLAGS_SF_MASK, "",		\
+		{ BIT(SSH_PACKET_SF_LOCKED_BIT),	"L" },		\
+		{ BIT(SSH_PACKET_SF_QUEUED_BIT),	"Q" },		\
+		{ BIT(SSH_PACKET_SF_PENDING_BIT),	"P" },		\
+		{ BIT(SSH_PACKET_SF_TRANSMITTING_BIT),	"S" },		\
+		{ BIT(SSH_PACKET_SF_TRANSMITTED_BIT),	"T" },		\
+		{ BIT(SSH_PACKET_SF_ACKED_BIT),		"A" },		\
+		{ BIT(SSH_PACKET_SF_CANCELED_BIT),	"C" },		\
+		{ BIT(SSH_PACKET_SF_COMPLETED_BIT),	"F" }		\
+	)
+
+#define ssam_show_packet_seq(seq)					\
+	__print_symbolic(seq,						\
+		{ SSAM_SEQ_NOT_APPLICABLE,		"N/A" }		\
+	)
+
+#define ssam_show_request_type(flags)					\
+	__print_flags((flags) & SSH_REQUEST_FLAGS_TY_MASK, "",		\
+		{ BIT(SSH_REQUEST_TY_FLUSH_BIT),	"F" },		\
+		{ BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),	"R" }		\
+	)
+
+#define ssam_show_request_state(flags)					\
+	__print_flags((flags) & SSH_REQUEST_FLAGS_SF_MASK, "",		\
+		{ BIT(SSH_REQUEST_SF_LOCKED_BIT),	"L" },		\
+		{ BIT(SSH_REQUEST_SF_QUEUED_BIT),	"Q" },		\
+		{ BIT(SSH_REQUEST_SF_PENDING_BIT),	"P" },		\
+		{ BIT(SSH_REQUEST_SF_TRANSMITTING_BIT),	"S" },		\
+		{ BIT(SSH_REQUEST_SF_TRANSMITTED_BIT),	"T" },		\
+		{ BIT(SSH_REQUEST_SF_RSPRCVD_BIT),	"A" },		\
+		{ BIT(SSH_REQUEST_SF_CANCELED_BIT),	"C" },		\
+		{ BIT(SSH_REQUEST_SF_COMPLETED_BIT),	"F" }		\
+	)
+
+#define ssam_show_request_id(rqid)					\
+	__print_symbolic(rqid,						\
+		{ SSAM_RQID_NOT_APPLICABLE,		"N/A" }		\
+	)
+
+#define ssam_show_ssh_tid(tid)						\
+	__print_symbolic(tid,						\
+		{ SSAM_SSH_TID_NOT_APPLICABLE,		"N/A" },	\
+		{ SSAM_SSH_TID_HOST,			"Host" },	\
+		{ SSAM_SSH_TID_SAM,			"SAM" },	\
+		{ SSAM_SSH_TID_KIP,			"KIP" },	\
+		{ SSAM_SSH_TID_DEBUG,			"Debug" },	\
+		{ SSAM_SSH_TID_SURFLINK,		"SurfLink" }	\
+	)
+
+#define ssam_show_ssh_tc(tc)
\
+ __print_symbolic(tc, \
+ { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
+ { SSAM_SSH_TC_SAM, "SAM" }, \
+ { SSAM_SSH_TC_BAT, "BAT" }, \
+ { SSAM_SSH_TC_TMP, "TMP" }, \
+ { SSAM_SSH_TC_PMC, "PMC" }, \
+ { SSAM_SSH_TC_FAN, "FAN" }, \
+ { SSAM_SSH_TC_PoM, "PoM" }, \
+ { SSAM_SSH_TC_DBG, "DBG" }, \
+ { SSAM_SSH_TC_KBD, "KBD" }, \
+ { SSAM_SSH_TC_FWU, "FWU" }, \
+ { SSAM_SSH_TC_UNI, "UNI" }, \
+ { SSAM_SSH_TC_LPC, "LPC" }, \
+ { SSAM_SSH_TC_TCL, "TCL" }, \
+ { SSAM_SSH_TC_SFL, "SFL" }, \
+ { SSAM_SSH_TC_KIP, "KIP" }, \
+ { SSAM_SSH_TC_EXT, "EXT" }, \
+ { SSAM_SSH_TC_BLD, "BLD" }, \
+ { SSAM_SSH_TC_BAS, "BAS" }, \
+ { SSAM_SSH_TC_SEN, "SEN" }, \
+ { SSAM_SSH_TC_SRQ, "SRQ" }, \
+ { SSAM_SSH_TC_MCU, "MCU" }, \
+ { SSAM_SSH_TC_HID, "HID" }, \
+ { SSAM_SSH_TC_TCH, "TCH" }, \
+ { SSAM_SSH_TC_BKL, "BKL" }, \
+ { SSAM_SSH_TC_TAM, "TAM" }, \
+ { SSAM_SSH_TC_ACC0, "ACC0" }, \
+ { SSAM_SSH_TC_UFI, "UFI" }, \
+ { SSAM_SSH_TC_USC, "USC" }, \
+ { SSAM_SSH_TC_PEN, "PEN" }, \
+ { SSAM_SSH_TC_VID, "VID" }, \
+ { SSAM_SSH_TC_AUD, "AUD" }, \
+ { SSAM_SSH_TC_SMC, "SMC" }, \
+ { SSAM_SSH_TC_KPD, "KPD" }, \
+ { SSAM_SSH_TC_REG, "REG" }, \
+ { SSAM_SSH_TC_SPT, "SPT" }, \
+ { SSAM_SSH_TC_SYS, "SYS" }, \
+ { SSAM_SSH_TC_ACC1, "ACC1" }, \
+ { SSAM_SSH_TC_SHB, "SHB" }, \
+ { SSAM_SSH_TC_POS, "POS" } \
+ )
+
+DECLARE_EVENT_CLASS(ssam_frame_class,
+ TP_PROTO(const struct ssh_frame *frame),
+
+ TP_ARGS(frame),
+
+ TP_STRUCT__entry(
+ __field(u8, type)
+ __field(u8, seq)
+ __field(u16, len)
+ ),
+
+ TP_fast_assign(
+ __entry->type = frame->type;
+ __entry->seq = frame->seq;
+ __entry->len = get_unaligned_le16(&frame->len);
+ ),
+
+ TP_printk("ty=%s, seq=%#04x, len=%u",
+ ssam_show_frame_type(__entry->type),
+ __entry->seq,
+ __entry->len
+ )
+);
+
+#define DEFINE_SSAM_FRAME_EVENT(name) \
+ DEFINE_EVENT(ssam_frame_class, ssam_##name, \
+ TP_PROTO(const struct ssh_frame *frame), \
+ TP_ARGS(frame) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_command_class,
+ TP_PROTO(const struct ssh_command *cmd, u16 len),
+
+ TP_ARGS(cmd, len),
+
+ TP_STRUCT__entry(
+ __field(u16, rqid)
+ __field(u16, len)
+ __field(u8, tid)
+ __field(u8, sid)
+ __field(u8, tc)
+ __field(u8, cid)
+ __field(u8, iid)
+ ),
+
+ TP_fast_assign(
+ __entry->rqid = get_unaligned_le16(&cmd->rqid);
+ __entry->tid = cmd->tid;
+ __entry->sid = cmd->sid;
+ __entry->tc = cmd->tc;
+ __entry->cid = cmd->cid;
+ __entry->iid = cmd->iid;
+ __entry->len = len;
+ ),
+
+ TP_printk("rqid=%#06x, tid=%s, sid=%s, tc=%s, cid=%#04x, iid=%#04x, len=%u",
+ __entry->rqid,
+ ssam_show_ssh_tid(__entry->tid),
+ ssam_show_ssh_tid(__entry->sid),
+ ssam_show_ssh_tc(__entry->tc),
+ __entry->cid,
+ __entry->iid,
+ __entry->len
+ )
+);
+
+#define DEFINE_SSAM_COMMAND_EVENT(name) \
+ DEFINE_EVENT(ssam_command_class, ssam_##name, \
+ TP_PROTO(const struct ssh_command *cmd, u16 len), \
+ TP_ARGS(cmd, len) \
+ )
+
+DECLARE_EVENT_CLASS(ssam_packet_class,
+ TP_PROTO(const struct ssh_packet *packet),
+
+ TP_ARGS(packet),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __array(char, uid, SSAM_PTR_UID_LEN)
+ __field(u8, priority)
+ __field(u16, length)
+ __field(u16, seq)
+ ),
+
+ TP_fast_assign(
+ __entry->state = READ_ONCE(packet->state);
+ ssam_trace_ptr_uid(packet, __entry->uid);
+ __entry->priority = READ_ONCE(packet->priority);
+ __entry->length = packet->data.len;
+ __entry->seq = ssam_trace_get_packet_seq(packet);
+ ),
+
+ TP_printk("uid=%s, seq=%s, ty=%s, pri=%#04x, len=%u, sta=%s",
+ __entry->uid,
+ ssam_show_packet_seq(__entry->seq),
+ ssam_show_packet_type(__entry->state),
+ __entry->priority,
+ 
__entry->length, + ssam_show_packet_state(__entry->state) + ) +); + +#define DEFINE_SSAM_PACKET_EVENT(name) \ + DEFINE_EVENT(ssam_packet_class, ssam_##name, \ + TP_PROTO(const struct ssh_packet *packet), \ + TP_ARGS(packet) \ + ) + +DECLARE_EVENT_CLASS(ssam_packet_status_class, + TP_PROTO(const struct ssh_packet *packet, int status), + + TP_ARGS(packet, status), + + TP_STRUCT__entry( + __field(unsigned long, state) + __field(int, status) + __array(char, uid, SSAM_PTR_UID_LEN) + __field(u8, priority) + __field(u16, length) + __field(u16, seq) + ), + + TP_fast_assign( + __entry->state = READ_ONCE(packet->state); + __entry->status = status; + ssam_trace_ptr_uid(packet, __entry->uid); + __entry->priority = READ_ONCE(packet->priority); + __entry->length = packet->data.len; + __entry->seq = ssam_trace_get_packet_seq(packet); + ), + + TP_printk("uid=%s, seq=%s, ty=%s, pri=%#04x, len=%u, sta=%s, status=%d", + __entry->uid, + ssam_show_packet_seq(__entry->seq), + ssam_show_packet_type(__entry->state), + __entry->priority, + __entry->length, + ssam_show_packet_state(__entry->state), + __entry->status + ) +); + +#define DEFINE_SSAM_PACKET_STATUS_EVENT(name) \ + DEFINE_EVENT(ssam_packet_status_class, ssam_##name, \ + TP_PROTO(const struct ssh_packet *packet, int status), \ + TP_ARGS(packet, status) \ + ) + +DECLARE_EVENT_CLASS(ssam_request_class, + TP_PROTO(const struct ssh_request *request), + + TP_ARGS(request), + + TP_STRUCT__entry( + __field(unsigned long, state) + __field(u32, rqid) + __array(char, uid, SSAM_PTR_UID_LEN) + __field(u8, tc) + __field(u16, cid) + __field(u16, iid) + __field(u8, tid) + __field(u8, sid) + ), + + TP_fast_assign( + const struct ssh_packet *p = &request->packet; + + /* Use packet for UID so we can match requests to packets. */ + __entry->state = READ_ONCE(request->state); + __entry->rqid = ssam_trace_get_request_id(p); + ssam_trace_ptr_uid(p, __entry->uid); + __entry->tid = ssam_trace_get_request_tid(p); + __entry->sid = ssam_trace_get_request_sid(p); + __entry->tc = ssam_trace_get_request_tc(p); + __entry->cid = ssam_trace_get_command_field_u8(p, cid); + __entry->iid = ssam_trace_get_command_field_u8(p, iid); + ), + + TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tid=%s, sid=%s, tc=%s, cid=%s, iid=%s", + __entry->uid, + ssam_show_request_id(__entry->rqid), + ssam_show_request_type(__entry->state), + ssam_show_request_state(__entry->state), + ssam_show_ssh_tid(__entry->tid), + ssam_show_ssh_tid(__entry->sid), + ssam_show_ssh_tc(__entry->tc), + ssam_show_generic_u8_field(__entry->cid), + ssam_show_generic_u8_field(__entry->iid) + ) +); + +#define DEFINE_SSAM_REQUEST_EVENT(name) \ + DEFINE_EVENT(ssam_request_class, ssam_##name, \ + TP_PROTO(const struct ssh_request *request), \ + TP_ARGS(request) \ + ) + +DECLARE_EVENT_CLASS(ssam_request_status_class, + TP_PROTO(const struct ssh_request *request, int status), + + TP_ARGS(request, status), + + TP_STRUCT__entry( + __field(unsigned long, state) + __field(u32, rqid) + __field(int, status) + __array(char, uid, SSAM_PTR_UID_LEN) + __field(u8, tc) + __field(u16, cid) + __field(u16, iid) + __field(u8, tid) + __field(u8, sid) + ), + + TP_fast_assign( + const struct ssh_packet *p = &request->packet; + + /* Use packet for UID so we can match requests to packets. 
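+ *
+ * The UID itself comes from ssam_trace_ptr_uid() (defined earlier in
+ * this header), which condenses the packet pointer into a short string
+ * that is safe to expose to user space. A minimal sketch of the idea
+ * (illustrative only, not necessarily the exact implementation):
+ *
+ *   char buf[2 * sizeof(void *) + 1];
+ *   snprintf(buf, sizeof(buf), "%p", ptr);
+ *   memcpy(uid_str, &buf[sizeof(buf) - SSAM_PTR_UID_LEN], SSAM_PTR_UID_LEN);
+ *
+ * Since %p prints hashed pointer values, trace consumers can correlate
+ * request and packet events without learning real kernel addresses.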
*/ + __entry->state = READ_ONCE(request->state); + __entry->rqid = ssam_trace_get_request_id(p); + __entry->status = status; + ssam_trace_ptr_uid(p, __entry->uid); + __entry->tid = ssam_trace_get_request_tid(p); + __entry->sid = ssam_trace_get_request_sid(p); + __entry->tc = ssam_trace_get_request_tc(p); + __entry->cid = ssam_trace_get_command_field_u8(p, cid); + __entry->iid = ssam_trace_get_command_field_u8(p, iid); + ), + + TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tid=%s, sid=%s, tc=%s, cid=%s, iid=%s, status=%d", + __entry->uid, + ssam_show_request_id(__entry->rqid), + ssam_show_request_type(__entry->state), + ssam_show_request_state(__entry->state), + ssam_show_ssh_tid(__entry->tid), + ssam_show_ssh_tid(__entry->sid), + ssam_show_ssh_tc(__entry->tc), + ssam_show_generic_u8_field(__entry->cid), + ssam_show_generic_u8_field(__entry->iid), + __entry->status + ) +); + +#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name) \ + DEFINE_EVENT(ssam_request_status_class, ssam_##name, \ + TP_PROTO(const struct ssh_request *request, int status),\ + TP_ARGS(request, status) \ + ) + +DECLARE_EVENT_CLASS(ssam_alloc_class, + TP_PROTO(void *ptr, size_t len), + + TP_ARGS(ptr, len), + + TP_STRUCT__entry( + __field(size_t, len) + __array(char, uid, SSAM_PTR_UID_LEN) + ), + + TP_fast_assign( + __entry->len = len; + ssam_trace_ptr_uid(ptr, __entry->uid); + ), + + TP_printk("uid=%s, len=%zu", __entry->uid, __entry->len) +); + +#define DEFINE_SSAM_ALLOC_EVENT(name) \ + DEFINE_EVENT(ssam_alloc_class, ssam_##name, \ + TP_PROTO(void *ptr, size_t len), \ + TP_ARGS(ptr, len) \ + ) + +DECLARE_EVENT_CLASS(ssam_free_class, + TP_PROTO(void *ptr), + + TP_ARGS(ptr), + + TP_STRUCT__entry( + __array(char, uid, SSAM_PTR_UID_LEN) + ), + + TP_fast_assign( + ssam_trace_ptr_uid(ptr, __entry->uid); + ), + + TP_printk("uid=%s", __entry->uid) +); + +#define DEFINE_SSAM_FREE_EVENT(name) \ + DEFINE_EVENT(ssam_free_class, ssam_##name, \ + TP_PROTO(void *ptr), \ + TP_ARGS(ptr) \ + ) + +DECLARE_EVENT_CLASS(ssam_pending_class, + TP_PROTO(unsigned int pending), + + TP_ARGS(pending), + + TP_STRUCT__entry( + __field(unsigned int, pending) + ), + + TP_fast_assign( + __entry->pending = pending; + ), + + TP_printk("pending=%u", __entry->pending) +); + +#define DEFINE_SSAM_PENDING_EVENT(name) \ + DEFINE_EVENT(ssam_pending_class, ssam_##name, \ + TP_PROTO(unsigned int pending), \ + TP_ARGS(pending) \ + ) + +DECLARE_EVENT_CLASS(ssam_data_class, + TP_PROTO(size_t length), + + TP_ARGS(length), + + TP_STRUCT__entry( + __field(size_t, length) + ), + + TP_fast_assign( + __entry->length = length; + ), + + TP_printk("length=%zu", __entry->length) +); + +#define DEFINE_SSAM_DATA_EVENT(name) \ + DEFINE_EVENT(ssam_data_class, ssam_##name, \ + TP_PROTO(size_t length), \ + TP_ARGS(length) \ + ) + +DEFINE_SSAM_FRAME_EVENT(rx_frame_received); +DEFINE_SSAM_COMMAND_EVENT(rx_response_received); +DEFINE_SSAM_COMMAND_EVENT(rx_event_received); + +DEFINE_SSAM_PACKET_EVENT(packet_release); +DEFINE_SSAM_PACKET_EVENT(packet_submit); +DEFINE_SSAM_PACKET_EVENT(packet_resubmit); +DEFINE_SSAM_PACKET_EVENT(packet_timeout); +DEFINE_SSAM_PACKET_EVENT(packet_cancel); +DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete); +DEFINE_SSAM_PENDING_EVENT(ptl_timeout_reap); + +DEFINE_SSAM_REQUEST_EVENT(request_submit); +DEFINE_SSAM_REQUEST_EVENT(request_timeout); +DEFINE_SSAM_REQUEST_EVENT(request_cancel); +DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete); +DEFINE_SSAM_PENDING_EVENT(rtl_timeout_reap); + +DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet); 
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet); +DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet); +DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write); +DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data); +DEFINE_SSAM_DATA_EVENT(ei_rx_corrupt_syn); +DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data); +DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response); + +DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc); +DEFINE_SSAM_FREE_EVENT(ctrl_packet_free); + +DEFINE_SSAM_ALLOC_EVENT(event_item_alloc); +DEFINE_SSAM_FREE_EVENT(event_item_free); + +#endif /* _SURFACE_AGGREGATOR_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace + +#include <trace/define_trace.h> diff --git a/drivers/platform/surface/surface3-wmi.c b/drivers/platform/surface/surface3-wmi.c new file mode 100644 index 0000000000..ca4602bcc7 --- /dev/null +++ b/drivers/platform/surface/surface3-wmi.c @@ -0,0 +1,287 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for the LID cover switch of the Surface 3 + * + * Copyright (c) 2016 Red Hat Inc. + */ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/input.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> + +MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@redhat.com>"); +MODULE_DESCRIPTION("Surface 3 platform driver"); +MODULE_LICENSE("GPL"); + +#define ACPI_BUTTON_HID_LID "PNP0C0D" +#define SPI_CTL_OBJ_NAME "SPI" +#define SPI_TS_OBJ_NAME "NTRG" + +#define SURFACE3_LID_GUID "F7CC25EC-D20B-404C-8903-0ED4359C18AE" + +MODULE_ALIAS("wmi:" SURFACE3_LID_GUID); + +static const struct dmi_system_id surface3_dmi_table[] = { +#if defined(CONFIG_X86) + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"), + }, + }, +#endif + { } +}; + +struct surface3_wmi { + struct acpi_device *touchscreen_adev; + struct acpi_device *pnp0c0d_adev; + struct acpi_hotplug_context hp; + struct input_dev *input; +}; + +static struct platform_device *s3_wmi_pdev; + +static struct surface3_wmi s3_wmi; + +static DEFINE_MUTEX(s3_wmi_lock); + +static int s3_wmi_query_block(const char *guid, int instance, int *ret) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj = NULL; + acpi_status status; + int error = 0; + + mutex_lock(&s3_wmi_lock); + status = wmi_query_block(guid, instance, &output); + if (ACPI_FAILURE(status)) { + error = -EIO; + goto out_free_unlock; + } + + obj = output.pointer; + + if (!obj || obj->type != ACPI_TYPE_INTEGER) { + if (obj) { + pr_err("query block returned object type: %d - buffer length:%d\n", + obj->type, + obj->type == ACPI_TYPE_BUFFER ? 
obj->buffer.length : 0);
+ }
+ error = -EINVAL;
+ goto out_free_unlock;
+ }
+ *ret = obj->integer.value;
+ out_free_unlock:
+ kfree(obj);
+ mutex_unlock(&s3_wmi_lock);
+ return error;
+}
+
+static inline int s3_wmi_query_lid(int *ret)
+{
+ return s3_wmi_query_block(SURFACE3_LID_GUID, 0, ret);
+}
+
+static int s3_wmi_send_lid_state(void)
+{
+ int ret, lid_sw;
+
+ ret = s3_wmi_query_lid(&lid_sw);
+ if (ret)
+ return ret;
+
+ input_report_switch(s3_wmi.input, SW_LID, lid_sw);
+ input_sync(s3_wmi.input);
+
+ return 0;
+}
+
+static int s3_wmi_hp_notify(struct acpi_device *adev, u32 value)
+{
+ return s3_wmi_send_lid_state();
+}
+
+static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
+ u32 level,
+ void *data,
+ void **return_value)
+{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
+ struct acpi_device **ts_adev = data;
+
+ if (!adev || strncmp(acpi_device_bid(adev), SPI_TS_OBJ_NAME,
+ strlen(SPI_TS_OBJ_NAME)))
+ return AE_OK;
+
+ if (*ts_adev) {
+ pr_err("duplicate entry %s\n", SPI_TS_OBJ_NAME);
+ return AE_OK;
+ }
+
+ *ts_adev = adev;
+
+ return AE_OK;
+}
+
+static int s3_wmi_check_platform_device(struct device *dev, void *data)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *ts_adev = NULL;
+ acpi_status status;
+
+ /* ignore non-ACPI devices */
+ if (!adev)
+ return 0;
+
+ /* check for LID ACPI switch */
+ if (!strcmp(ACPI_BUTTON_HID_LID, acpi_device_hid(adev))) {
+ s3_wmi.pnp0c0d_adev = adev;
+ return 0;
+ }
+
+ /* ignore non-SPI controllers */
+ if (strncmp(acpi_device_bid(adev), SPI_CTL_OBJ_NAME,
+ strlen(SPI_CTL_OBJ_NAME)))
+ return 0;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, adev->handle, 1,
+ s3_wmi_attach_spi_device, NULL,
+ &ts_adev, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(dev, "failed to enumerate SPI slaves\n");
+
+ if (!ts_adev)
+ return 0;
+
+ s3_wmi.touchscreen_adev = ts_adev;
+
+ return 0;
+}
+
+static int s3_wmi_create_and_register_input(struct platform_device *pdev)
+{
+ struct input_dev *input;
+ int error;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Lid Switch";
+ input->phys = "button/input0";
+ input->id.bustype = BUS_HOST;
+ input->id.product = 0x0005;
+
+ input_set_capability(input, EV_SW, SW_LID);
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ s3_wmi.input = input;
+
+ return 0;
+}
+
+static int __init s3_wmi_probe(struct platform_device *pdev)
+{
+ int error;
+
+ if (!dmi_check_system(surface3_dmi_table))
+ return -ENODEV;
+
+ memset(&s3_wmi, 0, sizeof(s3_wmi));
+
+ bus_for_each_dev(&platform_bus_type, NULL, NULL,
+ s3_wmi_check_platform_device);
+
+ if (!s3_wmi.touchscreen_adev)
+ return -ENODEV;
+
+ acpi_bus_trim(s3_wmi.pnp0c0d_adev);
+
+ error = s3_wmi_create_and_register_input(pdev);
+ if (error)
+ goto restore_acpi_lid;
+
+ acpi_initialize_hp_context(s3_wmi.touchscreen_adev, &s3_wmi.hp,
+ s3_wmi_hp_notify, NULL);
+
+ s3_wmi_send_lid_state();
+
+ return 0;
+
+ restore_acpi_lid:
+ acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
+ return error;
+}
+
+static int s3_wmi_remove(struct platform_device *device)
+{
+ /* remove the hotplug context from the ACPI device */
+ s3_wmi.touchscreen_adev->hp = NULL;
+
+ /* reinstall the actual PNP0C0D LID default handler */
+ acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
+ return 0;
+}
+
+static int __maybe_unused s3_wmi_resume(struct device *dev)
+{
+ s3_wmi_send_lid_state();
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
+
+static struct platform_driver 
s3_wmi_driver = {
+ .driver = {
+ .name = "surface3-wmi",
+ .pm = &s3_wmi_pm,
+ },
+ .remove = s3_wmi_remove,
+};
+
+static int __init s3_wmi_init(void)
+{
+ int error;
+
+ s3_wmi_pdev = platform_device_alloc("surface3-wmi", -1);
+ if (!s3_wmi_pdev)
+ return -ENOMEM;
+
+ error = platform_device_add(s3_wmi_pdev);
+ if (error)
+ goto err_device_put;
+
+ error = platform_driver_probe(&s3_wmi_driver, s3_wmi_probe);
+ if (error)
+ goto err_device_del;
+
+ pr_info("Surface 3 WMI Extras loaded\n");
+ return 0;
+
+ err_device_del:
+ platform_device_del(s3_wmi_pdev);
+ err_device_put:
+ platform_device_put(s3_wmi_pdev);
+ return error;
+}
+
+static void __exit s3_wmi_exit(void)
+{
+ platform_device_unregister(s3_wmi_pdev);
+ platform_driver_unregister(&s3_wmi_driver);
+}
+
+module_init(s3_wmi_init);
+module_exit(s3_wmi_exit);
diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
new file mode 100644
index 0000000000..4c0f92562a
--- /dev/null
+++ b/drivers/platform/surface/surface3_power.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Support for the power IC on the Surface 3 tablet.
+ *
+ * (C) Copyright 2016-2018 Red Hat, Inc
+ * (C) Copyright 2016-2018 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * (C) Copyright 2016 Stephen Just <stephenjust@gmail.com>
+ *
+ * This driver has been reverse-engineered by parsing the DSDT of the Surface 3
+ * and looking at the registers of the chips.
+ *
+ * The DSDT made it possible to find out that:
+ * - the driver is required for the ACPI BAT0 device to communicate with the
+ * chip through an operation region.
+ * - the various defines for the operation region functions to communicate with
+ * this driver
+ * - the DSM 3f99e367-6220-4955-8b0f-06ef2ae79412 allows triggering ACPI
+ * events to BAT0 (the code is all available in the DSDT).
+ *
+ * Further findings regarding the 2 chips declared in the MSHW0011 are:
+ * - there are 2 chips declared:
+ * . 0x22 seems to control the ADP1 line status (and probably the charger)
+ * . 0x55 controls the battery directly
+ * - the battery chip uses an SMBus protocol (using plain SMBus allows
+ * non-destructive commands):
+ * . the commands/registers used are in the range 0x00..0x7F
+ * . if bit 8 (0x80) is set in the SMBus command, the returned value is the
+ * same as when it is not set. There is a high chance this bit is the
+ * read/write bit.
+ * . the semantics of the various registers have been deduced by observing
+ * register dumps.
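+ *
+ * For illustration, with the protocol above a battery register can be
+ * read through a plain SMBus word read, roughly like this (hypothetical
+ * sketch, not actual driver code; the register defines are found below):
+ *
+ *   int val = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CAPACITY);
+ *   if (val < 0)
+ *       return val;  // negative errno on failure
+ *   // otherwise 'val' holds the 16-bit little-endian register content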
+ */ + +#include <linux/acpi.h> +#include <linux/bits.h> +#include <linux/freezer.h> +#include <linux/i2c.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/uuid.h> +#include <asm/unaligned.h> + +#define SURFACE_3_POLL_INTERVAL (2 * HZ) +#define SURFACE_3_STRLEN 10 + +struct mshw0011_data { + struct i2c_client *adp1; + struct i2c_client *bat0; + unsigned short notify_mask; + struct task_struct *poll_task; + bool kthread_running; + + bool charging; + bool bat_charging; + u8 trip_point; + s32 full_capacity; +}; + +struct mshw0011_handler_data { + struct acpi_connection_info info; + struct i2c_client *client; +}; + +struct bix { + u32 revision; + u32 power_unit; + u32 design_capacity; + u32 last_full_charg_capacity; + u32 battery_technology; + u32 design_voltage; + u32 design_capacity_of_warning; + u32 design_capacity_of_low; + u32 cycle_count; + u32 measurement_accuracy; + u32 max_sampling_time; + u32 min_sampling_time; + u32 max_average_interval; + u32 min_average_interval; + u32 battery_capacity_granularity_1; + u32 battery_capacity_granularity_2; + char model[SURFACE_3_STRLEN]; + char serial[SURFACE_3_STRLEN]; + char type[SURFACE_3_STRLEN]; + char OEM[SURFACE_3_STRLEN]; +} __packed; + +struct bst { + u32 battery_state; + s32 battery_present_rate; + u32 battery_remaining_capacity; + u32 battery_present_voltage; +} __packed; + +struct gsb_command { + u8 arg0; + u8 arg1; + u8 arg2; +} __packed; + +struct gsb_buffer { + u8 status; + u8 len; + u8 ret; + union { + struct gsb_command cmd; + struct bst bst; + struct bix bix; + } __packed; +} __packed; + +#define ACPI_BATTERY_STATE_DISCHARGING BIT(0) +#define ACPI_BATTERY_STATE_CHARGING BIT(1) +#define ACPI_BATTERY_STATE_CRITICAL BIT(2) + +#define MSHW0011_CMD_DEST_BAT0 0x01 +#define MSHW0011_CMD_DEST_ADP1 0x03 + +#define MSHW0011_CMD_BAT0_STA 0x01 +#define MSHW0011_CMD_BAT0_BIX 0x02 +#define MSHW0011_CMD_BAT0_BCT 0x03 +#define MSHW0011_CMD_BAT0_BTM 0x04 +#define MSHW0011_CMD_BAT0_BST 0x05 +#define MSHW0011_CMD_BAT0_BTP 0x06 +#define MSHW0011_CMD_ADP1_PSR 0x07 +#define MSHW0011_CMD_BAT0_PSOC 0x09 +#define MSHW0011_CMD_BAT0_PMAX 0x0a +#define MSHW0011_CMD_BAT0_PSRC 0x0b +#define MSHW0011_CMD_BAT0_CHGI 0x0c +#define MSHW0011_CMD_BAT0_ARTG 0x0d + +#define MSHW0011_NOTIFY_GET_VERSION 0x00 +#define MSHW0011_NOTIFY_ADP1 0x01 +#define MSHW0011_NOTIFY_BAT0_BST 0x02 +#define MSHW0011_NOTIFY_BAT0_BIX 0x05 + +#define MSHW0011_ADP1_REG_PSR 0x04 + +#define MSHW0011_BAT0_REG_CAPACITY 0x0c +#define MSHW0011_BAT0_REG_FULL_CHG_CAPACITY 0x0e +#define MSHW0011_BAT0_REG_DESIGN_CAPACITY 0x40 +#define MSHW0011_BAT0_REG_VOLTAGE 0x08 +#define MSHW0011_BAT0_REG_RATE 0x14 +#define MSHW0011_BAT0_REG_OEM 0x45 +#define MSHW0011_BAT0_REG_TYPE 0x4e +#define MSHW0011_BAT0_REG_SERIAL_NO 0x56 +#define MSHW0011_BAT0_REG_CYCLE_CNT 0x6e + +#define MSHW0011_EV_2_5_MASK GENMASK(8, 0) + +/* 3f99e367-6220-4955-8b0f-06ef2ae79412 */ +static const guid_t mshw0011_guid = + GUID_INIT(0x3F99E367, 0x6220, 0x4955, 0x8B, 0x0F, 0x06, 0xEF, + 0x2A, 0xE7, 0x94, 0x12); + +static int +mshw0011_notify(struct mshw0011_data *cdata, u8 arg1, u8 arg2, + unsigned int *ret_value) +{ + union acpi_object *obj; + acpi_handle handle; + unsigned int i; + + handle = ACPI_HANDLE(&cdata->adp1->dev); + if (!handle) + return -ENODEV; + + obj = acpi_evaluate_dsm_typed(handle, &mshw0011_guid, arg1, arg2, NULL, + ACPI_TYPE_BUFFER); + if (!obj) { + dev_err(&cdata->adp1->dev, "device _DSM execution failed\n"); + return -ENODEV; + } + + *ret_value = 
0;
+ for (i = 0; i < obj->buffer.length; i++)
+ *ret_value |= obj->buffer.pointer[i] << (i * 8);
+
+ ACPI_FREE(obj);
+ return 0;
+}
+
+static const struct bix default_bix = {
+ .revision = 0x00,
+ .power_unit = 0x01,
+ .design_capacity = 0x1dca,
+ .last_full_charg_capacity = 0x1dca,
+ .battery_technology = 0x01,
+ .design_voltage = 0x10df,
+ .design_capacity_of_warning = 0x8f,
+ .design_capacity_of_low = 0x47,
+ .cycle_count = 0xffffffff,
+ .measurement_accuracy = 0x00015f90,
+ .max_sampling_time = 0x03e8,
+ .min_sampling_time = 0x03e8,
+ .max_average_interval = 0x03e8,
+ .min_average_interval = 0x03e8,
+ .battery_capacity_granularity_1 = 0x45,
+ .battery_capacity_granularity_2 = 0x11,
+ .model = "P11G8M",
+ .serial = "",
+ .type = "LION",
+ .OEM = "",
+};
+
+static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
+{
+ struct i2c_client *client = cdata->bat0;
+ char buf[SURFACE_3_STRLEN];
+ int ret;
+
+ *bix = default_bix;
+
+ /* get design capacity */
+ ret = i2c_smbus_read_word_data(client,
+ MSHW0011_BAT0_REG_DESIGN_CAPACITY);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading design capacity: %d\n",
+ ret);
+ return ret;
+ }
+ bix->design_capacity = ret;
+
+ /* get last full charge capacity */
+ ret = i2c_smbus_read_word_data(client,
+ MSHW0011_BAT0_REG_FULL_CHG_CAPACITY);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Error reading last full charge capacity: %d\n", ret);
+ return ret;
+ }
+ bix->last_full_charg_capacity = ret;
+
+ /*
+ * Get the serial number. On some devices (with an unofficial
+ * replacement battery?), reading any of the serial-number range
+ * addresses gets nacked; in this case, just leave the serial number
+ * empty.
+ */
+ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
+ sizeof(buf), buf);
+ if (ret == -EREMOTEIO) {
+ /* no serial number available */
+ } else if (ret != sizeof(buf)) {
+ dev_err(&client->dev, "Error reading serial no: %d\n", ret);
+ return ret;
+ } else {
+ snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
+ }
+
+ /* get cycle count */
+ ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading cycle count: %d\n", ret);
+ return ret;
+ }
+ bix->cycle_count = ret;
+
+ /* get OEM name */
+ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_OEM,
+ 4, buf);
+ if (ret != 4) {
+ dev_err(&client->dev, "Error reading OEM name: %d\n", ret);
+ return ret;
+ }
+ snprintf(bix->OEM, ARRAY_SIZE(bix->OEM), "%3pE", buf);
+
+ return 0;
+}
+
+static int mshw0011_bst(struct mshw0011_data *cdata, struct bst *bst)
+{
+ struct i2c_client *client = cdata->bat0;
+ int rate, capacity, voltage, state;
+ s16 tmp;
+
+ rate = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_RATE);
+ if (rate < 0)
+ return rate;
+
+ capacity = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CAPACITY);
+ if (capacity < 0)
+ return capacity;
+
+ voltage = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_VOLTAGE);
+ if (voltage < 0)
+ return voltage;
+
+ tmp = rate;
+ bst->battery_present_rate = abs((s32)tmp);
+
+ state = 0;
+ if ((s32) tmp > 0)
+ state |= ACPI_BATTERY_STATE_CHARGING;
+ else if ((s32) tmp < 0)
+ state |= ACPI_BATTERY_STATE_DISCHARGING;
+ bst->battery_state = state;
+
+ bst->battery_remaining_capacity = capacity;
+ bst->battery_present_voltage = voltage;
+
+ return 0;
+}
+
+static int mshw0011_adp_psr(struct mshw0011_data *cdata)
+{
+ return i2c_smbus_read_byte_data(cdata->adp1, MSHW0011_ADP1_REG_PSR);
+}
+
+static int mshw0011_isr(struct 
mshw0011_data *cdata) +{ + struct bst bst; + struct bix bix; + int ret; + bool status, bat_status; + + ret = mshw0011_adp_psr(cdata); + if (ret < 0) + return ret; + + status = ret; + if (status != cdata->charging) + mshw0011_notify(cdata, cdata->notify_mask, + MSHW0011_NOTIFY_ADP1, &ret); + + cdata->charging = status; + + ret = mshw0011_bst(cdata, &bst); + if (ret < 0) + return ret; + + bat_status = bst.battery_state; + if (bat_status != cdata->bat_charging) + mshw0011_notify(cdata, cdata->notify_mask, + MSHW0011_NOTIFY_BAT0_BST, &ret); + + cdata->bat_charging = bat_status; + + ret = mshw0011_bix(cdata, &bix); + if (ret < 0) + return ret; + + if (bix.last_full_charg_capacity != cdata->full_capacity) + mshw0011_notify(cdata, cdata->notify_mask, + MSHW0011_NOTIFY_BAT0_BIX, &ret); + + cdata->full_capacity = bix.last_full_charg_capacity; + + return 0; +} + +static int mshw0011_poll_task(void *data) +{ + struct mshw0011_data *cdata = data; + int ret = 0; + + cdata->kthread_running = true; + + set_freezable(); + + while (!kthread_should_stop()) { + schedule_timeout_interruptible(SURFACE_3_POLL_INTERVAL); + try_to_freeze(); + ret = mshw0011_isr(data); + if (ret) + break; + } + + cdata->kthread_running = false; + return ret; +} + +static acpi_status +mshw0011_space_handler(u32 function, acpi_physical_address command, + u32 bits, u64 *value64, + void *handler_context, void *region_context) +{ + struct gsb_buffer *gsb = (struct gsb_buffer *)value64; + struct mshw0011_handler_data *data = handler_context; + struct acpi_connection_info *info = &data->info; + struct acpi_resource_i2c_serialbus *sb; + struct i2c_client *client = data->client; + struct mshw0011_data *cdata = i2c_get_clientdata(client); + struct acpi_resource *ares; + u32 accessor_type = function >> 16; + acpi_status ret; + int status = 1; + + ret = acpi_buffer_to_resource(info->connection, info->length, &ares); + if (ACPI_FAILURE(ret)) + return ret; + + if (!value64 || !i2c_acpi_get_i2c_resource(ares, &sb)) { + ret = AE_BAD_PARAMETER; + goto err; + } + + if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) { + ret = AE_BAD_PARAMETER; + goto err; + } + + if (gsb->cmd.arg0 == MSHW0011_CMD_DEST_ADP1 && + gsb->cmd.arg1 == MSHW0011_CMD_ADP1_PSR) { + status = mshw0011_adp_psr(cdata); + if (status >= 0) { + ret = AE_OK; + goto out; + } else { + ret = AE_ERROR; + goto err; + } + } + + if (gsb->cmd.arg0 != MSHW0011_CMD_DEST_BAT0) { + ret = AE_BAD_PARAMETER; + goto err; + } + + switch (gsb->cmd.arg1) { + case MSHW0011_CMD_BAT0_STA: + break; + case MSHW0011_CMD_BAT0_BIX: + ret = mshw0011_bix(cdata, &gsb->bix); + break; + case MSHW0011_CMD_BAT0_BTP: + cdata->trip_point = gsb->cmd.arg2; + break; + case MSHW0011_CMD_BAT0_BST: + ret = mshw0011_bst(cdata, &gsb->bst); + break; + default: + dev_info(&cdata->bat0->dev, "command(0x%02x) is not supported.\n", gsb->cmd.arg1); + ret = AE_BAD_PARAMETER; + goto err; + } + + out: + gsb->ret = status; + gsb->status = 0; + + err: + ACPI_FREE(ares); + return ret; +} + +static int mshw0011_install_space_handler(struct i2c_client *client) +{ + struct acpi_device *adev; + struct mshw0011_handler_data *data; + acpi_status status; + + adev = ACPI_COMPANION(&client->dev); + if (!adev) + return -ENODEV; + + data = kzalloc(sizeof(struct mshw0011_handler_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->client = client; + status = acpi_bus_attach_private_data(adev->handle, (void *)data); + if (ACPI_FAILURE(status)) { + kfree(data); + return -ENOMEM; + } + + status = 
acpi_install_address_space_handler(adev->handle, + ACPI_ADR_SPACE_GSBUS, + &mshw0011_space_handler, + NULL, + data); + if (ACPI_FAILURE(status)) { + dev_err(&client->dev, "Error installing i2c space handler\n"); + acpi_bus_detach_private_data(adev->handle); + kfree(data); + return -ENOMEM; + } + + acpi_dev_clear_dependencies(adev); + return 0; +} + +static void mshw0011_remove_space_handler(struct i2c_client *client) +{ + struct mshw0011_handler_data *data; + acpi_handle handle; + acpi_status status; + + handle = ACPI_HANDLE(&client->dev); + if (!handle) + return; + + acpi_remove_address_space_handler(handle, + ACPI_ADR_SPACE_GSBUS, + &mshw0011_space_handler); + + status = acpi_bus_get_private_data(handle, (void **)&data); + if (ACPI_SUCCESS(status)) + kfree(data); + + acpi_bus_detach_private_data(handle); +} + +static int mshw0011_probe(struct i2c_client *client) +{ + struct i2c_board_info board_info; + struct device *dev = &client->dev; + struct i2c_client *bat0; + struct mshw0011_data *data; + int error, mask; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->adp1 = client; + i2c_set_clientdata(client, data); + + memset(&board_info, 0, sizeof(board_info)); + strscpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE); + + bat0 = i2c_acpi_new_device(dev, 1, &board_info); + if (IS_ERR(bat0)) + return PTR_ERR(bat0); + + data->bat0 = bat0; + i2c_set_clientdata(bat0, data); + + error = mshw0011_notify(data, 1, MSHW0011_NOTIFY_GET_VERSION, &mask); + if (error) + goto out_err; + + data->notify_mask = mask == MSHW0011_EV_2_5_MASK; + + data->poll_task = kthread_run(mshw0011_poll_task, data, "mshw0011_adp"); + if (IS_ERR(data->poll_task)) { + error = PTR_ERR(data->poll_task); + dev_err(&client->dev, "Unable to run kthread err %d\n", error); + goto out_err; + } + + error = mshw0011_install_space_handler(client); + if (error) + goto out_err; + + return 0; + +out_err: + if (data->kthread_running) + kthread_stop(data->poll_task); + i2c_unregister_device(data->bat0); + return error; +} + +static void mshw0011_remove(struct i2c_client *client) +{ + struct mshw0011_data *cdata = i2c_get_clientdata(client); + + mshw0011_remove_space_handler(client); + + if (cdata->kthread_running) + kthread_stop(cdata->poll_task); + + i2c_unregister_device(cdata->bat0); +} + +static const struct acpi_device_id mshw0011_acpi_match[] = { + { "MSHW0011", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, mshw0011_acpi_match); + +static struct i2c_driver mshw0011_driver = { + .probe = mshw0011_probe, + .remove = mshw0011_remove, + .driver = { + .name = "mshw0011", + .acpi_match_table = mshw0011_acpi_match, + }, +}; +module_i2c_driver(mshw0011_driver); + +MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>"); +MODULE_DESCRIPTION("mshw0011 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c new file mode 100644 index 0000000000..897cdd9c3a --- /dev/null +++ b/drivers/platform/surface/surface_acpi_notify.c @@ -0,0 +1,910 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for the Surface ACPI Notify (SAN) interface/shim. + * + * Translates communication from ACPI to Surface System Aggregator Module + * (SSAM/SAM) requests and back, specifically SAM-over-SSH. Translates SSAM + * events back to ACPI notifications. Allows handling of discrete GPU + * notifications sent from ACPI via the SAN interface by providing them to any + * registered external driver. 
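+ *
+ * A dGPU driver would consume these events roughly as sketched below
+ * (illustrative only; 'my_dgpu_notify', 'my_nb', and 'my_dev' are
+ * hypothetical, the registration helpers are the ones exported by this
+ * driver):
+ *
+ *   static int my_dgpu_notify(struct notifier_block *nb,
+ *                             unsigned long action, void *data)
+ *   {
+ *       struct san_dgpu_event *evt = data;  // action == evt->command
+ *       ...handle the event...
+ *       return NOTIFY_OK;
+ *   }
+ *
+ *   san_client_link(my_dev);              // keep SAN up while we are bound
+ *   san_dgpu_notifier_register(&my_nb);   // my_nb.notifier_call = my_dgpu_notify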
+ * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <asm/unaligned.h> +#include <linux/acpi.h> +#include <linux/delay.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/platform_device.h> +#include <linux/rwsem.h> + +#include <linux/surface_aggregator/controller.h> +#include <linux/surface_acpi_notify.h> + +struct san_data { + struct device *dev; + struct ssam_controller *ctrl; + + struct acpi_connection_info info; + + struct ssam_event_notifier nf_bat; + struct ssam_event_notifier nf_tmp; +}; + +#define to_san_data(ptr, member) \ + container_of(ptr, struct san_data, member) + +static struct workqueue_struct *san_wq; + +/* -- dGPU notifier interface. ---------------------------------------------- */ + +struct san_rqsg_if { + struct rw_semaphore lock; + struct device *dev; + struct blocking_notifier_head nh; +}; + +static struct san_rqsg_if san_rqsg_if = { + .lock = __RWSEM_INITIALIZER(san_rqsg_if.lock), + .dev = NULL, + .nh = BLOCKING_NOTIFIER_INIT(san_rqsg_if.nh), +}; + +static int san_set_rqsg_interface_device(struct device *dev) +{ + int status = 0; + + down_write(&san_rqsg_if.lock); + if (!san_rqsg_if.dev && dev) + san_rqsg_if.dev = dev; + else + status = -EBUSY; + up_write(&san_rqsg_if.lock); + + return status; +} + +/** + * san_client_link() - Link client as consumer to SAN device. + * @client: The client to link. + * + * Sets up a device link between the provided client device as consumer and + * the SAN device as provider. This function can be used to ensure that the + * SAN interface has been set up and will be set up for as long as the driver + * of the client device is bound. This guarantees that, during that time, all + * dGPU events will be received by any registered notifier. + * + * The link will be automatically removed once the client device's driver is + * unbound. + * + * Return: Returns zero on success, %-ENXIO if the SAN interface has not been + * set up yet, and %-ENOMEM if device link creation failed. + */ +int san_client_link(struct device *client) +{ + const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER; + struct device_link *link; + + down_read(&san_rqsg_if.lock); + + if (!san_rqsg_if.dev) { + up_read(&san_rqsg_if.lock); + return -ENXIO; + } + + link = device_link_add(client, san_rqsg_if.dev, flags); + if (!link) { + up_read(&san_rqsg_if.lock); + return -ENOMEM; + } + + if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) { + up_read(&san_rqsg_if.lock); + return -ENXIO; + } + + up_read(&san_rqsg_if.lock); + return 0; +} +EXPORT_SYMBOL_GPL(san_client_link); + +/** + * san_dgpu_notifier_register() - Register a SAN dGPU notifier. + * @nb: The notifier-block to register. + * + * Registers a SAN dGPU notifier, receiving any new SAN dGPU events sent from + * ACPI. The registered notifier will be called with &struct san_dgpu_event + * as notifier data and the command ID of that event as notifier action. + */ +int san_dgpu_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&san_rqsg_if.nh, nb); +} +EXPORT_SYMBOL_GPL(san_dgpu_notifier_register); + +/** + * san_dgpu_notifier_unregister() - Unregister a SAN dGPU notifier. + * @nb: The notifier-block to unregister. 
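+ *
+ * Return: Returns zero on success, or %-ENOENT if the given notifier
+ * block has not been registered before.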
+ */ +int san_dgpu_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&san_rqsg_if.nh, nb); +} +EXPORT_SYMBOL_GPL(san_dgpu_notifier_unregister); + +static int san_dgpu_notifier_call(struct san_dgpu_event *evt) +{ + int ret; + + ret = blocking_notifier_call_chain(&san_rqsg_if.nh, evt->command, evt); + return notifier_to_errno(ret); +} + + +/* -- ACPI _DSM event relay. ------------------------------------------------ */ + +#define SAN_DSM_REVISION 0 + +/* 93b666c5-70c6-469f-a215-3d487c91ab3c */ +static const guid_t SAN_DSM_UUID = + GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d, + 0x48, 0x7c, 0x91, 0xab, 0x3c); + +enum san_dsm_event_fn { + SAN_DSM_EVENT_FN_BAT1_STAT = 0x03, + SAN_DSM_EVENT_FN_BAT1_INFO = 0x04, + SAN_DSM_EVENT_FN_ADP1_STAT = 0x05, + SAN_DSM_EVENT_FN_ADP1_INFO = 0x06, + SAN_DSM_EVENT_FN_BAT2_STAT = 0x07, + SAN_DSM_EVENT_FN_BAT2_INFO = 0x08, + SAN_DSM_EVENT_FN_THERMAL = 0x09, + SAN_DSM_EVENT_FN_DPTF = 0x0a, +}; + +enum sam_event_cid_bat { + SAM_EVENT_CID_BAT_BIX = 0x15, + SAM_EVENT_CID_BAT_BST = 0x16, + SAM_EVENT_CID_BAT_ADP = 0x17, + SAM_EVENT_CID_BAT_PROT = 0x18, + SAM_EVENT_CID_BAT_DPTF = 0x4f, +}; + +enum sam_event_cid_tmp { + SAM_EVENT_CID_TMP_TRIP = 0x0b, +}; + +struct san_event_work { + struct delayed_work work; + struct device *dev; + struct ssam_event event; /* must be last */ +}; + +static int san_acpi_notify_event(struct device *dev, u64 func, + union acpi_object *param) +{ + acpi_handle san = ACPI_HANDLE(dev); + union acpi_object *obj; + int status = 0; + + if (!acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, BIT_ULL(func))) + return 0; + + dev_dbg(dev, "notify event %#04llx\n", func); + + obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION, + func, param, ACPI_TYPE_BUFFER); + if (!obj) + return -EFAULT; + + if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) { + dev_err(dev, "got unexpected result from _DSM\n"); + status = -EPROTO; + } + + ACPI_FREE(obj); + return status; +} + +static int san_evt_bat_adp(struct device *dev, const struct ssam_event *event) +{ + int status; + + status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_ADP1_STAT, NULL); + if (status) + return status; + + /* + * Ensure that the battery states get updated correctly. When the + * battery is fully charged and an adapter is plugged in, it sometimes + * is not updated correctly, instead showing it as charging. + * Explicitly trigger battery updates to fix this. + */ + + status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT1_STAT, NULL); + if (status) + return status; + + return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT2_STAT, NULL); +} + +static int san_evt_bat_bix(struct device *dev, const struct ssam_event *event) +{ + enum san_dsm_event_fn fn; + + if (event->instance_id == 0x02) + fn = SAN_DSM_EVENT_FN_BAT2_INFO; + else + fn = SAN_DSM_EVENT_FN_BAT1_INFO; + + return san_acpi_notify_event(dev, fn, NULL); +} + +static int san_evt_bat_bst(struct device *dev, const struct ssam_event *event) +{ + enum san_dsm_event_fn fn; + + if (event->instance_id == 0x02) + fn = SAN_DSM_EVENT_FN_BAT2_STAT; + else + fn = SAN_DSM_EVENT_FN_BAT1_STAT; + + return san_acpi_notify_event(dev, fn, NULL); +} + +static int san_evt_bat_dptf(struct device *dev, const struct ssam_event *event) +{ + union acpi_object payload; + + /* + * The Surface ACPI expects a buffer and not a package. It specifically + * checks for ObjectType (Arg3) == 0x03. This will cause a warning in + * acpica/nsarguments.c, but that warning can be safely ignored. 
+ */
+ payload.type = ACPI_TYPE_BUFFER;
+ payload.buffer.length = event->length;
+ payload.buffer.pointer = (u8 *)&event->data[0];
+
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_DPTF, &payload);
+}
+
+static unsigned long san_evt_bat_delay(u8 cid)
+{
+ switch (cid) {
+ case SAM_EVENT_CID_BAT_ADP:
+ /*
+ * Wait for battery state to update before signaling adapter
+ * change.
+ */
+ return msecs_to_jiffies(5000);
+
+ case SAM_EVENT_CID_BAT_BST:
+ /* Ensure we do not miss anything important due to caching. */
+ return msecs_to_jiffies(2000);
+
+ default:
+ return 0;
+ }
+}
+
+static bool san_evt_bat(const struct ssam_event *event, struct device *dev)
+{
+ int status;
+
+ switch (event->command_id) {
+ case SAM_EVENT_CID_BAT_BIX:
+ status = san_evt_bat_bix(dev, event);
+ break;
+
+ case SAM_EVENT_CID_BAT_BST:
+ status = san_evt_bat_bst(dev, event);
+ break;
+
+ case SAM_EVENT_CID_BAT_ADP:
+ status = san_evt_bat_adp(dev, event);
+ break;
+
+ case SAM_EVENT_CID_BAT_PROT:
+ /*
+ * TODO: Implement support for battery protection status change
+ * event.
+ */
+ return true;
+
+ case SAM_EVENT_CID_BAT_DPTF:
+ status = san_evt_bat_dptf(dev, event);
+ break;
+
+ default:
+ return false;
+ }
+
+ if (status) {
+ dev_err(dev, "error handling power event (cid = %#04x)\n",
+ event->command_id);
+ }
+
+ return true;
+}
+
+static void san_evt_bat_workfn(struct work_struct *work)
+{
+ struct san_event_work *ev;
+
+ ev = container_of(work, struct san_event_work, work.work);
+ san_evt_bat(&ev->event, ev->dev);
+ kfree(ev);
+}
+
+static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
+ const struct ssam_event *event)
+{
+ struct san_data *d = to_san_data(nf, nf_bat);
+ struct san_event_work *work;
+ unsigned long delay = san_evt_bat_delay(event->command_id);
+
+ if (delay == 0)
+ return san_evt_bat(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
+
+ work = kzalloc(sizeof(*work) + event->length, GFP_KERNEL);
+ if (!work)
+ return ssam_notifier_from_errno(-ENOMEM);
+
+ INIT_DELAYED_WORK(&work->work, san_evt_bat_workfn);
+ work->dev = d->dev;
+
+ work->event = *event;
+ memcpy(work->event.data, event->data, event->length);
+
+ queue_delayed_work(san_wq, &work->work, delay);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static int san_evt_tmp_trip(struct device *dev, const struct ssam_event *event)
+{
+ union acpi_object param;
+
+ /*
+ * The Surface ACPI expects an integer and not a package. This will
+ * cause a warning in acpica/nsarguments.c, but that warning can be
+ * safely ignored.
+ */
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = event->instance_id;
+
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_THERMAL, &param);
+}
+
+static bool san_evt_tmp(const struct ssam_event *event, struct device *dev)
+{
+ int status;
+
+ switch (event->command_id) {
+ case SAM_EVENT_CID_TMP_TRIP:
+ status = san_evt_tmp_trip(dev, event);
+ break;
+
+ default:
+ return false;
+ }
+
+ if (status) {
+ dev_err(dev, "error handling thermal event (cid = %#04x)\n",
+ event->command_id);
+ }
+
+ return true;
+}
+
+static u32 san_evt_tmp_nf(struct ssam_event_notifier *nf,
+ const struct ssam_event *event)
+{
+ struct san_data *d = to_san_data(nf, nf_tmp);
+
+ return san_evt_tmp(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
+}
+
+
+/* -- ACPI GSB OperationRegion handler -------------------------------------- */
+
+struct gsb_data_in {
+ u8 cv;
+} __packed;
+
+struct gsb_data_rqsx {
+ u8 cv; /* Command value (san_gsb_request_cv). */
+ u8 tc; /* Target category. */
+ u8 tid; /* Target ID. */
+ u8 iid; /* Instance ID. 
*/ + u8 snc; /* Expect-response-flag. */ + u8 cid; /* Command ID. */ + u16 cdl; /* Payload length. */ + u8 pld[]; /* Payload. */ +} __packed; + +struct gsb_data_etwl { + u8 cv; /* Command value (should be 0x02). */ + u8 etw3; /* Unknown. */ + u8 etw4; /* Unknown. */ + u8 msg[]; /* Error message (ASCIIZ). */ +} __packed; + +struct gsb_data_out { + u8 status; /* _SSH communication status. */ + u8 len; /* _SSH payload length. */ + u8 pld[]; /* _SSH payload. */ +} __packed; + +union gsb_buffer_data { + struct gsb_data_in in; /* Common input. */ + struct gsb_data_rqsx rqsx; /* RQSX input. */ + struct gsb_data_etwl etwl; /* ETWL input. */ + struct gsb_data_out out; /* Output. */ +}; + +struct gsb_buffer { + u8 status; /* GSB AttribRawProcess status. */ + u8 len; /* GSB AttribRawProcess length. */ + union gsb_buffer_data data; +} __packed; + +#define SAN_GSB_MAX_RQSX_PAYLOAD (U8_MAX - 2 - sizeof(struct gsb_data_rqsx)) +#define SAN_GSB_MAX_RESPONSE (U8_MAX - 2 - sizeof(struct gsb_data_out)) + +#define SAN_GSB_COMMAND 0 + +enum san_gsb_request_cv { + SAN_GSB_REQUEST_CV_RQST = 0x01, + SAN_GSB_REQUEST_CV_ETWL = 0x02, + SAN_GSB_REQUEST_CV_RQSG = 0x03, +}; + +#define SAN_REQUEST_NUM_TRIES 5 + +static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *b) +{ + struct gsb_data_etwl *etwl = &b->data.etwl; + + if (b->len < sizeof(struct gsb_data_etwl)) { + dev_err(d->dev, "invalid ETWL package (len = %d)\n", b->len); + return AE_OK; + } + + dev_err(d->dev, "ETWL(%#04x, %#04x): %.*s\n", etwl->etw3, etwl->etw4, + (unsigned int)(b->len - sizeof(struct gsb_data_etwl)), + (char *)etwl->msg); + + /* Indicate success. */ + b->status = 0x00; + b->len = 0x00; + + return AE_OK; +} + +static +struct gsb_data_rqsx *san_validate_rqsx(struct device *dev, const char *type, + struct gsb_buffer *b) +{ + struct gsb_data_rqsx *rqsx = &b->data.rqsx; + + if (b->len < sizeof(struct gsb_data_rqsx)) { + dev_err(dev, "invalid %s package (len = %d)\n", type, b->len); + return NULL; + } + + if (get_unaligned(&rqsx->cdl) != b->len - sizeof(struct gsb_data_rqsx)) { + dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n", + type, b->len, get_unaligned(&rqsx->cdl)); + return NULL; + } + + if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) { + dev_err(dev, "payload for %s package too large (cdl = %d)\n", + type, get_unaligned(&rqsx->cdl)); + return NULL; + } + + return rqsx; +} + +static void gsb_rqsx_response_error(struct gsb_buffer *gsb, int status) +{ + gsb->status = 0x00; + gsb->len = 0x02; + gsb->data.out.status = (u8)(-status); + gsb->data.out.len = 0x00; +} + +static void gsb_rqsx_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len) +{ + gsb->status = 0x00; + gsb->len = len + 2; + gsb->data.out.status = 0x00; + gsb->data.out.len = len; + + if (len) + memcpy(&gsb->data.out.pld[0], ptr, len); +} + +static acpi_status san_rqst_fixup_suspended(struct san_data *d, + struct ssam_request *rqst, + struct gsb_buffer *gsb) +{ + if (rqst->target_category == SSAM_SSH_TC_BAS && rqst->command_id == 0x0D) { + u8 base_state = 1; + + /* Base state quirk: + * The base state may be queried from ACPI when the EC is still + * suspended. In this case it will return '-EPERM'. This query + * will only be triggered from the ACPI lid GPE interrupt, thus + * we are either in laptop or studio mode (base status 0x01 or + * 0x02). Furthermore, we will only get here if the device (and + * EC) have been suspended. + * + * We now assume that the device is in laptop mode (0x01). 
This + * has the drawback that it will wake the device when unfolding + * it in studio mode, but it also allows us to avoid actively + * waiting for the EC to wake up, which may incur a notable + * delay. + */ + + dev_dbg(d->dev, "rqst: fixup: base-state quirk\n"); + + gsb_rqsx_response_success(gsb, &base_state, sizeof(base_state)); + return AE_OK; + } + + gsb_rqsx_response_error(gsb, -ENXIO); + return AE_OK; +} + +static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer) +{ + u8 rspbuf[SAN_GSB_MAX_RESPONSE]; + struct gsb_data_rqsx *gsb_rqst; + struct ssam_request rqst; + struct ssam_response rsp; + int status = 0; + + gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer); + if (!gsb_rqst) + return AE_OK; + + rqst.target_category = gsb_rqst->tc; + rqst.target_id = gsb_rqst->tid; + rqst.command_id = gsb_rqst->cid; + rqst.instance_id = gsb_rqst->iid; + rqst.flags = gsb_rqst->snc ? SSAM_REQUEST_HAS_RESPONSE : 0; + rqst.length = get_unaligned(&gsb_rqst->cdl); + rqst.payload = &gsb_rqst->pld[0]; + + rsp.capacity = ARRAY_SIZE(rspbuf); + rsp.length = 0; + rsp.pointer = &rspbuf[0]; + + /* Handle suspended device. */ + if (d->dev->power.is_suspended) { + dev_warn(d->dev, "rqst: device is suspended, not executing\n"); + return san_rqst_fixup_suspended(d, &rqst, buffer); + } + + status = __ssam_retry(ssam_request_do_sync_onstack, SAN_REQUEST_NUM_TRIES, + d->ctrl, &rqst, &rsp, SAN_GSB_MAX_RQSX_PAYLOAD); + + if (!status) { + gsb_rqsx_response_success(buffer, rsp.pointer, rsp.length); + } else { + dev_err(d->dev, "rqst: failed with error %d\n", status); + gsb_rqsx_response_error(buffer, status); + } + + return AE_OK; +} + +static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer) +{ + struct gsb_data_rqsx *gsb_rqsg; + struct san_dgpu_event evt; + int status; + + gsb_rqsg = san_validate_rqsx(d->dev, "RQSG", buffer); + if (!gsb_rqsg) + return AE_OK; + + evt.category = gsb_rqsg->tc; + evt.target = gsb_rqsg->tid; + evt.command = gsb_rqsg->cid; + evt.instance = gsb_rqsg->iid; + evt.length = get_unaligned(&gsb_rqsg->cdl); + evt.payload = &gsb_rqsg->pld[0]; + + status = san_dgpu_notifier_call(&evt); + if (!status) { + gsb_rqsx_response_success(buffer, NULL, 0); + } else { + dev_err(d->dev, "rqsg: failed with error %d\n", status); + gsb_rqsx_response_error(buffer, status); + } + + return AE_OK; +} + +static acpi_status san_opreg_handler(u32 function, acpi_physical_address command, + u32 bits, u64 *value64, void *opreg_context, + void *region_context) +{ + struct san_data *d = to_san_data(opreg_context, info); + struct gsb_buffer *buffer = (struct gsb_buffer *)value64; + int accessor_type = (function & 0xFFFF0000) >> 16; + + if (command != SAN_GSB_COMMAND) { + dev_warn(d->dev, "unsupported command: %#04llx\n", command); + return AE_OK; + } + + if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) { + dev_err(d->dev, "invalid access type: %#04x\n", accessor_type); + return AE_OK; + } + + /* Buffer must have at least contain the command-value. */ + if (buffer->len == 0) { + dev_err(d->dev, "request-package too small\n"); + return AE_OK; + } + + switch (buffer->data.in.cv) { + case SAN_GSB_REQUEST_CV_RQST: + return san_rqst(d, buffer); + + case SAN_GSB_REQUEST_CV_ETWL: + return san_etwl(d, buffer); + + case SAN_GSB_REQUEST_CV_RQSG: + return san_rqsg(d, buffer); + + default: + dev_warn(d->dev, "unsupported SAN0 request (cv: %#04x)\n", + buffer->data.in.cv); + return AE_OK; + } +} + + +/* -- Driver setup. 
--------------------------------------------------------- */ + +static int san_events_register(struct platform_device *pdev) +{ + struct san_data *d = platform_get_drvdata(pdev); + int status; + + d->nf_bat.base.priority = 1; + d->nf_bat.base.fn = san_evt_bat_nf; + d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM; + d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT; + d->nf_bat.event.id.instance = 0; + d->nf_bat.event.mask = SSAM_EVENT_MASK_TARGET; + d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED; + + d->nf_tmp.base.priority = 1; + d->nf_tmp.base.fn = san_evt_tmp_nf; + d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM; + d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP; + d->nf_tmp.event.id.instance = 0; + d->nf_tmp.event.mask = SSAM_EVENT_MASK_TARGET; + d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED; + + status = ssam_notifier_register(d->ctrl, &d->nf_bat); + if (status) + return status; + + status = ssam_notifier_register(d->ctrl, &d->nf_tmp); + if (status) + ssam_notifier_unregister(d->ctrl, &d->nf_bat); + + return status; +} + +static void san_events_unregister(struct platform_device *pdev) +{ + struct san_data *d = platform_get_drvdata(pdev); + + ssam_notifier_unregister(d->ctrl, &d->nf_bat); + ssam_notifier_unregister(d->ctrl, &d->nf_tmp); +} + +#define san_consumer_printk(level, dev, handle, fmt, ...) \ +do { \ + char *path = "<error getting consumer path>"; \ + struct acpi_buffer buffer = { \ + .length = ACPI_ALLOCATE_BUFFER, \ + .pointer = NULL, \ + }; \ + \ + if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) \ + path = buffer.pointer; \ + \ + dev_##level(dev, "[%s]: " fmt, path, ##__VA_ARGS__); \ + kfree(buffer.pointer); \ +} while (0) + +#define san_consumer_dbg(dev, handle, fmt, ...) \ + san_consumer_printk(dbg, dev, handle, fmt, ##__VA_ARGS__) + +#define san_consumer_warn(dev, handle, fmt, ...) \ + san_consumer_printk(warn, dev, handle, fmt, ##__VA_ARGS__) + +static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle) +{ + struct acpi_handle_list dep_devices; + acpi_handle supplier = ACPI_HANDLE(&pdev->dev); + acpi_status status; + int i; + + if (!acpi_has_method(handle, "_DEP")) + return false; + + status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); + if (ACPI_FAILURE(status)) { + san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n"); + return false; + } + + for (i = 0; i < dep_devices.count; i++) { + if (dep_devices.handles[i] == supplier) + return true; + } + + return false; +} + +static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl, + void *context, void **rv) +{ + const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER; + struct platform_device *pdev = context; + struct acpi_device *adev; + struct device_link *link; + + if (!is_san_consumer(pdev, handle)) + return AE_OK; + + /* Ignore ACPI devices that are not present. */ + adev = acpi_fetch_acpi_dev(handle); + if (!adev) + return AE_OK; + + san_consumer_dbg(&pdev->dev, handle, "creating device link\n"); + + /* Try to set up device links, ignore but log errors. */ + link = device_link_add(&adev->dev, &pdev->dev, flags); + if (!link) { + san_consumer_warn(&pdev->dev, handle, "failed to create device link\n"); + return AE_OK; + } + + return AE_OK; +} + +static int san_consumer_links_setup(struct platform_device *pdev) +{ + acpi_status status; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, san_consumer_setup, NULL, + pdev, NULL); + + return status ? 
-EFAULT : 0;
+}
+
+static int san_probe(struct platform_device *pdev)
+{
+ struct acpi_device *san = ACPI_COMPANION(&pdev->dev);
+ struct ssam_controller *ctrl;
+ struct san_data *data;
+ acpi_status astatus;
+ int status;
+
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ status = san_consumer_links_setup(pdev);
+ if (status)
+ return status;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &pdev->dev;
+ data->ctrl = ctrl;
+
+ platform_set_drvdata(pdev, data);
+
+ astatus = acpi_install_address_space_handler(san->handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &san_opreg_handler, NULL,
+ &data->info);
+ if (ACPI_FAILURE(astatus))
+ return -ENXIO;
+
+ status = san_events_register(pdev);
+ if (status)
+ goto err_enable_events;
+
+ status = san_set_rqsg_interface_device(&pdev->dev);
+ if (status)
+ goto err_install_dev;
+
+ acpi_dev_clear_dependencies(san);
+ return 0;
+
+err_install_dev:
+ san_events_unregister(pdev);
+err_enable_events:
+ acpi_remove_address_space_handler(san->handle, ACPI_ADR_SPACE_GSBUS,
+ &san_opreg_handler);
+ return status;
+}
+
+static int san_remove(struct platform_device *pdev)
+{
+ acpi_handle san = ACPI_HANDLE(&pdev->dev);
+
+ san_set_rqsg_interface_device(NULL);
+ acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
+ &san_opreg_handler);
+ san_events_unregister(pdev);
+
+ /*
+ * We have unregistered our event sources. Now we need to ensure that
+ * all delayed works they may have spawned are run to completion.
+ */
+ flush_workqueue(san_wq);
+
+ return 0;
+}
+
+static const struct acpi_device_id san_match[] = {
+ { "MSHW0091" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, san_match);
+
+static struct platform_driver surface_acpi_notify = {
+ .probe = san_probe,
+ .remove = san_remove,
+ .driver = {
+ .name = "surface_acpi_notify",
+ .acpi_match_table = san_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int __init san_init(void)
+{
+ int ret;
+
+ san_wq = alloc_workqueue("san_wq", 0, 0);
+ if (!san_wq)
+ return -ENOMEM;
+ ret = platform_driver_register(&surface_acpi_notify);
+ if (ret)
+ destroy_workqueue(san_wq);
+ return ret;
+}
+module_init(san_init);
+
+static void __exit san_exit(void)
+{
+ platform_driver_unregister(&surface_acpi_notify);
+ destroy_workqueue(san_wq);
+}
+module_exit(san_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface ACPI Notify driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
new file mode 100644
index 0000000000..07f0ed6583
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_cdev.c
@@ -0,0 +1,810 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Provides user-space access to the SSAM EC via the /dev/surface/aggregator
+ * misc device. Intended for debugging and development. 
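+ *
+ * After enabling the desired events through the interface's ioctls,
+ * user-space tools receive them by read()-ing from the misc device; a
+ * rough (hypothetical) usage sketch:
+ *
+ *   int fd = open("/dev/surface/aggregator", O_RDWR);
+ *   char buf[4096];
+ *   ssize_t n = read(fd, buf, sizeof(buf));
+ *   // 'buf' now holds one or more struct ssam_cdev_event records, each
+ *   // immediately followed by its payload (see the UAPI cdev header)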
+ * + * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <linux/fs.h> +#include <linux/ioctl.h> +#include <linux/kernel.h> +#include <linux/kfifo.h> +#include <linux/kref.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/poll.h> +#include <linux/rwsem.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> + +#include <linux/surface_aggregator/cdev.h> +#include <linux/surface_aggregator/controller.h> +#include <linux/surface_aggregator/serial_hub.h> + +#define SSAM_CDEV_DEVICE_NAME "surface_aggregator_cdev" + + +/* -- Main structures. ------------------------------------------------------ */ + +enum ssam_cdev_device_state { + SSAM_CDEV_DEVICE_SHUTDOWN_BIT = BIT(0), +}; + +struct ssam_cdev { + struct kref kref; + struct rw_semaphore lock; + + struct device *dev; + struct ssam_controller *ctrl; + struct miscdevice mdev; + unsigned long flags; + + struct rw_semaphore client_lock; /* Guards client list. */ + struct list_head client_list; +}; + +struct ssam_cdev_client; + +struct ssam_cdev_notifier { + struct ssam_cdev_client *client; + struct ssam_event_notifier nf; +}; + +struct ssam_cdev_client { + struct ssam_cdev *cdev; + struct list_head node; + + struct mutex notifier_lock; /* Guards notifier access for registration */ + struct ssam_cdev_notifier *notifier[SSH_NUM_EVENTS]; + + struct mutex read_lock; /* Guards FIFO buffer read access */ + struct mutex write_lock; /* Guards FIFO buffer write access */ + DECLARE_KFIFO(buffer, u8, 4096); + + wait_queue_head_t waitq; + struct fasync_struct *fasync; +}; + +static void __ssam_cdev_release(struct kref *kref) +{ + kfree(container_of(kref, struct ssam_cdev, kref)); +} + +static struct ssam_cdev *ssam_cdev_get(struct ssam_cdev *cdev) +{ + if (cdev) + kref_get(&cdev->kref); + + return cdev; +} + +static void ssam_cdev_put(struct ssam_cdev *cdev) +{ + if (cdev) + kref_put(&cdev->kref, __ssam_cdev_release); +} + + +/* -- Notifier handling. ---------------------------------------------------- */ + +static u32 ssam_cdev_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in) +{ + struct ssam_cdev_notifier *cdev_nf = container_of(nf, struct ssam_cdev_notifier, nf); + struct ssam_cdev_client *client = cdev_nf->client; + struct ssam_cdev_event event; + size_t n = struct_size(&event, data, in->length); + + /* Translate event. */ + event.target_category = in->target_category; + event.target_id = in->target_id; + event.command_id = in->command_id; + event.instance_id = in->instance_id; + event.length = in->length; + + mutex_lock(&client->write_lock); + + /* Make sure we have enough space. */ + if (kfifo_avail(&client->buffer) < n) { + dev_warn(client->cdev->dev, + "buffer full, dropping event (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n", + in->target_category, in->target_id, in->command_id, in->instance_id); + mutex_unlock(&client->write_lock); + return 0; + } + + /* Copy event header and payload. */ + kfifo_in(&client->buffer, (const u8 *)&event, struct_size(&event, data, 0)); + kfifo_in(&client->buffer, &in->data[0], in->length); + + mutex_unlock(&client->write_lock); + + /* Notify waiting readers. */ + kill_fasync(&client->fasync, SIGIO, POLL_IN); + wake_up_interruptible(&client->waitq); + + /* + * Don't mark events as handled, this is the job of a proper driver and + * not the debugging interface. 
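+	 *
+	 * For reference, each event sits in the FIFO as a struct
+	 * ssam_cdev_event header immediately followed by event.length
+	 * payload bytes, exactly as written above. A user-space consumer
+	 * can thus decode the stream along these lines (illustrative
+	 * sketch only, error handling omitted):
+	 *
+	 *   struct ssam_cdev_event ev;
+	 *
+	 *   read(fd, &ev, sizeof(ev));
+	 *   read(fd, payload, ev.length);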
+ */ + return 0; +} + +static int ssam_cdev_notifier_register(struct ssam_cdev_client *client, u8 tc, int priority) +{ + const u16 rqid = ssh_tc_to_rqid(tc); + const u16 event = ssh_rqid_to_event(rqid); + struct ssam_cdev_notifier *nf; + int status; + + lockdep_assert_held_read(&client->cdev->lock); + + /* Validate notifier target category. */ + if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + + mutex_lock(&client->notifier_lock); + + /* Check if the notifier has already been registered. */ + if (client->notifier[event]) { + mutex_unlock(&client->notifier_lock); + return -EEXIST; + } + + /* Allocate new notifier. */ + nf = kzalloc(sizeof(*nf), GFP_KERNEL); + if (!nf) { + mutex_unlock(&client->notifier_lock); + return -ENOMEM; + } + + /* + * Create a dummy notifier with the minimal required fields for + * observer registration. Note that we can skip fully specifying event + * and registry here as we do not need any matching and use silent + * registration, which does not enable the corresponding event. + */ + nf->client = client; + nf->nf.base.fn = ssam_cdev_notifier; + nf->nf.base.priority = priority; + nf->nf.event.id.target_category = tc; + nf->nf.event.mask = 0; /* Do not do any matching. */ + nf->nf.flags = SSAM_EVENT_NOTIFIER_OBSERVER; + + /* Register notifier. */ + status = ssam_notifier_register(client->cdev->ctrl, &nf->nf); + if (status) + kfree(nf); + else + client->notifier[event] = nf; + + mutex_unlock(&client->notifier_lock); + return status; +} + +static int ssam_cdev_notifier_unregister(struct ssam_cdev_client *client, u8 tc) +{ + const u16 rqid = ssh_tc_to_rqid(tc); + const u16 event = ssh_rqid_to_event(rqid); + int status; + + lockdep_assert_held_read(&client->cdev->lock); + + /* Validate notifier target category. */ + if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + + mutex_lock(&client->notifier_lock); + + /* Check if the notifier is currently registered. */ + if (!client->notifier[event]) { + mutex_unlock(&client->notifier_lock); + return -ENOENT; + } + + /* Unregister and free notifier. */ + status = ssam_notifier_unregister(client->cdev->ctrl, &client->notifier[event]->nf); + kfree(client->notifier[event]); + client->notifier[event] = NULL; + + mutex_unlock(&client->notifier_lock); + return status; +} + +static void ssam_cdev_notifier_unregister_all(struct ssam_cdev_client *client) +{ + int i; + + down_read(&client->cdev->lock); + + /* + * This function may be used during shutdown, thus we need to test for + * cdev->ctrl instead of the SSAM_CDEV_DEVICE_SHUTDOWN_BIT bit. + */ + if (client->cdev->ctrl) { + for (i = 0; i < SSH_NUM_EVENTS; i++) + ssam_cdev_notifier_unregister(client, i + 1); + + } else { + int count = 0; + + /* + * Device has been shut down. Any notifier remaining is a bug, + * so warn about that as this would otherwise hardly be + * noticeable. Nevertheless, free them as well. + */ + mutex_lock(&client->notifier_lock); + for (i = 0; i < SSH_NUM_EVENTS; i++) { + count += !!(client->notifier[i]); + kfree(client->notifier[i]); + client->notifier[i] = NULL; + } + mutex_unlock(&client->notifier_lock); + + WARN_ON(count > 0); + } + + up_read(&client->cdev->lock); +} + + +/* -- IOCTL functions. 
------------------------------------------------------ */ + +static long ssam_cdev_request(struct ssam_cdev_client *client, struct ssam_cdev_request __user *r) +{ + struct ssam_cdev_request rqst; + struct ssam_request spec = {}; + struct ssam_response rsp = {}; + const void __user *plddata; + void __user *rspdata; + int status = 0, ret = 0, tmp; + + lockdep_assert_held_read(&client->cdev->lock); + + ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r)); + if (ret) + goto out; + + plddata = u64_to_user_ptr(rqst.payload.data); + rspdata = u64_to_user_ptr(rqst.response.data); + + /* Setup basic request fields. */ + spec.target_category = rqst.target_category; + spec.target_id = rqst.target_id; + spec.command_id = rqst.command_id; + spec.instance_id = rqst.instance_id; + spec.flags = 0; + spec.length = rqst.payload.length; + spec.payload = NULL; + + if (rqst.flags & SSAM_CDEV_REQUEST_HAS_RESPONSE) + spec.flags |= SSAM_REQUEST_HAS_RESPONSE; + + if (rqst.flags & SSAM_CDEV_REQUEST_UNSEQUENCED) + spec.flags |= SSAM_REQUEST_UNSEQUENCED; + + rsp.capacity = rqst.response.length; + rsp.length = 0; + rsp.pointer = NULL; + + /* Get request payload from user-space. */ + if (spec.length) { + if (!plddata) { + ret = -EINVAL; + goto out; + } + + /* + * Note: spec.length is limited to U16_MAX bytes via struct + * ssam_cdev_request. This is slightly larger than the + * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the + * underlying protocol (note that nothing remotely this size + * should ever be allocated in any normal case). This size is + * validated later in ssam_request_do_sync(), for allocation + * the bound imposed by u16 should be enough. + */ + spec.payload = kzalloc(spec.length, GFP_KERNEL); + if (!spec.payload) { + ret = -ENOMEM; + goto out; + } + + if (copy_from_user((void *)spec.payload, plddata, spec.length)) { + ret = -EFAULT; + goto out; + } + } + + /* Allocate response buffer. */ + if (rsp.capacity) { + if (!rspdata) { + ret = -EINVAL; + goto out; + } + + /* + * Note: rsp.capacity is limited to U16_MAX bytes via struct + * ssam_cdev_request. This is slightly larger than the + * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the + * underlying protocol (note that nothing remotely this size + * should ever be allocated in any normal case). In later use, + * this capacity does not have to be strictly bounded, as it + * is only used as an output buffer to be written to. For + * allocation the bound imposed by u16 should be enough. + */ + rsp.pointer = kzalloc(rsp.capacity, GFP_KERNEL); + if (!rsp.pointer) { + ret = -ENOMEM; + goto out; + } + } + + /* Perform request. */ + status = ssam_request_do_sync(client->cdev->ctrl, &spec, &rsp); + if (status) + goto out; + + /* Copy response to user-space. */ + if (rsp.length && copy_to_user(rspdata, rsp.pointer, rsp.length)) + ret = -EFAULT; + +out: + /* Always try to set response-length and status. */ + tmp = put_user(rsp.length, &r->response.length); + if (tmp) + ret = tmp; + + tmp = put_user(status, &r->status); + if (tmp) + ret = tmp; + + /* Cleanup. 
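+	 * Free the request payload and response buffers regardless of the
+	 * outcome.
+	 *
+	 * For illustration: user space drives this path by filling in a
+	 * struct ssam_cdev_request and issuing the SSAM_CDEV_REQUEST ioctl
+	 * on the misc device, roughly as follows (sketch only; the command
+	 * shown is hypothetical, error handling omitted):
+	 *
+	 *   u8 buf[32];
+	 *   struct ssam_cdev_request rqst = {
+	 *           .target_category = 0x01,
+	 *           .target_id       = 0x01,
+	 *           .command_id      = 0x0b,    // hypothetical command
+	 *           .instance_id     = 0x00,
+	 *           .flags           = SSAM_CDEV_REQUEST_HAS_RESPONSE,
+	 *           .response.data   = (__u64)(uintptr_t)buf,
+	 *           .response.length = sizeof(buf),
+	 *   };
+	 *
+	 *   ioctl(fd, SSAM_CDEV_REQUEST, &rqst);
+	 *
+	 * On return, rqst.status holds the EC status and rqst.response.length
+	 * the number of valid response bytes, as written back above.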
*/
+	kfree(spec.payload);
+	kfree(rsp.pointer);
+
+	return ret;
+}
+
+static long ssam_cdev_notif_register(struct ssam_cdev_client *client,
+				     const struct ssam_cdev_notifier_desc __user *d)
+{
+	struct ssam_cdev_notifier_desc desc;
+	long ret;
+
+	lockdep_assert_held_read(&client->cdev->lock);
+
+	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+	if (ret)
+		return ret;
+
+	return ssam_cdev_notifier_register(client, desc.target_category, desc.priority);
+}
+
+static long ssam_cdev_notif_unregister(struct ssam_cdev_client *client,
+				       const struct ssam_cdev_notifier_desc __user *d)
+{
+	struct ssam_cdev_notifier_desc desc;
+	long ret;
+
+	lockdep_assert_held_read(&client->cdev->lock);
+
+	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+	if (ret)
+		return ret;
+
+	return ssam_cdev_notifier_unregister(client, desc.target_category);
+}
+
+static long ssam_cdev_event_enable(struct ssam_cdev_client *client,
+				   const struct ssam_cdev_event_desc __user *d)
+{
+	struct ssam_cdev_event_desc desc;
+	struct ssam_event_registry reg;
+	struct ssam_event_id id;
+	long ret;
+
+	lockdep_assert_held_read(&client->cdev->lock);
+
+	/* Read descriptor from user-space. */
+	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+	if (ret)
+		return ret;
+
+	/* Translate descriptor. */
+	reg.target_category = desc.reg.target_category;
+	reg.target_id = desc.reg.target_id;
+	reg.cid_enable = desc.reg.cid_enable;
+	reg.cid_disable = desc.reg.cid_disable;
+
+	id.target_category = desc.id.target_category;
+	id.instance = desc.id.instance;
+
+	/* Enable event. */
+	return ssam_controller_event_enable(client->cdev->ctrl, reg, id, desc.flags);
+}
+
+static long ssam_cdev_event_disable(struct ssam_cdev_client *client,
+				    const struct ssam_cdev_event_desc __user *d)
+{
+	struct ssam_cdev_event_desc desc;
+	struct ssam_event_registry reg;
+	struct ssam_event_id id;
+	long ret;
+
+	lockdep_assert_held_read(&client->cdev->lock);
+
+	/* Read descriptor from user-space. */
+	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
+	if (ret)
+		return ret;
+
+	/* Translate descriptor. */
+	reg.target_category = desc.reg.target_category;
+	reg.target_id = desc.reg.target_id;
+	reg.cid_enable = desc.reg.cid_enable;
+	reg.cid_disable = desc.reg.cid_disable;
+
+	id.target_category = desc.id.target_category;
+	id.instance = desc.id.instance;
+
+	/* Disable event. */
+	return ssam_controller_event_disable(client->cdev->ctrl, reg, id, desc.flags);
+}
+
+
+/* -- File operations. ------------------------------------------------------ */
+
+static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
+{
+	struct miscdevice *mdev = filp->private_data;
+	struct ssam_cdev_client *client;
+	struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);
+
+	/* Initialize client. */
+	client = vzalloc(sizeof(*client));
+	if (!client)
+		return -ENOMEM;
+
+	client->cdev = ssam_cdev_get(cdev);
+
+	INIT_LIST_HEAD(&client->node);
+
+	mutex_init(&client->notifier_lock);
+
+	mutex_init(&client->read_lock);
+	mutex_init(&client->write_lock);
+	INIT_KFIFO(client->buffer);
+	init_waitqueue_head(&client->waitq);
+
+	filp->private_data = client;
+
+	/* Attach client.
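+	 *
+	 * The shutdown bit is checked under the client lock below so that a
+	 * client can never be attached after ssam_dbg_device_remove() has
+	 * started tearing down the device.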
*/ + down_write(&cdev->client_lock); + + if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) { + up_write(&cdev->client_lock); + mutex_destroy(&client->write_lock); + mutex_destroy(&client->read_lock); + mutex_destroy(&client->notifier_lock); + ssam_cdev_put(client->cdev); + vfree(client); + return -ENODEV; + } + list_add_tail(&client->node, &cdev->client_list); + + up_write(&cdev->client_lock); + + stream_open(inode, filp); + return 0; +} + +static int ssam_cdev_device_release(struct inode *inode, struct file *filp) +{ + struct ssam_cdev_client *client = filp->private_data; + + /* Force-unregister all remaining notifiers of this client. */ + ssam_cdev_notifier_unregister_all(client); + + /* Detach client. */ + down_write(&client->cdev->client_lock); + list_del(&client->node); + up_write(&client->cdev->client_lock); + + /* Free client. */ + mutex_destroy(&client->write_lock); + mutex_destroy(&client->read_lock); + + mutex_destroy(&client->notifier_lock); + + ssam_cdev_put(client->cdev); + vfree(client); + + return 0; +} + +static long __ssam_cdev_device_ioctl(struct ssam_cdev_client *client, unsigned int cmd, + unsigned long arg) +{ + lockdep_assert_held_read(&client->cdev->lock); + + switch (cmd) { + case SSAM_CDEV_REQUEST: + return ssam_cdev_request(client, (struct ssam_cdev_request __user *)arg); + + case SSAM_CDEV_NOTIF_REGISTER: + return ssam_cdev_notif_register(client, + (struct ssam_cdev_notifier_desc __user *)arg); + + case SSAM_CDEV_NOTIF_UNREGISTER: + return ssam_cdev_notif_unregister(client, + (struct ssam_cdev_notifier_desc __user *)arg); + + case SSAM_CDEV_EVENT_ENABLE: + return ssam_cdev_event_enable(client, (struct ssam_cdev_event_desc __user *)arg); + + case SSAM_CDEV_EVENT_DISABLE: + return ssam_cdev_event_disable(client, (struct ssam_cdev_event_desc __user *)arg); + + default: + return -ENOTTY; + } +} + +static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct ssam_cdev_client *client = file->private_data; + long status; + + /* Ensure that controller is valid for as long as we need it. */ + if (down_read_killable(&client->cdev->lock)) + return -ERESTARTSYS; + + if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags)) { + up_read(&client->cdev->lock); + return -ENODEV; + } + + status = __ssam_cdev_device_ioctl(client, cmd, arg); + + up_read(&client->cdev->lock); + return status; +} + +static ssize_t ssam_cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offs) +{ + struct ssam_cdev_client *client = file->private_data; + struct ssam_cdev *cdev = client->cdev; + unsigned int copied; + int status = 0; + + if (down_read_killable(&cdev->lock)) + return -ERESTARTSYS; + + /* Make sure we're not shut down. */ + if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) { + up_read(&cdev->lock); + return -ENODEV; + } + + do { + /* Check availability, wait if necessary. */ + if (kfifo_is_empty(&client->buffer)) { + up_read(&cdev->lock); + + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + status = wait_event_interruptible(client->waitq, + !kfifo_is_empty(&client->buffer) || + test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, + &cdev->flags)); + if (status < 0) + return status; + + if (down_read_killable(&cdev->lock)) + return -ERESTARTSYS; + + /* Need to check that we're not shut down again. */ + if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) { + up_read(&cdev->lock); + return -ENODEV; + } + } + + /* Try to read from FIFO. 
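+		 *
+		 * From user space this behaves like a plain stream device;
+		 * a consumer can simply loop on read(2), e.g. (illustrative
+		 * sketch, handle_events() being a hypothetical callback):
+		 *
+		 *   while ((n = read(fd, buf, sizeof(buf))) > 0)
+		 *           handle_events(buf, n);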
*/ + if (mutex_lock_interruptible(&client->read_lock)) { + up_read(&cdev->lock); + return -ERESTARTSYS; + } + + status = kfifo_to_user(&client->buffer, buf, count, &copied); + mutex_unlock(&client->read_lock); + + if (status < 0) { + up_read(&cdev->lock); + return status; + } + + /* We might not have gotten anything, check this here. */ + if (copied == 0 && (file->f_flags & O_NONBLOCK)) { + up_read(&cdev->lock); + return -EAGAIN; + } + } while (copied == 0); + + up_read(&cdev->lock); + return copied; +} + +static __poll_t ssam_cdev_poll(struct file *file, struct poll_table_struct *pt) +{ + struct ssam_cdev_client *client = file->private_data; + __poll_t events = 0; + + if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags)) + return EPOLLHUP | EPOLLERR; + + poll_wait(file, &client->waitq, pt); + + if (!kfifo_is_empty(&client->buffer)) + events |= EPOLLIN | EPOLLRDNORM; + + return events; +} + +static int ssam_cdev_fasync(int fd, struct file *file, int on) +{ + struct ssam_cdev_client *client = file->private_data; + + return fasync_helper(fd, file, on, &client->fasync); +} + +static const struct file_operations ssam_controller_fops = { + .owner = THIS_MODULE, + .open = ssam_cdev_device_open, + .release = ssam_cdev_device_release, + .read = ssam_cdev_read, + .poll = ssam_cdev_poll, + .fasync = ssam_cdev_fasync, + .unlocked_ioctl = ssam_cdev_device_ioctl, + .compat_ioctl = ssam_cdev_device_ioctl, + .llseek = no_llseek, +}; + + +/* -- Device and driver setup ----------------------------------------------- */ + +static int ssam_dbg_device_probe(struct platform_device *pdev) +{ + struct ssam_controller *ctrl; + struct ssam_cdev *cdev; + int status; + + ctrl = ssam_client_bind(&pdev->dev); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl); + + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (!cdev) + return -ENOMEM; + + kref_init(&cdev->kref); + init_rwsem(&cdev->lock); + cdev->ctrl = ctrl; + cdev->dev = &pdev->dev; + + cdev->mdev.parent = &pdev->dev; + cdev->mdev.minor = MISC_DYNAMIC_MINOR; + cdev->mdev.name = "surface_aggregator"; + cdev->mdev.nodename = "surface/aggregator"; + cdev->mdev.fops = &ssam_controller_fops; + + init_rwsem(&cdev->client_lock); + INIT_LIST_HEAD(&cdev->client_list); + + status = misc_register(&cdev->mdev); + if (status) { + kfree(cdev); + return status; + } + + platform_set_drvdata(pdev, cdev); + return 0; +} + +static int ssam_dbg_device_remove(struct platform_device *pdev) +{ + struct ssam_cdev *cdev = platform_get_drvdata(pdev); + struct ssam_cdev_client *client; + + /* + * Mark device as shut-down. Prevent new clients from being added and + * new operations from being executed. + */ + set_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags); + + down_write(&cdev->client_lock); + + /* Remove all notifiers registered by us. */ + list_for_each_entry(client, &cdev->client_list, node) { + ssam_cdev_notifier_unregister_all(client); + } + + /* Wake up async clients. */ + list_for_each_entry(client, &cdev->client_list, node) { + kill_fasync(&client->fasync, SIGIO, POLL_HUP); + } + + /* Wake up blocking clients. */ + list_for_each_entry(client, &cdev->client_list, node) { + wake_up_interruptible(&client->waitq); + } + + up_write(&cdev->client_lock); + + /* + * The controller is only guaranteed to be valid for as long as the + * driver is bound. Remove controller so that any lingering open files + * cannot access it any more after we're gone. 
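+	 * Open files themselves keep the struct ssam_cdev alive via the
+	 * reference taken in ssam_cdev_device_open(), so only the controller
+	 * and device pointers need to be cleared here.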
+ */ + down_write(&cdev->lock); + cdev->ctrl = NULL; + cdev->dev = NULL; + up_write(&cdev->lock); + + misc_deregister(&cdev->mdev); + + ssam_cdev_put(cdev); + return 0; +} + +static struct platform_device *ssam_cdev_device; + +static struct platform_driver ssam_cdev_driver = { + .probe = ssam_dbg_device_probe, + .remove = ssam_dbg_device_remove, + .driver = { + .name = SSAM_CDEV_DEVICE_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +static int __init ssam_debug_init(void) +{ + int status; + + ssam_cdev_device = platform_device_alloc(SSAM_CDEV_DEVICE_NAME, + PLATFORM_DEVID_NONE); + if (!ssam_cdev_device) + return -ENOMEM; + + status = platform_device_add(ssam_cdev_device); + if (status) + goto err_device; + + status = platform_driver_register(&ssam_cdev_driver); + if (status) + goto err_driver; + + return 0; + +err_driver: + platform_device_del(ssam_cdev_device); +err_device: + platform_device_put(ssam_cdev_device); + return status; +} +module_init(ssam_debug_init); + +static void __exit ssam_debug_exit(void) +{ + platform_driver_unregister(&ssam_cdev_driver); + platform_device_unregister(ssam_cdev_device); +} +module_exit(ssam_debug_exit); + +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>"); +MODULE_DESCRIPTION("User-space interface for Surface System Aggregator Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/surface/surface_aggregator_hub.c b/drivers/platform/surface/surface_aggregator_hub.c new file mode 100644 index 0000000000..8b8b80228c --- /dev/null +++ b/drivers/platform/surface/surface_aggregator_hub.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Surface System Aggregator Module (SSAM) subsystem device hubs. + * + * Provides a driver for SSAM subsystems device hubs. This driver performs + * instantiation of the devices managed by said hubs and takes care of + * (hot-)removal. + * + * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <linux/kernel.h> +#include <linux/limits.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include <linux/surface_aggregator/device.h> + + +/* -- SSAM generic subsystem hub driver framework. -------------------------- */ + +enum ssam_hub_state { + SSAM_HUB_UNINITIALIZED, /* Only set during initialization. */ + SSAM_HUB_CONNECTED, + SSAM_HUB_DISCONNECTED, +}; + +enum ssam_hub_flags { + SSAM_HUB_HOT_REMOVED, +}; + +struct ssam_hub; + +struct ssam_hub_ops { + int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state); +}; + +struct ssam_hub { + struct ssam_device *sdev; + + enum ssam_hub_state state; + unsigned long flags; + + struct delayed_work update_work; + unsigned long connect_delay; + + struct ssam_event_notifier notif; + struct ssam_hub_ops ops; +}; + +struct ssam_hub_desc { + struct { + struct ssam_event_registry reg; + struct ssam_event_id id; + enum ssam_event_mask mask; + } event; + + struct { + u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event); + int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state); + } ops; + + unsigned long connect_delay_ms; +}; + +static void ssam_hub_update_workfn(struct work_struct *work) +{ + struct ssam_hub *hub = container_of(work, struct ssam_hub, update_work.work); + enum ssam_hub_state state; + int status = 0; + + status = hub->ops.get_state(hub, &state); + if (status) + return; + + /* + * There is a small possibility that hub devices were hot-removed and + * re-added before we were able to remove them here. 
In that case, both + * the state returned by get_state() and the state of the hub will + * equal SSAM_HUB_CONNECTED and we would bail early below, which would + * leave child devices without proper (re-)initialization and the + * hot-remove flag set. + * + * Therefore, we check whether devices have been hot-removed via an + * additional flag on the hub and, in this case, override the returned + * hub state. In case of a missed disconnect (i.e. get_state returned + * "connected"), we further need to re-schedule this work (with the + * appropriate delay) as the actual connect work submission might have + * been merged with this one. + * + * This then leads to one of two cases: Either we submit an unnecessary + * work item (which will get ignored via either the queue or the state + * checks) or, in the unlikely case that the work is actually required, + * double the normal connect delay. + */ + if (test_and_clear_bit(SSAM_HUB_HOT_REMOVED, &hub->flags)) { + if (state == SSAM_HUB_CONNECTED) + schedule_delayed_work(&hub->update_work, hub->connect_delay); + + state = SSAM_HUB_DISCONNECTED; + } + + if (hub->state == state) + return; + hub->state = state; + + if (hub->state == SSAM_HUB_CONNECTED) + status = ssam_device_register_clients(hub->sdev); + else + ssam_remove_clients(&hub->sdev->dev); + + if (status) + dev_err(&hub->sdev->dev, "failed to update hub child devices: %d\n", status); +} + +static int ssam_hub_mark_hot_removed(struct device *dev, void *_data) +{ + struct ssam_device *sdev = to_ssam_device(dev); + + if (is_ssam_device(dev)) + ssam_device_mark_hot_removed(sdev); + + return 0; +} + +static void ssam_hub_update(struct ssam_hub *hub, bool connected) +{ + unsigned long delay; + + /* Mark devices as hot-removed before we remove any. */ + if (!connected) { + set_bit(SSAM_HUB_HOT_REMOVED, &hub->flags); + device_for_each_child_reverse(&hub->sdev->dev, NULL, ssam_hub_mark_hot_removed); + } + + /* + * Delay update when the base/keyboard cover is being connected to give + * devices/EC some time to set up. + */ + delay = connected ? hub->connect_delay : 0; + + schedule_delayed_work(&hub->update_work, delay); +} + +static int __maybe_unused ssam_hub_resume(struct device *dev) +{ + struct ssam_hub *hub = dev_get_drvdata(dev); + + schedule_delayed_work(&hub->update_work, 0); + return 0; +} +static SIMPLE_DEV_PM_OPS(ssam_hub_pm_ops, NULL, ssam_hub_resume); + +static int ssam_hub_probe(struct ssam_device *sdev) +{ + const struct ssam_hub_desc *desc; + struct ssam_hub *hub; + int status; + + desc = ssam_device_get_match_data(sdev); + if (!desc) { + WARN(1, "no driver match data specified"); + return -EINVAL; + } + + hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL); + if (!hub) + return -ENOMEM; + + hub->sdev = sdev; + hub->state = SSAM_HUB_UNINITIALIZED; + + hub->notif.base.priority = INT_MAX; /* This notifier should run first. 
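+	 * Notifiers are invoked in order of decreasing priority, so INT_MAX
+	 * ensures the hub sees (dis-)connect events before any of the client
+	 * drivers it manages.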
*/ + hub->notif.base.fn = desc->ops.notify; + hub->notif.event.reg = desc->event.reg; + hub->notif.event.id = desc->event.id; + hub->notif.event.mask = desc->event.mask; + hub->notif.event.flags = SSAM_EVENT_SEQUENCED; + + hub->connect_delay = msecs_to_jiffies(desc->connect_delay_ms); + hub->ops.get_state = desc->ops.get_state; + + INIT_DELAYED_WORK(&hub->update_work, ssam_hub_update_workfn); + + ssam_device_set_drvdata(sdev, hub); + + status = ssam_device_notifier_register(sdev, &hub->notif); + if (status) + return status; + + schedule_delayed_work(&hub->update_work, 0); + return 0; +} + +static void ssam_hub_remove(struct ssam_device *sdev) +{ + struct ssam_hub *hub = ssam_device_get_drvdata(sdev); + + ssam_device_notifier_unregister(sdev, &hub->notif); + cancel_delayed_work_sync(&hub->update_work); + ssam_remove_clients(&sdev->dev); +} + + +/* -- SSAM base-subsystem hub driver. --------------------------------------- */ + +/* + * Some devices (especially battery) may need a bit of time to be fully usable + * after being (re-)connected. This delay has been determined via + * experimentation. + */ +#define SSAM_BASE_UPDATE_CONNECT_DELAY 2500 + +SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x0d, + .instance_id = 0x00, +}); + +#define SSAM_BAS_OPMODE_TABLET 0x00 +#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c + +static int ssam_base_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state) +{ + u8 opmode; + int status; + + status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode); + if (status < 0) { + dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status); + return status; + } + + if (opmode != SSAM_BAS_OPMODE_TABLET) + *state = SSAM_HUB_CONNECTED; + else + *state = SSAM_HUB_DISCONNECTED; + + return 0; +} + +static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event) +{ + struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif); + + if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION) + return 0; + + if (event->length < 1) { + dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length); + return 0; + } + + ssam_hub_update(hub, event->data[0]); + + /* + * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and + * consumed by the detachment system driver. We're just a (more or less) + * silent observer. + */ + return 0; +} + +static const struct ssam_hub_desc base_hub = { + .event = { + .reg = SSAM_EVENT_REGISTRY_SAM, + .id = { + .target_category = SSAM_SSH_TC_BAS, + .instance = 0, + }, + .mask = SSAM_EVENT_MASK_NONE, + }, + .ops = { + .notify = ssam_base_hub_notif, + .get_state = ssam_base_hub_query_state, + }, + .connect_delay_ms = SSAM_BASE_UPDATE_CONNECT_DELAY, +}; + + +/* -- SSAM KIP-subsystem hub driver. ---------------------------------------- */ + +/* + * Some devices may need a bit of time to be fully usable after being + * (re-)connected. This delay has been determined via experimentation. 
+ */ +#define SSAM_KIP_UPDATE_CONNECT_DELAY 250 + +#define SSAM_EVENT_KIP_CID_CONNECTION 0x2c + +SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_query_state, u8, { + .target_category = SSAM_SSH_TC_KIP, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x2c, + .instance_id = 0x00, +}); + +static int ssam_kip_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state) +{ + int status; + u8 connected; + + status = ssam_retry(__ssam_kip_query_state, hub->sdev->ctrl, &connected); + if (status < 0) { + dev_err(&hub->sdev->dev, "failed to query KIP connection state: %d\n", status); + return status; + } + + *state = connected ? SSAM_HUB_CONNECTED : SSAM_HUB_DISCONNECTED; + return 0; +} + +static u32 ssam_kip_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event) +{ + struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif); + + if (event->command_id != SSAM_EVENT_KIP_CID_CONNECTION) + return 0; /* Return "unhandled". */ + + if (event->length < 1) { + dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length); + return 0; + } + + ssam_hub_update(hub, event->data[0]); + return SSAM_NOTIF_HANDLED; +} + +static const struct ssam_hub_desc kip_hub = { + .event = { + .reg = SSAM_EVENT_REGISTRY_SAM, + .id = { + .target_category = SSAM_SSH_TC_KIP, + .instance = 0, + }, + .mask = SSAM_EVENT_MASK_TARGET, + }, + .ops = { + .notify = ssam_kip_hub_notif, + .get_state = ssam_kip_hub_query_state, + }, + .connect_delay_ms = SSAM_KIP_UPDATE_CONNECT_DELAY, +}; + + +/* -- Driver registration. -------------------------------------------------- */ + +static const struct ssam_device_id ssam_hub_match[] = { + { SSAM_VDEV(HUB, SAM, SSAM_SSH_TC_KIP, 0x00), (unsigned long)&kip_hub }, + { SSAM_VDEV(HUB, SAM, SSAM_SSH_TC_BAS, 0x00), (unsigned long)&base_hub }, + { } +}; +MODULE_DEVICE_TABLE(ssam, ssam_hub_match); + +static struct ssam_device_driver ssam_subsystem_hub_driver = { + .probe = ssam_hub_probe, + .remove = ssam_hub_remove, + .match_table = ssam_hub_match, + .driver = { + .name = "surface_aggregator_subsystem_hub", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &ssam_hub_pm_ops, + }, +}; +module_ssam_device_driver(ssam_subsystem_hub_driver); + +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>"); +MODULE_DESCRIPTION("Subsystem device hub driver for Surface System Aggregator Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c new file mode 100644 index 0000000000..0fe5be5396 --- /dev/null +++ b/drivers/platform/surface/surface_aggregator_registry.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Surface System Aggregator Module (SSAM) client device registry. + * + * Registry for non-platform/non-ACPI SSAM client devices, i.e. devices that + * cannot be auto-detected. Provides device-hubs and performs instantiation + * for these devices. + * + * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <linux/acpi.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/types.h> + +#include <linux/surface_aggregator/device.h> + + +/* -- Device registry. ------------------------------------------------------ */ + +/* + * SSAM device names follow the SSAM module alias, meaning they are prefixed + * with 'ssam:', followed by domain, category, target ID, instance ID, and + * function, each encoded as two-digit hexadecimal, separated by ':'. 
In other
+ * words, it follows the scheme
+ *
+ *     ssam:dd:cc:tt:ii:ff
+ *
+ * where 'dd', 'cc', 'tt', 'ii', and 'ff' are the two-digit hexadecimal
+ * values mentioned above, respectively.
+ */

+/* Root node. */
+static const struct software_node ssam_node_root = {
+	.name = "ssam_platform_hub",
+};
+
+/* KIP device hub (connects keyboard cover devices on Surface Pro 8). */
+static const struct software_node ssam_node_hub_kip = {
+	.name = "ssam:00:00:01:0e:00",
+	.parent = &ssam_node_root,
+};
+
+/* Base device hub (devices attached to Surface Book 3 base). */
+static const struct software_node ssam_node_hub_base = {
+	.name = "ssam:00:00:01:11:00",
+	.parent = &ssam_node_root,
+};
+
+/* AC adapter. */
+static const struct software_node ssam_node_bat_ac = {
+	.name = "ssam:01:02:01:01:01",
+	.parent = &ssam_node_root,
+};
+
+/* Primary battery. */
+static const struct software_node ssam_node_bat_main = {
+	.name = "ssam:01:02:01:01:00",
+	.parent = &ssam_node_root,
+};
+
+/* Secondary battery (Surface Book 3). */
+static const struct software_node ssam_node_bat_sb3base = {
+	.name = "ssam:01:02:02:01:00",
+	.parent = &ssam_node_hub_base,
+};
+
+/* Platform profile / performance-mode device. */
+static const struct software_node ssam_node_tmp_pprof = {
+	.name = "ssam:01:03:01:00:01",
+	.parent = &ssam_node_root,
+};
+
+/* Tablet-mode switch via KIP subsystem. */
+static const struct software_node ssam_node_kip_tablet_switch = {
+	.name = "ssam:01:0e:01:00:01",
+	.parent = &ssam_node_root,
+};
+
+/* DTX / detachment-system device (Surface Book 3). */
+static const struct software_node ssam_node_bas_dtx = {
+	.name = "ssam:01:11:01:00:00",
+	.parent = &ssam_node_root,
+};
+
+/* HID keyboard (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_keyboard = {
+	.name = "ssam:01:15:01:01:00",
+	.parent = &ssam_node_root,
+};
+
+/* HID pen stash (SAM, TID=1; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_sam_penstash = {
+	.name = "ssam:01:15:01:02:00",
+	.parent = &ssam_node_root,
+};
+
+/* HID touchpad (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_touchpad = {
+	.name = "ssam:01:15:01:03:00",
+	.parent = &ssam_node_root,
+};
+
+/* HID device instance 6 (SAM, TID=1, HID sensor collection). */
+static const struct software_node ssam_node_hid_sam_sensors = {
+	.name = "ssam:01:15:01:06:00",
+	.parent = &ssam_node_root,
+};
+
+/* HID device instance 7 (SAM, TID=1, UCM UCSI HID client). */
+static const struct software_node ssam_node_hid_sam_ucm_ucsi = {
+	.name = "ssam:01:15:01:07:00",
+	.parent = &ssam_node_root,
+};
+
+/* HID system controls (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_sysctrl = {
+	.name = "ssam:01:15:01:08:00",
+	.parent = &ssam_node_root,
+};
+
+/* HID keyboard. */
+static const struct software_node ssam_node_hid_main_keyboard = {
+	.name = "ssam:01:15:02:01:00",
+	.parent = &ssam_node_root,
+};
+
+/* HID touchpad. */
+static const struct software_node ssam_node_hid_main_touchpad = {
+	.name = "ssam:01:15:02:03:00",
+	.parent = &ssam_node_root,
+};
+
+/* HID device instance 5 (unknown HID device). */
+static const struct software_node ssam_node_hid_main_iid5 = {
+	.name = "ssam:01:15:02:05:00",
+	.parent = &ssam_node_root,
+};
+
+/* HID keyboard (base hub). */
+static const struct software_node ssam_node_hid_base_keyboard = {
+	.name = "ssam:01:15:02:01:00",
+	.parent = &ssam_node_hub_base,
+};
+
+/* HID touchpad (base hub). */
+static const struct software_node ssam_node_hid_base_touchpad = {
+	.name = "ssam:01:15:02:03:00",
+	.parent = &ssam_node_hub_base,
+};
+
+/* HID device instance 5 (unknown HID device, base hub). */
+static const struct software_node ssam_node_hid_base_iid5 = {
+	.name = "ssam:01:15:02:05:00",
+	.parent = &ssam_node_hub_base,
+};
+
+/* HID device instance 6 (unknown HID device, base hub). */
+static const struct software_node ssam_node_hid_base_iid6 = {
+	.name = "ssam:01:15:02:06:00",
+	.parent = &ssam_node_hub_base,
+};
+
+/* HID keyboard (KIP hub). */
+static const struct software_node ssam_node_hid_kip_keyboard = {
+	.name = "ssam:01:15:02:01:00",
+	.parent = &ssam_node_hub_kip,
+};
+
+/* HID pen stash (KIP hub; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_kip_penstash = {
+	.name = "ssam:01:15:02:02:00",
+	.parent = &ssam_node_hub_kip,
+};
+
+/* HID touchpad (KIP hub). */
+static const struct software_node ssam_node_hid_kip_touchpad = {
+	.name = "ssam:01:15:02:03:00",
+	.parent = &ssam_node_hub_kip,
+};
+
+/* HID device instance 5 (KIP hub, type-cover firmware update). */
+static const struct software_node ssam_node_hid_kip_fwupd = {
+	.name = "ssam:01:15:02:05:00",
+	.parent = &ssam_node_hub_kip,
+};
+
+/* Tablet-mode switch via POS subsystem. */
+static const struct software_node ssam_node_pos_tablet_switch = {
+	.name = "ssam:01:26:01:00:01",
+	.parent = &ssam_node_root,
+};
+
+/*
+ * Devices for 5th- and 6th-generation models:
+ * - Surface Book 2,
+ * - Surface Laptop 1 and 2,
+ * - Surface Pro 5 and 6.
+ */
+static const struct software_node *ssam_node_group_gen5[] = {
+	&ssam_node_root,
+	&ssam_node_tmp_pprof,
+	NULL,
+};
+
+/* Devices for Surface Book 3. */
+static const struct software_node *ssam_node_group_sb3[] = {
+	&ssam_node_root,
+	&ssam_node_hub_base,
+	&ssam_node_bat_ac,
+	&ssam_node_bat_main,
+	&ssam_node_bat_sb3base,
+	&ssam_node_tmp_pprof,
+	&ssam_node_bas_dtx,
+	&ssam_node_hid_base_keyboard,
+	&ssam_node_hid_base_touchpad,
+	&ssam_node_hid_base_iid5,
+	&ssam_node_hid_base_iid6,
+	NULL,
+};
+
+/* Devices for Surface Laptop 3 and 4. */
+static const struct software_node *ssam_node_group_sl3[] = {
+	&ssam_node_root,
+	&ssam_node_bat_ac,
+	&ssam_node_bat_main,
+	&ssam_node_tmp_pprof,
+	&ssam_node_hid_main_keyboard,
+	&ssam_node_hid_main_touchpad,
+	&ssam_node_hid_main_iid5,
+	NULL,
+};
+
+/* Devices for Surface Laptop 5. */
+static const struct software_node *ssam_node_group_sl5[] = {
+	&ssam_node_root,
+	&ssam_node_bat_ac,
+	&ssam_node_bat_main,
+	&ssam_node_tmp_pprof,
+	&ssam_node_hid_main_keyboard,
+	&ssam_node_hid_main_touchpad,
+	&ssam_node_hid_main_iid5,
+	&ssam_node_hid_sam_ucm_ucsi,
+	NULL,
+};
+
+/* Devices for Surface Laptop Studio. */
+static const struct software_node *ssam_node_group_sls[] = {
+	&ssam_node_root,
+	&ssam_node_bat_ac,
+	&ssam_node_bat_main,
+	&ssam_node_tmp_pprof,
+	&ssam_node_pos_tablet_switch,
+	&ssam_node_hid_sam_keyboard,
+	&ssam_node_hid_sam_penstash,
+	&ssam_node_hid_sam_touchpad,
+	&ssam_node_hid_sam_sensors,
+	&ssam_node_hid_sam_ucm_ucsi,
+	&ssam_node_hid_sam_sysctrl,
+	NULL,
+};
+
+/* Devices for Surface Laptop Go. */
+static const struct software_node *ssam_node_group_slg1[] = {
+	&ssam_node_root,
+	&ssam_node_bat_ac,
+	&ssam_node_bat_main,
+	&ssam_node_tmp_pprof,
+	NULL,
+};
+
+/* Devices for Surface Pro 7 and Surface Pro 7+.
*/ +static const struct software_node *ssam_node_group_sp7[] = { + &ssam_node_root, + &ssam_node_bat_ac, + &ssam_node_bat_main, + &ssam_node_tmp_pprof, + NULL, +}; + +/* Devices for Surface Pro 8 */ +static const struct software_node *ssam_node_group_sp8[] = { + &ssam_node_root, + &ssam_node_hub_kip, + &ssam_node_bat_ac, + &ssam_node_bat_main, + &ssam_node_tmp_pprof, + &ssam_node_kip_tablet_switch, + &ssam_node_hid_kip_keyboard, + &ssam_node_hid_kip_penstash, + &ssam_node_hid_kip_touchpad, + &ssam_node_hid_kip_fwupd, + &ssam_node_hid_sam_sensors, + &ssam_node_hid_sam_ucm_ucsi, + NULL, +}; + +/* Devices for Surface Pro 9 */ +static const struct software_node *ssam_node_group_sp9[] = { + &ssam_node_root, + &ssam_node_hub_kip, + &ssam_node_bat_ac, + &ssam_node_bat_main, + &ssam_node_tmp_pprof, + &ssam_node_pos_tablet_switch, + &ssam_node_hid_kip_keyboard, + &ssam_node_hid_kip_penstash, + &ssam_node_hid_kip_touchpad, + &ssam_node_hid_kip_fwupd, + &ssam_node_hid_sam_sensors, + &ssam_node_hid_sam_ucm_ucsi, + NULL, +}; + + +/* -- SSAM platform/meta-hub driver. ---------------------------------------- */ + +static const struct acpi_device_id ssam_platform_hub_match[] = { + /* Surface Pro 4, 5, and 6 (OMBR < 0x10) */ + { "MSHW0081", (unsigned long)ssam_node_group_gen5 }, + + /* Surface Pro 6 (OMBR >= 0x10) */ + { "MSHW0111", (unsigned long)ssam_node_group_gen5 }, + + /* Surface Pro 7 */ + { "MSHW0116", (unsigned long)ssam_node_group_sp7 }, + + /* Surface Pro 7+ */ + { "MSHW0119", (unsigned long)ssam_node_group_sp7 }, + + /* Surface Pro 8 */ + { "MSHW0263", (unsigned long)ssam_node_group_sp8 }, + + /* Surface Pro 9 */ + { "MSHW0343", (unsigned long)ssam_node_group_sp9 }, + + /* Surface Book 2 */ + { "MSHW0107", (unsigned long)ssam_node_group_gen5 }, + + /* Surface Book 3 */ + { "MSHW0117", (unsigned long)ssam_node_group_sb3 }, + + /* Surface Laptop 1 */ + { "MSHW0086", (unsigned long)ssam_node_group_gen5 }, + + /* Surface Laptop 2 */ + { "MSHW0112", (unsigned long)ssam_node_group_gen5 }, + + /* Surface Laptop 3 (13", Intel) */ + { "MSHW0114", (unsigned long)ssam_node_group_sl3 }, + + /* Surface Laptop 3 (15", AMD) and 4 (15", AMD) */ + { "MSHW0110", (unsigned long)ssam_node_group_sl3 }, + + /* Surface Laptop 4 (13", Intel) */ + { "MSHW0250", (unsigned long)ssam_node_group_sl3 }, + + /* Surface Laptop 5 */ + { "MSHW0350", (unsigned long)ssam_node_group_sl5 }, + + /* Surface Laptop Go 1 */ + { "MSHW0118", (unsigned long)ssam_node_group_slg1 }, + + /* Surface Laptop Go 2 */ + { "MSHW0290", (unsigned long)ssam_node_group_slg1 }, + + /* Surface Laptop Studio */ + { "MSHW0123", (unsigned long)ssam_node_group_sls }, + + { }, +}; +MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match); + +static int ssam_platform_hub_probe(struct platform_device *pdev) +{ + const struct software_node **nodes; + struct ssam_controller *ctrl; + struct fwnode_handle *root; + int status; + + nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev); + if (!nodes) + return -ENODEV; + + /* + * As we're adding the SSAM client devices as children under this device + * and not the SSAM controller, we need to add a device link to the + * controller to ensure that we remove all of our devices before the + * controller is removed. This also guarantees proper ordering for + * suspend/resume of the devices on this hub. + */ + ctrl = ssam_client_bind(&pdev->dev); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl) == -ENODEV ? 
-EPROBE_DEFER : PTR_ERR(ctrl); + + status = software_node_register_node_group(nodes); + if (status) + return status; + + root = software_node_fwnode(&ssam_node_root); + if (!root) { + software_node_unregister_node_group(nodes); + return -ENOENT; + } + + set_secondary_fwnode(&pdev->dev, root); + + status = __ssam_register_clients(&pdev->dev, ctrl, root); + if (status) { + set_secondary_fwnode(&pdev->dev, NULL); + software_node_unregister_node_group(nodes); + } + + platform_set_drvdata(pdev, nodes); + return status; +} + +static int ssam_platform_hub_remove(struct platform_device *pdev) +{ + const struct software_node **nodes = platform_get_drvdata(pdev); + + ssam_remove_clients(&pdev->dev); + set_secondary_fwnode(&pdev->dev, NULL); + software_node_unregister_node_group(nodes); + return 0; +} + +static struct platform_driver ssam_platform_hub_driver = { + .probe = ssam_platform_hub_probe, + .remove = ssam_platform_hub_remove, + .driver = { + .name = "surface_aggregator_platform_hub", + .acpi_match_table = ssam_platform_hub_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_platform_driver(ssam_platform_hub_driver); + +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>"); +MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c new file mode 100644 index 0000000000..c0a1a58692 --- /dev/null +++ b/drivers/platform/surface/surface_aggregator_tabletsw.c @@ -0,0 +1,645 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Surface System Aggregator Module (SSAM) tablet mode switch driver. + * + * Copyright (C) 2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <asm/unaligned.h> +#include <linux/input.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include <linux/surface_aggregator/controller.h> +#include <linux/surface_aggregator/device.h> + + +/* -- SSAM generic tablet switch driver framework. 
-------------------------- */ + +struct ssam_tablet_sw; + +struct ssam_tablet_sw_state { + u32 source; + u32 state; +}; + +struct ssam_tablet_sw_ops { + int (*get_state)(struct ssam_tablet_sw *sw, struct ssam_tablet_sw_state *state); + const char *(*state_name)(struct ssam_tablet_sw *sw, + const struct ssam_tablet_sw_state *state); + bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, + const struct ssam_tablet_sw_state *state); +}; + +struct ssam_tablet_sw { + struct ssam_device *sdev; + + struct ssam_tablet_sw_state state; + struct work_struct update_work; + struct input_dev *mode_switch; + + struct ssam_tablet_sw_ops ops; + struct ssam_event_notifier notif; +}; + +struct ssam_tablet_sw_desc { + struct { + const char *name; + const char *phys; + } dev; + + struct { + u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event); + int (*get_state)(struct ssam_tablet_sw *sw, struct ssam_tablet_sw_state *state); + const char *(*state_name)(struct ssam_tablet_sw *sw, + const struct ssam_tablet_sw_state *state); + bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, + const struct ssam_tablet_sw_state *state); + } ops; + + struct { + struct ssam_event_registry reg; + struct ssam_event_id id; + enum ssam_event_mask mask; + u8 flags; + } event; +}; + +static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ssam_tablet_sw *sw = dev_get_drvdata(dev); + const char *state = sw->ops.state_name(sw, &sw->state); + + return sysfs_emit(buf, "%s\n", state); +} +static DEVICE_ATTR_RO(state); + +static struct attribute *ssam_tablet_sw_attrs[] = { + &dev_attr_state.attr, + NULL, +}; + +static const struct attribute_group ssam_tablet_sw_group = { + .attrs = ssam_tablet_sw_attrs, +}; + +static void ssam_tablet_sw_update_workfn(struct work_struct *work) +{ + struct ssam_tablet_sw *sw = container_of(work, struct ssam_tablet_sw, update_work); + struct ssam_tablet_sw_state state; + int tablet, status; + + status = sw->ops.get_state(sw, &state); + if (status) + return; + + if (sw->state.source == state.source && sw->state.state == state.state) + return; + sw->state = state; + + /* Send SW_TABLET_MODE event. */ + tablet = sw->ops.state_is_tablet_mode(sw, &state); + input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet); + input_sync(sw->mode_switch); +} + +static int __maybe_unused ssam_tablet_sw_resume(struct device *dev) +{ + struct ssam_tablet_sw *sw = dev_get_drvdata(dev); + + schedule_work(&sw->update_work); + return 0; +} +static SIMPLE_DEV_PM_OPS(ssam_tablet_sw_pm_ops, NULL, ssam_tablet_sw_resume); + +static int ssam_tablet_sw_probe(struct ssam_device *sdev) +{ + const struct ssam_tablet_sw_desc *desc; + struct ssam_tablet_sw *sw; + int tablet, status; + + desc = ssam_device_get_match_data(sdev); + if (!desc) { + WARN(1, "no driver match data specified"); + return -EINVAL; + } + + sw = devm_kzalloc(&sdev->dev, sizeof(*sw), GFP_KERNEL); + if (!sw) + return -ENOMEM; + + sw->sdev = sdev; + + sw->ops.get_state = desc->ops.get_state; + sw->ops.state_name = desc->ops.state_name; + sw->ops.state_is_tablet_mode = desc->ops.state_is_tablet_mode; + + INIT_WORK(&sw->update_work, ssam_tablet_sw_update_workfn); + + ssam_device_set_drvdata(sdev, sw); + + /* Get initial state. */ + status = sw->ops.get_state(sw, &sw->state); + if (status) + return status; + + /* Set up tablet mode switch. 
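+	 *
+	 * The switch is exposed as a regular input device reporting
+	 * EV_SW/SW_TABLET_MODE, so generic user space (e.g. libinput) can
+	 * consume it without any driver-specific interface.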
*/ + sw->mode_switch = devm_input_allocate_device(&sdev->dev); + if (!sw->mode_switch) + return -ENOMEM; + + sw->mode_switch->name = desc->dev.name; + sw->mode_switch->phys = desc->dev.phys; + sw->mode_switch->id.bustype = BUS_HOST; + sw->mode_switch->dev.parent = &sdev->dev; + + tablet = sw->ops.state_is_tablet_mode(sw, &sw->state); + input_set_capability(sw->mode_switch, EV_SW, SW_TABLET_MODE); + input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet); + + status = input_register_device(sw->mode_switch); + if (status) + return status; + + /* Set up notifier. */ + sw->notif.base.priority = 0; + sw->notif.base.fn = desc->ops.notify; + sw->notif.event.reg = desc->event.reg; + sw->notif.event.id = desc->event.id; + sw->notif.event.mask = desc->event.mask; + sw->notif.event.flags = SSAM_EVENT_SEQUENCED; + + status = ssam_device_notifier_register(sdev, &sw->notif); + if (status) + return status; + + status = sysfs_create_group(&sdev->dev.kobj, &ssam_tablet_sw_group); + if (status) + goto err; + + /* We might have missed events during setup, so check again. */ + schedule_work(&sw->update_work); + return 0; + +err: + ssam_device_notifier_unregister(sdev, &sw->notif); + cancel_work_sync(&sw->update_work); + return status; +} + +static void ssam_tablet_sw_remove(struct ssam_device *sdev) +{ + struct ssam_tablet_sw *sw = ssam_device_get_drvdata(sdev); + + sysfs_remove_group(&sdev->dev.kobj, &ssam_tablet_sw_group); + + ssam_device_notifier_unregister(sdev, &sw->notif); + cancel_work_sync(&sw->update_work); +} + + +/* -- SSAM KIP tablet switch implementation. -------------------------------- */ + +#define SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED 0x1d + +enum ssam_kip_cover_state { + SSAM_KIP_COVER_STATE_DISCONNECTED = 0x01, + SSAM_KIP_COVER_STATE_CLOSED = 0x02, + SSAM_KIP_COVER_STATE_LAPTOP = 0x03, + SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04, + SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05, + SSAM_KIP_COVER_STATE_BOOK = 0x06, +}; + +static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, + const struct ssam_tablet_sw_state *state) +{ + switch (state->state) { + case SSAM_KIP_COVER_STATE_DISCONNECTED: + return "disconnected"; + + case SSAM_KIP_COVER_STATE_CLOSED: + return "closed"; + + case SSAM_KIP_COVER_STATE_LAPTOP: + return "laptop"; + + case SSAM_KIP_COVER_STATE_FOLDED_CANVAS: + return "folded-canvas"; + + case SSAM_KIP_COVER_STATE_FOLDED_BACK: + return "folded-back"; + + case SSAM_KIP_COVER_STATE_BOOK: + return "book"; + + default: + dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state->state); + return "<unknown>"; + } +} + +static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, + const struct ssam_tablet_sw_state *state) +{ + switch (state->state) { + case SSAM_KIP_COVER_STATE_DISCONNECTED: + case SSAM_KIP_COVER_STATE_FOLDED_CANVAS: + case SSAM_KIP_COVER_STATE_FOLDED_BACK: + case SSAM_KIP_COVER_STATE_BOOK: + return true; + + case SSAM_KIP_COVER_STATE_CLOSED: + case SSAM_KIP_COVER_STATE_LAPTOP: + return false; + + default: + dev_warn(&sw->sdev->dev, "unknown KIP cover state: %d\n", state->state); + return true; + } +} + +SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_get_cover_state, u8, { + .target_category = SSAM_SSH_TC_KIP, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x1d, + .instance_id = 0x00, +}); + +static int ssam_kip_get_cover_state(struct ssam_tablet_sw *sw, struct ssam_tablet_sw_state *state) +{ + int status; + u8 raw; + + status = ssam_retry(__ssam_kip_get_cover_state, sw->sdev->ctrl, &raw); + if (status < 0) { + dev_err(&sw->sdev->dev, 
"failed to query KIP lid state: %d\n", status); + return status; + } + + state->source = 0; /* Unused for KIP switch. */ + state->state = raw; + return 0; +} + +static u32 ssam_kip_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event) +{ + struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif); + + if (event->command_id != SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED) + return 0; /* Return "unhandled". */ + + if (event->length < 1) + dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length); + + schedule_work(&sw->update_work); + return SSAM_NOTIF_HANDLED; +} + +static const struct ssam_tablet_sw_desc ssam_kip_sw_desc = { + .dev = { + .name = "Microsoft Surface KIP Tablet Mode Switch", + .phys = "ssam/01:0e:01:00:01/input0", + }, + .ops = { + .notify = ssam_kip_sw_notif, + .get_state = ssam_kip_get_cover_state, + .state_name = ssam_kip_cover_state_name, + .state_is_tablet_mode = ssam_kip_cover_state_is_tablet_mode, + }, + .event = { + .reg = SSAM_EVENT_REGISTRY_SAM, + .id = { + .target_category = SSAM_SSH_TC_KIP, + .instance = 0, + }, + .mask = SSAM_EVENT_MASK_TARGET, + }, +}; + + +/* -- SSAM POS tablet switch implementation. -------------------------------- */ + +static bool tablet_mode_in_slate_state = true; +module_param(tablet_mode_in_slate_state, bool, 0644); +MODULE_PARM_DESC(tablet_mode_in_slate_state, "Enable tablet mode in slate device posture, default is 'true'"); + +#define SSAM_EVENT_POS_CID_POSTURE_CHANGED 0x03 +#define SSAM_POS_MAX_SOURCES 4 + +enum ssam_pos_source_id { + SSAM_POS_SOURCE_COVER = 0x00, + SSAM_POS_SOURCE_SLS = 0x03, +}; + +enum ssam_pos_state_cover { + SSAM_POS_COVER_DISCONNECTED = 0x01, + SSAM_POS_COVER_CLOSED = 0x02, + SSAM_POS_COVER_LAPTOP = 0x03, + SSAM_POS_COVER_FOLDED_CANVAS = 0x04, + SSAM_POS_COVER_FOLDED_BACK = 0x05, + SSAM_POS_COVER_BOOK = 0x06, +}; + +enum ssam_pos_state_sls { + SSAM_POS_SLS_LID_CLOSED = 0x00, + SSAM_POS_SLS_LAPTOP = 0x01, + SSAM_POS_SLS_SLATE = 0x02, + SSAM_POS_SLS_TABLET = 0x03, +}; + +struct ssam_sources_list { + __le32 count; + __le32 id[SSAM_POS_MAX_SOURCES]; +} __packed; + +static const char *ssam_pos_state_name_cover(struct ssam_tablet_sw *sw, u32 state) +{ + switch (state) { + case SSAM_POS_COVER_DISCONNECTED: + return "disconnected"; + + case SSAM_POS_COVER_CLOSED: + return "closed"; + + case SSAM_POS_COVER_LAPTOP: + return "laptop"; + + case SSAM_POS_COVER_FOLDED_CANVAS: + return "folded-canvas"; + + case SSAM_POS_COVER_FOLDED_BACK: + return "folded-back"; + + case SSAM_POS_COVER_BOOK: + return "book"; + + default: + dev_warn(&sw->sdev->dev, "unknown device posture for type-cover: %u\n", state); + return "<unknown>"; + } +} + +static const char *ssam_pos_state_name_sls(struct ssam_tablet_sw *sw, u32 state) +{ + switch (state) { + case SSAM_POS_SLS_LID_CLOSED: + return "closed"; + + case SSAM_POS_SLS_LAPTOP: + return "laptop"; + + case SSAM_POS_SLS_SLATE: + return "slate"; + + case SSAM_POS_SLS_TABLET: + return "tablet"; + + default: + dev_warn(&sw->sdev->dev, "unknown device posture for SLS: %u\n", state); + return "<unknown>"; + } +} + +static const char *ssam_pos_state_name(struct ssam_tablet_sw *sw, + const struct ssam_tablet_sw_state *state) +{ + switch (state->source) { + case SSAM_POS_SOURCE_COVER: + return ssam_pos_state_name_cover(sw, state->state); + + case SSAM_POS_SOURCE_SLS: + return ssam_pos_state_name_sls(sw, state->state); + + default: + dev_warn(&sw->sdev->dev, "unknown device posture source: %u\n", state->source); + return "<unknown>"; + } +} + +static bool 
ssam_pos_state_is_tablet_mode_cover(struct ssam_tablet_sw *sw, u32 state) +{ + switch (state) { + case SSAM_POS_COVER_DISCONNECTED: + case SSAM_POS_COVER_FOLDED_CANVAS: + case SSAM_POS_COVER_FOLDED_BACK: + case SSAM_POS_COVER_BOOK: + return true; + + case SSAM_POS_COVER_CLOSED: + case SSAM_POS_COVER_LAPTOP: + return false; + + default: + dev_warn(&sw->sdev->dev, "unknown device posture for type-cover: %u\n", state); + return true; + } +} + +static bool ssam_pos_state_is_tablet_mode_sls(struct ssam_tablet_sw *sw, u32 state) +{ + switch (state) { + case SSAM_POS_SLS_LAPTOP: + case SSAM_POS_SLS_LID_CLOSED: + return false; + + case SSAM_POS_SLS_SLATE: + return tablet_mode_in_slate_state; + + case SSAM_POS_SLS_TABLET: + return true; + + default: + dev_warn(&sw->sdev->dev, "unknown device posture for SLS: %u\n", state); + return true; + } +} + +static bool ssam_pos_state_is_tablet_mode(struct ssam_tablet_sw *sw, + const struct ssam_tablet_sw_state *state) +{ + switch (state->source) { + case SSAM_POS_SOURCE_COVER: + return ssam_pos_state_is_tablet_mode_cover(sw, state->state); + + case SSAM_POS_SOURCE_SLS: + return ssam_pos_state_is_tablet_mode_sls(sw, state->state); + + default: + dev_warn(&sw->sdev->dev, "unknown device posture source: %u\n", state->source); + return true; + } +} + +static int ssam_pos_get_sources_list(struct ssam_tablet_sw *sw, struct ssam_sources_list *sources) +{ + struct ssam_request rqst; + struct ssam_response rsp; + int status; + + rqst.target_category = SSAM_SSH_TC_POS; + rqst.target_id = SSAM_SSH_TID_SAM; + rqst.command_id = 0x01; + rqst.instance_id = 0x00; + rqst.flags = SSAM_REQUEST_HAS_RESPONSE; + rqst.length = 0; + rqst.payload = NULL; + + rsp.capacity = sizeof(*sources); + rsp.length = 0; + rsp.pointer = (u8 *)sources; + + status = ssam_retry(ssam_request_do_sync_onstack, sw->sdev->ctrl, &rqst, &rsp, 0); + if (status) + return status; + + /* We need at least the 'sources->count' field. */ + if (rsp.length < sizeof(__le32)) { + dev_err(&sw->sdev->dev, "received source list response is too small\n"); + return -EPROTO; + } + + /* Make sure 'sources->count' matches with the response length. */ + if (get_unaligned_le32(&sources->count) * sizeof(__le32) + sizeof(__le32) != rsp.length) { + dev_err(&sw->sdev->dev, "mismatch between number of sources and response size\n"); + return -EPROTO; + } + + return 0; +} + +static int ssam_pos_get_source(struct ssam_tablet_sw *sw, u32 *source_id) +{ + struct ssam_sources_list sources = {}; + int status; + + status = ssam_pos_get_sources_list(sw, &sources); + if (status) + return status; + + if (get_unaligned_le32(&sources.count) == 0) { + dev_err(&sw->sdev->dev, "no posture sources found\n"); + return -ENODEV; + } + + /* + * We currently don't know what to do with more than one posture + * source. At the moment, only one source seems to be used/provided. + * The WARN_ON() here should hopefully let us know quickly once there + * is a device that provides multiple sources, at which point we can + * then try to figure out how to handle them. 
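+	 *
+	 * For reference, the raw response parsed above is simply a
+	 * little-endian u32 source count followed by up to
+	 * SSAM_POS_MAX_SOURCES little-endian u32 source IDs, mirroring
+	 * struct ssam_sources_list.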
+ */ + WARN_ON(get_unaligned_le32(&sources.count) > 1); + + *source_id = get_unaligned_le32(&sources.id[0]); + return 0; +} + +SSAM_DEFINE_SYNC_REQUEST_WR(__ssam_pos_get_posture_for_source, __le32, __le32, { + .target_category = SSAM_SSH_TC_POS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x02, + .instance_id = 0x00, +}); + +static int ssam_pos_get_posture_for_source(struct ssam_tablet_sw *sw, u32 source_id, u32 *posture) +{ + __le32 source_le = cpu_to_le32(source_id); + __le32 rspval_le = 0; + int status; + + status = ssam_retry(__ssam_pos_get_posture_for_source, sw->sdev->ctrl, + &source_le, &rspval_le); + if (status) + return status; + + *posture = le32_to_cpu(rspval_le); + return 0; +} + +static int ssam_pos_get_posture(struct ssam_tablet_sw *sw, struct ssam_tablet_sw_state *state) +{ + u32 source_id; + u32 source_state; + int status; + + status = ssam_pos_get_source(sw, &source_id); + if (status) { + dev_err(&sw->sdev->dev, "failed to get posture source ID: %d\n", status); + return status; + } + + status = ssam_pos_get_posture_for_source(sw, source_id, &source_state); + if (status) { + dev_err(&sw->sdev->dev, "failed to get posture value for source %u: %d\n", + source_id, status); + return status; + } + + state->source = source_id; + state->state = source_state; + return 0; +} + +static u32 ssam_pos_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event) +{ + struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif); + + if (event->command_id != SSAM_EVENT_POS_CID_POSTURE_CHANGED) + return 0; /* Return "unhandled". */ + + if (event->length != sizeof(__le32) * 3) + dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length); + + schedule_work(&sw->update_work); + return SSAM_NOTIF_HANDLED; +} + +static const struct ssam_tablet_sw_desc ssam_pos_sw_desc = { + .dev = { + .name = "Microsoft Surface POS Tablet Mode Switch", + .phys = "ssam/01:26:01:00:01/input0", + }, + .ops = { + .notify = ssam_pos_sw_notif, + .get_state = ssam_pos_get_posture, + .state_name = ssam_pos_state_name, + .state_is_tablet_mode = ssam_pos_state_is_tablet_mode, + }, + .event = { + .reg = SSAM_EVENT_REGISTRY_SAM, + .id = { + .target_category = SSAM_SSH_TC_POS, + .instance = 0, + }, + .mask = SSAM_EVENT_MASK_TARGET, + }, +}; + + +/* -- Driver registration. -------------------------------------------------- */ + +static const struct ssam_device_id ssam_tablet_sw_match[] = { + { SSAM_SDEV(KIP, SAM, 0x00, 0x01), (unsigned long)&ssam_kip_sw_desc }, + { SSAM_SDEV(POS, SAM, 0x00, 0x01), (unsigned long)&ssam_pos_sw_desc }, + { }, +}; +MODULE_DEVICE_TABLE(ssam, ssam_tablet_sw_match); + +static struct ssam_device_driver ssam_tablet_sw_driver = { + .probe = ssam_tablet_sw_probe, + .remove = ssam_tablet_sw_remove, + .match_table = ssam_tablet_sw_match, + .driver = { + .name = "surface_aggregator_tablet_mode_switch", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &ssam_tablet_sw_pm_ops, + }, +}; +module_ssam_device_driver(ssam_tablet_sw_driver); + +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>"); +MODULE_DESCRIPTION("Tablet mode switch driver for Surface devices using the Surface Aggregator Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c new file mode 100644 index 0000000000..30cbde278c --- /dev/null +++ b/drivers/platform/surface/surface_dtx.c @@ -0,0 +1,1284 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Surface Book (gen. 2 and later) detachment system (DTX) driver. 
+ * + * Provides a user-space interface to properly handle clipboard/tablet + * (containing screen and processor) detachment from the base of the device + * (containing the keyboard and optionally a discrete GPU). Allows user-space + * to acknowledge (to speed things up), abort (e.g. in case the dGPU is still + * in use), or request detachment. + * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <linux/fs.h> +#include <linux/input.h> +#include <linux/ioctl.h> +#include <linux/kernel.h> +#include <linux/kfifo.h> +#include <linux/kref.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/poll.h> +#include <linux/rwsem.h> +#include <linux/slab.h> +#include <linux/workqueue.h> + +#include <linux/surface_aggregator/controller.h> +#include <linux/surface_aggregator/device.h> +#include <linux/surface_aggregator/dtx.h> + + +/* -- SSAM interface. ------------------------------------------------------- */ + +enum sam_event_cid_bas { + SAM_EVENT_CID_DTX_CONNECTION = 0x0c, + SAM_EVENT_CID_DTX_REQUEST = 0x0e, + SAM_EVENT_CID_DTX_CANCEL = 0x0f, + SAM_EVENT_CID_DTX_LATCH_STATUS = 0x11, +}; + +enum ssam_bas_base_state { + SSAM_BAS_BASE_STATE_DETACH_SUCCESS = 0x00, + SSAM_BAS_BASE_STATE_ATTACHED = 0x01, + SSAM_BAS_BASE_STATE_NOT_FEASIBLE = 0x02, +}; + +enum ssam_bas_latch_status { + SSAM_BAS_LATCH_STATUS_CLOSED = 0x00, + SSAM_BAS_LATCH_STATUS_OPENED = 0x01, + SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN = 0x02, + SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN = 0x03, + SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE = 0x04, +}; + +enum ssam_bas_cancel_reason { + SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE = 0x00, /* Low battery. */ + SSAM_BAS_CANCEL_REASON_TIMEOUT = 0x02, + SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN = 0x03, + SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN = 0x04, + SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE = 0x05, +}; + +struct ssam_bas_base_info { + u8 state; + u8 base_id; +} __packed; + +static_assert(sizeof(struct ssam_bas_base_info) == 2); + +SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x06, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x07, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x08, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x09, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x0a, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x0b, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x0c, + .instance_id = 0x00, +}); + +SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x0d, + .instance_id = 0x00, +}); + 
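+/* + * Note: Each SSAM_DEFINE_SYNC_REQUEST_N()/_R() invocation above expands to a + * static function of the given name (see the macro definitions in + * include/linux/surface_aggregator/controller.h). As a rough usage sketch, + * based on those definitions: + * + *   u8 mode; + *   int status = ssam_retry(ssam_bas_get_device_mode, ctrl, &mode); + * + * where ssam_retry() re-issues the request a limited number of times on I/O + * errors or timeouts. + */ + 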
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, { + .target_category = SSAM_SSH_TC_BAS, + .target_id = SSAM_SSH_TID_SAM, + .command_id = 0x11, + .instance_id = 0x00, +}); + + +/* -- Main structures. ------------------------------------------------------ */ + +enum sdtx_device_state { + SDTX_DEVICE_SHUTDOWN_BIT = BIT(0), + SDTX_DEVICE_DIRTY_BASE_BIT = BIT(1), + SDTX_DEVICE_DIRTY_MODE_BIT = BIT(2), + SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3), +}; + +struct sdtx_device { + struct kref kref; + struct rw_semaphore lock; /* Guards device and controller reference. */ + + struct device *dev; + struct ssam_controller *ctrl; + unsigned long flags; + + struct miscdevice mdev; + wait_queue_head_t waitq; + struct mutex write_lock; /* Guards order of events/notifications. */ + struct rw_semaphore client_lock; /* Guards client list. */ + struct list_head client_list; + + struct delayed_work state_work; + struct { + struct ssam_bas_base_info base; + u8 device_mode; + u8 latch_status; + } state; + + struct delayed_work mode_work; + struct input_dev *mode_switch; + + struct ssam_event_notifier notif; +}; + +enum sdtx_client_state { + SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0), +}; + +struct sdtx_client { + struct sdtx_device *ddev; + struct list_head node; + unsigned long flags; + + struct fasync_struct *fasync; + + struct mutex read_lock; /* Guards FIFO buffer read access. */ + DECLARE_KFIFO(buffer, u8, 512); +}; + +static void __sdtx_device_release(struct kref *kref) +{ + struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref); + + mutex_destroy(&ddev->write_lock); + kfree(ddev); +} + +static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev) +{ + if (ddev) + kref_get(&ddev->kref); + + return ddev; +} + +static void sdtx_device_put(struct sdtx_device *ddev) +{ + if (ddev) + kref_put(&ddev->kref, __sdtx_device_release); +} + + +/* -- Firmware value translations. 
------------------------------------------ */ + +static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state) +{ + switch (state) { + case SSAM_BAS_BASE_STATE_ATTACHED: + return SDTX_BASE_ATTACHED; + + case SSAM_BAS_BASE_STATE_DETACH_SUCCESS: + return SDTX_BASE_DETACHED; + + case SSAM_BAS_BASE_STATE_NOT_FEASIBLE: + return SDTX_DETACH_NOT_FEASIBLE; + + default: + dev_err(ddev->dev, "unknown base state: %#04x\n", state); + return SDTX_UNKNOWN(state); + } +} + +static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status) +{ + switch (status) { + case SSAM_BAS_LATCH_STATUS_CLOSED: + return SDTX_LATCH_CLOSED; + + case SSAM_BAS_LATCH_STATUS_OPENED: + return SDTX_LATCH_OPENED; + + case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN: + return SDTX_ERR_FAILED_TO_OPEN; + + case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN: + return SDTX_ERR_FAILED_TO_REMAIN_OPEN; + + case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE: + return SDTX_ERR_FAILED_TO_CLOSE; + + default: + dev_err(ddev->dev, "unknown latch status: %#04x\n", status); + return SDTX_UNKNOWN(status); + } +} + +static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason) +{ + switch (reason) { + case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE: + return SDTX_DETACH_NOT_FEASIBLE; + + case SSAM_BAS_CANCEL_REASON_TIMEOUT: + return SDTX_DETACH_TIMEDOUT; + + case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN: + return SDTX_ERR_FAILED_TO_OPEN; + + case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN: + return SDTX_ERR_FAILED_TO_REMAIN_OPEN; + + case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE: + return SDTX_ERR_FAILED_TO_CLOSE; + + default: + dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason); + return SDTX_UNKNOWN(reason); + } +} + + +/* -- IOCTLs. --------------------------------------------------------------- */ + +static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev, + struct sdtx_base_info __user *buf) +{ + struct ssam_bas_base_info raw; + struct sdtx_base_info info; + int status; + + lockdep_assert_held_read(&ddev->lock); + + status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw); + if (status < 0) + return status; + + info.state = sdtx_translate_base_state(ddev, raw.state); + info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id); + + if (copy_to_user(buf, &info, sizeof(info))) + return -EFAULT; + + return 0; +} + +static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf) +{ + u8 mode; + int status; + + lockdep_assert_held_read(&ddev->lock); + + status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode); + if (status < 0) + return status; + + return put_user(mode, buf); +} + +static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf) +{ + u8 latch; + int status; + + lockdep_assert_held_read(&ddev->lock); + + status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch); + if (status < 0) + return status; + + return put_user(sdtx_translate_latch_status(ddev, latch), buf); +} + +static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg) +{ + struct sdtx_device *ddev = client->ddev; + + lockdep_assert_held_read(&ddev->lock); + + switch (cmd) { + case SDTX_IOCTL_EVENTS_ENABLE: + set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags); + return 0; + + case SDTX_IOCTL_EVENTS_DISABLE: + clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags); + return 0; + + case SDTX_IOCTL_LATCH_LOCK: + return ssam_retry(ssam_bas_latch_lock, ddev->ctrl); + + case SDTX_IOCTL_LATCH_UNLOCK: + return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl); + + case 
SDTX_IOCTL_LATCH_REQUEST: + return ssam_retry(ssam_bas_latch_request, ddev->ctrl); + + case SDTX_IOCTL_LATCH_CONFIRM: + return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl); + + case SDTX_IOCTL_LATCH_HEARTBEAT: + return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl); + + case SDTX_IOCTL_LATCH_CANCEL: + return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl); + + case SDTX_IOCTL_GET_BASE_INFO: + return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg); + + case SDTX_IOCTL_GET_DEVICE_MODE: + return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg); + + case SDTX_IOCTL_GET_LATCH_STATUS: + return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg); + + default: + return -EINVAL; + } +} + +static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct sdtx_client *client = file->private_data; + long status; + + if (down_read_killable(&client->ddev->lock)) + return -ERESTARTSYS; + + if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) { + up_read(&client->ddev->lock); + return -ENODEV; + } + + status = __surface_dtx_ioctl(client, cmd, arg); + + up_read(&client->ddev->lock); + return status; +} + + +/* -- File operations. ------------------------------------------------------ */ + +static int surface_dtx_open(struct inode *inode, struct file *file) +{ + struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev); + struct sdtx_client *client; + + /* Initialize client. */ + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return -ENOMEM; + + client->ddev = sdtx_device_get(ddev); + + INIT_LIST_HEAD(&client->node); + + mutex_init(&client->read_lock); + INIT_KFIFO(client->buffer); + + file->private_data = client; + + /* Attach client. */ + down_write(&ddev->client_lock); + + /* + * Do not add a new client if the device has been shut down. Note that + * it's enough to hold the client_lock here as, during shutdown, we + * only acquire that lock and remove clients after marking the device + * as shut down. + */ + if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) { + up_write(&ddev->client_lock); + mutex_destroy(&client->read_lock); + sdtx_device_put(client->ddev); + kfree(client); + return -ENODEV; + } + + list_add_tail(&client->node, &ddev->client_list); + up_write(&ddev->client_lock); + + stream_open(inode, file); + return 0; +} + +static int surface_dtx_release(struct inode *inode, struct file *file) +{ + struct sdtx_client *client = file->private_data; + + /* Detach client. */ + down_write(&client->ddev->client_lock); + list_del(&client->node); + up_write(&client->ddev->client_lock); + + /* Free client. */ + sdtx_device_put(client->ddev); + mutex_destroy(&client->read_lock); + kfree(client); + + return 0; +} + +static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs) +{ + struct sdtx_client *client = file->private_data; + struct sdtx_device *ddev = client->ddev; + unsigned int copied; + int status = 0; + + if (down_read_killable(&ddev->lock)) + return -ERESTARTSYS; + + /* Make sure we're not shut down. */ + if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) { + up_read(&ddev->lock); + return -ENODEV; + } + + do { + /* Check availability, wait if necessary. 
*/ + if (kfifo_is_empty(&client->buffer)) { + up_read(&ddev->lock); + + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + status = wait_event_interruptible(ddev->waitq, + !kfifo_is_empty(&client->buffer) || + test_bit(SDTX_DEVICE_SHUTDOWN_BIT, + &ddev->flags)); + if (status < 0) + return status; + + if (down_read_killable(&ddev->lock)) + return -ERESTARTSYS; + + /* Need to check that we're not shut down again. */ + if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) { + up_read(&ddev->lock); + return -ENODEV; + } + } + + /* Try to read from FIFO. */ + if (mutex_lock_interruptible(&client->read_lock)) { + up_read(&ddev->lock); + return -ERESTARTSYS; + } + + status = kfifo_to_user(&client->buffer, buf, count, &copied); + mutex_unlock(&client->read_lock); + + if (status < 0) { + up_read(&ddev->lock); + return status; + } + + /* We might not have gotten anything, check this here. */ + if (copied == 0 && (file->f_flags & O_NONBLOCK)) { + up_read(&ddev->lock); + return -EAGAIN; + } + } while (copied == 0); + + up_read(&ddev->lock); + return copied; +} + +static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt) +{ + struct sdtx_client *client = file->private_data; + __poll_t events = 0; + + if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) + return EPOLLHUP | EPOLLERR; + + poll_wait(file, &client->ddev->waitq, pt); + + if (!kfifo_is_empty(&client->buffer)) + events |= EPOLLIN | EPOLLRDNORM; + + return events; +} + +static int surface_dtx_fasync(int fd, struct file *file, int on) +{ + struct sdtx_client *client = file->private_data; + + return fasync_helper(fd, file, on, &client->fasync); +} + +static const struct file_operations surface_dtx_fops = { + .owner = THIS_MODULE, + .open = surface_dtx_open, + .release = surface_dtx_release, + .read = surface_dtx_read, + .poll = surface_dtx_poll, + .fasync = surface_dtx_fasync, + .unlocked_ioctl = surface_dtx_ioctl, + .compat_ioctl = surface_dtx_ioctl, + .llseek = no_llseek, +}; + + +/* -- Event handling/forwarding. -------------------------------------------- */ + +/* + * The device operation mode is not immediately updated on the EC when the + * base has been connected, i.e. querying the device mode inside the + * connection event callback yields an outdated value. Thus, we can only + * determine the new tablet-mode switch and device mode values after some + * time. + * + * These delays have been chosen by experimenting. We first delay on connect + * events, then check and validate the device mode against the base state and + * if invalid delay again by the "recheck" delay. + */ +#define SDTX_DEVICE_MODE_DELAY_CONNECT msecs_to_jiffies(100) +#define SDTX_DEVICE_MODE_DELAY_RECHECK msecs_to_jiffies(100) + +struct sdtx_status_event { + struct sdtx_event e; + __u16 v; +} __packed; + +struct sdtx_base_info_event { + struct sdtx_event e; + struct sdtx_base_info v; +} __packed; + +union sdtx_generic_event { + struct sdtx_event common; + struct sdtx_status_event status; + struct sdtx_base_info_event base; +}; + +static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay); + +/* Must be executed with ddev->write_lock held. 
*/ +static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt) +{ + const size_t len = sizeof(struct sdtx_event) + evt->length; + struct sdtx_client *client; + + lockdep_assert_held(&ddev->write_lock); + + down_read(&ddev->client_lock); + list_for_each_entry(client, &ddev->client_list, node) { + if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags)) + continue; + + if (likely(kfifo_avail(&client->buffer) >= len)) + kfifo_in(&client->buffer, (const u8 *)evt, len); + else + dev_warn(ddev->dev, "event buffer overrun\n"); + + kill_fasync(&client->fasync, SIGIO, POLL_IN); + } + up_read(&ddev->client_lock); + + wake_up_interruptible(&ddev->waitq); +} + +static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in) +{ + struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif); + union sdtx_generic_event event; + size_t len; + + /* Validate event payload length. */ + switch (in->command_id) { + case SAM_EVENT_CID_DTX_CONNECTION: + len = 2 * sizeof(u8); + break; + + case SAM_EVENT_CID_DTX_REQUEST: + len = 0; + break; + + case SAM_EVENT_CID_DTX_CANCEL: + len = sizeof(u8); + break; + + case SAM_EVENT_CID_DTX_LATCH_STATUS: + len = sizeof(u8); + break; + + default: + return 0; + } + + if (in->length != len) { + dev_err(ddev->dev, + "unexpected payload size for event %#04x: got %u, expected %zu\n", + in->command_id, in->length, len); + return 0; + } + + mutex_lock(&ddev->write_lock); + + /* Translate event. */ + switch (in->command_id) { + case SAM_EVENT_CID_DTX_CONNECTION: + clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags); + + /* If state has not changed: do not send new event. */ + if (ddev->state.base.state == in->data[0] && + ddev->state.base.base_id == in->data[1]) + goto out; + + ddev->state.base.state = in->data[0]; + ddev->state.base.base_id = in->data[1]; + + event.base.e.length = sizeof(struct sdtx_base_info); + event.base.e.code = SDTX_EVENT_BASE_CONNECTION; + event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]); + event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]); + break; + + case SAM_EVENT_CID_DTX_REQUEST: + event.common.code = SDTX_EVENT_REQUEST; + event.common.length = 0; + break; + + case SAM_EVENT_CID_DTX_CANCEL: + event.status.e.length = sizeof(u16); + event.status.e.code = SDTX_EVENT_CANCEL; + event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]); + break; + + case SAM_EVENT_CID_DTX_LATCH_STATUS: + clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags); + + /* If state has not changed: do not send new event. */ + if (ddev->state.latch_status == in->data[0]) + goto out; + + ddev->state.latch_status = in->data[0]; + + event.status.e.length = sizeof(u16); + event.status.e.code = SDTX_EVENT_LATCH_STATUS; + event.status.v = sdtx_translate_latch_status(ddev, in->data[0]); + break; + } + + sdtx_push_event(ddev, &event.common); + + /* Update device mode on base connection change. */ + if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) { + unsigned long delay; + + delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0; + sdtx_update_device_mode(ddev, delay); + } + +out: + mutex_unlock(&ddev->write_lock); + return SSAM_NOTIF_HANDLED; +} + + +/* -- State update functions. 
----------------------------------------------- */ + +static bool sdtx_device_mode_invalid(u8 mode, u8 base_state) +{ + return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) && + (mode == SDTX_DEVICE_MODE_TABLET)) || + ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) && + (mode != SDTX_DEVICE_MODE_TABLET)); +} + +static void sdtx_device_mode_workfn(struct work_struct *work) +{ + struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work); + struct sdtx_status_event event; + struct ssam_bas_base_info base; + int status, tablet; + u8 mode; + + /* Get operation mode. */ + status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode); + if (status) { + dev_err(ddev->dev, "failed to get device mode: %d\n", status); + return; + } + + /* Get base info. */ + status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base); + if (status) { + dev_err(ddev->dev, "failed to get base info: %d\n", status); + return; + } + + /* + * In some cases (specifically when attaching the base), the device + * mode isn't updated right away. Thus we check if the device mode + * makes sense for the given base state and try again later if it + * doesn't. + */ + if (sdtx_device_mode_invalid(mode, base.state)) { + dev_dbg(ddev->dev, "device mode is invalid, trying again\n"); + sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK); + return; + } + + mutex_lock(&ddev->write_lock); + clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags); + + /* Avoid sending duplicate device-mode events. */ + if (ddev->state.device_mode == mode) { + mutex_unlock(&ddev->write_lock); + return; + } + + ddev->state.device_mode = mode; + + event.e.length = sizeof(u16); + event.e.code = SDTX_EVENT_DEVICE_MODE; + event.v = mode; + + sdtx_push_event(ddev, &event.e); + + /* Send SW_TABLET_MODE event. */ + tablet = mode != SDTX_DEVICE_MODE_LAPTOP; + input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet); + input_sync(ddev->mode_switch); + + mutex_unlock(&ddev->write_lock); +} + +static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay) +{ + schedule_delayed_work(&ddev->mode_work, delay); +} + +/* Must be executed with ddev->write_lock held. */ +static void __sdtx_device_state_update_base(struct sdtx_device *ddev, + struct ssam_bas_base_info info) +{ + struct sdtx_base_info_event event; + + lockdep_assert_held(&ddev->write_lock); + + /* Prevent duplicate events. */ + if (ddev->state.base.state == info.state && + ddev->state.base.base_id == info.base_id) + return; + + ddev->state.base = info; + + event.e.length = sizeof(struct sdtx_base_info); + event.e.code = SDTX_EVENT_BASE_CONNECTION; + event.v.state = sdtx_translate_base_state(ddev, info.state); + event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id); + + sdtx_push_event(ddev, &event.e); +} + +/* Must be executed with ddev->write_lock held. */ +static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode) +{ + struct sdtx_status_event event; + int tablet; + + /* + * Note: This function must be called after updating the base state + * via __sdtx_device_state_update_base(), as we rely on the updated + * base state value in the validity check below. + */ + + lockdep_assert_held(&ddev->write_lock); + + if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) { + dev_dbg(ddev->dev, "device mode is invalid, trying again\n"); + sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK); + return; + } + + /* Prevent duplicate events. 
*/ + if (ddev->state.device_mode == mode) + return; + + ddev->state.device_mode = mode; + + /* Send event. */ + event.e.length = sizeof(u16); + event.e.code = SDTX_EVENT_DEVICE_MODE; + event.v = mode; + + sdtx_push_event(ddev, &event.e); + + /* Send SW_TABLET_MODE event. */ + tablet = mode != SDTX_DEVICE_MODE_LAPTOP; + input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet); + input_sync(ddev->mode_switch); +} + +/* Must be executed with ddev->write_lock held. */ +static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status) +{ + struct sdtx_status_event event; + + lockdep_assert_held(&ddev->write_lock); + + /* Prevent duplicate events. */ + if (ddev->state.latch_status == status) + return; + + ddev->state.latch_status = status; + + event.e.length = sizeof(u16); + event.e.code = SDTX_EVENT_LATCH_STATUS; + event.v = sdtx_translate_latch_status(ddev, status); + + sdtx_push_event(ddev, &event.e); +} + +static void sdtx_device_state_workfn(struct work_struct *work) +{ + struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work); + struct ssam_bas_base_info base; + u8 mode, latch; + int status; + + /* Mark everything as dirty. */ + set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags); + set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags); + set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags); + + /* + * Ensure that the state gets marked as dirty before continuing to + * query it. Necessary to ensure that clear_bit() calls in + * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these + * bits if an event is received while updating the state here. + */ + smp_mb__after_atomic(); + + status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base); + if (status) { + dev_err(ddev->dev, "failed to get base state: %d\n", status); + return; + } + + status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode); + if (status) { + dev_err(ddev->dev, "failed to get device mode: %d\n", status); + return; + } + + status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch); + if (status) { + dev_err(ddev->dev, "failed to get latch status: %d\n", status); + return; + } + + mutex_lock(&ddev->write_lock); + + /* + * If the respective dirty-bit has been cleared, an event has been + * received, updating this state. The queried state may thus be out of + * date. At this point, we can safely assume that the state provided + * by the event is either up to date, or we're about to receive + * another event updating it. + */ + + if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags)) + __sdtx_device_state_update_base(ddev, base); + + if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags)) + __sdtx_device_state_update_mode(ddev, mode); + + if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags)) + __sdtx_device_state_update_latch(ddev, latch); + + mutex_unlock(&ddev->write_lock); +} + +static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay) +{ + schedule_delayed_work(&ddev->state_work, delay); +} + + +/* -- Common device initialization. ----------------------------------------- */ + +static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev, + struct ssam_controller *ctrl) +{ + int status, tablet_mode; + + /* Basic initialization. 
*/ + kref_init(&ddev->kref); + init_rwsem(&ddev->lock); + ddev->dev = dev; + ddev->ctrl = ctrl; + + ddev->mdev.minor = MISC_DYNAMIC_MINOR; + ddev->mdev.name = "surface_dtx"; + ddev->mdev.nodename = "surface/dtx"; + ddev->mdev.fops = &surface_dtx_fops; + + ddev->notif.base.priority = 1; + ddev->notif.base.fn = sdtx_notifier; + ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM; + ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS; + ddev->notif.event.id.instance = 0; + ddev->notif.event.mask = SSAM_EVENT_MASK_NONE; + ddev->notif.event.flags = SSAM_EVENT_SEQUENCED; + + init_waitqueue_head(&ddev->waitq); + mutex_init(&ddev->write_lock); + init_rwsem(&ddev->client_lock); + INIT_LIST_HEAD(&ddev->client_list); + + INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn); + INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn); + + /* + * Get current device state. We want to guarantee that events are only + * sent when state actually changes. Thus we cannot use special + * "uninitialized" values, as that would cause problems when manually + * querying the state in surface_dtx_pm_complete(). I.e. we would not + * be able to detect state changes there if no change event has been + * received between driver initialization and first device suspension. + * + * Note that we also need to do this before registering the event + * notifier, as that may access the state values. + */ + status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base); + if (status) + return status; + + status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode); + if (status) + return status; + + status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status); + if (status) + return status; + + /* Set up tablet mode switch. */ + ddev->mode_switch = input_allocate_device(); + if (!ddev->mode_switch) + return -ENOMEM; + + ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch"; + ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0"; + ddev->mode_switch->id.bustype = BUS_HOST; + ddev->mode_switch->dev.parent = ddev->dev; + + tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP); + input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE); + input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode); + + status = input_register_device(ddev->mode_switch); + if (status) { + input_free_device(ddev->mode_switch); + return status; + } + + /* Set up event notifier. */ + status = ssam_notifier_register(ddev->ctrl, &ddev->notif); + if (status) + goto err_notif; + + /* Register miscdevice. */ + status = misc_register(&ddev->mdev); + if (status) + goto err_mdev; + + /* + * Update device state in case it has changed between getting the + * initial mode and registering the event notifier. + */ + sdtx_update_device_state(ddev, 0); + return 0; + +err_mdev: + ssam_notifier_unregister(ddev->ctrl, &ddev->notif); + cancel_delayed_work_sync(&ddev->mode_work); +err_notif: + input_unregister_device(ddev->mode_switch); + return status; +} + +static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl) +{ + struct sdtx_device *ddev; + int status; + + ddev = kzalloc(sizeof(*ddev), GFP_KERNEL); + if (!ddev) + return ERR_PTR(-ENOMEM); + + status = sdtx_device_init(ddev, dev, ctrl); + if (status) { + sdtx_device_put(ddev); + return ERR_PTR(status); + } + + return ddev; +} + +static void sdtx_device_destroy(struct sdtx_device *ddev) +{ + struct sdtx_client *client; + + /* + * Mark device as shut-down. 
Prevent new clients from being added and + * new operations from being executed. + */ + set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags); + + /* Disable notifiers, prevent new events from arriving. */ + ssam_notifier_unregister(ddev->ctrl, &ddev->notif); + + /* Stop mode_work, prevent access to mode_switch. */ + cancel_delayed_work_sync(&ddev->mode_work); + + /* Stop state_work. */ + cancel_delayed_work_sync(&ddev->state_work); + + /* With mode_work canceled, we can unregister the mode_switch. */ + input_unregister_device(ddev->mode_switch); + + /* Wake up async clients. */ + down_write(&ddev->client_lock); + list_for_each_entry(client, &ddev->client_list, node) { + kill_fasync(&client->fasync, SIGIO, POLL_HUP); + } + up_write(&ddev->client_lock); + + /* Wake up blocking clients. */ + wake_up_interruptible(&ddev->waitq); + + /* + * Wait for clients to finish their current operation. After this, the + * controller and device references are guaranteed to be no longer in + * use. + */ + down_write(&ddev->lock); + ddev->dev = NULL; + ddev->ctrl = NULL; + up_write(&ddev->lock); + + /* Finally remove the misc-device. */ + misc_deregister(&ddev->mdev); + + /* + * We're now guaranteed that surface_dtx_open() won't be called any + * more, so we can now drop our reference. + */ + sdtx_device_put(ddev); +} + + +/* -- PM ops. --------------------------------------------------------------- */ + +#ifdef CONFIG_PM_SLEEP + +static void surface_dtx_pm_complete(struct device *dev) +{ + struct sdtx_device *ddev = dev_get_drvdata(dev); + + /* + * Normally, the EC will store events while suspended (i.e. in + * display-off state) and release them when resumed (i.e. transitioned + * to display-on state). During hibernation, however, the EC will be + * shut down and does not store events. Furthermore, events might be + * dropped during prolonged suspension (it is currently unknown how + * big this event buffer is and how it behaves on overruns). + * + * To prevent any problems, we update the device state here. We do + * this delayed to ensure that any events sent by the EC directly + * after resuming will be handled first. The delay below has been + * chosen (experimentally), so that there should be ample time for + * these events to be handled, before we check and, if necessary, + * update the state. + */ + sdtx_update_device_state(ddev, msecs_to_jiffies(1000)); +} + +static const struct dev_pm_ops surface_dtx_pm_ops = { + .complete = surface_dtx_pm_complete, +}; + +#else /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops surface_dtx_pm_ops = {}; + +#endif /* CONFIG_PM_SLEEP */ + + +/* -- Platform driver. ------------------------------------------------------ */ + +static int surface_dtx_platform_probe(struct platform_device *pdev) +{ + struct ssam_controller *ctrl; + struct sdtx_device *ddev; + + /* Link to EC. */ + ctrl = ssam_client_bind(&pdev->dev); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl) == -ENODEV ? 
-EPROBE_DEFER : PTR_ERR(ctrl); + + ddev = sdtx_device_create(&pdev->dev, ctrl); + if (IS_ERR(ddev)) + return PTR_ERR(ddev); + + platform_set_drvdata(pdev, ddev); + return 0; +} + +static int surface_dtx_platform_remove(struct platform_device *pdev) +{ + sdtx_device_destroy(platform_get_drvdata(pdev)); + return 0; +} + +static const struct acpi_device_id surface_dtx_acpi_match[] = { + { "MSHW0133", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match); + +static struct platform_driver surface_dtx_platform_driver = { + .probe = surface_dtx_platform_probe, + .remove = surface_dtx_platform_remove, + .driver = { + .name = "surface_dtx_pltf", + .acpi_match_table = surface_dtx_acpi_match, + .pm = &surface_dtx_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + + +/* -- SSAM device driver. --------------------------------------------------- */ + +#ifdef CONFIG_SURFACE_AGGREGATOR_BUS + +static int surface_dtx_ssam_probe(struct ssam_device *sdev) +{ + struct sdtx_device *ddev; + + ddev = sdtx_device_create(&sdev->dev, sdev->ctrl); + if (IS_ERR(ddev)) + return PTR_ERR(ddev); + + ssam_device_set_drvdata(sdev, ddev); + return 0; +} + +static void surface_dtx_ssam_remove(struct ssam_device *sdev) +{ + sdtx_device_destroy(ssam_device_get_drvdata(sdev)); +} + +static const struct ssam_device_id surface_dtx_ssam_match[] = { + { SSAM_SDEV(BAS, SAM, 0x00, 0x00) }, + { }, +}; +MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match); + +static struct ssam_device_driver surface_dtx_ssam_driver = { + .probe = surface_dtx_ssam_probe, + .remove = surface_dtx_ssam_remove, + .match_table = surface_dtx_ssam_match, + .driver = { + .name = "surface_dtx", + .pm = &surface_dtx_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +static int ssam_dtx_driver_register(void) +{ + return ssam_device_driver_register(&surface_dtx_ssam_driver); +} + +static void ssam_dtx_driver_unregister(void) +{ + ssam_device_driver_unregister(&surface_dtx_ssam_driver); +} + +#else /* CONFIG_SURFACE_AGGREGATOR_BUS */ + +static int ssam_dtx_driver_register(void) +{ + return 0; +} + +static void ssam_dtx_driver_unregister(void) +{ +} + +#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */ + + +/* -- Module setup. --------------------------------------------------------- */ + +static int __init surface_dtx_init(void) +{ + int status; + + status = ssam_dtx_driver_register(); + if (status) + return status; + + status = platform_driver_register(&surface_dtx_platform_driver); + if (status) + ssam_dtx_driver_unregister(); + + return status; +} +module_init(surface_dtx_init); + +static void __exit surface_dtx_exit(void) +{ + platform_driver_unregister(&surface_dtx_platform_driver); + ssam_dtx_driver_unregister(); +} +module_exit(surface_dtx_exit); + +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>"); +MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c new file mode 100644 index 0000000000..c219b840d4 --- /dev/null +++ b/drivers/platform/surface/surface_gpe.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Surface GPE/Lid driver to enable wakeup from suspend via the lid by + * properly configuring the respective GPEs. Required for wakeup via lid on + * newer Intel-based Microsoft Surface devices. 
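+ * + * In short, as implemented below: the driver matches the system via DMI, + * attaches the lid GPE number as a "gpe" device property to a software node, + * and toggles the GPE wake mask in the suspend/resume callbacks.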
+ * + * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +/* + * Note: The GPE numbers for the lid devices found below have been obtained + * from ACPI/the DSDT table, specifically from the GPE handler for the + * lid. + */ + +static const struct property_entry lid_device_props_l17[] = { + PROPERTY_ENTRY_U32("gpe", 0x17), + {}, +}; + +static const struct property_entry lid_device_props_l4B[] = { + PROPERTY_ENTRY_U32("gpe", 0x4B), + {}, +}; + +static const struct property_entry lid_device_props_l4D[] = { + PROPERTY_ENTRY_U32("gpe", 0x4D), + {}, +}; + +static const struct property_entry lid_device_props_l4F[] = { + PROPERTY_ENTRY_U32("gpe", 0x4F), + {}, +}; + +static const struct property_entry lid_device_props_l57[] = { + PROPERTY_ENTRY_U32("gpe", 0x57), + {}, +}; + +/* + * Note: When changing this, don't forget to check that the MODULE_ALIAS below + * still fits. + */ +static const struct dmi_system_id dmi_lid_device_table[] = { + { + .ident = "Surface Pro 4", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"), + }, + .driver_data = (void *)lid_device_props_l17, + }, + { + .ident = "Surface Pro 5", + .matches = { + /* + * We match for SKU here due to generic product name + * "Surface Pro". + */ + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"), + }, + .driver_data = (void *)lid_device_props_l4F, + }, + { + .ident = "Surface Pro 5 (LTE)", + .matches = { + /* + * We match for SKU here due to generic product name + * "Surface Pro" + */ + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"), + }, + .driver_data = (void *)lid_device_props_l4F, + }, + { + .ident = "Surface Pro 6", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"), + }, + .driver_data = (void *)lid_device_props_l4F, + }, + { + .ident = "Surface Pro 7", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 7"), + }, + .driver_data = (void *)lid_device_props_l4D, + }, + { + .ident = "Surface Pro 8", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 8"), + }, + .driver_data = (void *)lid_device_props_l4B, + }, + { + .ident = "Surface Book 1", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"), + }, + .driver_data = (void *)lid_device_props_l17, + }, + { + .ident = "Surface Book 2", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"), + }, + .driver_data = (void *)lid_device_props_l17, + }, + { + .ident = "Surface Book 3", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 3"), + }, + .driver_data = (void *)lid_device_props_l4D, + }, + { + .ident = "Surface Laptop 1", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"), + }, + .driver_data = (void *)lid_device_props_l57, + }, + { + .ident = "Surface Laptop 2", + 
.matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"), + }, + .driver_data = (void *)lid_device_props_l57, + }, + { + .ident = "Surface Laptop 3 (Intel 13\")", + .matches = { + /* + * We match for SKU here due to different variants: The + * AMD (15") version does not rely on GPEs. + */ + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_3_1867:1868"), + }, + .driver_data = (void *)lid_device_props_l4D, + }, + { + .ident = "Surface Laptop 3 (Intel 15\")", + .matches = { + /* + * We match for SKU here due to different variants: The + * AMD (15") version does not rely on GPEs. + */ + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_3_1872"), + }, + .driver_data = (void *)lid_device_props_l4D, + }, + { + .ident = "Surface Laptop 4 (Intel 13\")", + .matches = { + /* + * We match for SKU here due to different variants: The + * AMD (15") version does not rely on GPEs. + */ + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_4_1950:1951"), + }, + .driver_data = (void *)lid_device_props_l4B, + }, + { + .ident = "Surface Laptop Studio", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop Studio"), + }, + .driver_data = (void *)lid_device_props_l4B, + }, + { } +}; + +struct surface_lid_device { + u32 gpe_number; +}; + +static int surface_lid_enable_wakeup(struct device *dev, bool enable) +{ + const struct surface_lid_device *lid = dev_get_drvdata(dev); + int action = enable ? ACPI_GPE_ENABLE : ACPI_GPE_DISABLE; + acpi_status status; + + status = acpi_set_gpe_wake_mask(NULL, lid->gpe_number, action); + if (ACPI_FAILURE(status)) { + dev_err(dev, "failed to set GPE wake mask: %s\n", + acpi_format_exception(status)); + return -EINVAL; + } + + return 0; +} + +static int __maybe_unused surface_gpe_suspend(struct device *dev) +{ + return surface_lid_enable_wakeup(dev, true); +} + +static int __maybe_unused surface_gpe_resume(struct device *dev) +{ + return surface_lid_enable_wakeup(dev, false); +} + +static SIMPLE_DEV_PM_OPS(surface_gpe_pm, surface_gpe_suspend, surface_gpe_resume); + +static int surface_gpe_probe(struct platform_device *pdev) +{ + struct surface_lid_device *lid; + u32 gpe_number; + acpi_status status; + int ret; + + ret = device_property_read_u32(&pdev->dev, "gpe", &gpe_number); + if (ret) { + dev_err(&pdev->dev, "failed to read 'gpe' property: %d\n", ret); + return ret; + } + + lid = devm_kzalloc(&pdev->dev, sizeof(*lid), GFP_KERNEL); + if (!lid) + return -ENOMEM; + + lid->gpe_number = gpe_number; + platform_set_drvdata(pdev, lid); + + status = acpi_mark_gpe_for_wake(NULL, gpe_number); + if (ACPI_FAILURE(status)) { + dev_err(&pdev->dev, "failed to mark GPE for wake: %s\n", + acpi_format_exception(status)); + return -EINVAL; + } + + status = acpi_enable_gpe(NULL, gpe_number); + if (ACPI_FAILURE(status)) { + dev_err(&pdev->dev, "failed to enable GPE: %s\n", + acpi_format_exception(status)); + return -EINVAL; + } + + ret = surface_lid_enable_wakeup(&pdev->dev, false); + if (ret) + acpi_disable_gpe(NULL, gpe_number); + + return ret; +} + +static int surface_gpe_remove(struct platform_device *pdev) +{ + struct surface_lid_device *lid = dev_get_drvdata(&pdev->dev); + + /* restore default behavior without this module */ + surface_lid_enable_wakeup(&pdev->dev, false); + 
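/* Balance the acpi_enable_gpe() call made in probe. */ + 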
acpi_disable_gpe(NULL, lid->gpe_number); + + return 0; +} + +static struct platform_driver surface_gpe_driver = { + .probe = surface_gpe_probe, + .remove = surface_gpe_remove, + .driver = { + .name = "surface_gpe", + .pm = &surface_gpe_pm, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +static struct platform_device *surface_gpe_device; + +static int __init surface_gpe_init(void) +{ + const struct dmi_system_id *match; + struct platform_device *pdev; + struct fwnode_handle *fwnode; + int status; + + match = dmi_first_match(dmi_lid_device_table); + if (!match) { + pr_info("no compatible Microsoft Surface device found, exiting\n"); + return -ENODEV; + } + + status = platform_driver_register(&surface_gpe_driver); + if (status) + return status; + + fwnode = fwnode_create_software_node(match->driver_data, NULL); + if (IS_ERR(fwnode)) { + status = PTR_ERR(fwnode); + goto err_node; + } + + pdev = platform_device_alloc("surface_gpe", PLATFORM_DEVID_NONE); + if (!pdev) { + status = -ENOMEM; + goto err_alloc; + } + + pdev->dev.fwnode = fwnode; + + status = platform_device_add(pdev); + if (status) + goto err_add; + + surface_gpe_device = pdev; + return 0; + +err_add: + platform_device_put(pdev); +err_alloc: + fwnode_remove_software_node(fwnode); +err_node: + platform_driver_unregister(&surface_gpe_driver); + return status; +} +module_init(surface_gpe_init); + +static void __exit surface_gpe_exit(void) +{ + struct fwnode_handle *fwnode = surface_gpe_device->dev.fwnode; + + platform_device_unregister(surface_gpe_device); + platform_driver_unregister(&surface_gpe_driver); + fwnode_remove_software_node(fwnode); +} +module_exit(surface_gpe_exit); + +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>"); +MODULE_DESCRIPTION("Surface GPE/Lid Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("dmi:*:svnMicrosoftCorporation:pnSurface*:*"); diff --git a/drivers/platform/surface/surface_hotplug.c b/drivers/platform/surface/surface_hotplug.c new file mode 100644 index 0000000000..7b6d887dcc --- /dev/null +++ b/drivers/platform/surface/surface_hotplug.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Surface Book (2 and later) hot-plug driver. + * + * Surface Book devices (can) have a hot-pluggable discrete GPU (dGPU). This + * driver is responsible for out-of-band hot-plug event signaling on these + * devices. It is specifically required when the hot-plug device is in D3cold + * and can thus not generate PCIe hot-plug events itself. + * + * Event signaling is handled via ACPI, which will generate the appropriate + * device-check notifications to be picked up by the PCIe hot-plug driver. 
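+ * + * Rough flow, as implemented below: a state change on one of the hot-plug + * GPIO lines triggers a threaded interrupt handler, which reads the current + * line value and forwards it to ACPI by evaluating the corresponding _DSM + * function; the firmware then signals the device-check notification.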
+ * + * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <linux/acpi.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> + +static const struct acpi_gpio_params shps_base_presence_int = { 0, 0, false }; +static const struct acpi_gpio_params shps_base_presence = { 1, 0, false }; +static const struct acpi_gpio_params shps_device_power_int = { 2, 0, false }; +static const struct acpi_gpio_params shps_device_power = { 3, 0, false }; +static const struct acpi_gpio_params shps_device_presence_int = { 4, 0, false }; +static const struct acpi_gpio_params shps_device_presence = { 5, 0, false }; + +static const struct acpi_gpio_mapping shps_acpi_gpios[] = { + { "base_presence-int-gpio", &shps_base_presence_int, 1 }, + { "base_presence-gpio", &shps_base_presence, 1 }, + { "device_power-int-gpio", &shps_device_power_int, 1 }, + { "device_power-gpio", &shps_device_power, 1 }, + { "device_presence-int-gpio", &shps_device_presence_int, 1 }, + { "device_presence-gpio", &shps_device_presence, 1 }, + { }, +}; + +/* 5515a847-ed55-4b27-8352-cd320e10360a */ +static const guid_t shps_dsm_guid = + GUID_INIT(0x5515a847, 0xed55, 0x4b27, 0x83, 0x52, 0xcd, 0x32, 0x0e, 0x10, 0x36, 0x0a); + +#define SHPS_DSM_REVISION 1 + +enum shps_dsm_fn { + SHPS_DSM_FN_PCI_NUM_ENTRIES = 0x01, + SHPS_DSM_FN_PCI_GET_ENTRIES = 0x02, + SHPS_DSM_FN_IRQ_BASE_PRESENCE = 0x03, + SHPS_DSM_FN_IRQ_DEVICE_POWER = 0x04, + SHPS_DSM_FN_IRQ_DEVICE_PRESENCE = 0x05, +}; + +enum shps_irq_type { + /* NOTE: Must be in order of enum shps_dsm_fn above. */ + SHPS_IRQ_TYPE_BASE_PRESENCE = 0, + SHPS_IRQ_TYPE_DEVICE_POWER = 1, + SHPS_IRQ_TYPE_DEVICE_PRESENCE = 2, + SHPS_NUM_IRQS, +}; + +static const char *const shps_gpio_names[] = { + [SHPS_IRQ_TYPE_BASE_PRESENCE] = "base_presence", + [SHPS_IRQ_TYPE_DEVICE_POWER] = "device_power", + [SHPS_IRQ_TYPE_DEVICE_PRESENCE] = "device_presence", +}; + +struct shps_device { + struct mutex lock[SHPS_NUM_IRQS]; /* Protects update in shps_dsm_notify_irq() */ + struct gpio_desc *gpio[SHPS_NUM_IRQS]; + unsigned int irq[SHPS_NUM_IRQS]; +}; + +#define SHPS_IRQ_NOT_PRESENT ((unsigned int)-1) + +static enum shps_dsm_fn shps_dsm_fn_for_irq(enum shps_irq_type type) +{ + return SHPS_DSM_FN_IRQ_BASE_PRESENCE + type; +} + +static void shps_dsm_notify_irq(struct platform_device *pdev, enum shps_irq_type type) +{ + struct shps_device *sdev = platform_get_drvdata(pdev); + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + union acpi_object *result; + union acpi_object param; + int value; + + mutex_lock(&sdev->lock[type]); + + value = gpiod_get_value_cansleep(sdev->gpio[type]); + if (value < 0) { + mutex_unlock(&sdev->lock[type]); + dev_err(&pdev->dev, "failed to get gpio: %d (irq=%d)\n", type, value); + return; + } + + dev_dbg(&pdev->dev, "IRQ notification via DSM (irq=%d, value=%d)\n", type, value); + + param.type = ACPI_TYPE_INTEGER; + param.integer.value = value; + + result = acpi_evaluate_dsm_typed(handle, &shps_dsm_guid, SHPS_DSM_REVISION, + shps_dsm_fn_for_irq(type), ¶m, ACPI_TYPE_BUFFER); + if (!result) { + dev_err(&pdev->dev, "IRQ notification via DSM failed (irq=%d, gpio=%d)\n", + type, value); + + } else if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) { + dev_err(&pdev->dev, + "IRQ notification via DSM failed: unexpected result value (irq=%d, gpio=%d)\n", + type, value); + } + + mutex_unlock(&sdev->lock[type]); + + ACPI_FREE(result); +} + +static irqreturn_t 
shps_handle_irq(int irq, void *data) +{ + struct platform_device *pdev = data; + struct shps_device *sdev = platform_get_drvdata(pdev); + int type; + + /* Figure out which IRQ we're handling. */ + for (type = 0; type < SHPS_NUM_IRQS; type++) + if (irq == sdev->irq[type]) + break; + + /* We should have found our interrupt, if not: this is a bug. */ + if (WARN(type >= SHPS_NUM_IRQS, "invalid IRQ number: %d\n", irq)) + return IRQ_HANDLED; + + /* Forward interrupt to ACPI via DSM. */ + shps_dsm_notify_irq(pdev, type); + return IRQ_HANDLED; +} + +static int shps_setup_irq(struct platform_device *pdev, enum shps_irq_type type) +{ + unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING; + struct shps_device *sdev = platform_get_drvdata(pdev); + struct gpio_desc *gpiod; + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + const char *irq_name; + const int dsm = shps_dsm_fn_for_irq(type); + int status, irq; + + /* + * Only set up interrupts that we actually need: The Surface Book 3 + * does not have a DSM for base presence, so don't set up an interrupt + * for that. + */ + if (!acpi_check_dsm(handle, &shps_dsm_guid, SHPS_DSM_REVISION, BIT(dsm))) { + dev_dbg(&pdev->dev, "IRQ notification via DSM not present (irq=%d)\n", type); + return 0; + } + + gpiod = devm_gpiod_get(&pdev->dev, shps_gpio_names[type], GPIOD_ASIS); + if (IS_ERR(gpiod)) + return PTR_ERR(gpiod); + + irq = gpiod_to_irq(gpiod); + if (irq < 0) + return irq; + + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "shps-irq-%d", type); + if (!irq_name) + return -ENOMEM; + + status = devm_request_threaded_irq(&pdev->dev, irq, NULL, shps_handle_irq, + flags, irq_name, pdev); + if (status) + return status; + + dev_dbg(&pdev->dev, "set up irq %d as type %d\n", irq, type); + + sdev->gpio[type] = gpiod; + sdev->irq[type] = irq; + + return 0; +} + +static int surface_hotplug_remove(struct platform_device *pdev) +{ + struct shps_device *sdev = platform_get_drvdata(pdev); + int i; + + /* Ensure that IRQs have been fully handled and won't trigger any more. */ + for (i = 0; i < SHPS_NUM_IRQS; i++) { + if (sdev->irq[i] != SHPS_IRQ_NOT_PRESENT) + disable_irq(sdev->irq[i]); + + mutex_destroy(&sdev->lock[i]); + } + + return 0; +} + +static int surface_hotplug_probe(struct platform_device *pdev) +{ + struct shps_device *sdev; + int status, i; + + /* + * The MSHW0153 device is also present on the Surface Laptop 3, + * however that doesn't have a hot-pluggable PCIe device. It also + * doesn't have any GPIO interrupts/pins under the MSHW0153, so filter + * it out here. + */ + if (gpiod_count(&pdev->dev, NULL) < 0) + return -ENODEV; + + status = devm_acpi_dev_add_driver_gpios(&pdev->dev, shps_acpi_gpios); + if (status) + return status; + + sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL); + if (!sdev) + return -ENOMEM; + + platform_set_drvdata(pdev, sdev); + + /* + * Initialize IRQs so that we can safely call surface_hotplug_remove() + * on errors. + */ + for (i = 0; i < SHPS_NUM_IRQS; i++) + sdev->irq[i] = SHPS_IRQ_NOT_PRESENT; + + /* Set up IRQs. */ + for (i = 0; i < SHPS_NUM_IRQS; i++) { + mutex_init(&sdev->lock[i]); + + status = shps_setup_irq(pdev, i); + if (status) { + dev_err(&pdev->dev, "failed to set up IRQ %d: %d\n", i, status); + goto err; + } + } + + /* Ensure everything is up-to-date. 
*/ + for (i = 0; i < SHPS_NUM_IRQS; i++) + if (sdev->irq[i] != SHPS_IRQ_NOT_PRESENT) + shps_dsm_notify_irq(pdev, i); + + return 0; + +err: + surface_hotplug_remove(pdev); + return status; +} + +static const struct acpi_device_id surface_hotplug_acpi_match[] = { + { "MSHW0153", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, surface_hotplug_acpi_match); + +static struct platform_driver surface_hotplug_driver = { + .probe = surface_hotplug_probe, + .remove = surface_hotplug_remove, + .driver = { + .name = "surface_hotplug", + .acpi_match_table = surface_hotplug_acpi_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_platform_driver(surface_hotplug_driver); + +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>"); +MODULE_DESCRIPTION("Surface Hot-Plug Signaling Driver for Surface Book Devices"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c new file mode 100644 index 0000000000..a5a3941b3f --- /dev/null +++ b/drivers/platform/surface/surface_platform_profile.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Surface Platform Profile / Performance Mode driver for Surface System + * Aggregator Module (thermal subsystem). + * + * Copyright (C) 2021-2022 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#include <asm/unaligned.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_profile.h> +#include <linux/types.h> + +#include <linux/surface_aggregator/device.h> + +enum ssam_tmp_profile { + SSAM_TMP_PROFILE_NORMAL = 1, + SSAM_TMP_PROFILE_BATTERY_SAVER = 2, + SSAM_TMP_PROFILE_BETTER_PERFORMANCE = 3, + SSAM_TMP_PROFILE_BEST_PERFORMANCE = 4, +}; + +struct ssam_tmp_profile_info { + __le32 profile; + __le16 unknown1; + __le16 unknown2; +} __packed; + +struct ssam_tmp_profile_device { + struct ssam_device *sdev; + struct platform_profile_handler handler; +}; + +SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_tmp_profile_get, struct ssam_tmp_profile_info, { + .target_category = SSAM_SSH_TC_TMP, + .command_id = 0x02, +}); + +SSAM_DEFINE_SYNC_REQUEST_CL_W(__ssam_tmp_profile_set, __le32, { + .target_category = SSAM_SSH_TC_TMP, + .command_id = 0x03, +}); + +static int ssam_tmp_profile_get(struct ssam_device *sdev, enum ssam_tmp_profile *p) +{ + struct ssam_tmp_profile_info info; + int status; + + status = ssam_retry(__ssam_tmp_profile_get, sdev, &info); + if (status < 0) + return status; + + *p = le32_to_cpu(info.profile); + return 0; +} + +static int ssam_tmp_profile_set(struct ssam_device *sdev, enum ssam_tmp_profile p) +{ + __le32 profile_le = cpu_to_le32(p); + + return ssam_retry(__ssam_tmp_profile_set, sdev, &profile_le); +} + +static int convert_ssam_to_profile(struct ssam_device *sdev, enum ssam_tmp_profile p) +{ + switch (p) { + case SSAM_TMP_PROFILE_NORMAL: + return PLATFORM_PROFILE_BALANCED; + + case SSAM_TMP_PROFILE_BATTERY_SAVER: + return PLATFORM_PROFILE_LOW_POWER; + + case SSAM_TMP_PROFILE_BETTER_PERFORMANCE: + return PLATFORM_PROFILE_BALANCED_PERFORMANCE; + + case SSAM_TMP_PROFILE_BEST_PERFORMANCE: + return PLATFORM_PROFILE_PERFORMANCE; + + default: + dev_err(&sdev->dev, "invalid performance profile: %d", p); + return -EINVAL; + } +} + +static int convert_profile_to_ssam(struct ssam_device *sdev, enum platform_profile_option p) +{ + switch (p) { + case PLATFORM_PROFILE_LOW_POWER: + return SSAM_TMP_PROFILE_BATTERY_SAVER; + + case PLATFORM_PROFILE_BALANCED: + return SSAM_TMP_PROFILE_NORMAL; + + case PLATFORM_PROFILE_BALANCED_PERFORMANCE: + return 
+
+static int convert_ssam_to_profile(struct ssam_device *sdev, enum ssam_tmp_profile p)
+{
+	switch (p) {
+	case SSAM_TMP_PROFILE_NORMAL:
+		return PLATFORM_PROFILE_BALANCED;
+
+	case SSAM_TMP_PROFILE_BATTERY_SAVER:
+		return PLATFORM_PROFILE_LOW_POWER;
+
+	case SSAM_TMP_PROFILE_BETTER_PERFORMANCE:
+		return PLATFORM_PROFILE_BALANCED_PERFORMANCE;
+
+	case SSAM_TMP_PROFILE_BEST_PERFORMANCE:
+		return PLATFORM_PROFILE_PERFORMANCE;
+
+	default:
+		dev_err(&sdev->dev, "invalid performance profile: %d\n", p);
+		return -EINVAL;
+	}
+}
+
+static int convert_profile_to_ssam(struct ssam_device *sdev, enum platform_profile_option p)
+{
+	switch (p) {
+	case PLATFORM_PROFILE_LOW_POWER:
+		return SSAM_TMP_PROFILE_BATTERY_SAVER;
+
+	case PLATFORM_PROFILE_BALANCED:
+		return SSAM_TMP_PROFILE_NORMAL;
+
+	case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
+		return SSAM_TMP_PROFILE_BETTER_PERFORMANCE;
+
+	case PLATFORM_PROFILE_PERFORMANCE:
+		return SSAM_TMP_PROFILE_BEST_PERFORMANCE;
+
+	default:
+		/* This should have already been caught by platform_profile_store(). */
+		WARN(true, "unsupported platform profile");
+		return -EOPNOTSUPP;
+	}
+}
+
+static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
+				     enum platform_profile_option *profile)
+{
+	struct ssam_tmp_profile_device *tpd;
+	enum ssam_tmp_profile tp;
+	int status;
+
+	tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+
+	status = ssam_tmp_profile_get(tpd->sdev, &tp);
+	if (status)
+		return status;
+
+	status = convert_ssam_to_profile(tpd->sdev, tp);
+	if (status < 0)
+		return status;
+
+	*profile = status;
+	return 0;
+}
+
+static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
+				     enum platform_profile_option profile)
+{
+	struct ssam_tmp_profile_device *tpd;
+	int tp;
+
+	tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+
+	tp = convert_profile_to_ssam(tpd->sdev, profile);
+	if (tp < 0)
+		return tp;
+
+	return ssam_tmp_profile_set(tpd->sdev, tp);
+}
+
+static int surface_platform_profile_probe(struct ssam_device *sdev)
+{
+	struct ssam_tmp_profile_device *tpd;
+
+	tpd = devm_kzalloc(&sdev->dev, sizeof(*tpd), GFP_KERNEL);
+	if (!tpd)
+		return -ENOMEM;
+
+	tpd->sdev = sdev;
+
+	tpd->handler.profile_get = ssam_platform_profile_get;
+	tpd->handler.profile_set = ssam_platform_profile_set;
+
+	set_bit(PLATFORM_PROFILE_LOW_POWER, tpd->handler.choices);
+	set_bit(PLATFORM_PROFILE_BALANCED, tpd->handler.choices);
+	set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
+	set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
+
+	return platform_profile_register(&tpd->handler);
+}
+
+static void surface_platform_profile_remove(struct ssam_device *sdev)
+{
+	platform_profile_remove();
+}
+
+static const struct ssam_device_id ssam_platform_profile_match[] = {
+	{ SSAM_SDEV(TMP, SAM, 0x00, 0x01) },
+	{ },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_platform_profile_match);
+
+static struct ssam_device_driver surface_platform_profile = {
+	.probe = surface_platform_profile_probe,
+	.remove = surface_platform_profile_remove,
+	.match_table = ssam_platform_profile_match,
+	.driver = {
+		.name = "surface_platform_profile",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+module_ssam_device_driver(surface_platform_profile);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Platform Profile Support for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
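Once registered via platform_profile_register(), the handler above is exposed through the generic ACPI platform-profile interface at /sys/firmware/acpi/platform_profile (with the available options listed in platform_profile_choices). A minimal user-space sketch for querying and switching the profile, assuming that standard sysfs path:

	#include <stdio.h>

	#define PROFILE_PATH "/sys/firmware/acpi/platform_profile"

	int main(void)
	{
		char buf[64] = "";
		FILE *f;

		/* Read the currently active profile. */
		f = fopen(PROFILE_PATH, "r");
		if (!f || !fgets(buf, sizeof(buf), f)) {
			perror(PROFILE_PATH);
			return 1;
		}
		fclose(f);
		printf("current profile: %s", buf);

		/* Switch to low-power (maps to SSAM_TMP_PROFILE_BATTERY_SAVER). */
		f = fopen(PROFILE_PATH, "w");
		if (!f || fputs("low-power", f) < 0) {
			perror(PROFILE_PATH);
			return 1;
		}
		fclose(f);

		return 0;
	}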
diff --git a/drivers/platform/surface/surfacepro3_button.c b/drivers/platform/surface/surfacepro3_button.c
new file mode 100644
index 0000000000..2755601f97
--- /dev/null
+++ b/drivers/platform/surface/surfacepro3_button.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Power/home/volume button support for
+ * Microsoft Surface Pro 3/4 tablets.
+ *
+ * Copyright (c) 2015 Intel Corporation.
+ * All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/acpi.h>
+#include <acpi/button.h>
+
+#define SURFACE_PRO3_BUTTON_HID		"MSHW0028"
+#define SURFACE_PRO4_BUTTON_HID		"MSHW0040"
+#define SURFACE_BUTTON_OBJ_NAME		"VGBI"
+#define SURFACE_BUTTON_DEVICE_NAME	"Surface Pro 3/4 Buttons"
+
+#define MSHW0040_DSM_REVISION		0x01
+#define MSHW0040_DSM_GET_OMPR		0x02	// get OEM Platform Revision
+static const guid_t MSHW0040_DSM_UUID =
+	GUID_INIT(0x6fd05c69, 0xcde3, 0x49f4, 0x95, 0xed, 0xab, 0x16, 0x65,
+		  0x49, 0x80, 0x35);
+
+#define SURFACE_BUTTON_NOTIFY_TABLET_MODE		0xc8
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_POWER		0xc6
+#define SURFACE_BUTTON_NOTIFY_RELEASE_POWER		0xc7
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_HOME		0xc4
+#define SURFACE_BUTTON_NOTIFY_RELEASE_HOME		0xc5
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP		0xc0
+#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP		0xc1
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN		0xc2
+#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN	0xc3
+
+MODULE_AUTHOR("Chen Yu");
+MODULE_DESCRIPTION("Surface Pro3 Button Driver");
+MODULE_LICENSE("GPL v2");
+
+/*
+ * Support for the power, home, and volume buttons is supposed to be covered
+ * by drivers/input/misc/soc_button_array.c, which implements the "Windows
+ * ACPI Design Guide for SoC Platforms". However, the Surface Pro 3 does not
+ * follow that specification; instead, it dispatches these events through the
+ * VGBI (MSHW0028) device. We use an acpi_driver rather than a
+ * platform_driver or i2c_driver because, although VGBI has an I2C resource
+ * connected to an I2C controller, it is not embedded in any I2C controller's
+ * scope; thus no platform_device would be created and no i2c_client would be
+ * enumerated, leaving acpi_driver as the only option.
+ */
+static const struct acpi_device_id surface_button_device_ids[] = {
+	{SURFACE_PRO3_BUTTON_HID, 0},
+	{SURFACE_PRO4_BUTTON_HID, 0},
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, surface_button_device_ids);
+
+struct surface_button {
+	unsigned int type;
+	struct input_dev *input;
+	char phys[32];			/* for input device */
+	unsigned long pushed;
+	bool suspended;
+};
+
+static void surface_button_notify(struct acpi_device *device, u32 event)
+{
+	struct surface_button *button = acpi_driver_data(device);
+	struct input_dev *input;
+	int key_code = KEY_RESERVED;
+	bool pressed = false;
+
+	switch (event) {
+	/* Power button press/release handling */
+	case SURFACE_BUTTON_NOTIFY_PRESS_POWER:
+		pressed = true;
+		fallthrough;
+	case SURFACE_BUTTON_NOTIFY_RELEASE_POWER:
+		key_code = KEY_POWER;
+		break;
+	/* Home button press/release handling */
+	case SURFACE_BUTTON_NOTIFY_PRESS_HOME:
+		pressed = true;
+		fallthrough;
+	case SURFACE_BUTTON_NOTIFY_RELEASE_HOME:
+		key_code = KEY_LEFTMETA;
+		break;
+	/* Volume up button press/release handling */
+	case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP:
+		pressed = true;
+		fallthrough;
+	case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP:
+		key_code = KEY_VOLUMEUP;
+		break;
+	/* Volume down button press/release handling */
+	case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN:
+		pressed = true;
+		fallthrough;
+	case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
+		key_code = KEY_VOLUMEDOWN;
+		break;
+	case SURFACE_BUTTON_NOTIFY_TABLET_MODE:
+		dev_warn_once(&device->dev, "Tablet mode is not supported\n");
+		break;
+	default:
+		dev_info_ratelimited(&device->dev,
+				     "Unsupported event [0x%x]\n", event);
+		break;
+	}
+	input = button->input;
+	if (key_code == KEY_RESERVED)
+		return;
+	if (pressed)
+		pm_wakeup_dev_event(&device->dev, 0, button->suspended);
+	if (button->suspended)
+		return;
+	input_report_key(input, key_code, pressed ? 1 : 0);
+	input_sync(input);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int surface_button_suspend(struct device *dev)
+{
+	struct acpi_device *device = to_acpi_device(dev);
+	struct surface_button *button = acpi_driver_data(device);
+
+	button->suspended = true;
+	return 0;
+}
+
+static int surface_button_resume(struct device *dev)
+{
+	struct acpi_device *device = to_acpi_device(dev);
+	struct surface_button *button = acpi_driver_data(device);
+
+	button->suspended = false;
+	return 0;
+}
+#endif
+
+/*
+ * Surface Pro 4 and Surface Book 2 / Surface Pro 2017 use the same device
+ * ID (MSHW0040) for the power/volume buttons. Make sure this is the right
+ * device by checking for the _DSM method and OEM Platform Revision.
+ *
+ * Returns true if the driver should bind to this device, i.e. the device is
+ * either MSHW0028 (Pro 3) or MSHW0040 on a Pro 4 or Book 1.
+ */
+static bool surface_button_check_MSHW0040(struct acpi_device *dev)
+{
+	acpi_handle handle = dev->handle;
+	union acpi_object *result;
+	u64 oem_platform_rev = 0;	// valid revisions are nonzero
+
+	// get OEM platform revision
+	result = acpi_evaluate_dsm_typed(handle, &MSHW0040_DSM_UUID,
+					 MSHW0040_DSM_REVISION,
+					 MSHW0040_DSM_GET_OMPR,
+					 NULL, ACPI_TYPE_INTEGER);
+
+	/*
+	 * If evaluating the _DSM fails, the method is not present. This means
+	 * that we have either MSHW0028, or MSHW0040 on a Pro 4 or Book 1, so
+	 * we should bind to this device. The revision is kept at 0 to
+	 * indicate that it is unavailable.
+	 */
+	if (result) {
+		oem_platform_rev = result->integer.value;
+		ACPI_FREE(result);
+	}
+
+	dev_dbg(&dev->dev, "OEM Platform Revision %llu\n", oem_platform_rev);
+
+	return oem_platform_rev == 0;
+}
+
+static int surface_button_add(struct acpi_device *device)
+{
+	struct surface_button *button;
+	struct input_dev *input;
+	const char *hid = acpi_device_hid(device);
+	char *name;
+	int error;
+
+	if (strncmp(acpi_device_bid(device), SURFACE_BUTTON_OBJ_NAME,
+		    strlen(SURFACE_BUTTON_OBJ_NAME)))
+		return -ENODEV;
+
+	if (!surface_button_check_MSHW0040(device))
+		return -ENODEV;
+
+	button = kzalloc(sizeof(struct surface_button), GFP_KERNEL);
+	if (!button)
+		return -ENOMEM;
+
+	device->driver_data = button;
+	button->input = input = input_allocate_device();
+	if (!input) {
+		error = -ENOMEM;
+		goto err_free_button;
+	}
+
+	name = acpi_device_name(device);
+	strcpy(name, SURFACE_BUTTON_DEVICE_NAME);
+	snprintf(button->phys, sizeof(button->phys), "%s/buttons", hid);
+
+	input->name = name;
+	input->phys = button->phys;
+	input->id.bustype = BUS_HOST;
+	input->dev.parent = &device->dev;
+	input_set_capability(input, EV_KEY, KEY_POWER);
+	input_set_capability(input, EV_KEY, KEY_LEFTMETA);
+	input_set_capability(input, EV_KEY, KEY_VOLUMEUP);
+	input_set_capability(input, EV_KEY, KEY_VOLUMEDOWN);
+
+	error = input_register_device(input);
+	if (error)
+		goto err_free_input;
+
+	device_init_wakeup(&device->dev, true);
+	dev_info(&device->dev,
+		 "%s [%s]\n", name, acpi_device_bid(device));
+	return 0;
+
+ err_free_input:
+	input_free_device(input);
+ err_free_button:
+	kfree(button);
+	return error;
+}
+
+static void surface_button_remove(struct acpi_device *device)
+{
+	struct surface_button *button = acpi_driver_data(device);
+
+	input_unregister_device(button->input);
+	kfree(button);
+}
+
+static SIMPLE_DEV_PM_OPS(surface_button_pm,
+			 surface_button_suspend, surface_button_resume);
+
+static struct acpi_driver surface_button_driver = {
+	.name = "surface_pro3_button",
+	.class = "SurfacePro3",
+	.ids = surface_button_device_ids,
+	.ops = {
+		.add = surface_button_add,
+		.remove = surface_button_remove,
+		.notify = surface_button_notify,
+	},
+	.drv.pm = &surface_button_pm,
+};
+
+module_acpi_driver(surface_button_driver);
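The buttons registered above show up as an ordinary input device, so a quick way to verify the driver is to read the corresponding /dev/input/eventN node; the exact node varies per system and can be found under the "Surface Pro 3/4 Buttons" entry in /proc/bus/input/devices. A minimal event reader, with the device path as a placeholder assumption:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		/* Placeholder path; look up the real node first. */
		int fd = open("/dev/input/event5", O_RDONLY);
		struct input_event ev;

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Print key press/release events as they arrive. */
		while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
			if (ev.type == EV_KEY)
				printf("key %u %s\n", ev.code,
				       ev.value ? "pressed" : "released");
		}

		close(fd);
		return 0;
	}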