authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 10:05:51 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 10:05:51 +0000
commit5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
treea94efe259b9009378be6d90eb30d2b019d95c194 /drivers/hid/intel-ish-hid/ishtp
parentInitial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/hid/intel-ish-hid/ishtp')
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c921
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.h76
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client-buffers.c297
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c1108
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h155
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/dma-if.c176
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c990
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.h313
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/init.c106
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h237
10 files changed, 4379 insertions, 0 deletions
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
new file mode 100644
index 000000000..bba29cd36
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -0,0 +1,921 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP bus driver
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "bus.h"
+#include "ishtp-dev.h"
+#include "client.h"
+#include "hbm.h"
+
+static int ishtp_use_dma;
+module_param_named(ishtp_use_dma, ishtp_use_dma, int, 0600);
+MODULE_PARM_DESC(ishtp_use_dma, "Use DMA to send messages");
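+
+/*
+ * Illustrative note (not part of the upstream file): since the parameter
+ * is declared with mode 0600, it can also be flipped at runtime via sysfs,
+ * assuming the containing module is built as intel-ishtp:
+ *
+ *	echo 1 > /sys/module/intel_ishtp/parameters/ishtp_use_dma
+ *
+ * The value is consulted by ishtp_use_dma_transfer() below.
+ */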
+
+#define to_ishtp_cl_driver(d) container_of(d, struct ishtp_cl_driver, driver)
+#define to_ishtp_cl_device(d) container_of(d, struct ishtp_cl_device, dev)
+static bool ishtp_device_ready;
+
+/**
+ * ishtp_recv() - process ishtp message
+ * @dev: ishtp device
+ *
+ * If a message with a valid header and size is received, this
+ * function calls the appropriate handler. If both the host and
+ * firmware addresses are zero, it is a host bus management message;
+ * if only the host address is zero, it is a fixed-client message;
+ * otherwise it is a regular client message.
+ */
+void ishtp_recv(struct ishtp_device *dev)
+{
+ uint32_t msg_hdr;
+ struct ishtp_msg_hdr *ishtp_hdr;
+
+ /* Read ISHTP header dword */
+ msg_hdr = dev->ops->ishtp_read_hdr(dev);
+ if (!msg_hdr)
+ return;
+
+ dev->ops->sync_fw_clock(dev);
+
+ ishtp_hdr = (struct ishtp_msg_hdr *)&msg_hdr;
+ dev->ishtp_msg_hdr = msg_hdr;
+
+ /* Sanity check: ISHTP frag. length in header */
+ if (ishtp_hdr->length > dev->mtu) {
+ dev_err(dev->devc,
+ "ISHTP hdr - bad length: %u; dropped [%08X]\n",
+ (unsigned int)ishtp_hdr->length, msg_hdr);
+ return;
+ }
+
+ /* ISHTP bus message */
+ if (!ishtp_hdr->host_addr && !ishtp_hdr->fw_addr)
+ recv_hbm(dev, ishtp_hdr);
+ /* ISHTP fixed-client message */
+ else if (!ishtp_hdr->host_addr)
+ recv_fixed_cl_msg(dev, ishtp_hdr);
+ else
+ /* ISHTP client message */
+ recv_ishtp_cl_msg(dev, ishtp_hdr);
+}
+EXPORT_SYMBOL(ishtp_recv);
+
+/**
+ * ishtp_send_msg() - Send ishtp message
+ * @dev: ishtp device
+ * @hdr: Message header
+ * @msg: Message contents
+ * @ipc_send_compl: completion callback
+ * @ipc_send_compl_prm: completion callback parameter
+ *
+ * Send a multi fragment message via IPC. After sending the first fragment
+ * the completion callback is called to schedule transmit of next fragment.
+ *
+ * Return: This returns IPC send message status.
+ */
+int ishtp_send_msg(struct ishtp_device *dev, struct ishtp_msg_hdr *hdr,
+ void *msg, void(*ipc_send_compl)(void *),
+ void *ipc_send_compl_prm)
+{
+ unsigned char ipc_msg[IPC_FULL_MSG_SIZE];
+ uint32_t drbl_val;
+
+ drbl_val = dev->ops->ipc_get_header(dev, hdr->length +
+ sizeof(struct ishtp_msg_hdr),
+ 1);
+
+ memcpy(ipc_msg, &drbl_val, sizeof(uint32_t));
+ memcpy(ipc_msg + sizeof(uint32_t), hdr, sizeof(uint32_t));
+ memcpy(ipc_msg + 2 * sizeof(uint32_t), msg, hdr->length);
+ return dev->ops->write(dev, ipc_send_compl, ipc_send_compl_prm,
+ ipc_msg, 2 * sizeof(uint32_t) + hdr->length);
+}
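+
+/*
+ * Illustrative layout of the IPC frame assembled above (not part of the
+ * upstream file):
+ *
+ *	bytes 0..3   doorbell value from ops->ipc_get_header()
+ *	bytes 4..7   struct ishtp_msg_hdr (one dword)
+ *	bytes 8..    hdr->length bytes of payload
+ */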
+
+/**
+ * ishtp_write_message() - Send ishtp single fragment message
+ * @dev: ishtp device
+ * @hdr: Message header
+ * @buf: message data
+ *
+ * Send a single fragment message via IPC. This returns IPC send message
+ * status.
+ *
+ * Return: This returns IPC send message status.
+ */
+int ishtp_write_message(struct ishtp_device *dev, struct ishtp_msg_hdr *hdr,
+ void *buf)
+{
+ return ishtp_send_msg(dev, hdr, buf, NULL, NULL);
+}
+
+/**
+ * ishtp_fw_cl_by_uuid() - locate index of fw client
+ * @dev: ishtp device
+ * @uuid: uuid of the client to search
+ *
+ * Search firmware client using UUID.
+ *
+ * Return: fw client index or -ENOENT if not found
+ */
+int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const guid_t *uuid)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->fw_clients_num; ++i) {
+ if (guid_equal(uuid, &dev->fw_clients[i].props.protocol_name))
+ return i;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(ishtp_fw_cl_by_uuid);
+
+/**
+ * ishtp_fw_cl_get_client() - return client information to client
+ * @dev: the ishtp device structure
+ * @uuid: uuid of the client to search
+ *
+ * Search for a firmware client by UUID and return the related client information.
+ *
+ * Return: pointer of client information on success, NULL on failure.
+ */
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const guid_t *uuid)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ i = ishtp_fw_cl_by_uuid(dev, uuid);
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ if (i < 0 || dev->fw_clients[i].props.fixed_address)
+ return NULL;
+
+ return &dev->fw_clients[i];
+}
+EXPORT_SYMBOL(ishtp_fw_cl_get_client);
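+
+/*
+ * Illustrative sketch (not part of the upstream file): a client driver
+ * would typically resolve its firmware client by UUID before connecting;
+ * "my_guid" is a hypothetical, driver-specific UUID:
+ *
+ *	struct ishtp_fw_client *fw_client;
+ *	int fw_client_id;
+ *
+ *	fw_client = ishtp_fw_cl_get_client(dev, &my_guid);
+ *	if (!fw_client)
+ *		return -ENOENT;
+ *	fw_client_id = ishtp_get_fw_client_id(fw_client);
+ */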
+
+/**
+ * ishtp_get_fw_client_id() - Get fw client id
+ * @fw_client: firmware client instance
+ *
+ * This interface is used to get the firmware client id.
+ *
+ * Return: firmware client id.
+ */
+int ishtp_get_fw_client_id(struct ishtp_fw_client *fw_client)
+{
+ return fw_client->client_id;
+}
+EXPORT_SYMBOL(ishtp_get_fw_client_id);
+
+/**
+ * ishtp_fw_cl_by_id() - return index to fw_clients for client_id
+ * @dev: the ishtp device structure
+ * @client_id: fw client id to search
+ *
+ * Search firmware client using client id.
+ *
+ * Return: index on success, -ENOENT on failure.
+ */
+int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id)
+{
+ int i, res = -ENOENT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ for (i = 0; i < dev->fw_clients_num; i++) {
+ if (dev->fw_clients[i].client_id == client_id) {
+ res = i;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+
+ return res;
+}
+
+/**
+ * ishtp_cl_device_probe() - Bus probe() callback
+ * @dev: the device structure
+ *
+ * This is a bus probe callback and calls the driver's probe function.
+ *
+ * Return: Return value from driver probe() call.
+ */
+static int ishtp_cl_device_probe(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+
+ if (!device)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (!driver || !driver->probe)
+ return -ENODEV;
+
+ return driver->probe(device);
+}
+
+/**
+ * ishtp_cl_bus_match() - Bus match() callback
+ * @dev: the device structure
+ * @drv: the driver structure
+ *
+ * This is a bus match callback, called when a new ishtp_cl_device is
+ * registered during ishtp bus client enumeration. Use the guid_t in
+ * drv and dev to decide whether they match or not.
+ *
+ * Return: 1 if dev & drv match, 0 otherwise.
+ */
+static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
+
+ return guid_equal(driver->guid,
+ &device->fw_client->props.protocol_name);
+}
+
+/**
+ * ishtp_cl_device_remove() - Bus remove() callback
+ * @dev: the device structure
+ *
+ * This is a bus remove callback and calls the driver's remove function.
+ * Since the ISH driver model supports only built-in drivers, this is
+ * primarily called on a PCI driver init failure.
+ *
+ * Return: Return value from driver remove() call.
+ */
+static int ishtp_cl_device_remove(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+
+ if (!device || !dev->driver)
+ return 0;
+
+ if (device->event_cb) {
+ device->event_cb = NULL;
+ cancel_work_sync(&device->event_work);
+ }
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (!driver->remove) {
+ dev->driver = NULL;
+
+ return 0;
+ }
+
+ return driver->remove(device);
+}
+
+/**
+ * ishtp_cl_device_suspend() - Bus suspend callback
+ * @dev: device
+ *
+ * Called during device suspend process.
+ *
+ * Return: Return value from driver suspend() call.
+ */
+static int ishtp_cl_device_suspend(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ if (!device)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (driver && driver->driver.pm) {
+ if (driver->driver.pm->suspend)
+ ret = driver->driver.pm->suspend(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * ishtp_cl_device_resume() - Bus resume callback
+ * @dev: device
+ *
+ * Called during device resume process.
+ *
+ * Return: Return value from driver resume() call.
+ */
+static int ishtp_cl_device_resume(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ if (!device)
+ return 0;
+
+ /*
+	 * When ISH needs a hard reset, it is done asynchronously, hence bus
+ * resume will be called before full ISH resume
+ */
+ if (device->ishtp_dev->resume_flag)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (driver && driver->driver.pm) {
+ if (driver->driver.pm->resume)
+ ret = driver->driver.pm->resume(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * ishtp_cl_device_reset() - Reset callback
+ * @device: ishtp client device instance
+ *
+ * This is a callback invoked when HW reset is done and the device
+ * needs re-initialization.
+ *
+ * Return: Return value from driver reset() call.
+ */
+static int ishtp_cl_device_reset(struct ishtp_cl_device *device)
+{
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ device->event_cb = NULL;
+ cancel_work_sync(&device->event_work);
+
+ driver = to_ishtp_cl_driver(device->dev.driver);
+ if (driver && driver->reset)
+ ret = driver->reset(device);
+
+ return ret;
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "ishtp:%s\n", dev_name(dev));
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *ishtp_cl_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ishtp_cl_dev);
+
+static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ if (add_uevent_var(env, "MODALIAS=ishtp:%s", dev_name(dev)))
+ return -ENOMEM;
+ return 0;
+}
+
+static const struct dev_pm_ops ishtp_cl_bus_dev_pm_ops = {
+ /* Suspend callbacks */
+ .suspend = ishtp_cl_device_suspend,
+ .resume = ishtp_cl_device_resume,
+ /* Hibernate callbacks */
+ .freeze = ishtp_cl_device_suspend,
+ .thaw = ishtp_cl_device_resume,
+ .restore = ishtp_cl_device_resume,
+};
+
+static struct bus_type ishtp_cl_bus_type = {
+ .name = "ishtp",
+ .dev_groups = ishtp_cl_dev_groups,
+ .probe = ishtp_cl_device_probe,
+ .match = ishtp_cl_bus_match,
+ .remove = ishtp_cl_device_remove,
+ .pm = &ishtp_cl_bus_dev_pm_ops,
+ .uevent = ishtp_cl_uevent,
+};
+
+static void ishtp_cl_dev_release(struct device *dev)
+{
+ kfree(to_ishtp_cl_device(dev));
+}
+
+static const struct device_type ishtp_cl_device_type = {
+ .release = ishtp_cl_dev_release,
+};
+
+/**
+ * ishtp_bus_add_device() - Function to create device on bus
+ * @dev: ishtp device
+ * @uuid: uuid of the client
+ * @name: Name of the client
+ *
+ * Allocate ISHTP bus client device, attach it to uuid
+ * and register with ISHTP bus.
+ *
+ * Return: ishtp_cl_device pointer or NULL on failure
+ */
+static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
+ guid_t uuid, char *name)
+{
+ struct ishtp_cl_device *device;
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_for_each_entry(device, &dev->device_list, device_link) {
+ if (!strcmp(name, dev_name(&device->dev))) {
+ device->fw_client = &dev->fw_clients[
+ dev->fw_client_presentation_num - 1];
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+ ishtp_cl_device_reset(device);
+ return device;
+ }
+ }
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+
+ device = kzalloc(sizeof(struct ishtp_cl_device), GFP_KERNEL);
+ if (!device)
+ return NULL;
+
+ device->dev.parent = dev->devc;
+ device->dev.bus = &ishtp_cl_bus_type;
+ device->dev.type = &ishtp_cl_device_type;
+ device->ishtp_dev = dev;
+
+ device->fw_client =
+ &dev->fw_clients[dev->fw_client_presentation_num - 1];
+
+ dev_set_name(&device->dev, "%s", name);
+
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_add_tail(&device->device_link, &dev->device_list);
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+
+ status = device_register(&device->dev);
+ if (status) {
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_del(&device->device_link);
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+ dev_err(dev->devc, "Failed to register ISHTP client device\n");
+ put_device(&device->dev);
+ return NULL;
+ }
+
+ ishtp_device_ready = true;
+
+ return device;
+}
+
+/**
+ * ishtp_bus_remove_device() - Function to release a device on the bus
+ * @device: client device instance
+ *
+ * This is the counterpart of ishtp_bus_add_device().
+ * The device is unregistered; the device structure is freed in the
+ * 'ishtp_cl_dev_release' function.
+ * Called only on an error in the PCI driver init path.
+ */
+static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
+{
+ device_unregister(&device->dev);
+}
+
+/**
+ * ishtp_cl_driver_register() - Client driver register
+ * @driver: the client driver instance
+ * @owner: Owner of this driver module
+ *
+ * Registers a client driver with the ISHTP bus so that it can be
+ * probed against matching client devices.
+ *
+ * Return: Return value of driver_register or -ENODEV if not ready
+ */
+int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner)
+{
+ if (!ishtp_device_ready)
+ return -ENODEV;
+
+ driver->driver.name = driver->name;
+ driver->driver.owner = owner;
+ driver->driver.bus = &ishtp_cl_bus_type;
+
+ return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL(ishtp_cl_driver_register);
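+
+/*
+ * Illustrative sketch (not part of the upstream file): a client driver
+ * describes itself with an ishtp_cl_driver and registers it, typically
+ * from its module_init(); all "my_*" names are hypothetical:
+ *
+ *	static struct ishtp_cl_driver my_ishtp_cl_driver = {
+ *		.name = "my-ishtp-client",
+ *		.guid = &my_guid,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *
+ *	rv = ishtp_cl_driver_register(&my_ishtp_cl_driver, THIS_MODULE);
+ */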
+
+/**
+ * ishtp_cl_driver_unregister() - Client driver unregister
+ * @driver: the client driver instance
+ *
+ * Unregister client during device removal process.
+ */
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL(ishtp_cl_driver_unregister);
+
+/**
+ * ishtp_bus_event_work() - event work function
+ * @work: work struct pointer
+ *
+ * Once an event is received for a client this work
+ * function is called. If the device has registered a
+ * callback then the callback is called.
+ */
+static void ishtp_bus_event_work(struct work_struct *work)
+{
+ struct ishtp_cl_device *device;
+
+ device = container_of(work, struct ishtp_cl_device, event_work);
+
+ if (device->event_cb)
+ device->event_cb(device);
+}
+
+/**
+ * ishtp_cl_bus_rx_event() - schedule event work
+ * @device: client device instance
+ *
+ * Once an event is received for a client this schedules
+ * a work function to process.
+ */
+void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device)
+{
+ if (!device || !device->event_cb)
+ return;
+
+ if (device->event_cb)
+ schedule_work(&device->event_work);
+}
+
+/**
+ * ishtp_register_event_cb() - Register callback
+ * @device: client device instance
+ * @event_cb: Event processor for a client
+ *
+ * Register a callback for events, called from client driver
+ *
+ * Return: Return 0 or -EALREADY if already registered
+ */
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*event_cb)(struct ishtp_cl_device *))
+{
+ if (device->event_cb)
+ return -EALREADY;
+
+ device->event_cb = event_cb;
+ INIT_WORK(&device->event_work, ishtp_bus_event_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_register_event_cb);
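+
+/*
+ * Illustrative sketch (not part of the upstream file): a client driver
+ * normally registers its RX event handler once, right after a successful
+ * connect; "my_event_cb" is a hypothetical function with the
+ * void (*)(struct ishtp_cl_device *) prototype:
+ *
+ *	rv = ishtp_register_event_cb(cl_device, my_event_cb);
+ *	if (rv == -EALREADY)
+ *		dev_warn(&cl_device->dev, "event callback already set\n");
+ */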
+
+/**
+ * ishtp_get_device() - update usage count for the device
+ * @cl_device: client device instance
+ *
+ * Increment the usage count. The device can't be deleted while its
+ * usage count is non-zero.
+ */
+void ishtp_get_device(struct ishtp_cl_device *cl_device)
+{
+ cl_device->reference_count++;
+}
+EXPORT_SYMBOL(ishtp_get_device);
+
+/**
+ * ishtp_put_device() - decrement usage count for the device
+ * @cl_device: client device instance
+ *
+ * Decrement the usage count. The device can be deleted once the count reaches 0.
+ */
+void ishtp_put_device(struct ishtp_cl_device *cl_device)
+{
+ cl_device->reference_count--;
+}
+EXPORT_SYMBOL(ishtp_put_device);
+
+/**
+ * ishtp_set_drvdata() - set client driver data
+ * @cl_device: client device instance
+ * @data: driver data need to be set
+ *
+ * Set client driver data to cl_device->driver_data.
+ */
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data)
+{
+ cl_device->driver_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_drvdata);
+
+/**
+ * ishtp_get_drvdata() - get client driver data
+ * @cl_device: client device instance
+ *
+ * Get client driver data from cl_device->driver_data.
+ *
+ * Return: pointer of driver data
+ */
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->driver_data;
+}
+EXPORT_SYMBOL(ishtp_get_drvdata);
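+
+/*
+ * Illustrative sketch (not part of the upstream file): drivers usually
+ * stash their per-device context in probe() and fetch it back later, for
+ * example in the event callback; "my_ctx" is hypothetical:
+ *
+ *	In probe():
+ *		ishtp_set_drvdata(cl_device, my_ctx);
+ *	Later, e.g. in the event callback:
+ *		my_ctx = ishtp_get_drvdata(cl_device);
+ */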
+
+/**
+ * ishtp_dev_to_cl_device() - get ishtp_cl_device instance from device instance
+ * @device: device instance
+ *
+ * Get the ishtp_cl_device instance which embeds the device instance in it.
+ *
+ * Return: pointer to ishtp_cl_device instance
+ */
+struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *device)
+{
+ return to_ishtp_cl_device(device);
+}
+EXPORT_SYMBOL(ishtp_dev_to_cl_device);
+
+/**
+ * ishtp_bus_new_client() - Create a new client
+ * @dev: ISHTP device instance
+ *
+ * Once bus protocol enumerates a client, this is called
+ * to add a device for the client.
+ *
+ * Return: 0 on success or error code on failure
+ */
+int ishtp_bus_new_client(struct ishtp_device *dev)
+{
+ int i;
+ char *dev_name;
+ struct ishtp_cl_device *cl_device;
+ guid_t device_uuid;
+
+ /*
+ * For all reported clients, create an unconnected client and add its
+ * device to ISHTP bus.
+	 * If the appropriate driver has already been loaded, this will
+	 * trigger its probe(). Otherwise, probe() will be called when the
+	 * driver is loaded.
+ */
+ i = dev->fw_client_presentation_num - 1;
+ device_uuid = dev->fw_clients[i].props.protocol_name;
+ dev_name = kasprintf(GFP_KERNEL, "{%pUL}", &device_uuid);
+ if (!dev_name)
+ return -ENOMEM;
+
+ cl_device = ishtp_bus_add_device(dev, device_uuid, dev_name);
+ if (!cl_device) {
+ kfree(dev_name);
+ return -ENOENT;
+ }
+
+ kfree(dev_name);
+
+ return 0;
+}
+
+/**
+ * ishtp_cl_device_bind() - bind a device
+ * @cl: ishtp client device
+ *
+ * Binds connected ishtp_cl to ISHTP bus device
+ *
+ * Return: 0 on success or fault code
+ */
+int ishtp_cl_device_bind(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_device *cl_device;
+ unsigned long flags;
+ int rv;
+
+ if (!cl->fw_client_id || cl->state != ISHTP_CL_CONNECTED)
+ return -EFAULT;
+
+ rv = -ENOENT;
+ spin_lock_irqsave(&cl->dev->device_list_lock, flags);
+ list_for_each_entry(cl_device, &cl->dev->device_list,
+ device_link) {
+ if (cl_device->fw_client &&
+ cl_device->fw_client->client_id == cl->fw_client_id) {
+ cl->device = cl_device;
+ rv = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cl->dev->device_list_lock, flags);
+ return rv;
+}
+
+/**
+ * ishtp_bus_remove_all_clients() - Remove all clients
+ * @ishtp_dev: ishtp device
+ * @warm_reset: true for a warm reset due to FW errors or S3 suspend
+ *
+ * This is part of the reset/remove flow. The main processing here targets
+ * error handling: removing connected clients when the FW has forced a
+ * reset or hit an error. On a warm reset the client devices themselves
+ * are not removed.
+ */
+void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
+ bool warm_reset)
+{
+ struct ishtp_cl_device *cl_device, *n;
+ struct ishtp_cl *cl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+
+ /*
+ * Wake any pending process. The waiter would check dev->state
+ * and determine that it's not enabled already,
+ * and will return error to its caller
+ */
+ wake_up_interruptible(&cl->wait_ctrl_res);
+
+ /* Disband any pending read/write requests and free rb */
+ ishtp_cl_flush_queues(cl);
+
+ /* Remove all free and in_process rings, both Rx and Tx */
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
+
+ /*
+ * Free client and ISHTP bus client device structures
+ * don't free host client because it is part of the OS fd
+ * structure
+ */
+ }
+ spin_unlock_irqrestore(&ishtp_dev->cl_list_lock, flags);
+
+ /* Release DMA buffers for client messages */
+ ishtp_cl_free_dma_buf(ishtp_dev);
+
+ /* remove bus clients */
+ spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
+ list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
+ device_link) {
+ cl_device->fw_client = NULL;
+ if (warm_reset && cl_device->reference_count)
+ continue;
+
+ list_del(&cl_device->device_link);
+ spin_unlock_irqrestore(&ishtp_dev->device_list_lock, flags);
+ ishtp_bus_remove_device(cl_device);
+ spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&ishtp_dev->device_list_lock, flags);
+
+ /* Free all client structures */
+ spin_lock_irqsave(&ishtp_dev->fw_clients_lock, flags);
+ kfree(ishtp_dev->fw_clients);
+ ishtp_dev->fw_clients = NULL;
+ ishtp_dev->fw_clients_num = 0;
+ ishtp_dev->fw_client_presentation_num = 0;
+ ishtp_dev->fw_client_index = 0;
+ bitmap_zero(ishtp_dev->fw_clients_map, ISHTP_CLIENTS_MAX);
+ spin_unlock_irqrestore(&ishtp_dev->fw_clients_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_bus_remove_all_clients);
+
+/**
+ * ishtp_reset_handler() - IPC reset handler
+ * @dev: ishtp device
+ *
+ * ISHTP Handler for IPC_RESET notification
+ */
+void ishtp_reset_handler(struct ishtp_device *dev)
+{
+ unsigned long flags;
+
+ /* Handle FW-initiated reset */
+ dev->dev_state = ISHTP_DEV_RESETTING;
+
+ /* Clear BH processing queue - no further HBMs */
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ dev->rd_msg_fifo_head = dev->rd_msg_fifo_tail = 0;
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+
+ /* Handle ISH FW reset against upper layers */
+ ishtp_bus_remove_all_clients(dev, true);
+}
+EXPORT_SYMBOL(ishtp_reset_handler);
+
+/**
+ * ishtp_reset_compl_handler() - Reset completion handler
+ * @dev: ishtp device
+ *
+ * ISHTP handler for IPC_RESET sequence completion to start
+ * host message bus start protocol sequence.
+ */
+void ishtp_reset_compl_handler(struct ishtp_device *dev)
+{
+ dev->dev_state = ISHTP_DEV_INIT_CLIENTS;
+ dev->hbm_state = ISHTP_HBM_START;
+ ishtp_hbm_start_req(dev);
+}
+EXPORT_SYMBOL(ishtp_reset_compl_handler);
+
+/**
+ * ishtp_use_dma_transfer() - Query whether DMA transfer is enabled
+ *
+ * This interface reports whether DMA should be used to send messages,
+ * as controlled by the ishtp_use_dma module parameter.
+ *
+ * Return: non-zero if DMA can be used
+ */
+int ishtp_use_dma_transfer(void)
+{
+ return ishtp_use_dma;
+}
+
+/**
+ * ishtp_device() - Return device pointer
+ * @device: ISHTP client device instance
+ *
+ * This interface is used to return device pointer from ishtp_cl_device
+ * instance.
+ *
+ * Return: device *.
+ */
+struct device *ishtp_device(struct ishtp_cl_device *device)
+{
+ return &device->dev;
+}
+EXPORT_SYMBOL(ishtp_device);
+
+/**
+ * ishtp_get_pci_device() - Return PCI device dev pointer
+ * @device: ISHTP client device instance
+ *
+ * This interface is used to return the PCI device pointer
+ * from an ishtp_cl_device instance.
+ *
+ * Return: device *.
+ */
+struct device *ishtp_get_pci_device(struct ishtp_cl_device *device)
+{
+ return device->ishtp_dev->devc;
+}
+EXPORT_SYMBOL(ishtp_get_pci_device);
+
+/**
+ * ishtp_trace_callback() - Return trace callback
+ * @cl_device: ISHTP client device instance
+ *
+ * This interface is used to return trace callback function pointer.
+ *
+ * Return: void *.
+ */
+void *ishtp_trace_callback(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->ishtp_dev->print_log;
+}
+EXPORT_SYMBOL(ishtp_trace_callback);
+
+/**
+ * ish_hw_reset() - Call HW reset IPC callback
+ * @dev: ISHTP device instance
+ *
+ * This interface is used to reset HW in case of error.
+ *
+ * Return: value from IPC hw_reset callback
+ */
+int ish_hw_reset(struct ishtp_device *dev)
+{
+ return dev->ops->hw_reset(dev);
+}
+EXPORT_SYMBOL(ish_hw_reset);
+
+/**
+ * ishtp_bus_register() - Function to register bus
+ *
+ * This registers the ishtp bus.
+ *
+ * Return: Return output of bus_register
+ */
+static int __init ishtp_bus_register(void)
+{
+ return bus_register(&ishtp_cl_bus_type);
+}
+
+/**
+ * ishtp_bus_unregister() - Function to unregister bus
+ *
+ * This unregisters the ishtp bus.
+ */
+static void __exit ishtp_bus_unregister(void)
+{
+ bus_unregister(&ishtp_cl_bus_type);
+}
+
+module_init(ishtp_bus_register);
+module_exit(ishtp_bus_unregister);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
new file mode 100644
index 000000000..5bb85c932
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ISHTP bus definitions
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ */
+#ifndef _LINUX_ISHTP_CL_BUS_H
+#define _LINUX_ISHTP_CL_BUS_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/intel-ish-client-if.h>
+
+struct ishtp_cl;
+struct ishtp_cl_device;
+struct ishtp_device;
+struct ishtp_msg_hdr;
+
+/**
+ * struct ishtp_cl_device - ISHTP device handle
+ * @dev: device pointer
+ * @ishtp_dev: pointer to the ishtp device structure, primarily to access
+ * hw device operation callbacks and properties
+ * @fw_client: fw_client pointer to get fw information such as protocol name,
+ * max message length, etc.
+ * @device_link: Link to next client in the list on a bus
+ * @event_work: Used to schedule rx event for client
+ * @driver_data: Storage for driver private data
+ * @reference_count: Used for get/put device
+ * @event_cb: Callback to driver to send events
+ *
+ * An ishtp_cl_device pointer is returned from ishtp_add_device()
+ * and links ISHTP bus clients to their actual host client pointer.
+ * Drivers for ISHTP devices will get an ishtp_cl_device pointer
+ * when being probed and shall use it for doing bus I/O.
+ */
+struct ishtp_cl_device {
+ struct device dev;
+ struct ishtp_device *ishtp_dev;
+ struct ishtp_fw_client *fw_client;
+ struct list_head device_link;
+ struct work_struct event_work;
+ void *driver_data;
+ int reference_count;
+ void (*event_cb)(struct ishtp_cl_device *device);
+};
+
+int ishtp_bus_new_client(struct ishtp_device *dev);
+void ishtp_remove_all_clients(struct ishtp_device *dev);
+int ishtp_cl_device_bind(struct ishtp_cl *cl);
+void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device);
+
+/* Write a multi-fragment message */
+int ishtp_send_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *hdr, void *msg,
+ void (*ipc_send_compl)(void *),
+ void *ipc_send_compl_prm);
+
+/* Write a single-fragment message */
+int ishtp_write_message(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *hdr,
+ void *buf);
+
+/* Use DMA to send/receive messages */
+int ishtp_use_dma_transfer(void);
+
+/* Exported functions */
+void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
+ bool warm_reset);
+
+void ishtp_recv(struct ishtp_device *dev);
+void ishtp_reset_handler(struct ishtp_device *dev);
+void ishtp_reset_compl_handler(struct ishtp_device *dev);
+
+int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const guid_t *cuuid);
+#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
new file mode 100644
index 000000000..513d7a4a1
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP Ring Buffers
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include "client.h"
+
+/**
+ * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
+ * @cl: client device instance
+ *
+ * Allocate and initialize RX ring buffers
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
+{
+ size_t len = cl->device->fw_client->props.max_msg_length;
+ int j;
+ struct ishtp_cl_rb *rb;
+ int ret = 0;
+ unsigned long flags;
+
+ for (j = 0; j < cl->rx_ring_size; ++j) {
+ rb = ishtp_io_rb_init(cl);
+ if (!rb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = ishtp_io_rb_alloc_buf(rb, len);
+ if (ret)
+ goto out;
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ }
+
+ return 0;
+
+out:
+ dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
+ ishtp_cl_free_rx_ring(cl);
+ return ret;
+}
+
+/**
+ * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
+ * @cl: client device instance
+ *
+ * Allocate and initialize TX ring buffers
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
+{
+ size_t len = cl->device->fw_client->props.max_msg_length;
+ int j;
+ unsigned long flags;
+
+ cl->tx_ring_free_size = 0;
+
+ /* Allocate pool to free Tx bufs */
+ for (j = 0; j < cl->tx_ring_size; ++j) {
+ struct ishtp_cl_tx_ring *tx_buf;
+
+ tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
+ if (!tx_buf)
+ goto out;
+
+ tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
+ if (!tx_buf->send_buf.data) {
+ kfree(tx_buf);
+ goto out;
+ }
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
+ list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
+ }
+ return 0;
+out:
+ dev_err(&cl->device->dev, "error in allocating Tx pool\n");
+ ishtp_cl_free_tx_ring(cl);
+ return -ENOMEM;
+}
+
+/**
+ * ishtp_cl_free_rx_ring() - Free RX ring buffers
+ * @cl: client device instance
+ *
+ * Free RX ring buffers
+ */
+void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+ unsigned long flags;
+
+ /* release allocated memory - pass over free_rb_list */
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ while (!list_empty(&cl->free_rb_list.list)) {
+ rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
+ list);
+ list_del(&rb->list);
+ kfree(rb->buffer.data);
+ kfree(rb);
+ }
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ /* release allocated memory - pass over in_process_list */
+ spin_lock_irqsave(&cl->in_process_spinlock, flags);
+ while (!list_empty(&cl->in_process_list.list)) {
+ rb = list_entry(cl->in_process_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del(&rb->list);
+ kfree(rb->buffer.data);
+ kfree(rb);
+ }
+ spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
+}
+
+/**
+ * ishtp_cl_free_tx_ring() - Free TX ring buffers
+ * @cl: client device instance
+ *
+ * Free TX ring buffers
+ */
+void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_tx_ring *tx_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
+ /* release allocated memory - pass over tx_free_list */
+ while (!list_empty(&cl->tx_free_list.list)) {
+ tx_buf = list_entry(cl->tx_free_list.list.next,
+ struct ishtp_cl_tx_ring, list);
+ list_del(&tx_buf->list);
+ --cl->tx_ring_free_size;
+ kfree(tx_buf->send_buf.data);
+ kfree(tx_buf);
+ }
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, flags);
+ /* release allocated memory - pass over tx_list */
+ while (!list_empty(&cl->tx_list.list)) {
+ tx_buf = list_entry(cl->tx_list.list.next,
+ struct ishtp_cl_tx_ring, list);
+ list_del(&tx_buf->list);
+ kfree(tx_buf->send_buf.data);
+ kfree(tx_buf);
+ }
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
+}
+
+/**
+ * ishtp_io_rb_free() - Free IO request block
+ * @rb: IO request block
+ *
+ * Free io request block memory
+ */
+void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
+{
+ if (rb == NULL)
+ return;
+
+ kfree(rb->buffer.data);
+ kfree(rb);
+}
+
+/**
+ * ishtp_io_rb_init() - Allocate and init IO request block
+ * @cl: client device instance
+ *
+ * Allocate and initialize request block
+ *
+ * Return: Allocated IO request block pointer, or NULL on allocation failure
+ */
+struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+
+ rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
+ if (!rb)
+ return NULL;
+
+ INIT_LIST_HEAD(&rb->list);
+ rb->cl = cl;
+ rb->buf_idx = 0;
+ return rb;
+}
+
+/**
+ * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
+ * @rb: IO request block
+ * @length: length of response buffer
+ *
+ * Allocate response buffer
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
+{
+ if (!rb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ rb->buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!rb->buffer.data)
+ return -ENOMEM;
+
+ rb->buffer.size = length;
+ return 0;
+}
+
+/**
+ * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
+ * @rb: IO request block
+ *
+ * Re-append rb to its client's free list and send flow control if needed
+ *
+ * Return: 0 on success else -EFAULT
+ */
+int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
+{
+ struct ishtp_cl *cl;
+ int rets = 0;
+ unsigned long flags;
+
+ if (!rb || !rb->cl)
+ return -EFAULT;
+
+ cl = rb->cl;
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+
+ /*
+ * If we returned the first buffer to empty 'free' list,
+ * send flow control
+ */
+ if (!cl->out_flow_ctrl_creds)
+ rets = ishtp_cl_read_start(cl);
+
+ return rets;
+}
+EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
+
+/**
+ * ishtp_cl_tx_empty() - test whether client device tx buffer is empty
+ * @cl: Pointer to client device instance
+ *
+ * Look at the client device tx buffer list and check whether it is empty
+ *
+ * Return: true if client tx buffer list is empty else false
+ */
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
+{
+ int tx_list_empty;
+ unsigned long tx_flags;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ tx_list_empty = list_empty(&cl->tx_list.list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ return !!tx_list_empty;
+}
+EXPORT_SYMBOL(ishtp_cl_tx_empty);
+
+/**
+ * ishtp_cl_rx_get_rb() - Get a rb from client device rx buffer list
+ * @cl: Pointer to client device instance
+ *
+ * Check client device in-processing buffer list and get a rb from it.
+ *
+ * Return: rb pointer if buffer list isn't empty else NULL
+ */
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
+{
+ unsigned long rx_flags;
+ struct ishtp_cl_rb *rb;
+
+ spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
+ rb = list_first_entry_or_null(&cl->in_process_list.list,
+ struct ishtp_cl_rb, list);
+ if (rb)
+ list_del_init(&rb->list);
+ spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);
+
+ return rb;
+}
+EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
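+
+/*
+ * Illustrative sketch (not part of the upstream file): a client driver's
+ * event callback typically drains the in-process list and recycles each
+ * rb so that flow control credits are replenished; "cl" and "process()"
+ * are hypothetical:
+ *
+ *	struct ishtp_cl_rb *rb;
+ *
+ *	while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
+ *		process(rb->buffer.data, rb->buf_idx);
+ *		ishtp_cl_io_rb_recycle(rb);
+ *	}
+ */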
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
new file mode 100644
index 000000000..c0d69303e
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -0,0 +1,1108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP client logic
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include "hbm.h"
+#include "client.h"
+
+int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
+{
+ unsigned long tx_free_flags;
+ int size;
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+
+ return size;
+}
+EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
+
+int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
+{
+ return cl->tx_ring_free_size;
+}
+EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
+
+/**
+ * ishtp_read_list_flush() - Flush read queue
+ * @cl: ishtp client instance
+ *
+ * Used to remove all entries from read queue for a client
+ */
+static void ishtp_read_list_flush(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
+ list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
+ if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ }
+ spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
+}
+
+/**
+ * ishtp_cl_flush_queues() - Flush all queues for a client
+ * @cl: ishtp client instance
+ *
+ * Used to remove all queues for a client. This is called when a client device
+ * needs reset due to error, S3 resume or during module removal
+ *
+ * Return: 0 on success else -EINVAL if device is NULL
+ */
+int ishtp_cl_flush_queues(struct ishtp_cl *cl)
+{
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ ishtp_read_list_flush(cl);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_cl_flush_queues);
+
+/**
+ * ishtp_cl_init() - Initialize all fields of a client device
+ * @cl: ishtp client instance
+ * @dev: ishtp device
+ *
+ * Initializes the client device fields: spinlocks, queues, etc.
+ * This function is called during new client creation
+ */
+static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
+{
+ memset(cl, 0, sizeof(struct ishtp_cl));
+ init_waitqueue_head(&cl->wait_ctrl_res);
+ spin_lock_init(&cl->free_list_spinlock);
+ spin_lock_init(&cl->in_process_spinlock);
+ spin_lock_init(&cl->tx_list_spinlock);
+ spin_lock_init(&cl->tx_free_list_spinlock);
+ spin_lock_init(&cl->fc_spinlock);
+ INIT_LIST_HEAD(&cl->link);
+ cl->dev = dev;
+
+ INIT_LIST_HEAD(&cl->free_rb_list.list);
+ INIT_LIST_HEAD(&cl->tx_list.list);
+ INIT_LIST_HEAD(&cl->tx_free_list.list);
+ INIT_LIST_HEAD(&cl->in_process_list.list);
+
+ cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
+ cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
+ cl->tx_ring_free_size = cl->tx_ring_size;
+
+ /* dma */
+ cl->last_tx_path = CL_TX_PATH_IPC;
+ cl->last_dma_acked = 1;
+ cl->last_dma_addr = NULL;
+ cl->last_ipc_acked = 1;
+}
+
+/**
+ * ishtp_cl_allocate() - allocates client structure and sets it up.
+ * @cl_device: ISHTP client device instance
+ *
+ * Allocate memory for a new client and initialize each field.
+ *
+ * Return: The allocated client instance or NULL on failure
+ */
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *cl;
+
+ cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
+ if (!cl)
+ return NULL;
+
+ ishtp_cl_init(cl, cl_device->ishtp_dev);
+ return cl;
+}
+EXPORT_SYMBOL(ishtp_cl_allocate);
+
+/**
+ * ishtp_cl_free() - Frees a client device
+ * @cl: client device instance
+ *
+ * Frees a client device
+ */
+void ishtp_cl_free(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ unsigned long flags;
+
+ if (!cl)
+ return;
+
+ dev = cl->dev;
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
+ kfree(cl);
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_cl_free);
+
+/**
+ * ishtp_cl_link() - Reserve a host id and link the client instance
+ * @cl: client device instance
+ *
+ * This allocates a single bit in the hostmap. This function makes sure
+ * that not too many client sessions are opened at the same time. Once
+ * allocated, the client device instance is added to the ishtp device's
+ * current client list.
+ *
+ * Return: 0 or error code on failure
+ */
+int ishtp_cl_link(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ unsigned long flags, flags_cl;
+ int id, ret = 0;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ spin_lock_irqsave(&dev->device_lock, flags);
+
+ if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
+ ret = -EMFILE;
+ goto unlock_dev;
+ }
+
+ id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);
+
+ if (id >= ISHTP_CLIENTS_MAX) {
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+ dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
+ return -ENOENT;
+ }
+
+ dev->open_handle_count++;
+ cl->host_client_id = id;
+ spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ ret = -ENODEV;
+ goto unlock_cl;
+ }
+ list_add_tail(&cl->link, &dev->cl_list);
+ set_bit(id, dev->host_clients_map);
+ cl->state = ISHTP_CL_INITIALIZING;
+
+unlock_cl:
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
+unlock_dev:
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(ishtp_cl_link);
+
+/**
+ * ishtp_cl_unlink() - remove a client from the ishtp device's client list
+ * @cl: client device instance
+ *
+ * Remove a previously linked client from the ishtp device.
+ */
+void ishtp_cl_unlink(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl *pos;
+ unsigned long flags;
+
+ /* don't shout on error exit path */
+ if (!cl || !cl->dev)
+ return;
+
+ dev = cl->dev;
+
+ spin_lock_irqsave(&dev->device_lock, flags);
+ if (dev->open_handle_count > 0) {
+ clear_bit(cl->host_client_id, dev->host_clients_map);
+ dev->open_handle_count--;
+ }
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+
+ /*
+ * This checks that 'cl' is actually linked into device's structure,
+ * before attempting 'list_del'
+ */
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(pos, &dev->cl_list, link)
+ if (cl->host_client_id == pos->host_client_id) {
+ list_del_init(&pos->link);
+ break;
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_cl_unlink);
+
+/**
+ * ishtp_cl_disconnect() - Send disconnect request to firmware
+ * @cl: client device instance
+ *
+ * Send a disconnect request for a client to firmware.
+ *
+ * Return: 0 if successful disconnect response from the firmware or error
+ * code on failure
+ */
+int ishtp_cl_disconnect(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ int err;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ dev->print_log(dev, "%s() state %d\n", __func__, cl->state);
+
+ if (cl->state != ISHTP_CL_DISCONNECTING) {
+ dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
+ return 0;
+ }
+
+ if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
+ dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
+ dev_err(&cl->device->dev, "failed to disconnect.\n");
+ return -ENODEV;
+ }
+
+ err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
+ (dev->dev_state != ISHTP_DEV_ENABLED ||
+ cl->state == ISHTP_CL_DISCONNECTED),
+ ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
+
+ /*
+	 * If an FW reset arrived, this will happen. Don't check cl-> fields,
+	 * as 'cl' may already be freed.
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (cl->state == ISHTP_CL_DISCONNECTED) {
+ dev->print_log(dev, "%s() successful\n", __func__);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(ishtp_cl_disconnect);
+
+/**
+ * ishtp_cl_is_other_connecting() - Check if another client is connecting
+ * @cl: client device instance
+ *
+ * Checks if another client with the same fw client id is connecting
+ *
+ * Return: true if another client is connecting, else false
+ */
+static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl *pos;
+ unsigned long flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return false;
+
+ dev = cl->dev;
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(pos, &dev->cl_list, link) {
+ if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
+ cl->fw_client_id == pos->fw_client_id) {
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+ return true;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+
+ return false;
+}
+
+/**
+ * ishtp_cl_connect() - Send connect request to firmware
+ * @cl: client device instance
+ *
+ * Send a connect request for a client to firmware. If successful, it will
+ * also allocate the RX and TX ring buffers.
+ *
+ * Return: 0 if successful connect response from the firmware and able
+ * to bind and allocate ring buffers or error code on failure
+ */
+int ishtp_cl_connect(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);
+
+ if (ishtp_cl_is_other_connecting(cl)) {
+ dev->print_log(dev, "%s() Busy\n", __func__);
+ return -EBUSY;
+ }
+
+ if (ishtp_hbm_cl_connect_req(dev, cl)) {
+ dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
+ return -ENODEV;
+ }
+
+ rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
+ (dev->dev_state == ISHTP_DEV_ENABLED &&
+ (cl->state == ISHTP_CL_CONNECTED ||
+ cl->state == ISHTP_CL_DISCONNECTED)),
+ ishtp_secs_to_jiffies(
+ ISHTP_CL_CONNECT_TIMEOUT));
+ /*
+	 * If an FW reset arrived, this will happen. Don't check cl-> fields,
+	 * as 'cl' may already be freed.
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (cl->state != ISHTP_CL_CONNECTED) {
+ dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ rets = cl->status;
+ if (rets) {
+ dev->print_log(dev, "%s() Invalid status\n", __func__);
+ return rets;
+ }
+
+ rets = ishtp_cl_device_bind(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Bind error\n", __func__);
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ rets = ishtp_cl_alloc_rx_ring(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
+ /* if failed allocation, disconnect */
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ rets = ishtp_cl_alloc_tx_ring(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
+ /* if failed allocation, disconnect */
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ /* Upon successful connection and allocation, emit flow-control */
+ rets = ishtp_cl_read_start(cl);
+
+ dev->print_log(dev, "%s() successful\n", __func__);
+
+ return rets;
+}
+EXPORT_SYMBOL(ishtp_cl_connect);
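+
+/*
+ * Illustrative sketch (not part of the upstream file): the usual connection
+ * sequence driven from a client driver's probe() is allocate, link, set the
+ * firmware client id and state, then connect (error handling omitted):
+ *
+ *	cl = ishtp_cl_allocate(cl_device);
+ *	rv = ishtp_cl_link(cl);
+ *	cl->fw_client_id = fw_client->client_id;
+ *	cl->state = ISHTP_CL_CONNECTING;
+ *	rv = ishtp_cl_connect(cl);
+ */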
+
+/**
+ * ishtp_cl_read_start() - Prepare to read client message
+ * @cl: client device instance
+ *
+ * Get a free buffer from the pool of free read buffers and add it to the
+ * device read list. Send a flow control request to firmware so that it is
+ * able to send the next message.
+ *
+ * Return: 0 if successful or error code on failure
+ */
+int ishtp_cl_read_start(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl_rb *rb;
+ int rets;
+ int i;
+ unsigned long flags;
+ unsigned long dev_flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != ISHTP_CL_CONNECTED)
+ return -ENODEV;
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ return -ENODEV;
+
+ i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
+ if (i < 0) {
+ dev_err(&cl->device->dev, "no such fw client %d\n",
+ cl->fw_client_id);
+ return -ENODEV;
+ }
+
+ /* The current rb is the head of the free rb list */
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ if (list_empty(&cl->free_rb_list.list)) {
+ dev_warn(&cl->device->dev,
+ "[ishtp-ish] Rx buffers pool is empty\n");
+ rets = -ENOMEM;
+ rb = NULL;
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ goto out;
+ }
+ rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
+ list_del_init(&rb->list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+
+ rb->cl = cl;
+ rb->buf_idx = 0;
+
+ INIT_LIST_HEAD(&rb->list);
+ rets = 0;
+
+ /*
+ * This must be BEFORE sending flow control -
+ * response in ISR may come too fast...
+ */
+ spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+ list_add_tail(&rb->list, &dev->read_list.list);
+ spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+ if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+out:
+ /* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
+ if (rets && rb) {
+ spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+ list_del(&rb->list);
+ spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ }
+ return rets;
+}
+
+/**
+ * ishtp_cl_send() - Send a message to firmware
+ * @cl: client device instance
+ * @buf: message buffer
+ * @length: length of message
+ *
+ * If the client is in the correct state to send a message, this function gets
+ * a buffer from the tx ring buffers, copies the message data into it and
+ * sends the message using ishtp_cl_send_msg().
+ *
+ * Return: 0 if successful or error code on failure
+ */
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
+{
+ struct ishtp_device *dev;
+ int id;
+ struct ishtp_cl_tx_ring *cl_msg;
+ int have_msg_to_send = 0;
+ unsigned long tx_flags, tx_free_flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != ISHTP_CL_CONNECTED) {
+ ++cl->err_send_msg;
+ return -EPIPE;
+ }
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ ++cl->err_send_msg;
+ return -ENODEV;
+ }
+
+ /* Check if we have fw client device */
+ id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
+ if (id < 0) {
+ ++cl->err_send_msg;
+ return -ENOENT;
+ }
+
+ if (length > dev->fw_clients[id].props.max_msg_length) {
+ ++cl->err_send_msg;
+ return -EMSGSIZE;
+ }
+
+ /* No free bufs */
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ if (list_empty(&cl->tx_free_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+ ++cl->err_send_msg;
+ return -ENOMEM;
+ }
+
+ cl_msg = list_first_entry(&cl->tx_free_list.list,
+ struct ishtp_cl_tx_ring, list);
+ if (!cl_msg->send_buf.data) {
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+ return -EIO;
+ /* Should not happen, as free list is pre-allocated */
+ }
+ /*
+ * This is safe, as 'length' is already checked for not exceeding
+ * max ISHTP message size per client
+ */
+ list_del_init(&cl_msg->list);
+ --cl->tx_ring_free_size;
+
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+ memcpy(cl_msg->send_buf.data, buf, length);
+ cl_msg->send_buf.size = length;
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ have_msg_to_send = !list_empty(&cl->tx_list.list);
+ list_add_tail(&cl_msg->list, &cl->tx_list.list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
+ ishtp_cl_send_msg(dev, cl);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_cl_send);
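+
+/*
+ * Illustrative sketch (not part of the upstream file): once connected, a
+ * client driver queues an outgoing message with ishtp_cl_send(); the bus
+ * layer fragments it and picks the IPC or DMA path. "report" and
+ * "report_len" are hypothetical:
+ *
+ *	rv = ishtp_cl_send(cl, report, report_len);
+ *	if (rv)
+ *		dev_err(&cl->device->dev, "send failed: %d\n", rv);
+ */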
+
+/**
+ * ishtp_cl_read_complete() - read complete
+ * @rb: Pointer to client request block
+ *
+ * If the message is completely received call ishtp_cl_bus_rx_event()
+ * to process message
+ */
+static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
+{
+ unsigned long flags;
+ int schedule_work_flag = 0;
+ struct ishtp_cl *cl = rb->cl;
+
+ spin_lock_irqsave(&cl->in_process_spinlock, flags);
+ /*
+ * if in-process list is empty, then need to schedule
+ * the processing thread
+ */
+ schedule_work_flag = list_empty(&cl->in_process_list.list);
+ list_add_tail(&rb->list, &cl->in_process_list.list);
+ spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
+
+ if (schedule_work_flag)
+ ishtp_cl_bus_rx_event(cl->device);
+}
+
+/**
+ * ipc_tx_send() - IPC tx send function
+ * @prm: Pointer to the client instance (struct ishtp_cl), passed as void *
+ *
+ * Send message over IPC. Message will be split into fragments
+ * if message size is bigger than IPC FIFO size, and all
+ * fragments will be sent one by one.
+ */
+static void ipc_tx_send(void *prm)
+{
+ struct ishtp_cl *cl = prm;
+ struct ishtp_cl_tx_ring *cl_msg;
+ size_t rem;
+ struct ishtp_device *dev = (cl ? cl->dev : NULL);
+ struct ishtp_msg_hdr ishtp_hdr;
+ unsigned long tx_flags, tx_free_flags;
+ unsigned char *pmsg;
+
+ if (!dev)
+ return;
+
+ /*
+	 * These checks cover the case where some critical error has
+	 * occurred before this callback is called.
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ return;
+
+ if (cl->state != ISHTP_CL_CONNECTED)
+ return;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ if (list_empty(&cl->tx_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ if (!cl->sending) {
+ --cl->ishtp_flow_ctrl_creds;
+ cl->last_ipc_acked = 0;
+ cl->last_tx_path = CL_TX_PATH_IPC;
+ cl->sending = 1;
+ }
+
+ cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
+ list);
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+
+ while (rem > 0) {
+ ishtp_hdr.host_addr = cl->host_client_id;
+ ishtp_hdr.fw_addr = cl->fw_client_id;
+ ishtp_hdr.reserved = 0;
+ pmsg = cl_msg->send_buf.data + cl->tx_offs;
+
+ if (rem <= dev->mtu) {
+ /* Last fragment or only one packet */
+ ishtp_hdr.length = rem;
+ ishtp_hdr.msg_complete = 1;
+ /* Submit to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs = 0;
+ cl->sending = 0;
+
+ break;
+ } else {
+ /* Send ipc fragment */
+ ishtp_hdr.length = dev->mtu;
+ ishtp_hdr.msg_complete = 0;
+			/* All fragments submitted to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs += dev->mtu;
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+ }
+ }
+
+ list_del_init(&cl_msg->list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+}
+
+/**
+ * ishtp_cl_send_msg_ipc() - Send message using IPC
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message over IPC not using DMA
+ */
+static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ /* If last DMA message wasn't acked yet, leave this one in Tx queue */
+ if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
+ return;
+
+ cl->tx_offs = 0;
+ ipc_tx_send(cl);
+ ++cl->send_msg_cnt_ipc;
+}
+
+/**
+ * ishtp_cl_send_msg_dma() - Send message using DMA
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message using DMA
+ */
+static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ struct dma_xfer_hbm dma_xfer;
+ unsigned char *msg_addr;
+ int off;
+ struct ishtp_cl_tx_ring *cl_msg;
+ unsigned long tx_flags, tx_free_flags;
+
+ /* If last IPC message wasn't acked yet, leave this one in Tx queue */
+ if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
+ return;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ if (list_empty(&cl->tx_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
+ list);
+
+ msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
+ if (!msg_addr) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ if (dev->transfer_path == CL_TX_PATH_DEFAULT)
+ ishtp_cl_send_msg_ipc(dev, cl);
+ return;
+ }
+
+ list_del_init(&cl_msg->list); /* Must be before write */
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ --cl->ishtp_flow_ctrl_creds;
+ cl->last_dma_acked = 0;
+ cl->last_dma_addr = msg_addr;
+ cl->last_tx_path = CL_TX_PATH_DMA;
+
+ /* write msg to dma buf */
+ memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);
+
+ /* send dma_xfer hbm msg */
+ off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
+ ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
+ dma_xfer.hbm = DMA_XFER;
+ dma_xfer.fw_client_id = cl->fw_client_id;
+ dma_xfer.host_client_id = cl->host_client_id;
+ dma_xfer.reserved = 0;
+ dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
+ dma_xfer.msg_length = cl_msg->send_buf.size;
+ dma_xfer.reserved2 = 0;
+ ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+ ++cl->send_msg_cnt_dma;
+}
+
+/**
+ * ishtp_cl_send_msg() - Send message using DMA or IPC
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message using DMA or IPC based on transfer_path
+ */
+void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ if (dev->transfer_path == CL_TX_PATH_DMA)
+ ishtp_cl_send_msg_dma(dev, cl);
+ else
+ ishtp_cl_send_msg_ipc(dev, cl);
+}
+
+/**
+ * recv_ishtp_cl_msg() - Receive client message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: Pointer to message header
+ *
+ * Receive and dispatch ISHTP client messages. This function executes in ISR
+ * or work queue context
+ */
+void recv_ishtp_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr)
+{
+ struct ishtp_cl *cl;
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *new_rb;
+ unsigned char *buffer = NULL;
+ struct ishtp_cl_rb *complete_rb = NULL;
+ unsigned long flags;
+ int rb_count;
+
+ if (ishtp_hdr->reserved) {
+ dev_err(dev->devc, "corrupted message header.\n");
+ goto eoi;
+ }
+
+ if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
+ dev_err(dev->devc,
+ "ISHTP message length in hdr exceeds IPC MTU\n");
+ goto eoi;
+ }
+
+ spin_lock_irqsave(&dev->read_list_spinlock, flags);
+ rb_count = -1;
+ list_for_each_entry(rb, &dev->read_list.list, list) {
+ ++rb_count;
+ cl = rb->cl;
+ if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
+ cl->fw_client_id == ishtp_hdr->fw_addr) ||
+ !(cl->state == ISHTP_CL_CONNECTED))
+ continue;
+
+ /* If no Rx buffer is allocated, disband the rb */
+ if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "Rx buffer is not allocated.\n");
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ cl->status = -ENOMEM;
+ goto eoi;
+ }
+
+ /*
+ * If the message buffer overflows (exceeds the max. client msg
+ * size), drop the message and recycle the buffer.
+ * Do we need to disconnect such a client? (We don't send
+ * back FC, so communication will be stuck anyway)
+ */
+ if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "message overflow. size %d len %d idx %ld\n",
+ rb->buffer.size, ishtp_hdr->length,
+ rb->buf_idx);
+ list_del(&rb->list);
+ ishtp_cl_io_rb_recycle(rb);
+ cl->status = -EIO;
+ goto eoi;
+ }
+
+ buffer = rb->buffer.data + rb->buf_idx;
+ dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);
+
+ rb->buf_idx += ishtp_hdr->length;
+ if (ishtp_hdr->msg_complete) {
+ /* Last fragment in message - it's complete */
+ cl->status = 0;
+ list_del(&rb->list);
+ complete_rb = rb;
+
+ --cl->out_flow_ctrl_creds;
+ /*
+ * the whole msg arrived, send a new FC, and add a new
+ * rb buffer for the next coming msg
+ */
+ spin_lock(&cl->free_list_spinlock);
+
+ if (!list_empty(&cl->free_rb_list.list)) {
+ new_rb = list_entry(cl->free_rb_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del_init(&new_rb->list);
+ spin_unlock(&cl->free_list_spinlock);
+ new_rb->cl = cl;
+ new_rb->buf_idx = 0;
+ INIT_LIST_HEAD(&new_rb->list);
+ list_add_tail(&new_rb->list,
+ &dev->read_list.list);
+
+ ishtp_hbm_cl_flow_control_req(dev, cl);
+ } else {
+ spin_unlock(&cl->free_list_spinlock);
+ }
+ }
+ /* One more fragment in message (even if this was last) */
+ ++cl->recv_msg_num_frags;
+
+ /*
+ * We can safely break here (and in BH too),
+ * a single input message can go only to a single request!
+ */
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ /* If it's nobody's message, just read and discard it */
+ if (!buffer) {
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+
+ dev_err(dev->devc, "Dropped Rx msg - no request\n");
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+ goto eoi;
+ }
+
+ if (complete_rb) {
+ cl = complete_rb->cl;
+ cl->ts_rx = ktime_get();
+ ++cl->recv_msg_cnt_ipc;
+ ishtp_cl_read_complete(complete_rb);
+ }
+eoi:
+ return;
+}
+
+/**
+ * recv_ishtp_cl_msg_dma() - Receive client message via DMA
+ * @dev: ISHTP device instance
+ * @msg: message pointer
+ * @hbm: hbm buffer
+ *
+ * Receive and dispatch ISHTP client messages using DMA. This function executes
+ * in ISR or work queue context
+ */
+void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
+ struct dma_xfer_hbm *hbm)
+{
+ struct ishtp_cl *cl;
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *new_rb;
+ unsigned char *buffer = NULL;
+ struct ishtp_cl_rb *complete_rb = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->read_list_spinlock, flags);
+
+ list_for_each_entry(rb, &dev->read_list.list, list) {
+ cl = rb->cl;
+ if (!cl || !(cl->host_client_id == hbm->host_client_id &&
+ cl->fw_client_id == hbm->fw_client_id) ||
+ !(cl->state == ISHTP_CL_CONNECTED))
+ continue;
+
+ /*
+ * If no Rx buffer is allocated, disband the rb
+ */
+ if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "response buffer is not allocated.\n");
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ cl->status = -ENOMEM;
+ goto eoi;
+ }
+
+ /*
+ * If the message buffer overflows (exceeds the max. client msg
+ * size), drop the message and recycle the buffer.
+ * Do we need to disconnect such a client? (We don't send
+ * back FC, so communication will be stuck anyway)
+ */
+ if (rb->buffer.size < hbm->msg_length) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "message overflow. size %d len %d idx %ld\n",
+ rb->buffer.size, hbm->msg_length, rb->buf_idx);
+ list_del(&rb->list);
+ ishtp_cl_io_rb_recycle(rb);
+ cl->status = -EIO;
+ goto eoi;
+ }
+
+ buffer = rb->buffer.data;
+ memcpy(buffer, msg, hbm->msg_length);
+ rb->buf_idx = hbm->msg_length;
+
+ /* Last fragment in message - it's complete */
+ cl->status = 0;
+ list_del(&rb->list);
+ complete_rb = rb;
+
+ --cl->out_flow_ctrl_creds;
+ /*
+ * the whole msg arrived, send a new FC, and add a new
+ * rb buffer for the next coming msg
+ */
+ spin_lock(&cl->free_list_spinlock);
+
+ if (!list_empty(&cl->free_rb_list.list)) {
+ new_rb = list_entry(cl->free_rb_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del_init(&new_rb->list);
+ spin_unlock(&cl->free_list_spinlock);
+ new_rb->cl = cl;
+ new_rb->buf_idx = 0;
+ INIT_LIST_HEAD(&new_rb->list);
+ list_add_tail(&new_rb->list,
+ &dev->read_list.list);
+
+ ishtp_hbm_cl_flow_control_req(dev, cl);
+ } else {
+ spin_unlock(&cl->free_list_spinlock);
+ }
+
+ /* One more fragment in message (this is always last) */
+ ++cl->recv_msg_num_frags;
+
+ /*
+ * We can safely break here (and in BH too),
+ * a single input message can go only to a single request!
+ */
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ /* If it's nobody's message, just read and discard it */
+ if (!buffer) {
+ dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
+ goto eoi;
+ }
+
+ if (complete_rb) {
+ cl = complete_rb->cl;
+ cl->ts_rx = ktime_get();
+ ++cl->recv_msg_cnt_dma;
+ ishtp_cl_read_complete(complete_rb);
+ }
+eoi:
+ return;
+}
+
+void *ishtp_get_client_data(struct ishtp_cl *cl)
+{
+ return cl->client_data;
+}
+EXPORT_SYMBOL(ishtp_get_client_data);
+
+void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
+{
+ cl->client_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_client_data);
+
+struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
+{
+ return cl->dev;
+}
+EXPORT_SYMBOL(ishtp_get_ishtp_device);
+
+void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
+{
+ cl->tx_ring_size = size;
+}
+EXPORT_SYMBOL(ishtp_set_tx_ring_size);
+
+void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
+{
+ cl->rx_ring_size = size;
+}
+EXPORT_SYMBOL(ishtp_set_rx_ring_size);
+
+void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
+{
+ cl->state = state;
+}
+EXPORT_SYMBOL(ishtp_set_connection_state);
+
+void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
+{
+ cl->fw_client_id = fw_client_id;
+}
+EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
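The exported accessors above form the small surface a client driver touches before connecting. A hypothetical sketch of that usage (driver struct, function names and ring sizes are invented for illustration; only the ishtp_* accessors defined in this file are real, and the declarations come from client.h):

	/* Hypothetical client-driver glue built only on the accessors above */
	struct my_client_data {
		struct ishtp_cl *cl;
		int opened;
	};

	static void my_client_setup(struct ishtp_cl *cl, struct my_client_data *priv)
	{
		/* size the Tx/Rx rings before the connection is established */
		ishtp_set_tx_ring_size(cl, 16);
		ishtp_set_rx_ring_size(cl, 16);

		/* stash driver-private data on the client for later retrieval */
		priv->cl = cl;
		ishtp_set_client_data(cl, priv);
	}

	static void my_client_teardown(struct ishtp_cl *cl)
	{
		struct my_client_data *priv = ishtp_get_client_data(cl);

		priv->opened = 0;
	}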
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
new file mode 100644
index 000000000..fc62dd149
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ISHTP client logic
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#ifndef _ISHTP_CLIENT_H_
+#define _ISHTP_CLIENT_H_
+
+#include <linux/types.h>
+#include "ishtp-dev.h"
+
+/* Tx and Rx ring size */
+#define CL_DEF_RX_RING_SIZE 2
+#define CL_DEF_TX_RING_SIZE 2
+#define CL_MAX_RX_RING_SIZE 32
+#define CL_MAX_TX_RING_SIZE 32
+
+#define DMA_SLOT_SIZE 4096
+/* Number of IPC fragments after which it's worth sending via DMA */
+#define DMA_WORTH_THRESHOLD 3
+
+/* DMA/IPC Tx paths. Anything other than the default means path enforcement */
+#define CL_TX_PATH_DEFAULT 0
+#define CL_TX_PATH_IPC 1
+#define CL_TX_PATH_DMA 2
+
+/* Client Tx buffer list entry */
+struct ishtp_cl_tx_ring {
+ struct list_head list;
+ struct ishtp_msg_data send_buf;
+};
+
+/* ISHTP client instance */
+struct ishtp_cl {
+ struct list_head link;
+ struct ishtp_device *dev;
+ enum cl_state state;
+ int status;
+
+ /* Link to ISHTP bus device */
+ struct ishtp_cl_device *device;
+
+ /* ID of client connected */
+ uint8_t host_client_id;
+ uint8_t fw_client_id;
+ uint8_t ishtp_flow_ctrl_creds;
+ uint8_t out_flow_ctrl_creds;
+
+ /* dma */
+ int last_tx_path;
+ /* 0: ack wasn't received, 1: ack was received */
+ int last_dma_acked;
+ unsigned char *last_dma_addr;
+ /* 0: ack wasn't received, 1: ack was received */
+ int last_ipc_acked;
+
+ /* Rx ring buffer pool */
+ unsigned int rx_ring_size;
+ struct ishtp_cl_rb free_rb_list;
+ spinlock_t free_list_spinlock;
+ /* Rx in-process list */
+ struct ishtp_cl_rb in_process_list;
+ spinlock_t in_process_spinlock;
+
+ /* Client Tx buffers list */
+ unsigned int tx_ring_size;
+ struct ishtp_cl_tx_ring tx_list, tx_free_list;
+ int tx_ring_free_size;
+ spinlock_t tx_list_spinlock;
+ spinlock_t tx_free_list_spinlock;
+ size_t tx_offs; /* Offset in buffer at head of 'tx_list' */
+
+ /**
+ * If we get an FC and the list is not empty, we must know whether we
+ * are in the middle of sending.
+ * If so, increase the FC counter; otherwise, start sending the first
+ * msg in the list.
+ * (!) This is for the counting-FC implementation only. With single-FC
+ * the other party may NOT send an FC until it receives the complete
+ * message
+ */
+ int sending;
+
+ /* Send FC spinlock */
+ spinlock_t fc_spinlock;
+
+ /* wait queue for connect and disconnect response from FW */
+ wait_queue_head_t wait_ctrl_res;
+
+ /* Error stats */
+ unsigned int err_send_msg;
+ unsigned int err_send_fc;
+
+ /* Send/recv stats */
+ unsigned int send_msg_cnt_ipc;
+ unsigned int send_msg_cnt_dma;
+ unsigned int recv_msg_cnt_ipc;
+ unsigned int recv_msg_cnt_dma;
+ unsigned int recv_msg_num_frags;
+ unsigned int ishtp_flow_ctrl_cnt;
+ unsigned int out_flow_ctrl_cnt;
+
+ /* Rx msg ... out FC timing */
+ ktime_t ts_rx;
+ ktime_t ts_out_fc;
+ ktime_t ts_max_fc_delay;
+ void *client_data;
+};
+
+/* Client connection management internal functions */
+int ishtp_can_client_connect(struct ishtp_device *ishtp_dev, guid_t *uuid);
+int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id);
+void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl);
+void recv_ishtp_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr);
+int ishtp_cl_read_start(struct ishtp_cl *cl);
+
+/* Ring Buffer I/F */
+int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
+int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
+void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
+void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
+int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl);
+int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl);
+
+/* DMA I/F functions */
+void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
+ struct dma_xfer_hbm *hbm);
+void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev);
+void ishtp_cl_free_dma_buf(struct ishtp_device *dev);
+void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ uint32_t size);
+void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ void *msg_addr,
+ uint8_t size);
+
+/* Request blocks alloc/free I/F */
+struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl);
+void ishtp_io_rb_free(struct ishtp_cl_rb *priv_rb);
+int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length);
+
+/**
+ * ishtp_cl_cmp_id - check whether two clients have the same ID
+ * @cl1: first client
+ * @cl2: second client
+ *
+ * Return: true if the IDs are the same and neither client is NULL
+ */
+static inline bool ishtp_cl_cmp_id(const struct ishtp_cl *cl1,
+ const struct ishtp_cl *cl2)
+{
+ return cl1 && cl2 &&
+ (cl1->host_client_id == cl2->host_client_id) &&
+ (cl1->fw_client_id == cl2->fw_client_id);
+}
+
+#endif /* _ISHTP_CLIENT_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
new file mode 100644
index 000000000..00046cbfd
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP DMA I/F functions
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include "ishtp-dev.h"
+#include "client.h"
+
+/**
+ * ishtp_cl_alloc_dma_buf() - Allocate DMA RX and TX buffer
+ * @dev: ishtp device
+ *
+ * Allocate the RX and TX DMA buffers once during bus setup.
+ * It allocates a 1 MB RX buffer and a 1 MB TX buffer, each of which
+ * is divided into slots.
+ */
+void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev)
+{
+ dma_addr_t h;
+
+ if (dev->ishtp_host_dma_tx_buf)
+ return;
+
+ dev->ishtp_host_dma_tx_buf_size = 1024*1024;
+ dev->ishtp_host_dma_rx_buf_size = 1024*1024;
+
+ /* Allocate Tx buffer and init usage bitmap */
+ dev->ishtp_host_dma_tx_buf = dma_alloc_coherent(dev->devc,
+ dev->ishtp_host_dma_tx_buf_size,
+ &h, GFP_KERNEL);
+ if (dev->ishtp_host_dma_tx_buf)
+ dev->ishtp_host_dma_tx_buf_phys = h;
+
+ dev->ishtp_dma_num_slots = dev->ishtp_host_dma_tx_buf_size /
+ DMA_SLOT_SIZE;
+
+ dev->ishtp_dma_tx_map = kcalloc(dev->ishtp_dma_num_slots,
+ sizeof(uint8_t),
+ GFP_KERNEL);
+ spin_lock_init(&dev->ishtp_dma_tx_lock);
+
+ /* Allocate Rx buffer */
+ dev->ishtp_host_dma_rx_buf = dma_alloc_coherent(dev->devc,
+ dev->ishtp_host_dma_rx_buf_size,
+ &h, GFP_KERNEL);
+
+ if (dev->ishtp_host_dma_rx_buf)
+ dev->ishtp_host_dma_rx_buf_phys = h;
+}
+
+/**
+ * ishtp_cl_free_dma_buf() - Free DMA RX and TX buffer
+ * @dev: ishtp device
+ *
+ * Free the DMA buffers when all clients are released. This only
+ * happens during the error path in the ISH built-in driver
+ * model.
+ */
+void ishtp_cl_free_dma_buf(struct ishtp_device *dev)
+{
+ dma_addr_t h;
+
+ if (dev->ishtp_host_dma_tx_buf) {
+ h = dev->ishtp_host_dma_tx_buf_phys;
+ dma_free_coherent(dev->devc, dev->ishtp_host_dma_tx_buf_size,
+ dev->ishtp_host_dma_tx_buf, h);
+ }
+
+ if (dev->ishtp_host_dma_rx_buf) {
+ h = dev->ishtp_host_dma_rx_buf_phys;
+ dma_free_coherent(dev->devc, dev->ishtp_host_dma_rx_buf_size,
+ dev->ishtp_host_dma_rx_buf, h);
+ }
+
+ kfree(dev->ishtp_dma_tx_map);
+ dev->ishtp_host_dma_tx_buf = NULL;
+ dev->ishtp_host_dma_rx_buf = NULL;
+ dev->ishtp_dma_tx_map = NULL;
+}
+
+/*
+ * ishtp_cl_get_dma_send_buf() - Get a DMA memory slot
+ * @dev: ishtp device
+ * @size: Size of memory to get
+ *
+ * Find and return a free address of "size" bytes in the DMA Tx buffer.
+ * The function marks this address range as in use.
+ *
+ * Return: NULL when there is no free buffer, otherwise the address to copy to
+ */
+void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ uint32_t size)
+{
+ unsigned long flags;
+ int i, j, free;
+ /* an additional slot is needed if there is a remainder */
+ int required_slots = (size / DMA_SLOT_SIZE)
+ + 1 * (size % DMA_SLOT_SIZE != 0);
+
+ if (!dev->ishtp_dma_tx_map) {
+ dev_err(dev->devc, "Fail to allocate Tx map\n");
+ return NULL;
+ }
+
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
+ free = 1;
+ for (j = 0; j < required_slots; j++)
+ if (dev->ishtp_dma_tx_map[i+j]) {
+ free = 0;
+ i += j;
+ break;
+ }
+ if (free) {
+ /* mark memory as "caught" */
+ for (j = 0; j < required_slots; j++)
+ dev->ishtp_dma_tx_map[i+j] = 1;
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ return (i * DMA_SLOT_SIZE) +
+ (unsigned char *)dev->ishtp_host_dma_tx_buf;
+ }
+ }
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ dev_err(dev->devc, "No free DMA buffer to send msg\n");
+ return NULL;
+}
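The allocator above rounds a request up to whole DMA_SLOT_SIZE slots and then does a first-fit scan of the usage map. A small user-space sketch of the rounding step, with a few worked values:

	#include <stdio.h>

	#define DMA_SLOT_SIZE	4096

	/* Whole slots needed to hold 'size' bytes (round up) */
	static int required_slots(unsigned int size)
	{
		return size / DMA_SLOT_SIZE + (size % DMA_SLOT_SIZE != 0);
	}

	int main(void)
	{
		/* 4096 bytes -> 1 slot, 4097 bytes -> 2 slots, 9000 bytes -> 3 slots */
		printf("%d %d %d\n", required_slots(4096), required_slots(4097),
		       required_slots(9000));
		return 0;
	}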
+
+/*
+ * ishtp_cl_release_dma_acked_mem() - Release DMA memory slot
+ * @dev: ishtp device
+ * @msg_addr: message address of slot
+ * @size: Size of memory to release
+ *
+ * Returns the acked memory (from msg_addr, size bytes long) to the free list.
+ */
+void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ void *msg_addr,
+ uint8_t size)
+{
+ unsigned long flags;
+ int acked_slots = (size / DMA_SLOT_SIZE)
+ + 1 * (size % DMA_SLOT_SIZE != 0);
+ int i, j;
+
+ if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) {
+ dev_err(dev->devc, "Bad DMA Tx ack address\n");
+ return;
+ }
+
+ if (!dev->ishtp_dma_tx_map) {
+ dev_err(dev->devc, "Fail to allocate Tx map\n");
+ return;
+ }
+
+ i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (j = 0; j < acked_slots; j++) {
+ if ((i + j) >= dev->ishtp_dma_num_slots ||
+ !dev->ishtp_dma_tx_map[i+j]) {
+ /* no such slot, or memory is already free */
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ dev_err(dev->devc, "Bad DMA Tx ack address\n");
+ return;
+ }
+ dev->ishtp_dma_tx_map[i+j] = 0;
+ }
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
new file mode 100644
index 000000000..30a91d068
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -0,0 +1,990 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP bus layer messages handling
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include "ishtp-dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * ishtp_hbm_fw_cl_allocate() - Allocate FW clients
+ * @dev: ISHTP device instance
+ *
+ * Allocates storage for fw clients
+ */
+static void ishtp_hbm_fw_cl_allocate(struct ishtp_device *dev)
+{
+ struct ishtp_fw_client *clients;
+ int b;
+
+ /* count how many ISH clients we have */
+ for_each_set_bit(b, dev->fw_clients_map, ISHTP_CLIENTS_MAX)
+ dev->fw_clients_num++;
+
+ if (dev->fw_clients_num <= 0)
+ return;
+
+ /* allocate storage for fw clients representation */
+ clients = kcalloc(dev->fw_clients_num, sizeof(struct ishtp_fw_client),
+ GFP_KERNEL);
+ if (!clients) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ ish_hw_reset(dev);
+ return;
+ }
+ dev->fw_clients = clients;
+}
+
+/**
+ * ishtp_hbm_cl_hdr() - construct client hbm header
+ * @cl: client
+ * @hbm_cmd: host bus message command
+ * @buf: buffer for cl header
+ * @len: buffer length
+ *
+ * Initialize HBM buffer
+ */
+static inline void ishtp_hbm_cl_hdr(struct ishtp_cl *cl, uint8_t hbm_cmd,
+ void *buf, size_t len)
+{
+ struct ishtp_hbm_cl_cmd *cmd = buf;
+
+ memset(cmd, 0, len);
+
+ cmd->hbm_cmd = hbm_cmd;
+ cmd->host_addr = cl->host_client_id;
+ cmd->fw_addr = cl->fw_client_id;
+}
+
+/**
+ * ishtp_hbm_cl_addr_equal() - Compare client address
+ * @cl: client
+ * @buf: Client command buffer
+ *
+ * Compare client address with the address in command buffer
+ *
+ * Return: True if they have the same address
+ */
+static inline bool ishtp_hbm_cl_addr_equal(struct ishtp_cl *cl, void *buf)
+{
+ struct ishtp_hbm_cl_cmd *cmd = buf;
+
+ return cl->host_client_id == cmd->host_addr &&
+ cl->fw_client_id == cmd->fw_addr;
+}
+
+/**
+ * ishtp_hbm_start_wait() - Wait for HBM start message
+ * @dev: ISHTP device instance
+ *
+ * Wait for HBM start message from firmware
+ *
+ * Return: 0 if HBM start is/was received else timeout error
+ */
+int ishtp_hbm_start_wait(struct ishtp_device *dev)
+{
+ int ret;
+
+ if (dev->hbm_state > ISHTP_HBM_START)
+ return 0;
+
+ dev_dbg(dev->devc, "Going to wait for ishtp start. hbm_state=%08X\n",
+ dev->hbm_state);
+ ret = wait_event_interruptible_timeout(dev->wait_hbm_recvd_msg,
+ dev->hbm_state >= ISHTP_HBM_STARTED,
+ (ISHTP_INTEROP_TIMEOUT * HZ));
+
+ dev_dbg(dev->devc,
+ "Woke up from waiting for ishtp start. hbm_state=%08X\n",
+ dev->hbm_state);
+
+ if (ret <= 0 && (dev->hbm_state <= ISHTP_HBM_START)) {
+ dev->hbm_state = ISHTP_HBM_IDLE;
+ dev_err(dev->devc,
+ "waiting for ishtp start failed. ret=%d hbm_state=%08X\n",
+ ret, dev->hbm_state);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/**
+ * ishtp_hbm_start_req() - Send HBM start message
+ * @dev: ISHTP device instance
+ *
+ * Send HBM start message to firmware
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_start_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_host_version_request start_req = { 0 };
+
+ ishtp_hbm_hdr(&hdr, sizeof(start_req));
+
+ /* host start message */
+ start_req.hbm_cmd = HOST_START_REQ_CMD;
+ start_req.host_version.major_version = HBM_MAJOR_VERSION;
+ start_req.host_version.minor_version = HBM_MINOR_VERSION;
+
+ /*
+ * (!) Response to HBM start may be so quick that this thread would get
+ * preempted BEFORE managing to set hbm_state = ISHTP_HBM_START.
+ * So set it at first, change back to ISHTP_HBM_IDLE upon failure
+ */
+ dev->hbm_state = ISHTP_HBM_START;
+ if (ishtp_write_message(dev, &hdr, &start_req)) {
+ dev_err(dev->devc, "version message send failed\n");
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev->hbm_state = ISHTP_HBM_IDLE;
+ ish_hw_reset(dev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ishtp_hbm_enum_clients_req() - Send client enum req
+ * @dev: ISHTP device instance
+ *
+ * Send client enumeration request message
+ */
+void ishtp_hbm_enum_clients_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_host_enum_request enum_req = { 0 };
+
+ /* enumerate clients */
+ ishtp_hbm_hdr(&hdr, sizeof(enum_req));
+ enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;
+
+ if (ishtp_write_message(dev, &hdr, &enum_req)) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev_err(dev->devc, "enumeration request send failed\n");
+ ish_hw_reset(dev);
+ }
+ dev->hbm_state = ISHTP_HBM_ENUM_CLIENTS;
+}
+
+/**
+ * ishtp_hbm_prop_req() - Request property
+ * @dev: ISHTP device instance
+ *
+ * Request property for a single client
+ *
+ * Return: 0 if success else error code
+ */
+static int ishtp_hbm_prop_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_props_request prop_req = { 0 };
+ unsigned long next_client_index;
+ uint8_t client_num;
+
+ client_num = dev->fw_client_presentation_num;
+
+ next_client_index = find_next_bit(dev->fw_clients_map,
+ ISHTP_CLIENTS_MAX, dev->fw_client_index);
+
+ /* We got all client properties */
+ if (next_client_index == ISHTP_CLIENTS_MAX) {
+ dev->hbm_state = ISHTP_HBM_WORKING;
+ dev->dev_state = ISHTP_DEV_ENABLED;
+
+ for (dev->fw_client_presentation_num = 1;
+ dev->fw_client_presentation_num < client_num + 1;
+ ++dev->fw_client_presentation_num)
+ /* Add new client device */
+ ishtp_bus_new_client(dev);
+ return 0;
+ }
+
+ dev->fw_clients[client_num].client_id = next_client_index;
+
+ ishtp_hbm_hdr(&hdr, sizeof(prop_req));
+
+ prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ prop_req.address = next_client_index;
+
+ if (ishtp_write_message(dev, &hdr, &prop_req)) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev_err(dev->devc, "properties request send failed\n");
+ ish_hw_reset(dev);
+ return -EIO;
+ }
+
+ dev->fw_client_index = next_client_index;
+
+ return 0;
+}
+
+/**
+ * ishtp_hbm_stop_req() - Send HBM stop
+ * @dev: ISHTP device instance
+ *
+ * Send stop request message
+ */
+static void ishtp_hbm_stop_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_host_stop_request stop_req = { 0 };
+
+ ishtp_hbm_hdr(&hdr, sizeof(stop_req));
+
+ stop_req.hbm_cmd = HOST_STOP_REQ_CMD;
+ stop_req.reason = DRIVER_STOP_REQUEST;
+
+ ishtp_write_message(dev, &hdr, &stop_req);
+}
+
+/**
+ * ishtp_hbm_cl_flow_control_req() - Send flow control request
+ * @dev: ISHTP device instance
+ * @cl: ISHTP client instance
+ *
+ * Send flow control request
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_flow_control flow_ctrl;
+ const size_t len = sizeof(flow_ctrl);
+ int rv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->fc_spinlock, flags);
+
+ ishtp_hbm_hdr(&hdr, len);
+ ishtp_hbm_cl_hdr(cl, ISHTP_FLOW_CONTROL_CMD, &flow_ctrl, len);
+
+ /*
+ * Sync possible race when RB recycle and packet receive paths
+ * both try to send an out FC
+ */
+ if (cl->out_flow_ctrl_creds) {
+ spin_unlock_irqrestore(&cl->fc_spinlock, flags);
+ return 0;
+ }
+
+ cl->recv_msg_num_frags = 0;
+
+ rv = ishtp_write_message(dev, &hdr, &flow_ctrl);
+ if (!rv) {
+ ++cl->out_flow_ctrl_creds;
+ ++cl->out_flow_ctrl_cnt;
+ cl->ts_out_fc = ktime_get();
+ if (cl->ts_rx) {
+ ktime_t ts_diff = ktime_sub(cl->ts_out_fc, cl->ts_rx);
+ if (ktime_after(ts_diff, cl->ts_max_fc_delay))
+ cl->ts_max_fc_delay = ts_diff;
+ }
+ } else {
+ ++cl->err_send_fc;
+ }
+
+ spin_unlock_irqrestore(&cl->fc_spinlock, flags);
+ return rv;
+}
+
+/**
+ * ishtp_hbm_cl_disconnect_req() - Send disconnect request
+ * @dev: ISHTP device instance
+ * @cl: ISHTP client instance
+ *
+ * Send disconnect message to fw
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_client_connect_request disconn_req;
+ const size_t len = sizeof(disconn_req);
+
+ ishtp_hbm_hdr(&hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, &disconn_req, len);
+
+ return ishtp_write_message(dev, &hdr, &disconn_req);
+}
+
+/**
+ * ishtp_hbm_cl_disconnect_res() - Get disconnect response
+ * @dev: ISHTP device instance
+ * @rs: Response message
+ *
+ * Received disconnect response from fw
+ */
+static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+ wake_up_interruptible(&cl->wait_ctrl_res);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_cl_connect_req() - Send connect request
+ * @dev: ISHTP device instance
+ * @cl: client device instance
+ *
+ * Send connection request to specific fw client
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_client_connect_request conn_req;
+ const size_t len = sizeof(conn_req);
+
+ ishtp_hbm_hdr(&hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, &conn_req, len);
+
+ return ishtp_write_message(dev, &hdr, &conn_req);
+}
+
+/**
+ * ishtp_hbm_cl_connect_res() - Get connect response
+ * @dev: ISHTP device instance
+ * @rs: Response message
+ *
+ * Received connect response from fw
+ */
+static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (ishtp_hbm_cl_addr_equal(cl, rs)) {
+ if (!rs->status) {
+ cl->state = ISHTP_CL_CONNECTED;
+ cl->status = 0;
+ } else {
+ cl->state = ISHTP_CL_DISCONNECTED;
+ cl->status = -ENODEV;
+ }
+ wake_up_interruptible(&cl->wait_ctrl_res);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_fw_disconnect_req() - Receive disconnect request
+ * @dev: ISHTP device instance
+ * @disconnect_req: disconnect request structure
+ *
+ * Disconnect request bus message from the fw. Send disconnect response.
+ */
+static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
+ struct hbm_client_connect_request *disconnect_req)
+{
+ struct ishtp_cl *cl;
+ const size_t len = sizeof(struct hbm_client_connect_response);
+ unsigned long flags;
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[4]; /* All HBM messages are 4 bytes */
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (ishtp_hbm_cl_addr_equal(cl, disconnect_req)) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+
+ /* send disconnect response */
+ ishtp_hbm_hdr(&hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, data,
+ len);
+ ishtp_write_message(dev, &hdr, data);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_dma_xfer_ack() - Receive transfer ACK
+ * @dev: ISHTP device instance
+ * @dma_xfer: HBM transfer message
+ *
+ * Receive ack for ISHTP-over-DMA client message
+ */
+static void ishtp_hbm_dma_xfer_ack(struct ishtp_device *dev,
+ struct dma_xfer_hbm *dma_xfer)
+{
+ void *msg;
+ uint64_t offs;
+ struct ishtp_msg_hdr *ishtp_hdr =
+ (struct ishtp_msg_hdr *)&dev->ishtp_msg_hdr;
+ unsigned int msg_offs;
+ struct ishtp_cl *cl;
+
+ for (msg_offs = 0; msg_offs < ishtp_hdr->length;
+ msg_offs += sizeof(struct dma_xfer_hbm)) {
+ offs = dma_xfer->msg_addr - dev->ishtp_host_dma_tx_buf_phys;
+ if (offs > dev->ishtp_host_dma_tx_buf_size) {
+ dev_err(dev->devc, "Bad DMA Tx ack message address\n");
+ return;
+ }
+ if (dma_xfer->msg_length >
+ dev->ishtp_host_dma_tx_buf_size - offs) {
+ dev_err(dev->devc, "Bad DMA Tx ack message size\n");
+ return;
+ }
+
+ /* logical address of the acked mem */
+ msg = (unsigned char *)dev->ishtp_host_dma_tx_buf + offs;
+ ishtp_cl_release_dma_acked_mem(dev, msg, dma_xfer->msg_length);
+
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (cl->fw_client_id == dma_xfer->fw_client_id &&
+ cl->host_client_id == dma_xfer->host_client_id)
+ /*
+ * A single ack may cover several DMA transfers,
+ * and the last msg addr may lie inside the acked
+ * memory but not at its start
+ */
+ if (cl->last_dma_addr >=
+ (unsigned char *)msg &&
+ cl->last_dma_addr <
+ (unsigned char *)msg +
+ dma_xfer->msg_length) {
+ cl->last_dma_acked = 1;
+
+ if (!list_empty(&cl->tx_list.list) &&
+ cl->ishtp_flow_ctrl_creds) {
+ /*
+ * start sending the first msg
+ */
+ ishtp_cl_send_msg(dev, cl);
+ }
+ }
+ }
+ ++dma_xfer;
+ }
+}
+
+/**
+ * ishtp_hbm_dma_xfer() - Receive DMA transfer message
+ * @dev: ISHTP device instance
+ * @dma_xfer: HBM transfer message
+ *
+ * Receive ISHTP-over-DMA client message
+ */
+static void ishtp_hbm_dma_xfer(struct ishtp_device *dev,
+ struct dma_xfer_hbm *dma_xfer)
+{
+ void *msg;
+ uint64_t offs;
+ struct ishtp_msg_hdr hdr;
+ struct ishtp_msg_hdr *ishtp_hdr =
+ (struct ishtp_msg_hdr *) &dev->ishtp_msg_hdr;
+ struct dma_xfer_hbm *prm = dma_xfer;
+ unsigned int msg_offs;
+
+ for (msg_offs = 0; msg_offs < ishtp_hdr->length;
+ msg_offs += sizeof(struct dma_xfer_hbm)) {
+
+ offs = dma_xfer->msg_addr - dev->ishtp_host_dma_rx_buf_phys;
+ if (offs > dev->ishtp_host_dma_rx_buf_size) {
+ dev_err(dev->devc, "Bad DMA Rx message address\n");
+ return;
+ }
+ if (dma_xfer->msg_length >
+ dev->ishtp_host_dma_rx_buf_size - offs) {
+ dev_err(dev->devc, "Bad DMA Rx message size\n");
+ return;
+ }
+ msg = dev->ishtp_host_dma_rx_buf + offs;
+ recv_ishtp_cl_msg_dma(dev, msg, dma_xfer);
+ dma_xfer->hbm = DMA_XFER_ACK; /* Prepare for response */
+ ++dma_xfer;
+ }
+
+ /* Send DMA_XFER_ACK [...] */
+ ishtp_hbm_hdr(&hdr, ishtp_hdr->length);
+ ishtp_write_message(dev, &hdr, (unsigned char *)prm);
+}
+
+/**
+ * ishtp_hbm_dispatch() - HBM dispatch function
+ * @dev: ISHTP device instance
+ * @hdr: bus message
+ *
+ * Bottom half read routine after ISR to handle the read bus message cmd
+ * processing
+ */
+void ishtp_hbm_dispatch(struct ishtp_device *dev,
+ struct ishtp_bus_message *hdr)
+{
+ struct ishtp_bus_message *ishtp_msg;
+ struct ishtp_fw_client *fw_client;
+ struct hbm_host_version_response *version_res;
+ struct hbm_client_connect_response *connect_res;
+ struct hbm_client_connect_response *disconnect_res;
+ struct hbm_client_connect_request *disconnect_req;
+ struct hbm_props_response *props_res;
+ struct hbm_host_enum_response *enum_res;
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct dma_alloc_notify dma_alloc_notify;
+ struct dma_xfer_hbm *dma_xfer;
+
+ ishtp_msg = hdr;
+
+ switch (ishtp_msg->hbm_cmd) {
+ case HOST_START_RES_CMD:
+ version_res = (struct hbm_host_version_response *)ishtp_msg;
+ if (!version_res->host_version_supported) {
+ dev->version = version_res->fw_max_version;
+
+ dev->hbm_state = ISHTP_HBM_STOPPED;
+ ishtp_hbm_stop_req(dev);
+ return;
+ }
+
+ dev->version.major_version = HBM_MAJOR_VERSION;
+ dev->version.minor_version = HBM_MINOR_VERSION;
+ if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
+ dev->hbm_state == ISHTP_HBM_START) {
+ dev->hbm_state = ISHTP_HBM_STARTED;
+ ishtp_hbm_enum_clients_req(dev);
+ } else {
+ dev_err(dev->devc,
+ "reset: wrong host start response\n");
+ /* BUG: why do we arrive here? */
+ ish_hw_reset(dev);
+ return;
+ }
+
+ wake_up_interruptible(&dev->wait_hbm_recvd_msg);
+ break;
+
+ case CLIENT_CONNECT_RES_CMD:
+ connect_res = (struct hbm_client_connect_response *)ishtp_msg;
+ ishtp_hbm_cl_connect_res(dev, connect_res);
+ break;
+
+ case CLIENT_DISCONNECT_RES_CMD:
+ disconnect_res =
+ (struct hbm_client_connect_response *)ishtp_msg;
+ ishtp_hbm_cl_disconnect_res(dev, disconnect_res);
+ break;
+
+ case HOST_CLIENT_PROPERTIES_RES_CMD:
+ props_res = (struct hbm_props_response *)ishtp_msg;
+ fw_client = &dev->fw_clients[dev->fw_client_presentation_num];
+
+ if (props_res->status || !dev->fw_clients) {
+ dev_err(dev->devc,
+ "reset: properties response hbm wrong status\n");
+ ish_hw_reset(dev);
+ return;
+ }
+
+ if (fw_client->client_id != props_res->address) {
+ dev_err(dev->devc,
+ "reset: host properties response address mismatch [%02X %02X]\n",
+ fw_client->client_id, props_res->address);
+ ish_hw_reset(dev);
+ return;
+ }
+
+ if (dev->dev_state != ISHTP_DEV_INIT_CLIENTS ||
+ dev->hbm_state != ISHTP_HBM_CLIENT_PROPERTIES) {
+ dev_err(dev->devc,
+ "reset: unexpected properties response\n");
+ ish_hw_reset(dev);
+ return;
+ }
+
+ fw_client->props = props_res->client_properties;
+ dev->fw_client_index++;
+ dev->fw_client_presentation_num++;
+
+ /* request property for the next client */
+ ishtp_hbm_prop_req(dev);
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ break;
+
+ if (!ishtp_use_dma_transfer())
+ break;
+
+ dev_dbg(dev->devc, "Requesting to use DMA\n");
+ ishtp_cl_alloc_dma_buf(dev);
+ if (dev->ishtp_host_dma_rx_buf) {
+ const size_t len = sizeof(dma_alloc_notify);
+
+ memset(&dma_alloc_notify, 0, sizeof(dma_alloc_notify));
+ dma_alloc_notify.hbm = DMA_BUFFER_ALLOC_NOTIFY;
+ dma_alloc_notify.buf_size =
+ dev->ishtp_host_dma_rx_buf_size;
+ dma_alloc_notify.buf_address =
+ dev->ishtp_host_dma_rx_buf_phys;
+ ishtp_hbm_hdr(&ishtp_hdr, len);
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&dma_alloc_notify);
+ }
+
+ break;
+
+ case HOST_ENUM_RES_CMD:
+ enum_res = (struct hbm_host_enum_response *) ishtp_msg;
+ memcpy(dev->fw_clients_map, enum_res->valid_addresses, 32);
+ if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
+ dev->hbm_state == ISHTP_HBM_ENUM_CLIENTS) {
+ dev->fw_client_presentation_num = 0;
+ dev->fw_client_index = 0;
+
+ ishtp_hbm_fw_cl_allocate(dev);
+ dev->hbm_state = ISHTP_HBM_CLIENT_PROPERTIES;
+
+ /* first property request */
+ ishtp_hbm_prop_req(dev);
+ } else {
+ dev_err(dev->devc,
+ "reset: unexpected enumeration response hbm\n");
+ ish_hw_reset(dev);
+ return;
+ }
+ break;
+
+ case HOST_STOP_RES_CMD:
+ if (dev->hbm_state != ISHTP_HBM_STOPPED)
+ dev_err(dev->devc, "unexpected stop response\n");
+
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ dev_info(dev->devc, "reset: FW stop response\n");
+ ish_hw_reset(dev);
+ break;
+
+ case CLIENT_DISCONNECT_REQ_CMD:
+ /* search for client */
+ disconnect_req =
+ (struct hbm_client_connect_request *)ishtp_msg;
+ ishtp_hbm_fw_disconnect_req(dev, disconnect_req);
+ break;
+
+ case FW_STOP_REQ_CMD:
+ dev->hbm_state = ISHTP_HBM_STOPPED;
+ break;
+
+ case DMA_BUFFER_ALLOC_RESPONSE:
+ dev->ishtp_host_dma_enabled = 1;
+ break;
+
+ case DMA_XFER:
+ dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
+ if (!dev->ishtp_host_dma_enabled) {
+ dev_err(dev->devc,
+ "DMA XFER requested but DMA is not enabled\n");
+ break;
+ }
+ ishtp_hbm_dma_xfer(dev, dma_xfer);
+ break;
+
+ case DMA_XFER_ACK:
+ dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
+ if (!dev->ishtp_host_dma_enabled ||
+ !dev->ishtp_host_dma_tx_buf) {
+ dev_err(dev->devc,
+ "DMA XFER acked but DMA Tx is not enabled\n");
+ break;
+ }
+ ishtp_hbm_dma_xfer_ack(dev, dma_xfer);
+ break;
+
+ default:
+ dev_err(dev->devc, "unknown HBM: %u\n",
+ (unsigned int)ishtp_msg->hbm_cmd);
+
+ break;
+ }
+}
+
+/**
+ * bh_hbm_work_fn() - HBM work function
+ * @work: work struct
+ *
+ * Bottom half processing work function (instead of thread handler)
+ * for processing hbm messages
+ */
+void bh_hbm_work_fn(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ishtp_device *dev;
+ unsigned char hbm[IPC_PAYLOAD_SIZE];
+
+ dev = container_of(work, struct ishtp_device, bh_hbm_work);
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ if (dev->rd_msg_fifo_head != dev->rd_msg_fifo_tail) {
+ memcpy(hbm, dev->rd_msg_fifo + dev->rd_msg_fifo_head,
+ IPC_PAYLOAD_SIZE);
+ dev->rd_msg_fifo_head =
+ (dev->rd_msg_fifo_head + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ ishtp_hbm_dispatch(dev, (struct ishtp_bus_message *)hbm);
+ } else {
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ }
+}
+
+/**
+ * recv_hbm() - Receive HBM message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: received bus message
+ *
+ * Receive and process ISHTP bus messages in ISR context. This will schedule
+ * work function to process message
+ */
+void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr)
+{
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+ struct ishtp_bus_message *ishtp_msg =
+ (struct ishtp_bus_message *)rd_msg_buf;
+ unsigned long flags;
+
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+
+ /* Flow control - handle in place */
+ if (ishtp_msg->hbm_cmd == ISHTP_FLOW_CONTROL_CMD) {
+ struct hbm_flow_control *flow_control =
+ (struct hbm_flow_control *)ishtp_msg;
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags, tx_flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (cl->host_client_id == flow_control->host_addr &&
+ cl->fw_client_id ==
+ flow_control->fw_addr) {
+ /*
+ * NOTE: Receiving an FC in the middle of
+ * sending is valid only for a counting
+ * flow-control implementation, which is not
+ * supported for now
+ */
+ if (cl->ishtp_flow_ctrl_creds)
+ dev_err(dev->devc,
+ "recv extra FC from FW client %u (host client %u) (FC count was %d)\n",
+ (unsigned int)cl->fw_client_id,
+ (unsigned int)cl->host_client_id,
+ cl->ishtp_flow_ctrl_creds);
+ else {
+ ++cl->ishtp_flow_ctrl_creds;
+ ++cl->ishtp_flow_ctrl_cnt;
+ cl->last_ipc_acked = 1;
+ spin_lock_irqsave(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ if (!list_empty(&cl->tx_list.list)) {
+ /*
+ * start sending the first queued msg
+ */
+ spin_unlock_irqrestore(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ ishtp_cl_send_msg(dev, cl);
+ } else {
+ spin_unlock_irqrestore(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ }
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+ goto eoi;
+ }
+
+ /*
+ * Some messages that are safe for ISR processing and important
+ * to be done "quickly" and in-order, go here
+ */
+ if (ishtp_msg->hbm_cmd == CLIENT_CONNECT_RES_CMD ||
+ ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_RES_CMD ||
+ ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_REQ_CMD ||
+ ishtp_msg->hbm_cmd == DMA_XFER) {
+ ishtp_hbm_dispatch(dev, ishtp_msg);
+ goto eoi;
+ }
+
+ /*
+ * All other HBMs go here.
+ * We schedule HBMs for processing serially using the system wq;
+ * possibly there will be multiple HBMs scheduled at the same time.
+ */
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ if ((dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE) ==
+ dev->rd_msg_fifo_head) {
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ dev_err(dev->devc, "BH buffer overflow, dropping HBM %u\n",
+ (unsigned int)ishtp_msg->hbm_cmd);
+ goto eoi;
+ }
+ memcpy(dev->rd_msg_fifo + dev->rd_msg_fifo_tail, ishtp_msg,
+ ishtp_hdr->length);
+ dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ schedule_work(&dev->bh_hbm_work);
+eoi:
+ return;
+}
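Deferred HBMs land in a flat byte FIFO of fixed IPC_PAYLOAD_SIZE slots: recv_hbm() advances the tail and bh_hbm_work_fn() advances the head with the same wrap-around arithmetic, and the FIFO counts as full when one more entry would make the tail meet the head. A minimal sketch of that index math:

	#include <stdbool.h>

	#define IPC_PAYLOAD_SIZE	128
	#define RD_INT_FIFO_SIZE	64

	/* Advance a head/tail index by one fixed-size slot, wrapping around */
	static unsigned int fifo_advance(unsigned int idx)
	{
		return (idx + IPC_PAYLOAD_SIZE) %
			(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
	}

	/* Full when writing one more entry would make tail catch up with head */
	static bool fifo_full(unsigned int head, unsigned int tail)
	{
		return fifo_advance(tail) == head;
	}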
+
+/**
+ * recv_fixed_cl_msg() - Receive fixed client message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: received bus message
+ *
+ * Receive and process ISHTP fixed client messages (address == 0)
+ * in ISR context
+ */
+void recv_fixed_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr)
+{
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+
+ dev->print_log(dev,
+ "%s() got fixed client msg from client #%d\n",
+ __func__, ishtp_hdr->fw_addr);
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+ if (ishtp_hdr->fw_addr == ISHTP_SYSTEM_STATE_CLIENT_ADDR) {
+ struct ish_system_states_header *msg_hdr =
+ (struct ish_system_states_header *)rd_msg_buf;
+ if (msg_hdr->cmd == SYSTEM_STATE_SUBSCRIBE)
+ ishtp_send_resume(dev);
+ /* if FW request arrived here, the system is not suspended */
+ else
+ dev_err(dev->devc, "unknown fixed client msg [%02X]\n",
+ msg_hdr->cmd);
+ }
+}
+
+/**
+ * fix_cl_hdr() - Initialize fixed client header
+ * @hdr: message header
+ * @length: length of message
+ * @cl_addr: Client address
+ *
+ * Initialize message header for fixed client
+ */
+static inline void fix_cl_hdr(struct ishtp_msg_hdr *hdr, size_t length,
+ uint8_t cl_addr)
+{
+ hdr->host_addr = 0;
+ hdr->fw_addr = cl_addr;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
+
+/*** Suspend and resume notification ***/
+
+static uint32_t current_state;
+static uint32_t supported_states = 0 | SUSPEND_STATE_BIT;
+
+/**
+ * ishtp_send_suspend() - Send suspend message to FW
+ * @dev: ISHTP device instance
+ *
+ * Send suspend message to FW. This is useful for the system freeze (non-S3) case
+ */
+void ishtp_send_suspend(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_status state_status_msg;
+ const size_t len = sizeof(struct ish_system_states_status);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&state_status_msg, 0, len);
+ state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
+ state_status_msg.supported_states = supported_states;
+ current_state |= SUSPEND_STATE_BIT;
+ dev->print_log(dev, "%s() sends SUSPEND notification\n", __func__);
+ state_status_msg.states_status = current_state;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&state_status_msg);
+}
+EXPORT_SYMBOL(ishtp_send_suspend);
+
+/**
+ * ishtp_send_resume() - Send resume message to FW
+ * @dev: ISHTP device instance
+ *
+ * Send resume message to FW. This is useful for the system freeze (non-S3) case
+ */
+void ishtp_send_resume(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_status state_status_msg;
+ const size_t len = sizeof(struct ish_system_states_status);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&state_status_msg, 0, len);
+ state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
+ state_status_msg.supported_states = supported_states;
+ current_state &= ~SUSPEND_STATE_BIT;
+ dev->print_log(dev, "%s() sends RESUME notification\n", __func__);
+ state_status_msg.states_status = current_state;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&state_status_msg);
+}
+EXPORT_SYMBOL(ishtp_send_resume);
+
+/**
+ * ishtp_query_subscribers() - Send query subscribers message
+ * @dev: ISHTP device instance
+ *
+ * Send message to query subscribers
+ */
+void ishtp_query_subscribers(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_query_subscribers query_subscribers_msg;
+ const size_t len = sizeof(struct ish_system_states_query_subscribers);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&query_subscribers_msg, 0, len);
+ query_subscribers_msg.hdr.cmd = SYSTEM_STATE_QUERY_SUBSCRIBERS;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&query_subscribers_msg);
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.h b/drivers/hid/intel-ish-hid/ishtp/hbm.h
new file mode 100644
index 000000000..7c445b203
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ISHTP bus layer messages handling
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#ifndef _ISHTP_HBM_H_
+#define _ISHTP_HBM_H_
+
+#include <linux/uuid.h>
+
+struct ishtp_device;
+struct ishtp_msg_hdr;
+struct ishtp_cl;
+
+/*
+ * Timeouts in Seconds
+ */
+#define ISHTP_INTEROP_TIMEOUT 7 /* Timeout on ready message */
+
+#define ISHTP_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
+
+/*
+ * ISHTP Version
+ */
+#define HBM_MINOR_VERSION 0
+#define HBM_MAJOR_VERSION 1
+
+/* Host bus message command opcode */
+#define ISHTP_HBM_CMD_OP_MSK 0x7f
+/* Host bus message command RESPONSE */
+#define ISHTP_HBM_CMD_RES_MSK 0x80
+
+/*
+ * ISHTP Bus Message Command IDs
+ */
+#define HOST_START_REQ_CMD 0x01
+#define HOST_START_RES_CMD 0x81
+
+#define HOST_STOP_REQ_CMD 0x02
+#define HOST_STOP_RES_CMD 0x82
+
+#define FW_STOP_REQ_CMD 0x03
+
+#define HOST_ENUM_REQ_CMD 0x04
+#define HOST_ENUM_RES_CMD 0x84
+
+#define HOST_CLIENT_PROPERTIES_REQ_CMD 0x05
+#define HOST_CLIENT_PROPERTIES_RES_CMD 0x85
+
+#define CLIENT_CONNECT_REQ_CMD 0x06
+#define CLIENT_CONNECT_RES_CMD 0x86
+
+#define CLIENT_DISCONNECT_REQ_CMD 0x07
+#define CLIENT_DISCONNECT_RES_CMD 0x87
+
+#define ISHTP_FLOW_CONTROL_CMD 0x08
+
+#define DMA_BUFFER_ALLOC_NOTIFY 0x11
+#define DMA_BUFFER_ALLOC_RESPONSE 0x91
+
+#define DMA_XFER 0x12
+#define DMA_XFER_ACK 0x92
+
+/*
+ * ISHTP Stop Reason
+ * used by hbm_host_stop_request.reason
+ */
+#define DRIVER_STOP_REQUEST 0x00
+
+/*
+ * ISHTP BUS Interface Section
+ */
+struct ishtp_msg_hdr {
+ uint32_t fw_addr:8;
+ uint32_t host_addr:8;
+ uint32_t length:9;
+ uint32_t reserved:6;
+ uint32_t msg_complete:1;
+} __packed;
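The bit-fields above (8 + 8 + 9 + 6 + 1 = 32 bits) pack the whole header into the single dword returned by ishtp_read_hdr(), which is why the receive path can cast that dword straight to this struct. As an illustration, a complete 44-byte fragment from a hypothetical host client 2 to fw client 5 would be described by:

	/* Illustrative header values only; real addresses come from the client */
	struct ishtp_msg_hdr hdr = {
		.fw_addr	= 5,	/* hypothetical fw client address */
		.host_addr	= 2,	/* hypothetical host client address */
		.length		= 44,	/* payload bytes in this fragment */
		.reserved	= 0,
		.msg_complete	= 1,	/* last (or only) fragment */
	};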
+
+struct ishtp_bus_message {
+ uint8_t hbm_cmd;
+ uint8_t data[];
+} __packed;
+
+/**
+ * struct ishtp_hbm_cl_cmd - client specific host bus command
+ * (CONNECT, DISCONNECT, and FLOW CONTROL)
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @data - command specific data
+ */
+struct ishtp_hbm_cl_cmd {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t data;
+};
+
+struct hbm_version {
+ uint8_t minor_version;
+ uint8_t major_version;
+} __packed;
+
+struct hbm_host_version_request {
+ uint8_t hbm_cmd;
+ uint8_t reserved;
+ struct hbm_version host_version;
+} __packed;
+
+struct hbm_host_version_response {
+ uint8_t hbm_cmd;
+ uint8_t host_version_supported;
+ struct hbm_version fw_max_version;
+} __packed;
+
+struct hbm_host_stop_request {
+ uint8_t hbm_cmd;
+ uint8_t reason;
+ uint8_t reserved[2];
+} __packed;
+
+struct hbm_host_stop_response {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+} __packed;
+
+struct hbm_host_enum_request {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+} __packed;
+
+struct hbm_host_enum_response {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+ uint8_t valid_addresses[32];
+} __packed;
+
+struct ishtp_client_properties {
+ guid_t protocol_name;
+ uint8_t protocol_version;
+ uint8_t max_number_of_connections;
+ uint8_t fixed_address;
+ uint8_t single_recv_buf;
+ uint32_t max_msg_length;
+ uint8_t dma_hdr_len;
+#define ISHTP_CLIENT_DMA_ENABLED 0x80
+ uint8_t reserved4;
+ uint8_t reserved5;
+ uint8_t reserved6;
+} __packed;
+
+struct hbm_props_request {
+ uint8_t hbm_cmd;
+ uint8_t address;
+ uint8_t reserved[2];
+} __packed;
+
+struct hbm_props_response {
+ uint8_t hbm_cmd;
+ uint8_t address;
+ uint8_t status;
+ uint8_t reserved[1];
+ struct ishtp_client_properties client_properties;
+} __packed;
+
+/**
+ * struct hbm_client_connect_request - connect/disconnect request
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @reserved
+ */
+struct hbm_client_connect_request {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t reserved;
+} __packed;
+
+/**
+ * struct hbm_client_connect_response - connect/disconnect response
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @status - status of the request
+ */
+struct hbm_client_connect_response {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t status;
+} __packed;
+
+
+#define ISHTP_FC_MESSAGE_RESERVED_LENGTH 5
+
+struct hbm_flow_control {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t reserved[ISHTP_FC_MESSAGE_RESERVED_LENGTH];
+} __packed;
+
+struct dma_alloc_notify {
+ uint8_t hbm;
+ uint8_t status;
+ uint8_t reserved[2];
+ uint32_t buf_size;
+ uint64_t buf_address;
+ /* [...] More size/address pairs may follow */
+} __packed;
+
+struct dma_xfer_hbm {
+ uint8_t hbm;
+ uint8_t fw_client_id;
+ uint8_t host_client_id;
+ uint8_t reserved;
+ uint64_t msg_addr;
+ uint32_t msg_length;
+ uint32_t reserved2;
+} __packed;
+
+/* System state */
+#define ISHTP_SYSTEM_STATE_CLIENT_ADDR 13
+
+#define SYSTEM_STATE_SUBSCRIBE 0x1
+#define SYSTEM_STATE_STATUS 0x2
+#define SYSTEM_STATE_QUERY_SUBSCRIBERS 0x3
+#define SYSTEM_STATE_STATE_CHANGE_REQ 0x4
+/* indicates suspend and resume states */
+#define SUSPEND_STATE_BIT (1<<1)
+
+struct ish_system_states_header {
+ uint32_t cmd;
+ uint32_t cmd_status; /* responses will have this set */
+} __packed;
+
+struct ish_system_states_subscribe {
+ struct ish_system_states_header hdr;
+ uint32_t states;
+} __packed;
+
+struct ish_system_states_status {
+ struct ish_system_states_header hdr;
+ uint32_t supported_states;
+ uint32_t states_status;
+} __packed;
+
+struct ish_system_states_query_subscribers {
+ struct ish_system_states_header hdr;
+} __packed;
+
+struct ish_system_states_state_change_req {
+ struct ish_system_states_header hdr;
+ uint32_t requested_states;
+ uint32_t states_status;
+} __packed;
+
+/**
+ * enum ishtp_hbm_state - host bus message protocol state
+ *
+ * @ISHTP_HBM_IDLE : protocol not started
+ * @ISHTP_HBM_START : start request message was sent
+ * @ISHTP_HBM_STARTED : start response was received
+ * @ISHTP_HBM_ENUM_CLIENTS : enumeration request was sent
+ * @ISHTP_HBM_CLIENT_PROPERTIES : acquiring clients properties
+ * @ISHTP_HBM_WORKING : all client properties were received, protocol is ready
+ * @ISHTP_HBM_STOPPED : stop request was sent or fw stop request was received
+ */
+enum ishtp_hbm_state {
+ ISHTP_HBM_IDLE = 0,
+ ISHTP_HBM_START,
+ ISHTP_HBM_STARTED,
+ ISHTP_HBM_ENUM_CLIENTS,
+ ISHTP_HBM_CLIENT_PROPERTIES,
+ ISHTP_HBM_WORKING,
+ ISHTP_HBM_STOPPED,
+};
+
+static inline void ishtp_hbm_hdr(struct ishtp_msg_hdr *hdr, size_t length)
+{
+ hdr->host_addr = 0;
+ hdr->fw_addr = 0;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
+
+int ishtp_hbm_start_req(struct ishtp_device *dev);
+int ishtp_hbm_start_wait(struct ishtp_device *dev);
+int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
+ struct ishtp_cl *cl);
+int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
+int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
+void ishtp_hbm_enum_clients_req(struct ishtp_device *dev);
+void bh_hbm_work_fn(struct work_struct *work);
+void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr);
+void recv_fixed_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr);
+void ishtp_hbm_dispatch(struct ishtp_device *dev,
+ struct ishtp_bus_message *hdr);
+
+void ishtp_query_subscribers(struct ishtp_device *dev);
+
+/* Exported I/F */
+void ishtp_send_suspend(struct ishtp_device *dev);
+void ishtp_send_resume(struct ishtp_device *dev);
+
+#endif /* _ISHTP_HBM_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
new file mode 100644
index 000000000..02a00cc2d
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Initialization protocol for ISHTP driver
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "ishtp-dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * ishtp_dev_state_str() - Convert to string format
+ * @state: state to convert
+ *
+ * Convert state to string for prints
+ *
+ * Return: character pointer to converted string
+ */
+const char *ishtp_dev_state_str(int state)
+{
+ switch (state) {
+ case ISHTP_DEV_INITIALIZING:
+ return "INITIALIZING";
+ case ISHTP_DEV_INIT_CLIENTS:
+ return "INIT_CLIENTS";
+ case ISHTP_DEV_ENABLED:
+ return "ENABLED";
+ case ISHTP_DEV_RESETTING:
+ return "RESETTING";
+ case ISHTP_DEV_DISABLED:
+ return "DISABLED";
+ case ISHTP_DEV_POWER_DOWN:
+ return "POWER_DOWN";
+ case ISHTP_DEV_POWER_UP:
+ return "POWER_UP";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * ishtp_device_init() - ishtp device init
+ * @dev: ISHTP device instance
+ *
+ * After the ISHTP device is allocated, this function is used to initialize
+ * each field, which includes spin locks, work structs and lists
+ */
+void ishtp_device_init(struct ishtp_device *dev)
+{
+ dev->dev_state = ISHTP_DEV_INITIALIZING;
+ INIT_LIST_HEAD(&dev->cl_list);
+ INIT_LIST_HEAD(&dev->device_list);
+ dev->rd_msg_fifo_head = 0;
+ dev->rd_msg_fifo_tail = 0;
+ spin_lock_init(&dev->rd_msg_spinlock);
+
+ init_waitqueue_head(&dev->wait_hbm_recvd_msg);
+ spin_lock_init(&dev->read_list_spinlock);
+ spin_lock_init(&dev->device_lock);
+ spin_lock_init(&dev->device_list_lock);
+ spin_lock_init(&dev->cl_list_lock);
+ spin_lock_init(&dev->fw_clients_lock);
+ INIT_WORK(&dev->bh_hbm_work, bh_hbm_work_fn);
+
+ bitmap_zero(dev->host_clients_map, ISHTP_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving client ID 0 for ISHTP Bus Message communications
+ */
+ bitmap_set(dev->host_clients_map, 0, 1);
+
+ INIT_LIST_HEAD(&dev->read_list.list);
+
+}
+EXPORT_SYMBOL(ishtp_device_init);
+
+/**
+ * ishtp_start() - Start ISH processing
+ * @dev: ISHTP device instance
+ *
+ * Start ISHTP processing by sending query subscriber message
+ *
+ * Return: 0 on success else -ENODEV
+ */
+int ishtp_start(struct ishtp_device *dev)
+{
+ if (ishtp_hbm_start_wait(dev)) {
+ dev_err(dev->devc, "HBM hasn't started");
+ goto err;
+ }
+
+ /* suspend & resume notification - send QUERY_SUBSCRIBERS msg */
+ ishtp_query_subscribers(dev);
+
+ return 0;
+err:
+ dev_err(dev->devc, "link layer initialization failed.\n");
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ return -ENODEV;
+}
+EXPORT_SYMBOL(ishtp_start);
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
new file mode 100644
index 000000000..1cc6364aa
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Most ISHTP provider device and ISHTP logic declarations
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#ifndef _ISHTP_DEV_H_
+#define _ISHTP_DEV_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include "bus.h"
+#include "hbm.h"
+
+#define IPC_PAYLOAD_SIZE 128
+#define ISHTP_RD_MSG_BUF_SIZE IPC_PAYLOAD_SIZE
+#define IPC_FULL_MSG_SIZE 132
+
+/* Number of messages to be held in ISR->BH FIFO */
+#define RD_INT_FIFO_SIZE 64
+
+/*
+ * Number of IPC messages to be held in Tx FIFO, to be sent by ISR -
+ * Tx complete interrupt or RX_COMPLETE handler
+ */
+#define IPC_TX_FIFO_SIZE 512
+
+/*
+ * Number of Maximum ISHTP Clients
+ */
+#define ISHTP_CLIENTS_MAX 256
+
+/*
+ * Number of File descriptors/handles
+ * that can be opened to the driver.
+ *
+ * Limit to 255: 256 Total Clients
+ * minus internal client for ISHTP Bus Messages
+ */
+#define ISHTP_MAX_OPEN_HANDLE_COUNT (ISHTP_CLIENTS_MAX - 1)
+
+/* Internal Clients Number */
+#define ISHTP_HOST_CLIENT_ID_ANY (-1)
+#define ISHTP_HBM_HOST_CLIENT_ID 0
+
+#define MAX_DMA_DELAY 20
+
+/* ISHTP device states */
+enum ishtp_dev_state {
+ ISHTP_DEV_INITIALIZING = 0,
+ ISHTP_DEV_INIT_CLIENTS,
+ ISHTP_DEV_ENABLED,
+ ISHTP_DEV_RESETTING,
+ ISHTP_DEV_DISABLED,
+ ISHTP_DEV_POWER_DOWN,
+ ISHTP_DEV_POWER_UP
+};
+const char *ishtp_dev_state_str(int state);
+
+struct ishtp_cl;
+
+/**
+ * struct ishtp_fw_client - representation of fw client
+ *
+ * @props: client properties
+ * @client_id: fw client id
+ */
+struct ishtp_fw_client {
+ struct ishtp_client_properties props;
+ uint8_t client_id;
+};
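
A lookup by firmware client id over the enumerated fw_clients array (kept in struct ishtp_device further down) might look as follows; the helper name is hypothetical and locking of fw_clients_lock is elided for brevity:

static struct ishtp_fw_client *example_find_fw_client(struct ishtp_device *dev,
						       uint8_t client_id)
{
	unsigned int i;

	for (i = 0; i < dev->fw_clients_num; i++)
		if (dev->fw_clients[i].client_id == client_id)
			return &dev->fw_clients[i];

	return NULL;	/* not (yet) enumerated */
}
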
+
+/*
+ * Control info for IPC messages in the ISHTP/IPC sending FIFO -
+ * a list entry with an inline data buffer.
+ * This structure is filled with parameters submitted by the
+ * caller glue layer.
+ * 'buf' may point to an external buffer or to 'inline_data';
+ * 'offset' is initialized to 0 on submission.
+ *
+ * 'ipc_send_compl' is intended for use by clients that send fragmented
+ * messages. It is called whenever a fragment has been written to the
+ * IPC message registers.
+ * If more fragments remain, the callback sends the next one; with the
+ * last fragment it sends the appropriate ISHTP "message-complete" flag.
+ * It then removes the outstanding message
+ * (marks the outstanding buffer as available).
+ * If counting flow control is in effect and more flow-control credits
+ * are available, it may put the next client message queued in the cl
+ * structure up for IPC processing.
+ */
+struct wr_msg_ctl_info {
+ /* Will be called with 'ipc_send_compl_prm' as parameter */
+ void (*ipc_send_compl)(void *);
+
+ void *ipc_send_compl_prm;
+ size_t length;
+ struct list_head link;
+ unsigned char inline_data[IPC_FULL_MSG_SIZE];
+};
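
To make the comment above concrete, here is a hedged sketch of what an 'ipc_send_compl' callback for fragmented messages could look like; struct frag_ctx and both helper functions are illustrative names, not part of the driver:

struct frag_ctx {
	unsigned int frags_left;	/* fragments still to be sent */
};

static void send_next_fragment(struct frag_ctx *ctx, bool msg_complete);
static void mark_buffer_available(struct frag_ctx *ctx);

/* Called each time a fragment has been written to the IPC msg registers. */
static void example_ipc_send_compl(void *prm)
{
	struct frag_ctx *ctx = prm;

	if (ctx->frags_left) {
		/* Push the next fragment; flag "message complete" on the last. */
		send_next_fragment(ctx, ctx->frags_left == 1);
		ctx->frags_left--;
	} else {
		/* Everything sent: the outstanding buffer becomes available. */
		mark_buffer_available(ctx);
	}
}
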
+
+/*
+ * The ISHTP layer talks to the hardware IPC messaging layer through
+ * the following callbacks
+ */
+struct ishtp_hw_ops {
+ int (*hw_reset)(struct ishtp_device *dev);
+ int (*ipc_reset)(struct ishtp_device *dev);
+ uint32_t (*ipc_get_header)(struct ishtp_device *dev, int length,
+ int busy);
+ int (*write)(struct ishtp_device *dev,
+ void (*ipc_send_compl)(void *), void *ipc_send_compl_prm,
+ unsigned char *msg, int length);
+ uint32_t (*ishtp_read_hdr)(const struct ishtp_device *dev);
+ int (*ishtp_read)(struct ishtp_device *dev, unsigned char *buffer,
+ unsigned long buffer_length);
+ uint32_t (*get_fw_status)(struct ishtp_device *dev);
+ void (*sync_fw_clock)(struct ishtp_device *dev);
+};
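
A transport back-end is expected to provide this table; a hedged sketch of the wiring, with illustrative function names and most prototypes elided:

static int example_hw_reset(struct ishtp_device *dev);
static uint32_t example_read_hdr(const struct ishtp_device *dev);
/* ...prototypes for the remaining callbacks elided... */

static const struct ishtp_hw_ops example_hw_ops = {
	.hw_reset	= example_hw_reset,
	.ishtp_read_hdr	= example_read_hdr,
	/* .ipc_reset, .ipc_get_header, .write, .ishtp_read,
	 * .get_fw_status and .sync_fw_clock are filled in likewise. */
};

The glue layer would then point dev->ops at such a table before starting the device.
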
+
+/**
+ * struct ishtp_device - ISHTP private device struct
+ */
+struct ishtp_device {
+ struct device *devc; /* pointer to lowest device */
+ struct pci_dev *pdev; /* PCI device to get device ids */
+
+ /* waitq for waiting for suspend response */
+ wait_queue_head_t suspend_wait;
+ bool suspend_flag; /* Suspend is active */
+
+ /* waitq for waiting for resume response */
+ wait_queue_head_t resume_wait;
+ bool resume_flag; /* Resume is active */
+
+ /*
+ * lock for the device, for everything that doesn't have
+ * a dedicated spinlock
+ */
+ spinlock_t device_lock;
+
+ bool recvd_hw_ready;
+ struct hbm_version version;
+ int transfer_path; /* Choice of transfer path: IPC or DMA */
+
+ /* ishtp device states */
+ enum ishtp_dev_state dev_state;
+ enum ishtp_hbm_state hbm_state;
+
+ /* driver read queue */
+ struct ishtp_cl_rb read_list;
+ spinlock_t read_list_spinlock;
+
+ /* list of ishtp_cl's */
+ struct list_head cl_list;
+ spinlock_t cl_list_lock;
+ long open_handle_count;
+
+ /* List of bus devices */
+ struct list_head device_list;
+ spinlock_t device_list_lock;
+
+ /* waiting queues for receive message from FW */
+ wait_queue_head_t wait_hw_ready;
+ wait_queue_head_t wait_hbm_recvd_msg;
+
+ /* FIFO for input messages for BH processing */
+ unsigned char rd_msg_fifo[RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE];
+ unsigned int rd_msg_fifo_head, rd_msg_fifo_tail;
+ spinlock_t rd_msg_spinlock;
+ struct work_struct bh_hbm_work;
+
+ /* IPC write queue */
+ struct list_head wr_processing_list, wr_free_list;
+ /* For both processing list and free list */
+ spinlock_t wr_processing_spinlock;
+
+ struct ishtp_fw_client *fw_clients; /* Note: memory has to be allocated */
+ DECLARE_BITMAP(fw_clients_map, ISHTP_CLIENTS_MAX);
+ DECLARE_BITMAP(host_clients_map, ISHTP_CLIENTS_MAX);
+ uint8_t fw_clients_num;
+ uint8_t fw_client_presentation_num;
+ uint8_t fw_client_index;
+ spinlock_t fw_clients_lock;
+
+ /* TX DMA buffers and slots */
+ int ishtp_host_dma_enabled;
+ void *ishtp_host_dma_tx_buf;
+ unsigned int ishtp_host_dma_tx_buf_size;
+ uint64_t ishtp_host_dma_tx_buf_phys;
+ int ishtp_dma_num_slots;
+
+ /* map of 4k blocks in Tx dma buf: 0-free, 1-used */
+ uint8_t *ishtp_dma_tx_map;
+ spinlock_t ishtp_dma_tx_lock;
+
+ /* RX DMA buffers and slots */
+ void *ishtp_host_dma_rx_buf;
+ unsigned int ishtp_host_dma_rx_buf_size;
+ uint64_t ishtp_host_dma_rx_buf_phys;
+
+ /* Dump to trace buffers if enabled */
+ __printf(2, 3) void (*print_log)(struct ishtp_device *dev,
+ const char *format, ...);
+
+ /* Debug stats */
+ unsigned int ipc_rx_cnt;
+ unsigned long long ipc_rx_bytes_cnt;
+ unsigned int ipc_tx_cnt;
+ unsigned long long ipc_tx_bytes_cnt;
+
+ const struct ishtp_hw_ops *ops;
+ size_t mtu;
+ uint32_t ishtp_msg_hdr;
+ char hw[] __aligned(sizeof(void *));
+};
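
The rd_msg_fifo/rd_msg_fifo_head/rd_msg_fifo_tail trio above forms the ISR-to-bottom-half ring. Below is a sketch of how a producer might enqueue one message, assuming byte offsets that advance in IPC_PAYLOAD_SIZE strides; the helper name is hypothetical:

static void example_queue_rd_msg(struct ishtp_device *dev,
				 const unsigned char *msg, size_t len)
{
	unsigned long flags;
	const unsigned int fifo_bytes = RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE;

	spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
	/* Drop the message if the ring is full (one slot kept empty). */
	if ((dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) % fifo_bytes !=
			dev->rd_msg_fifo_head) {
		memcpy(dev->rd_msg_fifo + dev->rd_msg_fifo_tail, msg,
		       min_t(size_t, len, IPC_PAYLOAD_SIZE));
		dev->rd_msg_fifo_tail =
			(dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) % fifo_bytes;
		schedule_work(&dev->bh_hbm_work);	/* wake the BH worker */
	}
	spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
}
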
+
+static inline unsigned long ishtp_secs_to_jiffies(unsigned long sec)
+{
+ return msecs_to_jiffies(sec * MSEC_PER_SEC);
+}
+
+/*
+ * Register Access Function
+ */
+static inline int ish_ipc_reset(struct ishtp_device *dev)
+{
+ return dev->ops->ipc_reset(dev);
+}
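
As an example of where ishtp_secs_to_jiffies() comes in handy, a hedged wait-for-hardware-ready pattern; the 10-second timeout and the function name are assumptions:

static int example_wait_hw_ready(struct ishtp_device *dev)
{
	long rv;

	rv = wait_event_interruptible_timeout(dev->wait_hw_ready,
					      dev->recvd_hw_ready,
					      ishtp_secs_to_jiffies(10));
	if (rv < 0)
		return rv;		/* interrupted */
	return rv ? 0 : -ETIMEDOUT;	/* 0 remaining jiffies == timeout */
}
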
+
+/* Exported functions */
+void ishtp_device_init(struct ishtp_device *dev);
+int ishtp_start(struct ishtp_device *dev);
+
+#endif /*_ISHTP_DEV_H_*/