author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/hid/intel-ish-hid
parent     Initial commit. (diff)

Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/hid/intel-ish-hid')
-rw-r--r--  drivers/hid/intel-ish-hid/Kconfig                    34
-rw-r--r--  drivers/hid/intel-ish-hid/Makefile                   26
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h         220
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/hw-ish.h               93
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/ipc.c                1016
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c             414
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-fw-loader.c        1066
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid-client.c        986
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid.c               273
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp-hid.h               178
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c               932
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.h                76
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/client-buffers.c    297
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/client.c           1125
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/client.h            155
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/dma-if.c            176
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/hbm.c               990
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/hbm.h               314
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/init.c              106
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h         238
20 files changed, 8715 insertions, 0 deletions
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
new file mode 100644
index 000000000..689da84a5
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Intel ISH HID support"
+ depends on (X86_64 || COMPILE_TEST) && PCI
+
+config INTEL_ISH_HID
+ tristate "Intel Integrated Sensor Hub"
+ default n
+ depends on X86
+ select HID
+ help
+ The Integrated Sensor Hub (ISH) enables offloading of sensor polling
+ and algorithm processing to a dedicated low power processor in the
+ chipset. This allows the core processor to go into low power modes
+ more often, resulting in increased battery life. The current
+ processors that support ISH are: Cherrytrail, Skylake, Broxton and
+ Kaby Lake.
+
+ Say Y here if you want to support Intel ISH. If unsure, say N.
+
+config INTEL_ISH_FIRMWARE_DOWNLOADER
+ tristate "Host Firmware Load feature for Intel ISH"
+ depends on INTEL_ISH_HID
+ depends on X86
+ help
+ The Integrated Sensor Hub (ISH) enables the kernel to offload
+ sensor polling and algorithm processing to a dedicated low power
+ processor in the chipset.
+
+ The Host Firmware Load feature adds support for loading the ISH
+ firmware from the host file system at boot.
+
+ Say M here if you want to support Host Firmware Loading feature
+ for Intel ISH. If unsure, say N.
+endmenu
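
For reference, a minimal .config fragment that enables both options above as modules (one possible choice; =y builds also work, subject to the stated dependencies):

	CONFIG_INTEL_ISH_HID=m
	CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER=m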
diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile
new file mode 100644
index 000000000..f0a82b1c7
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile - Intel ISH HID drivers
+# Copyright (c) 2014-2016, Intel Corporation.
+#
+#
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ishtp.o
+intel-ishtp-objs := ishtp/init.o
+intel-ishtp-objs += ishtp/hbm.o
+intel-ishtp-objs += ishtp/client.o
+intel-ishtp-objs += ishtp/bus.o
+intel-ishtp-objs += ishtp/dma-if.o
+intel-ishtp-objs += ishtp/client-buffers.o
+
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-ipc.o
+intel-ish-ipc-objs := ipc/ipc.o
+intel-ish-ipc-objs += ipc/pci-ish.o
+
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ishtp-hid.o
+intel-ishtp-hid-objs := ishtp-hid.o
+intel-ishtp-hid-objs += ishtp-hid-client.o
+
+obj-$(CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ishtp-loader.o
+intel-ishtp-loader-objs += ishtp-fw-loader.o
+
+ccflags-y += -I $(srctree)/$(src)/ishtp
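
As a usage note: with the options above enabled, this directory builds the intel-ishtp, intel-ish-ipc, intel-ishtp-hid and (optionally) intel-ishtp-loader modules; from a configured kernel tree they can also be built in isolation with the standard kbuild invocation "make M=drivers/hid/intel-ish-hid modules".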
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h b/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
new file mode 100644
index 000000000..4db01bbef
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ISH registers definitions
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ */
+
+#ifndef _ISHTP_ISH_REGS_H_
+#define _ISHTP_ISH_REGS_H_
+
+
+/*** IPC PCI Offsets and sizes ***/
+/* ISH IPC Base Address */
+#define IPC_REG_BASE 0x0000
+/* Peripheral Interrupt Status Register */
+#define IPC_REG_PISR_CHV_AB (IPC_REG_BASE + 0x00)
+/* Peripheral Interrupt Mask Register */
+#define IPC_REG_PIMR_CHV_AB (IPC_REG_BASE + 0x04)
+/*BXT, CHV_K0*/
+/*Peripheral Interrupt Status Register */
+#define IPC_REG_PISR_BXT (IPC_REG_BASE + 0x0C)
+/*Peripheral Interrupt Mask Register */
+#define IPC_REG_PIMR_BXT (IPC_REG_BASE + 0x08)
+/***********************************/
+/* ISH Host Firmware status Register */
+#define IPC_REG_ISH_HOST_FWSTS (IPC_REG_BASE + 0x34)
+/* Host Communication Register */
+#define IPC_REG_HOST_COMM (IPC_REG_BASE + 0x38)
+/* Reset register */
+#define IPC_REG_ISH_RST (IPC_REG_BASE + 0x44)
+
+/* Inbound doorbell register Host to ISH */
+#define IPC_REG_HOST2ISH_DRBL (IPC_REG_BASE + 0x48)
+/* Outbound doorbell register ISH to Host */
+#define IPC_REG_ISH2HOST_DRBL (IPC_REG_BASE + 0x54)
+/* ISH to HOST message registers */
+#define IPC_REG_ISH2HOST_MSG (IPC_REG_BASE + 0x60)
+/* HOST to ISH message registers */
+#define IPC_REG_HOST2ISH_MSG (IPC_REG_BASE + 0xE0)
+/* REMAP2 to enable DMA (D3 RCR) */
+#define IPC_REG_ISH_RMP2 (IPC_REG_BASE + 0x368)
+
+#define IPC_REG_MAX (IPC_REG_BASE + 0x400)
+
+/*** register bits - HISR ***/
+/* bit corresponding to the HOST2ISH interrupt in PISR and PIMR registers */
+#define IPC_INT_HOST2ISH_BIT (1<<0)
+/***********************************/
+/*CHV_A0, CHV_B0*/
+/* bit corresponding to the ISH2HOST interrupt in PISR and PIMR registers */
+#define IPC_INT_ISH2HOST_BIT_CHV_AB (1<<3)
+/*BXT, CHV_K0*/
+/* bit corresponding to the ISH2HOST interrupt in PISR and PIMR registers */
+#define IPC_INT_ISH2HOST_BIT_BXT (1<<0)
+/***********************************/
+
+/* bit corresponding to the ISH2HOST busy clear interrupt in PIMR register */
+#define IPC_INT_ISH2HOST_CLR_MASK_BIT (1<<11)
+
+/* offset of ISH2HOST busy clear interrupt in IPC_BUSY_CLR register */
+#define IPC_INT_ISH2HOST_CLR_OFFS (0)
+
+/* bit corresponding to the ISH2HOST busy clear interrupt in IPC_BUSY_CLR register */
+#define IPC_INT_ISH2HOST_CLR_BIT (1<<IPC_INT_ISH2HOST_CLR_OFFS)
+
+/* bit corresponding to the busy bit in doorbell registers */
+#define IPC_DRBL_BUSY_OFFS (31)
+#define IPC_DRBL_BUSY_BIT (1<<IPC_DRBL_BUSY_OFFS)
+
+#define IPC_HOST_OWNS_MSG_OFFS (30)
+
+/*
+ * A0: bit means that host owns MSGnn registers and is reading them.
+ * ISH FW may not write to them
+ */
+#define IPC_HOST_OWNS_MSG_BIT (1<<IPC_HOST_OWNS_MSG_OFFS)
+
+/*
+ * Host status bits (HOSTCOMM)
+ */
+/* bit corresponding to the host ready bit in Host Status Register (HOST_COMM) */
+#define IPC_HOSTCOMM_READY_OFFS (7)
+#define IPC_HOSTCOMM_READY_BIT (1<<IPC_HOSTCOMM_READY_OFFS)
+
+/***********************************/
+/*CHV_A0, CHV_B0*/
+#define IPC_HOSTCOMM_INT_EN_OFFS_CHV_AB (31)
+#define IPC_HOSTCOMM_INT_EN_BIT_CHV_AB \
+ (1<<IPC_HOSTCOMM_INT_EN_OFFS_CHV_AB)
+/*BXT, CHV_K0*/
+#define IPC_PIMR_INT_EN_OFFS_BXT (0)
+#define IPC_PIMR_INT_EN_BIT_BXT (1<<IPC_PIMR_INT_EN_OFFS_BXT)
+
+#define IPC_HOST2ISH_BUSYCLEAR_MASK_OFFS_BXT (8)
+#define IPC_HOST2ISH_BUSYCLEAR_MASK_BIT \
+ (1<<IPC_HOST2ISH_BUSYCLEAR_MASK_OFFS_BXT)
+/***********************************/
+/*
+ * Both Host and ISH have ILUP at bit 0;
+ * this bit corresponds to the host ready bit in both status registers.
+ */
+#define IPC_ILUP_OFFS (0)
+#define IPC_ILUP_BIT (1<<IPC_ILUP_OFFS)
+
+/*
+ * ISH FW status bits in ISH FW Status Register
+ */
+#define IPC_ISH_FWSTS_SHIFT 12
+#define IPC_ISH_FWSTS_MASK GENMASK(15, 12)
+#define IPC_GET_ISH_FWSTS(status) \
+ (((status) & IPC_ISH_FWSTS_MASK) >> IPC_ISH_FWSTS_SHIFT)
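
As a worked example (illustrative value only): a FWSTS register reading of 0x00007086 has bits 15:12 equal to 0x7, so IPC_GET_ISH_FWSTS() yields 7, which matches FWSTS_FW_IS_RUNNING in the ISH FW status enum defined in hw-ish.h further below.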
+
+/*
+ * FW status bits (relevant)
+ */
+#define IPC_FWSTS_ILUP 0x1
+#define IPC_FWSTS_ISHTP_UP (1<<1)
+#define IPC_FWSTS_DMA0 (1<<16)
+#define IPC_FWSTS_DMA1 (1<<17)
+#define IPC_FWSTS_DMA2 (1<<18)
+#define IPC_FWSTS_DMA3 (1<<19)
+
+#define IPC_ISH_IN_DMA \
+ (IPC_FWSTS_DMA0 | IPC_FWSTS_DMA1 | IPC_FWSTS_DMA2 | IPC_FWSTS_DMA3)
+
+/* bit corresponds host ready bit in ISH FW Status Register */
+#define IPC_ISH_ISHTP_READY_OFFS (1)
+#define IPC_ISH_ISHTP_READY_BIT (1<<IPC_ISH_ISHTP_READY_OFFS)
+
+#define IPC_RMP2_DMA_ENABLED 0x1 /* Value to enable DMA, per D3 RCR */
+
+#define IPC_MSG_MAX_SIZE 0x80
+
+
+#define IPC_HEADER_LENGTH_MASK 0x03FF
+#define IPC_HEADER_PROTOCOL_MASK 0x0F
+#define IPC_HEADER_MNG_CMD_MASK 0x0F
+
+#define IPC_HEADER_LENGTH_OFFSET 0
+#define IPC_HEADER_PROTOCOL_OFFSET 10
+#define IPC_HEADER_MNG_CMD_OFFSET 16
+
+#define IPC_HEADER_GET_LENGTH(drbl_reg) \
+ (((drbl_reg) >> IPC_HEADER_LENGTH_OFFSET)&IPC_HEADER_LENGTH_MASK)
+#define IPC_HEADER_GET_PROTOCOL(drbl_reg) \
+ (((drbl_reg) >> IPC_HEADER_PROTOCOL_OFFSET)&IPC_HEADER_PROTOCOL_MASK)
+#define IPC_HEADER_GET_MNG_CMD(drbl_reg) \
+ (((drbl_reg) >> IPC_HEADER_MNG_CMD_OFFSET)&IPC_HEADER_MNG_CMD_MASK)
+
+#define IPC_IS_BUSY(drbl_reg) \
+ (((drbl_reg)&IPC_DRBL_BUSY_BIT) == ((uint32_t)IPC_DRBL_BUSY_BIT))
+
+/***********************************/
+/*CHV_A0, CHV_B0*/
+#define IPC_INT_FROM_ISH_TO_HOST_CHV_AB(drbl_reg) \
+ (((drbl_reg)&IPC_INT_ISH2HOST_BIT_CHV_AB) == \
+ ((u32)IPC_INT_ISH2HOST_BIT_CHV_AB))
+/*BXT, CHV_K0*/
+#define IPC_INT_FROM_ISH_TO_HOST_BXT(drbl_reg) \
+ (((drbl_reg)&IPC_INT_ISH2HOST_BIT_BXT) == \
+ ((u32)IPC_INT_ISH2HOST_BIT_BXT))
+/***********************************/
+
+#define IPC_BUILD_HEADER(length, protocol, busy) \
+ (((busy)<<IPC_DRBL_BUSY_OFFS) | \
+ ((protocol) << IPC_HEADER_PROTOCOL_OFFSET) | \
+ ((length)<<IPC_HEADER_LENGTH_OFFSET))
+
+#define IPC_BUILD_MNG_MSG(cmd, length) \
+ (((1)<<IPC_DRBL_BUSY_OFFS)| \
+ ((IPC_PROTOCOL_MNG)<<IPC_HEADER_PROTOCOL_OFFSET)| \
+ ((cmd)<<IPC_HEADER_MNG_CMD_OFFSET)| \
+ ((length)<<IPC_HEADER_LENGTH_OFFSET))
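
To make the doorbell bit layout above concrete, here is a small standalone sketch (a hypothetical user-space program, not driver code) that packs a management-protocol doorbell word the same way IPC_BUILD_MNG_MSG() does and unpacks it with the same shift/mask scheme as the IPC_HEADER_GET_* macros:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* MNG_SYNC_FW_CLOCK (5), IPC_PROTOCOL_MNG (3), 8-byte payload, busy bit set */
		uint32_t drbl = (1u << 31) | (5u << 16) | (3u << 10) | (8u << 0);

		printf("length=%u protocol=%u cmd=%u busy=%u\n",
		       (drbl >> 0) & 0x03FF,	/* IPC_HEADER_GET_LENGTH   */
		       (drbl >> 10) & 0x0F,	/* IPC_HEADER_GET_PROTOCOL */
		       (drbl >> 16) & 0x0F,	/* IPC_HEADER_GET_MNG_CMD  */
		       (drbl >> 31) & 0x1);	/* IPC_IS_BUSY             */
		return 0;
	}

This prints "length=8 protocol=3 cmd=5 busy=1", i.e. the doorbell value ipc_send_mng_msg() would build for a clock-sync message carrying an 8-byte timestamp payload.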
+
+
+#define IPC_SET_HOST_READY(host_status) \
+ ((host_status) |= (IPC_HOSTCOMM_READY_BIT))
+
+#define IPC_SET_HOST_ILUP(host_status) \
+ ((host_status) |= (IPC_ILUP_BIT))
+
+#define IPC_CLEAR_HOST_READY(host_status) \
+ ((host_status) ^= (IPC_HOSTCOMM_READY_BIT))
+
+#define IPC_CLEAR_HOST_ILUP(host_status) \
+ ((host_status) ^= (IPC_ILUP_BIT))
+
+/* todo - temp until PIMR HW ready */
+#define IPC_HOST_BUSY_READING_OFFS 6
+
+/* bit corresponding to the host busy-reading bit in Host Status Register (HOST_COMM) */
+#define IPC_HOST_BUSY_READING_BIT (1<<IPC_HOST_BUSY_READING_OFFS)
+
+#define IPC_SET_HOST_BUSY_READING(host_status) \
+ ((host_status) |= (IPC_HOST_BUSY_READING_BIT))
+
+#define IPC_CLEAR_HOST_BUSY_READING(host_status)\
+ ((host_status) ^= (IPC_HOST_BUSY_READING_BIT))
+
+
+#define IPC_IS_ISH_ISHTP_READY(ish_status) \
+ (((ish_status) & IPC_ISH_ISHTP_READY_BIT) == \
+ ((uint32_t)IPC_ISH_ISHTP_READY_BIT))
+
+#define IPC_IS_ISH_ILUP(ish_status) \
+ (((ish_status) & IPC_ILUP_BIT) == ((uint32_t)IPC_ILUP_BIT))
+
+
+#define IPC_PROTOCOL_ISHTP 1
+#define IPC_PROTOCOL_MNG 3
+
+#define MNG_RX_CMPL_ENABLE 0
+#define MNG_RX_CMPL_DISABLE 1
+#define MNG_RX_CMPL_INDICATION 2
+#define MNG_RESET_NOTIFY 3
+#define MNG_RESET_NOTIFY_ACK 4
+#define MNG_SYNC_FW_CLOCK 5
+#define MNG_ILLEGAL_CMD 0xFF
+
+#endif /* _ISHTP_ISH_REGS_H_ */
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
new file mode 100644
index 000000000..e99f3a3c6
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * H/W layer of ISHTP provider device (ISH)
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ */
+
+#ifndef _ISHTP_HW_ISH_H_
+#define _ISHTP_HW_ISH_H_
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include "hw-ish-regs.h"
+#include "ishtp-dev.h"
+
+#define CHV_DEVICE_ID 0x22D8
+#define BXT_Ax_DEVICE_ID 0x0AA2
+#define BXT_Bx_DEVICE_ID 0x1AA2
+#define APL_Ax_DEVICE_ID 0x5AA2
+#define SPT_Ax_DEVICE_ID 0x9D35
+#define CNL_Ax_DEVICE_ID 0x9DFC
+#define GLK_Ax_DEVICE_ID 0x31A2
+#define CNL_H_DEVICE_ID 0xA37C
+#define ICL_MOBILE_DEVICE_ID 0x34FC
+#define SPT_H_DEVICE_ID 0xA135
+#define CML_LP_DEVICE_ID 0x02FC
+#define CMP_H_DEVICE_ID 0x06FC
+#define EHL_Ax_DEVICE_ID 0x4BB3
+#define TGL_LP_DEVICE_ID 0xA0FC
+#define TGL_H_DEVICE_ID 0x43FC
+#define ADL_S_DEVICE_ID 0x7AF8
+#define ADL_P_DEVICE_ID 0x51FC
+#define ADL_N_DEVICE_ID 0x54FC
+#define RPL_S_DEVICE_ID 0x7A78
+#define MTL_P_DEVICE_ID 0x7E45
+#define ARL_H_DEVICE_ID 0x7745
+
+#define REVISION_ID_CHT_A0 0x6
+#define REVISION_ID_CHT_Ax_SI 0x0
+#define REVISION_ID_CHT_Bx_SI 0x10
+#define REVISION_ID_CHT_Kx_SI 0x20
+#define REVISION_ID_CHT_Dx_SI 0x30
+#define REVISION_ID_CHT_B0 0xB0
+#define REVISION_ID_SI_MASK 0x70
+
+struct ipc_rst_payload_type {
+ uint16_t reset_id;
+ uint16_t reserved;
+};
+
+struct time_sync_format {
+ uint8_t ts1_source;
+ uint8_t ts2_source;
+ uint16_t reserved;
+} __packed;
+
+struct ipc_time_update_msg {
+ uint64_t primary_host_time;
+ struct time_sync_format sync_info;
+ uint64_t secondary_host_time;
+} __packed;
+
+enum {
+ HOST_UTC_TIME_USEC = 0,
+ HOST_SYSTEM_TIME_USEC = 1
+};
+
+struct ish_hw {
+ void __iomem *mem_addr;
+};
+
+/*
+ * ISH FW status type
+ */
+enum {
+ FWSTS_AFTER_RESET = 0,
+ FWSTS_WAIT_FOR_HOST = 4,
+ FWSTS_START_KERNEL_DMA = 5,
+ FWSTS_FW_IS_RUNNING = 7,
+ FWSTS_SENSOR_APP_LOADED = 8,
+ FWSTS_SENSOR_APP_RUNNING = 15
+};
+
+#define to_ish_hw(dev) (struct ish_hw *)((dev)->hw)
+
+irqreturn_t ish_irq_handler(int irq, void *dev_id);
+struct ishtp_device *ish_dev_init(struct pci_dev *pdev);
+int ish_hw_start(struct ishtp_device *dev);
+void ish_device_disable(struct ishtp_device *dev);
+int ish_disable_dma(struct ishtp_device *dev);
+void ish_set_host_ready(struct ishtp_device *dev);
+
+#endif /* _ISHTP_HW_ISH_H_ */
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
new file mode 100644
index 000000000..a49c6affd
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -0,0 +1,1016 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * H/W layer of ISHTP provider device (ISH)
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ */
+
+#include <linux/devm-helpers.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include "client.h"
+#include "hw-ish.h"
+#include "hbm.h"
+
+/* For FW reset flow */
+static struct work_struct fw_reset_work;
+static struct ishtp_device *ishtp_dev;
+
+/**
+ * ish_reg_read() - Read register
+ * @dev: ISHTP device pointer
+ * @offset: Register offset
+ *
+ * Read 32 bit register at a given offset
+ *
+ * Return: Read register value
+ */
+static inline uint32_t ish_reg_read(const struct ishtp_device *dev,
+ unsigned long offset)
+{
+ struct ish_hw *hw = to_ish_hw(dev);
+
+ return readl(hw->mem_addr + offset);
+}
+
+/**
+ * ish_reg_write() - Write register
+ * @dev: ISHTP device pointer
+ * @offset: Register offset
+ * @value: Value to write
+ *
+ * Writes a 32 bit register at a given offset
+ */
+static inline void ish_reg_write(struct ishtp_device *dev,
+ unsigned long offset,
+ uint32_t value)
+{
+ struct ish_hw *hw = to_ish_hw(dev);
+
+ writel(value, hw->mem_addr + offset);
+}
+
+/**
+ * _ish_read_fw_sts_reg() - Read FW status register
+ * @dev: ISHTP device pointer
+ *
+ * Read FW status register
+ *
+ * Return: Read register value
+ */
+static inline uint32_t _ish_read_fw_sts_reg(struct ishtp_device *dev)
+{
+ return ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+}
+
+/**
+ * check_generated_interrupt() - Check if ISH interrupt
+ * @dev: ISHTP device pointer
+ *
+ * Check if an interrupt was generated for ISH
+ *
+ * Return: true if an interrupt was generated for ISH, false otherwise
+ */
+static bool check_generated_interrupt(struct ishtp_device *dev)
+{
+ bool interrupt_generated = true;
+ uint32_t pisr_val = 0;
+
+ if (dev->pdev->device == CHV_DEVICE_ID) {
+ pisr_val = ish_reg_read(dev, IPC_REG_PISR_CHV_AB);
+ interrupt_generated =
+ IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val);
+ } else {
+ pisr_val = ish_reg_read(dev, IPC_REG_PISR_BXT);
+ interrupt_generated = !!pisr_val;
+ /* only busy-clear bit is RW, others are RO */
+ if (pisr_val)
+ ish_reg_write(dev, IPC_REG_PISR_BXT, pisr_val);
+ }
+
+ return interrupt_generated;
+}
+
+/**
+ * ish_is_input_ready() - Check if FW ready for RX
+ * @dev: ISHTP device pointer
+ *
+ * Check if ISH FW is ready for receiving data
+ *
+ * Return: true if the ISH FW is ready to receive data, false otherwise
+ */
+static bool ish_is_input_ready(struct ishtp_device *dev)
+{
+ uint32_t doorbell_val;
+
+ doorbell_val = ish_reg_read(dev, IPC_REG_HOST2ISH_DRBL);
+ return !IPC_IS_BUSY(doorbell_val);
+}
+
+/**
+ * set_host_ready() - Indicate host ready
+ * @dev: ISHTP device pointer
+ *
+ * Set host ready indication to FW
+ */
+static void set_host_ready(struct ishtp_device *dev)
+{
+ if (dev->pdev->device == CHV_DEVICE_ID) {
+ if (dev->pdev->revision == REVISION_ID_CHT_A0 ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Ax_SI)
+ ish_reg_write(dev, IPC_REG_HOST_COMM, 0x81);
+ else if (dev->pdev->revision == REVISION_ID_CHT_B0 ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Bx_SI ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Kx_SI ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Dx_SI) {
+ uint32_t host_comm_val;
+
+ host_comm_val = ish_reg_read(dev, IPC_REG_HOST_COMM);
+ host_comm_val |= IPC_HOSTCOMM_INT_EN_BIT_CHV_AB | 0x81;
+ ish_reg_write(dev, IPC_REG_HOST_COMM, host_comm_val);
+ }
+ } else {
+ uint32_t host_pimr_val;
+
+ host_pimr_val = ish_reg_read(dev, IPC_REG_PIMR_BXT);
+ host_pimr_val |= IPC_PIMR_INT_EN_BIT_BXT;
+ /*
+ * disable interrupt generated instead of
+ * RX_complete_msg
+ */
+ host_pimr_val &= ~IPC_HOST2ISH_BUSYCLEAR_MASK_BIT;
+
+ ish_reg_write(dev, IPC_REG_PIMR_BXT, host_pimr_val);
+ }
+}
+
+/**
+ * ishtp_fw_is_ready() - Check if FW ready
+ * @dev: ISHTP device pointer
+ *
+ * Check if ISH FW is ready
+ *
+ * Return: true if the ISH FW is ready, false otherwise
+ */
+static bool ishtp_fw_is_ready(struct ishtp_device *dev)
+{
+ uint32_t ish_status = _ish_read_fw_sts_reg(dev);
+
+ return IPC_IS_ISH_ILUP(ish_status) &&
+ IPC_IS_ISH_ISHTP_READY(ish_status);
+}
+
+/**
+ * ish_set_host_rdy() - Indicate host ready
+ * @dev: ISHTP device pointer
+ *
+ * Set host ready indication to FW
+ */
+static void ish_set_host_rdy(struct ishtp_device *dev)
+{
+ uint32_t host_status = ish_reg_read(dev, IPC_REG_HOST_COMM);
+
+ IPC_SET_HOST_READY(host_status);
+ ish_reg_write(dev, IPC_REG_HOST_COMM, host_status);
+}
+
+/**
+ * ish_clr_host_rdy() - Indicate host not ready
+ * @dev: ISHTP device pointer
+ *
+ * Send host not ready indication to FW
+ */
+static void ish_clr_host_rdy(struct ishtp_device *dev)
+{
+ uint32_t host_status = ish_reg_read(dev, IPC_REG_HOST_COMM);
+
+ IPC_CLEAR_HOST_READY(host_status);
+ ish_reg_write(dev, IPC_REG_HOST_COMM, host_status);
+}
+
+static bool ish_chk_host_rdy(struct ishtp_device *dev)
+{
+ uint32_t host_status = ish_reg_read(dev, IPC_REG_HOST_COMM);
+
+ return (host_status & IPC_HOSTCOMM_READY_BIT);
+}
+
+/**
+ * ish_set_host_ready() - reconfig ipc host registers
+ * @dev: ishtp device pointer
+ *
+ * Set the host to the ready state. This API is called in cases where the
+ * FW is still running but IPC has been powered down, such as the OOB case.
+ */
+void ish_set_host_ready(struct ishtp_device *dev)
+{
+ if (ish_chk_host_rdy(dev))
+ return;
+
+ ish_set_host_rdy(dev);
+ set_host_ready(dev);
+}
+
+/**
+ * _ishtp_read_hdr() - Read message header
+ * @dev: ISHTP device pointer
+ *
+ * Read header of 32bit length
+ *
+ * Return: Read register value
+ */
+static uint32_t _ishtp_read_hdr(const struct ishtp_device *dev)
+{
+ return ish_reg_read(dev, IPC_REG_ISH2HOST_MSG);
+}
+
+/**
+ * _ishtp_read - Read message
+ * @dev: ISHTP device pointer
+ * @buffer: message buffer
+ * @buffer_length: length of message buffer
+ *
+ * Read message from FW
+ *
+ * Return: Always 0
+ */
+static int _ishtp_read(struct ishtp_device *dev, unsigned char *buffer,
+ unsigned long buffer_length)
+{
+ uint32_t i;
+ uint32_t *r_buf = (uint32_t *)buffer;
+ uint32_t msg_offs;
+
+ msg_offs = IPC_REG_ISH2HOST_MSG + sizeof(struct ishtp_msg_hdr);
+ for (i = 0; i < buffer_length; i += sizeof(uint32_t))
+ *r_buf++ = ish_reg_read(dev, msg_offs + i);
+
+ return 0;
+}
+
+/**
+ * write_ipc_from_queue() - try to write ipc msg from Tx queue to device
+ * @dev: ishtp device pointer
+ *
+ * Check if DRBL is cleared. If it is, write the first IPC msg, then call
+ * the callback function (unless it's NULL).
+ *
+ * Return: 0 for success else failure code
+ */
+static int write_ipc_from_queue(struct ishtp_device *dev)
+{
+ struct wr_msg_ctl_info *ipc_link;
+ unsigned long length;
+ unsigned long rem;
+ unsigned long flags;
+ uint32_t doorbell_val;
+ uint32_t *r_buf;
+ uint32_t reg_addr;
+ int i;
+ void (*ipc_send_compl)(void *);
+ void *ipc_send_compl_prm;
+
+ if (dev->dev_state == ISHTP_DEV_DISABLED)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
+ if (!ish_is_input_ready(dev)) {
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+ return -EBUSY;
+ }
+
+ /*
+ * if tx send list is empty - return 0;
+ * may happen, as RX_COMPLETE handler doesn't check list emptiness.
+ */
+ if (list_empty(&dev->wr_processing_list)) {
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+ return 0;
+ }
+
+ ipc_link = list_first_entry(&dev->wr_processing_list,
+ struct wr_msg_ctl_info, link);
+ /* first 4 bytes of the data is the doorbell value (IPC header) */
+ length = ipc_link->length - sizeof(uint32_t);
+ doorbell_val = *(uint32_t *)ipc_link->inline_data;
+ r_buf = (uint32_t *)(ipc_link->inline_data + sizeof(uint32_t));
+
+ /* If sending MNG_SYNC_FW_CLOCK, update clock again */
+ if (IPC_HEADER_GET_PROTOCOL(doorbell_val) == IPC_PROTOCOL_MNG &&
+ IPC_HEADER_GET_MNG_CMD(doorbell_val) == MNG_SYNC_FW_CLOCK) {
+ uint64_t usec_system, usec_utc;
+ struct ipc_time_update_msg time_update;
+ struct time_sync_format ts_format;
+
+ usec_system = ktime_to_us(ktime_get_boottime());
+ usec_utc = ktime_to_us(ktime_get_real());
+ ts_format.ts1_source = HOST_SYSTEM_TIME_USEC;
+ ts_format.ts2_source = HOST_UTC_TIME_USEC;
+ ts_format.reserved = 0;
+
+ time_update.primary_host_time = usec_system;
+ time_update.secondary_host_time = usec_utc;
+ time_update.sync_info = ts_format;
+
+ memcpy(r_buf, &time_update,
+ sizeof(struct ipc_time_update_msg));
+ }
+
+ for (i = 0, reg_addr = IPC_REG_HOST2ISH_MSG; i < length >> 2; i++,
+ reg_addr += 4)
+ ish_reg_write(dev, reg_addr, r_buf[i]);
+
+ rem = length & 0x3;
+ if (rem > 0) {
+ uint32_t reg = 0;
+
+ memcpy(&reg, &r_buf[length >> 2], rem);
+ ish_reg_write(dev, reg_addr, reg);
+ }
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, doorbell_val);
+
+ /* Flush writes to msg registers and doorbell */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ /* Update IPC counters */
+ ++dev->ipc_tx_cnt;
+ dev->ipc_tx_bytes_cnt += IPC_HEADER_GET_LENGTH(doorbell_val);
+
+ ipc_send_compl = ipc_link->ipc_send_compl;
+ ipc_send_compl_prm = ipc_link->ipc_send_compl_prm;
+ list_del_init(&ipc_link->link);
+ list_add(&ipc_link->link, &dev->wr_free_list);
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+
+ /*
+ * callback will be called out of spinlock,
+ * after ipc_link returned to free list
+ */
+ if (ipc_send_compl)
+ ipc_send_compl(ipc_send_compl_prm);
+
+ return 0;
+}
+
+/**
+ * write_ipc_to_queue() - write ipc msg to Tx queue
+ * @dev: ishtp device instance
+ * @ipc_send_compl: Send complete callback
+ * @ipc_send_compl_prm: Parameter to send in complete callback
+ * @msg: Pointer to message
+ * @length: Length of message
+ *
+ * Receive a msg with IPC (and upper protocol) header, add it to the device
+ * Tx-to-write list, then try to send the first waiting IPC msg
+ * (if DRBL is cleared).
+ * This function returns a negative value on failure (free list
+ * is empty, or msg too long) and 0 on success.
+ *
+ * Return: 0 for success else failure code
+ */
+static int write_ipc_to_queue(struct ishtp_device *dev,
+ void (*ipc_send_compl)(void *), void *ipc_send_compl_prm,
+ unsigned char *msg, int length)
+{
+ struct wr_msg_ctl_info *ipc_link;
+ unsigned long flags;
+
+ if (length > IPC_FULL_MSG_SIZE)
+ return -EMSGSIZE;
+
+ spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
+ if (list_empty(&dev->wr_free_list)) {
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+ return -ENOMEM;
+ }
+ ipc_link = list_first_entry(&dev->wr_free_list,
+ struct wr_msg_ctl_info, link);
+ list_del_init(&ipc_link->link);
+
+ ipc_link->ipc_send_compl = ipc_send_compl;
+ ipc_link->ipc_send_compl_prm = ipc_send_compl_prm;
+ ipc_link->length = length;
+ memcpy(ipc_link->inline_data, msg, length);
+
+ list_add_tail(&ipc_link->link, &dev->wr_processing_list);
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+
+ write_ipc_from_queue(dev);
+
+ return 0;
+}
+
+/**
+ * ipc_send_mng_msg() - Send management message
+ * @dev: ishtp device instance
+ * @msg_code: Message code
+ * @msg: Pointer to message
+ * @size: Length of message
+ *
+ * Send management message to FW
+ *
+ * Return: 0 for success else failure code
+ */
+static int ipc_send_mng_msg(struct ishtp_device *dev, uint32_t msg_code,
+ void *msg, size_t size)
+{
+ unsigned char ipc_msg[IPC_FULL_MSG_SIZE];
+ uint32_t drbl_val = IPC_BUILD_MNG_MSG(msg_code, size);
+
+ memcpy(ipc_msg, &drbl_val, sizeof(uint32_t));
+ memcpy(ipc_msg + sizeof(uint32_t), msg, size);
+ return write_ipc_to_queue(dev, NULL, NULL, ipc_msg,
+ sizeof(uint32_t) + size);
+}
+
+#define WAIT_FOR_FW_RDY 0x1
+#define WAIT_FOR_INPUT_RDY 0x2
+
+/**
+ * timed_wait_for_timeout() - wait for a special event with timeout
+ * @dev: ISHTP device pointer
+ * @condition: indicate the condition for waiting
+ * @timeinc: time slice for every wait cycle, in ms
+ * @timeout: time in ms for timeout
+ *
+ * This function checks in a loop whether the special event is ready; the loop
+ * period is specified in timeinc. A wait timeout causes failure.
+ *
+ * Return: 0 for success else failure code
+ */
+static int timed_wait_for_timeout(struct ishtp_device *dev, int condition,
+ unsigned int timeinc, unsigned int timeout)
+{
+ bool complete = false;
+ int ret;
+
+ do {
+ if (condition == WAIT_FOR_FW_RDY) {
+ complete = ishtp_fw_is_ready(dev);
+ } else if (condition == WAIT_FOR_INPUT_RDY) {
+ complete = ish_is_input_ready(dev);
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!complete) {
+ unsigned long left_time;
+
+ left_time = msleep_interruptible(timeinc);
+ timeout -= (timeinc - left_time);
+ }
+ } while (!complete && timeout > 0);
+
+ if (complete)
+ ret = 0;
+ else
+ ret = -EBUSY;
+
+out:
+ return ret;
+}
+
+#define TIME_SLICE_FOR_FW_RDY_MS 100
+#define TIME_SLICE_FOR_INPUT_RDY_MS 100
+#define TIMEOUT_FOR_FW_RDY_MS 2000
+#define TIMEOUT_FOR_INPUT_RDY_MS 2000
+
+/**
+ * ish_fw_reset_handler() - FW reset handler
+ * @dev: ishtp device pointer
+ *
+ * Handle FW reset
+ *
+ * Return: 0 for success else failure code
+ */
+static int ish_fw_reset_handler(struct ishtp_device *dev)
+{
+ uint32_t reset_id;
+ unsigned long flags;
+
+ /* Read reset ID */
+ reset_id = ish_reg_read(dev, IPC_REG_ISH2HOST_MSG) & 0xFFFF;
+
+ /* Clear IPC output queue */
+ spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
+ list_splice_init(&dev->wr_processing_list, &dev->wr_free_list);
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+
+ /* ISHTP notification in IPC_RESET */
+ ishtp_reset_handler(dev);
+
+ if (!ish_is_input_ready(dev))
+ timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
+ TIME_SLICE_FOR_INPUT_RDY_MS, TIMEOUT_FOR_INPUT_RDY_MS);
+
+ /* ISH FW is dead */
+ if (!ish_is_input_ready(dev))
+ return -EPIPE;
+ /*
+ * Set HOST2ISH.ILUP. Apparently we need this BEFORE sending
+ * RESET_NOTIFY_ACK - FW will be checking for it
+ */
+ ish_set_host_rdy(dev);
+ /* Send RESET_NOTIFY_ACK (with reset_id) */
+ ipc_send_mng_msg(dev, MNG_RESET_NOTIFY_ACK, &reset_id,
+ sizeof(uint32_t));
+
+ /* Wait for ISH FW's ILUP and ISHTP_READY */
+ timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+ TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
+ if (!ishtp_fw_is_ready(dev)) {
+ /* ISH FW is dead */
+ uint32_t ish_status;
+
+ ish_status = _ish_read_fw_sts_reg(dev);
+ dev_err(dev->devc,
+ "[ishtp-ish]: completed reset, ISH is dead (FWSTS = %08X)\n",
+ ish_status);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+#define TIMEOUT_FOR_HW_RDY_MS 300
+
+/**
+ * fw_reset_work_fn() - FW reset worker function
+ * @unused: not used
+ *
+ * Call ish_fw_reset_handler to complete FW reset
+ */
+static void fw_reset_work_fn(struct work_struct *unused)
+{
+ int rv;
+
+ rv = ish_fw_reset_handler(ishtp_dev);
+ if (!rv) {
+ /* ISH is ILUP & ISHTP-ready. Restart ISHTP */
+ msleep_interruptible(TIMEOUT_FOR_HW_RDY_MS);
+ ishtp_dev->recvd_hw_ready = 1;
+ wake_up_interruptible(&ishtp_dev->wait_hw_ready);
+
+ /* ISHTP notification in IPC_RESET sequence completion */
+ ishtp_reset_compl_handler(ishtp_dev);
+ } else
+ dev_err(ishtp_dev->devc, "[ishtp-ish]: FW reset failed (%d)\n",
+ rv);
+}
+
+/**
+ * _ish_sync_fw_clock() - Sync FW clock with the OS clock
+ * @dev: ishtp device pointer
+ *
+ * Sync FW and OS time
+ */
+static void _ish_sync_fw_clock(struct ishtp_device *dev)
+{
+ static unsigned long prev_sync;
+ uint64_t usec;
+
+ if (prev_sync && time_before(jiffies, prev_sync + 20 * HZ))
+ return;
+
+ prev_sync = jiffies;
+ usec = ktime_to_us(ktime_get_boottime());
+ ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &usec, sizeof(uint64_t));
+}
+
+/**
+ * recv_ipc() - Receive and process IPC management messages
+ * @dev: ishtp device instance
+ * @doorbell_val: doorbell value
+ *
+ * This function runs in ISR context.
+ * NOTE: Any mng command other than reset_notify and reset_notify_ack
+ * won't wake the BH handler.
+ */
+static void recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
+{
+ uint32_t mng_cmd;
+
+ mng_cmd = IPC_HEADER_GET_MNG_CMD(doorbell_val);
+
+ switch (mng_cmd) {
+ default:
+ break;
+
+ case MNG_RX_CMPL_INDICATION:
+ if (dev->suspend_flag) {
+ dev->suspend_flag = 0;
+ wake_up_interruptible(&dev->suspend_wait);
+ }
+ if (dev->resume_flag) {
+ dev->resume_flag = 0;
+ wake_up_interruptible(&dev->resume_wait);
+ }
+
+ write_ipc_from_queue(dev);
+ break;
+
+ case MNG_RESET_NOTIFY:
+ if (!ishtp_dev) {
+ ishtp_dev = dev;
+ }
+ schedule_work(&fw_reset_work);
+ break;
+
+ case MNG_RESET_NOTIFY_ACK:
+ dev->recvd_hw_ready = 1;
+ wake_up_interruptible(&dev->wait_hw_ready);
+ break;
+ }
+}
+
+/**
+ * ish_irq_handler() - ISH IRQ handler
+ * @irq: irq number
+ * @dev_id: ishtp device pointer
+ *
+ * ISH IRQ handler. If interrupt is generated and is for ISH it will process
+ * the interrupt.
+ */
+irqreturn_t ish_irq_handler(int irq, void *dev_id)
+{
+ struct ishtp_device *dev = dev_id;
+ uint32_t doorbell_val;
+ bool interrupt_generated;
+
+ /* Check that it's interrupt from ISH (may be shared) */
+ interrupt_generated = check_generated_interrupt(dev);
+
+ if (!interrupt_generated)
+ return IRQ_NONE;
+
+ doorbell_val = ish_reg_read(dev, IPC_REG_ISH2HOST_DRBL);
+ if (!IPC_IS_BUSY(doorbell_val))
+ return IRQ_HANDLED;
+
+ if (dev->dev_state == ISHTP_DEV_DISABLED)
+ return IRQ_HANDLED;
+
+ /* Sanity check: IPC dgram length in header */
+ if (IPC_HEADER_GET_LENGTH(doorbell_val) > IPC_PAYLOAD_SIZE) {
+ dev_err(dev->devc,
+ "IPC hdr - bad length: %u; dropped\n",
+ (unsigned int)IPC_HEADER_GET_LENGTH(doorbell_val));
+ goto eoi;
+ }
+
+ switch (IPC_HEADER_GET_PROTOCOL(doorbell_val)) {
+ default:
+ break;
+ case IPC_PROTOCOL_MNG:
+ recv_ipc(dev, doorbell_val);
+ break;
+ case IPC_PROTOCOL_ISHTP:
+ ishtp_recv(dev);
+ break;
+ }
+
+eoi:
+ /* Update IPC counters */
+ ++dev->ipc_rx_cnt;
+ dev->ipc_rx_bytes_cnt += IPC_HEADER_GET_LENGTH(doorbell_val);
+
+ ish_reg_write(dev, IPC_REG_ISH2HOST_DRBL, 0);
+ /* Flush write to doorbell */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ish_disable_dma() - disable dma communication between host and ISHFW
+ * @dev: ishtp device pointer
+ *
+ * Clear the dma enable bit and wait for dma inactive.
+ *
+ * Return: 0 for success else error code.
+ */
+int ish_disable_dma(struct ishtp_device *dev)
+{
+ unsigned int dma_delay;
+
+ /* Clear the dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
+
+ /* wait for dma inactive */
+ for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
+ _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
+ dma_delay += 5)
+ mdelay(5);
+
+ if (dma_delay >= MAX_DMA_DELAY) {
+ dev_err(dev->devc,
+ "Wait for DMA inactive timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_wakeup() - wakeup ishfw from waiting-for-host state
+ * @dev: ishtp device pointer
+ *
+ * Set the dma enable bit and send a void message to FW;
+ * it will wake the FW up from the waiting-for-host state.
+ */
+static void ish_wakeup(struct ishtp_device *dev)
+{
+ /* Set dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
+
+ /*
+ * Send 0 IPC message so that ISH FW wakes up if it was already
+ * asleep.
+ */
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
+
+ /* Flush writes to doorbell and REMAP2 */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+}
+
+/**
+ * _ish_hw_reset() - HW reset
+ * @dev: ishtp device pointer
+ *
+ * Reset the ISH HW to recover from any error
+ *
+ * Return: 0 for success else error fault code
+ */
+static int _ish_hw_reset(struct ishtp_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int rv;
+ uint16_t csr;
+
+ if (!pdev)
+ return -ENODEV;
+
+ rv = pci_reset_function(pdev);
+ if (!rv)
+ dev->dev_state = ISHTP_DEV_RESETTING;
+
+ if (!pdev->pm_cap) {
+ dev_err(&pdev->dev, "Can't reset - no PM caps\n");
+ return -EINVAL;
+ }
+
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
+ dev_err(&pdev->dev,
+ "Can't reset - stuck with DMA in-progress\n");
+ return -EBUSY;
+ }
+
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &csr);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D3hot;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
+
+ mdelay(pdev->d3hot_delay);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D0;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
+
+ /* Now we can enable ISH DMA operation and wakeup ISHFW */
+ ish_wakeup(dev);
+
+ return 0;
+}
+
+/**
+ * _ish_ipc_reset() - IPC reset
+ * @dev: ishtp device pointer
+ *
+ * Resets host and fw IPC and upper layers
+ *
+ * Return: 0 for success else error fault code
+ */
+static int _ish_ipc_reset(struct ishtp_device *dev)
+{
+ struct ipc_rst_payload_type ipc_mng_msg;
+ int rv = 0;
+
+ ipc_mng_msg.reset_id = 1;
+ ipc_mng_msg.reserved = 0;
+
+ set_host_ready(dev);
+
+ /* Clear the incoming doorbell */
+ ish_reg_write(dev, IPC_REG_ISH2HOST_DRBL, 0);
+ /* Flush write to doorbell */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ dev->recvd_hw_ready = 0;
+
+ /* send message */
+ rv = ipc_send_mng_msg(dev, MNG_RESET_NOTIFY, &ipc_mng_msg,
+ sizeof(struct ipc_rst_payload_type));
+ if (rv) {
+ dev_err(dev->devc, "Failed to send IPC MNG_RESET_NOTIFY\n");
+ return rv;
+ }
+
+ wait_event_interruptible_timeout(dev->wait_hw_ready,
+ dev->recvd_hw_ready, 2 * HZ);
+ if (!dev->recvd_hw_ready) {
+ dev_err(dev->devc, "Timed out waiting for HW ready\n");
+ rv = -ENODEV;
+ }
+
+ return rv;
+}
+
+/**
+ * ish_hw_start() - Start ISH HW
+ * @dev: ishtp device pointer
+ *
+ * Set host to ready state and wait for FW reset
+ *
+ * Return: 0 for success else error fault code
+ */
+int ish_hw_start(struct ishtp_device *dev)
+{
+ ish_set_host_rdy(dev);
+
+ set_host_ready(dev);
+
+ /* After that we can enable ISH DMA operation and wakeup ISHFW */
+ ish_wakeup(dev);
+
+ /* wait for FW-initiated reset flow */
+ if (!dev->recvd_hw_ready)
+ wait_event_interruptible_timeout(dev->wait_hw_ready,
+ dev->recvd_hw_ready,
+ 10 * HZ);
+
+ if (!dev->recvd_hw_ready) {
+ dev_err(dev->devc,
+ "[ishtp-ish]: Timed out waiting for FW-initiated reset\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_ipc_get_header() - Get doorbell value
+ * @dev: ishtp device pointer
+ * @length: length of message
+ * @busy: busy status
+ *
+ * Get door bell value from message header
+ *
+ * Return: door bell value
+ */
+static uint32_t ish_ipc_get_header(struct ishtp_device *dev, int length,
+ int busy)
+{
+ uint32_t drbl_val;
+
+ drbl_val = IPC_BUILD_HEADER(length, IPC_PROTOCOL_ISHTP, busy);
+
+ return drbl_val;
+}
+
+/**
+ * _dma_no_cache_snooping() - Check if DMA lacks cache snooping
+ * @dev: ishtp device pointer
+ *
+ * Check whether DMA on the current platform supports cache snooping.
+ * This callback is used to notify the upper-layer driver whether a manual
+ * cache flush is needed when doing DMA operations.
+ *
+ * Pay attention to this callback's implementation: declaring cache
+ * snooping on a platform that does not support it will cause the
+ * upper-layer driver to receive mismatched data, while declaring no
+ * cache snooping on a platform that does support it will cause the
+ * cache to be flushed twice and hurt performance.
+ *
+ * Return: false - has cache snooping capability
+ *         true - no cache snooping, need manual cache flush
+ */
+static bool _dma_no_cache_snooping(struct ishtp_device *dev)
+{
+ return (dev->pdev->device == EHL_Ax_DEVICE_ID ||
+ dev->pdev->device == TGL_LP_DEVICE_ID ||
+ dev->pdev->device == TGL_H_DEVICE_ID ||
+ dev->pdev->device == ADL_S_DEVICE_ID ||
+ dev->pdev->device == ADL_P_DEVICE_ID);
+}
+
+static const struct ishtp_hw_ops ish_hw_ops = {
+ .hw_reset = _ish_hw_reset,
+ .ipc_reset = _ish_ipc_reset,
+ .ipc_get_header = ish_ipc_get_header,
+ .ishtp_read = _ishtp_read,
+ .write = write_ipc_to_queue,
+ .get_fw_status = _ish_read_fw_sts_reg,
+ .sync_fw_clock = _ish_sync_fw_clock,
+ .ishtp_read_hdr = _ishtp_read_hdr,
+ .dma_no_cache_snooping = _dma_no_cache_snooping
+};
+
+/**
+ * ish_dev_init() - Initialize ISH device
+ * @pdev: PCI device
+ *
+ * Allocate ISHTP device and initialize IPC processing
+ *
+ * Return: ISHTP device instance on success else NULL
+ */
+struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+{
+ struct ishtp_device *dev;
+ int i;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev,
+ sizeof(struct ishtp_device) + sizeof(struct ish_hw),
+ GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ ishtp_device_init(dev);
+
+ init_waitqueue_head(&dev->wait_hw_ready);
+
+ spin_lock_init(&dev->wr_processing_spinlock);
+
+ /* Init IPC processing and free lists */
+ INIT_LIST_HEAD(&dev->wr_processing_list);
+ INIT_LIST_HEAD(&dev->wr_free_list);
+ for (i = 0; i < IPC_TX_FIFO_SIZE; i++) {
+ struct wr_msg_ctl_info *tx_buf;
+
+ tx_buf = devm_kzalloc(&pdev->dev,
+ sizeof(struct wr_msg_ctl_info),
+ GFP_KERNEL);
+ if (!tx_buf) {
+ /*
+ * IPC buffers may be limited or not available
+ * at all - although this shouldn't happen
+ */
+ dev_err(dev->devc,
+ "[ishtp-ish]: failure in Tx FIFO allocations (%d)\n",
+ i);
+ break;
+ }
+ list_add_tail(&tx_buf->link, &dev->wr_free_list);
+ }
+
+ ret = devm_work_autocancel(&pdev->dev, &fw_reset_work, fw_reset_work_fn);
+ if (ret) {
+ dev_err(dev->devc, "Failed to initialise FW reset work\n");
+ return NULL;
+ }
+
+ dev->ops = &ish_hw_ops;
+ dev->devc = &pdev->dev;
+ dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
+ return dev;
+}
+
+/**
+ * ish_device_disable() - Disable ISH device
+ * @dev: ISHTP device pointer
+ *
+ * Disable ISH by clearing host ready to inform firmware.
+ */
+void ish_device_disable(struct ishtp_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (!pdev)
+ return;
+
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
+ dev_err(&pdev->dev,
+ "Can't reset - stuck with DMA in-progress\n");
+ return;
+ }
+
+ /* Put ISH to D3hot state for power saving */
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ ish_clr_host_rdy(dev);
+}
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
new file mode 100644
index 000000000..710fda5f1
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCI glue for ISHTP provider device (ISH) driver
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/intel_ish.h>
+#include "ishtp-dev.h"
+#include "hw-ish.h"
+
+static const struct pci_device_id ish_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CHV_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, BXT_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, BXT_Bx_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, APL_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_S_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_N_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_H_DEVICE_ID)},
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+
+/**
+ * ish_event_tracer() - Callback function to dump trace messages
+ * @dev: ishtp device
+ * @format: printf style format
+ *
+ * Callback to direct log messages to Linux trace buffers
+ */
+static __printf(2, 3)
+void ish_event_tracer(struct ishtp_device *dev, const char *format, ...)
+{
+ if (trace_ishtp_dump_enabled()) {
+ va_list args;
+ char tmp_buf[100];
+
+ va_start(args, format);
+ vsnprintf(tmp_buf, sizeof(tmp_buf), format, args);
+ va_end(args);
+
+ trace_ishtp_dump(tmp_buf);
+ }
+}
+
+/**
+ * ish_init() - Init function
+ * @dev: ishtp device
+ *
+ * This function initializes wait queues for suspend/resume and
+ * calls the hardware initialization function. This will initiate
+ * the startup sequence.
+ *
+ * Return: 0 for success or error code for failure
+ */
+static int ish_init(struct ishtp_device *dev)
+{
+ int ret;
+
+ /* Set the state of ISH HW to start */
+ ret = ish_hw_start(dev);
+ if (ret) {
+ dev_err(dev->devc, "ISH: hw start failed.\n");
+ return ret;
+ }
+
+ /* Start the inter process communication to ISH processor */
+ ret = ishtp_start(dev);
+ if (ret) {
+ dev_err(dev->devc, "ISHTP: Protocol init failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id ish_invalid_pci_ids[] = {
+ /* Mehlow platform special pci ids */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA309)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA30A)},
+ {}
+};
+
+static inline bool ish_should_enter_d0i3(struct pci_dev *pdev)
+{
+ return !pm_suspend_via_firmware() || pdev->device == CHV_DEVICE_ID;
+}
+
+static inline bool ish_should_leave_d0i3(struct pci_dev *pdev)
+{
+ return !pm_resume_via_firmware() || pdev->device == CHV_DEVICE_ID;
+}
+
+static int enable_gpe(struct device *dev)
+{
+#ifdef CONFIG_ACPI
+ acpi_status acpi_sts;
+ struct acpi_device *adev;
+ struct acpi_device_wakeup *wakeup;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev) {
+ dev_err(dev, "get acpi handle failed\n");
+ return -ENODEV;
+ }
+ wakeup = &adev->wakeup;
+
+ /*
+ * Call acpi_disable_gpe() so that the reference count
+ * gpe_event_info->runtime_count doesn't overflow.
+ * When gpe_event_info->runtime_count = 0, the call
+ * to acpi_disable_gpe() simply returns.
+ */
+ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+
+ acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (ACPI_FAILURE(acpi_sts)) {
+ dev_err(dev, "enable ose_gpe failed\n");
+ return -EIO;
+ }
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+static void enable_pme_wake(struct pci_dev *pdev)
+{
+ if ((pci_pme_capable(pdev, PCI_D0) ||
+ pci_pme_capable(pdev, PCI_D3hot) ||
+ pci_pme_capable(pdev, PCI_D3cold)) && !enable_gpe(&pdev->dev)) {
+ pci_pme_active(pdev, true);
+ dev_dbg(&pdev->dev, "ish ipc driver pme wake enabled\n");
+ }
+}
+
+/**
+ * ish_probe() - PCI driver probe callback
+ * @pdev: pci device
+ * @ent: pci device id
+ *
+ * Initialize PCI function, setup interrupt and call for ISH initialization
+ *
+ * Return: 0 for success or error code for failure
+ */
+static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+ struct ish_hw *hw;
+ unsigned long irq_flag = 0;
+ struct ishtp_device *ishtp;
+ struct device *dev = &pdev->dev;
+
+ /* Check for invalid platforms for ISH support */
+ if (pci_dev_present(ish_invalid_pci_ids))
+ return -ENODEV;
+
+ /* enable pci dev */
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "ISH: Failed to enable PCI device\n");
+ return ret;
+ }
+
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+
+ /* pci request regions for ISH driver */
+ ret = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(dev, "ISH: Failed to get PCI regions\n");
+ return ret;
+ }
+
+ /* allocates and initializes the ISH dev structure */
+ ishtp = ish_dev_init(pdev);
+ if (!ishtp) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ hw = to_ish_hw(ishtp);
+ ishtp->print_log = ish_event_tracer;
+
+ /* mapping IO device memory */
+ hw->mem_addr = pcim_iomap_table(pdev)[0];
+ ishtp->pdev = pdev;
+
+ /* request and enable interrupt */
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ irq_flag = IRQF_SHARED;
+
+ ret = devm_request_irq(dev, pdev->irq, ish_irq_handler,
+ irq_flag, KBUILD_MODNAME, ishtp);
+ if (ret) {
+ dev_err(dev, "ISH: request IRQ %d failed\n", pdev->irq);
+ return ret;
+ }
+
+ dev_set_drvdata(ishtp->devc, ishtp);
+
+ init_waitqueue_head(&ishtp->suspend_wait);
+ init_waitqueue_head(&ishtp->resume_wait);
+
+ /* Enable PME for EHL */
+ if (pdev->device == EHL_Ax_DEVICE_ID)
+ enable_pme_wake(pdev);
+
+ ret = ish_init(ishtp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * ish_remove() - PCI driver remove callback
+ * @pdev: pci device
+ *
+ * This function does cleanup of ISH on pci remove callback
+ */
+static void ish_remove(struct pci_dev *pdev)
+{
+ struct ishtp_device *ishtp_dev = pci_get_drvdata(pdev);
+
+ ishtp_bus_remove_all_clients(ishtp_dev, false);
+ ish_device_disable(ishtp_dev);
+}
+
+static struct device __maybe_unused *ish_resume_device;
+
+/* 50ms to get resume response */
+#define WAIT_FOR_RESUME_ACK_MS 50
+
+/**
+ * ish_resume_handler() - Work function to complete resume
+ * @work: work struct
+ *
+ * The resume work function completes the resume asynchronously.
+ * There are two resume paths: one where ISH is not powered off,
+ * in which case a simple resume message is enough; otherwise, a full
+ * reset sequence is needed.
+ */
+static void __maybe_unused ish_resume_handler(struct work_struct *work)
+{
+ struct pci_dev *pdev = to_pci_dev(ish_resume_device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+ uint32_t fwsts = dev->ops->get_fw_status(dev);
+
+ if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
+ && IPC_IS_ISH_ILUP(fwsts)) {
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(pdev->irq);
+
+ ish_set_host_ready(dev);
+
+ ishtp_send_resume(dev);
+
+ /* Waiting to get resume response */
+ if (dev->resume_flag)
+ wait_event_interruptible_timeout(dev->resume_wait,
+ !dev->resume_flag,
+ msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
+
+ /*
+ * If the flag is not cleared, something is wrong with ISH FW.
+ * So on resume, need to go through init sequence again.
+ */
+ if (dev->resume_flag)
+ ish_init(dev);
+ } else {
+ /*
+ * Resume from the D3, full reboot of ISH processor will happen,
+ * so need to go through init sequence again.
+ */
+ ish_init(dev);
+ }
+}
+
+/**
+ * ish_suspend() - ISH suspend callback
+ * @device: device pointer
+ *
+ * ISH suspend callback
+ *
+ * Return: 0 to the pm core
+ */
+static int __maybe_unused ish_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+
+ if (ish_should_enter_d0i3(pdev)) {
+ /*
+ * If the previous suspend hasn't been answered, then ISH is likely
+ * dead; don't attempt a nested notification.
+ */
+ if (dev->suspend_flag)
+ return 0;
+
+ dev->resume_flag = 0;
+ dev->suspend_flag = 1;
+ ishtp_send_suspend(dev);
+
+ /* 25 ms should be enough for live ISH to flush all IPC buf */
+ if (dev->suspend_flag)
+ wait_event_interruptible_timeout(dev->suspend_wait,
+ !dev->suspend_flag,
+ msecs_to_jiffies(25));
+
+ if (dev->suspend_flag) {
+ /*
+ * It looks like the FW has halted; clear the DMA bit, put
+ * ISH into D3, and the FW will reset on resume.
+ */
+ ish_disable_dma(dev);
+ } else {
+ /*
+ * Save state so PCI core will keep the device at D0,
+ * the ISH would enter D0i3
+ */
+ pci_save_state(pdev);
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(pdev->irq);
+ }
+ } else {
+ /*
+ * Clear the DMA bit before putting ISH into D3,
+ * or ISH FW would reset automatically.
+ */
+ ish_disable_dma(dev);
+ }
+
+ return 0;
+}
+
+static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
+/**
+ * ish_resume() - ISH resume callback
+ * @device: device pointer
+ *
+ * ISH resume callback
+ *
+ * Return: 0 to the pm core
+ */
+static int __maybe_unused ish_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+
+ /* add this to finish power flow for EHL */
+ if (dev->pdev->device == EHL_Ax_DEVICE_ID) {
+ pci_set_power_state(pdev, PCI_D0);
+ enable_pme_wake(pdev);
+ dev_dbg(dev->devc, "set power state to D0 for ehl\n");
+ }
+
+ ish_resume_device = device;
+ dev->resume_flag = 1;
+
+ schedule_work(&resume_work);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
+
+static struct pci_driver ish_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ish_pci_tbl,
+ .probe = ish_probe,
+ .remove = ish_remove,
+ .driver.pm = &ish_pm_ops,
+};
+
+module_pci_driver(ish_driver);
+
+/* Original author */
+MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>");
+/* Adoption to upstream Linux kernel */
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) Integrated Sensor Hub PCI Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
new file mode 100644
index 000000000..16aa030af
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -0,0 +1,1066 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISH-TP client driver for ISH firmware loading
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/intel-ish-client-if.h>
+#include <linux/property.h>
+#include <asm/cacheflush.h>
+
+/* Number of times we attempt to load the firmware before giving up */
+#define MAX_LOAD_ATTEMPTS 3
+
+/* ISH TX/RX ring buffer pool size */
+#define LOADER_CL_RX_RING_SIZE 1
+#define LOADER_CL_TX_RING_SIZE 1
+
+/*
+ * The ISH Shim firmware loader reserves a 4 Kb buffer in SRAM. The buffer is
+ * used to temporarily hold the data transferred from host to Shim
+ * firmware loader. Reason for the odd size of 3968 bytes? Each IPC
+ * transfer is 128 bytes (= 4 bytes header + 124 bytes payload). So the
+ * 4 Kb buffer can hold a maximum of 32 IPC transfers, which means we can
+ * have a max payload of 3968 bytes (= 32 x 124 payload).
+ */
+#define LOADER_SHIM_IPC_BUF_SIZE 3968
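
As a rough illustration of what this buffer size implies (a back-of-the-envelope sketch, not driver code): in the ISH-TP transfer mode a firmware image is pushed in fragments of roughly LOADER_SHIM_IPC_BUF_SIZE bytes of payload per LOADER_CMD_XFER_FRAGMENT command (ignoring the small per-fragment header for this estimate), so a hypothetical 300 KiB image needs on the order of eighty fragment round trips:

	#include <stdio.h>

	#define LOADER_SHIM_IPC_BUF_SIZE 3968

	int main(void)
	{
		unsigned int image_size = 300 * 1024;	/* example image size */
		unsigned int fragments = (image_size + LOADER_SHIM_IPC_BUF_SIZE - 1) /
					 LOADER_SHIM_IPC_BUF_SIZE;

		printf("%u fragments of up to %u bytes\n",
		       fragments, LOADER_SHIM_IPC_BUF_SIZE);
		return 0;
	}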
+
+/**
+ * enum ish_loader_commands - ISH loader host commands.
+ * @LOADER_CMD_XFER_QUERY: Query the Shim firmware loader for
+ * capabilities
+ * @LOADER_CMD_XFER_FRAGMENT: Transfer one firmware image fragment at a
+ * time. The command may be executed
+ * multiple times until the entire firmware
+ * image is downloaded to SRAM.
+ * @LOADER_CMD_START: Start executing the main firmware.
+ */
+enum ish_loader_commands {
+ LOADER_CMD_XFER_QUERY = 0,
+ LOADER_CMD_XFER_FRAGMENT,
+ LOADER_CMD_START,
+};
+
+/* Command bit mask */
+#define CMD_MASK GENMASK(6, 0)
+#define IS_RESPONSE BIT(7)
+
+/*
+ * The ISH firmware's max delay for one transmit failure is 1 second (HZ jiffies),
+ * and the firmware will retry 2 times, so 3 * HZ is used for the timeout.
+ */
+#define ISHTP_SEND_TIMEOUT (3 * HZ)
+
+/*
+ * Loader transfer modes:
+ *
+ * LOADER_XFER_MODE_ISHTP mode uses the existing ISH-TP mechanism to
+ * transfer data. This may use IPC or DMA if supported in firmware.
+ * The buffer size is limited to 4 Kb by the IPC/ISH-TP protocol for
+ * both IPC & DMA (legacy).
+ *
+ * LOADER_XFER_MODE_DIRECT_DMA - firmware loading is a bit different
+ * from sensor data streaming. Here we download a large (300+ Kb)
+ * image directly to ISH SRAM memory. There is limited benefit in
+ * DMA'ing a 300 Kb image under the 4 Kb chunk limit. Hence, we introduce
+ * this "direct dma" mode, where we do not use ISH-TP for DMA, but
+ * instead manage the DMA directly in the kernel driver and the Shim
+ * firmware loader (allocate buffer, break into chunks and transfer).
+ * This allows us to overcome the 4 Kb limit and optimize the data flow
+ * path in the firmware.
+ */
+#define LOADER_XFER_MODE_DIRECT_DMA BIT(0)
+#define LOADER_XFER_MODE_ISHTP BIT(1)
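
One way to read these flags (a sketch only; pick_xfer_mode() is a hypothetical helper, not a function in this driver, and it assumes the Shim loader reports its supported modes as a bitmask in the xfer_mode capability field defined further below): prefer direct DMA when the firmware offers it and fall back to ISH-TP otherwise:

	/* Hypothetical helper illustrating transfer-mode selection. */
	static int pick_xfer_mode(u32 fw_modes)
	{
		if (fw_modes & LOADER_XFER_MODE_DIRECT_DMA)
			return LOADER_XFER_MODE_DIRECT_DMA;
		if (fw_modes & LOADER_XFER_MODE_ISHTP)
			return LOADER_XFER_MODE_ISHTP;
		return -EINVAL;		/* no mutually supported mode */
	}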
+
+/* ISH Transport Loader client unique GUID */
+static const struct ishtp_device_id loader_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7,
+ 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc) },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, loader_ishtp_id_table);
+
+#define FILENAME_SIZE 256
+
+/*
+ * The firmware loading latency will be minimal if we can DMA the
+ * entire ISH firmware image in one go. This requires that we allocate
+ * a large DMA buffer in the kernel, which could be problematic on some
+ * platforms. So here we limit the DMA buffer size via a module_param.
+ * We default to 4 pages, but a customer can set it to a higher limit if
+ * deemed appropriate for their platform.
+ */
+static int dma_buf_size_limit = 4 * PAGE_SIZE;
+
+/**
+ * struct loader_msg_hdr - Header for ISH Loader commands.
+ * @command: LOADER_CMD* commands. Bit 7 is the response.
+ * @reserved: Reserved space
+ * @status: Command response status. Non 0, is error
+ * condition.
+ *
+ * This structure is used as header for every command/data sent/received
+ * between Host driver and ISH Shim firmware loader.
+ */
+struct loader_msg_hdr {
+ u8 command;
+ u8 reserved[2];
+ u8 status;
+} __packed;
+
+struct loader_xfer_query {
+ struct loader_msg_hdr hdr;
+ u32 image_size;
+} __packed;
+
+struct ish_fw_version {
+ u16 major;
+ u16 minor;
+ u16 hotfix;
+ u16 build;
+} __packed;
+
+union loader_version {
+ u32 value;
+ struct {
+ u8 major;
+ u8 minor;
+ u8 hotfix;
+ u8 build;
+ };
+} __packed;
+
+struct loader_capability {
+ u32 max_fw_image_size;
+ u32 xfer_mode;
+ u32 max_dma_buf_size; /* only for dma mode, multiples of cacheline */
+} __packed;
+
+struct shim_fw_info {
+ struct ish_fw_version ish_fw_version;
+ u32 protocol_version;
+ union loader_version ldr_version;
+ struct loader_capability ldr_capability;
+} __packed;
+
+struct loader_xfer_query_response {
+ struct loader_msg_hdr hdr;
+ struct shim_fw_info fw_info;
+} __packed;
+
+struct loader_xfer_fragment {
+ struct loader_msg_hdr hdr;
+ u32 xfer_mode;
+ u32 offset;
+ u32 size;
+ u32 is_last;
+} __packed;
+
+struct loader_xfer_ipc_fragment {
+ struct loader_xfer_fragment fragment;
+ u8 data[] ____cacheline_aligned; /* variable length payload here */
+} __packed;
+
+struct loader_xfer_dma_fragment {
+ struct loader_xfer_fragment fragment;
+ u64 ddr_phys_addr;
+} __packed;
+
+struct loader_start {
+ struct loader_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct response_info - Encapsulate firmware response related
+ * information for passing between function
+ * loader_cl_send() and process_recv() callback.
+ * @data: Copy the data received from firmware here.
+ * @max_size: Max size allocated for the @data buffer. If the
+ * received data exceeds this value, we log an
+ * error.
+ * @size: Actual size of data received from firmware.
+ * @error: Returns 0 for success, negative error code for a
+ * failure in function process_recv().
+ * @received: Set to true on receiving a valid firmware
+ * response to host command
+ * @wait_queue: Wait queue for Host firmware loading where the
+ * client sends message to ISH firmware and waits
+ * for response
+ */
+struct response_info {
+ void *data;
+ size_t max_size;
+ size_t size;
+ int error;
+ bool received;
+ wait_queue_head_t wait_queue;
+};
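+
+/*
+ * Response handshake, in brief: loader_cl_send() points @data at a
+ * caller-provided buffer, sends the command and sleeps on @wait_queue;
+ * process_recv() validates the reply, copies it into @data, sets
+ * @received and wakes the sender.
+ */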
+
+/*
+ * struct ishtp_cl_data - Encapsulate per ISH-TP Client Data.
+ * @work_ishtp_reset: Work queue for reset handling.
+ * @work_fw_load: Work queue for host firmware loading.
+ * @flag_retry: Flag for indicating host firmware loading should
+ * be retried.
+ * @retry_count: Count the number of retries.
+ *
+ * This structure is used to store data per client.
+ */
+struct ishtp_cl_data {
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+
+ /*
+ * Used for passing firmware response information between
+ * loader_cl_send() and process_recv() callback.
+ */
+ struct response_info response;
+
+ struct work_struct work_ishtp_reset;
+ struct work_struct work_fw_load;
+
+ /*
+	 * In certain failure scenarios, it makes sense to reset the ISH
+ * subsystem and retry Host firmware loading (e.g. bad message
+ * packet, ENOMEM, etc.). On the other hand, failures due to
+ * protocol mismatch, etc., are not recoverable. We do not
+ * retry them.
+ *
+ * If set, the flag indicates that we should re-try the
+ * particular failure.
+ */
+ bool flag_retry;
+ int retry_count;
+};
+
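+/*
+ * Number of header bytes that precede data[] in struct
+ * loader_xfer_ipc_fragment; ish_fw_xfer_ishtp() subtracts this from
+ * LOADER_SHIM_IPC_BUF_SIZE to size each fragment payload.
+ */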
+#define IPC_FRAGMENT_DATA_PREAMBLE \
+ offsetof(struct loader_xfer_ipc_fragment, data)
+
+#define cl_data_to_dev(client_data) ishtp_device((client_data)->cl_device)
+
+/**
+ * get_firmware_variant() - Gets the filename of firmware image to be
+ * loaded based on platform variant.
+ * @client_data: Client data instance.
+ * @filename: Returns firmware filename.
+ *
+ * Queries the firmware-name device property string.
+ *
+ * Return: Length of the generated filename (snprintf() return value) on
+ *	   success, negative error code for failure.
+ */
+static int get_firmware_variant(struct ishtp_cl_data *client_data,
+ char *filename)
+{
+ int rv;
+ const char *val;
+ struct device *devc = ishtp_get_pci_device(client_data->cl_device);
+
+ rv = device_property_read_string(devc, "firmware-name", &val);
+ if (rv < 0) {
+ dev_err(devc,
+ "Error: ISH firmware-name device property required\n");
+ return rv;
+ }
+ return snprintf(filename, FILENAME_SIZE, "intel/%s", val);
+}
+
+/**
+ * loader_cl_send() - Send message from host to firmware
+ *
+ * @client_data: Client data instance
+ * @out_msg: Message buffer to be sent to firmware
+ * @out_size: Size of out going message
+ * @in_msg:	Message buffer where the incoming data is copied.
+ *		This buffer is allocated by the calling function.
+ * @in_size: Max size of incoming message
+ *
+ * Return: Number of bytes copied in the in_msg on success, negative
+ * error code on failure.
+ */
+static int loader_cl_send(struct ishtp_cl_data *client_data,
+ u8 *out_msg, size_t out_size,
+ u8 *in_msg, size_t in_size)
+{
+ int rv;
+ struct loader_msg_hdr *out_hdr = (struct loader_msg_hdr *)out_msg;
+ struct ishtp_cl *loader_ishtp_cl = client_data->loader_ishtp_cl;
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "%s: command=%02lx is_response=%u status=%02x\n",
+ __func__,
+ out_hdr->command & CMD_MASK,
+ out_hdr->command & IS_RESPONSE ? 1 : 0,
+ out_hdr->status);
+
+	/* Set up the incoming buffer & size */
+ client_data->response.data = in_msg;
+ client_data->response.max_size = in_size;
+ client_data->response.error = 0;
+ client_data->response.received = false;
+
+ rv = ishtp_cl_send(loader_ishtp_cl, out_msg, out_size);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data),
+ "ishtp_cl_send error %d\n", rv);
+ return rv;
+ }
+
+ wait_event_interruptible_timeout(client_data->response.wait_queue,
+ client_data->response.received,
+ ISHTP_SEND_TIMEOUT);
+ if (!client_data->response.received) {
+ dev_err(cl_data_to_dev(client_data),
+ "Timed out for response to command=%02lx",
+ out_hdr->command & CMD_MASK);
+ return -ETIMEDOUT;
+ }
+
+ if (client_data->response.error < 0)
+ return client_data->response.error;
+
+ return client_data->response.size;
+}
+
+/**
+ * process_recv() - Receive and parse incoming packet
+ * @loader_ishtp_cl: Client instance to get stats
+ * @rb_in_proc: ISH received message buffer
+ *
+ * Parse the incoming packet. If it is a response packet then it will
+ * update the received flag and wake up the caller waiting for the response.
+ */
+static void process_recv(struct ishtp_cl *loader_ishtp_cl,
+ struct ishtp_cl_rb *rb_in_proc)
+{
+ struct loader_msg_hdr *hdr;
+ size_t data_len = rb_in_proc->buf_idx;
+ struct ishtp_cl_data *client_data =
+ ishtp_get_client_data(loader_ishtp_cl);
+
+ /* Sanity check */
+ if (!client_data->response.data) {
+ dev_err(cl_data_to_dev(client_data),
+ "Receiving buffer is null. Should be allocated by calling function\n");
+ client_data->response.error = -EINVAL;
+ goto end;
+ }
+
+ if (client_data->response.received) {
+ dev_err(cl_data_to_dev(client_data),
+ "Previous firmware message not yet processed\n");
+ client_data->response.error = -EINVAL;
+ goto end;
+ }
+ /*
+ * All firmware messages have a header. Check buffer size
+ * before accessing elements inside.
+ */
+ if (!rb_in_proc->buffer.data) {
+ dev_warn(cl_data_to_dev(client_data),
+ "rb_in_proc->buffer.data returned null");
+ client_data->response.error = -EBADMSG;
+ goto end;
+ }
+
+ if (data_len < sizeof(struct loader_msg_hdr)) {
+ dev_err(cl_data_to_dev(client_data),
+ "data size %zu is less than header %zu\n",
+ data_len, sizeof(struct loader_msg_hdr));
+ client_data->response.error = -EMSGSIZE;
+ goto end;
+ }
+
+ hdr = (struct loader_msg_hdr *)rb_in_proc->buffer.data;
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "%s: command=%02lx is_response=%u status=%02x\n",
+ __func__,
+ hdr->command & CMD_MASK,
+ hdr->command & IS_RESPONSE ? 1 : 0,
+ hdr->status);
+
+ if (((hdr->command & CMD_MASK) != LOADER_CMD_XFER_QUERY) &&
+ ((hdr->command & CMD_MASK) != LOADER_CMD_XFER_FRAGMENT) &&
+ ((hdr->command & CMD_MASK) != LOADER_CMD_START)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Invalid command=%02lx\n",
+ hdr->command & CMD_MASK);
+ client_data->response.error = -EPROTO;
+ goto end;
+ }
+
+ if (data_len > client_data->response.max_size) {
+ dev_err(cl_data_to_dev(client_data),
+ "Received buffer size %zu is larger than allocated buffer %zu\n",
+ data_len, client_data->response.max_size);
+ client_data->response.error = -EMSGSIZE;
+ goto end;
+ }
+
+ /* We expect only "response" messages from firmware */
+ if (!(hdr->command & IS_RESPONSE)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Invalid response to command\n");
+ client_data->response.error = -EIO;
+ goto end;
+ }
+
+ if (hdr->status) {
+ dev_err(cl_data_to_dev(client_data),
+ "Loader returned status %d\n",
+ hdr->status);
+ client_data->response.error = -EIO;
+ goto end;
+ }
+
+ /* Update the actual received buffer size */
+ client_data->response.size = data_len;
+
+ /*
+ * Copy the buffer received in firmware response for the
+ * calling thread.
+ */
+ memcpy(client_data->response.data,
+ rb_in_proc->buffer.data, data_len);
+
+ /* Set flag before waking up the caller */
+ client_data->response.received = true;
+
+end:
+ /* Free the buffer */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ rb_in_proc = NULL;
+
+ /* Wake the calling thread */
+ wake_up_interruptible(&client_data->response.wait_queue);
+}
+
+/**
+ * loader_cl_event_cb() - bus driver callback for incoming message
+ * @cl_device: Pointer to the ishtp client device for which this
+ * message is targeted
+ *
+ * Remove the packet from the list and process the message by calling
+ * process_recv
+ */
+static void loader_cl_event_cb(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_rb *rb_in_proc;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ while ((rb_in_proc = ishtp_cl_rx_get_rb(loader_ishtp_cl)) != NULL) {
+ /* Process the data packet from firmware */
+ process_recv(loader_ishtp_cl, rb_in_proc);
+ }
+}
+
+/**
+ * ish_query_loader_prop() - Query ISH Shim firmware loader
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ * @fw_info: Loader firmware properties
+ *
+ * This function queries the ISH Shim firmware loader for capabilities.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
+ const struct firmware *fw,
+ struct shim_fw_info *fw_info)
+{
+ int rv;
+ struct loader_xfer_query ldr_xfer_query;
+ struct loader_xfer_query_response ldr_xfer_query_resp;
+
+ memset(&ldr_xfer_query, 0, sizeof(ldr_xfer_query));
+ ldr_xfer_query.hdr.command = LOADER_CMD_XFER_QUERY;
+ ldr_xfer_query.image_size = fw->size;
+ rv = loader_cl_send(client_data,
+ (u8 *)&ldr_xfer_query,
+ sizeof(ldr_xfer_query),
+ (u8 *)&ldr_xfer_query_resp,
+ sizeof(ldr_xfer_query_resp));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ *fw_info = (struct shim_fw_info){};
+ return rv;
+ }
+
+ /* On success, the return value is the received buffer size */
+ if (rv != sizeof(struct loader_xfer_query_response)) {
+ dev_err(cl_data_to_dev(client_data),
+ "data size %d is not equal to size of loader_xfer_query_response %zu\n",
+ rv, sizeof(struct loader_xfer_query_response));
+ client_data->flag_retry = true;
+ *fw_info = (struct shim_fw_info){};
+ return -EMSGSIZE;
+ }
+
+ /* Save fw_info for use outside this function */
+ *fw_info = ldr_xfer_query_resp.fw_info;
+
+ /* Loader firmware properties */
+ dev_dbg(cl_data_to_dev(client_data),
+ "ish_fw_version: major=%d minor=%d hotfix=%d build=%d protocol_version=0x%x loader_version=%d\n",
+ fw_info->ish_fw_version.major,
+ fw_info->ish_fw_version.minor,
+ fw_info->ish_fw_version.hotfix,
+ fw_info->ish_fw_version.build,
+ fw_info->protocol_version,
+ fw_info->ldr_version.value);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "loader_capability: max_fw_image_size=0x%x xfer_mode=%d max_dma_buf_size=0x%x dma_buf_size_limit=0x%x\n",
+ fw_info->ldr_capability.max_fw_image_size,
+ fw_info->ldr_capability.xfer_mode,
+ fw_info->ldr_capability.max_dma_buf_size,
+ dma_buf_size_limit);
+
+ /* Sanity checks */
+ if (fw_info->ldr_capability.max_fw_image_size < fw->size) {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH firmware size %zu is greater than Shim firmware loader max supported %d\n",
+ fw->size,
+ fw_info->ldr_capability.max_fw_image_size);
+ return -ENOSPC;
+ }
+
+ /* For DMA the buffer size should be multiple of cacheline size */
+ if ((fw_info->ldr_capability.xfer_mode & LOADER_XFER_MODE_DIRECT_DMA) &&
+ (fw_info->ldr_capability.max_dma_buf_size % L1_CACHE_BYTES)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Shim firmware loader buffer size %d should be multiple of cacheline\n",
+ fw_info->ldr_capability.max_dma_buf_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_fw_xfer_ishtp() - Loads ISH firmware using ishtp interface
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ *
+ * This function uses ISH-TP to transfer ISH firmware from host to
+ * ISH SRAM. Lower layers may use IPC or DMA depending on firmware
+ * support.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_xfer_ishtp(struct ishtp_cl_data *client_data,
+ const struct firmware *fw)
+{
+ int rv;
+ u32 fragment_offset, fragment_size, payload_max_size;
+ struct loader_xfer_ipc_fragment *ldr_xfer_ipc_frag;
+ struct loader_msg_hdr ldr_xfer_ipc_ack;
+
+ payload_max_size =
+ LOADER_SHIM_IPC_BUF_SIZE - IPC_FRAGMENT_DATA_PREAMBLE;
+
+ ldr_xfer_ipc_frag = kzalloc(LOADER_SHIM_IPC_BUF_SIZE, GFP_KERNEL);
+ if (!ldr_xfer_ipc_frag) {
+ client_data->flag_retry = true;
+ return -ENOMEM;
+ }
+
+ ldr_xfer_ipc_frag->fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
+ ldr_xfer_ipc_frag->fragment.xfer_mode = LOADER_XFER_MODE_ISHTP;
+
+ /* Break the firmware image into fragments and send as ISH-TP payload */
+ fragment_offset = 0;
+ while (fragment_offset < fw->size) {
+ if (fragment_offset + payload_max_size < fw->size) {
+ fragment_size = payload_max_size;
+ ldr_xfer_ipc_frag->fragment.is_last = 0;
+ } else {
+ fragment_size = fw->size - fragment_offset;
+ ldr_xfer_ipc_frag->fragment.is_last = 1;
+ }
+
+ ldr_xfer_ipc_frag->fragment.offset = fragment_offset;
+ ldr_xfer_ipc_frag->fragment.size = fragment_size;
+ memcpy(ldr_xfer_ipc_frag->data,
+ &fw->data[fragment_offset],
+ fragment_size);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "xfer_mode=ipc offset=0x%08x size=0x%08x is_last=%d\n",
+ ldr_xfer_ipc_frag->fragment.offset,
+ ldr_xfer_ipc_frag->fragment.size,
+ ldr_xfer_ipc_frag->fragment.is_last);
+
+ rv = loader_cl_send(client_data,
+ (u8 *)ldr_xfer_ipc_frag,
+ IPC_FRAGMENT_DATA_PREAMBLE + fragment_size,
+ (u8 *)&ldr_xfer_ipc_ack,
+ sizeof(ldr_xfer_ipc_ack));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ goto end_err_resp_buf_release;
+ }
+
+ fragment_offset += fragment_size;
+ }
+
+ kfree(ldr_xfer_ipc_frag);
+ return 0;
+
+end_err_resp_buf_release:
+ /* Free ISH buffer if not done already, in error case */
+ kfree(ldr_xfer_ipc_frag);
+ return rv;
+}
+
+/**
+ * ish_fw_xfer_direct_dma() - Loads ISH firmware using direct dma
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ * @fw_info: Loader firmware properties
+ *
+ * Host firmware load is a unique case where we need to download
+ * a large firmware image (200+ Kb). This function implements
+ * direct DMA transfer in the kernel driver and ISH firmware. This
+ * allows us to overcome the ISH-TP 4 Kb limit and to DMA directly
+ * to ISH UMA at a location of choice.
+ * The function depends on corresponding support in ISH firmware.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
+ const struct firmware *fw,
+ const struct shim_fw_info fw_info)
+{
+ int rv;
+ void *dma_buf;
+ dma_addr_t dma_buf_phy;
+ u32 fragment_offset, fragment_size, payload_max_size;
+ struct loader_msg_hdr ldr_xfer_dma_frag_ack;
+ struct loader_xfer_dma_fragment ldr_xfer_dma_frag;
+ struct device *devc = ishtp_get_pci_device(client_data->cl_device);
+ u32 shim_fw_buf_size =
+ fw_info.ldr_capability.max_dma_buf_size;
+
+ /*
+ * payload_max_size should be set to minimum of
+ * (1) Size of firmware to be loaded,
+ * (2) Max DMA buffer size supported by Shim firmware,
+ * (3) DMA buffer size limit set by boot_param dma_buf_size_limit.
+ */
+ payload_max_size = min3(fw->size,
+ (size_t)shim_fw_buf_size,
+ (size_t)dma_buf_size_limit);
+
+ /*
+ * Buffer size should be multiple of cacheline size
+ * if it's not, select the previous cacheline boundary.
+ */
+ payload_max_size &= ~(L1_CACHE_BYTES - 1);
+
+ dma_buf = dma_alloc_coherent(devc, payload_max_size, &dma_buf_phy, GFP_KERNEL);
+ if (!dma_buf) {
+ client_data->flag_retry = true;
+ return -ENOMEM;
+ }
+
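+	/*
+	 * Fragment header fields that stay constant across the transfer are
+	 * set once here; the same DMA buffer (ddr_phys_addr) is reused for
+	 * every chunk, only offset, size, is_last and the buffer contents
+	 * change inside the loop below.
+	 */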
+ ldr_xfer_dma_frag.fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
+ ldr_xfer_dma_frag.fragment.xfer_mode = LOADER_XFER_MODE_DIRECT_DMA;
+ ldr_xfer_dma_frag.ddr_phys_addr = (u64)dma_buf_phy;
+
+	/* Send the firmware image in chunks of payload_max_size */
+ fragment_offset = 0;
+ while (fragment_offset < fw->size) {
+ if (fragment_offset + payload_max_size < fw->size) {
+ fragment_size = payload_max_size;
+ ldr_xfer_dma_frag.fragment.is_last = 0;
+ } else {
+ fragment_size = fw->size - fragment_offset;
+ ldr_xfer_dma_frag.fragment.is_last = 1;
+ }
+
+ ldr_xfer_dma_frag.fragment.offset = fragment_offset;
+ ldr_xfer_dma_frag.fragment.size = fragment_size;
+ memcpy(dma_buf, &fw->data[fragment_offset], fragment_size);
+
+ /* Flush cache to be sure the data is in main memory. */
+ clflush_cache_range(dma_buf, payload_max_size);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "xfer_mode=dma offset=0x%08x size=0x%x is_last=%d ddr_phys_addr=0x%016llx\n",
+ ldr_xfer_dma_frag.fragment.offset,
+ ldr_xfer_dma_frag.fragment.size,
+ ldr_xfer_dma_frag.fragment.is_last,
+ ldr_xfer_dma_frag.ddr_phys_addr);
+
+ rv = loader_cl_send(client_data,
+ (u8 *)&ldr_xfer_dma_frag,
+ sizeof(ldr_xfer_dma_frag),
+ (u8 *)&ldr_xfer_dma_frag_ack,
+ sizeof(ldr_xfer_dma_frag_ack));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ goto end_err_resp_buf_release;
+ }
+
+ fragment_offset += fragment_size;
+ }
+
+end_err_resp_buf_release:
+ dma_free_coherent(devc, payload_max_size, dma_buf, dma_buf_phy);
+ return rv;
+}
+
+/**
+ * ish_fw_start() - Start executing ISH main firmware
+ * @client_data: client data instance
+ *
+ * This function sends message to Shim firmware loader to start
+ * the execution of ISH main firmware.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_start(struct ishtp_cl_data *client_data)
+{
+ struct loader_start ldr_start;
+ struct loader_msg_hdr ldr_start_ack;
+
+ memset(&ldr_start, 0, sizeof(ldr_start));
+ ldr_start.hdr.command = LOADER_CMD_START;
+ return loader_cl_send(client_data,
+ (u8 *)&ldr_start,
+ sizeof(ldr_start),
+ (u8 *)&ldr_start_ack,
+ sizeof(ldr_start_ack));
+}
+
+/**
+ * load_fw_from_host() - Loads ISH firmware from host
+ * @client_data: Client data instance
+ *
+ * This function loads the ISH firmware to ISH SRAM and starts execution
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int load_fw_from_host(struct ishtp_cl_data *client_data)
+{
+ int rv;
+ u32 xfer_mode;
+ char *filename;
+ const struct firmware *fw;
+ struct shim_fw_info fw_info;
+ struct ishtp_cl *loader_ishtp_cl = client_data->loader_ishtp_cl;
+
+ client_data->flag_retry = false;
+
+ filename = kzalloc(FILENAME_SIZE, GFP_KERNEL);
+ if (!filename) {
+ client_data->flag_retry = true;
+ rv = -ENOMEM;
+ goto end_error;
+ }
+
+ /* Get filename of the ISH firmware to be loaded */
+ rv = get_firmware_variant(client_data, filename);
+ if (rv < 0)
+ goto end_err_filename_buf_release;
+
+ rv = request_firmware(&fw, filename, cl_data_to_dev(client_data));
+ if (rv < 0)
+ goto end_err_filename_buf_release;
+
+ /* Step 1: Query Shim firmware loader properties */
+
+ rv = ish_query_loader_prop(client_data, fw, &fw_info);
+ if (rv < 0)
+ goto end_err_fw_release;
+
+ /* Step 2: Send the main firmware image to be loaded, to ISH SRAM */
+
+ xfer_mode = fw_info.ldr_capability.xfer_mode;
+ if (xfer_mode & LOADER_XFER_MODE_DIRECT_DMA) {
+ rv = ish_fw_xfer_direct_dma(client_data, fw, fw_info);
+ } else if (xfer_mode & LOADER_XFER_MODE_ISHTP) {
+ rv = ish_fw_xfer_ishtp(client_data, fw);
+ } else {
+ dev_err(cl_data_to_dev(client_data),
+ "No transfer mode selected in firmware\n");
+ rv = -EINVAL;
+ }
+ if (rv < 0)
+ goto end_err_fw_release;
+
+	/* Step 3: Start ISH main firmware execution */
+
+ rv = ish_fw_start(client_data);
+ if (rv < 0)
+ goto end_err_fw_release;
+
+ release_firmware(fw);
+ dev_info(cl_data_to_dev(client_data), "ISH firmware %s loaded\n",
+ filename);
+ kfree(filename);
+ return 0;
+
+end_err_fw_release:
+ release_firmware(fw);
+end_err_filename_buf_release:
+ kfree(filename);
+end_error:
+ /* Keep a count of retries, and give up after 3 attempts */
+ if (client_data->flag_retry &&
+ client_data->retry_count++ < MAX_LOAD_ATTEMPTS) {
+ dev_warn(cl_data_to_dev(client_data),
+ "ISH host firmware load failed %d. Resetting ISH, and trying again..\n",
+ rv);
+ ish_hw_reset(ishtp_get_ishtp_device(loader_ishtp_cl));
+ } else {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH host firmware load failed %d\n", rv);
+ }
+ return rv;
+}
+
+static void load_fw_from_host_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data;
+
+ client_data = container_of(work, struct ishtp_cl_data,
+ work_fw_load);
+ load_fw_from_host(client_data);
+}
+
+/**
+ * loader_init() - Init function for ISH-TP client
+ * @loader_ishtp_cl: ISH-TP client instance
+ * @reset: true if called for init after reset
+ *
+ * Return: 0 for success, negative error code for failure
+ */
+static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset)
+{
+ int rv;
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_cl_data *client_data =
+ ishtp_get_client_data(loader_ishtp_cl);
+
+ dev_dbg(cl_data_to_dev(client_data), "reset flag: %d\n", reset);
+
+ rv = ishtp_cl_link(loader_ishtp_cl);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data), "ishtp_cl_link failed\n");
+ return rv;
+ }
+
+ /* Connect to firmware client */
+ ishtp_set_tx_ring_size(loader_ishtp_cl, LOADER_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(loader_ishtp_cl, LOADER_CL_RX_RING_SIZE);
+
+ fw_client =
+ ishtp_fw_cl_get_client(ishtp_get_ishtp_device(loader_ishtp_cl),
+ &loader_ishtp_id_table[0].guid);
+ if (!fw_client) {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH client uuid not found\n");
+ rv = -ENOENT;
+ goto err_cl_unlink;
+ }
+
+ ishtp_cl_set_fw_client_id(loader_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+ ishtp_set_connection_state(loader_ishtp_cl, ISHTP_CL_CONNECTING);
+
+ rv = ishtp_cl_connect(loader_ishtp_cl);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data), "Client connect fail\n");
+ goto err_cl_unlink;
+ }
+
+ dev_dbg(cl_data_to_dev(client_data), "Client connected\n");
+
+ ishtp_register_event_cb(client_data->cl_device, loader_cl_event_cb);
+
+ return 0;
+
+err_cl_unlink:
+ ishtp_cl_unlink(loader_ishtp_cl);
+ return rv;
+}
+
+static void loader_deinit(struct ishtp_cl *loader_ishtp_cl)
+{
+ ishtp_set_connection_state(loader_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(loader_ishtp_cl);
+ ishtp_cl_unlink(loader_ishtp_cl);
+ ishtp_cl_flush_queues(loader_ishtp_cl);
+
+ /* Disband and free all Tx and Rx client-level rings */
+ ishtp_cl_free(loader_ishtp_cl);
+}
+
+static void reset_handler(struct work_struct *work)
+{
+ int rv;
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+
+ client_data = container_of(work, struct ishtp_cl_data,
+ work_ishtp_reset);
+
+ loader_ishtp_cl = client_data->loader_ishtp_cl;
+ cl_device = client_data->cl_device;
+
+ /* Unlink, flush queues & start again */
+ ishtp_cl_unlink(loader_ishtp_cl);
+ ishtp_cl_flush_queues(loader_ishtp_cl);
+ ishtp_cl_free(loader_ishtp_cl);
+
+ loader_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!loader_ishtp_cl)
+ return;
+
+ ishtp_set_drvdata(cl_device, loader_ishtp_cl);
+ ishtp_set_client_data(loader_ishtp_cl, client_data);
+ client_data->loader_ishtp_cl = loader_ishtp_cl;
+ client_data->cl_device = cl_device;
+
+ rv = loader_init(loader_ishtp_cl, 1);
+ if (rv < 0) {
+ dev_err(ishtp_device(cl_device), "Reset Failed\n");
+ return;
+ }
+
+ /* ISH firmware loading from host */
+ load_fw_from_host(client_data);
+}
+
+/**
+ * loader_ishtp_cl_probe() - ISH-TP client driver probe
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device create on ISH-TP bus
+ *
+ * Return: 0 for success, negative error code for failure
+ */
+static int loader_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_data *client_data;
+ int rv;
+
+ client_data = devm_kzalloc(ishtp_device(cl_device),
+ sizeof(*client_data),
+ GFP_KERNEL);
+ if (!client_data)
+ return -ENOMEM;
+
+ loader_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!loader_ishtp_cl)
+ return -ENOMEM;
+
+ ishtp_set_drvdata(cl_device, loader_ishtp_cl);
+ ishtp_set_client_data(loader_ishtp_cl, client_data);
+ client_data->loader_ishtp_cl = loader_ishtp_cl;
+ client_data->cl_device = cl_device;
+
+ init_waitqueue_head(&client_data->response.wait_queue);
+
+ INIT_WORK(&client_data->work_ishtp_reset,
+ reset_handler);
+ INIT_WORK(&client_data->work_fw_load,
+ load_fw_from_host_handler);
+
+ rv = loader_init(loader_ishtp_cl, 0);
+ if (rv < 0) {
+ ishtp_cl_free(loader_ishtp_cl);
+ return rv;
+ }
+ ishtp_get_device(cl_device);
+
+ client_data->retry_count = 0;
+
+ /* ISH firmware loading from host */
+ schedule_work(&client_data->work_fw_load);
+
+ return 0;
+}
+
+/**
+ * loader_ishtp_cl_remove() - ISH-TP client driver remove
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device remove on ISH-TP bus
+ */
+static void loader_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ client_data = ishtp_get_client_data(loader_ishtp_cl);
+
+ /*
+ * The sequence of the following two cancel_work_sync() is
+	 * important. The work_fw_load can in turn schedule
+ * work_ishtp_reset, so first cancel work_fw_load then
+ * cancel work_ishtp_reset.
+ */
+ cancel_work_sync(&client_data->work_fw_load);
+ cancel_work_sync(&client_data->work_ishtp_reset);
+ loader_deinit(loader_ishtp_cl);
+ ishtp_put_device(cl_device);
+}
+
+/**
+ * loader_ishtp_cl_reset() - ISH-TP client driver reset
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device reset on ISH-TP bus
+ *
+ * Return: 0
+ */
+static int loader_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ client_data = ishtp_get_client_data(loader_ishtp_cl);
+
+ schedule_work(&client_data->work_ishtp_reset);
+
+ return 0;
+}
+
+static struct ishtp_cl_driver loader_ishtp_cl_driver = {
+ .name = "ish-loader",
+ .id = loader_ishtp_id_table,
+ .probe = loader_ishtp_cl_probe,
+ .remove = loader_ishtp_cl_remove,
+ .reset = loader_ishtp_cl_reset,
+};
+
+static int __init ish_loader_init(void)
+{
+ return ishtp_cl_driver_register(&loader_ishtp_cl_driver, THIS_MODULE);
+}
+
+static void __exit ish_loader_exit(void)
+{
+ ishtp_cl_driver_unregister(&loader_ishtp_cl_driver);
+}
+
+late_initcall(ish_loader_init);
+module_exit(ish_loader_exit);
+
+module_param(dma_buf_size_limit, int, 0644);
+MODULE_PARM_DESC(dma_buf_size_limit, "Limit the DMA buf size to this value in bytes");
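+
+/*
+ * Usage sketch (module name assumed, adjust to the actual loader module):
+ *   <module>.dma_buf_size_limit=65536 on the kernel command line, or write
+ *   /sys/module/<module>/parameters/dma_buf_size_limit at run time (0644).
+ */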
+
+MODULE_DESCRIPTION("ISH ISH-TP Host firmware Loader Client Driver");
+MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
new file mode 100644
index 000000000..e3d70c546
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -0,0 +1,986 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP client driver for HID (ISH)
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/hid.h>
+#include <linux/intel-ish-client-if.h>
+#include <linux/sched.h>
+#include "ishtp-hid.h"
+
+/* ISH Transport protocol (ISHTP in short) GUID */
+static const struct ishtp_device_id hid_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
+ 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26), },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, hid_ishtp_id_table);
+
+/* Rx/Tx ring buffer pool sizes */
+#define HID_CL_RX_RING_SIZE 32
+#define HID_CL_TX_RING_SIZE 16
+
+#define cl_data_to_dev(client_data) ishtp_device(client_data->cl_device)
+
+/**
+ * report_bad_packet() - Report bad packets
+ * @hid_ishtp_cl: Client instance to get stats
+ * @recv_buf: Raw received host interface message
+ * @cur_pos: Current position index in payload
+ * @payload_len: Length of payload expected
+ *
+ * Dumps error in case bad packet is received
+ */
+static void report_bad_packet(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
+ size_t cur_pos, size_t payload_len)
+{
+ struct hostif_msg *recv_msg = recv_buf;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
+
+ dev_err(cl_data_to_dev(client_data), "[hid-ish]: BAD packet %02X\n"
+ "total_bad=%u cur_pos=%u\n"
+ "[%02X %02X %02X %02X]\n"
+ "payload_len=%u\n"
+ "multi_packet_cnt=%u\n"
+ "is_response=%02X\n",
+ recv_msg->hdr.command, client_data->bad_recv_cnt,
+ (unsigned int)cur_pos,
+ ((unsigned char *)recv_msg)[0], ((unsigned char *)recv_msg)[1],
+ ((unsigned char *)recv_msg)[2], ((unsigned char *)recv_msg)[3],
+ (unsigned int)payload_len, client_data->multi_packet_cnt,
+ recv_msg->hdr.command & ~CMD_MASK);
+}
+
+/**
+ * process_recv() - Receive and parse incoming packet
+ * @hid_ishtp_cl: Client instance to get stats
+ * @recv_buf: Raw received host interface message
+ * @data_len: length of the message
+ *
+ * Parse the incoming packet. If it is a response packet then it will update
+ * per instance flags and wake up the caller waiting for the response.
+ */
+static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
+ size_t data_len)
+{
+ struct hostif_msg *recv_msg;
+ unsigned char *payload;
+ struct device_info *dev_info;
+ int i, j;
+ size_t payload_len, total_len, cur_pos, raw_len;
+ int report_type;
+ struct report_list *reports_list;
+ char *reports;
+ size_t report_len;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
+ int curr_hid_dev = client_data->cur_hid_dev;
+ struct ishtp_hid_data *hid_data = NULL;
+ struct hid_device *hid = NULL;
+
+ payload = recv_buf + sizeof(struct hostif_msg_hdr);
+ total_len = data_len;
+ cur_pos = 0;
+
+ do {
+ if (cur_pos + sizeof(struct hostif_msg) > total_len) {
+ dev_err(cl_data_to_dev(client_data),
+ "[hid-ish]: error, received %u which is less than data header %u\n",
+ (unsigned int)data_len,
+ (unsigned int)sizeof(struct hostif_msg_hdr));
+ ++client_data->bad_recv_cnt;
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
+ break;
+ }
+
+ recv_msg = (struct hostif_msg *)(recv_buf + cur_pos);
+ payload_len = recv_msg->hdr.size;
+
+ /* Sanity checks */
+ if (cur_pos + payload_len + sizeof(struct hostif_msg) >
+ total_len) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
+ payload_len);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
+ break;
+ }
+
+ hid_ishtp_trace(client_data, "%s %d\n",
+ __func__, recv_msg->hdr.command & CMD_MASK);
+
+ switch (recv_msg->hdr.command & CMD_MASK) {
+ case HOSTIF_DM_ENUM_DEVICES:
+ if ((!(recv_msg->hdr.command & ~CMD_MASK) ||
+ client_data->init_done)) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg,
+ cur_pos,
+ payload_len);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
+ break;
+ }
+ client_data->hid_dev_count = (unsigned int)*payload;
+ if (!client_data->hid_devices)
+ client_data->hid_devices = devm_kcalloc(
+ cl_data_to_dev(client_data),
+ client_data->hid_dev_count,
+ sizeof(struct device_info),
+ GFP_KERNEL);
+ if (!client_data->hid_devices) {
+ dev_err(cl_data_to_dev(client_data),
+ "Mem alloc failed for hid device info\n");
+ wake_up_interruptible(&client_data->init_wait);
+ break;
+ }
+ for (i = 0; i < client_data->hid_dev_count; ++i) {
+ if (1 + sizeof(struct device_info) * i >=
+ payload_len) {
+ dev_err(cl_data_to_dev(client_data),
+ "[hid-ish]: [ENUM_DEVICES]: content size %zu is bigger than payload_len %zu\n",
+ 1 + sizeof(struct device_info)
+ * i, payload_len);
+ }
+
+ if (1 + sizeof(struct device_info) * i >=
+ data_len)
+ break;
+
+ dev_info = (struct device_info *)(payload + 1 +
+ sizeof(struct device_info) * i);
+ if (client_data->hid_devices)
+ memcpy(client_data->hid_devices + i,
+ dev_info,
+ sizeof(struct device_info));
+ }
+
+ client_data->enum_devices_done = true;
+ wake_up_interruptible(&client_data->init_wait);
+
+ break;
+
+ case HOSTIF_GET_HID_DESCRIPTOR:
+ if ((!(recv_msg->hdr.command & ~CMD_MASK) ||
+ client_data->init_done)) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg,
+ cur_pos,
+ payload_len);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
+ break;
+ }
+ if (!client_data->hid_descr[curr_hid_dev])
+ client_data->hid_descr[curr_hid_dev] =
+ devm_kmalloc(cl_data_to_dev(client_data),
+ payload_len, GFP_KERNEL);
+ if (client_data->hid_descr[curr_hid_dev]) {
+ memcpy(client_data->hid_descr[curr_hid_dev],
+ payload, payload_len);
+ client_data->hid_descr_size[curr_hid_dev] =
+ payload_len;
+ client_data->hid_descr_done = true;
+ }
+ wake_up_interruptible(&client_data->init_wait);
+
+ break;
+
+ case HOSTIF_GET_REPORT_DESCRIPTOR:
+ if ((!(recv_msg->hdr.command & ~CMD_MASK) ||
+ client_data->init_done)) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg,
+ cur_pos,
+ payload_len);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
+ break;
+ }
+ if (!client_data->report_descr[curr_hid_dev])
+ client_data->report_descr[curr_hid_dev] =
+ devm_kmalloc(cl_data_to_dev(client_data),
+ payload_len, GFP_KERNEL);
+ if (client_data->report_descr[curr_hid_dev]) {
+ memcpy(client_data->report_descr[curr_hid_dev],
+ payload,
+ payload_len);
+ client_data->report_descr_size[curr_hid_dev] =
+ payload_len;
+ client_data->report_descr_done = true;
+ }
+ wake_up_interruptible(&client_data->init_wait);
+
+ break;
+
+ case HOSTIF_GET_FEATURE_REPORT:
+ report_type = HID_FEATURE_REPORT;
+ goto do_get_report;
+
+ case HOSTIF_GET_INPUT_REPORT:
+ report_type = HID_INPUT_REPORT;
+do_get_report:
+ /* Get index of device that matches this id */
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id) {
+ hid = client_data->hid_sensor_hubs[i];
+ if (!hid)
+ break;
+
+ hid_data = hid->driver_data;
+ if (hid_data->raw_get_req) {
+ raw_len =
+ (hid_data->raw_buf_size <
+ payload_len) ?
+ hid_data->raw_buf_size :
+ payload_len;
+
+ memcpy(hid_data->raw_buf,
+ payload, raw_len);
+ } else {
+ hid_input_report
+ (hid, report_type,
+ payload, payload_len,
+ 0);
+ }
+
+ ishtp_hid_wakeup(hid);
+ break;
+ }
+ }
+ break;
+
+ case HOSTIF_SET_FEATURE_REPORT:
+ /* Get index of device that matches this id */
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id)
+ if (client_data->hid_sensor_hubs[i]) {
+ ishtp_hid_wakeup(
+ client_data->hid_sensor_hubs[
+ i]);
+ break;
+ }
+ }
+ break;
+
+ case HOSTIF_PUBLISH_INPUT_REPORT:
+ report_type = HID_INPUT_REPORT;
+ for (i = 0; i < client_data->num_hid_devices; ++i)
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id)
+ if (client_data->hid_sensor_hubs[i])
+ hid_input_report(
+ client_data->hid_sensor_hubs[
+ i],
+ report_type, payload,
+ payload_len, 0);
+ break;
+
+ case HOSTIF_PUBLISH_INPUT_REPORT_LIST:
+ report_type = HID_INPUT_REPORT;
+ reports_list = (struct report_list *)payload;
+ reports = (char *)reports_list->reports;
+
+ for (j = 0; j < reports_list->num_of_reports; j++) {
+ recv_msg = (struct hostif_msg *)(reports +
+ sizeof(uint16_t));
+ report_len = *(uint16_t *)reports;
+ payload = reports + sizeof(uint16_t) +
+ sizeof(struct hostif_msg_hdr);
+ payload_len = report_len -
+ sizeof(struct hostif_msg_hdr);
+
+ for (i = 0; i < client_data->num_hid_devices;
+ ++i)
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id &&
+ client_data->hid_sensor_hubs[i]) {
+ hid_input_report(
+ client_data->hid_sensor_hubs[
+ i],
+ report_type,
+ payload, payload_len,
+ 0);
+ }
+
+ reports += sizeof(uint16_t) + report_len;
+ }
+ break;
+ default:
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
+ payload_len);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
+ break;
+
+ }
+
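+		/*
+		 * A single receive buffer may bundle several hostif messages;
+		 * count such multi-packet buffers once, on the first
+		 * iteration (cur_pos == 0).
+		 */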
+ if (!cur_pos && cur_pos + payload_len +
+ sizeof(struct hostif_msg) < total_len)
+ ++client_data->multi_packet_cnt;
+
+ cur_pos += payload_len + sizeof(struct hostif_msg);
+ payload += payload_len + sizeof(struct hostif_msg);
+
+ } while (cur_pos < total_len);
+}
+
+/**
+ * ish_cl_event_cb() - bus driver callback for incoming message/packet
+ * @device: Pointer to the ishtp client device for which this message
+ * is targeted
+ *
+ * Remove the packet from the list and process the message by calling
+ * process_recv
+ */
+static void ish_cl_event_cb(struct ishtp_cl_device *device)
+{
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(device);
+ struct ishtp_cl_rb *rb_in_proc;
+ size_t r_length;
+
+ if (!hid_ishtp_cl)
+ return;
+
+ while ((rb_in_proc = ishtp_cl_rx_get_rb(hid_ishtp_cl)) != NULL) {
+ if (!rb_in_proc->buffer.data)
+ return;
+
+ r_length = rb_in_proc->buf_idx;
+
+ /* decide what to do with received data */
+ process_recv(hid_ishtp_cl, rb_in_proc->buffer.data, r_length);
+
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ }
+}
+
+/**
+ * hid_ishtp_set_feature() - send request to ISH FW to set a feature report
+ * @hid: hid device instance for this request
+ * @buf: feature buffer
+ * @len: Length of feature buffer
+ * @report_id: Report id for the feature set request
+ *
+ * This is called from hid core .request() callback. This function doesn't wait
+ * for response.
+ */
+void hid_ishtp_set_feature(struct hid_device *hid, char *buf, unsigned int len,
+ int report_id)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ struct hostif_msg *msg = (struct hostif_msg *)buf;
+ int rv;
+ int i;
+
+ hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
+
+ rv = ishtp_hid_link_ready_wait(client_data);
+ if (rv) {
+ hid_ishtp_trace(client_data, "%s hid %p link not ready\n",
+ __func__, hid);
+ return;
+ }
+
+ memset(msg, 0, sizeof(struct hostif_msg));
+ msg->hdr.command = HOSTIF_SET_FEATURE_REPORT;
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (hid == client_data->hid_sensor_hubs[i]) {
+ msg->hdr.device_id =
+ client_data->hid_devices[i].dev_id;
+ break;
+ }
+ }
+
+ if (i == client_data->num_hid_devices)
+ return;
+
+ rv = ishtp_cl_send(client_data->hid_ishtp_cl, buf, len);
+ if (rv)
+ hid_ishtp_trace(client_data, "%s hid %p send failed\n",
+ __func__, hid);
+}
+
+/**
+ * hid_ishtp_get_report() - request to get feature/input report
+ * @hid: hid device instance for this request
+ * @report_id: Report id for the get request
+ * @report_type: Report type for this request
+ *
+ * This is called from hid core .request() callback. This function will send
+ * request to FW and return without waiting for response.
+ */
+void hid_ishtp_get_report(struct hid_device *hid, int report_id,
+ int report_type)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ struct hostif_msg_to_sensor msg = {};
+ int rv;
+ int i;
+
+ hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
+ rv = ishtp_hid_link_ready_wait(client_data);
+ if (rv) {
+ hid_ishtp_trace(client_data, "%s hid %p link not ready\n",
+ __func__, hid);
+ return;
+ }
+
+ msg.hdr.command = (report_type == HID_FEATURE_REPORT) ?
+ HOSTIF_GET_FEATURE_REPORT : HOSTIF_GET_INPUT_REPORT;
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (hid == client_data->hid_sensor_hubs[i]) {
+ msg.hdr.device_id =
+ client_data->hid_devices[i].dev_id;
+ break;
+ }
+ }
+
+ if (i == client_data->num_hid_devices)
+ return;
+
+ msg.report_id = report_id;
+ rv = ishtp_cl_send(client_data->hid_ishtp_cl, (uint8_t *)&msg,
+ sizeof(msg));
+ if (rv)
+ hid_ishtp_trace(client_data, "%s hid %p send failed\n",
+ __func__, hid);
+}
+
+/**
+ * ishtp_hid_link_ready_wait() - Wait for link ready
+ * @client_data: client data instance
+ *
+ * If the transport link has started the suspend process, wait until it
+ * either resumes or a timeout expires.
+ *
+ * Return: 0 on success, non zero on error
+ */
+int ishtp_hid_link_ready_wait(struct ishtp_cl_data *client_data)
+{
+ int rc;
+
+ if (client_data->suspended) {
+ hid_ishtp_trace(client_data, "wait for link ready\n");
+ rc = wait_event_interruptible_timeout(
+ client_data->ishtp_resume_wait,
+ !client_data->suspended,
+ 5 * HZ);
+
+ if (rc == 0) {
+ hid_ishtp_trace(client_data, "link not ready\n");
+ return -EIO;
+ }
+ hid_ishtp_trace(client_data, "link ready\n");
+ }
+
+ return 0;
+}
+
+/**
+ * ishtp_enum_enum_devices() - Enumerate hid devices
+ * @hid_ishtp_cl: client instance
+ *
+ * Helper function to send request to firmware to enumerate HID devices
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
+{
+ struct hostif_msg msg;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
+ int retry_count;
+ int rv;
+
+ /* Send HOSTIF_DM_ENUM_DEVICES */
+ memset(&msg, 0, sizeof(struct hostif_msg));
+ msg.hdr.command = HOSTIF_DM_ENUM_DEVICES;
+ rv = ishtp_cl_send(hid_ishtp_cl, (unsigned char *)&msg,
+ sizeof(struct hostif_msg));
+ if (rv)
+ return rv;
+
+ retry_count = 0;
+ while (!client_data->enum_devices_done &&
+ retry_count < 10) {
+ wait_event_interruptible_timeout(client_data->init_wait,
+ client_data->enum_devices_done,
+ 3 * HZ);
+ ++retry_count;
+ if (!client_data->enum_devices_done)
+ /* Send HOSTIF_DM_ENUM_DEVICES */
+ rv = ishtp_cl_send(hid_ishtp_cl,
+ (unsigned char *) &msg,
+ sizeof(struct hostif_msg));
+ }
+ if (!client_data->enum_devices_done) {
+ dev_err(cl_data_to_dev(client_data),
+ "[hid-ish]: timed out waiting for enum_devices\n");
+ return -ETIMEDOUT;
+ }
+ if (!client_data->hid_devices) {
+ dev_err(cl_data_to_dev(client_data),
+ "[hid-ish]: failed to allocate HID dev structures\n");
+ return -ENOMEM;
+ }
+
+ client_data->num_hid_devices = client_data->hid_dev_count;
+ dev_info(ishtp_device(client_data->cl_device),
+ "[hid-ish]: enum_devices_done OK, num_hid_devices=%d\n",
+ client_data->num_hid_devices);
+
+ return 0;
+}
+
+/**
+ * ishtp_get_hid_descriptor() - Get hid descriptor
+ * @hid_ishtp_cl: client instance
+ * @index: Index into the hid_descr array
+ *
+ * Helper function to send a request to firmware to get the HID descriptor
+ * of a device
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int ishtp_get_hid_descriptor(struct ishtp_cl *hid_ishtp_cl, int index)
+{
+ struct hostif_msg msg;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
+ int rv;
+
+ /* Get HID descriptor */
+ client_data->hid_descr_done = false;
+ memset(&msg, 0, sizeof(struct hostif_msg));
+ msg.hdr.command = HOSTIF_GET_HID_DESCRIPTOR;
+ msg.hdr.device_id = client_data->hid_devices[index].dev_id;
+ rv = ishtp_cl_send(hid_ishtp_cl, (unsigned char *) &msg,
+ sizeof(struct hostif_msg));
+ if (rv)
+ return rv;
+
+ if (!client_data->hid_descr_done) {
+ wait_event_interruptible_timeout(client_data->init_wait,
+ client_data->hid_descr_done,
+ 3 * HZ);
+ if (!client_data->hid_descr_done) {
+ dev_err(cl_data_to_dev(client_data),
+ "[hid-ish]: timed out for hid_descr_done\n");
+ return -EIO;
+ }
+
+ if (!client_data->hid_descr[index]) {
+ dev_err(cl_data_to_dev(client_data),
+ "[hid-ish]: allocation HID desc fail\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ishtp_get_report_descriptor() - Get report descriptor
+ * @hid_ishtp_cl: client instance
+ * @index: Index into the hid_descr array
+ *
+ * Helper function to send a request to firmware to get the HID report
+ * descriptor of a device
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
+ int index)
+{
+ struct hostif_msg msg;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
+ int rv;
+
+ /* Get report descriptor */
+ client_data->report_descr_done = false;
+ memset(&msg, 0, sizeof(struct hostif_msg));
+ msg.hdr.command = HOSTIF_GET_REPORT_DESCRIPTOR;
+ msg.hdr.device_id = client_data->hid_devices[index].dev_id;
+ rv = ishtp_cl_send(hid_ishtp_cl, (unsigned char *) &msg,
+ sizeof(struct hostif_msg));
+ if (rv)
+ return rv;
+
+ if (!client_data->report_descr_done)
+ wait_event_interruptible_timeout(client_data->init_wait,
+ client_data->report_descr_done,
+ 3 * HZ);
+ if (!client_data->report_descr_done) {
+ dev_err(cl_data_to_dev(client_data),
+ "[hid-ish]: timed out for report descr\n");
+ return -EIO;
+ }
+ if (!client_data->report_descr[index]) {
+ dev_err(cl_data_to_dev(client_data),
+ "[hid-ish]: failed to alloc report descr\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_init() - Init function for ISHTP client
+ * @hid_ishtp_cl: ISHTP client instance
+ * @reset: true if called for init after reset
+ *
+ * This function completes the initialization of the client. The summary of
+ * processing:
+ * - Send request to enumerate the hid clients
+ * - Get the HID descriptor for each enumerated device
+ * - Get the report descriptor of each device
+ * - Register each device with hid core by calling ishtp_hid_probe
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
+ struct ishtp_fw_client *fw_client;
+ int i;
+ int rv;
+
+ dev_dbg(cl_data_to_dev(client_data), "%s\n", __func__);
+ hid_ishtp_trace(client_data, "%s reset flag: %d\n", __func__, reset);
+
+ rv = ishtp_cl_link(hid_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(client_data),
+ "ishtp_cl_link failed\n");
+ return -ENOMEM;
+ }
+
+ client_data->init_done = 0;
+
+ dev = ishtp_get_ishtp_device(hid_ishtp_cl);
+
+ /* Connect to FW client */
+ ishtp_set_tx_ring_size(hid_ishtp_cl, HID_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(hid_ishtp_cl, HID_CL_RX_RING_SIZE);
+
+ fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_id_table[0].guid);
+ if (!fw_client) {
+ dev_err(cl_data_to_dev(client_data),
+ "ish client uuid not found\n");
+ return -ENOENT;
+ }
+ ishtp_cl_set_fw_client_id(hid_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_CONNECTING);
+
+ rv = ishtp_cl_connect(hid_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(client_data),
+ "client connect fail\n");
+ goto err_cl_unlink;
+ }
+
+ hid_ishtp_trace(client_data, "%s client connected\n", __func__);
+
+ /* Register read callback */
+ ishtp_register_event_cb(client_data->cl_device, ish_cl_event_cb);
+
+ rv = ishtp_enum_enum_devices(hid_ishtp_cl);
+ if (rv)
+ goto err_cl_disconnect;
+
+ hid_ishtp_trace(client_data, "%s enumerated device count %d\n",
+ __func__, client_data->num_hid_devices);
+
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ client_data->cur_hid_dev = i;
+
+ rv = ishtp_get_hid_descriptor(hid_ishtp_cl, i);
+ if (rv)
+ goto err_cl_disconnect;
+
+ rv = ishtp_get_report_descriptor(hid_ishtp_cl, i);
+ if (rv)
+ goto err_cl_disconnect;
+
+ if (!reset) {
+ rv = ishtp_hid_probe(i, client_data);
+ if (rv) {
+ dev_err(cl_data_to_dev(client_data),
+ "[hid-ish]: HID probe for #%u failed: %d\n",
+ i, rv);
+ goto err_cl_disconnect;
+ }
+ }
+ } /* for() on all hid devices */
+
+ client_data->init_done = 1;
+ client_data->suspended = false;
+ wake_up_interruptible(&client_data->ishtp_resume_wait);
+ hid_ishtp_trace(client_data, "%s successful init\n", __func__);
+ return 0;
+
+err_cl_disconnect:
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(hid_ishtp_cl);
+err_cl_unlink:
+ ishtp_cl_unlink(hid_ishtp_cl);
+ return rv;
+}
+
+/**
+ * hid_ishtp_cl_deinit() - Deinit function for ISHTP client
+ * @hid_ishtp_cl: ISHTP client instance
+ *
+ * Unlink and free hid client
+ */
+static void hid_ishtp_cl_deinit(struct ishtp_cl *hid_ishtp_cl)
+{
+ ishtp_cl_unlink(hid_ishtp_cl);
+ ishtp_cl_flush_queues(hid_ishtp_cl);
+
+ /* disband and free all Tx and Rx client-level rings */
+ ishtp_cl_free(hid_ishtp_cl);
+}
+
+static void hid_ishtp_cl_reset_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *hid_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+ int retry;
+ int rv;
+
+ client_data = container_of(work, struct ishtp_cl_data, work);
+
+ hid_ishtp_cl = client_data->hid_ishtp_cl;
+ cl_device = client_data->cl_device;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+ dev_dbg(ishtp_device(client_data->cl_device), "%s\n", __func__);
+
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
+
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!hid_ishtp_cl)
+ return;
+
+ ishtp_set_drvdata(cl_device, hid_ishtp_cl);
+ ishtp_set_client_data(hid_ishtp_cl, client_data);
+ client_data->hid_ishtp_cl = hid_ishtp_cl;
+
+ client_data->num_hid_devices = 0;
+
+ for (retry = 0; retry < 3; ++retry) {
+ rv = hid_ishtp_cl_init(hid_ishtp_cl, 1);
+ if (!rv)
+ break;
+ dev_err(cl_data_to_dev(client_data), "Retry reset init\n");
+ }
+ if (rv) {
+ dev_err(cl_data_to_dev(client_data), "Reset Failed\n");
+ hid_ishtp_trace(client_data, "%s Failed hid_ishtp_cl %p\n",
+ __func__, hid_ishtp_cl);
+ }
+}
+
+static void hid_ishtp_cl_resume_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data = container_of(work, struct ishtp_cl_data, resume_work);
+ struct ishtp_cl *hid_ishtp_cl = client_data->hid_ishtp_cl;
+
+ if (ishtp_wait_resume(ishtp_get_ishtp_device(hid_ishtp_cl))) {
+ client_data->suspended = false;
+ wake_up_interruptible(&client_data->ishtp_resume_wait);
+ }
+}
+
+ishtp_print_log ishtp_hid_print_trace;
+
+/**
+ * hid_ishtp_cl_probe() - ISHTP client driver probe
+ * @cl_device: ISHTP client device instance
+ *
+ * This function gets called on device create on ISHTP bus
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *hid_ishtp_cl;
+ struct ishtp_cl_data *client_data;
+ int rv;
+
+ if (!cl_device)
+ return -ENODEV;
+
+ client_data = devm_kzalloc(ishtp_device(cl_device),
+ sizeof(*client_data),
+ GFP_KERNEL);
+ if (!client_data)
+ return -ENOMEM;
+
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!hid_ishtp_cl)
+ return -ENOMEM;
+
+ ishtp_set_drvdata(cl_device, hid_ishtp_cl);
+ ishtp_set_client_data(hid_ishtp_cl, client_data);
+ client_data->hid_ishtp_cl = hid_ishtp_cl;
+ client_data->cl_device = cl_device;
+
+ init_waitqueue_head(&client_data->init_wait);
+ init_waitqueue_head(&client_data->ishtp_resume_wait);
+
+ INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
+ INIT_WORK(&client_data->resume_work, hid_ishtp_cl_resume_handler);
+
+
+ ishtp_hid_print_trace = ishtp_trace_callback(cl_device);
+
+ rv = hid_ishtp_cl_init(hid_ishtp_cl, 0);
+ if (rv) {
+ ishtp_cl_free(hid_ishtp_cl);
+ return rv;
+ }
+ ishtp_get_device(cl_device);
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_remove() - ISHTP client driver remove
+ * @cl_device: ISHTP client device instance
+ *
+ * This function gets called on device remove on ISHTP bus
+ */
+static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+
+ dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(hid_ishtp_cl);
+ ishtp_put_device(cl_device);
+ ishtp_hid_remove(client_data);
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
+
+ hid_ishtp_cl = NULL;
+
+ client_data->num_hid_devices = 0;
+}
+
+/**
+ * hid_ishtp_cl_reset() - ISHTP client driver reset
+ * @cl_device: ISHTP client device instance
+ *
+ * This function gets called on device reset on ISHTP bus
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+
+ schedule_work(&client_data->work);
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_suspend() - ISHTP client driver suspend
+ * @device: device instance
+ *
+ * This function gets called on system suspend
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_suspend(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+ client_data->suspended = true;
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_resume() - ISHTP client driver resume
+ * @device: device instance
+ *
+ * This function gets called on system resume
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_resume(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+ schedule_work(&client_data->resume_work);
+ return 0;
+}
+
+static const struct dev_pm_ops hid_ishtp_pm_ops = {
+ .suspend = hid_ishtp_cl_suspend,
+ .resume = hid_ishtp_cl_resume,
+};
+
+static struct ishtp_cl_driver hid_ishtp_cl_driver = {
+ .name = "ish-hid",
+ .id = hid_ishtp_id_table,
+ .probe = hid_ishtp_cl_probe,
+ .remove = hid_ishtp_cl_remove,
+ .reset = hid_ishtp_cl_reset,
+ .driver.pm = &hid_ishtp_pm_ops,
+};
+
+static int __init ish_hid_init(void)
+{
+ int rv;
+
+ /* Register ISHTP client device driver with ISHTP Bus */
+ rv = ishtp_cl_driver_register(&hid_ishtp_cl_driver, THIS_MODULE);
+
+ return rv;
+
+}
+
+static void __exit ish_hid_exit(void)
+{
+ ishtp_cl_driver_unregister(&hid_ishtp_cl_driver);
+}
+
+late_initcall(ish_hid_init);
+module_exit(ish_hid_exit);
+
+MODULE_DESCRIPTION("ISH ISHTP HID client driver");
+/* Primary author */
+MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>");
+/*
+ * Several modifications for multi-instance support,
+ * suspend/resume and cleanup
+ */
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
new file mode 100644
index 000000000..14c271d7d
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP-HID glue driver.
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ */
+
+#include <linux/hid.h>
+#include <linux/intel-ish-client-if.h>
+#include <uapi/linux/input.h>
+#include "ishtp-hid.h"
+
+/**
+ * ishtp_hid_parse() - hid-core .parse() callback
+ * @hid: hid device instance
+ *
+ * This function gets called during call to hid_add_device
+ *
+ * Return: 0 on success and non zero on error
+ */
+static int ishtp_hid_parse(struct hid_device *hid)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ int rv;
+
+ rv = hid_parse_report(hid, client_data->report_descr[hid_data->index],
+ client_data->report_descr_size[hid_data->index]);
+ if (rv)
+ return rv;
+
+ return 0;
+}
+
+/* Empty callbacks with success return code */
+static int ishtp_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void ishtp_hid_stop(struct hid_device *hid)
+{
+}
+
+static int ishtp_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void ishtp_hid_close(struct hid_device *hid)
+{
+}
+
+static int ishtp_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ char *ishtp_buf = NULL;
+ size_t ishtp_buf_len;
+ unsigned int header_size = sizeof(struct hostif_msg);
+
+ if (rtype == HID_OUTPUT_REPORT)
+ return -EINVAL;
+
+ hid_data->request_done = false;
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ hid_data->raw_buf = buf;
+ hid_data->raw_buf_size = len;
+ hid_data->raw_get_req = true;
+
+ hid_ishtp_get_report(hid, reportnum, rtype);
+ break;
+ case HID_REQ_SET_REPORT:
+ /*
+ * Spare 7 bytes for 64b accesses through
+ * get/put_unaligned_le64()
+ */
+ ishtp_buf_len = len + header_size;
+ ishtp_buf = kzalloc(ishtp_buf_len + 7, GFP_KERNEL);
+ if (!ishtp_buf)
+ return -ENOMEM;
+
+ memcpy(ishtp_buf + header_size, buf, len);
+ hid_ishtp_set_feature(hid, ishtp_buf, ishtp_buf_len, reportnum);
+ kfree(ishtp_buf);
+ break;
+ }
+
+ hid_hw_wait(hid);
+
+ return len;
+}
+
+/**
+ * ishtp_hid_request() - hid-core .request() callback
+ * @hid: hid device instance
+ * @rep: pointer to hid_report
+ * @reqtype: type of req. [GET|SET]_REPORT
+ *
+ * This function is used to set/get a feature/input report.
+ */
+static void ishtp_hid_request(struct hid_device *hid, struct hid_report *rep,
+ int reqtype)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ /* the specific report length, just HID part of it */
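+	/* rep->size is in bits: round up to bytes, +1 byte for a non-zero report ID */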
+ unsigned int len = ((rep->size - 1) >> 3) + 1 + (rep->id > 0);
+ char *buf;
+ unsigned int header_size = sizeof(struct hostif_msg);
+
+ len += header_size;
+
+ hid_data->request_done = false;
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ hid_data->raw_get_req = false;
+ hid_ishtp_get_report(hid, rep->id, rep->type);
+ break;
+ case HID_REQ_SET_REPORT:
+ /*
+ * Spare 7 bytes for 64b accesses through
+ * get/put_unaligned_le64()
+ */
+ buf = kzalloc(len + 7, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ hid_output_report(rep, buf + header_size);
+ hid_ishtp_set_feature(hid, buf, len, rep->id);
+ kfree(buf);
+ break;
+ }
+}
+
+/**
+ * ishtp_wait_for_response() - hid-core .wait() callback
+ * @hid: hid device instance
+ *
+ * This function is used to wait after get feature/input report.
+ *
+ * Return: 0 on success and non zero on error
+ */
+static int ishtp_wait_for_response(struct hid_device *hid)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ int rv;
+
+ hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
+
+ rv = ishtp_hid_link_ready_wait(hid_data->client_data);
+ if (rv)
+ return rv;
+
+ if (!hid_data->request_done)
+ wait_event_interruptible_timeout(hid_data->hid_wait,
+ hid_data->request_done, 3 * HZ);
+
+ if (!hid_data->request_done) {
+ hid_err(hid,
+ "timeout waiting for response from ISHTP device\n");
+ return -ETIMEDOUT;
+ }
+ hid_ishtp_trace(client_data, "%s hid %p done\n", __func__, hid);
+
+ hid_data->request_done = false;
+
+ return 0;
+}
+
+/**
+ * ishtp_hid_wakeup() - Wakeup caller
+ * @hid: hid device instance
+ *
+ * This function will wake up a caller waiting for a Get/Set feature report
+ */
+void ishtp_hid_wakeup(struct hid_device *hid)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+
+ hid_data->request_done = true;
+ wake_up_interruptible(&hid_data->hid_wait);
+}
+
+static struct hid_ll_driver ishtp_hid_ll_driver = {
+ .parse = ishtp_hid_parse,
+ .start = ishtp_hid_start,
+ .stop = ishtp_hid_stop,
+ .open = ishtp_hid_open,
+ .close = ishtp_hid_close,
+ .request = ishtp_hid_request,
+ .wait = ishtp_wait_for_response,
+ .raw_request = ishtp_raw_request
+};
+
+/**
+ * ishtp_hid_probe() - Register a HID device for an enumerated sensor hub
+ * @cur_hid_dev:	Index of the HID device to register
+ * @client_data:	Client data pointer
+ *
+ * This function is used to allocate and add a HID device.
+ *
+ * Return: 0 on success, non zero on error
+ */
+int ishtp_hid_probe(unsigned int cur_hid_dev,
+ struct ishtp_cl_data *client_data)
+{
+ int rv;
+ struct hid_device *hid;
+ struct ishtp_hid_data *hid_data;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid_data = kzalloc(sizeof(*hid_data), GFP_KERNEL);
+ if (!hid_data) {
+ rv = -ENOMEM;
+ goto err_hid_data;
+ }
+
+ hid_data->index = cur_hid_dev;
+ hid_data->client_data = client_data;
+ init_waitqueue_head(&hid_data->hid_wait);
+
+ hid->driver_data = hid_data;
+
+ client_data->hid_sensor_hubs[cur_hid_dev] = hid;
+
+ hid->ll_driver = &ishtp_hid_ll_driver;
+ hid->bus = BUS_INTEL_ISHTP;
+ hid->dev.parent = ishtp_device(client_data->cl_device);
+
+ hid->version = le16_to_cpu(ISH_HID_VERSION);
+ hid->vendor = le16_to_cpu(client_data->hid_devices[cur_hid_dev].vid);
+ hid->product = le16_to_cpu(client_data->hid_devices[cur_hid_dev].pid);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "hid-ishtp",
+ hid->vendor, hid->product);
+
+ rv = hid_add_device(hid);
+ if (rv)
+ goto err_hid_device;
+
+ hid_ishtp_trace(client_data, "%s allocated hid %p\n", __func__, hid);
+
+ return 0;
+
+err_hid_device:
+ kfree(hid_data);
+err_hid_data:
+ hid_destroy_device(hid);
+ return rv;
+}
+
+/**
+ * ishtp_hid_remove() - Remove registered hid device
+ * @client_data: client data pointer
+ *
+ * This function is used to destroy the allocated HID devices.
+ */
+void ishtp_hid_remove(struct ishtp_cl_data *client_data)
+{
+ int i;
+
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (client_data->hid_sensor_hubs[i]) {
+ kfree(client_data->hid_sensor_hubs[i]->driver_data);
+ hid_destroy_device(client_data->hid_sensor_hubs[i]);
+ client_data->hid_sensor_hubs[i] = NULL;
+ }
+ }
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
new file mode 100644
index 000000000..35dddc501
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ISHTP-HID glue driver's definitions.
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ */
+#ifndef ISHTP_HID__H
+#define ISHTP_HID__H
+
+/* The fixed ISH product and vendor id */
+#define ISH_HID_VENDOR 0x8086
+#define ISH_HID_PRODUCT 0x22D8
+#define ISH_HID_VERSION 0x0200
+
+#define CMD_MASK 0x7F
+#define IS_RESPONSE 0x80
+
+/* Used to dump to Linux trace buffer, if enabled */
+extern ishtp_print_log ishtp_hid_print_trace;
+#define hid_ishtp_trace(client, ...) \
+ (ishtp_hid_print_trace)(NULL, __VA_ARGS__)
+
+/* ISH HID message structure */
+struct hostif_msg_hdr {
+ uint8_t command; /* Bit 7: is_response */
+ uint8_t device_id;
+ uint8_t status;
+ uint8_t flags;
+ uint16_t size;
+} __packed;
+
+struct hostif_msg {
+ struct hostif_msg_hdr hdr;
+} __packed;
+
+struct hostif_msg_to_sensor {
+ struct hostif_msg_hdr hdr;
+ uint8_t report_id;
+} __packed;
+
+struct device_info {
+ uint32_t dev_id;
+ uint8_t dev_class;
+ uint16_t pid;
+ uint16_t vid;
+} __packed;
+
+struct ishtp_version {
+ uint8_t major;
+ uint8_t minor;
+ uint8_t hotfix;
+ uint16_t build;
+} __packed;
+
+/* struct for ISHTP aggregated input data */
+struct report_list {
+ uint16_t total_size;
+ uint8_t num_of_reports;
+ uint8_t flags;
+ struct {
+ uint16_t size_of_report;
+ uint8_t report[1];
+ } __packed reports[1];
+} __packed;
+
+/* HOSTIF commands */
+#define HOSTIF_HID_COMMAND_BASE 0
+#define HOSTIF_GET_HID_DESCRIPTOR 0
+#define HOSTIF_GET_REPORT_DESCRIPTOR 1
+#define HOSTIF_GET_FEATURE_REPORT 2
+#define HOSTIF_SET_FEATURE_REPORT 3
+#define HOSTIF_GET_INPUT_REPORT 4
+#define HOSTIF_PUBLISH_INPUT_REPORT 5
+#define HOSTIF_PUBLISH_INPUT_REPORT_LIST 6
+#define HOSTIF_DM_COMMAND_BASE 32
+#define HOSTIF_DM_ENUM_DEVICES 33
+#define HOSTIF_DM_ADD_DEVICE 34
+
+#define MAX_HID_DEVICES 32
+
+/**
+ * struct ishtp_cl_data - Encapsulate per ISHTP HID client data
+ * @enum_devices_done:	Enum devices response complete flag
+ * @hid_descr_done: HID descriptor complete flag
+ * @report_descr_done: Get report descriptor complete flag
+ * @init_done: Init process completed successfully
+ * @suspended: System is under suspend state or in progress
+ * @num_hid_devices: Number of HID devices enumerated in this client
+ * @cur_hid_dev:	This keeps track of the device index for which
+ *			initialization and registration with HID core
+ *			is in progress.
+ * @hid_devices: Store vid/pid/devid for each enumerated HID device
+ * @report_descr: Stores the raw report descriptors for each HID device
+ * @report_descr_size:	Size of each report descriptor in report_descr[]
+ * @hid_sensor_hubs:	Pointer to hid_device for each HID device, so that
+ *			they can be freed when the client is removed
+ * @hid_descr:		Pointer to the HID descriptor for each enumerated
+ *			HID device
+ * @hid_descr_size:	Size of each HID descriptor above
+ * @init_wait: Wait queue to wait during initialization, where the
+ * client send message to ISH FW and wait for response
+ * @ishtp_hid_wait:	Wait queue for get report, used in the wait callback
+ *			from hid core
+ * @bad_recv_cnt: Running count of packets received with error
+ * @multi_packet_cnt:	Count of fragmented packets received
+ *
+ * This structure is used to store completion flags and per client data like
+ * report description, number of HID devices etc.
+ */
+struct ishtp_cl_data {
+ /* completion flags */
+ bool enum_devices_done;
+ bool hid_descr_done;
+ bool report_descr_done;
+ bool init_done;
+ bool suspended;
+
+ unsigned int num_hid_devices;
+ unsigned int cur_hid_dev;
+ unsigned int hid_dev_count;
+
+ struct device_info *hid_devices;
+ unsigned char *report_descr[MAX_HID_DEVICES];
+ int report_descr_size[MAX_HID_DEVICES];
+ struct hid_device *hid_sensor_hubs[MAX_HID_DEVICES];
+ unsigned char *hid_descr[MAX_HID_DEVICES];
+ int hid_descr_size[MAX_HID_DEVICES];
+
+ wait_queue_head_t init_wait;
+ wait_queue_head_t ishtp_resume_wait;
+ struct ishtp_cl *hid_ishtp_cl;
+
+ /* Statistics */
+ unsigned int bad_recv_cnt;
+ int multi_packet_cnt;
+
+ struct work_struct work;
+ struct work_struct resume_work;
+ struct ishtp_cl_device *cl_device;
+};
+
+/**
+ * struct ishtp_hid_data - Per instance HID data
+ * @index: Device index in the order of enumeration
+ * @request_done: Get Feature/Input report complete flag
+ * used during get/set request from hid core
+ * @client_data: Link to the client instance
+ * @hid_wait: Completion waitq
+ *
+ * @raw_get_req: Flag indicating raw get request ongoing
+ * @raw_buf: raw request buffer filled on receiving get report
+ * @raw_buf_size: raw request buffer size
+ * Used to tie hid->driver_data to the driver's client instance
+ */
+struct ishtp_hid_data {
+ int index;
+ bool request_done;
+ struct ishtp_cl_data *client_data;
+ wait_queue_head_t hid_wait;
+
+ /* raw request */
+ bool raw_get_req;
+ u8 *raw_buf;
+ size_t raw_buf_size;
+};
+
+/* Interface functions between HID LL driver and ISH TP client */
+void hid_ishtp_set_feature(struct hid_device *hid, char *buf, unsigned int len,
+ int report_id);
+void hid_ishtp_get_report(struct hid_device *hid, int report_id,
+ int report_type);
+int ishtp_hid_probe(unsigned int cur_hid_dev,
+ struct ishtp_cl_data *client_data);
+void ishtp_hid_remove(struct ishtp_cl_data *client_data);
+int ishtp_hid_link_ready_wait(struct ishtp_cl_data *client_data);
+void ishtp_hid_wakeup(struct hid_device *hid);
+
+#endif /* ISHTP_HID__H */
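For orientation, a hedged sketch of how the packed definitions in this header are used to build a request on the host side. The helper below is purely illustrative (not part of the driver sources above); dev_id would come from a HOSTIF_DM_ENUM_DEVICES response, and the usual kernel string helpers are assumed to be included.

/* Illustrative only: fill a GET_REPORT_DESCRIPTOR request header */
static void my_fill_get_report_descr_req(struct hostif_msg *msg, uint8_t dev_id)
{
	memset(msg, 0, sizeof(*msg));
	msg->hdr.command = HOSTIF_GET_REPORT_DESCRIPTOR;
	msg->hdr.device_id = dev_id;
	/* the matching response sets IS_RESPONSE (bit 7) in hdr.command */
}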
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
new file mode 100644
index 000000000..d4296681c
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -0,0 +1,932 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP bus driver
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "bus.h"
+#include "ishtp-dev.h"
+#include "client.h"
+#include "hbm.h"
+
+static int ishtp_use_dma;
+module_param_named(ishtp_use_dma, ishtp_use_dma, int, 0600);
+MODULE_PARM_DESC(ishtp_use_dma, "Use DMA to send messages");
+
+#define to_ishtp_cl_driver(d) container_of(d, struct ishtp_cl_driver, driver)
+#define to_ishtp_cl_device(d) container_of(d, struct ishtp_cl_device, dev)
+static bool ishtp_device_ready;
+
+/**
+ * ishtp_recv() - process ishtp message
+ * @dev: ishtp device
+ *
+ * If a message with a valid header and size is received, this function
+ * calls the appropriate handler. If both the host and firmware addresses
+ * are zero, it is a host bus management message, otherwise it is a
+ * message for clients.
+ */
+void ishtp_recv(struct ishtp_device *dev)
+{
+ uint32_t msg_hdr;
+ struct ishtp_msg_hdr *ishtp_hdr;
+
+ /* Read ISHTP header dword */
+ msg_hdr = dev->ops->ishtp_read_hdr(dev);
+ if (!msg_hdr)
+ return;
+
+ dev->ops->sync_fw_clock(dev);
+
+ ishtp_hdr = (struct ishtp_msg_hdr *)&msg_hdr;
+ dev->ishtp_msg_hdr = msg_hdr;
+
+ /* Sanity check: ISHTP frag. length in header */
+ if (ishtp_hdr->length > dev->mtu) {
+ dev_err(dev->devc,
+ "ISHTP hdr - bad length: %u; dropped [%08X]\n",
+ (unsigned int)ishtp_hdr->length, msg_hdr);
+ return;
+ }
+
+ /* ISHTP bus message */
+ if (!ishtp_hdr->host_addr && !ishtp_hdr->fw_addr)
+ recv_hbm(dev, ishtp_hdr);
+ /* ISHTP fixed-client message */
+ else if (!ishtp_hdr->host_addr)
+ recv_fixed_cl_msg(dev, ishtp_hdr);
+ else
+ /* ISHTP client message */
+ recv_ishtp_cl_msg(dev, ishtp_hdr);
+}
+EXPORT_SYMBOL(ishtp_recv);
+
+/**
+ * ishtp_send_msg() - Send ishtp message
+ * @dev: ishtp device
+ * @hdr: Message header
+ * @msg: Message contents
+ * @ipc_send_compl: completion callback
+ * @ipc_send_compl_prm: completion callback parameter
+ *
+ * Send a multi-fragment message via IPC. After sending the first fragment,
+ * the completion callback is called to schedule transmit of the next fragment.
+ *
+ * Return: This returns IPC send message status.
+ */
+int ishtp_send_msg(struct ishtp_device *dev, struct ishtp_msg_hdr *hdr,
+ void *msg, void(*ipc_send_compl)(void *),
+ void *ipc_send_compl_prm)
+{
+ unsigned char ipc_msg[IPC_FULL_MSG_SIZE];
+ uint32_t drbl_val;
+
+ drbl_val = dev->ops->ipc_get_header(dev, hdr->length +
+ sizeof(struct ishtp_msg_hdr),
+ 1);
+
+ memcpy(ipc_msg, &drbl_val, sizeof(uint32_t));
+ memcpy(ipc_msg + sizeof(uint32_t), hdr, sizeof(uint32_t));
+ memcpy(ipc_msg + 2 * sizeof(uint32_t), msg, hdr->length);
+ return dev->ops->write(dev, ipc_send_compl, ipc_send_compl_prm,
+ ipc_msg, 2 * sizeof(uint32_t) + hdr->length);
+}
+
+/**
+ * ishtp_write_message() - Send ishtp single fragment message
+ * @dev: ishtp device
+ * @hdr: Message header
+ * @buf: message data
+ *
+ * Send a single fragment message via IPC. This returns IPC send message
+ * status.
+ *
+ * Return: This returns IPC send message status.
+ */
+int ishtp_write_message(struct ishtp_device *dev, struct ishtp_msg_hdr *hdr,
+ void *buf)
+{
+ return ishtp_send_msg(dev, hdr, buf, NULL, NULL);
+}
+
+/**
+ * ishtp_fw_cl_by_uuid() - locate index of fw client
+ * @dev: ishtp device
+ * @uuid: uuid of the client to search
+ *
+ * Search firmware client using UUID.
+ *
+ * Return: fw client index or -ENOENT if not found
+ */
+int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const guid_t *uuid)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->fw_clients_num; ++i) {
+ if (guid_equal(uuid, &dev->fw_clients[i].props.protocol_name))
+ return i;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(ishtp_fw_cl_by_uuid);
+
+/**
+ * ishtp_fw_cl_get_client() - return client information to client
+ * @dev: the ishtp device structure
+ * @uuid: uuid of the client to search
+ *
+ * Search firmware client using UUID and return related client information.
+ *
+ * Return: pointer of client information on success, NULL on failure.
+ */
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const guid_t *uuid)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ i = ishtp_fw_cl_by_uuid(dev, uuid);
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ if (i < 0 || dev->fw_clients[i].props.fixed_address)
+ return NULL;
+
+ return &dev->fw_clients[i];
+}
+EXPORT_SYMBOL(ishtp_fw_cl_get_client);
+
+/**
+ * ishtp_get_fw_client_id() - Get fw client id
+ * @fw_client: firmware client used to fetch the ID
+ *
+ * This interface is used to get the firmware client id.
+ *
+ * Return: firmware client id.
+ */
+int ishtp_get_fw_client_id(struct ishtp_fw_client *fw_client)
+{
+ return fw_client->client_id;
+}
+EXPORT_SYMBOL(ishtp_get_fw_client_id);
+
+/**
+ * ishtp_fw_cl_by_id() - return index to fw_clients for client_id
+ * @dev: the ishtp device structure
+ * @client_id: fw client id to search
+ *
+ * Search firmware client using client id.
+ *
+ * Return: index on success, -ENOENT on failure.
+ */
+int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id)
+{
+ int i, res = -ENOENT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ for (i = 0; i < dev->fw_clients_num; i++) {
+ if (dev->fw_clients[i].client_id == client_id) {
+ res = i;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+
+ return res;
+}
+
+/**
+ * ishtp_cl_device_probe() - Bus probe() callback
+ * @dev: the device structure
+ *
+ * This is a bus probe callback and calls the driver probe function.
+ *
+ * Return: Return value from driver probe() call.
+ */
+static int ishtp_cl_device_probe(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+
+ if (!device)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (!driver || !driver->probe)
+ return -ENODEV;
+
+ return driver->probe(device);
+}
+
+/**
+ * ishtp_cl_bus_match() - Bus match() callback
+ * @dev: the device structure
+ * @drv: the driver structure
+ *
+ * This is a bus match callback, called when a new ishtp_cl_device is
+ * registered during ishtp bus client enumeration. Use the guid_t in
+ * drv and dev to decide whether they match or not.
+ *
+ * Return: 1 if dev & drv matches, 0 otherwise.
+ */
+static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
+
+ return(device->fw_client ? guid_equal(&driver->id[0].guid,
+ &device->fw_client->props.protocol_name) : 0);
+}
+
+/**
+ * ishtp_cl_device_remove() - Bus remove() callback
+ * @dev: the device structure
+ *
+ * This is a bus remove callback and calls the driver remove function.
+ * Since the ISH driver model supports only built-in drivers, this is
+ * primarily called during a PCI driver init failure.
+ */
+static void ishtp_cl_device_remove(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver = to_ishtp_cl_driver(dev->driver);
+
+ if (device->event_cb) {
+ device->event_cb = NULL;
+ cancel_work_sync(&device->event_work);
+ }
+
+ if (driver->remove)
+ driver->remove(device);
+}
+
+/**
+ * ishtp_cl_device_suspend() - Bus suspend callback
+ * @dev: device
+ *
+ * Called during device suspend process.
+ *
+ * Return: Return value from driver suspend() call.
+ */
+static int ishtp_cl_device_suspend(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ if (!device)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (driver && driver->driver.pm) {
+ if (driver->driver.pm->suspend)
+ ret = driver->driver.pm->suspend(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * ishtp_cl_device_resume() - Bus resume callback
+ * @dev: device
+ *
+ * Called during device resume process.
+ *
+ * Return: Return value from driver resume() call.
+ */
+static int ishtp_cl_device_resume(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ if (!device)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (driver && driver->driver.pm) {
+ if (driver->driver.pm->resume)
+ ret = driver->driver.pm->resume(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * ishtp_cl_device_reset() - Reset callback
+ * @device: ishtp client device instance
+ *
+ * This is a callback for when a HW reset is done and the device needs
+ * to be reinitialized.
+ *
+ * Return: Return value from driver reset() call.
+ */
+static int ishtp_cl_device_reset(struct ishtp_cl_device *device)
+{
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ device->event_cb = NULL;
+ cancel_work_sync(&device->event_work);
+
+ driver = to_ishtp_cl_driver(device->dev.driver);
+ if (driver && driver->reset)
+ ret = driver->reset(device);
+
+ return ret;
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, ISHTP_MODULE_PREFIX "%s\n", dev_name(dev));
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *ishtp_cl_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ishtp_cl_dev);
+
+static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ if (add_uevent_var(env, "MODALIAS=" ISHTP_MODULE_PREFIX "%s", dev_name(dev)))
+ return -ENOMEM;
+ return 0;
+}
+
+static const struct dev_pm_ops ishtp_cl_bus_dev_pm_ops = {
+ /* Suspend callbacks */
+ .suspend = ishtp_cl_device_suspend,
+ .resume = ishtp_cl_device_resume,
+ /* Hibernate callbacks */
+ .freeze = ishtp_cl_device_suspend,
+ .thaw = ishtp_cl_device_resume,
+ .restore = ishtp_cl_device_resume,
+};
+
+static struct bus_type ishtp_cl_bus_type = {
+ .name = "ishtp",
+ .dev_groups = ishtp_cl_dev_groups,
+ .probe = ishtp_cl_device_probe,
+ .match = ishtp_cl_bus_match,
+ .remove = ishtp_cl_device_remove,
+ .pm = &ishtp_cl_bus_dev_pm_ops,
+ .uevent = ishtp_cl_uevent,
+};
+
+static void ishtp_cl_dev_release(struct device *dev)
+{
+ kfree(to_ishtp_cl_device(dev));
+}
+
+static const struct device_type ishtp_cl_device_type = {
+ .release = ishtp_cl_dev_release,
+};
+
+/**
+ * ishtp_bus_add_device() - Function to create device on bus
+ * @dev: ishtp device
+ * @uuid: uuid of the client
+ * @name: Name of the client
+ *
+ * Allocate ISHTP bus client device, attach it to uuid
+ * and register with ISHTP bus.
+ *
+ * Return: ishtp_cl_device pointer or NULL on failure
+ */
+static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
+ guid_t uuid, char *name)
+{
+ struct ishtp_cl_device *device;
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_for_each_entry(device, &dev->device_list, device_link) {
+ if (!strcmp(name, dev_name(&device->dev))) {
+ device->fw_client = &dev->fw_clients[
+ dev->fw_client_presentation_num - 1];
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+ ishtp_cl_device_reset(device);
+ return device;
+ }
+ }
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+
+ device = kzalloc(sizeof(struct ishtp_cl_device), GFP_KERNEL);
+ if (!device)
+ return NULL;
+
+ device->dev.parent = dev->devc;
+ device->dev.bus = &ishtp_cl_bus_type;
+ device->dev.type = &ishtp_cl_device_type;
+ device->ishtp_dev = dev;
+
+ device->fw_client =
+ &dev->fw_clients[dev->fw_client_presentation_num - 1];
+
+ dev_set_name(&device->dev, "%s", name);
+
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_add_tail(&device->device_link, &dev->device_list);
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+
+ status = device_register(&device->dev);
+ if (status) {
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_del(&device->device_link);
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+ dev_err(dev->devc, "Failed to register ISHTP client device\n");
+ put_device(&device->dev);
+ return NULL;
+ }
+
+ ishtp_device_ready = true;
+
+ return device;
+}
+
+/**
+ * ishtp_bus_remove_device() - Function to release a device on the bus
+ * @device: client device instance
+ *
+ * This is a counterpart of ishtp_bus_add_device.
+ * The device is unregistered and its structure is freed in the
+ * 'ishtp_cl_dev_release' function. Called only on errors in the PCI
+ * driver init path.
+ */
+static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
+{
+ device_unregister(&device->dev);
+}
+
+/**
+ * ishtp_cl_driver_register() - Client driver register
+ * @driver: the client driver instance
+ * @owner: Owner of this driver module
+ *
+ * A client driver registers with the bus so that it can be probed when a
+ * matching client device is created.
+ *
+ * Return: Return value of driver_register or -ENODEV if not ready
+ */
+int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner)
+{
+ if (!ishtp_device_ready)
+ return -ENODEV;
+
+ driver->driver.name = driver->name;
+ driver->driver.owner = owner;
+ driver->driver.bus = &ishtp_cl_bus_type;
+
+ return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL(ishtp_cl_driver_register);
+
+/**
+ * ishtp_cl_driver_unregister() - Client driver unregister
+ * @driver: the client driver instance
+ *
+ * Unregister client during device removal process.
+ */
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL(ishtp_cl_driver_unregister);
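As a usage illustration (not part of the driver sources above), a minimal ISHTP client driver fills a struct ishtp_cl_driver with its GUID table and callbacks and registers it at module init. All names and the GUID are placeholders; the id-table type is assumed to be struct ishtp_device_id from <linux/mod_devicetable.h>, with the callback prototypes as declared in <linux/intel-ish-client-if.h>.

#include <linux/module.h>
#include <linux/uuid.h>
#include <linux/mod_devicetable.h>
#include <linux/intel-ish-client-if.h>

static int my_ishtp_probe(struct ishtp_cl_device *cl_device)
{
	/* allocate, link and connect an ishtp_cl here; see the fuller
	 * sketches after ishtp_register_event_cb() and ishtp_cl_connect()
	 */
	return 0;
}

static const struct ishtp_device_id my_ishtp_id_table[] = {
	{ .guid = GUID_INIT(0x12345678, 0x1234, 0x1234,		/* placeholder GUID */
			    0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34), },
	{ }
};

static struct ishtp_cl_driver my_ishtp_driver = {
	.name	= "my-ishtp-client",
	.id	= my_ishtp_id_table,
	.probe	= my_ishtp_probe,
};

static int __init my_ishtp_init(void)
{
	return ishtp_cl_driver_register(&my_ishtp_driver, THIS_MODULE);
}
module_init(my_ishtp_init);

static void __exit my_ishtp_exit(void)
{
	ishtp_cl_driver_unregister(&my_ishtp_driver);
}
module_exit(my_ishtp_exit);

MODULE_LICENSE("GPL");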
+
+/**
+ * ishtp_bus_event_work() - event work function
+ * @work: work struct pointer
+ *
+ * Once an event is received for a client this work
+ * function is called. If the device has registered a
+ * callback then the callback is called.
+ */
+static void ishtp_bus_event_work(struct work_struct *work)
+{
+ struct ishtp_cl_device *device;
+
+ device = container_of(work, struct ishtp_cl_device, event_work);
+
+ if (device->event_cb)
+ device->event_cb(device);
+}
+
+/**
+ * ishtp_cl_bus_rx_event() - schedule event work
+ * @device: client device instance
+ *
+ * Once an event is received for a client this schedules
+ * a work function to process.
+ */
+void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device)
+{
+ if (!device || !device->event_cb)
+ return;
+
+ if (device->event_cb)
+ schedule_work(&device->event_work);
+}
+
+/**
+ * ishtp_register_event_cb() - Register callback
+ * @device: client device instance
+ * @event_cb: Event processor for a client
+ *
+ * Register a callback for events, called from client driver
+ *
+ * Return: 0 on success or -EALREADY if already registered
+ */
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*event_cb)(struct ishtp_cl_device *))
+{
+ if (device->event_cb)
+ return -EALREADY;
+
+ device->event_cb = event_cb;
+ INIT_WORK(&device->event_work, ishtp_bus_event_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_register_event_cb);
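A fuller, still hypothetical, version of the probe callback from the registration sketch earlier: allocate the ishtp_cl, stash it as driver data (ishtp_set_drvdata() is defined just below), then register the event callback here. The event callback body is filled in by the receive sketch after ishtp_cl_rx_get_rb() further below.

static void my_ishtp_event_cb(struct ishtp_cl_device *cl_device)
{
	/* drain received buffers; see the receive sketch after
	 * ishtp_cl_rx_get_rb()
	 */
}

static int my_ishtp_probe(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl;
	int rv;

	cl = ishtp_cl_allocate(cl_device);	/* defined in ishtp/client.c */
	if (!cl)
		return -ENOMEM;

	ishtp_set_drvdata(cl_device, cl);

	rv = ishtp_register_event_cb(cl_device, my_ishtp_event_cb);
	if (rv)
		ishtp_cl_free(cl);
	return rv;
}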
+
+/**
+ * ishtp_get_device() - update usage count for the device
+ * @cl_device: client device instance
+ *
+ * Increment the usage count. The device can't be deleted while the count
+ * is non-zero.
+ */
+void ishtp_get_device(struct ishtp_cl_device *cl_device)
+{
+ cl_device->reference_count++;
+}
+EXPORT_SYMBOL(ishtp_get_device);
+
+/**
+ * ishtp_put_device() - decrement usage count for the device
+ * @cl_device: client device instance
+ *
+ * Decrement the usage count. The device can be deleted once the count drops to 0
+ */
+void ishtp_put_device(struct ishtp_cl_device *cl_device)
+{
+ cl_device->reference_count--;
+}
+EXPORT_SYMBOL(ishtp_put_device);
+
+/**
+ * ishtp_set_drvdata() - set client driver data
+ * @cl_device: client device instance
+ * @data: driver data need to be set
+ *
+ * Set client driver data to cl_device->driver_data.
+ */
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data)
+{
+ cl_device->driver_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_drvdata);
+
+/**
+ * ishtp_get_drvdata() - get client driver data
+ * @cl_device: client device instance
+ *
+ * Get client driver data from cl_device->driver_data.
+ *
+ * Return: pointer of driver data
+ */
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->driver_data;
+}
+EXPORT_SYMBOL(ishtp_get_drvdata);
+
+/**
+ * ishtp_dev_to_cl_device() - get ishtp_cl_device instance from device instance
+ * @device: device instance
+ *
+ * Get the ishtp_cl_device instance which embeds the device instance in it.
+ *
+ * Return: pointer to ishtp_cl_device instance
+ */
+struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *device)
+{
+ return to_ishtp_cl_device(device);
+}
+EXPORT_SYMBOL(ishtp_dev_to_cl_device);
+
+/**
+ * ishtp_bus_new_client() - Create a new client
+ * @dev: ISHTP device instance
+ *
+ * Once bus protocol enumerates a client, this is called
+ * to add a device for the client.
+ *
+ * Return: 0 on success or error code on failure
+ */
+int ishtp_bus_new_client(struct ishtp_device *dev)
+{
+ int i;
+ char *dev_name;
+ struct ishtp_cl_device *cl_device;
+ guid_t device_uuid;
+
+ /*
+	 * For all reported clients, create an unconnected client and add its
+	 * device to the ISHTP bus.
+	 * If an appropriate driver has been loaded, this will trigger its
+	 * probe(). Otherwise, probe() will be called when the driver is loaded
+ */
+ i = dev->fw_client_presentation_num - 1;
+ device_uuid = dev->fw_clients[i].props.protocol_name;
+ dev_name = kasprintf(GFP_KERNEL, "{%pUL}", &device_uuid);
+ if (!dev_name)
+ return -ENOMEM;
+
+ cl_device = ishtp_bus_add_device(dev, device_uuid, dev_name);
+ if (!cl_device) {
+ kfree(dev_name);
+ return -ENOENT;
+ }
+
+ kfree(dev_name);
+
+ return 0;
+}
+
+/**
+ * ishtp_cl_device_bind() - bind a device
+ * @cl: ishtp client device
+ *
+ * Binds connected ishtp_cl to ISHTP bus device
+ *
+ * Return: 0 on success or fault code
+ */
+int ishtp_cl_device_bind(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_device *cl_device;
+ unsigned long flags;
+ int rv;
+
+ if (!cl->fw_client_id || cl->state != ISHTP_CL_CONNECTED)
+ return -EFAULT;
+
+ rv = -ENOENT;
+ spin_lock_irqsave(&cl->dev->device_list_lock, flags);
+ list_for_each_entry(cl_device, &cl->dev->device_list,
+ device_link) {
+ if (cl_device->fw_client &&
+ cl_device->fw_client->client_id == cl->fw_client_id) {
+ cl->device = cl_device;
+ rv = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cl->dev->device_list_lock, flags);
+ return rv;
+}
+
+/**
+ * ishtp_bus_remove_all_clients() - Remove all clients
+ * @ishtp_dev: ishtp device
+ * @warm_reset: Reset due to an FW reset (because of errors) or S3 suspend
+ *
+ * This is part of the reset/remove flow. The main processing targets the
+ * error path, where the FW has forced a reset or an error requires the
+ * connected clients to be removed. On a warm reset the client devices are
+ * not removed.
+ */
+void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
+ bool warm_reset)
+{
+ struct ishtp_cl_device *cl_device, *n;
+ struct ishtp_cl *cl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+
+ /*
+ * Wake any pending process. The waiter would check dev->state
+ * and determine that it's not enabled already,
+ * and will return error to its caller
+ */
+ wake_up_interruptible(&cl->wait_ctrl_res);
+
+ /* Disband any pending read/write requests and free rb */
+ ishtp_cl_flush_queues(cl);
+
+ /* Remove all free and in_process rings, both Rx and Tx */
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
+
+ /*
+ * Free client and ISHTP bus client device structures
+ * don't free host client because it is part of the OS fd
+ * structure
+ */
+ }
+ spin_unlock_irqrestore(&ishtp_dev->cl_list_lock, flags);
+
+ /* Release DMA buffers for client messages */
+ ishtp_cl_free_dma_buf(ishtp_dev);
+
+ /* remove bus clients */
+ spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
+ list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
+ device_link) {
+ cl_device->fw_client = NULL;
+ if (warm_reset && cl_device->reference_count)
+ continue;
+
+ list_del(&cl_device->device_link);
+ spin_unlock_irqrestore(&ishtp_dev->device_list_lock, flags);
+ ishtp_bus_remove_device(cl_device);
+ spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&ishtp_dev->device_list_lock, flags);
+
+ /* Free all client structures */
+ spin_lock_irqsave(&ishtp_dev->fw_clients_lock, flags);
+ kfree(ishtp_dev->fw_clients);
+ ishtp_dev->fw_clients = NULL;
+ ishtp_dev->fw_clients_num = 0;
+ ishtp_dev->fw_client_presentation_num = 0;
+ ishtp_dev->fw_client_index = 0;
+ bitmap_zero(ishtp_dev->fw_clients_map, ISHTP_CLIENTS_MAX);
+ spin_unlock_irqrestore(&ishtp_dev->fw_clients_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_bus_remove_all_clients);
+
+/**
+ * ishtp_reset_handler() - IPC reset handler
+ * @dev: ishtp device
+ *
+ * ISHTP Handler for IPC_RESET notification
+ */
+void ishtp_reset_handler(struct ishtp_device *dev)
+{
+ unsigned long flags;
+
+ /* Handle FW-initiated reset */
+ dev->dev_state = ISHTP_DEV_RESETTING;
+
+ /* Clear BH processing queue - no further HBMs */
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ dev->rd_msg_fifo_head = dev->rd_msg_fifo_tail = 0;
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+
+ /* Handle ISH FW reset against upper layers */
+ ishtp_bus_remove_all_clients(dev, true);
+}
+EXPORT_SYMBOL(ishtp_reset_handler);
+
+/**
+ * ishtp_reset_compl_handler() - Reset completion handler
+ * @dev: ishtp device
+ *
+ * ISHTP handler for IPC_RESET sequence completion to start
+ * host message bus start protocol sequence.
+ */
+void ishtp_reset_compl_handler(struct ishtp_device *dev)
+{
+ dev->dev_state = ISHTP_DEV_INIT_CLIENTS;
+ dev->hbm_state = ISHTP_HBM_START;
+ ishtp_hbm_start_req(dev);
+}
+EXPORT_SYMBOL(ishtp_reset_compl_handler);
+
+/**
+ * ishtp_use_dma_transfer() - Whether to use DMA to send messages
+ *
+ * This interface reports whether DMA transfers are enabled
+ * (ishtp_use_dma module parameter).
+ *
+ * Return: non-zero if DMA can be used
+ */
+int ishtp_use_dma_transfer(void)
+{
+ return ishtp_use_dma;
+}
+
+/**
+ * ishtp_device() - Return device pointer
+ * @device: ISH-TP client device instance
+ *
+ * This interface is used to return device pointer from ishtp_cl_device
+ * instance.
+ *
+ * Return: device *.
+ */
+struct device *ishtp_device(struct ishtp_cl_device *device)
+{
+ return &device->dev;
+}
+EXPORT_SYMBOL(ishtp_device);
+
+/**
+ * ishtp_wait_resume() - Wait for IPC resume
+ * @dev: ishtp device
+ *
+ * Wait for the IPC resume response from the firmware.
+ *
+ * Return: true if resume completed, false on timeout
+ */
+bool ishtp_wait_resume(struct ishtp_device *dev)
+{
+ /* 50ms to get resume response */
+ #define WAIT_FOR_RESUME_ACK_MS 50
+
+ /* Waiting to get resume response */
+ if (dev->resume_flag)
+ wait_event_interruptible_timeout(dev->resume_wait,
+ !dev->resume_flag,
+ msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
+
+ return (!dev->resume_flag);
+}
+EXPORT_SYMBOL_GPL(ishtp_wait_resume);
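A hedged sketch of how a client driver's PM resume callback might use this, built from ishtp_dev_to_cl_device() and ishtp_get_drvdata() above; the callback name is a placeholder, cl_device->ishtp_dev is dereferenced directly for brevity, and error handling is simplified.

static int my_ishtp_resume(struct device *device)
{
	struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);

	/* do not touch the firmware until the IPC link reports resume */
	if (!ishtp_wait_resume(cl_device->ishtp_dev))
		return -ETIMEDOUT;

	return 0;
}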
+
+/**
+ * ishtp_get_pci_device() - Return PCI device dev pointer
+ * @device: ISH-TP client device instance
+ *
+ * This interface is used to return the PCI device pointer
+ * from an ishtp_cl_device instance.
+ *
+ * Return: device *.
+ */
+struct device *ishtp_get_pci_device(struct ishtp_cl_device *device)
+{
+ return device->ishtp_dev->devc;
+}
+EXPORT_SYMBOL(ishtp_get_pci_device);
+
+/**
+ * ishtp_trace_callback() - Return trace callback
+ * @cl_device: ISH-TP client device instance
+ *
+ * This interface is used to return trace callback function pointer.
+ *
+ * Return: *ishtp_print_log()
+ */
+ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->ishtp_dev->print_log;
+}
+EXPORT_SYMBOL(ishtp_trace_callback);
+
+/**
+ * ish_hw_reset() - Call HW reset IPC callback
+ * @dev: ISHTP device instance
+ *
+ * This interface is used to reset HW in case of error.
+ *
+ * Return: value from IPC hw_reset callback
+ */
+int ish_hw_reset(struct ishtp_device *dev)
+{
+ return dev->ops->hw_reset(dev);
+}
+EXPORT_SYMBOL(ish_hw_reset);
+
+/**
+ * ishtp_bus_register() - Function to register bus
+ *
+ * This registers the ishtp bus
+ *
+ * Return: Return output of bus_register
+ */
+static int __init ishtp_bus_register(void)
+{
+ return bus_register(&ishtp_cl_bus_type);
+}
+
+/**
+ * ishtp_bus_unregister() - Function to unregister bus
+ *
+ * This unregisters the ishtp bus
+ */
+static void __exit ishtp_bus_unregister(void)
+{
+ bus_unregister(&ishtp_cl_bus_type);
+}
+
+module_init(ishtp_bus_register);
+module_exit(ishtp_bus_unregister);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
new file mode 100644
index 000000000..5bb85c932
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ISHTP bus definitions
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ */
+#ifndef _LINUX_ISHTP_CL_BUS_H
+#define _LINUX_ISHTP_CL_BUS_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/intel-ish-client-if.h>
+
+struct ishtp_cl;
+struct ishtp_cl_device;
+struct ishtp_device;
+struct ishtp_msg_hdr;
+
+/**
+ * struct ishtp_cl_device - ISHTP device handle
+ * @dev: device pointer
+ * @ishtp_dev:	pointer to ishtp device structure, primarily to access
+ *		hw device operation callbacks and properties
+ * @fw_client: fw_client pointer to get fw information like protocol name
+ * max message length etc.
+ * @device_link: Link to next client in the list on a bus
+ * @event_work: Used to schedule rx event for client
+ * @driver_data: Storage driver private data
+ * @reference_count: Used for get/put device
+ * @event_cb: Callback to driver to send events
+ *
+ * An ishtp_cl_device pointer is returned from ishtp_bus_add_device()
+ * and links ISHTP bus clients to their actual host client pointer.
+ * Drivers for ISHTP devices will get an ishtp_cl_device pointer
+ * when being probed and shall use it for doing bus I/O.
+ */
+struct ishtp_cl_device {
+ struct device dev;
+ struct ishtp_device *ishtp_dev;
+ struct ishtp_fw_client *fw_client;
+ struct list_head device_link;
+ struct work_struct event_work;
+ void *driver_data;
+ int reference_count;
+ void (*event_cb)(struct ishtp_cl_device *device);
+};
+
+int ishtp_bus_new_client(struct ishtp_device *dev);
+void ishtp_remove_all_clients(struct ishtp_device *dev);
+int ishtp_cl_device_bind(struct ishtp_cl *cl);
+void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device);
+
+/* Write a multi-fragment message */
+int ishtp_send_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *hdr, void *msg,
+ void (*ipc_send_compl)(void *),
+ void *ipc_send_compl_prm);
+
+/* Write a single-fragment message */
+int ishtp_write_message(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *hdr,
+ void *buf);
+
+/* Use DMA to send/receive messages */
+int ishtp_use_dma_transfer(void);
+
+/* Exported functions */
+void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
+ bool warm_reset);
+
+void ishtp_recv(struct ishtp_device *dev);
+void ishtp_reset_handler(struct ishtp_device *dev);
+void ishtp_reset_compl_handler(struct ishtp_device *dev);
+
+int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const guid_t *cuuid);
+#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
new file mode 100644
index 000000000..513d7a4a1
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP Ring Buffers
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include "client.h"
+
+/**
+ * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
+ * @cl: client device instance
+ *
+ * Allocate and initialize RX ring buffers
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
+{
+ size_t len = cl->device->fw_client->props.max_msg_length;
+ int j;
+ struct ishtp_cl_rb *rb;
+ int ret = 0;
+ unsigned long flags;
+
+ for (j = 0; j < cl->rx_ring_size; ++j) {
+ rb = ishtp_io_rb_init(cl);
+ if (!rb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = ishtp_io_rb_alloc_buf(rb, len);
+ if (ret)
+ goto out;
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ }
+
+ return 0;
+
+out:
+ dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
+ ishtp_cl_free_rx_ring(cl);
+ return ret;
+}
+
+/**
+ * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
+ * @cl: client device instance
+ *
+ * Allocate and initialize TX ring buffers
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
+{
+ size_t len = cl->device->fw_client->props.max_msg_length;
+ int j;
+ unsigned long flags;
+
+ cl->tx_ring_free_size = 0;
+
+ /* Allocate pool to free Tx bufs */
+ for (j = 0; j < cl->tx_ring_size; ++j) {
+ struct ishtp_cl_tx_ring *tx_buf;
+
+ tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
+ if (!tx_buf)
+ goto out;
+
+ tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
+ if (!tx_buf->send_buf.data) {
+ kfree(tx_buf);
+ goto out;
+ }
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
+ list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
+ }
+ return 0;
+out:
+ dev_err(&cl->device->dev, "error in allocating Tx pool\n");
+ ishtp_cl_free_tx_ring(cl);
+ return -ENOMEM;
+}
+
+/**
+ * ishtp_cl_free_rx_ring() - Free RX ring buffers
+ * @cl: client device instance
+ *
+ * Free RX ring buffers
+ */
+void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+ unsigned long flags;
+
+ /* release allocated memory - pass over free_rb_list */
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ while (!list_empty(&cl->free_rb_list.list)) {
+ rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
+ list);
+ list_del(&rb->list);
+ kfree(rb->buffer.data);
+ kfree(rb);
+ }
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ /* release allocated memory - pass over in_process_list */
+ spin_lock_irqsave(&cl->in_process_spinlock, flags);
+ while (!list_empty(&cl->in_process_list.list)) {
+ rb = list_entry(cl->in_process_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del(&rb->list);
+ kfree(rb->buffer.data);
+ kfree(rb);
+ }
+ spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
+}
+
+/**
+ * ishtp_cl_free_tx_ring() - Free TX ring buffers
+ * @cl: client device instance
+ *
+ * Free TX ring buffers
+ */
+void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_tx_ring *tx_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
+ /* release allocated memory - pass over tx_free_list */
+ while (!list_empty(&cl->tx_free_list.list)) {
+ tx_buf = list_entry(cl->tx_free_list.list.next,
+ struct ishtp_cl_tx_ring, list);
+ list_del(&tx_buf->list);
+ --cl->tx_ring_free_size;
+ kfree(tx_buf->send_buf.data);
+ kfree(tx_buf);
+ }
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, flags);
+ /* release allocated memory - pass over tx_list */
+ while (!list_empty(&cl->tx_list.list)) {
+ tx_buf = list_entry(cl->tx_list.list.next,
+ struct ishtp_cl_tx_ring, list);
+ list_del(&tx_buf->list);
+ kfree(tx_buf->send_buf.data);
+ kfree(tx_buf);
+ }
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
+}
+
+/**
+ * ishtp_io_rb_free() - Free IO request block
+ * @rb: IO request block
+ *
+ * Free io request block memory
+ */
+void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
+{
+ if (rb == NULL)
+ return;
+
+ kfree(rb->buffer.data);
+ kfree(rb);
+}
+
+/**
+ * ishtp_io_rb_init() - Allocate and init IO request block
+ * @cl: client device instance
+ *
+ * Allocate and initialize request block
+ *
+ * Return: Allocated IO request block pointer, or NULL on failure
+ */
+struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+
+ rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
+ if (!rb)
+ return NULL;
+
+ INIT_LIST_HEAD(&rb->list);
+ rb->cl = cl;
+ rb->buf_idx = 0;
+ return rb;
+}
+
+/**
+ * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
+ * @rb: IO request block
+ * @length: length of response buffer
+ *
+ * Allocate response buffer
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
+{
+ if (!rb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ rb->buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!rb->buffer.data)
+ return -ENOMEM;
+
+ rb->buffer.size = length;
+ return 0;
+}
+
+/**
+ * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
+ * @rb: IO request block
+ *
+ * Re-append rb to its client's free list and send flow control if needed
+ *
+ * Return: 0 on success else -EFAULT
+ */
+int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
+{
+ struct ishtp_cl *cl;
+ int rets = 0;
+ unsigned long flags;
+
+ if (!rb || !rb->cl)
+ return -EFAULT;
+
+ cl = rb->cl;
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+
+ /*
+ * If we returned the first buffer to empty 'free' list,
+ * send flow control
+ */
+ if (!cl->out_flow_ctrl_creds)
+ rets = ishtp_cl_read_start(cl);
+
+ return rets;
+}
+EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
+
+/**
+ * ishtp_cl_tx_empty() - test whether client device tx buffer is empty
+ * @cl: Pointer to client device instance
+ *
+ * Look at the client device tx buffer list and check whether it is empty
+ *
+ * Return: true if client tx buffer list is empty else false
+ */
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
+{
+ int tx_list_empty;
+ unsigned long tx_flags;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ tx_list_empty = list_empty(&cl->tx_list.list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ return !!tx_list_empty;
+}
+EXPORT_SYMBOL(ishtp_cl_tx_empty);
+
+/**
+ * ishtp_cl_rx_get_rb() - Get a rb from client device rx buffer list
+ * @cl: Pointer to client device instance
+ *
+ * Check the client device in-process buffer list and get a rb from it.
+ *
+ * Return: rb pointer if buffer list isn't empty else NULL
+ */
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
+{
+ unsigned long rx_flags;
+ struct ishtp_cl_rb *rb;
+
+ spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
+ rb = list_first_entry_or_null(&cl->in_process_list.list,
+ struct ishtp_cl_rb, list);
+ if (rb)
+ list_del_init(&rb->list);
+ spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);
+
+ return rb;
+}
+EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
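And a hedged sketch of the event callback referenced in the probe sketch earlier: drain the in-process list and recycle each buffer so flow-control credit is returned to the firmware. my_consume_message() is hypothetical; rb->buffer.data and rb->buf_idx are the payload pointer and received length as handled in this file.

static void my_ishtp_event_cb(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl = ishtp_get_drvdata(cl_device);
	struct ishtp_cl_rb *rb;

	while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
		if (rb->buffer.data)
			my_consume_message(rb->buffer.data, rb->buf_idx);
		/* recycling returns the rb and re-arms flow control */
		ishtp_cl_io_rb_recycle(rb);
	}
}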
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
new file mode 100644
index 000000000..df0a82569
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -0,0 +1,1125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP client logic
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include "hbm.h"
+#include "client.h"
+
+int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
+{
+ unsigned long tx_free_flags;
+ int size;
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+
+ return size;
+}
+EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
+
+int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
+{
+ return cl->tx_ring_free_size;
+}
+EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
+
+/**
+ * ishtp_read_list_flush() - Flush read queue
+ * @cl: ishtp client instance
+ *
+ * Used to remove all entries from read queue for a client
+ */
+static void ishtp_read_list_flush(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
+ list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
+ if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ }
+ spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
+}
+
+/**
+ * ishtp_cl_flush_queues() - Flush all queues for a client
+ * @cl: ishtp client instance
+ *
+ * Used to remove all queues for a client. This is called when a client device
+ * needs reset due to error, S3 resume or during module removal
+ *
+ * Return: 0 on success else -EINVAL if device is NULL
+ */
+int ishtp_cl_flush_queues(struct ishtp_cl *cl)
+{
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ ishtp_read_list_flush(cl);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_cl_flush_queues);
+
+/**
+ * ishtp_cl_init() - Initialize all fields of a client device
+ * @cl: ishtp client instance
+ * @dev: ishtp device
+ *
+ * Initializes a client device's fields: init spinlocks, init queues etc.
+ * This function is called during new client creation
+ */
+static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
+{
+ memset(cl, 0, sizeof(struct ishtp_cl));
+ init_waitqueue_head(&cl->wait_ctrl_res);
+ spin_lock_init(&cl->free_list_spinlock);
+ spin_lock_init(&cl->in_process_spinlock);
+ spin_lock_init(&cl->tx_list_spinlock);
+ spin_lock_init(&cl->tx_free_list_spinlock);
+ spin_lock_init(&cl->fc_spinlock);
+ INIT_LIST_HEAD(&cl->link);
+ cl->dev = dev;
+
+ INIT_LIST_HEAD(&cl->free_rb_list.list);
+ INIT_LIST_HEAD(&cl->tx_list.list);
+ INIT_LIST_HEAD(&cl->tx_free_list.list);
+ INIT_LIST_HEAD(&cl->in_process_list.list);
+
+ cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
+ cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
+ cl->tx_ring_free_size = cl->tx_ring_size;
+
+ /* dma */
+ cl->last_tx_path = CL_TX_PATH_IPC;
+ cl->last_dma_acked = 1;
+ cl->last_dma_addr = NULL;
+ cl->last_ipc_acked = 1;
+}
+
+/**
+ * ishtp_cl_allocate() - allocates client structure and sets it up.
+ * @cl_device: ishtp client device
+ *
+ * Allocate memory for new client device and call to initialize each field.
+ *
+ * Return: The allocated client instance or NULL on failure
+ */
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *cl;
+
+ cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
+ if (!cl)
+ return NULL;
+
+ ishtp_cl_init(cl, cl_device->ishtp_dev);
+ return cl;
+}
+EXPORT_SYMBOL(ishtp_cl_allocate);
+
+/**
+ * ishtp_cl_free() - Frees a client device
+ * @cl: client device instance
+ *
+ * Frees a client device
+ */
+void ishtp_cl_free(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ unsigned long flags;
+
+ if (!cl)
+ return;
+
+ dev = cl->dev;
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
+ kfree(cl);
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_cl_free);
+
+/**
+ * ishtp_cl_link() - Reserve a host id and link the client instance
+ * @cl: client device instance
+ *
+ * This allocates a single bit in the hostmap and makes sure that no more
+ * than the maximum number of client sessions are open at the same time.
+ * Once allocated, the client device instance is added to the ishtp
+ * device's client list
+ *
+ * Return: 0 or error code on failure
+ */
+int ishtp_cl_link(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ unsigned long flags, flags_cl;
+ int id, ret = 0;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ spin_lock_irqsave(&dev->device_lock, flags);
+
+ if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
+ ret = -EMFILE;
+ goto unlock_dev;
+ }
+
+ id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);
+
+ if (id >= ISHTP_CLIENTS_MAX) {
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+ dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
+ return -ENOENT;
+ }
+
+ dev->open_handle_count++;
+ cl->host_client_id = id;
+ spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ ret = -ENODEV;
+ goto unlock_cl;
+ }
+ list_add_tail(&cl->link, &dev->cl_list);
+ set_bit(id, dev->host_clients_map);
+ cl->state = ISHTP_CL_INITIALIZING;
+
+unlock_cl:
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
+unlock_dev:
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(ishtp_cl_link);
+
+/**
+ * ishtp_cl_unlink() - remove fw_cl from the client device list
+ * @cl: client device instance
+ *
+ * Remove a previously linked client from the ishtp device's client list
+ */
+void ishtp_cl_unlink(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl *pos;
+ unsigned long flags;
+
+ /* don't shout on error exit path */
+ if (!cl || !cl->dev)
+ return;
+
+ dev = cl->dev;
+
+ spin_lock_irqsave(&dev->device_lock, flags);
+ if (dev->open_handle_count > 0) {
+ clear_bit(cl->host_client_id, dev->host_clients_map);
+ dev->open_handle_count--;
+ }
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+
+ /*
+ * This checks that 'cl' is actually linked into device's structure,
+ * before attempting 'list_del'
+ */
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(pos, &dev->cl_list, link)
+ if (cl->host_client_id == pos->host_client_id) {
+ list_del_init(&pos->link);
+ break;
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_cl_unlink);
+
+/**
+ * ishtp_cl_disconnect() - Send disconnect request to firmware
+ * @cl: client device instance
+ *
+ * Send a disconnect request for a client to firmware.
+ *
+ * Return: 0 if successful disconnect response from the firmware or error
+ * code on failure
+ */
+int ishtp_cl_disconnect(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ dev->print_log(dev, "%s() state %d\n", __func__, cl->state);
+
+ if (cl->state != ISHTP_CL_DISCONNECTING) {
+ dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
+ return 0;
+ }
+
+ if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
+ dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
+ dev_err(&cl->device->dev, "failed to disconnect.\n");
+ return -ENODEV;
+ }
+
+ wait_event_interruptible_timeout(cl->wait_ctrl_res,
+ (dev->dev_state != ISHTP_DEV_ENABLED ||
+ cl->state == ISHTP_CL_DISCONNECTED),
+ ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
+
+ /*
+ * If FW reset arrived, this will happen. Don't check cl->,
+ * as 'cl' may be freed already
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (cl->state == ISHTP_CL_DISCONNECTED) {
+ dev->print_log(dev, "%s() successful\n", __func__);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(ishtp_cl_disconnect);
+
+/**
+ * ishtp_cl_is_other_connecting() - Check if another client is connecting
+ * @cl: client device instance
+ *
+ * Checks if another client with the same fw client id is connecting
+ *
+ * Return: true if another client is connecting, else false
+ */
+static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl *pos;
+ unsigned long flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return false;
+
+ dev = cl->dev;
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(pos, &dev->cl_list, link) {
+ if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
+ cl->fw_client_id == pos->fw_client_id) {
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+ return true;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+
+ return false;
+}
+
+/**
+ * ishtp_cl_connect() - Send connect request to firmware
+ * @cl: client device instance
+ *
+ * Send a connect request for a client to firmware. If successful it will
+ * allocate RX and TX ring buffers
+ *
+ * Return: 0 if successful connect response from the firmware and able
+ * to bind and allocate ring buffers or error code on failure
+ */
+int ishtp_cl_connect(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);
+
+ if (ishtp_cl_is_other_connecting(cl)) {
+ dev->print_log(dev, "%s() Busy\n", __func__);
+ return -EBUSY;
+ }
+
+ if (ishtp_hbm_cl_connect_req(dev, cl)) {
+ dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
+ return -ENODEV;
+ }
+
+ rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
+ (dev->dev_state == ISHTP_DEV_ENABLED &&
+ (cl->state == ISHTP_CL_CONNECTED ||
+ cl->state == ISHTP_CL_DISCONNECTED)),
+ ishtp_secs_to_jiffies(
+ ISHTP_CL_CONNECT_TIMEOUT));
+ /*
+ * If FW reset arrived, this will happen. Don't check cl->,
+ * as 'cl' may be freed already
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (cl->state != ISHTP_CL_CONNECTED) {
+ dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ rets = cl->status;
+ if (rets) {
+ dev->print_log(dev, "%s() Invalid status\n", __func__);
+ return rets;
+ }
+
+ rets = ishtp_cl_device_bind(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Bind error\n", __func__);
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ rets = ishtp_cl_alloc_rx_ring(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
+ /* if failed allocation, disconnect */
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ rets = ishtp_cl_alloc_tx_ring(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
+ /* if failed allocation, disconnect */
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ /* Upon successful connection and allocation, emit flow-control */
+ rets = ishtp_cl_read_start(cl);
+
+ dev->print_log(dev, "%s() successful\n", __func__);
+
+ return rets;
+}
+EXPORT_SYMBOL(ishtp_cl_connect);
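For context, a hedged sketch of the connect sequence a client driver might run from its probe path, using the helpers above: link the client, resolve the firmware client by GUID, record its id, then connect. my_client_guid is a placeholder, and direct writes to cl->fw_client_id and cl->state are shown only for brevity; mainline client drivers use small accessor helpers for these fields.

static int my_ishtp_connect(struct ishtp_cl *cl, const guid_t *my_client_guid)
{
	struct ishtp_fw_client *fw_client;
	int rv;

	rv = ishtp_cl_link(cl);			/* reserve a host client id */
	if (rv)
		return rv;

	fw_client = ishtp_fw_cl_get_client(cl->dev, my_client_guid);
	if (!fw_client) {
		ishtp_cl_unlink(cl);
		return -ENOENT;
	}

	cl->fw_client_id = ishtp_get_fw_client_id(fw_client);
	cl->state = ISHTP_CL_CONNECTING;

	rv = ishtp_cl_connect(cl);		/* also allocates RX/TX rings */
	if (rv)
		ishtp_cl_unlink(cl);
	return rv;
}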
+
+/**
+ * ishtp_cl_read_start() - Prepare to read client message
+ * @cl: client device instance
+ *
+ * Get a free buffer from the pool of free read buffers and add it to the
+ * device read list so it can receive content. Send a flow control request
+ * to the firmware so that it is able to send the next message.
+ *
+ * Return: 0 if successful or error code on failure
+ */
+int ishtp_cl_read_start(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl_rb *rb;
+ int rets;
+ int i;
+ unsigned long flags;
+ unsigned long dev_flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != ISHTP_CL_CONNECTED)
+ return -ENODEV;
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ return -ENODEV;
+
+ i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
+ if (i < 0) {
+ dev_err(&cl->device->dev, "no such fw client %d\n",
+ cl->fw_client_id);
+ return -ENODEV;
+ }
+
+ /* The current rb is the head of the free rb list */
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ if (list_empty(&cl->free_rb_list.list)) {
+ dev_warn(&cl->device->dev,
+ "[ishtp-ish] Rx buffers pool is empty\n");
+ rets = -ENOMEM;
+ rb = NULL;
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ goto out;
+ }
+ rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
+ list_del_init(&rb->list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+
+ rb->cl = cl;
+ rb->buf_idx = 0;
+
+ INIT_LIST_HEAD(&rb->list);
+ rets = 0;
+
+ /*
+ * This must be BEFORE sending flow control -
+ * response in ISR may come too fast...
+ */
+ spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+ list_add_tail(&rb->list, &dev->read_list.list);
+ spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+ if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+out:
+ /* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
+ if (rets && rb) {
+ spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+ list_del(&rb->list);
+ spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ }
+ return rets;
+}
+
+/**
+ * ishtp_cl_send() - Send a message to firmware
+ * @cl: client device instance
+ * @buf: message buffer
+ * @length: length of message
+ *
+ * If the client is in the correct state to send a message, this function gets
+ * a buffer from the TX ring, copies the message data into it and queues the
+ * message for sending via ishtp_cl_send_msg()
+ *
+ * Return: 0 if successful or error code on failure
+ */
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
+{
+ struct ishtp_device *dev;
+ int id;
+ struct ishtp_cl_tx_ring *cl_msg;
+ int have_msg_to_send = 0;
+ unsigned long tx_flags, tx_free_flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != ISHTP_CL_CONNECTED) {
+ ++cl->err_send_msg;
+ return -EPIPE;
+ }
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ ++cl->err_send_msg;
+ return -ENODEV;
+ }
+
+ /* Check if we have fw client device */
+ id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
+ if (id < 0) {
+ ++cl->err_send_msg;
+ return -ENOENT;
+ }
+
+ if (length > dev->fw_clients[id].props.max_msg_length) {
+ ++cl->err_send_msg;
+ return -EMSGSIZE;
+ }
+
+ /* No free bufs */
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ if (list_empty(&cl->tx_free_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+ ++cl->err_send_msg;
+ return -ENOMEM;
+ }
+
+ cl_msg = list_first_entry(&cl->tx_free_list.list,
+ struct ishtp_cl_tx_ring, list);
+ if (!cl_msg->send_buf.data) {
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+		/* Should not happen, as free list is pre-allocated */
+		return -EIO;
+	}
+ /*
+ * This is safe, as 'length' is already checked for not exceeding
+ * max ISHTP message size per client
+ */
+ list_del_init(&cl_msg->list);
+ --cl->tx_ring_free_size;
+
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+ memcpy(cl_msg->send_buf.data, buf, length);
+ cl_msg->send_buf.size = length;
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ have_msg_to_send = !list_empty(&cl->tx_list.list);
+ list_add_tail(&cl_msg->list, &cl->tx_list.list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
+ ishtp_cl_send_msg(dev, cl);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_cl_send);
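+
+/*
+ * Usage sketch: once connected, a client driver hands a flat buffer to
+ * ishtp_cl_send(); splitting into IPC fragments or switching to DMA is
+ * handled below this call. The request structure and 'dev' pointer here
+ * are hypothetical.
+ *
+ *	struct my_request req = { .cmd = MY_GET_REPORT };
+ *	int rv;
+ *
+ *	rv = ishtp_cl_send(cl, (uint8_t *)&req, sizeof(req));
+ *	if (rv)
+ *		dev_err(dev, "ishtp send failed: %d\n", rv);
+ */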
+
+/**
+ * ishtp_cl_read_complete() - read complete
+ * @rb: Pointer to client request block
+ *
+ * If the message is completely received call ishtp_cl_bus_rx_event()
+ * to process message
+ */
+static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
+{
+ unsigned long flags;
+ int schedule_work_flag = 0;
+ struct ishtp_cl *cl = rb->cl;
+
+ spin_lock_irqsave(&cl->in_process_spinlock, flags);
+ /*
+ * if in-process list is empty, then need to schedule
+ * the processing thread
+ */
+ schedule_work_flag = list_empty(&cl->in_process_list.list);
+ list_add_tail(&rb->list, &cl->in_process_list.list);
+ spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
+
+ if (schedule_work_flag)
+ ishtp_cl_bus_rx_event(cl->device);
+}
+
+/**
+ * ipc_tx_send() - IPC tx send function
+ * @prm: Pointer to client device instance
+ *
+ * Send message over IPC. Message will be split into fragments
+ * if message size is bigger than IPC FIFO size, and all
+ * fragments will be sent one by one.
+ */
+static void ipc_tx_send(void *prm)
+{
+ struct ishtp_cl *cl = prm;
+ struct ishtp_cl_tx_ring *cl_msg;
+ size_t rem;
+ struct ishtp_device *dev = (cl ? cl->dev : NULL);
+ struct ishtp_msg_hdr ishtp_hdr;
+ unsigned long tx_flags, tx_free_flags;
+ unsigned char *pmsg;
+
+ if (!dev)
+ return;
+
+ /*
+ * Other conditions if some critical error has
+ * occurred before this callback is called
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ return;
+
+ if (cl->state != ISHTP_CL_CONNECTED)
+ return;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ if (list_empty(&cl->tx_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ if (!cl->sending) {
+ --cl->ishtp_flow_ctrl_creds;
+ cl->last_ipc_acked = 0;
+ cl->last_tx_path = CL_TX_PATH_IPC;
+ cl->sending = 1;
+ }
+
+ cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
+ list);
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+
+ while (rem > 0) {
+ ishtp_hdr.host_addr = cl->host_client_id;
+ ishtp_hdr.fw_addr = cl->fw_client_id;
+ ishtp_hdr.reserved = 0;
+ pmsg = cl_msg->send_buf.data + cl->tx_offs;
+
+ if (rem <= dev->mtu) {
+ /* Last fragment or only one packet */
+ ishtp_hdr.length = rem;
+ ishtp_hdr.msg_complete = 1;
+ /* Submit to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs = 0;
+ cl->sending = 0;
+
+ break;
+ } else {
+ /* Send ipc fragment */
+ ishtp_hdr.length = dev->mtu;
+ ishtp_hdr.msg_complete = 0;
+			/* All fragments submitted to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs += dev->mtu;
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+ }
+ }
+
+ list_del_init(&cl_msg->list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+}
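+
+/*
+ * For example, with a hypothetical IPC MTU of 128 bytes, a 300-byte message
+ * leaves ipc_tx_send() as three fragments: 128 bytes (msg_complete = 0),
+ * 128 bytes (msg_complete = 0) and 44 bytes (msg_complete = 1), after which
+ * the ring entry is moved back to the client's free list.
+ */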
+
+/**
+ * ishtp_cl_send_msg_ipc() - Send message using IPC
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message over IPC not using DMA
+ */
+static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ /* If last DMA message wasn't acked yet, leave this one in Tx queue */
+ if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
+ return;
+
+ cl->tx_offs = 0;
+ ipc_tx_send(cl);
+ ++cl->send_msg_cnt_ipc;
+}
+
+/**
+ * ishtp_cl_send_msg_dma() - Send message using DMA
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message using DMA
+ */
+static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ struct dma_xfer_hbm dma_xfer;
+ unsigned char *msg_addr;
+ int off;
+ struct ishtp_cl_tx_ring *cl_msg;
+ unsigned long tx_flags, tx_free_flags;
+
+ /* If last IPC message wasn't acked yet, leave this one in Tx queue */
+ if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
+ return;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ if (list_empty(&cl->tx_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
+ list);
+
+ msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
+ if (!msg_addr) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ if (dev->transfer_path == CL_TX_PATH_DEFAULT)
+ ishtp_cl_send_msg_ipc(dev, cl);
+ return;
+ }
+
+ list_del_init(&cl_msg->list); /* Must be before write */
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ --cl->ishtp_flow_ctrl_creds;
+ cl->last_dma_acked = 0;
+ cl->last_dma_addr = msg_addr;
+ cl->last_tx_path = CL_TX_PATH_DMA;
+
+ /* write msg to dma buf */
+ memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);
+
+ /*
+	 * If the current FW doesn't support cache snooping, the driver has
+	 * to flush the cache manually.
+ */
+ if (dev->ops->dma_no_cache_snooping &&
+ dev->ops->dma_no_cache_snooping(dev))
+ clflush_cache_range(msg_addr, cl_msg->send_buf.size);
+
+ /* send dma_xfer hbm msg */
+ off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
+ ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
+ dma_xfer.hbm = DMA_XFER;
+ dma_xfer.fw_client_id = cl->fw_client_id;
+ dma_xfer.host_client_id = cl->host_client_id;
+ dma_xfer.reserved = 0;
+ dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
+ dma_xfer.msg_length = cl_msg->send_buf.size;
+ dma_xfer.reserved2 = 0;
+ ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+ ++cl->send_msg_cnt_dma;
+}
+
+/**
+ * ishtp_cl_send_msg() - Send message using DMA or IPC
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message using DMA or IPC based on transfer_path
+ */
+void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ if (dev->transfer_path == CL_TX_PATH_DMA)
+ ishtp_cl_send_msg_dma(dev, cl);
+ else
+ ishtp_cl_send_msg_ipc(dev, cl);
+}
+
+/**
+ * recv_ishtp_cl_msg() - Receive client message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: Pointer to message header
+ *
+ * Receive and dispatch ISHTP client messages. This function executes in ISR
+ * or work queue context
+ */
+void recv_ishtp_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr)
+{
+ struct ishtp_cl *cl;
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *new_rb;
+ unsigned char *buffer = NULL;
+ struct ishtp_cl_rb *complete_rb = NULL;
+ unsigned long flags;
+ int rb_count;
+
+ if (ishtp_hdr->reserved) {
+ dev_err(dev->devc, "corrupted message header.\n");
+ goto eoi;
+ }
+
+ if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
+ dev_err(dev->devc,
+ "ISHTP message length in hdr exceeds IPC MTU\n");
+ goto eoi;
+ }
+
+ spin_lock_irqsave(&dev->read_list_spinlock, flags);
+ rb_count = -1;
+ list_for_each_entry(rb, &dev->read_list.list, list) {
+ ++rb_count;
+ cl = rb->cl;
+ if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
+ cl->fw_client_id == ishtp_hdr->fw_addr) ||
+ !(cl->state == ISHTP_CL_CONNECTED))
+ continue;
+
+ /* If no Rx buffer is allocated, disband the rb */
+ if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "Rx buffer is not allocated.\n");
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ cl->status = -ENOMEM;
+ goto eoi;
+ }
+
+ /*
+		 * If the message buffer overflows (exceeds the max. client msg
+		 * size), drop the message and recycle the buffer.
+ * Do we need to disconnect such a client? (We don't send
+ * back FC, so communication will be stuck anyway)
+ */
+ if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "message overflow. size %d len %d idx %ld\n",
+ rb->buffer.size, ishtp_hdr->length,
+ rb->buf_idx);
+ list_del(&rb->list);
+ ishtp_cl_io_rb_recycle(rb);
+ cl->status = -EIO;
+ goto eoi;
+ }
+
+ buffer = rb->buffer.data + rb->buf_idx;
+ dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);
+
+ rb->buf_idx += ishtp_hdr->length;
+ if (ishtp_hdr->msg_complete) {
+ /* Last fragment in message - it's complete */
+ cl->status = 0;
+ list_del(&rb->list);
+ complete_rb = rb;
+
+ --cl->out_flow_ctrl_creds;
+ /*
+ * the whole msg arrived, send a new FC, and add a new
+ * rb buffer for the next coming msg
+ */
+ spin_lock(&cl->free_list_spinlock);
+
+ if (!list_empty(&cl->free_rb_list.list)) {
+ new_rb = list_entry(cl->free_rb_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del_init(&new_rb->list);
+ spin_unlock(&cl->free_list_spinlock);
+ new_rb->cl = cl;
+ new_rb->buf_idx = 0;
+ INIT_LIST_HEAD(&new_rb->list);
+ list_add_tail(&new_rb->list,
+ &dev->read_list.list);
+
+ ishtp_hbm_cl_flow_control_req(dev, cl);
+ } else {
+ spin_unlock(&cl->free_list_spinlock);
+ }
+ }
+ /* One more fragment in message (even if this was last) */
+ ++cl->recv_msg_num_frags;
+
+ /*
+ * We can safely break here (and in BH too),
+ * a single input message can go only to a single request!
+ */
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ /* If it's nobody's message, just read and discard it */
+ if (!buffer) {
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+
+ dev_err(dev->devc, "Dropped Rx msg - no request\n");
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+ goto eoi;
+ }
+
+ if (complete_rb) {
+ cl = complete_rb->cl;
+ cl->ts_rx = ktime_get();
+ ++cl->recv_msg_cnt_ipc;
+ ishtp_cl_read_complete(complete_rb);
+ }
+eoi:
+ return;
+}
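+
+/*
+ * Note on reassembly: a message larger than the IPC MTU arrives as several
+ * fragments, each appended at rb->buf_idx. Only the fragment carrying
+ * msg_complete = 1 removes the rb from the read list, consumes an
+ * out_flow_ctrl credit and hands the assembled buffer to
+ * ishtp_cl_read_complete().
+ */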
+
+/**
+ * recv_ishtp_cl_msg_dma() - Receive client message
+ * @dev: ISHTP device instance
+ * @msg: message pointer
+ * @hbm: hbm buffer
+ *
+ * Receive and dispatch ISHTP client messages using DMA. This function executes
+ * in ISR or work queue context
+ */
+void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
+ struct dma_xfer_hbm *hbm)
+{
+ struct ishtp_cl *cl;
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *new_rb;
+ unsigned char *buffer = NULL;
+ struct ishtp_cl_rb *complete_rb = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->read_list_spinlock, flags);
+
+ list_for_each_entry(rb, &dev->read_list.list, list) {
+ cl = rb->cl;
+ if (!cl || !(cl->host_client_id == hbm->host_client_id &&
+ cl->fw_client_id == hbm->fw_client_id) ||
+ !(cl->state == ISHTP_CL_CONNECTED))
+ continue;
+
+ /*
+ * If no Rx buffer is allocated, disband the rb
+ */
+ if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "response buffer is not allocated.\n");
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ cl->status = -ENOMEM;
+ goto eoi;
+ }
+
+ /*
+		 * If the message buffer overflows (exceeds the max. client msg
+		 * size), drop the message and recycle the buffer.
+ * Do we need to disconnect such a client? (We don't send
+ * back FC, so communication will be stuck anyway)
+ */
+ if (rb->buffer.size < hbm->msg_length) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "message overflow. size %d len %d idx %ld\n",
+ rb->buffer.size, hbm->msg_length, rb->buf_idx);
+ list_del(&rb->list);
+ ishtp_cl_io_rb_recycle(rb);
+ cl->status = -EIO;
+ goto eoi;
+ }
+
+ buffer = rb->buffer.data;
+
+ /*
+		 * If the current FW doesn't support cache snooping, the driver
+		 * has to flush the cache manually.
+ */
+ if (dev->ops->dma_no_cache_snooping &&
+ dev->ops->dma_no_cache_snooping(dev))
+ clflush_cache_range(msg, hbm->msg_length);
+
+ memcpy(buffer, msg, hbm->msg_length);
+ rb->buf_idx = hbm->msg_length;
+
+ /* Last fragment in message - it's complete */
+ cl->status = 0;
+ list_del(&rb->list);
+ complete_rb = rb;
+
+ --cl->out_flow_ctrl_creds;
+ /*
+ * the whole msg arrived, send a new FC, and add a new
+ * rb buffer for the next coming msg
+ */
+ spin_lock(&cl->free_list_spinlock);
+
+ if (!list_empty(&cl->free_rb_list.list)) {
+ new_rb = list_entry(cl->free_rb_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del_init(&new_rb->list);
+ spin_unlock(&cl->free_list_spinlock);
+ new_rb->cl = cl;
+ new_rb->buf_idx = 0;
+ INIT_LIST_HEAD(&new_rb->list);
+ list_add_tail(&new_rb->list,
+ &dev->read_list.list);
+
+ ishtp_hbm_cl_flow_control_req(dev, cl);
+ } else {
+ spin_unlock(&cl->free_list_spinlock);
+ }
+
+ /* One more fragment in message (this is always last) */
+ ++cl->recv_msg_num_frags;
+
+ /*
+ * We can safely break here (and in BH too),
+ * a single input message can go only to a single request!
+ */
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ /* If it's nobody's message, just read and discard it */
+ if (!buffer) {
+ dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
+ goto eoi;
+ }
+
+ if (complete_rb) {
+ cl = complete_rb->cl;
+ cl->ts_rx = ktime_get();
+ ++cl->recv_msg_cnt_dma;
+ ishtp_cl_read_complete(complete_rb);
+ }
+eoi:
+ return;
+}
+
+void *ishtp_get_client_data(struct ishtp_cl *cl)
+{
+ return cl->client_data;
+}
+EXPORT_SYMBOL(ishtp_get_client_data);
+
+void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
+{
+ cl->client_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_client_data);
+
+struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
+{
+ return cl->dev;
+}
+EXPORT_SYMBOL(ishtp_get_ishtp_device);
+
+void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
+{
+ cl->tx_ring_size = size;
+}
+EXPORT_SYMBOL(ishtp_set_tx_ring_size);
+
+void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
+{
+ cl->rx_ring_size = size;
+}
+EXPORT_SYMBOL(ishtp_set_rx_ring_size);
+
+void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
+{
+ cl->state = state;
+}
+EXPORT_SYMBOL(ishtp_set_connection_state);
+
+void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
+{
+ cl->fw_client_id = fw_client_id;
+}
+EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
new file mode 100644
index 000000000..fc62dd149
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ISHTP client logic
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#ifndef _ISHTP_CLIENT_H_
+#define _ISHTP_CLIENT_H_
+
+#include <linux/types.h>
+#include "ishtp-dev.h"
+
+/* Tx and Rx ring size */
+#define CL_DEF_RX_RING_SIZE 2
+#define CL_DEF_TX_RING_SIZE 2
+#define CL_MAX_RX_RING_SIZE 32
+#define CL_MAX_TX_RING_SIZE 32
+
+#define DMA_SLOT_SIZE 4096
+/* Number of IPC fragments after which it's worth sending via DMA */
+#define DMA_WORTH_THRESHOLD 3
+
+/* DMA/IPC Tx paths. Values other than the default enforce that path */
+#define CL_TX_PATH_DEFAULT 0
+#define CL_TX_PATH_IPC 1
+#define CL_TX_PATH_DMA 2
+
+/* Client Tx buffer list entry */
+struct ishtp_cl_tx_ring {
+ struct list_head list;
+ struct ishtp_msg_data send_buf;
+};
+
+/* ISHTP client instance */
+struct ishtp_cl {
+ struct list_head link;
+ struct ishtp_device *dev;
+ enum cl_state state;
+ int status;
+
+ /* Link to ISHTP bus device */
+ struct ishtp_cl_device *device;
+
+ /* ID of client connected */
+ uint8_t host_client_id;
+ uint8_t fw_client_id;
+ uint8_t ishtp_flow_ctrl_creds;
+ uint8_t out_flow_ctrl_creds;
+
+ /* dma */
+ int last_tx_path;
+	/* 0: ack wasn't received, 1: ack was received */
+ int last_dma_acked;
+ unsigned char *last_dma_addr;
+	/* 0: ack wasn't received, 1: ack was received */
+ int last_ipc_acked;
+
+ /* Rx ring buffer pool */
+ unsigned int rx_ring_size;
+ struct ishtp_cl_rb free_rb_list;
+ spinlock_t free_list_spinlock;
+ /* Rx in-process list */
+ struct ishtp_cl_rb in_process_list;
+ spinlock_t in_process_spinlock;
+
+ /* Client Tx buffers list */
+ unsigned int tx_ring_size;
+ struct ishtp_cl_tx_ring tx_list, tx_free_list;
+ int tx_ring_free_size;
+ spinlock_t tx_list_spinlock;
+ spinlock_t tx_free_list_spinlock;
+ size_t tx_offs; /* Offset in buffer at head of 'tx_list' */
+
+ /**
+	 * If we get an FC and the list is not empty, we must know whether we
+	 * are in the middle of sending.
+	 * If so, we need to increase the FC counter; otherwise, we need to
+	 * start sending the first msg in the list.
+	 * (!) This applies to the counting-FC implementation only. With
+	 * single-FC the other party may NOT send an FC until it receives the
+	 * complete message
+ */
+ int sending;
+
+ /* Send FC spinlock */
+ spinlock_t fc_spinlock;
+
+ /* wait queue for connect and disconnect response from FW */
+ wait_queue_head_t wait_ctrl_res;
+
+ /* Error stats */
+ unsigned int err_send_msg;
+ unsigned int err_send_fc;
+
+ /* Send/recv stats */
+ unsigned int send_msg_cnt_ipc;
+ unsigned int send_msg_cnt_dma;
+ unsigned int recv_msg_cnt_ipc;
+ unsigned int recv_msg_cnt_dma;
+ unsigned int recv_msg_num_frags;
+ unsigned int ishtp_flow_ctrl_cnt;
+ unsigned int out_flow_ctrl_cnt;
+
+ /* Rx msg ... out FC timing */
+ ktime_t ts_rx;
+ ktime_t ts_out_fc;
+ ktime_t ts_max_fc_delay;
+ void *client_data;
+};
+
+/* Client connection management internal functions */
+int ishtp_can_client_connect(struct ishtp_device *ishtp_dev, guid_t *uuid);
+int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id);
+void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl);
+void recv_ishtp_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr);
+int ishtp_cl_read_start(struct ishtp_cl *cl);
+
+/* Ring Buffer I/F */
+int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
+int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
+void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
+void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
+int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl);
+int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl);
+
+/* DMA I/F functions */
+void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
+ struct dma_xfer_hbm *hbm);
+void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev);
+void ishtp_cl_free_dma_buf(struct ishtp_device *dev);
+void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ uint32_t size);
+void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ void *msg_addr,
+ uint8_t size);
+
+/* Request blocks alloc/free I/F */
+struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl);
+void ishtp_io_rb_free(struct ishtp_cl_rb *priv_rb);
+int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length);
+
+/**
+ * ishtp_cl_cmp_id - tells if two clients have the same id
+ * returns true - if the ids are the same and the clients are not NULL
+ */
+static inline bool ishtp_cl_cmp_id(const struct ishtp_cl *cl1,
+ const struct ishtp_cl *cl2)
+{
+ return cl1 && cl2 &&
+ (cl1->host_client_id == cl2->host_client_id) &&
+ (cl1->fw_client_id == cl2->fw_client_id);
+}
+
+#endif /* _ISHTP_CLIENT_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
new file mode 100644
index 000000000..00046cbfd
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP DMA I/F functions
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include "ishtp-dev.h"
+#include "client.h"
+
+/**
+ * ishtp_cl_alloc_dma_buf() - Allocate DMA RX and TX buffer
+ * @dev: ishtp device
+ *
+ * Allocate RX and TX DMA buffer once during bus setup.
+ * It allocates 1 MB RX and TX DMA buffers, which are divided
+ * into slots.
+ */
+void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev)
+{
+ dma_addr_t h;
+
+ if (dev->ishtp_host_dma_tx_buf)
+ return;
+
+ dev->ishtp_host_dma_tx_buf_size = 1024*1024;
+ dev->ishtp_host_dma_rx_buf_size = 1024*1024;
+
+ /* Allocate Tx buffer and init usage bitmap */
+ dev->ishtp_host_dma_tx_buf = dma_alloc_coherent(dev->devc,
+ dev->ishtp_host_dma_tx_buf_size,
+ &h, GFP_KERNEL);
+ if (dev->ishtp_host_dma_tx_buf)
+ dev->ishtp_host_dma_tx_buf_phys = h;
+
+ dev->ishtp_dma_num_slots = dev->ishtp_host_dma_tx_buf_size /
+ DMA_SLOT_SIZE;
+
+ dev->ishtp_dma_tx_map = kcalloc(dev->ishtp_dma_num_slots,
+ sizeof(uint8_t),
+ GFP_KERNEL);
+ spin_lock_init(&dev->ishtp_dma_tx_lock);
+
+ /* Allocate Rx buffer */
+ dev->ishtp_host_dma_rx_buf = dma_alloc_coherent(dev->devc,
+ dev->ishtp_host_dma_rx_buf_size,
+ &h, GFP_KERNEL);
+
+ if (dev->ishtp_host_dma_rx_buf)
+ dev->ishtp_host_dma_rx_buf_phys = h;
+}
+
+/**
+ * ishtp_cl_free_dma_buf() - Free DMA RX and TX buffer
+ * @dev: ishtp device
+ *
+ * Free the DMA buffers when all clients are released. This
+ * only happens during the error path in the ISH built-in
+ * driver model
+ */
+void ishtp_cl_free_dma_buf(struct ishtp_device *dev)
+{
+ dma_addr_t h;
+
+ if (dev->ishtp_host_dma_tx_buf) {
+ h = dev->ishtp_host_dma_tx_buf_phys;
+ dma_free_coherent(dev->devc, dev->ishtp_host_dma_tx_buf_size,
+ dev->ishtp_host_dma_tx_buf, h);
+ }
+
+ if (dev->ishtp_host_dma_rx_buf) {
+ h = dev->ishtp_host_dma_rx_buf_phys;
+ dma_free_coherent(dev->devc, dev->ishtp_host_dma_rx_buf_size,
+ dev->ishtp_host_dma_rx_buf, h);
+ }
+
+ kfree(dev->ishtp_dma_tx_map);
+ dev->ishtp_host_dma_tx_buf = NULL;
+ dev->ishtp_host_dma_rx_buf = NULL;
+ dev->ishtp_dma_tx_map = NULL;
+}
+
+/*
+ * ishtp_cl_get_dma_send_buf() - Get a DMA memory slot
+ * @dev: ishtp device
+ * @size: Size of memory to get
+ *
+ * Find and return a free address of "size" bytes in the DMA Tx buffer.
+ * The function will mark this address as "in use".
+ *
+ * Return: NULL when there is no free buffer, else the address to copy into
+ */
+void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ uint32_t size)
+{
+ unsigned long flags;
+ int i, j, free;
+	/* an additional slot is needed if there is a remainder */
+ int required_slots = (size / DMA_SLOT_SIZE)
+ + 1 * (size % DMA_SLOT_SIZE != 0);
+
+ if (!dev->ishtp_dma_tx_map) {
+ dev_err(dev->devc, "Fail to allocate Tx map\n");
+ return NULL;
+ }
+
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
+ free = 1;
+ for (j = 0; j < required_slots; j++)
+ if (dev->ishtp_dma_tx_map[i+j]) {
+ free = 0;
+ i += j;
+ break;
+ }
+ if (free) {
+ /* mark memory as "caught" */
+ for (j = 0; j < required_slots; j++)
+ dev->ishtp_dma_tx_map[i+j] = 1;
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ return (i * DMA_SLOT_SIZE) +
+ (unsigned char *)dev->ishtp_host_dma_tx_buf;
+ }
+ }
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ dev_err(dev->devc, "No free DMA buffer to send msg\n");
+ return NULL;
+}
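+
+/*
+ * For example, with DMA_SLOT_SIZE = 4096 a 5000-byte message needs
+ * (5000 / 4096) + 1 = 2 contiguous slots, while a 4096-byte message needs
+ * exactly one, since the remainder term is zero. Slots stay marked busy in
+ * ishtp_dma_tx_map until the matching DMA_XFER_ACK releases them through
+ * ishtp_cl_release_dma_acked_mem().
+ */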
+
+/*
+ * ishtp_cl_release_dma_acked_mem() - Release DMA memory slot
+ * @dev: ishtp device
+ * @msg_addr: message address of slot
+ * @size: Size of memory to release
+ *
+ * Returns the acked memory (size bytes starting at msg_addr) to the
+ * free list.
+ */
+void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ void *msg_addr,
+ uint8_t size)
+{
+ unsigned long flags;
+ int acked_slots = (size / DMA_SLOT_SIZE)
+ + 1 * (size % DMA_SLOT_SIZE != 0);
+ int i, j;
+
+ if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) {
+ dev_err(dev->devc, "Bad DMA Tx ack address\n");
+ return;
+ }
+
+ if (!dev->ishtp_dma_tx_map) {
+ dev_err(dev->devc, "Fail to allocate Tx map\n");
+ return;
+ }
+
+ i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (j = 0; j < acked_slots; j++) {
+ if ((i + j) >= dev->ishtp_dma_num_slots ||
+ !dev->ishtp_dma_tx_map[i+j]) {
+ /* no such slot, or memory is already free */
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ dev_err(dev->devc, "Bad DMA Tx ack address\n");
+ return;
+ }
+ dev->ishtp_dma_tx_map[i+j] = 0;
+ }
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
new file mode 100644
index 000000000..9c031a06e
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -0,0 +1,990 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ISHTP bus layer messages handling
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include "ishtp-dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * ishtp_hbm_fw_cl_allocate() - Allocate FW clients
+ * @dev: ISHTP device instance
+ *
+ * Allocates storage for fw clients
+ */
+static void ishtp_hbm_fw_cl_allocate(struct ishtp_device *dev)
+{
+ struct ishtp_fw_client *clients;
+ int b;
+
+ /* count how many ISH clients we have */
+ for_each_set_bit(b, dev->fw_clients_map, ISHTP_CLIENTS_MAX)
+ dev->fw_clients_num++;
+
+ if (dev->fw_clients_num <= 0)
+ return;
+
+ /* allocate storage for fw clients representation */
+ clients = kcalloc(dev->fw_clients_num, sizeof(struct ishtp_fw_client),
+ GFP_KERNEL);
+ if (!clients) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ ish_hw_reset(dev);
+ return;
+ }
+ dev->fw_clients = clients;
+}
+
+/**
+ * ishtp_hbm_cl_hdr() - construct client hbm header
+ * @cl: client
+ * @hbm_cmd: host bus message command
+ * @buf: buffer for cl header
+ * @len: buffer length
+ *
+ * Initialize HBM buffer
+ */
+static inline void ishtp_hbm_cl_hdr(struct ishtp_cl *cl, uint8_t hbm_cmd,
+ void *buf, size_t len)
+{
+ struct ishtp_hbm_cl_cmd *cmd = buf;
+
+ memset(cmd, 0, len);
+
+ cmd->hbm_cmd = hbm_cmd;
+ cmd->host_addr = cl->host_client_id;
+ cmd->fw_addr = cl->fw_client_id;
+}
+
+/**
+ * ishtp_hbm_cl_addr_equal() - Compare client address
+ * @cl: client
+ * @buf: Client command buffer
+ *
+ * Compare client address with the address in command buffer
+ *
+ * Return: True if they have the same address
+ */
+static inline bool ishtp_hbm_cl_addr_equal(struct ishtp_cl *cl, void *buf)
+{
+ struct ishtp_hbm_cl_cmd *cmd = buf;
+
+ return cl->host_client_id == cmd->host_addr &&
+ cl->fw_client_id == cmd->fw_addr;
+}
+
+/**
+ * ishtp_hbm_start_wait() - Wait for HBM start message
+ * @dev: ISHTP device instance
+ *
+ * Wait for HBM start message from firmware
+ *
+ * Return: 0 if HBM start is/was received else timeout error
+ */
+int ishtp_hbm_start_wait(struct ishtp_device *dev)
+{
+ int ret;
+
+ if (dev->hbm_state > ISHTP_HBM_START)
+ return 0;
+
+ dev_dbg(dev->devc, "Going to wait for ishtp start. hbm_state=%08X\n",
+ dev->hbm_state);
+ ret = wait_event_interruptible_timeout(dev->wait_hbm_recvd_msg,
+ dev->hbm_state >= ISHTP_HBM_STARTED,
+ (ISHTP_INTEROP_TIMEOUT * HZ));
+
+ dev_dbg(dev->devc,
+ "Woke up from waiting for ishtp start. hbm_state=%08X\n",
+ dev->hbm_state);
+
+ if (ret <= 0 && (dev->hbm_state <= ISHTP_HBM_START)) {
+ dev->hbm_state = ISHTP_HBM_IDLE;
+ dev_err(dev->devc,
+ "waiting for ishtp start failed. ret=%d hbm_state=%08X\n",
+ ret, dev->hbm_state);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/**
+ * ishtp_hbm_start_req() - Send HBM start message
+ * @dev: ISHTP device instance
+ *
+ * Send HBM start message to firmware
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_start_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_host_version_request start_req = { 0 };
+
+ ishtp_hbm_hdr(&hdr, sizeof(start_req));
+
+ /* host start message */
+ start_req.hbm_cmd = HOST_START_REQ_CMD;
+ start_req.host_version.major_version = HBM_MAJOR_VERSION;
+ start_req.host_version.minor_version = HBM_MINOR_VERSION;
+
+ /*
+ * (!) Response to HBM start may be so quick that this thread would get
+ * preempted BEFORE managing to set hbm_state = ISHTP_HBM_START.
+ * So set it at first, change back to ISHTP_HBM_IDLE upon failure
+ */
+ dev->hbm_state = ISHTP_HBM_START;
+ if (ishtp_write_message(dev, &hdr, &start_req)) {
+ dev_err(dev->devc, "version message send failed\n");
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev->hbm_state = ISHTP_HBM_IDLE;
+ ish_hw_reset(dev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ishtp_hbm_enum_clients_req() - Send client enum req
+ * @dev: ISHTP device instance
+ *
+ * Send client enumeration request message to firmware
+ */
+void ishtp_hbm_enum_clients_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_host_enum_request enum_req = { 0 };
+
+ /* enumerate clients */
+ ishtp_hbm_hdr(&hdr, sizeof(enum_req));
+ enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;
+
+ if (ishtp_write_message(dev, &hdr, &enum_req)) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev_err(dev->devc, "enumeration request send failed\n");
+ ish_hw_reset(dev);
+ }
+ dev->hbm_state = ISHTP_HBM_ENUM_CLIENTS;
+}
+
+/**
+ * ishtp_hbm_prop_req() - Request property
+ * @dev: ISHTP device instance
+ *
+ * Request property for a single client
+ *
+ * Return: 0 if success else error code
+ */
+static int ishtp_hbm_prop_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_props_request prop_req = { 0 };
+ unsigned long next_client_index;
+ uint8_t client_num;
+
+ client_num = dev->fw_client_presentation_num;
+
+ next_client_index = find_next_bit(dev->fw_clients_map,
+ ISHTP_CLIENTS_MAX, dev->fw_client_index);
+
+ /* We got all client properties */
+ if (next_client_index == ISHTP_CLIENTS_MAX) {
+ dev->hbm_state = ISHTP_HBM_WORKING;
+ dev->dev_state = ISHTP_DEV_ENABLED;
+
+ for (dev->fw_client_presentation_num = 1;
+ dev->fw_client_presentation_num < client_num + 1;
+ ++dev->fw_client_presentation_num)
+ /* Add new client device */
+ ishtp_bus_new_client(dev);
+ return 0;
+ }
+
+ dev->fw_clients[client_num].client_id = next_client_index;
+
+ ishtp_hbm_hdr(&hdr, sizeof(prop_req));
+
+ prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ prop_req.address = next_client_index;
+
+ if (ishtp_write_message(dev, &hdr, &prop_req)) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev_err(dev->devc, "properties request send failed\n");
+ ish_hw_reset(dev);
+ return -EIO;
+ }
+
+ dev->fw_client_index = next_client_index;
+
+ return 0;
+}
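+
+/*
+ * For reference, the HBM bring-up sequence implemented here is:
+ * HOST_START_REQ -> HOST_START_RES (hbm_state = ISHTP_HBM_STARTED),
+ * HOST_ENUM_REQ (ISHTP_HBM_ENUM_CLIENTS) -> HOST_ENUM_RES (fw client bitmap),
+ * then one HOST_CLIENT_PROPERTIES_REQ/RES exchange per enumerated client
+ * (ISHTP_HBM_CLIENT_PROPERTIES). When the last response has been stored,
+ * hbm_state becomes ISHTP_HBM_WORKING, dev_state becomes ISHTP_DEV_ENABLED
+ * and the enumerated clients are registered on the ISHTP bus.
+ */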
+
+/**
+ * ishtp_hbm_stop_req() - Send HBM stop
+ * @dev: ISHTP device instance
+ *
+ * Send stop request message
+ */
+static void ishtp_hbm_stop_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+	struct hbm_host_stop_request stop_req = { 0 };
+
+ ishtp_hbm_hdr(&hdr, sizeof(stop_req));
+
+ stop_req.hbm_cmd = HOST_STOP_REQ_CMD;
+ stop_req.reason = DRIVER_STOP_REQUEST;
+
+ ishtp_write_message(dev, &hdr, &stop_req);
+}
+
+/**
+ * ishtp_hbm_cl_flow_control_req() - Send flow control request
+ * @dev: ISHTP device instance
+ * @cl: ISHTP client instance
+ *
+ * Send flow control request
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_flow_control flow_ctrl;
+ const size_t len = sizeof(flow_ctrl);
+ int rv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->fc_spinlock, flags);
+
+ ishtp_hbm_hdr(&hdr, len);
+ ishtp_hbm_cl_hdr(cl, ISHTP_FLOW_CONTROL_CMD, &flow_ctrl, len);
+
+ /*
+ * Sync possible race when RB recycle and packet receive paths
+ * both try to send an out FC
+ */
+ if (cl->out_flow_ctrl_creds) {
+ spin_unlock_irqrestore(&cl->fc_spinlock, flags);
+ return 0;
+ }
+
+ cl->recv_msg_num_frags = 0;
+
+ rv = ishtp_write_message(dev, &hdr, &flow_ctrl);
+ if (!rv) {
+ ++cl->out_flow_ctrl_creds;
+ ++cl->out_flow_ctrl_cnt;
+ cl->ts_out_fc = ktime_get();
+ if (cl->ts_rx) {
+ ktime_t ts_diff = ktime_sub(cl->ts_out_fc, cl->ts_rx);
+ if (ktime_after(ts_diff, cl->ts_max_fc_delay))
+ cl->ts_max_fc_delay = ts_diff;
+ }
+ } else {
+ ++cl->err_send_fc;
+ }
+
+ spin_unlock_irqrestore(&cl->fc_spinlock, flags);
+ return rv;
+}
+
+/**
+ * ishtp_hbm_cl_disconnect_req() - Send disconnect request
+ * @dev: ISHTP device instance
+ * @cl: ISHTP client instance
+ *
+ * Send disconnect message to fw
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_client_connect_request disconn_req;
+ const size_t len = sizeof(disconn_req);
+
+ ishtp_hbm_hdr(&hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, &disconn_req, len);
+
+ return ishtp_write_message(dev, &hdr, &disconn_req);
+}
+
+/**
+ * ishtp_hbm_cl_disconnect_res() - Get disconnect response
+ * @dev: ISHTP device instance
+ * @rs: Response message
+ *
+ * Received disconnect response from fw
+ */
+static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+ wake_up_interruptible(&cl->wait_ctrl_res);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_cl_connect_req() - Send connect request
+ * @dev: ISHTP device instance
+ * @cl: client device instance
+ *
+ * Send connection request to specific fw client
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ struct hbm_client_connect_request conn_req;
+ const size_t len = sizeof(conn_req);
+
+ ishtp_hbm_hdr(&hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, &conn_req, len);
+
+ return ishtp_write_message(dev, &hdr, &conn_req);
+}
+
+/**
+ * ishtp_hbm_cl_connect_res() - Get connect response
+ * @dev: ISHTP device instance
+ * @rs: Response message
+ *
+ * Received connect response from fw
+ */
+static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (ishtp_hbm_cl_addr_equal(cl, rs)) {
+ if (!rs->status) {
+ cl->state = ISHTP_CL_CONNECTED;
+ cl->status = 0;
+ } else {
+ cl->state = ISHTP_CL_DISCONNECTED;
+ cl->status = -ENODEV;
+ }
+ wake_up_interruptible(&cl->wait_ctrl_res);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_fw_disconnect_req() - Receive disconnect request
+ * @dev: ISHTP device instance
+ * @disconnect_req: disconnect request structure
+ *
+ * Disconnect request bus message from the fw. Send disconnect response.
+ */
+static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
+ struct hbm_client_connect_request *disconnect_req)
+{
+ struct ishtp_cl *cl;
+ const size_t len = sizeof(struct hbm_client_connect_response);
+ unsigned long flags;
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[4]; /* All HBM messages are 4 bytes */
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (ishtp_hbm_cl_addr_equal(cl, disconnect_req)) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+
+ /* send disconnect response */
+ ishtp_hbm_hdr(&hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, data,
+ len);
+ ishtp_write_message(dev, &hdr, data);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_dma_xfer_ack() - Receive transfer ACK
+ * @dev: ISHTP device instance
+ * @dma_xfer: HBM transfer message
+ *
+ * Receive ack for ISHTP-over-DMA client message
+ */
+static void ishtp_hbm_dma_xfer_ack(struct ishtp_device *dev,
+ struct dma_xfer_hbm *dma_xfer)
+{
+ void *msg;
+ uint64_t offs;
+ struct ishtp_msg_hdr *ishtp_hdr =
+ (struct ishtp_msg_hdr *)&dev->ishtp_msg_hdr;
+ unsigned int msg_offs;
+ struct ishtp_cl *cl;
+
+ for (msg_offs = 0; msg_offs < ishtp_hdr->length;
+ msg_offs += sizeof(struct dma_xfer_hbm)) {
+ offs = dma_xfer->msg_addr - dev->ishtp_host_dma_tx_buf_phys;
+ if (offs > dev->ishtp_host_dma_tx_buf_size) {
+ dev_err(dev->devc, "Bad DMA Tx ack message address\n");
+ return;
+ }
+ if (dma_xfer->msg_length >
+ dev->ishtp_host_dma_tx_buf_size - offs) {
+ dev_err(dev->devc, "Bad DMA Tx ack message size\n");
+ return;
+ }
+
+ /* logical address of the acked mem */
+ msg = (unsigned char *)dev->ishtp_host_dma_tx_buf + offs;
+ ishtp_cl_release_dma_acked_mem(dev, msg, dma_xfer->msg_length);
+
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (cl->fw_client_id == dma_xfer->fw_client_id &&
+ cl->host_client_id == dma_xfer->host_client_id)
+ /*
+				 * A single ack may cover several dma
+				 * transfers, so the last msg addr may be
+				 * inside the acked memory but not at
+				 * its start
+ */
+ if (cl->last_dma_addr >=
+ (unsigned char *)msg &&
+ cl->last_dma_addr <
+ (unsigned char *)msg +
+ dma_xfer->msg_length) {
+ cl->last_dma_acked = 1;
+
+ if (!list_empty(&cl->tx_list.list) &&
+ cl->ishtp_flow_ctrl_creds) {
+ /*
+ * start sending the first msg
+ */
+ ishtp_cl_send_msg(dev, cl);
+ }
+ }
+ }
+ ++dma_xfer;
+ }
+}
+
+/**
+ * ishtp_hbm_dma_xfer() - Receive DMA transfer message
+ * @dev: ISHTP device instance
+ * @dma_xfer: HBM transfer message
+ *
+ * Receive ISHTP-over-DMA client message
+ */
+static void ishtp_hbm_dma_xfer(struct ishtp_device *dev,
+ struct dma_xfer_hbm *dma_xfer)
+{
+ void *msg;
+ uint64_t offs;
+ struct ishtp_msg_hdr hdr;
+ struct ishtp_msg_hdr *ishtp_hdr =
+ (struct ishtp_msg_hdr *) &dev->ishtp_msg_hdr;
+ struct dma_xfer_hbm *prm = dma_xfer;
+ unsigned int msg_offs;
+
+ for (msg_offs = 0; msg_offs < ishtp_hdr->length;
+ msg_offs += sizeof(struct dma_xfer_hbm)) {
+
+ offs = dma_xfer->msg_addr - dev->ishtp_host_dma_rx_buf_phys;
+ if (offs > dev->ishtp_host_dma_rx_buf_size) {
+ dev_err(dev->devc, "Bad DMA Rx message address\n");
+ return;
+ }
+ if (dma_xfer->msg_length >
+ dev->ishtp_host_dma_rx_buf_size - offs) {
+ dev_err(dev->devc, "Bad DMA Rx message size\n");
+ return;
+ }
+ msg = dev->ishtp_host_dma_rx_buf + offs;
+ recv_ishtp_cl_msg_dma(dev, msg, dma_xfer);
+ dma_xfer->hbm = DMA_XFER_ACK; /* Prepare for response */
+ ++dma_xfer;
+ }
+
+ /* Send DMA_XFER_ACK [...] */
+ ishtp_hbm_hdr(&hdr, ishtp_hdr->length);
+ ishtp_write_message(dev, &hdr, (unsigned char *)prm);
+}
+
+/**
+ * ishtp_hbm_dispatch() - HBM dispatch function
+ * @dev: ISHTP device instance
+ * @hdr: bus message
+ *
+ * Bottom half read routine after ISR to handle the read bus message cmd
+ * processing
+ */
+void ishtp_hbm_dispatch(struct ishtp_device *dev,
+ struct ishtp_bus_message *hdr)
+{
+ struct ishtp_bus_message *ishtp_msg;
+ struct ishtp_fw_client *fw_client;
+ struct hbm_host_version_response *version_res;
+ struct hbm_client_connect_response *connect_res;
+ struct hbm_client_connect_response *disconnect_res;
+ struct hbm_client_connect_request *disconnect_req;
+ struct hbm_props_response *props_res;
+ struct hbm_host_enum_response *enum_res;
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct dma_alloc_notify dma_alloc_notify;
+ struct dma_xfer_hbm *dma_xfer;
+
+ ishtp_msg = hdr;
+
+ switch (ishtp_msg->hbm_cmd) {
+ case HOST_START_RES_CMD:
+ version_res = (struct hbm_host_version_response *)ishtp_msg;
+ if (!version_res->host_version_supported) {
+ dev->version = version_res->fw_max_version;
+
+ dev->hbm_state = ISHTP_HBM_STOPPED;
+ ishtp_hbm_stop_req(dev);
+ return;
+ }
+
+ dev->version.major_version = HBM_MAJOR_VERSION;
+ dev->version.minor_version = HBM_MINOR_VERSION;
+ if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
+ dev->hbm_state == ISHTP_HBM_START) {
+ dev->hbm_state = ISHTP_HBM_STARTED;
+ ishtp_hbm_enum_clients_req(dev);
+ } else {
+ dev_err(dev->devc,
+ "reset: wrong host start response\n");
+ /* BUG: why do we arrive here? */
+ ish_hw_reset(dev);
+ return;
+ }
+
+ wake_up_interruptible(&dev->wait_hbm_recvd_msg);
+ break;
+
+ case CLIENT_CONNECT_RES_CMD:
+ connect_res = (struct hbm_client_connect_response *)ishtp_msg;
+ ishtp_hbm_cl_connect_res(dev, connect_res);
+ break;
+
+ case CLIENT_DISCONNECT_RES_CMD:
+ disconnect_res =
+ (struct hbm_client_connect_response *)ishtp_msg;
+ ishtp_hbm_cl_disconnect_res(dev, disconnect_res);
+ break;
+
+ case HOST_CLIENT_PROPERTIES_RES_CMD:
+ props_res = (struct hbm_props_response *)ishtp_msg;
+ fw_client = &dev->fw_clients[dev->fw_client_presentation_num];
+
+ if (props_res->status || !dev->fw_clients) {
+ dev_err(dev->devc,
+ "reset: properties response hbm wrong status\n");
+ ish_hw_reset(dev);
+ return;
+ }
+
+ if (fw_client->client_id != props_res->address) {
+ dev_err(dev->devc,
+ "reset: host properties response address mismatch [%02X %02X]\n",
+ fw_client->client_id, props_res->address);
+ ish_hw_reset(dev);
+ return;
+ }
+
+ if (dev->dev_state != ISHTP_DEV_INIT_CLIENTS ||
+ dev->hbm_state != ISHTP_HBM_CLIENT_PROPERTIES) {
+ dev_err(dev->devc,
+ "reset: unexpected properties response\n");
+ ish_hw_reset(dev);
+ return;
+ }
+
+ fw_client->props = props_res->client_properties;
+ dev->fw_client_index++;
+ dev->fw_client_presentation_num++;
+
+ /* request property for the next client */
+ ishtp_hbm_prop_req(dev);
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ break;
+
+ if (!ishtp_use_dma_transfer())
+ break;
+
+ dev_dbg(dev->devc, "Requesting to use DMA\n");
+ ishtp_cl_alloc_dma_buf(dev);
+ if (dev->ishtp_host_dma_rx_buf) {
+ const size_t len = sizeof(dma_alloc_notify);
+
+ memset(&dma_alloc_notify, 0, sizeof(dma_alloc_notify));
+ dma_alloc_notify.hbm = DMA_BUFFER_ALLOC_NOTIFY;
+ dma_alloc_notify.buf_size =
+ dev->ishtp_host_dma_rx_buf_size;
+ dma_alloc_notify.buf_address =
+ dev->ishtp_host_dma_rx_buf_phys;
+ ishtp_hbm_hdr(&ishtp_hdr, len);
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&dma_alloc_notify);
+ }
+
+ break;
+
+ case HOST_ENUM_RES_CMD:
+ enum_res = (struct hbm_host_enum_response *) ishtp_msg;
+ memcpy(dev->fw_clients_map, enum_res->valid_addresses, 32);
+ if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
+ dev->hbm_state == ISHTP_HBM_ENUM_CLIENTS) {
+ dev->fw_client_presentation_num = 0;
+ dev->fw_client_index = 0;
+
+ ishtp_hbm_fw_cl_allocate(dev);
+ dev->hbm_state = ISHTP_HBM_CLIENT_PROPERTIES;
+
+ /* first property request */
+ ishtp_hbm_prop_req(dev);
+ } else {
+ dev_err(dev->devc,
+ "reset: unexpected enumeration response hbm\n");
+ ish_hw_reset(dev);
+ return;
+ }
+ break;
+
+ case HOST_STOP_RES_CMD:
+ if (dev->hbm_state != ISHTP_HBM_STOPPED)
+ dev_err(dev->devc, "unexpected stop response\n");
+
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ dev_info(dev->devc, "reset: FW stop response\n");
+ ish_hw_reset(dev);
+ break;
+
+ case CLIENT_DISCONNECT_REQ_CMD:
+ /* search for client */
+ disconnect_req =
+ (struct hbm_client_connect_request *)ishtp_msg;
+ ishtp_hbm_fw_disconnect_req(dev, disconnect_req);
+ break;
+
+ case FW_STOP_REQ_CMD:
+ dev->hbm_state = ISHTP_HBM_STOPPED;
+ break;
+
+ case DMA_BUFFER_ALLOC_RESPONSE:
+ dev->ishtp_host_dma_enabled = 1;
+ break;
+
+ case DMA_XFER:
+ dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
+ if (!dev->ishtp_host_dma_enabled) {
+ dev_err(dev->devc,
+ "DMA XFER requested but DMA is not enabled\n");
+ break;
+ }
+ ishtp_hbm_dma_xfer(dev, dma_xfer);
+ break;
+
+ case DMA_XFER_ACK:
+ dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
+ if (!dev->ishtp_host_dma_enabled ||
+ !dev->ishtp_host_dma_tx_buf) {
+ dev_err(dev->devc,
+ "DMA XFER acked but DMA Tx is not enabled\n");
+ break;
+ }
+ ishtp_hbm_dma_xfer_ack(dev, dma_xfer);
+ break;
+
+ default:
+ dev_err(dev->devc, "unknown HBM: %u\n",
+ (unsigned int)ishtp_msg->hbm_cmd);
+
+ break;
+ }
+}
+
+/**
+ * bh_hbm_work_fn() - HBM work function
+ * @work: work struct
+ *
+ * Bottom half processing work function (instead of thread handler)
+ * for processing hbm messages
+ */
+void bh_hbm_work_fn(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ishtp_device *dev;
+ unsigned char hbm[IPC_PAYLOAD_SIZE];
+
+ dev = container_of(work, struct ishtp_device, bh_hbm_work);
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ if (dev->rd_msg_fifo_head != dev->rd_msg_fifo_tail) {
+ memcpy(hbm, dev->rd_msg_fifo + dev->rd_msg_fifo_head,
+ IPC_PAYLOAD_SIZE);
+ dev->rd_msg_fifo_head =
+ (dev->rd_msg_fifo_head + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ ishtp_hbm_dispatch(dev, (struct ishtp_bus_message *)hbm);
+ } else {
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ }
+}
+
+/**
+ * recv_hbm() - Receive HBM message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: received bus message
+ *
+ * Receive and process ISHTP bus messages in ISR context. This will schedule
+ * work function to process message
+ */
+void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr)
+{
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+ struct ishtp_bus_message *ishtp_msg =
+ (struct ishtp_bus_message *)rd_msg_buf;
+ unsigned long flags;
+
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+
+ /* Flow control - handle in place */
+ if (ishtp_msg->hbm_cmd == ISHTP_FLOW_CONTROL_CMD) {
+ struct hbm_flow_control *flow_control =
+ (struct hbm_flow_control *)ishtp_msg;
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags, tx_flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (cl->host_client_id == flow_control->host_addr &&
+ cl->fw_client_id ==
+ flow_control->fw_addr) {
+ /*
+ * NOTE: It's valid only for counting
+ * flow-control implementation to receive a
+ * FC in the middle of sending. Meanwhile not
+ * supported
+ */
+ if (cl->ishtp_flow_ctrl_creds)
+ dev_err(dev->devc,
+ "recv extra FC from FW client %u (host client %u) (FC count was %d)\n",
+ (unsigned int)cl->fw_client_id,
+ (unsigned int)cl->host_client_id,
+ cl->ishtp_flow_ctrl_creds);
+ else {
+ ++cl->ishtp_flow_ctrl_creds;
+ ++cl->ishtp_flow_ctrl_cnt;
+ cl->last_ipc_acked = 1;
+ spin_lock_irqsave(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ if (!list_empty(&cl->tx_list.list)) {
+ /*
+ * start sending the first msg
+ * = the callback function
+ */
+ spin_unlock_irqrestore(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ ishtp_cl_send_msg(dev, cl);
+ } else {
+ spin_unlock_irqrestore(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ }
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+ goto eoi;
+ }
+
+ /*
+	 * Some messages that are safe for ISR processing and important
+	 * to handle quickly and in order go here
+ */
+ if (ishtp_msg->hbm_cmd == CLIENT_CONNECT_RES_CMD ||
+ ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_RES_CMD ||
+ ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_REQ_CMD ||
+ ishtp_msg->hbm_cmd == DMA_XFER) {
+ ishtp_hbm_dispatch(dev, ishtp_msg);
+ goto eoi;
+ }
+
+ /*
+ * All other HBMs go here.
+ * We schedule HBMs for processing serially by using system wq,
+ * possibly there will be multiple HBMs scheduled at the same time.
+ */
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ if ((dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE) ==
+ dev->rd_msg_fifo_head) {
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ dev_err(dev->devc, "BH buffer overflow, dropping HBM %u\n",
+ (unsigned int)ishtp_msg->hbm_cmd);
+ goto eoi;
+ }
+ memcpy(dev->rd_msg_fifo + dev->rd_msg_fifo_tail, ishtp_msg,
+ ishtp_hdr->length);
+ dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ schedule_work(&dev->bh_hbm_work);
+eoi:
+ return;
+}
+
+/**
+ * recv_fixed_cl_msg() - Receive fixed client message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: received bus message
+ *
+ * Receive and process ISHTP fixed client messages (address == 0)
+ * in ISR context
+ */
+void recv_fixed_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr)
+{
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+
+ dev->print_log(dev,
+ "%s() got fixed client msg from client #%d\n",
+ __func__, ishtp_hdr->fw_addr);
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+ if (ishtp_hdr->fw_addr == ISHTP_SYSTEM_STATE_CLIENT_ADDR) {
+ struct ish_system_states_header *msg_hdr =
+ (struct ish_system_states_header *)rd_msg_buf;
+ if (msg_hdr->cmd == SYSTEM_STATE_SUBSCRIBE)
+ ishtp_send_resume(dev);
+ /* if FW request arrived here, the system is not suspended */
+ else
+ dev_err(dev->devc, "unknown fixed client msg [%02X]\n",
+ msg_hdr->cmd);
+ }
+}
+
+/**
+ * fix_cl_hdr() - Initialize fixed client header
+ * @hdr: message header
+ * @length: length of message
+ * @cl_addr: Client address
+ *
+ * Initialize message header for fixed client
+ */
+static inline void fix_cl_hdr(struct ishtp_msg_hdr *hdr, size_t length,
+ uint8_t cl_addr)
+{
+ hdr->host_addr = 0;
+ hdr->fw_addr = cl_addr;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
+
+/*** Suspend and resume notification ***/
+
+static uint32_t current_state;
+static uint32_t supported_states = SUSPEND_STATE_BIT | CONNECTED_STANDBY_STATE_BIT;
+
+/**
+ * ishtp_send_suspend() - Send suspend message to FW
+ * @dev: ISHTP device instance
+ *
+ * Send suspend message to FW. This is useful for the system freeze (non-S3) case
+ */
+void ishtp_send_suspend(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_status state_status_msg;
+ const size_t len = sizeof(struct ish_system_states_status);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&state_status_msg, 0, len);
+ state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
+ state_status_msg.supported_states = supported_states;
+ current_state |= (SUSPEND_STATE_BIT | CONNECTED_STANDBY_STATE_BIT);
+ dev->print_log(dev, "%s() sends SUSPEND notification\n", __func__);
+ state_status_msg.states_status = current_state;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&state_status_msg);
+}
+EXPORT_SYMBOL(ishtp_send_suspend);
+
+/**
+ * ishtp_send_resume() - Send resume message to FW
+ * @dev: ISHTP device instance
+ *
+ * Send resume message to FW. This is useful for the system freeze (non-S3) case
+ */
+void ishtp_send_resume(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_status state_status_msg;
+ const size_t len = sizeof(struct ish_system_states_status);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&state_status_msg, 0, len);
+ state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
+ state_status_msg.supported_states = supported_states;
+ current_state &= ~(CONNECTED_STANDBY_STATE_BIT | SUSPEND_STATE_BIT);
+ dev->print_log(dev, "%s() sends RESUME notification\n", __func__);
+ state_status_msg.states_status = current_state;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&state_status_msg);
+}
+EXPORT_SYMBOL(ishtp_send_resume);
+
+/**
+ * ishtp_query_subscribers() - Send query subscribers message
+ * @dev: ISHTP device instance
+ *
+ * Send message to query subscribers
+ */
+void ishtp_query_subscribers(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_query_subscribers query_subscribers_msg;
+ const size_t len = sizeof(struct ish_system_states_query_subscribers);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&query_subscribers_msg, 0, len);
+ query_subscribers_msg.hdr.cmd = SYSTEM_STATE_QUERY_SUBSCRIBERS;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&query_subscribers_msg);
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.h b/drivers/hid/intel-ish-hid/ishtp/hbm.h
new file mode 100644
index 000000000..08f3f3ceb
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ISHTP bus layer messages handling
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#ifndef _ISHTP_HBM_H_
+#define _ISHTP_HBM_H_
+
+#include <linux/uuid.h>
+
+struct ishtp_device;
+struct ishtp_msg_hdr;
+struct ishtp_cl;
+
+/*
+ * Timeouts in Seconds
+ */
+#define ISHTP_INTEROP_TIMEOUT 7 /* Timeout on ready message */
+
+#define ISHTP_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
+
+/*
+ * ISHTP Version
+ */
+#define HBM_MINOR_VERSION 0
+#define HBM_MAJOR_VERSION 1
+
+/* Host bus message command opcode */
+#define ISHTP_HBM_CMD_OP_MSK 0x7f
+/* Host bus message command RESPONSE */
+#define ISHTP_HBM_CMD_RES_MSK 0x80
+
+/*
+ * ISHTP Bus Message Command IDs
+ */
+#define HOST_START_REQ_CMD 0x01
+#define HOST_START_RES_CMD 0x81
+
+#define HOST_STOP_REQ_CMD 0x02
+#define HOST_STOP_RES_CMD 0x82
+
+#define FW_STOP_REQ_CMD 0x03
+
+#define HOST_ENUM_REQ_CMD 0x04
+#define HOST_ENUM_RES_CMD 0x84
+
+#define HOST_CLIENT_PROPERTIES_REQ_CMD 0x05
+#define HOST_CLIENT_PROPERTIES_RES_CMD 0x85
+
+#define CLIENT_CONNECT_REQ_CMD 0x06
+#define CLIENT_CONNECT_RES_CMD 0x86
+
+#define CLIENT_DISCONNECT_REQ_CMD 0x07
+#define CLIENT_DISCONNECT_RES_CMD 0x87
+
+#define ISHTP_FLOW_CONTROL_CMD 0x08
+
+#define DMA_BUFFER_ALLOC_NOTIFY 0x11
+#define DMA_BUFFER_ALLOC_RESPONSE 0x91
+
+#define DMA_XFER 0x12
+#define DMA_XFER_ACK 0x92
+
+/*
+ * ISHTP Stop Reason
+ * used by hbm_host_stop_request.reason
+ */
+#define DRIVER_STOP_REQUEST 0x00
+
+/*
+ * ISHTP BUS Interface Section
+ */
+struct ishtp_msg_hdr {
+ uint32_t fw_addr:8;
+ uint32_t host_addr:8;
+ uint32_t length:9;
+ uint32_t reserved:6;
+ uint32_t msg_complete:1;
+} __packed;
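+
+/*
+ * The bit fields above pack into a single 32-bit word:
+ * 8 (fw_addr) + 8 (host_addr) + 9 (length) + 6 (reserved) +
+ * 1 (msg_complete) = 32 bits, so the length field can describe at most
+ * 511 bytes per fragment.
+ */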
+
+struct ishtp_bus_message {
+ uint8_t hbm_cmd;
+ uint8_t data[];
+} __packed;
+
+/**
+ * struct ishtp_hbm_cl_cmd - client specific host bus command
+ *	CONNECT, DISCONNECT, and FLOW CONTROL
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @data
+ */
+struct ishtp_hbm_cl_cmd {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t data;
+};
+
+struct hbm_version {
+ uint8_t minor_version;
+ uint8_t major_version;
+} __packed;
+
+struct hbm_host_version_request {
+ uint8_t hbm_cmd;
+ uint8_t reserved;
+ struct hbm_version host_version;
+} __packed;
+
+struct hbm_host_version_response {
+ uint8_t hbm_cmd;
+ uint8_t host_version_supported;
+ struct hbm_version fw_max_version;
+} __packed;
+
+struct hbm_host_stop_request {
+ uint8_t hbm_cmd;
+ uint8_t reason;
+ uint8_t reserved[2];
+} __packed;
+
+struct hbm_host_stop_response {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+} __packed;
+
+struct hbm_host_enum_request {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+} __packed;
+
+struct hbm_host_enum_response {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+ uint8_t valid_addresses[32];
+} __packed;
+
+struct ishtp_client_properties {
+ guid_t protocol_name;
+ uint8_t protocol_version;
+ uint8_t max_number_of_connections;
+ uint8_t fixed_address;
+ uint8_t single_recv_buf;
+ uint32_t max_msg_length;
+ uint8_t dma_hdr_len;
+#define ISHTP_CLIENT_DMA_ENABLED 0x80
+ uint8_t reserved4;
+ uint8_t reserved5;
+ uint8_t reserved6;
+} __packed;
+
+struct hbm_props_request {
+ uint8_t hbm_cmd;
+ uint8_t address;
+ uint8_t reserved[2];
+} __packed;
+
+struct hbm_props_response {
+ uint8_t hbm_cmd;
+ uint8_t address;
+ uint8_t status;
+ uint8_t reserved[1];
+ struct ishtp_client_properties client_properties;
+} __packed;
+
+/**
+ * struct hbm_client_connect_request - connect/disconnect request
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @reserved
+ */
+struct hbm_client_connect_request {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t reserved;
+} __packed;
+
+/**
+ * struct hbm_client_connect_response - connect/disconnect response
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @status - status of the request
+ */
+struct hbm_client_connect_response {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t status;
+} __packed;
+
+
+#define ISHTP_FC_MESSAGE_RESERVED_LENGTH 5
+
+struct hbm_flow_control {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t reserved[ISHTP_FC_MESSAGE_RESERVED_LENGTH];
+} __packed;
+
+struct dma_alloc_notify {
+ uint8_t hbm;
+ uint8_t status;
+ uint8_t reserved[2];
+ uint32_t buf_size;
+ uint64_t buf_address;
+	/* [...] More size/address pairs may follow */
+} __packed;
+
+struct dma_xfer_hbm {
+ uint8_t hbm;
+ uint8_t fw_client_id;
+ uint8_t host_client_id;
+ uint8_t reserved;
+ uint64_t msg_addr;
+ uint32_t msg_length;
+ uint32_t reserved2;
+} __packed;
+
+/* System state */
+#define ISHTP_SYSTEM_STATE_CLIENT_ADDR 13
+
+#define SYSTEM_STATE_SUBSCRIBE 0x1
+#define SYSTEM_STATE_STATUS 0x2
+#define SYSTEM_STATE_QUERY_SUBSCRIBERS 0x3
+#define SYSTEM_STATE_STATE_CHANGE_REQ 0x4
+/* Indicates suspend and resume states */
+#define CONNECTED_STANDBY_STATE_BIT (1<<0)
+#define SUSPEND_STATE_BIT (1<<1)
+
+struct ish_system_states_header {
+ uint32_t cmd;
+	uint32_t cmd_status; /* responses will have this set */
+} __packed;
+
+struct ish_system_states_subscribe {
+ struct ish_system_states_header hdr;
+ uint32_t states;
+} __packed;
+
+struct ish_system_states_status {
+ struct ish_system_states_header hdr;
+ uint32_t supported_states;
+ uint32_t states_status;
+} __packed;
+
+struct ish_system_states_query_subscribers {
+ struct ish_system_states_header hdr;
+} __packed;
+
+struct ish_system_states_state_change_req {
+ struct ish_system_states_header hdr;
+ uint32_t requested_states;
+ uint32_t states_status;
+} __packed;
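+
+/*
+ * Illustrative sketch (mirrors ishtp_send_resume(); an assumption-based
+ * sketch, not a definitive implementation): a suspend notification
+ * reports the tracked state word with SUSPEND_STATE_BIT set, roughly
+ *
+ *	struct ish_system_states_status msg = { 0 };
+ *
+ *	msg.hdr.cmd = SYSTEM_STATE_STATUS;
+ *	msg.supported_states = supported_states;
+ *	msg.states_status = current_state | SUSPEND_STATE_BIT;
+ *
+ * and is then written with ishtp_write_message() to
+ * ISHTP_SYSTEM_STATE_CLIENT_ADDR, as ishtp_send_resume() does.
+ */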
+
+/**
+ * enum ishtp_hbm_state - host bus message protocol state
+ *
+ * @ISHTP_HBM_IDLE : protocol not started
+ * @ISHTP_HBM_START : start request message was sent
+ * @ISHTP_HBM_STARTED : start response was received
+ * @ISHTP_HBM_ENUM_CLIENTS : enumeration request was sent
+ * @ISHTP_HBM_CLIENT_PROPERTIES : acquiring clients properties
+ * @ISHTP_HBM_WORKING : client enumeration finished, protocol is running
+ * @ISHTP_HBM_STOPPED : protocol stopped
+ */
+enum ishtp_hbm_state {
+ ISHTP_HBM_IDLE = 0,
+ ISHTP_HBM_START,
+ ISHTP_HBM_STARTED,
+ ISHTP_HBM_ENUM_CLIENTS,
+ ISHTP_HBM_CLIENT_PROPERTIES,
+ ISHTP_HBM_WORKING,
+ ISHTP_HBM_STOPPED,
+};
+
+static inline void ishtp_hbm_hdr(struct ishtp_msg_hdr *hdr, size_t length)
+{
+ hdr->host_addr = 0;
+ hdr->fw_addr = 0;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
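+
+/*
+ * Illustrative sketch (not a definitive implementation): an HBM request
+ * is typically built with ishtp_hbm_hdr() and handed to
+ * ishtp_write_message(), roughly
+ *
+ *	struct ishtp_msg_hdr hdr;
+ *	struct hbm_host_enum_request req = { .hbm_cmd = HOST_ENUM_REQ_CMD };
+ *
+ *	ishtp_hbm_hdr(&hdr, sizeof(req));
+ *	ishtp_write_message(dev, &hdr, (unsigned char *)&req);
+ */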
+
+int ishtp_hbm_start_req(struct ishtp_device *dev);
+int ishtp_hbm_start_wait(struct ishtp_device *dev);
+int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
+ struct ishtp_cl *cl);
+int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
+int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
+void ishtp_hbm_enum_clients_req(struct ishtp_device *dev);
+void bh_hbm_work_fn(struct work_struct *work);
+void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr);
+void recv_fixed_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr);
+void ishtp_hbm_dispatch(struct ishtp_device *dev,
+ struct ishtp_bus_message *hdr);
+
+void ishtp_query_subscribers(struct ishtp_device *dev);
+
+/* Exported I/F */
+void ishtp_send_suspend(struct ishtp_device *dev);
+void ishtp_send_resume(struct ishtp_device *dev);
+
+#endif /* _ISHTP_HBM_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
new file mode 100644
index 000000000..02a00cc2d
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Initialization protocol for ISHTP driver
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "ishtp-dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * ishtp_dev_state_str() - Convert to string format
+ * @state: state to convert
+ *
+ * Convert state to string for prints
+ *
+ * Return: character pointer to converted string
+ */
+const char *ishtp_dev_state_str(int state)
+{
+ switch (state) {
+ case ISHTP_DEV_INITIALIZING:
+ return "INITIALIZING";
+ case ISHTP_DEV_INIT_CLIENTS:
+ return "INIT_CLIENTS";
+ case ISHTP_DEV_ENABLED:
+ return "ENABLED";
+ case ISHTP_DEV_RESETTING:
+ return "RESETTING";
+ case ISHTP_DEV_DISABLED:
+ return "DISABLED";
+ case ISHTP_DEV_POWER_DOWN:
+ return "POWER_DOWN";
+ case ISHTP_DEV_POWER_UP:
+ return "POWER_UP";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * ishtp_device_init() - ishtp device init
+ * @dev: ISHTP device instance
+ *
+ * After the ISHTP device is allocated, this function initializes each
+ * field, including spinlocks, the work struct and lists.
+ */
+void ishtp_device_init(struct ishtp_device *dev)
+{
+ dev->dev_state = ISHTP_DEV_INITIALIZING;
+ INIT_LIST_HEAD(&dev->cl_list);
+ INIT_LIST_HEAD(&dev->device_list);
+ dev->rd_msg_fifo_head = 0;
+ dev->rd_msg_fifo_tail = 0;
+ spin_lock_init(&dev->rd_msg_spinlock);
+
+ init_waitqueue_head(&dev->wait_hbm_recvd_msg);
+ spin_lock_init(&dev->read_list_spinlock);
+ spin_lock_init(&dev->device_lock);
+ spin_lock_init(&dev->device_list_lock);
+ spin_lock_init(&dev->cl_list_lock);
+ spin_lock_init(&dev->fw_clients_lock);
+ INIT_WORK(&dev->bh_hbm_work, bh_hbm_work_fn);
+
+ bitmap_zero(dev->host_clients_map, ISHTP_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving client ID 0 for ISHTP Bus Message communications
+ */
+ bitmap_set(dev->host_clients_map, 0, 1);
+
+ INIT_LIST_HEAD(&dev->read_list.list);
+}
+EXPORT_SYMBOL(ishtp_device_init);
+
+/**
+ * ishtp_start() - Start ISH processing
+ * @dev: ISHTP device instance
+ *
+ * Start ISHTP processing: wait for HBM start completion and send the
+ * query subscribers message
+ *
+ * Return: 0 on success else -ENODEV
+ */
+int ishtp_start(struct ishtp_device *dev)
+{
+ if (ishtp_hbm_start_wait(dev)) {
+		dev_err(dev->devc, "HBM hasn't started\n");
+ goto err;
+ }
+
+ /* suspend & resume notification - send QUERY_SUBSCRIBERS msg */
+ ishtp_query_subscribers(dev);
+
+ return 0;
+err:
+ dev_err(dev->devc, "link layer initialization failed.\n");
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ return -ENODEV;
+}
+EXPORT_SYMBOL(ishtp_start);
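+
+/*
+ * Illustrative bring-up sketch (assumptions about the glue layer, not a
+ * definitive sequence): a transport driver is expected to call
+ * ishtp_device_init() right after allocating the device, bring the
+ * hardware/IPC link up, and only then call ishtp_start(), roughly
+ *
+ *	ishtp_device_init(dev);
+ *	(reset hardware, wait for firmware ready, start the HBM protocol)
+ *	if (ishtp_start(dev))
+ *		return -ENODEV;
+ */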
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
new file mode 100644
index 000000000..32142c7d9
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Most of the ISHTP provider device and ISHTP logic declarations
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ */
+
+#ifndef _ISHTP_DEV_H_
+#define _ISHTP_DEV_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/intel-ish-client-if.h>
+#include "bus.h"
+#include "hbm.h"
+
+#define IPC_PAYLOAD_SIZE 128
+#define ISHTP_RD_MSG_BUF_SIZE IPC_PAYLOAD_SIZE
+#define IPC_FULL_MSG_SIZE 132
+
+/* Number of messages to be held in ISR->BH FIFO */
+#define RD_INT_FIFO_SIZE 64
+
+/*
+ * Number of IPC messages to be held in Tx FIFO, to be sent by ISR -
+ * Tx complete interrupt or RX_COMPLETE handler
+ */
+#define IPC_TX_FIFO_SIZE 512
+
+/*
+ * Number of Maximum ISHTP Clients
+ */
+#define ISHTP_CLIENTS_MAX 256
+
+/*
+ * Number of File descriptors/handles
+ * that can be opened to the driver.
+ *
+ * Limit to 255: 256 Total Clients
+ * minus internal client for ISHTP Bus Messages
+ */
+#define ISHTP_MAX_OPEN_HANDLE_COUNT (ISHTP_CLIENTS_MAX - 1)
+
+/* Internal Clients Number */
+#define ISHTP_HOST_CLIENT_ID_ANY (-1)
+#define ISHTP_HBM_HOST_CLIENT_ID 0
+
+#define MAX_DMA_DELAY 20
+
+/* ISHTP device states */
+enum ishtp_dev_state {
+ ISHTP_DEV_INITIALIZING = 0,
+ ISHTP_DEV_INIT_CLIENTS,
+ ISHTP_DEV_ENABLED,
+ ISHTP_DEV_RESETTING,
+ ISHTP_DEV_DISABLED,
+ ISHTP_DEV_POWER_DOWN,
+ ISHTP_DEV_POWER_UP
+};
+const char *ishtp_dev_state_str(int state);
+
+struct ishtp_cl;
+
+/**
+ * struct ishtp_fw_client - representation of fw client
+ *
+ * @props - client properties
+ * @client_id - fw client id
+ */
+struct ishtp_fw_client {
+ struct ishtp_client_properties props;
+ uint8_t client_id;
+};
+
+/*
+ * Control info for the ISHTP/IPC sending FIFO of IPC messages -
+ * a list node with an inline data buffer.
+ * This structure is filled with the parameters submitted by the
+ * calling glue layer.
+ * 'buf' may point either to an external buffer or to 'inline_data';
+ * 'offset' is initialized to 0 on submission.
+ *
+ * 'ipc_send_compl' is intended for clients that send fragmented
+ * messages: it is called whenever a fragment has been written to the
+ * IPC message registers.
+ * If more fragments remain, it sends the next one; with the last
+ * fragment it sets the appropriate ISHTP "message-complete" flag and
+ * removes the outstanding message (marks the outstanding buffer as
+ * available).
+ * If counting flow control is in effect and more flow-control credits
+ * are available, it may queue the next client message from the cl
+ * structure for IPC processing.
+ */
+struct wr_msg_ctl_info {
+ /* Will be called with 'ipc_send_compl_prm' as parameter */
+ void (*ipc_send_compl)(void *);
+
+ void *ipc_send_compl_prm;
+ size_t length;
+ struct list_head link;
+ unsigned char inline_data[IPC_FULL_MSG_SIZE];
+};
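+
+/*
+ * Illustrative sketch (hypothetical names, assumptions about the glue
+ * layer): a fragmenting sender's completion hook matches the
+ * 'ipc_send_compl' signature, roughly
+ *
+ *	static void my_frag_sent(void *prm)
+ *	{
+ *		struct my_tx_state *tx = prm;
+ *
+ *		(send the next fragment, or mark the outstanding
+ *		 buffer as available on the last one)
+ *	}
+ */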
+
+/*
+ * The ISHTP layer talks to hardware IPC message using the following
+ * callbacks
+ */
+struct ishtp_hw_ops {
+ int (*hw_reset)(struct ishtp_device *dev);
+ int (*ipc_reset)(struct ishtp_device *dev);
+ uint32_t (*ipc_get_header)(struct ishtp_device *dev, int length,
+ int busy);
+ int (*write)(struct ishtp_device *dev,
+ void (*ipc_send_compl)(void *), void *ipc_send_compl_prm,
+ unsigned char *msg, int length);
+ uint32_t (*ishtp_read_hdr)(const struct ishtp_device *dev);
+ int (*ishtp_read)(struct ishtp_device *dev, unsigned char *buffer,
+ unsigned long buffer_length);
+ uint32_t (*get_fw_status)(struct ishtp_device *dev);
+ void (*sync_fw_clock)(struct ishtp_device *dev);
+ bool (*dma_no_cache_snooping)(struct ishtp_device *dev);
+};
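+
+/*
+ * Illustrative sketch (hypothetical my_* names, not a definitive
+ * implementation): a hardware glue driver provides this callback table
+ * with designated initializers, e.g.
+ *
+ *	static const struct ishtp_hw_ops my_hw_ops = {
+ *		.hw_reset	= my_hw_reset,
+ *		.ipc_reset	= my_ipc_reset,
+ *		.ipc_get_header	= my_ipc_get_header,
+ *		.write		= my_write_msg,
+ *		.ishtp_read_hdr	= my_read_hdr,
+ *		.ishtp_read	= my_read,
+ *		.get_fw_status	= my_get_fw_status,
+ *		.sync_fw_clock	= my_sync_fw_clock,
+ *	};
+ */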
+
+/**
+ * struct ishtp_device - ISHTP private device struct
+ */
+struct ishtp_device {
+ struct device *devc; /* pointer to lowest device */
+ struct pci_dev *pdev; /* PCI device to get device ids */
+
+ /* waitq for waiting for suspend response */
+ wait_queue_head_t suspend_wait;
+ bool suspend_flag; /* Suspend is active */
+
+ /* waitq for waiting for resume response */
+ wait_queue_head_t resume_wait;
+	bool resume_flag; /* Resume is active */
+
+ /*
+ * lock for the device, for everything that doesn't have
+ * a dedicated spinlock
+ */
+ spinlock_t device_lock;
+
+ bool recvd_hw_ready;
+ struct hbm_version version;
+ int transfer_path; /* Choice of transfer path: IPC or DMA */
+
+ /* ishtp device states */
+ enum ishtp_dev_state dev_state;
+ enum ishtp_hbm_state hbm_state;
+
+ /* driver read queue */
+ struct ishtp_cl_rb read_list;
+ spinlock_t read_list_spinlock;
+
+ /* list of ishtp_cl's */
+ struct list_head cl_list;
+ spinlock_t cl_list_lock;
+ long open_handle_count;
+
+ /* List of bus devices */
+ struct list_head device_list;
+ spinlock_t device_list_lock;
+
+ /* waiting queues for receive message from FW */
+ wait_queue_head_t wait_hw_ready;
+ wait_queue_head_t wait_hbm_recvd_msg;
+
+ /* FIFO for input messages for BH processing */
+ unsigned char rd_msg_fifo[RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE];
+ unsigned int rd_msg_fifo_head, rd_msg_fifo_tail;
+ spinlock_t rd_msg_spinlock;
+ struct work_struct bh_hbm_work;
+
+ /* IPC write queue */
+ struct list_head wr_processing_list, wr_free_list;
+ /* For both processing list and free list */
+ spinlock_t wr_processing_spinlock;
+
+	struct ishtp_fw_client *fw_clients; /* Note: memory has to be allocated */
+ DECLARE_BITMAP(fw_clients_map, ISHTP_CLIENTS_MAX);
+ DECLARE_BITMAP(host_clients_map, ISHTP_CLIENTS_MAX);
+ uint8_t fw_clients_num;
+ uint8_t fw_client_presentation_num;
+ uint8_t fw_client_index;
+ spinlock_t fw_clients_lock;
+
+ /* TX DMA buffers and slots */
+ int ishtp_host_dma_enabled;
+ void *ishtp_host_dma_tx_buf;
+ unsigned int ishtp_host_dma_tx_buf_size;
+ uint64_t ishtp_host_dma_tx_buf_phys;
+ int ishtp_dma_num_slots;
+
+ /* map of 4k blocks in Tx dma buf: 0-free, 1-used */
+ uint8_t *ishtp_dma_tx_map;
+ spinlock_t ishtp_dma_tx_lock;
+
+ /* RX DMA buffers and slots */
+ void *ishtp_host_dma_rx_buf;
+ unsigned int ishtp_host_dma_rx_buf_size;
+ uint64_t ishtp_host_dma_rx_buf_phys;
+
+	/* Dump to trace buffers if enabled */
+ ishtp_print_log print_log;
+
+ /* Debug stats */
+ unsigned int ipc_rx_cnt;
+ unsigned long long ipc_rx_bytes_cnt;
+ unsigned int ipc_tx_cnt;
+ unsigned long long ipc_tx_bytes_cnt;
+
+ const struct ishtp_hw_ops *ops;
+ size_t mtu;
+ uint32_t ishtp_msg_hdr;
+ char hw[] __aligned(sizeof(void *));
+};
+
+static inline unsigned long ishtp_secs_to_jiffies(unsigned long sec)
+{
+ return msecs_to_jiffies(sec * MSEC_PER_SEC);
+}
+
+/*
+ * Register Access Function
+ */
+static inline int ish_ipc_reset(struct ishtp_device *dev)
+{
+ return dev->ops->ipc_reset(dev);
+}
+
+/* Exported function */
+void ishtp_device_init(struct ishtp_device *dev);
+int ishtp_start(struct ishtp_device *dev);
+
+#endif /*_ISHTP_DEV_H_*/