author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/wwan
parent     Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/wwan')
-rw-r--r--  drivers/net/wwan/Kconfig | 125
-rw-r--r--  drivers/net/wwan/Makefile | 16
-rw-r--r--  drivers/net/wwan/iosm/Makefile | 30
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c | 91
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h | 60
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_coredump.c | 126
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_coredump.h | 56
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_debugfs.c | 30
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_debugfs.h | 17
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_devlink.c | 322
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_devlink.h | 205
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_flash.c | 594
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_flash.h | 229
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem.c | 1503
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem.h | 590
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem_ops.c | 658
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem_ops.h | 147
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_irq.c | 90
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_irq.h | 33
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mmio.c | 227
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mmio.h | 183
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux.c | 483
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux.h | 442
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux_codec.c | 1553
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux_codec.h | 325
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pcie.c | 532
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pcie.h | 210
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pm.c | 333
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pm.h | 207
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_port.c | 86
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_port.h | 50
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_protocol.c | 283
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_protocol.h | 237
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c | 541
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h | 444
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_task_queue.c | 202
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_task_queue.h | 97
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_trace.c | 182
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_trace.h | 74
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_uevent.c | 44
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_uevent.h | 41
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_wwan.c | 316
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_wwan.h | 45
-rw-r--r--  drivers/net/wwan/mhi_wwan_ctrl.c | 285
-rw-r--r--  drivers/net/wwan/mhi_wwan_mbim.c | 659
-rw-r--r--  drivers/net/wwan/qcom_bam_dmux.c | 907
-rw-r--r--  drivers/net/wwan/rpmsg_wwan_ctrl.c | 167
-rw-r--r--  drivers/net/wwan/t7xx/Makefile | 21
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_cldma.c | 281
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_cldma.h | 180
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_dpmaif.c | 1281
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_dpmaif.h | 179
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 1347
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 127
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 583
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 202
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 1168
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h | 117
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 684
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h | 78
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_mhccif.c | 122
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_mhccif.h | 38
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_modem_ops.c | 775
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_modem_ops.h | 90
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_netdev.c | 516
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_netdev.h | 60
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_pci.c | 781
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_pci.h | 124
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_pcie_mac.c | 262
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_pcie_mac.h | 31
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port.h | 146
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 277
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port_proxy.c | 533
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port_proxy.h | 102
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port_trace.c | 116
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port_wwan.c | 178
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_reg.h | 350
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_state_monitor.c | 559
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_state_monitor.h | 137
-rw-r--r--  drivers/net/wwan/wwan_core.c | 1250
-rw-r--r--  drivers/net/wwan/wwan_hwsim.c | 559
81 files changed, 27331 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
new file mode 100644
index 0000000000..410b024511
--- /dev/null
+++ b/drivers/net/wwan/Kconfig
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Wireless WAN device configuration
+#
+
+menu "Wireless WAN"
+
+config WWAN
+ tristate "WWAN Driver Core"
+ help
+ Say Y here if you want to use the WWAN driver core. This driver
+ provides a common framework for WWAN drivers.
+
+ To compile this driver as a module, choose M here: the module will be
+ called wwan.
+
+if WWAN
+
+config WWAN_DEBUGFS
+ bool "WWAN devices debugfs interface" if EXPERT
+ depends on DEBUG_FS
+ default y
+ help
+ Enables debugfs infrastructure for the WWAN core and device drivers.
+
+ If this option is selected, the debug interface elements for each
+ WWAN device can be found in a directory corresponding to the
+ device name: debugfs/wwan/wwanX.
+
+config WWAN_HWSIM
+ tristate "Simulated WWAN device"
+ help
+ This driver is a developer testing tool that can be used to test the
+ WWAN framework.
+
+ To compile this driver as a module, choose M here: the module will be
+ called wwan_hwsim. If unsure, say N.
+
+config MHI_WWAN_CTRL
+ tristate "MHI WWAN control driver for QCOM-based PCIe modems"
+ depends on MHI_BUS
+ help
+ MHI WWAN CTRL allows QCOM-based PCIe modems to expose different modem
+ control protocols/ports to userspace, including AT, MBIM, QMI, DIAG
+ and FIREHOSE. These protocols can be accessed directly from userspace
+ (e.g. AT commands) or via libraries/tools (e.g. libmbim, libqmi,
+ libqcdm...).
+
+ To compile this driver as a module, choose M here: the module will be
+ called mhi_wwan_ctrl.
+
+config MHI_WWAN_MBIM
+ tristate "MHI WWAN MBIM network driver for QCOM-based PCIe modems"
+ depends on MHI_BUS
+ help
+ MHI WWAN MBIM is a WWAN network driver for QCOM-based PCIe modems.
+ It implements MBIM over MHI, for IP data aggregation and muxing.
+ A default wwan0 network interface is created for MBIM data session
+ ID 0. Additional links can be created via wwan rtnetlink type.
+
+ To compile this driver as a module, choose M here: the module will be
+ called mhi_wwan_mbim.
+
+config QCOM_BAM_DMUX
+ tristate "Qualcomm BAM-DMUX WWAN network driver"
+ depends on (DMA_ENGINE && PM && QCOM_SMEM_STATE) || COMPILE_TEST
+ help
+ The BAM Data Multiplexer provides access to the network data channels
+ of modems integrated into many older Qualcomm SoCs, e.g. Qualcomm
+ MSM8916 or MSM8974. The connection can be established via QMI/AT from
+ userspace with control ports available through the WWAN subsystem
+ (CONFIG_RPMSG_WWAN_CTRL) or QRTR network sockets (CONFIG_QRTR).
+
+ To compile this driver as a module, choose M here: the module will be
+ called qcom_bam_dmux.
+
+config RPMSG_WWAN_CTRL
+ tristate "RPMSG WWAN control driver"
+ depends on RPMSG
+ help
+ RPMSG WWAN CTRL allows modems available via RPMSG channels to expose
+ different modem protocols/ports to userspace, including AT and QMI.
+ These protocols can be accessed directly from userspace
+ (e.g. AT commands) or via libraries/tools (e.g. libqmi, libqcdm...).
+
+ This is mainly used for modems integrated into many Qualcomm SoCs,
+ e.g. for AT and QMI on Qualcomm MSM8916 or MSM8974. Note that many
+ newer Qualcomm SoCs (e.g. SDM845) still provide an AT port through
+ this driver but the QMI messages can only be sent through
+ QRTR network sockets (CONFIG_QRTR).
+
+ To compile this driver as a module, choose M here: the module will be
+ called rpmsg_wwan_ctrl.
+
+config IOSM
+ tristate "IOSM Driver for Intel M.2 WWAN Device"
+ depends on PCI
+ select NET_DEVLINK
+ select RELAY if WWAN_DEBUGFS
+ help
+ This driver enables Intel M.2 WWAN Device communication.
+
+ If you have one of those Intel M.2 WWAN Modules and wish to use it in
+ Linux say Y/M here.
+
+ If unsure, say N.
+
+config MTK_T7XX
+ tristate "MediaTek PCIe 5G WWAN modem T7xx device"
+ depends on PCI
+ select RELAY if WWAN_DEBUGFS
+ help
+ Enables MediaTek PCIe based 5G WWAN modem (T7xx series) device.
+ Adapts WWAN framework and provides network interface like wwan0
+ and tty interfaces like wwan0at0 (AT protocol), wwan0mbim0
+ (MBIM protocol), etc.
+
+ To compile this driver as a module, choose M here: the module will be
+ called mtk_t7xx.
+
+ If unsure, say N.
+
+endif # WWAN
+
+endmenu
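
The control drivers configured above (MHI_WWAN_CTRL, RPMSG_WWAN_CTRL, MTK_T7XX, ...) expose modem control ports to user space; the MTK_T7XX help text names them wwan0at0 (AT) and wwan0mbim0 (MBIM). As a minimal illustration only (not part of this patch, and the port index on a real system may differ), an AT port can be driven from user space like this:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/dev/wwan0at0", O_RDWR);	/* AT control port */

	if (fd < 0)
		return 1;

	if (write(fd, "AT\r", 3) == 3) {
		n = read(fd, buf, sizeof(buf) - 1);	/* expect an OK response */
		if (n > 0) {
			buf[n] = '\0';
			printf("%s", buf);
		}
	}
	close(fd);
	return 0;
}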
diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile
new file mode 100644
index 0000000000..3960c0ae24
--- /dev/null
+++ b/drivers/net/wwan/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux WWAN device drivers.
+#
+
+obj-$(CONFIG_WWAN) += wwan.o
+wwan-objs += wwan_core.o
+
+obj-$(CONFIG_WWAN_HWSIM) += wwan_hwsim.o
+
+obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o
+obj-$(CONFIG_MHI_WWAN_MBIM) += mhi_wwan_mbim.o
+obj-$(CONFIG_QCOM_BAM_DMUX) += qcom_bam_dmux.o
+obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o
+obj-$(CONFIG_IOSM) += iosm/
+obj-$(CONFIG_MTK_T7XX) += t7xx/
diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile
new file mode 100644
index 0000000000..fa8d6afd18
--- /dev/null
+++ b/drivers/net/wwan/iosm/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: (GPL-2.0-only)
+#
+# Copyright (C) 2020-21 Intel Corporation.
+#
+
+iosm-y = \
+ iosm_ipc_task_queue.o \
+ iosm_ipc_imem.o \
+ iosm_ipc_imem_ops.o \
+ iosm_ipc_mmio.o \
+ iosm_ipc_port.o \
+ iosm_ipc_wwan.o \
+ iosm_ipc_uevent.o \
+ iosm_ipc_pm.o \
+ iosm_ipc_pcie.o \
+ iosm_ipc_irq.o \
+ iosm_ipc_chnl_cfg.o \
+ iosm_ipc_protocol.o \
+ iosm_ipc_protocol_ops.o \
+ iosm_ipc_mux.o \
+ iosm_ipc_mux_codec.o \
+ iosm_ipc_devlink.o \
+ iosm_ipc_flash.o \
+ iosm_ipc_coredump.o
+
+iosm-$(CONFIG_WWAN_DEBUGFS) += \
+ iosm_ipc_debugfs.o \
+ iosm_ipc_trace.o
+
+obj-$(CONFIG_IOSM) := iosm.o
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
new file mode 100644
index 0000000000..bcfbc6b3d6
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/wwan.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+
+/* Max. sizes of the downlink buffers */
+#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (64 * 1024)
+#define IPC_MEM_MAX_DL_LOOPBACK_SIZE (1 * 1024 * 1024)
+#define IPC_MEM_MAX_DL_AT_BUF_SIZE 2048
+#define IPC_MEM_MAX_DL_RPC_BUF_SIZE (32 * 1024)
+#define IPC_MEM_MAX_DL_MBIM_BUF_SIZE IPC_MEM_MAX_DL_RPC_BUF_SIZE
+
+/* Max. transfer descriptors for a pipe. */
+#define IPC_MEM_MAX_TDS_FLASH_DL 3
+#define IPC_MEM_MAX_TDS_FLASH_UL 6
+#define IPC_MEM_MAX_TDS_AT 4
+#define IPC_MEM_MAX_TDS_RPC 4
+#define IPC_MEM_MAX_TDS_MBIM IPC_MEM_MAX_TDS_RPC
+#define IPC_MEM_MAX_TDS_LOOPBACK 11
+
+/* Accumulation backoff usec */
+#define IRQ_ACC_BACKOFF_OFF 0
+
+/* MUX acc backoff 1ms */
+#define IRQ_ACC_BACKOFF_MUX 1000
+
+/* Modem channel configuration table
+ * Always reserve element zero for flash channel.
+ */
+static struct ipc_chnl_cfg modem_cfg[] = {
+ /* IP Mux */
+ { IPC_MEM_IP_CHL_ID_0, IPC_MEM_PIPE_0, IPC_MEM_PIPE_1,
+ IPC_MEM_MAX_TDS_MUX_LITE_UL, IPC_MEM_MAX_TDS_MUX_LITE_DL,
+ IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE, WWAN_PORT_UNKNOWN },
+ /* RPC - 0 */
+ { IPC_MEM_CTRL_CHL_ID_1, IPC_MEM_PIPE_2, IPC_MEM_PIPE_3,
+ IPC_MEM_MAX_TDS_RPC, IPC_MEM_MAX_TDS_RPC,
+ IPC_MEM_MAX_DL_RPC_BUF_SIZE, WWAN_PORT_XMMRPC },
+ /* IAT0 */
+ { IPC_MEM_CTRL_CHL_ID_2, IPC_MEM_PIPE_4, IPC_MEM_PIPE_5,
+ IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_DL_AT_BUF_SIZE,
+ WWAN_PORT_AT },
+ /* Trace */
+ { IPC_MEM_CTRL_CHL_ID_3, IPC_MEM_PIPE_6, IPC_MEM_PIPE_7,
+ IPC_MEM_TDS_TRC, IPC_MEM_TDS_TRC, IPC_MEM_MAX_DL_TRC_BUF_SIZE,
+ WWAN_PORT_UNKNOWN },
+ /* IAT1 */
+ { IPC_MEM_CTRL_CHL_ID_4, IPC_MEM_PIPE_8, IPC_MEM_PIPE_9,
+ IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_DL_AT_BUF_SIZE,
+ WWAN_PORT_AT },
+ /* Loopback */
+ { IPC_MEM_CTRL_CHL_ID_5, IPC_MEM_PIPE_10, IPC_MEM_PIPE_11,
+ IPC_MEM_MAX_TDS_LOOPBACK, IPC_MEM_MAX_TDS_LOOPBACK,
+ IPC_MEM_MAX_DL_LOOPBACK_SIZE, WWAN_PORT_UNKNOWN },
+ /* MBIM Channel */
+ { IPC_MEM_CTRL_CHL_ID_6, IPC_MEM_PIPE_12, IPC_MEM_PIPE_13,
+ IPC_MEM_MAX_TDS_MBIM, IPC_MEM_MAX_TDS_MBIM,
+ IPC_MEM_MAX_DL_MBIM_BUF_SIZE, WWAN_PORT_MBIM },
+ /* Flash Channel/Coredump Channel */
+ { IPC_MEM_CTRL_CHL_ID_7, IPC_MEM_PIPE_0, IPC_MEM_PIPE_1,
+ IPC_MEM_MAX_TDS_FLASH_UL, IPC_MEM_MAX_TDS_FLASH_DL,
+ IPC_MEM_MAX_DL_FLASH_BUF_SIZE, WWAN_PORT_UNKNOWN },
+};
+
+int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
+{
+ if (index >= ARRAY_SIZE(modem_cfg)) {
+ pr_err("index: %d and array size %zu", index,
+ ARRAY_SIZE(modem_cfg));
+ return -ECHRNG;
+ }
+
+ if (index == IPC_MEM_MUX_IP_CH_IF_ID)
+ chnl_cfg->accumulation_backoff = IRQ_ACC_BACKOFF_MUX;
+ else
+ chnl_cfg->accumulation_backoff = IRQ_ACC_BACKOFF_OFF;
+
+ chnl_cfg->ul_nr_of_entries = modem_cfg[index].ul_nr_of_entries;
+ chnl_cfg->dl_nr_of_entries = modem_cfg[index].dl_nr_of_entries;
+ chnl_cfg->dl_buf_size = modem_cfg[index].dl_buf_size;
+ chnl_cfg->id = modem_cfg[index].id;
+ chnl_cfg->ul_pipe = modem_cfg[index].ul_pipe;
+ chnl_cfg->dl_pipe = modem_cfg[index].dl_pipe;
+ chnl_cfg->wwan_port_type = modem_cfg[index].wwan_port_type;
+
+ return 0;
+}
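
For reference, a caller of ipc_chnl_cfg_get() looks roughly like the hypothetical sketch below (illustrative only; a real caller is ipc_devlink_init() in iosm_ipc_devlink.c further down). Channel IDs and table indices happen to stay aligned in modem_cfg[], so IPC_MEM_CTRL_CHL_ID_6 also selects the MBIM entry:

static int example_fetch_mbim_cfg(struct device *dev)
{
	struct ipc_chnl_cfg cfg;
	int ret;

	ret = ipc_chnl_cfg_get(&cfg, IPC_MEM_CTRL_CHL_ID_6);
	if (ret)	/* -ECHRNG when the index is out of range */
		return ret;

	dev_dbg(dev, "MBIM: ul_pipe %u dl_pipe %u dl_buf %u backoff %u us",
		cfg.ul_pipe, cfg.dl_pipe, cfg.dl_buf_size,
		cfg.accumulation_backoff);
	return 0;
}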
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
new file mode 100644
index 0000000000..e77084e767
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation
+ */
+
+#ifndef IOSM_IPC_CHNL_CFG_H
+#define IOSM_IPC_CHNL_CFG_H
+
+#include "iosm_ipc_mux.h"
+
+/* Number of TDs on the trace channel */
+#define IPC_MEM_TDS_TRC 32
+
+/* Trace channel TD buffer size. */
+#define IPC_MEM_MAX_DL_TRC_BUF_SIZE 8192
+
+/* Channel ID */
+enum ipc_channel_id {
+ IPC_MEM_IP_CHL_ID_0 = 0,
+ IPC_MEM_CTRL_CHL_ID_1,
+ IPC_MEM_CTRL_CHL_ID_2,
+ IPC_MEM_CTRL_CHL_ID_3,
+ IPC_MEM_CTRL_CHL_ID_4,
+ IPC_MEM_CTRL_CHL_ID_5,
+ IPC_MEM_CTRL_CHL_ID_6,
+ IPC_MEM_CTRL_CHL_ID_7,
+};
+
+/**
+ * struct ipc_chnl_cfg - IPC channel configuration structure
+ * @id: Interface ID
+ * @ul_pipe: Uplink datastream
+ * @dl_pipe: Downlink datastream
+ * @ul_nr_of_entries: Number of Transfer descriptor uplink pipe
+ * @dl_nr_of_entries: Number of Transfer descriptor downlink pipe
+ * @dl_buf_size: Downlink buffer size
+ * @wwan_port_type: WWAN subsystem port type
+ * @accumulation_backoff: Time in usec for data accumulation
+ */
+struct ipc_chnl_cfg {
+ u32 id;
+ u32 ul_pipe;
+ u32 dl_pipe;
+ u32 ul_nr_of_entries;
+ u32 dl_nr_of_entries;
+ u32 dl_buf_size;
+ u32 wwan_port_type;
+ u32 accumulation_backoff;
+};
+
+/**
+ * ipc_chnl_cfg_get - Get pipe configuration.
+ * @chnl_cfg: Pointer to the channel configuration to be filled in
+ * @index: Channel index (up to MAX_CHANNELS)
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.c b/drivers/net/wwan/iosm/iosm_ipc_coredump.c
new file mode 100644
index 0000000000..26ca30476f
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+#include <linux/vmalloc.h>
+
+#include "iosm_ipc_coredump.h"
+
+/**
+ * ipc_coredump_collect - To collect coredump
+ * @devlink: Pointer to devlink instance.
+ * @data: Pointer to snapshot
+ * @entry: ID of requested snapshot
+ * @region_size: Region size
+ *
+ * Returns: 0 on success, error on failure
+ */
+int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry,
+ u32 region_size)
+{
+ int ret, bytes_to_read, bytes_read = 0, i = 0;
+ s32 remaining;
+ u8 *data_ptr;
+
+ data_ptr = vmalloc(region_size);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ remaining = devlink->cd_file_info[entry].actual_size;
+ ret = ipc_devlink_send_cmd(devlink, rpsi_cmd_coredump_get, entry);
+ if (ret) {
+ dev_err(devlink->dev, "Send coredump_get cmd failed");
+ goto get_cd_fail;
+ }
+ while (remaining > 0) {
+ bytes_to_read = min(remaining, MAX_DATA_SIZE);
+ bytes_read = 0;
+ ret = ipc_imem_sys_devlink_read(devlink, data_ptr + i,
+ bytes_to_read, &bytes_read);
+ if (ret) {
+ dev_err(devlink->dev, "CD data read failed");
+ goto get_cd_fail;
+ }
+ remaining -= bytes_read;
+ i += bytes_read;
+ }
+
+ *data = data_ptr;
+
+ return 0;
+
+get_cd_fail:
+ vfree(data_ptr);
+ return ret;
+}
+
+/**
+ * ipc_coredump_get_list - Get coredump list from modem
+ * @devlink: Pointer to devlink instance.
+ * @cmd: RPSI command to be sent
+ *
+ * Returns: 0 on success, error on failure
+ */
+int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd)
+{
+ u32 byte_read, num_entries, file_size;
+ struct iosm_cd_table *cd_table;
+ u8 size[MAX_SIZE_LEN], i;
+ char *filename;
+ int ret;
+
+ cd_table = kzalloc(MAX_CD_LIST_SIZE, GFP_KERNEL);
+ if (!cd_table) {
+ ret = -ENOMEM;
+ goto cd_init_fail;
+ }
+
+ ret = ipc_devlink_send_cmd(devlink, cmd, MAX_CD_LIST_SIZE);
+ if (ret) {
+ dev_err(devlink->dev, "rpsi_cmd_coredump_start failed");
+ goto cd_init_fail;
+ }
+
+ ret = ipc_imem_sys_devlink_read(devlink, (u8 *)cd_table,
+ MAX_CD_LIST_SIZE, &byte_read);
+ if (ret) {
+ dev_err(devlink->dev, "Coredump data is invalid");
+ goto cd_init_fail;
+ }
+
+ if (byte_read != MAX_CD_LIST_SIZE)
+ goto cd_init_fail;
+
+ if (cmd == rpsi_cmd_coredump_start) {
+ num_entries = le32_to_cpu(cd_table->list.num_entries);
+ if (num_entries == 0 || num_entries > IOSM_NOF_CD_REGION) {
+ ret = -EINVAL;
+ goto cd_init_fail;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ file_size = le32_to_cpu(cd_table->list.entry[i].size);
+ filename = cd_table->list.entry[i].filename;
+
+ if (file_size > devlink->cd_file_info[i].default_size) {
+ ret = -EINVAL;
+ goto cd_init_fail;
+ }
+
+ devlink->cd_file_info[i].actual_size = file_size;
+ dev_dbg(devlink->dev, "file: %s actual size %d",
+ filename, file_size);
+ devlink_flash_update_status_notify(devlink->devlink_ctx,
+ filename,
+ "FILENAME", 0, 0);
+ snprintf(size, sizeof(size), "%d", file_size);
+ devlink_flash_update_status_notify(devlink->devlink_ctx,
+ size, "FILE SIZE",
+ 0, 0);
+ }
+ }
+
+cd_init_fail:
+ kfree(cd_table);
+ return ret;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.h b/drivers/net/wwan/iosm/iosm_ipc_coredump.h
new file mode 100644
index 0000000000..3da5ec75e0
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef _IOSM_IPC_COREDUMP_H_
+#define _IOSM_IPC_COREDUMP_H_
+
+#include "iosm_ipc_devlink.h"
+
+/* Max number of bytes to receive for Coredump list structure */
+#define MAX_CD_LIST_SIZE 0x1000
+
+/* Max buffer allocated to receive coredump data */
+#define MAX_DATA_SIZE 0x00010000
+
+/* Max length of the file size string */
+#define MAX_SIZE_LEN 32
+
+/**
+ * struct iosm_cd_list_entry - Structure to hold coredump file info.
+ * @size: Number of bytes for the entry
+ * @filename: Coredump filename to be generated on host
+ */
+struct iosm_cd_list_entry {
+ __le32 size;
+ char filename[IOSM_MAX_FILENAME_LEN];
+} __packed;
+
+/**
+ * struct iosm_cd_list - Structure to hold list of coredump files
+ * to be collected.
+ * @num_entries: Number of entries to be received
+ * @entry: Contains File info
+ */
+struct iosm_cd_list {
+ __le32 num_entries;
+ struct iosm_cd_list_entry entry[];
+} __packed;
+
+/**
+ * struct iosm_cd_table - Common Coredump table
+ * @version: Version of coredump structure
+ * @list: Coredump list structure
+ */
+struct iosm_cd_table {
+ __le32 version;
+ struct iosm_cd_list list;
+} __packed;
+
+int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry,
+ u32 region_size);
+
+int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd);
+
+#endif /* _IOSM_IPC_COREDUMP_H_ */
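
Since struct iosm_cd_list ends in a flexible array and all of the structures above are __packed, the entry capacity of the MAX_CD_LIST_SIZE read buffer follows directly from the field sizes. A small stand-alone sketch of that arithmetic, assuming the definitions above (the constants below just restate them):

#include <stddef.h>

#define CD_MAX_LIST_SIZE	0x1000		/* MAX_CD_LIST_SIZE */
#define CD_ENTRY_BYTES		(4 + 32)	/* __le32 size + filename[32] */
#define CD_TABLE_HDR_BYTES	(4 + 4)		/* version + num_entries */

static inline size_t cd_list_max_entries(void)
{
	/* 4096 - 8 = 4088 bytes of entries -> 113 packed entries fit,
	 * far more than the IOSM_NOF_CD_REGION (6) the driver accepts. */
	return (CD_MAX_LIST_SIZE - CD_TABLE_HDR_BYTES) / CD_ENTRY_BYTES;
}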
diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
new file mode 100644
index 0000000000..e916139b8c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/wwan.h>
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_trace.h"
+#include "iosm_ipc_debugfs.h"
+
+void ipc_debugfs_init(struct iosm_imem *ipc_imem)
+{
+ ipc_imem->debugfs_wwan_dir = wwan_get_debugfs_dir(ipc_imem->dev);
+
+ ipc_imem->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
+ ipc_imem->debugfs_wwan_dir);
+
+ ipc_imem->trace = ipc_trace_init(ipc_imem);
+ if (!ipc_imem->trace)
+ dev_warn(ipc_imem->dev, "trace channel init failed");
+}
+
+void ipc_debugfs_deinit(struct iosm_imem *ipc_imem)
+{
+ ipc_trace_deinit(ipc_imem->trace);
+ debugfs_remove_recursive(ipc_imem->debugfs_dir);
+ wwan_put_debugfs_dir(ipc_imem->debugfs_wwan_dir);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.h b/drivers/net/wwan/iosm/iosm_ipc_debugfs.h
new file mode 100644
index 0000000000..8a84bfa2c1
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_DEBUGFS_H
+#define IOSM_IPC_DEBUGFS_H
+
+#ifdef CONFIG_WWAN_DEBUGFS
+void ipc_debugfs_init(struct iosm_imem *ipc_imem);
+void ipc_debugfs_deinit(struct iosm_imem *ipc_imem);
+#else
+static inline void ipc_debugfs_init(struct iosm_imem *ipc_imem) {}
+static inline void ipc_debugfs_deinit(struct iosm_imem *ipc_imem) {}
+#endif
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
new file mode 100644
index 0000000000..2fe724d623
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+#include <linux/vmalloc.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_coredump.h"
+#include "iosm_ipc_devlink.h"
+#include "iosm_ipc_flash.h"
+
+/* Coredump list */
+static struct iosm_coredump_file_info list[IOSM_NOF_CD_REGION] = {
+ {"report.json", REPORT_JSON_SIZE,},
+ {"coredump.fcd", COREDUMP_FCD_SIZE,},
+ {"cdd.log", CDD_LOG_SIZE,},
+ {"eeprom.bin", EEPROM_BIN_SIZE,},
+ {"bootcore_trace.bin", BOOTCORE_TRC_BIN_SIZE,},
+ {"bootcore_prev_trace.bin", BOOTCORE_PREV_TRC_BIN_SIZE,},
+};
+
+/* Get the param values for the specific param ID's */
+static int ipc_devlink_get_param(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(dl);
+
+ if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH)
+ ctx->val.vu8 = ipc_devlink->param.erase_full_flash;
+
+ return 0;
+}
+
+/* Set the param values for the specific param ID's */
+static int ipc_devlink_set_param(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(dl);
+
+ if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH)
+ ipc_devlink->param.erase_full_flash = ctx->val.vu8;
+
+ return 0;
+}
+
+/* Devlink param structure array */
+static const struct devlink_param iosm_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH,
+ "erase_full_flash", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ipc_devlink_get_param, ipc_devlink_set_param,
+ NULL),
+};
+
+/* Get devlink flash component type */
+static enum iosm_flash_comp_type
+ipc_devlink_get_flash_comp_type(const char comp_str[], u32 len)
+{
+ enum iosm_flash_comp_type fls_type;
+
+ if (!strncmp("PSI", comp_str, len))
+ fls_type = FLASH_COMP_TYPE_PSI;
+ else if (!strncmp("EBL", comp_str, len))
+ fls_type = FLASH_COMP_TYPE_EBL;
+ else if (!strncmp("FLS", comp_str, len))
+ fls_type = FLASH_COMP_TYPE_FLS;
+ else
+ fls_type = FLASH_COMP_TYPE_INVAL;
+
+ return fls_type;
+}
+
+/* Function triggered on devlink flash command
+ * Flash update function which calls multiple functions based on
+ * component type specified in the flash command
+ */
+static int ipc_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(devlink);
+ enum iosm_flash_comp_type fls_type;
+ struct iosm_devlink_image *header;
+ int rc = -EINVAL;
+ u8 *mdm_rsp;
+
+ header = (struct iosm_devlink_image *)params->fw->data;
+
+ if (!header || params->fw->size <= IOSM_DEVLINK_HDR_SIZE ||
+ (memcmp(header->magic_header, IOSM_DEVLINK_MAGIC_HEADER,
+ IOSM_DEVLINK_MAGIC_HEADER_LEN) != 0))
+ return -EINVAL;
+
+ mdm_rsp = kzalloc(IOSM_EBL_DW_PACK_SIZE, GFP_KERNEL);
+ if (!mdm_rsp)
+ return -ENOMEM;
+
+ fls_type = ipc_devlink_get_flash_comp_type(header->image_type,
+ IOSM_DEVLINK_MAX_IMG_LEN);
+
+ switch (fls_type) {
+ case FLASH_COMP_TYPE_PSI:
+ rc = ipc_flash_boot_psi(ipc_devlink, params->fw);
+ break;
+ case FLASH_COMP_TYPE_EBL:
+ rc = ipc_flash_boot_ebl(ipc_devlink, params->fw);
+ if (rc)
+ break;
+ rc = ipc_flash_boot_set_capabilities(ipc_devlink, mdm_rsp);
+ if (rc)
+ break;
+ rc = ipc_flash_read_swid(ipc_devlink, mdm_rsp);
+ break;
+ case FLASH_COMP_TYPE_FLS:
+ rc = ipc_flash_send_fls(ipc_devlink, params->fw, mdm_rsp);
+ break;
+ default:
+ devlink_flash_update_status_notify(devlink, "Invalid component",
+ NULL, 0, 0);
+ break;
+ }
+
+ if (!rc)
+ devlink_flash_update_status_notify(devlink, "Flashing success",
+ header->image_type, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ header->image_type, 0, 0);
+
+ kfree(mdm_rsp);
+ return rc;
+}
+
+/* Call back function for devlink ops */
+static const struct devlink_ops devlink_flash_ops = {
+ .flash_update = ipc_devlink_flash_update,
+};
+
+/**
+ * ipc_devlink_send_cmd - Send command to Modem
+ * @ipc_devlink: Pointer to struct iosm_devlink
+ * @cmd: Command to be sent to modem
+ * @entry: Command entry number
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry)
+{
+ struct iosm_rpsi_cmd rpsi_cmd;
+
+ rpsi_cmd.param.dword = cpu_to_le32(entry);
+ rpsi_cmd.cmd = cpu_to_le16(cmd);
+ rpsi_cmd.crc = rpsi_cmd.param.word[0] ^ rpsi_cmd.param.word[1] ^
+ rpsi_cmd.cmd;
+
+ return ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&rpsi_cmd,
+ sizeof(rpsi_cmd));
+}
+
+/* Function to create snapshot */
+static int ipc_devlink_coredump_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(dl);
+ struct iosm_coredump_file_info *cd_list = ops->priv;
+ u32 region_size;
+ int rc;
+
+ dev_dbg(ipc_devlink->dev, "Region:%s, ID:%d", ops->name,
+ cd_list->entry);
+ region_size = cd_list->default_size;
+ rc = ipc_coredump_collect(ipc_devlink, data, cd_list->entry,
+ region_size);
+ if (rc) {
+ dev_err(ipc_devlink->dev, "Fail to create snapshot,err %d", rc);
+ goto coredump_collect_err;
+ }
+
+ /* Send coredump end cmd indicating end of coredump collection */
+ if (cd_list->entry == (IOSM_NOF_CD_REGION - 1))
+ ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end);
+
+ return 0;
+
+coredump_collect_err:
+ ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end);
+ return rc;
+}
+
+/* To create regions for coredump files */
+static int ipc_devlink_create_region(struct iosm_devlink *devlink)
+{
+ struct devlink_region_ops *mdm_coredump;
+ int rc = 0;
+ int i;
+
+ mdm_coredump = devlink->iosm_devlink_mdm_coredump;
+ for (i = 0; i < IOSM_NOF_CD_REGION; i++) {
+ mdm_coredump[i].name = list[i].filename;
+ mdm_coredump[i].snapshot = ipc_devlink_coredump_snapshot;
+ mdm_coredump[i].destructor = vfree;
+ devlink->cd_regions[i] =
+ devlink_region_create(devlink->devlink_ctx,
+ &mdm_coredump[i], MAX_SNAPSHOTS,
+ list[i].default_size);
+
+ if (IS_ERR(devlink->cd_regions[i])) {
+ rc = PTR_ERR(devlink->cd_regions[i]);
+ dev_err(devlink->dev, "Devlink region fail,err %d", rc);
+ /* Delete previously created regions */
+ for ( ; i >= 0; i--)
+ devlink_region_destroy(devlink->cd_regions[i]);
+ goto region_create_fail;
+ }
+ list[i].entry = i;
+ mdm_coredump[i].priv = list + i;
+ }
+region_create_fail:
+ return rc;
+}
+
+/* To Destroy devlink regions */
+static void ipc_devlink_destroy_region(struct iosm_devlink *ipc_devlink)
+{
+ u8 i;
+
+ for (i = 0; i < IOSM_NOF_CD_REGION; i++)
+ devlink_region_destroy(ipc_devlink->cd_regions[i]);
+}
+
+/**
+ * ipc_devlink_init - Initialize/register devlink to IOSM driver
+ * @ipc_imem: Pointer to struct iosm_imem
+ *
+ * Returns: Pointer to iosm_devlink on success and NULL on failure
+ */
+struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem)
+{
+ struct ipc_chnl_cfg chnl_cfg_flash = { 0 };
+ struct iosm_devlink *ipc_devlink;
+ struct devlink *devlink_ctx;
+ int rc;
+
+ devlink_ctx = devlink_alloc(&devlink_flash_ops,
+ sizeof(struct iosm_devlink),
+ ipc_imem->dev);
+ if (!devlink_ctx) {
+ dev_err(ipc_imem->dev, "devlink_alloc failed");
+ goto devlink_alloc_fail;
+ }
+
+ ipc_devlink = devlink_priv(devlink_ctx);
+ ipc_devlink->devlink_ctx = devlink_ctx;
+ ipc_devlink->pcie = ipc_imem->pcie;
+ ipc_devlink->dev = ipc_imem->dev;
+
+ rc = devlink_params_register(devlink_ctx, iosm_devlink_params,
+ ARRAY_SIZE(iosm_devlink_params));
+ if (rc) {
+ dev_err(ipc_devlink->dev,
+ "devlink_params_register failed. rc %d", rc);
+ goto param_reg_fail;
+ }
+
+ ipc_devlink->cd_file_info = list;
+
+ rc = ipc_devlink_create_region(ipc_devlink);
+ if (rc) {
+ dev_err(ipc_devlink->dev, "Devlink Region create failed, rc %d",
+ rc);
+ goto region_create_fail;
+ }
+
+ if (ipc_chnl_cfg_get(&chnl_cfg_flash, IPC_MEM_CTRL_CHL_ID_7) < 0)
+ goto chnl_get_fail;
+
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
+ chnl_cfg_flash, IRQ_MOD_OFF);
+
+ init_completion(&ipc_devlink->devlink_sio.read_sem);
+ skb_queue_head_init(&ipc_devlink->devlink_sio.rx_list);
+
+ devlink_register(devlink_ctx);
+ dev_dbg(ipc_devlink->dev, "iosm devlink register success");
+
+ return ipc_devlink;
+
+chnl_get_fail:
+ ipc_devlink_destroy_region(ipc_devlink);
+region_create_fail:
+ devlink_params_unregister(devlink_ctx, iosm_devlink_params,
+ ARRAY_SIZE(iosm_devlink_params));
+param_reg_fail:
+ devlink_free(devlink_ctx);
+devlink_alloc_fail:
+ return NULL;
+}
+
+/**
+ * ipc_devlink_deinit - Uninitialize the devlink from the IOSM driver.
+ * @ipc_devlink: Devlink instance
+ */
+void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink)
+{
+ struct devlink *devlink_ctx = ipc_devlink->devlink_ctx;
+
+ devlink_unregister(devlink_ctx);
+ ipc_devlink_destroy_region(ipc_devlink);
+ devlink_params_unregister(devlink_ctx, iosm_devlink_params,
+ ARRAY_SIZE(iosm_devlink_params));
+ if (ipc_devlink->devlink_sio.devlink_read_pend) {
+ complete(&ipc_devlink->devlink_sio.read_sem);
+ complete(&ipc_devlink->devlink_sio.channel->ul_sem);
+ }
+ if (!ipc_devlink->devlink_sio.devlink_read_pend)
+ skb_queue_purge(&ipc_devlink->devlink_sio.rx_list);
+
+ ipc_imem_sys_devlink_close(ipc_devlink);
+ devlink_free(devlink_ctx);
+}
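
The "crc" built in ipc_devlink_send_cmd() above is a simple byte-wise XOR of the two little-endian 16-bit halves of the entry word and the command code, so the value placed in the crc field is the little-endian encoding of the sum computed below. A stand-alone mirror, for illustration only (the example values are hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint16_t rpsi_crc(uint16_t cmd, uint32_t entry)
{
	uint16_t lo = entry & 0xffff;	/* param.word[0] */
	uint16_t hi = entry >> 16;	/* param.word[1] */

	return lo ^ hi ^ cmd;
}

int main(void)
{
	/* e.g. rpsi_cmd_coredump_get (0x11) for coredump entry 3 -> 0x0012 */
	printf("crc = 0x%04x\n", rpsi_crc(0x11, 3));
	return 0;
}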
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.h b/drivers/net/wwan/iosm/iosm_ipc_devlink.h
new file mode 100644
index 0000000000..35c2d013b9
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef _IOSM_IPC_DEVLINK_H_
+#define _IOSM_IPC_DEVLINK_H_
+
+#include <net/devlink.h>
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_pcie.h"
+
+/* Image ext max len */
+#define IOSM_DEVLINK_MAX_IMG_LEN 3
+/* Magic Header */
+#define IOSM_DEVLINK_MAGIC_HEADER "IOSM_DEVLINK_HEADER"
+/* Magic Header len */
+#define IOSM_DEVLINK_MAGIC_HEADER_LEN 20
+/* Devlink image type */
+#define IOSM_DEVLINK_IMG_TYPE 4
+/* Reserve header size */
+#define IOSM_DEVLINK_RESERVED 34
+/* Devlink Image Header size */
+#define IOSM_DEVLINK_HDR_SIZE sizeof(struct iosm_devlink_image)
+/* MAX file name length */
+#define IOSM_MAX_FILENAME_LEN 32
+/* EBL response size */
+#define IOSM_EBL_RSP_SIZE 76
+/* MAX number of regions supported */
+#define IOSM_NOF_CD_REGION 6
+/* MAX number of SNAPSHOTS supported */
+#define MAX_SNAPSHOTS 1
+/* Default Coredump file size */
+#define REPORT_JSON_SIZE 0x800
+#define COREDUMP_FCD_SIZE 0x10E00000
+#define CDD_LOG_SIZE 0x30000
+#define EEPROM_BIN_SIZE 0x10000
+#define BOOTCORE_TRC_BIN_SIZE 0x8000
+#define BOOTCORE_PREV_TRC_BIN_SIZE 0x20000
+
+/**
+ * enum iosm_devlink_param_id - Enum type to different devlink params
+ * @IOSM_DEVLINK_PARAM_ID_BASE: Devlink param base ID
+ * @IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH: Set if full erase required
+ */
+
+enum iosm_devlink_param_id {
+ IOSM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH,
+};
+
+/**
+ * enum iosm_rpsi_cmd_code - Enum type for RPSI command list
+ * @rpsi_cmd_code_ebl: Command to load ebl
+ * @rpsi_cmd_coredump_start: Command to get list of files and
+ * file size info from PSI
+ * @rpsi_cmd_coredump_get: Command to get the coredump data
+ * @rpsi_cmd_coredump_end: Command to stop receiving the coredump
+ */
+enum iosm_rpsi_cmd_code {
+ rpsi_cmd_code_ebl = 0x02,
+ rpsi_cmd_coredump_start = 0x10,
+ rpsi_cmd_coredump_get = 0x11,
+ rpsi_cmd_coredump_end = 0x12,
+};
+
+/**
+ * enum iosm_flash_comp_type - Enum for different flash component types
+ * @FLASH_COMP_TYPE_PSI: PSI flash comp type
+ * @FLASH_COMP_TYPE_EBL: EBL flash comp type
+ * @FLASH_COMP_TYPE_FLS: FLS flash comp type
+ * @FLASH_COMP_TYPE_INVAL: Invalid flash comp type
+ */
+enum iosm_flash_comp_type {
+ FLASH_COMP_TYPE_PSI,
+ FLASH_COMP_TYPE_EBL,
+ FLASH_COMP_TYPE_FLS,
+ FLASH_COMP_TYPE_INVAL,
+};
+
+/**
+ * struct iosm_devlink_sio - SIO instance
+ * @rx_list: Downlink skbuf list received from CP
+ * @read_sem: Needed for the blocking read or downlink transfer
+ * @channel_id: Reserved channel id for flashing/CD collection to RAM
+ * @channel: Channel instance for flashing and coredump
+ * @devlink_read_pend: Check if read is pending
+ */
+struct iosm_devlink_sio {
+ struct sk_buff_head rx_list;
+ struct completion read_sem;
+ int channel_id;
+ struct ipc_mem_channel *channel;
+ u32 devlink_read_pend;
+};
+
+/**
+ * struct iosm_flash_params - List of flash params required for flashing
+ * @erase_full_flash: To set the flashing mode
+ * erase_full_flash = 1; full erase
+ * erase_full_flash = 0; no erase
+ * @erase_full_flash_done: Flag to check if it is a full erase
+ */
+struct iosm_flash_params {
+ u8 erase_full_flash;
+ u8 erase_full_flash_done;
+};
+
+/**
+ * struct iosm_devlink_image - Structure with Fls file header info
+ * @magic_header: Header of the firmware image
+ * @image_type: Firmware image type
+ * @region_address: Address of the region to be flashed
+ * @download_region: Field to identify if it is a region
+ * @last_region: Field to identify if it is last region
+ * @reserved: Reserved field
+ */
+struct iosm_devlink_image {
+ char magic_header[IOSM_DEVLINK_MAGIC_HEADER_LEN];
+ char image_type[IOSM_DEVLINK_IMG_TYPE];
+ __le32 region_address;
+ u8 download_region;
+ u8 last_region;
+ u8 reserved[IOSM_DEVLINK_RESERVED];
+} __packed;
+
+/**
+ * struct iosm_ebl_ctx_data - EBL ctx data used during flashing
+ * @ebl_sw_info_version: SWID version info obtained from EBL
+ * @m_ebl_resp: Buffer used to read and write the ebl data
+ */
+struct iosm_ebl_ctx_data {
+ u8 ebl_sw_info_version;
+ u8 m_ebl_resp[IOSM_EBL_RSP_SIZE];
+};
+
+/**
+ * struct iosm_coredump_file_info - Coredump file info
+ * @filename: Name of coredump file
+ * @default_size: Default size of coredump file
+ * @actual_size: Actual size of coredump file
+ * @entry: Index of the coredump file
+ */
+struct iosm_coredump_file_info {
+ char filename[IOSM_MAX_FILENAME_LEN];
+ u32 default_size;
+ u32 actual_size;
+ u32 entry;
+};
+
+/**
+ * struct iosm_devlink - IOSM Devlink structure
+ * @devlink_sio: SIO instance for read/write functionality
+ * @pcie: Pointer to PCIe component
+ * @dev: Pointer to device struct
+ * @devlink_ctx: Pointer to devlink context
+ * @param: Params required for flashing
+ * @ebl_ctx: Data to be read and written to Modem
+ * @cd_file_info: coredump file info
+ * @iosm_devlink_mdm_coredump: region ops for coredump collection
+ * @cd_regions: coredump regions
+ */
+struct iosm_devlink {
+ struct iosm_devlink_sio devlink_sio;
+ struct iosm_pcie *pcie;
+ struct device *dev;
+ struct devlink *devlink_ctx;
+ struct iosm_flash_params param;
+ struct iosm_ebl_ctx_data ebl_ctx;
+ struct iosm_coredump_file_info *cd_file_info;
+ struct devlink_region_ops iosm_devlink_mdm_coredump[IOSM_NOF_CD_REGION];
+ struct devlink_region *cd_regions[IOSM_NOF_CD_REGION];
+};
+
+/**
+ * union iosm_rpsi_param_u - RPSI cmd param for CRC calculation
+ * @word: Words member used in CRC calculation
+ * @dword: Actual data
+ */
+union iosm_rpsi_param_u {
+ __le16 word[2];
+ __le32 dword;
+};
+
+/**
+ * struct iosm_rpsi_cmd - Structure for RPSI Command
+ * @param: Used to calculate CRC
+ * @cmd: Stores the RPSI command
+ * @crc: Stores the CRC value
+ */
+struct iosm_rpsi_cmd {
+ union iosm_rpsi_param_u param;
+ __le16 cmd;
+ __le16 crc;
+};
+
+struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem);
+
+void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink);
+
+int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry);
+
+#endif /* _IOSM_IPC_DEVLINK_H_ */
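
Given the packed layout of struct iosm_devlink_image above, IOSM_DEVLINK_HDR_SIZE works out to 64 bytes (20 + 4 + 4 + 1 + 1 + 34), and the validity check done by ipc_devlink_flash_update() in iosm_ipc_devlink.c reduces to the following sketch (an illustrative restatement, not a second copy kept in the driver):

static bool iosm_fw_header_ok(const struct firmware *fw)
{
	const struct iosm_devlink_image *hdr = (const void *)fw->data;

	/* blob must be larger than the 64-byte packed header */
	if (!fw->data || fw->size <= IOSM_DEVLINK_HDR_SIZE)
		return false;

	/* first 20 bytes must carry the "IOSM_DEVLINK_HEADER" magic */
	return !memcmp(hdr->magic_header, IOSM_DEVLINK_MAGIC_HEADER,
		       IOSM_DEVLINK_MAGIC_HEADER_LEN);
}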
diff --git a/drivers/net/wwan/iosm/iosm_ipc_flash.c b/drivers/net/wwan/iosm/iosm_ipc_flash.c
new file mode 100644
index 0000000000..d890914aa3
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_flash.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include "iosm_ipc_coredump.h"
+#include "iosm_ipc_devlink.h"
+#include "iosm_ipc_flash.h"
+
+/* This function will pack the data to be sent to the modem using the
+ * payload, payload length and pack id
+ */
+static int ipc_flash_proc_format_ebl_pack(struct iosm_flash_data *flash_req,
+ u32 pack_length, u16 pack_id,
+ u8 *payload, u32 payload_length)
+{
+ u16 checksum = pack_id;
+ u32 i;
+
+ if (payload_length + IOSM_EBL_HEAD_SIZE > pack_length)
+ return -EINVAL;
+
+ flash_req->pack_id = cpu_to_le16(pack_id);
+ flash_req->msg_length = cpu_to_le32(payload_length);
+ checksum += (payload_length >> IOSM_EBL_PAYL_SHIFT) +
+ (payload_length & IOSM_EBL_CKSM);
+
+ for (i = 0; i < payload_length; i++)
+ checksum += payload[i];
+
+ flash_req->checksum = cpu_to_le16(checksum);
+
+ return 0;
+}
+
+/* validate the response received from modem and
+ * check the type of errors received
+ */
+static int ipc_flash_proc_check_ebl_rsp(void *hdr_rsp, void *payload_rsp)
+{
+ struct iosm_ebl_error *err_info = payload_rsp;
+ u16 *rsp_code = hdr_rsp;
+ u32 i;
+
+ if (*rsp_code == IOSM_EBL_RSP_BUFF) {
+ for (i = 0; i < IOSM_MAX_ERRORS; i++) {
+ if (!err_info->error[i].error_code) {
+ pr_err("EBL: error_class = %d, error_code = %d",
+ err_info->error[i].error_class,
+ err_info->error[i].error_code);
+ }
+ }
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Send data to the modem */
+static int ipc_flash_send_data(struct iosm_devlink *ipc_devlink, u32 size,
+ u16 pack_id, u8 *payload, u32 payload_length)
+{
+ struct iosm_flash_data flash_req;
+ int ret;
+
+ ret = ipc_flash_proc_format_ebl_pack(&flash_req, size,
+ pack_id, payload, payload_length);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL2 pack failed for pack_id:%d",
+ pack_id);
+ goto ipc_free_payload;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&flash_req,
+ IOSM_EBL_HEAD_SIZE);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL Header write failed for Id:%x",
+ pack_id);
+ goto ipc_free_payload;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, payload, payload_length);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL Payload write failed for Id:%x",
+ pack_id);
+ }
+
+ipc_free_payload:
+ return ret;
+}
+
+/**
+ * ipc_flash_link_establish - Flash link establishment
+ * @ipc_imem: Pointer to struct iosm_imem
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_link_establish(struct iosm_imem *ipc_imem)
+{
+ u8 ler_data[IOSM_LER_RSP_SIZE];
+ u32 bytes_read;
+
+ /* Allocate channel for flashing/cd collection */
+ ipc_imem->ipc_devlink->devlink_sio.channel =
+ ipc_imem_sys_devlink_open(ipc_imem);
+
+ if (!ipc_imem->ipc_devlink->devlink_sio.channel)
+ goto chl_open_fail;
+
+ if (ipc_imem_sys_devlink_read(ipc_imem->ipc_devlink, ler_data,
+ IOSM_LER_RSP_SIZE, &bytes_read))
+ goto devlink_read_fail;
+
+ if (bytes_read != IOSM_LER_RSP_SIZE)
+ goto devlink_read_fail;
+
+ return 0;
+
+devlink_read_fail:
+ ipc_imem_sys_devlink_close(ipc_imem->ipc_devlink);
+chl_open_fail:
+ return -EIO;
+}
+
+/* Receive data from the modem */
+static int ipc_flash_receive_data(struct iosm_devlink *ipc_devlink, u32 size,
+ u8 *mdm_rsp)
+{
+ u8 mdm_rsp_hdr[IOSM_EBL_HEAD_SIZE];
+ u32 bytes_read;
+ int ret;
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp_hdr,
+ IOSM_EBL_HEAD_SIZE, &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed",
+ IOSM_EBL_HEAD_SIZE);
+ goto ipc_flash_recv_err;
+ }
+
+ if (bytes_read != IOSM_EBL_HEAD_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_recv_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp, size,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed",
+ size);
+ goto ipc_flash_recv_err;
+ }
+
+ if (bytes_read != size) {
+ ret = -EINVAL;
+ goto ipc_flash_recv_err;
+ }
+
+ ret = ipc_flash_proc_check_ebl_rsp(mdm_rsp_hdr + 2, mdm_rsp);
+
+ipc_flash_recv_err:
+ return ret;
+}
+
+/* Function to send command to modem and receive response */
+static int ipc_flash_send_receive(struct iosm_devlink *ipc_devlink, u16 pack_id,
+ u8 *payload, u32 payload_length, u8 *mdm_rsp)
+{
+ size_t frame_len = IOSM_EBL_DW_PACK_SIZE;
+ int ret;
+
+ if (pack_id == FLASH_SET_PROT_CONF)
+ frame_len = IOSM_EBL_W_PACK_SIZE;
+
+ ret = ipc_flash_send_data(ipc_devlink, frame_len, pack_id, payload,
+ payload_length);
+ if (ret)
+ goto ipc_flash_send_rcv;
+
+ ret = ipc_flash_receive_data(ipc_devlink,
+ frame_len - IOSM_EBL_HEAD_SIZE, mdm_rsp);
+
+ipc_flash_send_rcv:
+ return ret;
+}
+
+/**
+ * ipc_flash_boot_set_capabilities - Set modem boot capabilities in flash
+ * @ipc_devlink: Pointer to devlink structure
+ * @mdm_rsp: Pointer to modem response buffer
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink,
+ u8 *mdm_rsp)
+{
+ ipc_devlink->ebl_ctx.ebl_sw_info_version =
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_RSP_SW_INFO_VER];
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_ERASE] = IOSM_CAP_NOT_ENHANCED;
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_CRC] = IOSM_CAP_NOT_ENHANCED;
+
+ if (ipc_devlink->ebl_ctx.m_ebl_resp[EBL_CAPS_FLAG] &
+ IOSM_CAP_USE_EXT_CAP) {
+ if (ipc_devlink->param.erase_full_flash)
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &=
+ ~((u8)IOSM_EXT_CAP_ERASE_ALL);
+ else
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &=
+ ~((u8)IOSM_EXT_CAP_COMMIT_ALL);
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_EXT_CAPS_HANDLED] =
+ IOSM_CAP_USE_EXT_CAP;
+ }
+
+ /* Write back the EBL capability to modem
+ * Request Set Protcnf command
+ */
+ return ipc_flash_send_receive(ipc_devlink, FLASH_SET_PROT_CONF,
+ ipc_devlink->ebl_ctx.m_ebl_resp,
+ IOSM_EBL_RSP_SIZE, mdm_rsp);
+}
+
+/* Read the SWID type and SWID value from the EBL */
+int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
+{
+ struct iosm_flash_msg_control cmd_msg;
+ struct iosm_swid_table *swid;
+ char ebl_swid[IOSM_SWID_STR];
+ int ret;
+
+ if (ipc_devlink->ebl_ctx.ebl_sw_info_version !=
+ IOSM_EXT_CAP_SWID_OOS_PACK)
+ return -EINVAL;
+
+ cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_READ);
+ cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_SWID_TABLE);
+ cmd_msg.length = cpu_to_le32(IOSM_MSG_LEN_ARG);
+ cmd_msg.arguments = cpu_to_le32(IOSM_MSG_LEN_ARG);
+
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL,
+ (u8 *)&cmd_msg, IOSM_MDM_SEND_16, mdm_rsp);
+ if (ret)
+ goto ipc_swid_err;
+
+ cmd_msg.action = cpu_to_le32(*((u32 *)mdm_rsp));
+
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_DATA_READ,
+ (u8 *)&cmd_msg, IOSM_MDM_SEND_4, mdm_rsp);
+ if (ret)
+ goto ipc_swid_err;
+
+ swid = (struct iosm_swid_table *)mdm_rsp;
+ dev_dbg(ipc_devlink->dev, "SWID %x RF_ENGINE_ID %x", swid->sw_id_val,
+ swid->rf_engine_id_val);
+
+ snprintf(ebl_swid, sizeof(ebl_swid), "SWID: %x, RF_ENGINE_ID: %x",
+ swid->sw_id_val, swid->rf_engine_id_val);
+
+ devlink_flash_update_status_notify(ipc_devlink->devlink_ctx, ebl_swid,
+ NULL, 0, 0);
+ipc_swid_err:
+ return ret;
+}
+
+/* Function to check if full erase or conditional erase was successful */
+static int ipc_flash_erase_check(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
+{
+ int ret, count = 0;
+ u16 mdm_rsp_data;
+
+ /* Request Flash Erase Check */
+ do {
+ mdm_rsp_data = IOSM_MDM_SEND_DATA;
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_CHECK,
+ (u8 *)&mdm_rsp_data,
+ IOSM_MDM_SEND_2, mdm_rsp);
+ if (ret)
+ goto ipc_erase_chk_err;
+
+ mdm_rsp_data = *((u16 *)mdm_rsp);
+ if (mdm_rsp_data > IOSM_MDM_ERASE_RSP) {
+ dev_err(ipc_devlink->dev,
+ "Flash Erase Check resp wrong 0x%04X",
+ mdm_rsp_data);
+ ret = -EINVAL;
+ goto ipc_erase_chk_err;
+ }
+ count++;
+ msleep(IOSM_FLASH_ERASE_CHECK_INTERVAL);
+ } while ((mdm_rsp_data != IOSM_MDM_ERASE_RSP) &&
+ (count < (IOSM_FLASH_ERASE_CHECK_TIMEOUT /
+ IOSM_FLASH_ERASE_CHECK_INTERVAL)));
+
+ if (mdm_rsp_data != IOSM_MDM_ERASE_RSP) {
+ dev_err(ipc_devlink->dev, "Modem erase check timeout failure!");
+ ret = -ETIMEDOUT;
+ }
+
+ipc_erase_chk_err:
+ return ret;
+}
+
+/* Full erase function which will erase the nand flash through EBL command */
+static int ipc_flash_full_erase(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
+{
+ u32 erase_address = IOSM_ERASE_START_ADDR;
+ struct iosm_flash_msg_control cmd_msg;
+ u32 erase_length = IOSM_ERASE_LEN;
+ int ret;
+
+ dev_dbg(ipc_devlink->dev, "Erase full nand flash");
+ cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_ERASE);
+ cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_ALL_FLASH);
+ cmd_msg.length = cpu_to_le32(erase_length);
+ cmd_msg.arguments = cpu_to_le32(erase_address);
+
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL,
+ (unsigned char *)&cmd_msg,
+ IOSM_MDM_SEND_16, mdm_rsp);
+ if (ret)
+ goto ipc_flash_erase_err;
+
+ ipc_devlink->param.erase_full_flash_done = IOSM_SET_FLAG;
+ ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp);
+
+ipc_flash_erase_err:
+ return ret;
+}
+
+/* Logic for flashing all the Loadmaps available for individual fls file */
+static int ipc_flash_download_region(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw, u8 *mdm_rsp)
+{
+ u32 raw_len, rest_len = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ struct iosm_devlink_image *fls_data;
+ __le32 reg_info[2]; /* 0th position region address, 1st position size */
+ u32 nand_address;
+ char *file_ptr;
+ int ret;
+
+ fls_data = (struct iosm_devlink_image *)fw->data;
+ file_ptr = (void *)(fls_data + 1);
+ nand_address = le32_to_cpu(fls_data->region_address);
+ reg_info[0] = cpu_to_le32(nand_address);
+
+ if (!ipc_devlink->param.erase_full_flash_done) {
+ reg_info[1] = cpu_to_le32(nand_address + rest_len - 2);
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_START,
+ (u8 *)reg_info, IOSM_MDM_SEND_8,
+ mdm_rsp);
+ if (ret)
+ goto dl_region_fail;
+
+ ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp);
+ if (ret)
+ goto dl_region_fail;
+ }
+
+ /* Request Flash Set Address */
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_SET_ADDRESS,
+ (u8 *)reg_info, IOSM_MDM_SEND_4, mdm_rsp);
+ if (ret)
+ goto dl_region_fail;
+
+ /* Request Flash Write Raw Image */
+ ret = ipc_flash_send_data(ipc_devlink, IOSM_EBL_DW_PACK_SIZE,
+ FLASH_WRITE_IMAGE_RAW, (u8 *)&rest_len,
+ IOSM_MDM_SEND_4);
+ if (ret)
+ goto dl_region_fail;
+
+ do {
+ raw_len = (rest_len > IOSM_FLS_BUF_SIZE) ? IOSM_FLS_BUF_SIZE :
+ rest_len;
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, file_ptr,
+ raw_len);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "Image write failed");
+ goto dl_region_fail;
+ }
+ file_ptr += raw_len;
+ rest_len -= raw_len;
+ } while (rest_len);
+
+ ret = ipc_flash_receive_data(ipc_devlink, IOSM_EBL_DW_PAYL_SIZE,
+ mdm_rsp);
+
+dl_region_fail:
+ return ret;
+}
+
+/**
+ * ipc_flash_send_fls - Inject Modem subsystem fls file to device
+ * @ipc_devlink: Pointer to devlink structure
+ * @fw: FW image
+ * @mdm_rsp: Pointer to modem response buffer
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw, u8 *mdm_rsp)
+{
+ u32 fw_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ struct iosm_devlink_image *fls_data;
+ u16 flash_cmd;
+ int ret;
+
+ fls_data = (struct iosm_devlink_image *)fw->data;
+ if (ipc_devlink->param.erase_full_flash) {
+ ipc_devlink->param.erase_full_flash = false;
+ ret = ipc_flash_full_erase(ipc_devlink, mdm_rsp);
+ if (ret)
+ goto ipc_flash_err;
+ }
+
+ /* Request Sec Start */
+ if (!fls_data->download_region) {
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_START,
+ (u8 *)fw->data +
+ IOSM_DEVLINK_HDR_SIZE, fw_size,
+ mdm_rsp);
+ if (ret)
+ goto ipc_flash_err;
+ } else {
+ /* Download regions */
+ ret = ipc_flash_download_region(ipc_devlink, fw, mdm_rsp);
+ if (ret)
+ goto ipc_flash_err;
+
+ if (fls_data->last_region) {
+ /* Request Sec End */
+ flash_cmd = IOSM_MDM_SEND_DATA;
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_END,
+ (u8 *)&flash_cmd,
+ IOSM_MDM_SEND_2, mdm_rsp);
+ }
+ }
+
+ipc_flash_err:
+ return ret;
+}
+
+/**
+ * ipc_flash_boot_psi - Inject PSI image
+ * @ipc_devlink: Pointer to devlink structure
+ * @fw: FW image
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw)
+{
+ u32 bytes_read, psi_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ u8 psi_ack_byte[IOSM_PSI_ACK], read_data[2];
+ u8 *psi_code;
+ int ret;
+
+ dev_dbg(ipc_devlink->dev, "Boot transfer PSI");
+ psi_code = kmemdup(fw->data + IOSM_DEVLINK_HDR_SIZE, psi_size,
+ GFP_KERNEL);
+ if (!psi_code)
+ return -ENOMEM;
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, psi_code, psi_size);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "RPSI Image write failed");
+ goto ipc_flash_psi_free;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data,
+ IOSM_LER_ACK_SIZE, &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "ipc_devlink_sio_read ACK failed");
+ goto ipc_flash_psi_free;
+ }
+
+ if (bytes_read != IOSM_LER_ACK_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_psi_free;
+ }
+
+ snprintf(psi_ack_byte, sizeof(psi_ack_byte), "%x%x", read_data[0],
+ read_data[1]);
+ devlink_flash_update_status_notify(ipc_devlink->devlink_ctx,
+ psi_ack_byte, "PSI ACK", 0, 0);
+
+ if (read_data[0] == 0x00 && read_data[1] == 0xCD) {
+ dev_dbg(ipc_devlink->dev, "Coredump detected");
+ ret = ipc_coredump_get_list(ipc_devlink,
+ rpsi_cmd_coredump_start);
+ if (ret)
+ dev_err(ipc_devlink->dev, "Failed to get cd list");
+ }
+
+ipc_flash_psi_free:
+ kfree(psi_code);
+ return ret;
+}
+
+/**
+ * ipc_flash_boot_ebl - Inject EBL image
+ * @ipc_devlink: Pointer to devlink structure
+ * @fw: FW image
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw)
+{
+ u32 ebl_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ u8 read_data[2];
+ u32 bytes_read;
+ int ret;
+
+ if (ipc_mmio_get_exec_stage(ipc_devlink->pcie->imem->mmio) !=
+ IPC_MEM_EXEC_STAGE_PSI) {
+ devlink_flash_update_status_notify(ipc_devlink->devlink_ctx,
+ "Invalid execution stage",
+ NULL, 0, 0);
+ return -EINVAL;
+ }
+
+ dev_dbg(ipc_devlink->dev, "Boot transfer EBL");
+ ret = ipc_devlink_send_cmd(ipc_devlink, rpsi_cmd_code_ebl,
+ IOSM_RPSI_LOAD_SIZE);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "Sending rpsi_cmd_code_ebl failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "rpsi_cmd_code_ebl read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_READ_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&ebl_size,
+ sizeof(ebl_size));
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL length write failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_READ_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink,
+ (u8 *)fw->data + IOSM_DEVLINK_HDR_SIZE,
+ ebl_size);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL data transfer failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_READ_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink,
+ ipc_devlink->ebl_ctx.m_ebl_resp,
+ IOSM_EBL_RSP_SIZE, &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL response read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_EBL_RSP_SIZE)
+ ret = -EINVAL;
+
+ipc_flash_ebl_err:
+ return ret;
+}
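
The checksum assembled by ipc_flash_proc_format_ebl_pack() at the top of this file is a plain 16-bit additive sum: the pack id, plus the two 16-bit halves of the payload length (see IOSM_EBL_PAYL_SHIFT and IOSM_EBL_CKSM), plus every payload byte, with the sum wrapping at 16 bits. A stand-alone mirror of that computation, for illustration only:

#include <stdint.h>

static uint16_t ebl_pack_checksum(uint16_t pack_id, const uint8_t *payload,
				  uint32_t payload_length)
{
	uint16_t checksum = pack_id;
	uint32_t i;

	/* add the high and low 16-bit halves of the payload length ... */
	checksum += (payload_length >> 16) + (payload_length & 0xffff);

	/* ... then every payload byte, letting the sum wrap at 16 bits */
	for (i = 0; i < payload_length; i++)
		checksum += payload[i];

	return checksum;
}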
diff --git a/drivers/net/wwan/iosm/iosm_ipc_flash.h b/drivers/net/wwan/iosm/iosm_ipc_flash.h
new file mode 100644
index 0000000000..132d59d60f
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_flash.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef _IOSM_IPC_FLASH_H
+#define _IOSM_IPC_FLASH_H
+
+/* Buffer size used to read the fls image */
+#define IOSM_FLS_BUF_SIZE 0x00100000
+/* Full erase start address */
+#define IOSM_ERASE_START_ADDR 0x00000000
+/* Erase length for NAND flash */
+#define IOSM_ERASE_LEN 0xFFFFFFFF
+/* EBL response Header size */
+#define IOSM_EBL_HEAD_SIZE 8
+/* EBL payload size */
+#define IOSM_EBL_W_PAYL_SIZE 2048
+/* Total EBL pack size */
+#define IOSM_EBL_W_PACK_SIZE (IOSM_EBL_HEAD_SIZE + IOSM_EBL_W_PAYL_SIZE)
+/* EBL payload size */
+#define IOSM_EBL_DW_PAYL_SIZE 16384
+/* Total EBL pack size */
+#define IOSM_EBL_DW_PACK_SIZE (IOSM_EBL_HEAD_SIZE + IOSM_EBL_DW_PAYL_SIZE)
+/* EBL name size */
+#define IOSM_EBL_NAME 32
+/* Maximum supported error types */
+#define IOSM_MAX_ERRORS 8
+/* Read size for RPSI/EBL response */
+#define IOSM_READ_SIZE 2
+/* Link establishment response ack size */
+#define IOSM_LER_ACK_SIZE 2
+/* PSI ACK len */
+#define IOSM_PSI_ACK 8
+/* SWID capability for packed swid type */
+#define IOSM_EXT_CAP_SWID_OOS_PACK 0x02
+/* EBL error response buffer */
+#define IOSM_EBL_RSP_BUFF 0x0041
+/* SWID string length */
+#define IOSM_SWID_STR 64
+/* Load EBL command size */
+#define IOSM_RPSI_LOAD_SIZE 0
+/* EBL payload checksum */
+#define IOSM_EBL_CKSM 0x0000FFFF
+/* SWID msg len and argument */
+#define IOSM_MSG_LEN_ARG 0
+/* Data to be sent to modem */
+#define IOSM_MDM_SEND_DATA 0x0000
+/* Data received from modem as part of erase check */
+#define IOSM_MDM_ERASE_RSP 0x0001
+/* Bit shift to calculate Checksum */
+#define IOSM_EBL_PAYL_SHIFT 16
+/* Flag To be set */
+#define IOSM_SET_FLAG 1
+/* Set flash erase check timeout to 100 msec */
+#define IOSM_FLASH_ERASE_CHECK_TIMEOUT 100
+/* Set flash erase check interval to 20 msec */
+#define IOSM_FLASH_ERASE_CHECK_INTERVAL 20
+/* Link establishment response size */
+#define IOSM_LER_RSP_SIZE 60
+
+/**
+ * enum iosm_flash_package_type - Enum for the flashing operations
+ * @FLASH_SET_PROT_CONF: Write EBL capabilities
+ * @FLASH_SEC_START: Start writing the secpack
+ * @FLASH_SEC_END: Validate secpack end
+ * @FLASH_SET_ADDRESS: Set the address for flashing
+ * @FLASH_ERASE_START: Start erase before flashing
+ * @FLASH_ERASE_CHECK: Validate the erase functionality
+ * @FLASH_OOS_CONTROL: Retrieve data based on oos actions
+ * @FLASH_OOS_DATA_READ: Read data from EBL
+ * @FLASH_WRITE_IMAGE_RAW: Write the raw image to flash
+ */
+enum iosm_flash_package_type {
+ FLASH_SET_PROT_CONF = 0x0086,
+ FLASH_SEC_START = 0x0204,
+ FLASH_SEC_END,
+ FLASH_SET_ADDRESS = 0x0802,
+ FLASH_ERASE_START = 0x0805,
+ FLASH_ERASE_CHECK,
+ FLASH_OOS_CONTROL = 0x080C,
+ FLASH_OOS_DATA_READ = 0x080E,
+ FLASH_WRITE_IMAGE_RAW,
+};
+
+/**
+ * enum iosm_out_of_session_action - Actions possible over the
+ * OutOfSession command interface
+ * @FLASH_OOSC_ACTION_READ: Read data according to its type
+ * @FLASH_OOSC_ACTION_ERASE: Erase data according to its type
+ */
+enum iosm_out_of_session_action {
+ FLASH_OOSC_ACTION_READ = 2,
+ FLASH_OOSC_ACTION_ERASE = 3,
+};
+
+/**
+ * enum iosm_out_of_session_type - Data types that can be handled over the
+ * Out Of Session command Interface
+ * @FLASH_OOSC_TYPE_ALL_FLASH: The whole flash area
+ * @FLASH_OOSC_TYPE_SWID_TABLE: Read the swid table from the target
+ */
+enum iosm_out_of_session_type {
+ FLASH_OOSC_TYPE_ALL_FLASH = 8,
+ FLASH_OOSC_TYPE_SWID_TABLE = 16,
+};
+
+/**
+ * enum iosm_ebl_caps - EBL capability settings
+ * @IOSM_CAP_NOT_ENHANCED: If capability not supported
+ * @IOSM_CAP_USE_EXT_CAP: To be set if extended capability is set
+ * @IOSM_EXT_CAP_ERASE_ALL: Set Erase all capability
+ * @IOSM_EXT_CAP_COMMIT_ALL: Set the commit all capability
+ */
+enum iosm_ebl_caps {
+ IOSM_CAP_NOT_ENHANCED = 0x00,
+ IOSM_CAP_USE_EXT_CAP = 0x01,
+ IOSM_EXT_CAP_ERASE_ALL = 0x08,
+ IOSM_EXT_CAP_COMMIT_ALL = 0x20,
+};
+
+/**
+ * enum iosm_ebl_rsp - EBL response field
+ * @EBL_CAPS_FLAG: EBL capability flag
+ * @EBL_SKIP_ERASE: EBL skip erase flag
+ * @EBL_SKIP_CRC: EBL skip wr_pack crc
+ * @EBL_EXT_CAPS_HANDLED: EBL extended capability handled flag
+ * @EBL_OOS_CONFIG: EBL oos configuration
+ * @EBL_RSP_SW_INFO_VER: EBL SW info version
+ */
+enum iosm_ebl_rsp {
+ EBL_CAPS_FLAG = 50,
+ EBL_SKIP_ERASE = 54,
+ EBL_SKIP_CRC = 55,
+ EBL_EXT_CAPS_HANDLED = 57,
+ EBL_OOS_CONFIG = 64,
+ EBL_RSP_SW_INFO_VER = 70,
+};
+
+/**
+ * enum iosm_mdm_send_recv_data - Data to send to modem
+ * @IOSM_MDM_SEND_2: Send 2 bytes of payload
+ * @IOSM_MDM_SEND_4: Send 4 bytes of payload
+ * @IOSM_MDM_SEND_8: Send 8 bytes of payload
+ * @IOSM_MDM_SEND_16: Send 16 bytes of payload
+ */
+enum iosm_mdm_send_recv_data {
+ IOSM_MDM_SEND_2 = 2,
+ IOSM_MDM_SEND_4 = 4,
+ IOSM_MDM_SEND_8 = 8,
+ IOSM_MDM_SEND_16 = 16,
+};
+
+/**
+ * struct iosm_ebl_one_error - Structure containing error details
+ * @error_class: Error type- standard, security and text error
+ * @error_code: Specific error from error type
+ */
+struct iosm_ebl_one_error {
+ u16 error_class;
+ u16 error_code;
+};
+
+/**
+ * struct iosm_ebl_error - Structure with max error type supported
+ * @error: Array of one_error structure with max errors
+ */
+struct iosm_ebl_error {
+ struct iosm_ebl_one_error error[IOSM_MAX_ERRORS];
+};
+
+/**
+ * struct iosm_swid_table - SWID table data for modem
+ * @number_of_data_sets: Number of swid types
+ * @sw_id_type: SWID type - SWID
+ * @sw_id_val: SWID value
+ * @rf_engine_id_type: RF engine ID type - RF_ENGINE_ID
+ * @rf_engine_id_val: RF engine ID value
+ */
+struct iosm_swid_table {
+ u32 number_of_data_sets;
+ char sw_id_type[IOSM_EBL_NAME];
+ u32 sw_id_val;
+ char rf_engine_id_type[IOSM_EBL_NAME];
+ u32 rf_engine_id_val;
+};
+
+/**
+ * struct iosm_flash_msg_control - Data sent to modem
+ * @action: Action to be performed
+ * @type: Type of action
+ * @length: Length of the action
+ * @arguments: Argument value sent to modem
+ */
+struct iosm_flash_msg_control {
+ __le32 action;
+ __le32 type;
+ __le32 length;
+ __le32 arguments;
+};
+
+/**
+ * struct iosm_flash_data - Header Data to be sent to modem
+ * @checksum: Checksum value calculated for the payload data
+ * @pack_id: Flash Action type
+ * @msg_length: Payload length
+ */
+struct iosm_flash_data {
+ __le16 checksum;
+ __le16 pack_id;
+ __le32 msg_length;
+};
+
+int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw);
+
+int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw);
+
+int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink,
+ u8 *mdm_rsp);
+
+int ipc_flash_link_establish(struct iosm_imem *ipc_imem);
+
+int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp);
+
+int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw, u8 *mdm_rsp);
+#endif
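
For orientation, the constants and structures above describe how the EBL write path frames its transfers: each pack is an 8-byte struct iosm_flash_data header followed by up to IOSM_EBL_W_PAYL_SIZE payload bytes (IOSM_EBL_W_PACK_SIZE in total), with all header fields in little-endian byte order. The sketch below fills such a header; the demo_fill_flash_hdr() helper and its byte-wise checksum are illustrative assumptions only and are not claimed to match the algorithm implemented in iosm_ipc_flash.c.

#include <linux/types.h>
#include <asm/byteorder.h>
#include "iosm_ipc_flash.h"

/* Hypothetical helper: fill the 8-byte pack header for one payload chunk. */
static void demo_fill_flash_hdr(struct iosm_flash_data *hdr, u16 pack_id,
				const u8 *payload, u32 payload_len)
{
	u32 sum = 0;
	u32 i;

	/* Simple byte-wise sum folded to 16 bit with IOSM_EBL_CKSM; the real
	 * driver may compute the checksum differently.
	 */
	for (i = 0; i < payload_len; i++)
		sum += payload[i];

	hdr->checksum = cpu_to_le16(sum & IOSM_EBL_CKSM);
	hdr->pack_id = cpu_to_le16(pack_id);	    /* e.g. FLASH_WRITE_IMAGE_RAW */
	hdr->msg_length = cpu_to_le32(payload_len); /* <= IOSM_EBL_W_PAYL_SIZE */
}

The __le16/__le32 field types make explicit that the modem expects this framing in little-endian order regardless of host endianness.
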
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
new file mode 100644
index 0000000000..829515a601
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -0,0 +1,1503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_devlink.h"
+#include "iosm_ipc_flash.h"
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_port.h"
+#include "iosm_ipc_trace.h"
+#include "iosm_ipc_debugfs.h"
+
+/* Check whether the given channel is the WWAN IP (wwan ips) channel. */
+static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
+{
+ if (chnl)
+ return chnl->ctype == IPC_CTYPE_WWAN &&
+ chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
+ return false;
+}
+
+static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
+{
+ union ipc_msg_prep_args prep_args = {
+ .sleep.target = 1,
+ .sleep.state = state,
+ };
+
+ ipc_imem->device_sleep = state;
+
+ return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_SLEEP, &prep_args, NULL);
+}
+
+static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ /* limit max. nr of entries */
+ if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
+ return false;
+
+ return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
+}
+
+/* This timer handler retries DL buffer allocation for pipes that have no free
+ * buffers and rings the doorbell if new TDs were prepared.
+ */
+static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ bool new_buffers_available = false;
+ bool retry_allocation = false;
+ int i;
+
+ for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
+ struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;
+
+ if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
+ continue;
+
+ while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
+ new_buffers_available = true;
+
+ if (pipe->nr_of_queued_entries == 0)
+ retry_allocation = true;
+ }
+
+ if (new_buffers_available)
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_DL_PROCESS);
+
+ if (retry_allocation) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->td_alloc_timer))
+ hrtimer_start(&ipc_imem->td_alloc_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+ return 0;
+}
+
+static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, td_alloc_timer);
+ /* Post an async tasklet event to trigger HP update Doorbell */
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
+ 0, false);
+ return HRTIMER_NORESTART;
+}
+
+/* Fast update timer tasklet handler to trigger HP update */
+static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_FAST_TD_UPD_TMR);
+
+ return 0;
+}
+
+static enum hrtimer_restart
+ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, fast_update_timer);
+ /* Post an async tasklet event to trigger HP update Doorbell */
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
+static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_mux_ul_adb_finish(ipc_imem->mux);
+ return 0;
+}
+
+static enum hrtimer_restart
+ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, adb_timer);
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
+static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
+ struct ipc_mux_config *cfg)
+{
+ ipc_mmio_update_cp_capability(ipc_imem->mmio);
+
+ if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
+ dev_err(ipc_imem->dev, "Failed to get Mux capability.");
+ return -EINVAL;
+ }
+
+ cfg->protocol = ipc_imem->mmio->mux_protocol;
+
+ cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
+ MUX_UL_ON_CREDITS :
+ MUX_UL;
+
+ /* The instance ID is the same as the channel ID because it is reused
+ * by the channel alloc function.
+ */
+ cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
+
+ return 0;
+}
+
+void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
+ unsigned int reset_enable, bool atomic_ctx)
+{
+ union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
+ reset_enable };
+
+ if (atomic_ctx)
+ ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_FEATURE_SET, &prep_args,
+ NULL);
+ else
+ ipc_protocol_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_FEATURE_SET, &prep_args);
+}
+
+/**
+ * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
+{
+ /* Use the TD update timer only in the runtime phase */
+ if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
+ /* trigger the doorbell irq on CP directly. */
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_TD_UPD_TMR_START);
+ return;
+ }
+
+ if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->tdupdate_timer))
+ hrtimer_start(&ipc_imem->tdupdate_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
+void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
+{
+ if (hrtimer_active(hr_timer))
+ hrtimer_cancel(hr_timer);
+}
+
+/**
+ * ipc_imem_adb_timer_start - Starts the ADB timer if not already running.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
+{
+ if (!hrtimer_active(&ipc_imem->adb_timer)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
+ hrtimer_start(&ipc_imem->adb_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
+bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
+{
+ struct ipc_mem_channel *channel;
+ bool hpda_ctrl_pending = false;
+ struct sk_buff_head *ul_list;
+ bool hpda_pending = false;
+ struct ipc_pipe *pipe;
+ int i;
+
+ /* Analyze the uplink pipe of all active channels. */
+ for (i = 0; i < ipc_imem->nr_of_channels; i++) {
+ channel = &ipc_imem->channels[i];
+
+ if (channel->state != IMEM_CHANNEL_ACTIVE)
+ continue;
+
+ pipe = &channel->ul_pipe;
+
+ /* Get the reference to the skbuf accumulator list. */
+ ul_list = &channel->ul_list;
+
+ /* Fill the transfer descriptor with the uplink buffer info. */
+ if (!ipc_imem_check_wwan_ips(channel)) {
+ hpda_ctrl_pending |=
+ ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+ pipe, ul_list);
+ } else {
+ hpda_pending |=
+ ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+ pipe, ul_list);
+ }
+ }
+
+ /* forced HP update needed for non data channels */
+ if (hpda_ctrl_pending) {
+ hpda_pending = false;
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_UL_WRITE_TD);
+ }
+
+ return hpda_pending;
+}
+
+void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
+{
+ int timeout = IPC_MODEM_BOOT_TIMEOUT;
+
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
+
+ /* Trigger the CP interrupt to enter the init state. */
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_INIT);
+ /* Wait for the CP update. */
+ do {
+ if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ ipc_imem->ipc_requested_state) {
+ /* Prepare the MMIO space */
+ ipc_mmio_config(ipc_imem->mmio);
+
+ /* Trigger the CP irq to enter the running state. */
+ ipc_imem->ipc_requested_state =
+ IPC_MEM_DEVICE_IPC_RUNNING;
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_RUNNING);
+
+ return;
+ }
+ msleep(20);
+ } while (--timeout);
+
+ /* timeout */
+ dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
+ ipc_imem_phase_get_string(ipc_imem->phase),
+ ipc_mmio_get_ipc_state(ipc_imem->mmio));
+
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
+}
+
+/* Analyze the packet type and distribute it. */
+static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe, struct sk_buff *skb)
+{
+ u16 port_id;
+
+ if (!skb)
+ return;
+
+ /* An AT/control or IP packet is expected. */
+ switch (pipe->channel->ctype) {
+ case IPC_CTYPE_CTRL:
+ port_id = pipe->channel->channel_id;
+ ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
+ IPC_CB(skb)->mapping,
+ IPC_CB(skb)->direction);
+ if (port_id == IPC_MEM_CTRL_CHL_ID_7)
+ ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
+ skb);
+ else if (ipc_is_trace_channel(ipc_imem, port_id))
+ ipc_trace_port_rx(ipc_imem, skb);
+ else
+ wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
+ skb);
+ break;
+
+ case IPC_CTYPE_WWAN:
+ if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
+ ipc_mux_dl_decode(ipc_imem->mux, skb);
+ break;
+ default:
+ dev_err(ipc_imem->dev, "Invalid channel type");
+ break;
+ }
+}
+
+/* Process the downlink data and pass them to the char or net layer. */
+static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ s32 cnt = 0, processed_td_cnt = 0;
+ struct ipc_mem_channel *channel;
+ u32 head = 0, tail = 0;
+ bool processed = false;
+ struct sk_buff *skb;
+
+ channel = pipe->channel;
+
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
+ &tail);
+ if (pipe->old_tail != tail) {
+ if (pipe->old_tail < tail)
+ cnt = tail - pipe->old_tail;
+ else
+ cnt = pipe->nr_of_entries - pipe->old_tail + tail;
+ }
+
+ processed_td_cnt = cnt;
+
+ /* Process the pending DL TDs of this pipe. */
+ while (cnt--) {
+ skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);
+
+ /* Analyze the packet type and distribute it. */
+ ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
+ }
+
+ /* Try to allocate new empty DL SKBs from head..tail - 1. */
+ while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
+ processed = true;
+
+ if (processed && !ipc_imem_check_wwan_ips(channel)) {
+ /* Force HP update for non IP channels */
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_DL_PROCESS);
+ processed = false;
+
+ /* If Fast Update timer is already running then stop */
+ ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
+ }
+
+ /* Any control channel process will get immediate HP update.
+ * Start Fast update timer only for IP channel if all the TDs were
+ * used in last process.
+ */
+ if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
+ hrtimer_start(&ipc_imem->fast_update_timer,
+ ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
+ }
+
+ if (ipc_imem->app_notify_dl_pend)
+ complete(&ipc_imem->dl_pend_sem);
+}
+
+/* process open uplink pipe */
+static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ struct ipc_mem_channel *channel;
+ u32 tail = 0, head = 0;
+ struct sk_buff *skb;
+ s32 cnt = 0;
+
+ channel = pipe->channel;
+
+ /* Get the current head and tail index of the pipe. */
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
+ &tail);
+
+ if (pipe->old_tail != tail) {
+ if (pipe->old_tail < tail)
+ cnt = tail - pipe->old_tail;
+ else
+ cnt = pipe->nr_of_entries - pipe->old_tail + tail;
+ }
+
+ /* Free UL buffers. */
+ while (cnt--) {
+ skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);
+
+ if (!skb)
+ continue;
+
+ /* If the user app was suspended in uplink direction - blocking
+ * write, resume it.
+ */
+ if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
+ complete(&channel->ul_sem);
+
+ /* Free the skbuf element. */
+ if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
+ if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
+ ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
+ else
+ dev_err(ipc_imem->dev,
+ "OP Type is UL_MUX, unknown if_id %d",
+ channel->if_id);
+ } else {
+ ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
+ }
+ }
+
+ /* For the IP UL pipe, check whether MUX TX needs to be restarted. */
+ if (ipc_imem_check_wwan_ips(pipe->channel))
+ ipc_mux_check_n_restart_tx(ipc_imem->mux);
+
+ if (ipc_imem->app_notify_ul_pend)
+ complete(&ipc_imem->ul_pend_sem);
+}
+
+/* Handle the boot ROM irq: latch the exit code and wake the waiting flash app. */
+static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
+{
+ struct ipc_mem_channel *channel;
+
+ channel = ipc_imem->ipc_devlink->devlink_sio.channel;
+ ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
+ complete(&channel->ul_sem);
+}
+
+/* Execute the UL bundle timer actions, generating the doorbell irq. */
+static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_TD_UPD_TMR);
+ return 0;
+}
+
+/* Consider link power management in the runtime phase. */
+static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
+{
+ /* Link will go down; test pending UL packets. */
+ if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
+ hrtimer_active(&ipc_imem->tdupdate_timer)) {
+ /* Generate the doorbell irq. */
+ ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
+ /* Stop the TD update timer. */
+ ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
+ /* Stop the fast update timer. */
+ ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
+ }
+}
+
+/* Execute startup timer and wait for delayed start (e.g. NAND) */
+static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ /* Update & check the current operation phase. */
+ if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
+ return -EIO;
+
+ if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ IPC_MEM_DEVICE_IPC_UNINIT) {
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
+
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_INIT);
+
+ ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
+ /* reduce period to 100 ms to check for mmio init state */
+ if (!hrtimer_active(&ipc_imem->startup_timer))
+ hrtimer_start(&ipc_imem->startup_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ } else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ IPC_MEM_DEVICE_IPC_INIT) {
+ /* Startup complete - disable timer */
+ ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);
+
+ /* Prepare the MMIO space */
+ ipc_mmio_config(ipc_imem->mmio);
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_RUNNING);
+ }
+
+ return 0;
+}
+
+static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
+{
+ enum hrtimer_restart result = HRTIMER_NORESTART;
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, startup_timer);
+
+ if (ktime_to_ns(ipc_imem->hrtimer_period)) {
+ hrtimer_forward_now(&ipc_imem->startup_timer,
+ ipc_imem->hrtimer_period);
+ result = HRTIMER_RESTART;
+ }
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
+ NULL, 0, false);
+ return result;
+}
+
+/* Get the CP execution stage */
+static enum ipc_mem_exec_stage
+ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
+{
+ return (ipc_imem->phase == IPC_P_RUN &&
+ ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
+ ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
+ ipc_mmio_get_exec_stage(ipc_imem->mmio);
+}
+
+/* Callback to send the modem ready uevent */
+static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ enum ipc_mem_exec_stage exec_stage =
+ ipc_imem_get_exec_stage_buffered(ipc_imem);
+
+ if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
+
+ return 0;
+}
+
+/* This function is executed in a task context via an ipc_worker object,
+ * as the creation or removal of a device can't be done from a tasklet.
+ */
+static void ipc_imem_run_state_worker(struct work_struct *instance)
+{
+ struct ipc_chnl_cfg chnl_cfg_port = { 0 };
+ struct ipc_mux_config mux_cfg;
+ struct iosm_imem *ipc_imem;
+ u8 ctrl_chl_idx = 0;
+ int ret;
+
+ ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
+
+ if (ipc_imem->phase != IPC_P_RUN) {
+ dev_err(ipc_imem->dev,
+ "Modem link down. Exit run state worker.");
+ goto err_out;
+ }
+
+ if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
+ ipc_devlink_deinit(ipc_imem->ipc_devlink);
+
+ ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
+ if (ret < 0)
+ goto err_out;
+
+ ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+ if (!ipc_imem->mux)
+ goto err_out;
+
+ ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
+ if (ret < 0)
+ goto err_ipc_mux_deinit;
+
+ ipc_imem->mux->wwan = ipc_imem->wwan;
+
+ while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
+ if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
+ ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
+
+ if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID &&
+ chnl_cfg_port.wwan_port_type == WWAN_PORT_XMMRPC) {
+ ctrl_chl_idx++;
+ continue;
+ }
+
+ if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
+ chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
+ ctrl_chl_idx++;
+ continue;
+ }
+ if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
+ chnl_cfg_port,
+ IRQ_MOD_OFF);
+ ipc_imem->ipc_port[ctrl_chl_idx] =
+ ipc_port_init(ipc_imem, chnl_cfg_port);
+ }
+ }
+ ctrl_chl_idx++;
+ }
+
+ ipc_debugfs_init(ipc_imem);
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
+ false);
+
+ /* Complete all memory stores before setting bit */
+ smp_mb__before_atomic();
+
+ set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);
+
+ /* Complete all memory stores after setting bit */
+ smp_mb__after_atomic();
+
+ return;
+
+err_ipc_mux_deinit:
+ ipc_mux_deinit(ipc_imem->mux);
+err_out:
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
+}
+
+static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
+{
+ enum ipc_mem_device_ipc_state curr_ipc_status;
+ enum ipc_phase old_phase, phase;
+ bool retry_allocation = false;
+ bool ul_pending = false;
+ int i;
+
+ if (irq != IMEM_IRQ_DONT_CARE)
+ ipc_imem->ev_irq_pending[irq] = false;
+
+ /* Get the internal phase. */
+ old_phase = ipc_imem->phase;
+
+ if (old_phase == IPC_P_OFF_REQ) {
+ dev_dbg(ipc_imem->dev,
+ "[%s]: Ignoring MSI. Deinit sequence in progress!",
+ ipc_imem_phase_get_string(old_phase));
+ return;
+ }
+
+ /* Update the phase controlled by CP. */
+ phase = ipc_imem_phase_update(ipc_imem);
+
+ switch (phase) {
+ case IPC_P_RUN:
+ if (!ipc_imem->enter_runtime) {
+ /* Execute the transition from flash/boot to runtime. */
+ ipc_imem->enter_runtime = 1;
+
+ /* allow device to sleep, default value is
+ * IPC_HOST_SLEEP_ENTER_SLEEP
+ */
+ ipc_imem_msg_send_device_sleep(ipc_imem,
+ ipc_imem->device_sleep);
+
+ ipc_imem_msg_send_feature_set(ipc_imem,
+ IPC_MEM_INBAND_CRASH_SIG,
+ true);
+ }
+
+ curr_ipc_status =
+ ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);
+
+ /* check ipc_status change */
+ if (ipc_imem->ipc_status != curr_ipc_status) {
+ ipc_imem->ipc_status = curr_ipc_status;
+
+ if (ipc_imem->ipc_status ==
+ IPC_MEM_DEVICE_IPC_RUNNING) {
+ schedule_work(&ipc_imem->run_state_worker);
+ }
+ }
+
+ /* Consider power management in the runtime phase. */
+ ipc_imem_slp_control_exec(ipc_imem);
+ break; /* Continue with skbuf processing. */
+
+ /* Unexpected phases. */
+ case IPC_P_OFF:
+ case IPC_P_OFF_REQ:
+ dev_err(ipc_imem->dev, "confused phase %s",
+ ipc_imem_phase_get_string(phase));
+ return;
+
+ case IPC_P_PSI:
+ if (old_phase != IPC_P_ROM)
+ break;
+
+ fallthrough;
+ /* On CP the PSI phase is already active. */
+
+ case IPC_P_ROM:
+ /* Before CP ROM driver starts the PSI image, it sets
+ * the exit_code field on the doorbell scratchpad and
+ * triggers the irq.
+ */
+ ipc_imem_rom_irq_exec(ipc_imem);
+ return;
+
+ default:
+ break;
+ }
+
+ /* process message ring */
+ ipc_protocol_msg_process(ipc_imem, irq);
+
+ /* process all open pipes */
+ for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
+ struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
+ struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;
+
+ if (dl_pipe->is_open &&
+ (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
+ ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);
+
+ if (dl_pipe->nr_of_queued_entries == 0)
+ retry_allocation = true;
+ }
+
+ if (ul_pipe->is_open)
+ ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
+ }
+
+ /* Try to generate new ADB or ADGH. */
+ if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
+ ipc_imem_td_update_timer_start(ipc_imem);
+ if (ipc_imem->mux->protocol == MUX_AGGREGATION)
+ ipc_imem_adb_timer_start(ipc_imem);
+ }
+
+ /* Continue the send procedure with accumulated SIO or NETIF packets.
+ * Reset the debounce flags.
+ */
+ ul_pending |= ipc_imem_ul_write_td(ipc_imem);
+
+ /* if UL data is pending restart TD update timer */
+ if (ul_pending) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->tdupdate_timer))
+ hrtimer_start(&ipc_imem->tdupdate_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+
+ /* If CP has executed the transition
+ * from IPC_INIT to IPC_RUNNING in the PSI
+ * phase, wake up the flash app to open the pipes.
+ */
+ if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
+ ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
+ ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ IPC_MEM_DEVICE_IPC_RUNNING) {
+ complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
+ }
+
+ /* Reset the expected CP state. */
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;
+
+ if (retry_allocation) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->td_alloc_timer))
+ hrtimer_start(&ipc_imem->td_alloc_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
+/* Callback by tasklet for handling interrupt events. */
+static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
+ size_t size)
+{
+ ipc_imem_handle_irq(ipc_imem, arg);
+
+ return 0;
+}
+
+void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
+{
+ /* start doorbell irq delay timer if UL is pending */
+ if (ipc_imem_ul_write_td(ipc_imem))
+ ipc_imem_td_update_timer_start(ipc_imem);
+}
+
+/* Check the execution stage and update the AP phase */
+static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
+ enum ipc_mem_exec_stage stage)
+{
+ switch (stage) {
+ case IPC_MEM_EXEC_STAGE_BOOT:
+ if (ipc_imem->phase != IPC_P_ROM) {
+ /* Send this event only once */
+ ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
+ }
+
+ ipc_imem->phase = IPC_P_ROM;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_PSI:
+ ipc_imem->phase = IPC_P_PSI;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_EBL:
+ ipc_imem->phase = IPC_P_EBL;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_RUN:
+ if (ipc_imem->phase != IPC_P_RUN &&
+ ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
+ }
+ ipc_imem->phase = IPC_P_RUN;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_CRASH:
+ if (ipc_imem->phase != IPC_P_CRASH)
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);
+
+ ipc_imem->phase = IPC_P_CRASH;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_CD_READY:
+ if (ipc_imem->phase != IPC_P_CD_READY)
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
+ ipc_imem->phase = IPC_P_CD_READY;
+ break;
+
+ default:
+ /* unknown exec stage:
+ * assume that link is down and send info to listeners
+ */
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
+ break;
+ }
+
+ return ipc_imem->phase;
+}
+
+/* Send msg to device to open pipe */
+static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ union ipc_msg_prep_args prep_args = {
+ .pipe_open.pipe = pipe,
+ };
+
+ if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
+ pipe->is_open = true;
+
+ return pipe->is_open;
+}
+
+/* Allocates the TDs for the given pipe along with firing HP update DB. */
+static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct ipc_pipe *dl_pipe = msg;
+ bool processed = false;
+ int i;
+
+ for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
+ processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);
+
+ /* Trigger the doorbell irq to inform CP that new downlink buffers are
+ * available.
+ */
+ if (processed)
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);
+
+ return 0;
+}
+
+static enum hrtimer_restart
+ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, tdupdate_timer);
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
+/* Get the CP execution state and map it to the AP phase. */
+enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
+{
+ enum ipc_mem_exec_stage exec_stage =
+ ipc_imem_get_exec_stage_buffered(ipc_imem);
+ /* If the CP stage is undef, return the internal precalculated phase. */
+ return ipc_imem->phase == IPC_P_OFF_REQ ?
+ ipc_imem->phase :
+ ipc_imem_phase_update_check(ipc_imem, exec_stage);
+}
+
+const char *ipc_imem_phase_get_string(enum ipc_phase phase)
+{
+ switch (phase) {
+ case IPC_P_RUN:
+ return "A-RUN";
+
+ case IPC_P_OFF:
+ return "A-OFF";
+
+ case IPC_P_ROM:
+ return "A-ROM";
+
+ case IPC_P_PSI:
+ return "A-PSI";
+
+ case IPC_P_EBL:
+ return "A-EBL";
+
+ case IPC_P_CRASH:
+ return "A-CRASH";
+
+ case IPC_P_CD_READY:
+ return "A-CD_READY";
+
+ case IPC_P_OFF_REQ:
+ return "A-OFF_REQ";
+
+ default:
+ return "A-???";
+ }
+}
+
+void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
+{
+ union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };
+
+ pipe->is_open = false;
+ ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
+ &prep_args);
+
+ ipc_imem_pipe_cleanup(ipc_imem, pipe);
+}
+
+void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
+{
+ struct ipc_mem_channel *channel;
+
+ if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
+ dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
+ return;
+ }
+
+ channel = &ipc_imem->channels[channel_id];
+
+ if (channel->state == IMEM_CHANNEL_FREE) {
+ dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
+ channel_id, channel->state);
+ return;
+ }
+
+ /* Free only the channel id in the CP power off mode. */
+ if (channel->state == IMEM_CHANNEL_RESERVED)
+ /* Release only the channel id. */
+ goto channel_free;
+
+ if (ipc_imem->phase == IPC_P_RUN) {
+ ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
+ }
+
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+
+channel_free:
+ ipc_imem_channel_free(channel);
+}
+
+struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
+ int channel_id, u32 db_id)
+{
+ struct ipc_mem_channel *channel;
+
+ if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
+ dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
+ return NULL;
+ }
+
+ channel = &ipc_imem->channels[channel_id];
+
+ channel->state = IMEM_CHANNEL_ACTIVE;
+
+ if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
+ goto ul_pipe_err;
+
+ if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
+ goto dl_pipe_err;
+
+ /* Allocate the downlink buffers in tasklet context. */
+ if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
+ &channel->dl_pipe, 0, false)) {
+ dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
+ goto task_failed;
+ }
+
+ /* Active channel. */
+ return channel;
+task_failed:
+ ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
+dl_pipe_err:
+ ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
+ul_pipe_err:
+ ipc_imem_channel_free(channel);
+ return NULL;
+}
+
+void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
+{
+ ipc_protocol_suspend(ipc_imem->ipc_protocol);
+}
+
+void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
+{
+ ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
+}
+
+void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
+{
+ enum ipc_mem_exec_stage stage;
+
+ if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
+ stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ ipc_imem_phase_update_check(ipc_imem, stage);
+ }
+}
+
+void ipc_imem_channel_free(struct ipc_mem_channel *channel)
+{
+ /* Reset dynamic channel elements. */
+ channel->state = IMEM_CHANNEL_FREE;
+}
+
+int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
+ enum ipc_ctype ctype)
+{
+ struct ipc_mem_channel *channel;
+ int i;
+
+ /* Find channel of given type/index */
+ for (i = 0; i < ipc_imem->nr_of_channels; i++) {
+ channel = &ipc_imem->channels[i];
+ if (channel->ctype == ctype && channel->index == index)
+ break;
+ }
+
+ if (i >= ipc_imem->nr_of_channels) {
+ dev_dbg(ipc_imem->dev,
+ "no channel definition for index=%d ctype=%d", index,
+ ctype);
+ return -ECHRNG;
+ }
+
+ if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
+ dev_dbg(ipc_imem->dev, "channel is in use");
+ return -EBUSY;
+ }
+
+ if (channel->ctype == IPC_CTYPE_WWAN &&
+ index == IPC_MEM_MUX_IP_CH_IF_ID)
+ channel->if_id = index;
+
+ channel->channel_id = index;
+ channel->state = IMEM_CHANNEL_RESERVED;
+
+ return i;
+}
+
+void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
+{
+ struct ipc_mem_channel *channel;
+
+ if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
+ chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
+ dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
+ chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
+ return;
+ }
+
+ if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
+ dev_err(ipc_imem->dev, "too many channels");
+ return;
+ }
+
+ channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
+ channel->channel_id = ipc_imem->nr_of_channels;
+ channel->ctype = ctype;
+ channel->index = chnl_cfg.id;
+ channel->net_err_count = 0;
+ channel->state = IMEM_CHANNEL_FREE;
+ ipc_imem->nr_of_channels++;
+
+ ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
+ IRQ_MOD_OFF);
+
+ skb_queue_head_init(&channel->ul_list);
+
+ init_completion(&channel->ul_sem);
+}
+
+void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
+{
+ struct ipc_mem_channel *channel;
+
+ if (id < 0 || id >= ipc_imem->nr_of_channels) {
+ dev_err(ipc_imem->dev, "invalid channel id %d", id);
+ return;
+ }
+
+ channel = &ipc_imem->channels[id];
+
+ if (channel->state != IMEM_CHANNEL_FREE &&
+ channel->state != IMEM_CHANNEL_RESERVED) {
+ dev_err(ipc_imem->dev, "invalid channel state %d",
+ channel->state);
+ return;
+ }
+
+ channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
+ channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
+ channel->ul_pipe.is_open = false;
+ channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
+ channel->ul_pipe.channel = channel;
+ channel->ul_pipe.dir = IPC_MEM_DIR_UL;
+ channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
+ channel->ul_pipe.irq_moderation = irq_moderation;
+ channel->ul_pipe.buf_size = 0;
+
+ channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
+ channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
+ channel->dl_pipe.is_open = false;
+ channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
+ channel->dl_pipe.channel = channel;
+ channel->dl_pipe.dir = IPC_MEM_DIR_DL;
+ channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
+ channel->dl_pipe.irq_moderation = irq_moderation;
+ channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
+}
+
+static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
+{
+ int i;
+
+ for (i = 0; i < ipc_imem->nr_of_channels; i++) {
+ struct ipc_mem_channel *channel;
+
+ channel = &ipc_imem->channels[i];
+
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
+
+ ipc_imem_channel_free(channel);
+ }
+}
+
+void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
+{
+ struct sk_buff *skb;
+
+ /* Force pipe to closed state also when not explicitly closed through
+ * ipc_imem_pipe_close()
+ */
+ pipe->is_open = false;
+
+ /* Empty the uplink skb accumulator. */
+ while ((skb = skb_dequeue(&pipe->channel->ul_list)))
+ ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
+
+ ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
+}
+
+/* Send IPC protocol uninit to the modem when Link is active. */
+static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
+{
+ int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
+ enum ipc_mem_device_ipc_state ipc_state;
+
+ /* When the PCIe link is up, set IPC_UNINIT on the modem;
+ * otherwise ignore it when the PCIe link is down.
+ */
+ if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
+ /* set modem to UNINIT
+ * (in case we want to reload the AP driver without resetting
+ * the modem)
+ */
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_UNINIT);
+ ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
+
+ /* Wait for maximum 30ms to allow the Modem to uninitialize the
+ * protocol.
+ */
+ while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
+ (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
+ (timeout > 0)) {
+ usleep_range(1000, 1250);
+ timeout--;
+ ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
+ }
+ }
+}
+
+void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
+{
+ ipc_imem->phase = IPC_P_OFF_REQ;
+
+ /* forward MDM_NOT_READY to listeners */
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
+
+ hrtimer_cancel(&ipc_imem->td_alloc_timer);
+ hrtimer_cancel(&ipc_imem->tdupdate_timer);
+ hrtimer_cancel(&ipc_imem->fast_update_timer);
+ hrtimer_cancel(&ipc_imem->startup_timer);
+
+ /* cancel the run state work */
+ cancel_work_sync(&ipc_imem->run_state_worker);
+
+ if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
+ ipc_mux_deinit(ipc_imem->mux);
+ ipc_debugfs_deinit(ipc_imem);
+ ipc_wwan_deinit(ipc_imem->wwan);
+ ipc_port_deinit(ipc_imem->ipc_port);
+ }
+
+ if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
+ ipc_devlink_deinit(ipc_imem->ipc_devlink);
+
+ ipc_imem_device_ipc_uninit(ipc_imem);
+ ipc_imem_channel_reset(ipc_imem);
+
+ ipc_protocol_deinit(ipc_imem->ipc_protocol);
+ ipc_task_deinit(ipc_imem->ipc_task);
+
+ kfree(ipc_imem->ipc_task);
+ kfree(ipc_imem->mmio);
+
+ ipc_imem->phase = IPC_P_OFF;
+}
+
+/* After CP has unblocked the PCIe link, save the start address of the doorbell
+ * scratchpad and prepare the shared memory region. If the flashing to RAM
+ * procedure shall be executed, copy the chip information from the doorbell
+ * scratchpad to the application buffer and wake up the flash app.
+ */
+static int ipc_imem_config(struct iosm_imem *ipc_imem)
+{
+ enum ipc_phase phase;
+
+ /* Initialize the semaphore for the blocking read UL/DL transfer. */
+ init_completion(&ipc_imem->ul_pend_sem);
+
+ init_completion(&ipc_imem->dl_pend_sem);
+
+ /* clear internal flags */
+ ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
+ ipc_imem->enter_runtime = 0;
+
+ phase = ipc_imem_phase_update(ipc_imem);
+
+ /* CP shall be in either the power off or the power on phase. */
+ switch (phase) {
+ case IPC_P_ROM:
+ ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
+ /* poll execution stage (for delayed start, e.g. NAND) */
+ if (!hrtimer_active(&ipc_imem->startup_timer))
+ hrtimer_start(&ipc_imem->startup_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ return 0;
+
+ case IPC_P_PSI:
+ case IPC_P_EBL:
+ case IPC_P_RUN:
+ /* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;
+
+ /* Verify the expected initial state. */
+ if (ipc_imem->ipc_requested_state ==
+ ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
+ ipc_imem_ipc_init_check(ipc_imem);
+
+ return 0;
+ }
+ dev_err(ipc_imem->dev,
+ "ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
+ ipc_mmio_get_ipc_state(ipc_imem->mmio));
+ break;
+ case IPC_P_CRASH:
+ case IPC_P_CD_READY:
+ dev_dbg(ipc_imem->dev,
+ "Modem is in phase %d, reset Modem to collect CD",
+ phase);
+ return 0;
+ default:
+ dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
+ break;
+ }
+
+ complete(&ipc_imem->dl_pend_sem);
+ complete(&ipc_imem->ul_pend_sem);
+ ipc_imem->phase = IPC_P_OFF;
+ return -EIO;
+}
+
+/* Pass the dev ptr to the shared memory driver and request the entry points */
+struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
+ void __iomem *mmio, struct device *dev)
+{
+ struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
+ enum ipc_mem_exec_stage stage;
+
+ if (!ipc_imem)
+ return NULL;
+
+ /* Save the device address. */
+ ipc_imem->pcie = pcie;
+ ipc_imem->dev = dev;
+
+ ipc_imem->pci_device_id = device_id;
+
+ ipc_imem->cp_version = 0;
+ ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
+
+ /* Reset the max number of configured channels */
+ ipc_imem->nr_of_channels = 0;
+
+ /* allocate IPC MMIO */
+ ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
+ if (!ipc_imem->mmio) {
+ dev_err(ipc_imem->dev, "failed to initialize mmio region");
+ goto mmio_init_fail;
+ }
+
+ ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
+ GFP_KERNEL);
+
+ /* Create tasklet for event handling */
+ if (!ipc_imem->ipc_task)
+ goto ipc_task_fail;
+
+ if (ipc_task_init(ipc_imem->ipc_task))
+ goto ipc_task_init_fail;
+
+ ipc_imem->ipc_task->dev = ipc_imem->dev;
+
+ INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);
+
+ ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);
+
+ if (!ipc_imem->ipc_protocol)
+ goto protocol_init_fail;
+
+ /* The phase is set to power off. */
+ ipc_imem->phase = IPC_P_OFF;
+
+ hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
+
+ hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
+
+ hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
+
+ hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+
+ hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
+
+ if (ipc_imem_config(ipc_imem)) {
+ dev_err(ipc_imem->dev, "failed to initialize the imem");
+ goto imem_config_fail;
+ }
+
+ stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
+ /* Alloc and Register devlink */
+ ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
+ if (!ipc_imem->ipc_devlink) {
+ dev_err(ipc_imem->dev, "Devlink register failed");
+ goto imem_config_fail;
+ }
+
+ if (ipc_flash_link_establish(ipc_imem))
+ goto devlink_channel_fail;
+
+ set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
+ }
+ return ipc_imem;
+devlink_channel_fail:
+ ipc_devlink_deinit(ipc_imem->ipc_devlink);
+imem_config_fail:
+ hrtimer_cancel(&ipc_imem->td_alloc_timer);
+ hrtimer_cancel(&ipc_imem->fast_update_timer);
+ hrtimer_cancel(&ipc_imem->tdupdate_timer);
+ hrtimer_cancel(&ipc_imem->startup_timer);
+protocol_init_fail:
+ cancel_work_sync(&ipc_imem->run_state_worker);
+ ipc_task_deinit(ipc_imem->ipc_task);
+ipc_task_init_fail:
+ kfree(ipc_imem->ipc_task);
+ipc_task_fail:
+ kfree(ipc_imem->mmio);
+mmio_init_fail:
+ kfree(ipc_imem);
+ return NULL;
+}
+
+void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
+{
+ /* Debounce IPC_EV_IRQ. */
+ if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
+ ipc_imem->ev_irq_pending[irq] = true;
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
+ NULL, 0, false);
+ }
+}
+
+void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
+{
+ ipc_imem->td_update_timer_suspended = suspend;
+}
+
+/* Verify the CP execution state, copy the chip info,
+ * change the execution phase to ROM
+ */
+static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
+ int arg, void *msg,
+ size_t msgsize)
+{
+ enum ipc_mem_exec_stage stage;
+ struct sk_buff *skb;
+ int rc = -EINVAL;
+ size_t size;
+
+ /* Test the CP execution state. */
+ stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
+ dev_err(ipc_imem->dev,
+ "Execution_stage: expected BOOT, received = %X", stage);
+ goto trigger_chip_info_fail;
+ }
+ /* Allocate a new sk buf for the chip info. */
+ size = ipc_imem->mmio->chip_info_size;
+ if (size > IOSM_CHIP_INFO_SIZE_MAX)
+ goto trigger_chip_info_fail;
+
+ skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
+ if (!skb) {
+ dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
+ rc = -ENOMEM;
+ goto trigger_chip_info_fail;
+ }
+ /* Copy the chip info characters into the ipc_skb. */
+ ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
+ /* First change to the ROM boot phase. */
+ dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
+ ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
+ ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
+ rc = 0;
+trigger_chip_info_fail:
+ return rc;
+}
+
+int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
+{
+ return ipc_task_queue_send_task(ipc_imem,
+ ipc_imem_devlink_trigger_chip_info_cb,
+ 0, NULL, 0, true);
+}
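
A pattern worth noting in iosm_ipc_imem.c: every hrtimer callback above (startup, TD update, fast update, TD allocation, ADB) only posts a task via ipc_task_queue_send_task() and returns HRTIMER_NORESTART; the actual doorbell or buffer work then runs later in the driver's tasklet-based task queue. Below is a minimal, self-contained sketch of that deferral pattern, assuming generic demo_* names and a plain tasklet in place of the driver's ipc_task queue.

#include <linux/container_of.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

struct demo_ctx {
	struct hrtimer timer;
	struct tasklet_struct tl;
};

/* Runs in tasklet (softirq) context: the safe place for the actual work,
 * e.g. ringing a doorbell or refilling DL buffers.
 */
static void demo_tasklet_fn(struct tasklet_struct *t)
{
	struct demo_ctx *ctx = from_tasklet(ctx, t, tl);

	(void)ctx;	/* do the deferred work here */
}

/* Runs in hrtimer (hard-irq) context: only defer, never touch protocol state. */
static enum hrtimer_restart demo_timer_cb(struct hrtimer *hr)
{
	struct demo_ctx *ctx = container_of(hr, struct demo_ctx, timer);

	tasklet_schedule(&ctx->tl);
	return HRTIMER_NORESTART;	/* one-shot; re-armed on demand */
}

static void demo_start(struct demo_ctx *ctx)
{
	tasklet_setup(&ctx->tl, demo_tasklet_fn);
	hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ctx->timer.function = demo_timer_cb;
	hrtimer_start(&ctx->timer, ktime_set(0, 100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}

The one-shot style also explains the hrtimer_active() checks and ipc_imem_hrtimer_stop() calls that surround every hrtimer_start() in the driver: the timers are re-armed on demand from the IRQ or tasklet path rather than restarting themselves.
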
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
new file mode 100644
index 0000000000..5664ac507c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -0,0 +1,590 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_IMEM_H
+#define IOSM_IPC_IMEM_H
+
+#include <linux/skbuff.h>
+
+#include "iosm_ipc_mmio.h"
+#include "iosm_ipc_pcie.h"
+#include "iosm_ipc_uevent.h"
+#include "iosm_ipc_wwan.h"
+#include "iosm_ipc_task_queue.h"
+
+struct ipc_chnl_cfg;
+
+/* IRQ moderation in usec */
+#define IRQ_MOD_OFF 0
+#define IRQ_MOD_NET 1000
+#define IRQ_MOD_TRC 4000
+
+/* Either the PSI image is accepted by CP or the suspended flash tool is woken
+ * and informed that the CP ROM driver is not ready to process the PSI image.
+ * unit : milliseconds
+ */
+#define IPC_PSI_TRANSFER_TIMEOUT 3000
+
+/* Timeout for the modem to boot up to the
+ * IPC_MEM_DEVICE_IPC_INIT state, polled in 20 msec steps.
+ * unit : number of 20 msec polling intervals (500 * msleep(20))
+ */
+#define IPC_MODEM_BOOT_TIMEOUT 500
+
+/* Wait timeout for ipc status reflects IPC_MEM_DEVICE_IPC_UNINIT
+ * unit : milliseconds
+ */
+#define IPC_MODEM_UNINIT_TIMEOUT_MS 30
+
+/* Pending time for processing data.
+ * unit : milliseconds
+ */
+#define IPC_PEND_DATA_TIMEOUT 500
+
+/* The timeout in milliseconds for application to wait for remote time. */
+#define IPC_REMOTE_TS_TIMEOUT_MS 10
+
+/* Timeout for TD allocation retry.
+ * unit : milliseconds
+ */
+#define IPC_TD_ALLOC_TIMER_PERIOD_MS 100
+
+/* Host sleep target is host */
+#define IPC_HOST_SLEEP_HOST 0
+
+/* Host sleep target is device */
+#define IPC_HOST_SLEEP_DEVICE 1
+
+/* Sleep message, target host: AP enters sleep / target device: CP is
+ * allowed to enter sleep and shall use the host sleep protocol
+ */
+#define IPC_HOST_SLEEP_ENTER_SLEEP 0
+
+/* Sleep_message, target host: AP exits sleep / target device: CP is
+ * NOT allowed to enter sleep
+ */
+#define IPC_HOST_SLEEP_EXIT_SLEEP 1
+
+#define IMEM_IRQ_DONT_CARE (-1)
+
+#define IPC_MEM_MAX_CHANNELS 8
+
+#define IPC_MEM_MUX_IP_SESSION_ENTRIES 8
+
+#define IPC_MEM_MUX_IP_CH_IF_ID 0
+
+#define TD_UPDATE_DEFAULT_TIMEOUT_USEC 1900
+
+#define FORCE_UPDATE_DEFAULT_TIMEOUT_USEC 500
+
+/* Sleep_message, target host: not applicable / target device: CP is
+ * allowed to enter sleep and shall NOT use the device sleep protocol
+ */
+#define IPC_HOST_SLEEP_ENTER_SLEEP_NO_PROTOCOL 2
+
+/* in_band_crash_signal IPC_MEM_INBAND_CRASH_SIG
+ * Modem crash notification configuration. If this value is non-zero then
+ * a FEATURE_SET message will be sent to the Modem; as a result, the Modem will
+ * signal a crash via the Execution Stage register. If this value is zero, the
+ * Modem will use an out-of-band method to notify about its crash.
+ */
+#define IPC_MEM_INBAND_CRASH_SIG 1
+
+/* Extra headroom to be allocated for DL SKBs to allow addition of Ethernet
+ * header
+ */
+#define IPC_MEM_DL_ETH_OFFSET 16
+
+#define IPC_CB(skb) ((struct ipc_skb_cb *)((skb)->cb))
+#define IOSM_CHIP_INFO_SIZE_MAX 100
+
+#define FULLY_FUNCTIONAL 0
+#define IOSM_DEVLINK_INIT 1
+
+/* List of the supported UL/DL pipes. */
+enum ipc_mem_pipes {
+ IPC_MEM_PIPE_0 = 0,
+ IPC_MEM_PIPE_1,
+ IPC_MEM_PIPE_2,
+ IPC_MEM_PIPE_3,
+ IPC_MEM_PIPE_4,
+ IPC_MEM_PIPE_5,
+ IPC_MEM_PIPE_6,
+ IPC_MEM_PIPE_7,
+ IPC_MEM_PIPE_8,
+ IPC_MEM_PIPE_9,
+ IPC_MEM_PIPE_10,
+ IPC_MEM_PIPE_11,
+ IPC_MEM_PIPE_12,
+ IPC_MEM_PIPE_13,
+ IPC_MEM_PIPE_14,
+ IPC_MEM_PIPE_15,
+ IPC_MEM_PIPE_16,
+ IPC_MEM_PIPE_17,
+ IPC_MEM_PIPE_18,
+ IPC_MEM_PIPE_19,
+ IPC_MEM_PIPE_20,
+ IPC_MEM_PIPE_21,
+ IPC_MEM_PIPE_22,
+ IPC_MEM_PIPE_23,
+ IPC_MEM_MAX_PIPES
+};
+
+/* Enum defining channel states. */
+enum ipc_channel_state {
+ IMEM_CHANNEL_FREE,
+ IMEM_CHANNEL_RESERVED,
+ IMEM_CHANNEL_ACTIVE,
+ IMEM_CHANNEL_CLOSING,
+};
+
+/**
+ * enum ipc_ctype - Enum defining supported channel type needed for control
+ * /IP traffic.
+ * @IPC_CTYPE_WWAN: Used for IP traffic
+ * @IPC_CTYPE_CTRL: Used for Control Communication
+ */
+enum ipc_ctype {
+ IPC_CTYPE_WWAN,
+ IPC_CTYPE_CTRL,
+};
+
+/* Pipe direction. */
+enum ipc_mem_pipe_dir {
+ IPC_MEM_DIR_UL,
+ IPC_MEM_DIR_DL,
+};
+
+/* HP update identifier. To be used as data for ipc_cp_irq_hpda_update() */
+enum ipc_hp_identifier {
+ IPC_HP_MR = 0,
+ IPC_HP_PM_TRIGGER,
+ IPC_HP_WAKEUP_SPEC_TMR,
+ IPC_HP_TD_UPD_TMR_START,
+ IPC_HP_TD_UPD_TMR,
+ IPC_HP_FAST_TD_UPD_TMR,
+ IPC_HP_UL_WRITE_TD,
+ IPC_HP_DL_PROCESS,
+ IPC_HP_NET_CHANNEL_INIT,
+ IPC_HP_CDEV_OPEN,
+};
+
+/**
+ * struct ipc_pipe - Structure for Pipe.
+ * @tdr_start: Ipc private protocol Transfer Descriptor Ring
+ * @channel: Id of the sio device, set by imem_sio_open,
+ * needed to pass DL char to the user terminal
+ * @skbr_start: Circular buffer for skbuf and the buffer
+ * reference in a tdr_start entry.
+ * @phy_tdr_start: Transfer descriptor start address
+ * @old_head: last head pointer reported to CP.
+ * @old_tail: AP read position before CP moves the read
+ * position to write/head. If CP has consumed the
+ * buffers, AP has to free the skbuf starting at
+ * tdr_start[old_tail].
+ * @nr_of_entries: Number of elements of skb_start and tdr_start.
+ * @max_nr_of_queued_entries: Maximum number of queued entries in TDR
+ * @accumulation_backoff: Accumulation in usec for accumulation
+ * backoff (0 = no acc backoff)
+ * @irq_moderation: timer in usec for irq_moderation
+ * (0=no irq moderation)
+ * @pipe_nr: Pipe identification number
+ * @irq: Interrupt vector
+ * @dir: Direction of data stream in pipe
+ * @buf_size: Buffer size (in bytes) for preallocated
+ * buffers (for DL pipes)
+ * @nr_of_queued_entries: Queued number of entries
+ * @is_open: Check for open pipe status
+ */
+struct ipc_pipe {
+ struct ipc_protocol_td *tdr_start;
+ struct ipc_mem_channel *channel;
+ struct sk_buff **skbr_start;
+ dma_addr_t phy_tdr_start;
+ u32 old_head;
+ u32 old_tail;
+ u32 nr_of_entries;
+ u32 max_nr_of_queued_entries;
+ u32 accumulation_backoff;
+ u32 irq_moderation;
+ u32 pipe_nr;
+ u32 irq;
+ enum ipc_mem_pipe_dir dir;
+ u32 buf_size;
+ u16 nr_of_queued_entries;
+ u8 is_open:1;
+};
+
+/**
+ * struct ipc_mem_channel - Structure for Channel.
+ * @channel_id: Instance of the channel list and is returned to the user
+ * at the end of the open operation.
+ * @ctype: Control or netif channel.
+ * @index: unique index per ctype
+ * @ul_pipe: pipe objects
+ * @dl_pipe: pipe objects
+ * @if_id: Interface ID
+ * @net_err_count: Number of downlink errors returned by ipc_wwan_receive
+ * interface at the entry point of the IP stack.
+ * @state: Free, reserved or busy (in use).
+ * @ul_sem: Needed for the blocking write or uplink transfer.
+ * @ul_list: Uplink accumulator which is filled by the uplink
+ * char app or IP stack. The socket buffer pointer are
+ * added to the descriptor list in the kthread context.
+ */
+struct ipc_mem_channel {
+ int channel_id;
+ enum ipc_ctype ctype;
+ int index;
+ struct ipc_pipe ul_pipe;
+ struct ipc_pipe dl_pipe;
+ int if_id;
+ u32 net_err_count;
+ enum ipc_channel_state state;
+ struct completion ul_sem;
+ struct sk_buff_head ul_list;
+};
+
+/**
+ * enum ipc_phase - Different AP and CP phases.
+ * The enums defined after "IPC_P_ROM" and before
+ * "IPC_P_RUN" indicates the operating state where CP can
+ * respond to any requests. So while introducing new phase
+ * this shall be taken into consideration.
+ * @IPC_P_OFF: On the host PC, the PCIe device link settings are known
+ * about the combined power on. The PC is running, the driver
+ * is loaded and CP is in power off mode. The PCIe bus
+ * driver calls the device power mode D3hot. In this phase
+ * the driver polls the device until the device is in
+ * the power on state and signals the power mode D0.
+ * @IPC_P_OFF_REQ: The intermediate phase between cleanup activity starts
+ * and ends.
+ * @IPC_P_CRASH: The phase indicating CP crash
+ * @IPC_P_CD_READY: The phase indicating CP core dump is ready
+ * @IPC_P_ROM: After power on, CP starts in ROM mode and the IPC ROM
+ * driver is waiting 150 ms for the AP active notification
+ * saved in the PCI link status register.
+ * @IPC_P_PSI: Primary signed image download phase
+ * @IPC_P_EBL: Extended bootloader phase
+ * @IPC_P_RUN: The phase after flashing to RAM is the RUNTIME phase.
+ */
+enum ipc_phase {
+ IPC_P_OFF,
+ IPC_P_OFF_REQ,
+ IPC_P_CRASH,
+ IPC_P_CD_READY,
+ IPC_P_ROM,
+ IPC_P_PSI,
+ IPC_P_EBL,
+ IPC_P_RUN,
+};
+
+/**
+ * struct iosm_imem - Current state of the IPC shared memory.
+ * @mmio: mmio instance to access CP MMIO area /
+ * doorbell scratchpad.
+ * @ipc_protocol: IPC Protocol instance
+ * @ipc_task: Task for entry into ipc task queue
+ * @wwan: WWAN device pointer
+ * @mux: IP Data multiplexing state.
+ * @ipc_port: IPC PORT data structure pointer
+ * @pcie: IPC PCIe
+ * @trace: IPC trace data structure pointer
+ * @dev: Pointer to device structure
+ * @ipc_requested_state: Expected IPC state on CP.
+ * @channels: Channel list with UL/DL pipe pairs.
+ * @ipc_devlink: IPC Devlink data structure pointer
+ * @ipc_status: local ipc_status
+ * @nr_of_channels: number of configured channels
+ * @startup_timer: startup timer for NAND support.
+ * @hrtimer_period: Hr timer period
+ * @tdupdate_timer: Delay the TD update doorbell.
+ * @fast_update_timer: forced head pointer update delay timer.
+ * @td_alloc_timer: Timer for DL pipe TD allocation retry
+ * @adb_timer: Timer for finishing the ADB.
+ * @rom_exit_code: Mapped boot rom exit code.
+ * @enter_runtime: 1 means the transition to runtime phase was
+ * executed.
+ * @ul_pend_sem: Semaphore to wait/complete of UL TDs
+ * before closing pipe.
+ * @app_notify_ul_pend: Signal app if UL TD is pending
+ * @dl_pend_sem: Semaphore to wait/complete of DL TDs
+ * before closing pipe.
+ * @app_notify_dl_pend: Signal app if DL TD is pending
+ * @phase: Operating phase like runtime.
+ * @pci_device_id: Device ID
+ * @cp_version: CP version
+ * @device_sleep: Device sleep state
+ * @run_state_worker: Pointer to worker component for device
+ * setup operations to be called when modem
+ * reaches RUN state
+ * @ev_irq_pending: 0 means inform the IPC tasklet to
+ * process the irq actions.
+ * @flag: Flag to monitor the state of driver
+ * @td_update_timer_suspended: if true then td update timer suspend
+ * @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
+ * @reset_det_n: Reset detect flag
+ * @pcie_wake_n: PCIe wake flag
+ * @debugfs_wwan_dir: WWAN Debug FS directory entry
+ * @debugfs_dir: Debug FS directory for driver-specific entries
+ */
+struct iosm_imem {
+ struct iosm_mmio *mmio;
+ struct iosm_protocol *ipc_protocol;
+ struct ipc_task *ipc_task;
+ struct iosm_wwan *wwan;
+ struct iosm_mux *mux;
+ struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS];
+ struct iosm_pcie *pcie;
+#ifdef CONFIG_WWAN_DEBUGFS
+ struct iosm_trace *trace;
+#endif
+ struct device *dev;
+ enum ipc_mem_device_ipc_state ipc_requested_state;
+ struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS];
+ struct iosm_devlink *ipc_devlink;
+ u32 ipc_status;
+ u32 nr_of_channels;
+ struct hrtimer startup_timer;
+ ktime_t hrtimer_period;
+ struct hrtimer tdupdate_timer;
+ struct hrtimer fast_update_timer;
+ struct hrtimer td_alloc_timer;
+ struct hrtimer adb_timer;
+ enum rom_exit_code rom_exit_code;
+ u32 enter_runtime;
+ struct completion ul_pend_sem;
+ u32 app_notify_ul_pend;
+ struct completion dl_pend_sem;
+ u32 app_notify_dl_pend;
+ enum ipc_phase phase;
+ u16 pci_device_id;
+ int cp_version;
+ int device_sleep;
+ struct work_struct run_state_worker;
+ u8 ev_irq_pending[IPC_IRQ_VECTORS];
+ unsigned long flag;
+ u8 td_update_timer_suspended:1,
+ ev_mux_net_transmit_pending:1,
+ reset_det_n:1,
+ pcie_wake_n:1;
+#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_wwan_dir;
+ struct dentry *debugfs_dir;
+#endif
+};
+
+/**
+ * ipc_imem_init - Initialize the shared memory region
+ * @pcie: Pointer to core driver data-struct
+ * @device_id: PCI device ID
+ * @mmio: Pointer to the mmio area
+ * @dev: Pointer to device structure
+ *
+ * Returns: Initialized imem pointer on success else NULL
+ */
+struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
+ void __iomem *mmio, struct device *dev);
+
+/**
+ * ipc_imem_pm_s2idle_sleep - Set PM variables to sleep/active for
+ * s2idle sleep/active
+ * @ipc_imem: Pointer to imem data-struct
+ * @sleep: Set PM Variable to sleep/active
+ */
+void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep);
+
+/**
+ * ipc_imem_pm_suspend - The HAL shall ask the shared memory layer
+ * whether D3 is allowed.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_pm_resume - The HAL shall inform the shared memory layer
+ * that the device is active.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_pm_resume(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_cleanup - Inform CP and free the shared memory resources.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_cleanup(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_irq_process - Shift the IRQ actions to the IPC thread.
+ * @ipc_imem: Pointer to imem data-struct
+ * @irq: Irq number
+ */
+void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq);
+
+/**
+ * imem_get_device_sleep_state - Get the device sleep state value.
+ * @ipc_imem: Pointer to imem instance
+ *
+ * Returns: device sleep state
+ */
+int imem_get_device_sleep_state(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_td_update_timer_suspend - Updates the TD Update Timer suspend flag.
+ * @ipc_imem: Pointer to imem data-struct
+ * @suspend: Flag to update. If TRUE then HP update doorbell is triggered to
+ * device without any wait. If FALSE then HP update doorbell is
+ * delayed until timeout.
+ */
+void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend);
+
+/**
+ * ipc_imem_channel_close - Release the channel resources.
+ * @ipc_imem: Pointer to imem data-struct
+ * @channel_id: Channel ID to be cleaned up.
+ */
+void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id);
+
+/**
+ * ipc_imem_channel_alloc - Reserves a channel
+ * @ipc_imem: Pointer to imem data-struct
+ * @index: ID to lookup from the preallocated list.
+ * @ctype: Channel type.
+ *
+ * Returns: Index on success and failure value on error
+ */
+int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
+ enum ipc_ctype ctype);
+
+/**
+ * ipc_imem_channel_open - Establish the pipes.
+ * @ipc_imem: Pointer to imem data-struct
+ * @channel_id: Channel ID returned during alloc.
+ * @db_id: Doorbell ID for trigger identifier.
+ *
+ * Returns: Pointer of ipc_mem_channel on success and NULL on failure.
+ */
+struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
+ int channel_id, u32 db_id);
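+
+/* A minimal usage sketch (illustrative only, mirroring how this driver pairs
+ * the two calls elsewhere): reserve a control channel, then establish its
+ * pipes. chl_id and db_id are placeholder values:
+ *
+ *	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
+ *	if (ch_id < 0)
+ *		return NULL;
+ *
+ *	channel = ipc_imem_channel_open(ipc_imem, ch_id, db_id);
+ *	if (!channel)
+ *		return NULL;
+ */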
+
+/**
+ * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not running.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_ul_write_td - Pass the channel UL list to protocol layer for TD
+ * preparation and sending them to the device.
+ * @ipc_imem: Pointer to imem data-struct
+ *
+ * Returns: TRUE if HP Doorbell trigger is pending, FALSE otherwise.
+ */
+bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_ul_send - Dequeue SKBs from the channel list and start the
+ * uplink transfer. If the HP Doorbell trigger is pending,
+ * the TD Update Timer is started.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_ul_send(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_channel_update - Set or modify pipe config of an existing channel
+ * @ipc_imem: Pointer to imem data-struct
+ * @id: Channel config index
+ * @chnl_cfg: Channel config struct
+ * @irq_moderation: Timer in usec for irq_moderation
+ */
+void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation);
+
+/**
+ * ipc_imem_channel_free - Free an IPC channel.
+ * @channel: Channel to be freed
+ */
+void ipc_imem_channel_free(struct ipc_mem_channel *channel);
+
+/**
+ * ipc_imem_hrtimer_stop - Stop the hrtimer
+ * @hr_timer: Pointer to hrtimer instance
+ */
+void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer);
+
+/**
+ * ipc_imem_pipe_cleanup - Reset volatile pipe content for all channels
+ * @ipc_imem: Pointer to imem data-struct
+ * @pipe: Pipe to be cleaned up
+ */
+void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe);
+
+/**
+ * ipc_imem_pipe_close - Send msg to device to close pipe
+ * @ipc_imem: Pointer to imem data-struct
+ * @pipe: Pipe to be closed
+ */
+void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe);
+
+/**
+ * ipc_imem_phase_update - Get the CP execution state
+ * and map it to the AP phase.
+ * @ipc_imem: Pointer to imem data-struct
+ *
+ * Returns: Current updated AP phase
+ */
+enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_phase_get_string - Return the current operation
+ * phase as string.
+ * @phase: AP phase
+ *
+ * Returns: AP phase string
+ */
+const char *ipc_imem_phase_get_string(enum ipc_phase phase);
+
+/**
+ * ipc_imem_msg_send_feature_set - Send feature set message to modem
+ * @ipc_imem: Pointer to imem data-struct
+ * @reset_enable: 0 = out-of-band, 1 = in-band-crash notification
+ * @atomic_ctx: if disabled call in tasklet context
+ *
+ */
+void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
+ unsigned int reset_enable, bool atomic_ctx);
+
+/**
+ * ipc_imem_ipc_init_check - Send the init event to CP, wait a certain time and
+ * set CP to runtime with the context information
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_channel_init - Initialize the channel list with UL/DL pipe pairs.
+ * @ipc_imem: Pointer to imem data-struct
+ * @ctype: Channel type
+ * @chnl_cfg: Channel configuration struct
+ * @irq_moderation: Timer in usec for irq_moderation
+ */
+void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation);
+
+/**
+ * ipc_imem_devlink_trigger_chip_info - Inform devlink that the chip
+ * information is available if the
+ * flashing to RAM interworking shall be
+ * executed.
+ * @ipc_imem: Pointer to imem structure
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem);
+
+void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem);
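+
+/* Illustrative lifecycle sketch (not part of the driver API contract): the
+ * PCIe layer is expected to create the imem instance at probe time, forward
+ * MSI interrupts to it and tear it down at remove time. The pci_dev and
+ * irq_vector names are placeholders from the caller context:
+ *
+ *	ipc_imem = ipc_imem_init(ipc_pcie, pci_dev->device, mmio, &pci_dev->dev);
+ *	if (!ipc_imem)
+ *		return -ENOMEM;
+ *
+ *	ipc_imem_irq_process(ipc_imem, irq_vector);	// per MSI vector
+ *
+ *	ipc_imem_cleanup(ipc_imem);			// on device removal
+ */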
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
new file mode 100644
index 0000000000..109cf89304
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_devlink.h"
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_port.h"
+#include "iosm_ipc_task_queue.h"
+
+/* Open a packet data online channel between the network layer and CP. */
+int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
+{
+ dev_dbg(ipc_imem->dev, "%s if id: %d",
+ ipc_imem_phase_get_string(ipc_imem->phase), if_id);
+
+ /* The network interface is only supported in the runtime phase. */
+ if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
+ dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
+ ipc_imem_phase_get_string(ipc_imem->phase));
+ return -EIO;
+ }
+
+ return ipc_mux_open_session(ipc_imem->mux, if_id);
+}
+
+/* Release a net link to CP. */
+void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
+ int channel_id)
+{
+ if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
+ if_id <= IP_MUX_SESSION_END)
+ ipc_mux_close_session(ipc_imem->mux, if_id);
+}
+
+/* Tasklet call to do uplink transfer. */
+static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_imem_ul_send(ipc_imem);
+
+ return 0;
+}
+
+/* Through tasklet to do sio write. */
+static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
+{
+ return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
+ NULL, 0, false);
+}
+
+/* Function to transfer UL data */
+int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
+ int if_id, int channel_id, struct sk_buff *skb)
+{
+ int ret = -EINVAL;
+
+ if (!ipc_imem || channel_id < 0)
+ goto out;
+
+ /* Is CP Running? */
+ if (ipc_imem->phase != IPC_P_RUN) {
+ dev_dbg(ipc_imem->dev, "phase %s transmit",
+ ipc_imem_phase_get_string(ipc_imem->phase));
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Route the UL packet through IP MUX Layer */
+ ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
+out:
+ return ret;
+}
+
+/* Initialize wwan channel */
+int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+ enum ipc_mux_protocol mux_type)
+{
+ struct ipc_chnl_cfg chnl_cfg = { 0 };
+
+ ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
+
+ /* If modem version is invalid (0xffffffff), do not initialize WWAN. */
+ if (ipc_imem->cp_version == -1) {
+ dev_err(ipc_imem->dev, "invalid CP version");
+ return -EIO;
+ }
+
+ ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
+
+ if (ipc_imem->mmio->mux_protocol == MUX_AGGREGATION &&
+ ipc_imem->nr_of_channels == IPC_MEM_IP_CHL_ID_0) {
+ chnl_cfg.ul_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
+ chnl_cfg.dl_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_DL;
+ chnl_cfg.dl_buf_size = IPC_MEM_MAX_ADB_BUF_SIZE;
+ }
+
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
+ IRQ_MOD_OFF);
+
+ /* WWAN registration. */
+ ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
+ if (!ipc_imem->wwan) {
+ dev_err(ipc_imem->dev,
+ "failed to register the ipc_wwan interfaces");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Map SKB to DMA for transfer */
+static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
+ struct sk_buff *skb)
+{
+ struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
+ char *buf = skb->data;
+ int len = skb->len;
+ dma_addr_t mapping;
+ int ret;
+
+ ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
+
+ if (ret)
+ goto err;
+
+ BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
+
+ IPC_CB(skb)->mapping = mapping;
+ IPC_CB(skb)->direction = DMA_TO_DEVICE;
+ IPC_CB(skb)->len = len;
+ IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
+
+err:
+ return ret;
+}
+
+/* return true if channel is ready for use */
+static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel)
+{
+ enum ipc_phase phase;
+
+ /* Update the current operation phase. */
+ phase = ipc_imem->phase;
+
+ /* Select the operation depending on the execution stage. */
+ switch (phase) {
+ case IPC_P_RUN:
+ case IPC_P_PSI:
+ case IPC_P_EBL:
+ break;
+
+ case IPC_P_ROM:
+ /* Prepare the PSI image for the CP ROM driver and
+ * suspend the flash app.
+ */
+ if (channel->state != IMEM_CHANNEL_RESERVED) {
+ dev_err(ipc_imem->dev,
+ "ch[%d]:invalid channel state %d,expected %d",
+ channel->channel_id, channel->state,
+ IMEM_CHANNEL_RESERVED);
+ goto channel_unavailable;
+ }
+ goto channel_available;
+
+ default:
+ /* Ignore uplink actions in all other phases. */
+ dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
+ channel->channel_id, phase);
+ goto channel_unavailable;
+ }
+ /* Check the full availability of the channel. */
+ if (channel->state != IMEM_CHANNEL_ACTIVE) {
+ dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
+ channel->channel_id, channel->state);
+ goto channel_unavailable;
+ }
+
+channel_available:
+ return true;
+
+channel_unavailable:
+ return false;
+}
+
+/**
+ * ipc_imem_sys_port_close - Release a sio link to CP.
+ * @ipc_imem: Imem instance.
+ * @channel: Channel instance.
+ */
+void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel)
+{
+ enum ipc_phase curr_phase;
+ int status = 0;
+ u32 tail = 0;
+
+ curr_phase = ipc_imem->phase;
+
+ /* If the current phase is IPC_P_OFF or the SIO ID is negative then
+ * the channel is already freed. Nothing to do.
+ */
+ if (curr_phase == IPC_P_OFF) {
+ dev_err(ipc_imem->dev,
+ "nothing to do. Current Phase: %s",
+ ipc_imem_phase_get_string(curr_phase));
+ return;
+ }
+
+ if (channel->state == IMEM_CHANNEL_FREE) {
+ dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
+ channel->channel_id, channel->state);
+ return;
+ }
+
+ /* If there are any pending TDs then wait for Timeout/Completion before
+ * closing pipe.
+ */
+ if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
+ ipc_imem->app_notify_ul_pend = 1;
+
+ /* Suspend the user app and wait a certain time for processing
+ * UL Data.
+ */
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->ul_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
+ channel->ul_pipe.pipe_nr,
+ channel->ul_pipe.old_head,
+ channel->ul_pipe.old_tail);
+ }
+
+ ipc_imem->app_notify_ul_pend = 0;
+ }
+
+ /* If there are any pending TDs then wait for Timeout/Completion before
+ * closing pipe.
+ */
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
+ &channel->dl_pipe, NULL, &tail);
+
+ if (tail != channel->dl_pipe.old_tail) {
+ ipc_imem->app_notify_dl_pend = 1;
+
+ /* Suspend the user app and wait a certain time for processing
+ * DL Data.
+ */
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->dl_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
+ channel->dl_pipe.pipe_nr,
+ channel->dl_pipe.old_head,
+ channel->dl_pipe.old_tail);
+ }
+
+ ipc_imem->app_notify_dl_pend = 0;
+ }
+
+ /* Due to the wait for completion in messages, there is a small window
+ * between closing the pipe and marking the channel as closed. In this
+ * small window there could be an HP update from the Host Driver. Hence
+ * update the channel state to CLOSING to avoid an unnecessary interrupt
+ * towards CP.
+ */
+ channel->state = IMEM_CHANNEL_CLOSING;
+
+ ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
+
+ ipc_imem_channel_free(channel);
+}
+
+/* Open a PORT link to CP and return the channel */
+struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
+ int chl_id, int hp_id)
+{
+ struct ipc_mem_channel *channel;
+ int ch_id;
+
+ /* The PORT interface is only supported in the runtime phase. */
+ if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
+ dev_err(ipc_imem->dev, "PORT open refused, phase %s",
+ ipc_imem_phase_get_string(ipc_imem->phase));
+ return NULL;
+ }
+
+ ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
+
+ if (ch_id < 0) {
+ dev_err(ipc_imem->dev, "reservation of a PORT chnl id failed");
+ return NULL;
+ }
+
+ channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
+
+ if (!channel) {
+ dev_err(ipc_imem->dev, "PORT channel id open failed");
+ return NULL;
+ }
+
+ return channel;
+}
+
+/* transfer skb to modem */
+int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
+{
+ struct ipc_mem_channel *channel = ipc_cdev->channel;
+ struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
+ int ret = -EIO;
+
+ if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
+ ipc_imem->phase == IPC_P_OFF_REQ)
+ goto out;
+
+ ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
+
+ if (ret)
+ goto out;
+
+ /* Add skb to the uplink skbuf accumulator. */
+ skb_queue_tail(&channel->ul_list, skb);
+
+ ret = ipc_imem_call_cdev_write(ipc_imem);
+
+ if (ret) {
+ skb_dequeue_tail(&channel->ul_list);
+ dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
+ ipc_cdev->channel->channel_id);
+ }
+out:
+ return ret;
+}
+
+/* Open a SIO link to CP and return the channel instance */
+struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
+{
+ struct ipc_mem_channel *channel;
+ enum ipc_phase phase;
+ int channel_id;
+
+ phase = ipc_imem_phase_update(ipc_imem);
+ switch (phase) {
+ case IPC_P_OFF:
+ case IPC_P_ROM:
+ /* Get a channel id as flash id and reserve it. */
+ channel_id = ipc_imem_channel_alloc(ipc_imem,
+ IPC_MEM_CTRL_CHL_ID_7,
+ IPC_CTYPE_CTRL);
+
+ if (channel_id < 0) {
+ dev_err(ipc_imem->dev,
+ "reservation of a flash channel id failed");
+ goto error;
+ }
+
+ ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
+ channel = &ipc_imem->channels[channel_id];
+
+ /* Enqueue chip info data to be read */
+ if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
+ dev_err(ipc_imem->dev, "Enqueue of chip info failed");
+ channel->state = IMEM_CHANNEL_FREE;
+ goto error;
+ }
+
+ return channel;
+
+ case IPC_P_PSI:
+ case IPC_P_EBL:
+ ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
+ if (ipc_imem->cp_version == -1) {
+ dev_err(ipc_imem->dev, "invalid CP version");
+ goto error;
+ }
+
+ channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
+ return ipc_imem_channel_open(ipc_imem, channel_id,
+ IPC_HP_CDEV_OPEN);
+
+ default:
+ /* CP is in the wrong state (e.g. CRASH or CD_READY) */
+ dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
+ }
+error:
+ return NULL;
+}
+
+/* Release a SIO channel link to CP. */
+void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
+{
+ struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
+ int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
+ enum ipc_mem_exec_stage exec_stage;
+ struct ipc_mem_channel *channel;
+ int status = 0;
+ u32 tail = 0;
+
+ channel = ipc_imem->ipc_devlink->devlink_sio.channel;
+ /* Increase the total wait time to boot_check_timeout */
+ do {
+ exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
+ exec_stage == IPC_MEM_EXEC_STAGE_PSI)
+ break;
+ msleep(20);
+ boot_check_timeout -= 20;
+ } while (boot_check_timeout > 0);
+
+ /* If there are any pending TDs then wait for Timeout/Completion before
+ * closing pipe.
+ */
+ if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->ul_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
+ channel->ul_pipe.pipe_nr,
+ channel->ul_pipe.old_head,
+ channel->ul_pipe.old_tail);
+ }
+ }
+
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
+ &channel->dl_pipe, NULL, &tail);
+
+ if (tail != channel->dl_pipe.old_tail) {
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->dl_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
+ channel->dl_pipe.pipe_nr,
+ channel->dl_pipe.old_head,
+ channel->dl_pipe.old_tail);
+ }
+ }
+
+ /* Due to the wait for completion in messages, there is a small window
+ * between closing the pipe and marking the channel as closed. In this
+ * small window there could be an HP update from the Host Driver. Hence
+ * update the channel state to CLOSING to avoid an unnecessary interrupt
+ * towards CP.
+ */
+ channel->state = IMEM_CHANNEL_CLOSING;
+ /* Release the pipe resources */
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+ ipc_imem->nr_of_channels--;
+}
+
+void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
+ struct sk_buff *skb)
+{
+ skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
+ complete(&ipc_devlink->devlink_sio.read_sem);
+}
+
+/* PSI transfer */
+static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel,
+ unsigned char *buf, int count)
+{
+ int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
+ enum ipc_mem_exec_stage exec_stage;
+
+ dma_addr_t mapping = 0;
+ int ret;
+
+ ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto pcie_addr_map_fail;
+
+ /* Save the PSI information for the CP ROM driver on the doorbell
+ * scratchpad.
+ */
+ ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
+ ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);
+
+ ret = wait_for_completion_interruptible_timeout
+ (&channel->ul_sem,
+ msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
+
+ if (ret <= 0) {
+ dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
+ ret);
+ goto psi_transfer_fail;
+ }
+ /* If the PSI download fails, return the CP boot ROM exit code */
+ if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
+ ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
+ ret = (-1) * ((int)ipc_imem->rom_exit_code);
+ goto psi_transfer_fail;
+ }
+
+ dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");
+
+ /* Wait psi_start_timeout milliseconds until the CP PSI image is
+ * running and updates the execution_stage field with
+ * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
+ */
+ do {
+ exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+
+ if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
+ break;
+
+ msleep(20);
+ psi_start_timeout -= 20;
+ } while (psi_start_timeout > 0);
+
+ if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
+ goto psi_transfer_fail; /* Unknown status of CP PSI process. */
+
+ ipc_imem->phase = IPC_P_PSI;
+
+ /* Enter the PSI phase. */
+ dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);
+
+ /* Request the RUNNING state from CP and wait until it was reached
+ * or timeout.
+ */
+ ipc_imem_ipc_init_check(ipc_imem);
+
+ ret = wait_for_completion_interruptible_timeout
+ (&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
+ if (ret <= 0) {
+ dev_err(ipc_imem->dev,
+ "Failed PSI RUNNING state on CP, Error-%d", ret);
+ goto psi_transfer_fail;
+ }
+
+ if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
+ IPC_MEM_DEVICE_IPC_RUNNING) {
+ dev_err(ipc_imem->dev,
+ "ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
+ channel->channel_id,
+ ipc_imem_phase_get_string(ipc_imem->phase),
+ ipc_mmio_get_ipc_state(ipc_imem->mmio));
+
+ goto psi_transfer_fail;
+ }
+
+ /* Create the flash channel for the transfer of the images. */
+ if (!ipc_imem_sys_devlink_open(ipc_imem)) {
+ dev_err(ipc_imem->dev, "can't open flash_channel");
+ goto psi_transfer_fail;
+ }
+
+ ret = 0;
+psi_transfer_fail:
+ ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
+pcie_addr_map_fail:
+ return ret;
+}
+
+int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
+ unsigned char *buf, int count)
+{
+ struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
+ struct ipc_mem_channel *channel;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int ret;
+
+ channel = ipc_imem->ipc_devlink->devlink_sio.channel;
+
+ /* In the ROM phase the PSI image is passed to CP directly via a specific
+ * shared memory area and the doorbell scratchpad.
+ */
+ if (ipc_imem->phase == IPC_P_ROM) {
+ ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
+ /* If the PSI transfer fails then send the crash signature. */
+ if (ret > 0)
+ ipc_imem_msg_send_feature_set(ipc_imem,
+ IPC_MEM_INBAND_CRASH_SIG,
+ false);
+ goto out;
+ }
+
+ /* Allocate skb memory for the uplink buffer. */
+ skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
+ DMA_TO_DEVICE, 0);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ skb_put_data(skb, buf, count);
+
+ IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;
+
+ /* Add skb to the uplink skbuf accumulator. */
+ skb_queue_tail(&channel->ul_list, skb);
+
+ /* Inform the IPC tasklet to pass uplink IP packets to CP. */
+ if (!ipc_imem_call_cdev_write(ipc_imem)) {
+ ret = wait_for_completion_interruptible(&channel->ul_sem);
+
+ if (ret < 0) {
+ dev_err(ipc_imem->dev,
+ "ch[%d] no CP confirmation, status = %d",
+ channel->channel_id, ret);
+ ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
+ goto out;
+ }
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
+ u32 bytes_to_read, u32 *bytes_read)
+{
+ struct sk_buff *skb = NULL;
+ int rc = 0;
+
+ /* check skb is available in rx_list or wait for skb */
+ devlink->devlink_sio.devlink_read_pend = 1;
+ while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
+ if (!wait_for_completion_interruptible_timeout
+ (&devlink->devlink_sio.read_sem,
+ msecs_to_jiffies(IPC_READ_TIMEOUT))) {
+ dev_err(devlink->dev, "Read timed out");
+ rc = -ETIMEDOUT;
+ goto devlink_read_fail;
+ }
+ }
+ devlink->devlink_sio.devlink_read_pend = 0;
+ if (bytes_to_read < skb->len) {
+ dev_err(devlink->dev, "Invalid size, expected len %d", skb->len);
+ rc = -EINVAL;
+ goto devlink_read_fail;
+ }
+ *bytes_read = skb->len;
+ memcpy(data, skb->data, skb->len);
+
+devlink_read_fail:
+ dev_kfree_skb(skb);
+ return rc;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
new file mode 100644
index 0000000000..026c5bd0f9
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_IMEM_OPS_H
+#define IOSM_IPC_IMEM_OPS_H
+
+#include "iosm_ipc_mux_codec.h"
+
+/* Maximum wait time for blocking read */
+#define IPC_READ_TIMEOUT 3000
+
+/* The delay in ms for deferring the unregister */
+#define SIO_UNREGISTER_DEFER_DELAY_MS 1
+
+/* Default delay till CP PSI image is running and modem updates the
+ * execution stage.
+ * unit : milliseconds
+ */
+#define PSI_START_DEFAULT_TIMEOUT 3000
+
+/* Default time out when closing SIO, till the modem is in
+ * running state.
+ * unit : milliseconds
+ */
+#define BOOT_CHECK_DEFAULT_TIMEOUT 400
+
+/* IP MUX channel range */
+#define IP_MUX_SESSION_START 0
+#define IP_MUX_SESSION_END 7
+
+/* Default IP MUX channel */
+#define IP_MUX_SESSION_DEFAULT 0
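+
+/* Illustrative range check (mirroring ipc_imem_sys_wwan_close()): only
+ * interface ids inside the MUX session window are handled; ipc_mux and
+ * if_id come from the caller context:
+ *
+ *	if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
+ *		ipc_mux_close_session(ipc_mux, if_id);
+ */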
+
+/**
+ * ipc_imem_sys_port_open - Open a port link to CP.
+ * @ipc_imem: Imem instance.
+ * @chl_id: Channel Identifier.
+ * @hp_id: HP Identifier.
+ *
+ * Return: channel instance on success, NULL for failure
+ */
+struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
+ int chl_id, int hp_id);
+void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel);
+
+/**
+ * ipc_imem_sys_cdev_write - Route the uplink buffer to CP.
+ * @ipc_cdev: iosm_cdev instance.
+ * @skb: Pointer to skb.
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb);
+
+/**
+ * ipc_imem_sys_wwan_open - Open packet data online channel between network
+ * layer and CP.
+ * @ipc_imem: Imem instance.
+ * @if_id: ip link tag of the net device.
+ *
+ * Return: Channel ID on success and failure value on error
+ */
+int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id);
+
+/**
+ * ipc_imem_sys_wwan_close - Close packet data online channel between network
+ * layer and CP.
+ * @ipc_imem: Imem instance.
+ * @if_id: IP link id of the net device.
+ * @channel_id: Channel ID to be closed.
+ */
+void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
+ int channel_id);
+
+/**
+ * ipc_imem_sys_wwan_transmit - Function to transfer UL data
+ * @ipc_imem: Imem instance.
+ * @if_id: link ID of the device.
+ * @channel_id: Channel ID used
+ * @skb: Pointer to sk buffer
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id,
+ int channel_id, struct sk_buff *skb);
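+
+/* A hedged usage sketch (illustrative only): the WWAN net device glue is
+ * expected to open one session per interface, pass uplink skbs through the
+ * transmit call and close the session again. if_id, ch_id and skb are
+ * placeholders:
+ *
+ *	ch_id = ipc_imem_sys_wwan_open(ipc_imem, if_id);
+ *	if (ch_id < 0)
+ *		return -EIO;
+ *
+ *	ret = ipc_imem_sys_wwan_transmit(ipc_imem, if_id, ch_id, skb);
+ *
+ *	ipc_imem_sys_wwan_close(ipc_imem, if_id, ch_id);
+ */
+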
+/**
+ * ipc_imem_wwan_channel_init - Initializes WWAN channels and the channel for
+ * MUX.
+ * @ipc_imem: Pointer to iosm_imem struct.
+ * @mux_type: Type of mux protocol.
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+ enum ipc_mux_protocol mux_type);
+
+/**
+ * ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP
+ * @ipc_imem: iosm_imem instance
+ *
+ * Return: channel instance on success, NULL for failure
+ */
+struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_sys_devlink_close - Release a Flash/CD channel link to CP
+ * @ipc_devlink: Pointer to ipc_devlink data-struct
+ *
+ */
+void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink);
+
+/**
+ * ipc_imem_sys_devlink_notify_rx - Receive downlink characters from CP;
+ * the downlink skbuf is added at the end of
+ * the downlink (rx) list
+ * @ipc_devlink: Pointer to ipc_devlink data-struct
+ * @skb: Pointer to sk buffer
+ */
+void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
+ struct sk_buff *skb);
+
+/**
+ * ipc_imem_sys_devlink_read - Copy the rx data and free the skbuf
+ * @ipc_devlink: Devlink instance
+ * @data: Buffer to read the data from modem
+ * @bytes_to_read: Size of destination buffer
+ * @bytes_read: Number of bytes read
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_devlink_read(struct iosm_devlink *ipc_devlink, u8 *data,
+ u32 bytes_to_read, u32 *bytes_read);
+
+/**
+ * ipc_imem_sys_devlink_write - Route the uplink buffer to CP
+ * @ipc_devlink: Devlink instance
+ * @buf: Pointer to buffer
+ * @count: Number of data bytes to write
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
+ unsigned char *buf, int count);
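+
+/* A minimal sketch (illustrative only) of the flashing data path as seen
+ * from the devlink layer: write a chunk to CP, then read back the response.
+ * tx_buf, tx_len and rx_buf are placeholder names:
+ *
+ *	u32 bytes_read = 0;
+ *
+ *	ret = ipc_imem_sys_devlink_write(ipc_devlink, tx_buf, tx_len);
+ *	if (!ret)
+ *		ret = ipc_imem_sys_devlink_read(ipc_devlink, rx_buf,
+ *						sizeof(rx_buf), &bytes_read);
+ */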
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_irq.c b/drivers/net/wwan/iosm/iosm_ipc_irq.c
new file mode 100644
index 0000000000..702f50a481
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_irq.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_pcie.h"
+#include "iosm_ipc_protocol.h"
+
+static void ipc_write_dbell_reg(struct iosm_pcie *ipc_pcie, int irq_n, u32 data)
+{
+ void __iomem *write_reg;
+
+ /* Select the first doorbell register, which is only currently needed
+ * by CP.
+ */
+ write_reg = (void __iomem *)((u8 __iomem *)ipc_pcie->ipc_regs +
+ ipc_pcie->doorbell_write +
+ (irq_n * ipc_pcie->doorbell_reg_offset));
+
+ /* Fire the doorbell irq by writing data on the doorbell write pointer
+ * register.
+ */
+ iowrite32(data, write_reg);
+}
+
+void ipc_doorbell_fire(struct iosm_pcie *ipc_pcie, int irq_n, u32 data)
+{
+ ipc_write_dbell_reg(ipc_pcie, irq_n, data);
+}
+
+/* Threaded Interrupt handler for MSI interrupts */
+static irqreturn_t ipc_msi_interrupt(int irq, void *dev_id)
+{
+ struct iosm_pcie *ipc_pcie = dev_id;
+ int instance = irq - ipc_pcie->pci->irq;
+
+ /* Shift the MSI irq actions to the IPC tasklet. IRQ_NONE means the
+ * irq was not from the IPC device or could not be served.
+ */
+ if (instance >= ipc_pcie->nvec)
+ return IRQ_NONE;
+
+ if (!test_bit(0, &ipc_pcie->suspend))
+ ipc_imem_irq_process(ipc_pcie->imem, instance);
+
+ return IRQ_HANDLED;
+}
+
+void ipc_release_irq(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pdev = ipc_pcie->pci;
+
+ if (pdev->msi_enabled) {
+ while (--ipc_pcie->nvec >= 0)
+ free_irq(pdev->irq + ipc_pcie->nvec, ipc_pcie);
+ }
+ pci_free_irq_vectors(pdev);
+}
+
+int ipc_acquire_irq(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pdev = ipc_pcie->pci;
+ int i, rc = -EINVAL;
+
+ ipc_pcie->nvec = pci_alloc_irq_vectors(pdev, IPC_MSI_VECTORS,
+ IPC_MSI_VECTORS, PCI_IRQ_MSI);
+
+ if (ipc_pcie->nvec < 0) {
+ rc = ipc_pcie->nvec;
+ goto error;
+ }
+
+ if (!pdev->msi_enabled)
+ goto error;
+
+ for (i = 0; i < ipc_pcie->nvec; ++i) {
+ rc = request_threaded_irq(pdev->irq + i, NULL,
+ ipc_msi_interrupt, IRQF_ONESHOT,
+ KBUILD_MODNAME, ipc_pcie);
+ if (rc) {
+ dev_err(ipc_pcie->dev, "unable to grab IRQ, rc=%d", rc);
+ ipc_pcie->nvec = i;
+ ipc_release_irq(ipc_pcie);
+ goto error;
+ }
+ }
+
+error:
+ return rc;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_irq.h b/drivers/net/wwan/iosm/iosm_ipc_irq.h
new file mode 100644
index 0000000000..a8ed596cb6
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_irq.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_IRQ_H
+#define IOSM_IPC_IRQ_H
+
+struct iosm_pcie;
+
+/**
+ * ipc_doorbell_fire - fire doorbell to CP
+ * @ipc_pcie: Pointer to iosm_pcie
+ * @irq_n: Doorbell type
+ * @data: ipc state
+ */
+void ipc_doorbell_fire(struct iosm_pcie *ipc_pcie, int irq_n, u32 data);
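+
+/* Illustrative call (as used by the PSI transfer path): ring doorbell 0 to
+ * signal the boot execution stage to CP:
+ *
+ *	ipc_doorbell_fire(ipc_pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);
+ */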
+
+/**
+ * ipc_release_irq - Release the IRQ handler.
+ * @ipc_pcie: Pointer to iosm_pcie struct
+ */
+void ipc_release_irq(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_acquire_irq - acquire IRQ & register IRQ handler.
+ * @ipc_pcie: Pointer to iosm_pcie struct
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_acquire_irq(struct iosm_pcie *ipc_pcie);
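+
+/* A minimal pairing sketch (illustrative only): the PCIe probe path acquires
+ * the MSI vectors once and releases them again on the error or removal path:
+ *
+ *	ret = ipc_acquire_irq(ipc_pcie);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	ipc_release_irq(ipc_pcie);
+ */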
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
new file mode 100644
index 0000000000..63eb08c43c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/slab.h>
+
+#include "iosm_ipc_mmio.h"
+#include "iosm_ipc_mux.h"
+
+/* Definition of MMIO offsets
+ * note that MMIO_CI offsets are relative to end of chip info structure
+ */
+
+/* MMIO chip info size in bytes */
+#define MMIO_CHIP_INFO_SIZE 60
+
+/* CP execution stage */
+#define MMIO_OFFSET_EXECUTION_STAGE 0x00
+
+/* Boot ROM Chip Info struct */
+#define MMIO_OFFSET_CHIP_INFO 0x04
+
+#define MMIO_OFFSET_ROM_EXIT_CODE 0x40
+
+#define MMIO_OFFSET_PSI_ADDRESS 0x54
+
+#define MMIO_OFFSET_PSI_SIZE 0x5C
+
+#define MMIO_OFFSET_IPC_STATUS 0x60
+
+#define MMIO_OFFSET_CONTEXT_INFO 0x64
+
+#define MMIO_OFFSET_BASE_ADDR 0x6C
+
+#define MMIO_OFFSET_END_ADDR 0x74
+
+#define MMIO_OFFSET_CP_VERSION 0xF0
+
+#define MMIO_OFFSET_CP_CAPABILITIES 0xF4
+
+/* Number of 20 msec polling attempts (about one second in total) to wait for
+ * the modem boot code to write a valid execution stage into the mmio area
+ */
+#define IPC_MMIO_EXEC_STAGE_TIMEOUT 50
+
+/* check if exec stage has one of the valid values */
+static bool ipc_mmio_is_valid_exec_stage(enum ipc_mem_exec_stage stage)
+{
+ switch (stage) {
+ case IPC_MEM_EXEC_STAGE_BOOT:
+ case IPC_MEM_EXEC_STAGE_PSI:
+ case IPC_MEM_EXEC_STAGE_EBL:
+ case IPC_MEM_EXEC_STAGE_RUN:
+ case IPC_MEM_EXEC_STAGE_CRASH:
+ case IPC_MEM_EXEC_STAGE_CD_READY:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio)
+{
+ u32 cp_cap;
+ unsigned int ver;
+
+ ver = ipc_mmio_get_cp_version(ipc_mmio);
+ cp_cap = ioread32(ipc_mmio->base + ipc_mmio->offset.cp_capability);
+
+ ipc_mmio->mux_protocol = ((ver >= IOSM_CP_VERSION) && (cp_cap &
+ (UL_AGGR | DL_AGGR))) ? MUX_AGGREGATION
+ : MUX_LITE;
+
+ ipc_mmio->has_ul_flow_credit =
+ (ver >= IOSM_CP_VERSION) && (cp_cap & UL_FLOW_CREDIT);
+}
+
+struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
+{
+ struct iosm_mmio *ipc_mmio = kzalloc(sizeof(*ipc_mmio), GFP_KERNEL);
+ int retries = IPC_MMIO_EXEC_STAGE_TIMEOUT;
+ enum ipc_mem_exec_stage stage;
+
+ if (!ipc_mmio)
+ return NULL;
+
+ ipc_mmio->dev = dev;
+
+ ipc_mmio->base = mmio;
+
+ ipc_mmio->offset.exec_stage = MMIO_OFFSET_EXECUTION_STAGE;
+
+ /* Check for a valid execution stage to make sure that the boot code
+ * has correctly initialized the MMIO area.
+ */
+ do {
+ stage = ipc_mmio_get_exec_stage(ipc_mmio);
+ if (ipc_mmio_is_valid_exec_stage(stage))
+ break;
+
+ msleep(20);
+ } while (retries-- > 0);
+
+ if (!ipc_mmio_is_valid_exec_stage(stage)) {
+ dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
+ goto init_fail;
+ }
+
+ ipc_mmio->offset.chip_info = MMIO_OFFSET_CHIP_INFO;
+
+ /* read chip info size and version from chip info structure */
+ ipc_mmio->chip_info_version =
+ ioread8(ipc_mmio->base + ipc_mmio->offset.chip_info);
+
+ /* Increment of 2 is needed as the size value in the chip info
+ * excludes the version and size field, which are always present
+ */
+ ipc_mmio->chip_info_size =
+ ioread8(ipc_mmio->base + ipc_mmio->offset.chip_info + 1) + 2;
+
+ if (ipc_mmio->chip_info_size != MMIO_CHIP_INFO_SIZE) {
+ dev_err(ipc_mmio->dev, "Unexpected Chip Info");
+ goto init_fail;
+ }
+
+ ipc_mmio->offset.rom_exit_code = MMIO_OFFSET_ROM_EXIT_CODE;
+
+ ipc_mmio->offset.psi_address = MMIO_OFFSET_PSI_ADDRESS;
+ ipc_mmio->offset.psi_size = MMIO_OFFSET_PSI_SIZE;
+ ipc_mmio->offset.ipc_status = MMIO_OFFSET_IPC_STATUS;
+ ipc_mmio->offset.context_info = MMIO_OFFSET_CONTEXT_INFO;
+ ipc_mmio->offset.ap_win_base = MMIO_OFFSET_BASE_ADDR;
+ ipc_mmio->offset.ap_win_end = MMIO_OFFSET_END_ADDR;
+
+ ipc_mmio->offset.cp_version = MMIO_OFFSET_CP_VERSION;
+ ipc_mmio->offset.cp_capability = MMIO_OFFSET_CP_CAPABILITIES;
+
+ return ipc_mmio;
+
+init_fail:
+ kfree(ipc_mmio);
+ return NULL;
+}
+
+enum ipc_mem_exec_stage ipc_mmio_get_exec_stage(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return IPC_MEM_EXEC_STAGE_INVALID;
+
+ return (enum ipc_mem_exec_stage)ioread32(ipc_mmio->base +
+ ipc_mmio->offset.exec_stage);
+}
+
+void ipc_mmio_copy_chip_info(struct iosm_mmio *ipc_mmio, void *dest,
+ size_t size)
+{
+ if (ipc_mmio && dest)
+ memcpy_fromio(dest, ipc_mmio->base + ipc_mmio->offset.chip_info,
+ size);
+}
+
+enum ipc_mem_device_ipc_state ipc_mmio_get_ipc_state(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return IPC_MEM_DEVICE_IPC_INVALID;
+
+ return (enum ipc_mem_device_ipc_state)ioread32(ipc_mmio->base +
+ ipc_mmio->offset.ipc_status);
+}
+
+enum rom_exit_code ipc_mmio_get_rom_exit_code(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return IMEM_ROM_EXIT_FAIL;
+
+ return (enum rom_exit_code)ioread32(ipc_mmio->base +
+ ipc_mmio->offset.rom_exit_code);
+}
+
+void ipc_mmio_config(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return;
+
+ /* AP memory window (the full window is open and active so that the modem
+ * checks each AP address). 0 means don't check on the modem side.
+ */
+ iowrite64(0, ipc_mmio->base + ipc_mmio->offset.ap_win_base);
+ iowrite64(0, ipc_mmio->base + ipc_mmio->offset.ap_win_end);
+
+ iowrite64(ipc_mmio->context_info_addr,
+ ipc_mmio->base + ipc_mmio->offset.context_info);
+}
+
+void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr,
+ u32 size)
+{
+ if (!ipc_mmio)
+ return;
+
+ iowrite64(addr, ipc_mmio->base + ipc_mmio->offset.psi_address);
+ iowrite32(size, ipc_mmio->base + ipc_mmio->offset.psi_size);
+}
+
+void ipc_mmio_set_contex_info_addr(struct iosm_mmio *ipc_mmio, phys_addr_t addr)
+{
+ if (!ipc_mmio)
+ return;
+
+ /* store context_info address. This will be stored in the mmio area
+ * during IPC_MEM_DEVICE_IPC_INIT state via ipc_mmio_config()
+ */
+ ipc_mmio->context_info_addr = addr;
+}
+
+int ipc_mmio_get_cp_version(struct iosm_mmio *ipc_mmio)
+{
+ if (ipc_mmio)
+ return ioread32(ipc_mmio->base + ipc_mmio->offset.cp_version);
+
+ return -EFAULT;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.h b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
new file mode 100644
index 0000000000..193d7ba247
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_MMIO_H
+#define IOSM_IPC_MMIO_H
+
+/* Minimal IOSM CP VERSION which has valid CP_CAPABILITIES field */
+#define IOSM_CP_VERSION 0x0100UL
+
+/* DL dir Aggregation support mask */
+#define DL_AGGR BIT(9)
+
+/* UL dir Aggregation support mask */
+#define UL_AGGR BIT(8)
+
+/* UL flow credit support mask */
+#define UL_FLOW_CREDIT BIT(21)
+
+/* Possible states of the IPC finite state machine. */
+enum ipc_mem_device_ipc_state {
+ IPC_MEM_DEVICE_IPC_UNINIT,
+ IPC_MEM_DEVICE_IPC_INIT,
+ IPC_MEM_DEVICE_IPC_RUNNING,
+ IPC_MEM_DEVICE_IPC_RECOVERY,
+ IPC_MEM_DEVICE_IPC_ERROR,
+ IPC_MEM_DEVICE_IPC_DONT_CARE,
+ IPC_MEM_DEVICE_IPC_INVALID = -1
+};
+
+/* Boot ROM exit status. */
+enum rom_exit_code {
+ IMEM_ROM_EXIT_OPEN_EXT = 0x01,
+ IMEM_ROM_EXIT_OPEN_MEM = 0x02,
+ IMEM_ROM_EXIT_CERT_EXT = 0x10,
+ IMEM_ROM_EXIT_CERT_MEM = 0x20,
+ IMEM_ROM_EXIT_FAIL = 0xFF
+};
+
+/* Boot stages */
+enum ipc_mem_exec_stage {
+ IPC_MEM_EXEC_STAGE_RUN = 0x600DF00D,
+ IPC_MEM_EXEC_STAGE_CRASH = 0x8BADF00D,
+ IPC_MEM_EXEC_STAGE_CD_READY = 0xBADC0DED,
+ IPC_MEM_EXEC_STAGE_BOOT = 0xFEEDB007,
+ IPC_MEM_EXEC_STAGE_PSI = 0xFEEDBEEF,
+ IPC_MEM_EXEC_STAGE_EBL = 0xFEEDCAFE,
+ IPC_MEM_EXEC_STAGE_INVALID = 0xFFFFFFFF
+};
+
+/* mmio scratchpad info */
+struct mmio_offset {
+ int exec_stage;
+ int chip_info;
+ int rom_exit_code;
+ int psi_address;
+ int psi_size;
+ int ipc_status;
+ int context_info;
+ int ap_win_base;
+ int ap_win_end;
+ int cp_version;
+ int cp_capability;
+};
+
+/**
+ * struct iosm_mmio - MMIO region mapped to the doorbell scratchpad.
+ * @base: Base address of MMIO region
+ * @dev: Pointer to device structure
+ * @offset: Start offset
+ * @context_info_addr: Physical base address of context info structure
+ * @chip_info_version: Version of chip info structure
+ * @chip_info_size: Size of chip info structure
+ * @mux_protocol: mux protocol
+ * @has_ul_flow_credit: UL flow credit support
+ * @has_slp_no_prot: Device sleep no protocol support
+ * @has_mcr_support: Usage of mcr support
+ */
+struct iosm_mmio {
+ unsigned char __iomem *base;
+ struct device *dev;
+ struct mmio_offset offset;
+ phys_addr_t context_info_addr;
+ unsigned int chip_info_version;
+ unsigned int chip_info_size;
+ u32 mux_protocol;
+ u8 has_ul_flow_credit:1,
+ has_slp_no_prot:1,
+ has_mcr_support:1;
+};
+
+/**
+ * ipc_mmio_init - Allocate mmio instance data
+ * @mmio_addr: Mapped AP base address of the MMIO area.
+ * @dev: Pointer to device structure
+ *
+ * Returns: address of mmio instance data or NULL on failure.
+ */
+struct iosm_mmio *ipc_mmio_init(void __iomem *mmio_addr, struct device *dev);
+
+/**
+ * ipc_mmio_set_psi_addr_and_size - Set start address and size of the
+ * primary system image (PSI) for the
+ * FW download.
+ * @ipc_mmio: Pointer to mmio instance
+ * @addr: PSI address
+ * @size: PSI image size
+ */
+void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr,
+ u32 size);
+
+/**
+ * ipc_mmio_set_contex_info_addr - Stores the Context Info Address in
+ * MMIO instance to share it with CP during
+ * mmio_init.
+ * @ipc_mmio: Pointer to mmio instance
+ * @addr: 64-bit address of AP context information.
+ */
+void ipc_mmio_set_contex_info_addr(struct iosm_mmio *ipc_mmio,
+ phys_addr_t addr);
+
+/**
+ * ipc_mmio_get_cp_version - Get the CP IPC version
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: version number on success and failure value on error.
+ */
+int ipc_mmio_get_cp_version(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_get_rom_exit_code - Get exit code from CP boot rom download app
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: exit code from CP boot rom download APP
+ */
+enum rom_exit_code ipc_mmio_get_rom_exit_code(struct iosm_mmio *ipc_mmio);
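+
+/* A hedged sketch (illustrative only), following how the PSI transfer path
+ * treats the exit code: only OPEN_EXT and CERT_EXT count as success, any
+ * other code is turned into a negative error value:
+ *
+ *	enum rom_exit_code code = ipc_mmio_get_rom_exit_code(ipc_mmio);
+ *
+ *	if (code != IMEM_ROM_EXIT_OPEN_EXT && code != IMEM_ROM_EXIT_CERT_EXT)
+ *		return -((int)code);
+ */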
+
+/**
+ * ipc_mmio_get_exec_stage - Query CP execution stage
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: CP execution stage
+ */
+enum ipc_mem_exec_stage ipc_mmio_get_exec_stage(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_get_ipc_state - Query CP IPC state
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: CP IPC state
+ */
+enum ipc_mem_device_ipc_state
+ipc_mmio_get_ipc_state(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_copy_chip_info - Copy size bytes of CP chip info structure
+ * into caller provided buffer
+ * @ipc_mmio: Pointer to mmio instance
+ * @dest: Pointer to caller provided buff
+ * @size: Number of bytes to copy
+ */
+void ipc_mmio_copy_chip_info(struct iosm_mmio *ipc_mmio, void *dest,
+ size_t size);
+
+/**
+ * ipc_mmio_config - Write context info and AP memory range addresses.
+ * This needs to be called when CP is in
+ * IPC_MEM_DEVICE_IPC_INIT state
+ *
+ * @ipc_mmio: Pointer to mmio instance
+ */
+void ipc_mmio_config(struct iosm_mmio *ipc_mmio);
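+
+/* Illustrative ordering sketch (not part of the driver): the context info
+ * address is cached first and only written to the scratchpad once CP reports
+ * the IPC_MEM_DEVICE_IPC_INIT state; context_info_phys_addr is a placeholder:
+ *
+ *	ipc_mmio_set_contex_info_addr(ipc_mmio, context_info_phys_addr);
+ *	...
+ *	if (ipc_mmio_get_ipc_state(ipc_mmio) == IPC_MEM_DEVICE_IPC_INIT)
+ *		ipc_mmio_config(ipc_mmio);
+ */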
+
+/**
+ * ipc_mmio_update_cp_capability - Read and update modem capability, from mmio
+ * capability offset
+ *
+ * @ipc_mmio: Pointer to mmio instance
+ */
+void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c
new file mode 100644
index 0000000000..fc928b298a
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_mux_codec.h"
+
+/* At the beginning of the runtime phase the IP MUX channel shall be created. */
+static int ipc_mux_channel_create(struct iosm_mux *ipc_mux)
+{
+ int channel_id;
+
+ channel_id = ipc_imem_channel_alloc(ipc_mux->imem, ipc_mux->instance_id,
+ IPC_CTYPE_WWAN);
+
+ if (channel_id < 0) {
+ dev_err(ipc_mux->dev,
+ "allocation of the MUX channel id failed");
+ ipc_mux->state = MUX_S_ERROR;
+ ipc_mux->event = MUX_E_NOT_APPLICABLE;
+ goto no_channel;
+ }
+
+ /* Establish the MUX channel in blocking mode. */
+ ipc_mux->channel = ipc_imem_channel_open(ipc_mux->imem, channel_id,
+ IPC_HP_NET_CHANNEL_INIT);
+
+ if (!ipc_mux->channel) {
+ dev_err(ipc_mux->dev, "ipc_imem_channel_open failed");
+ ipc_mux->state = MUX_S_ERROR;
+ ipc_mux->event = MUX_E_NOT_APPLICABLE;
+ return -ENODEV; /* MUX channel is not available. */
+ }
+
+ /* Define the MUX active state properties. */
+ ipc_mux->state = MUX_S_ACTIVE;
+ ipc_mux->event = MUX_E_NO_ORDERS;
+
+no_channel:
+ return channel_id;
+}
+
+/* Reset the session/if id state. */
+static void ipc_mux_session_free(struct iosm_mux *ipc_mux, int if_id)
+{
+ struct mux_session *if_entry;
+
+ if_entry = &ipc_mux->session[if_id];
+ /* Reset the session state. */
+ if_entry->wwan = NULL;
+}
+
+/* Create and send the session open command. */
+static struct mux_cmd_open_session_resp *
+ipc_mux_session_open_send(struct iosm_mux *ipc_mux, int if_id)
+{
+ struct mux_cmd_open_session_resp *open_session_resp;
+ struct mux_acb *acb = &ipc_mux->acb;
+ union mux_cmd_param param;
+
+ /* Add the open_session command to one ACB and start transmission. */
+ param.open_session.flow_ctrl = 0;
+ param.open_session.ipv4v6_hints = 0;
+ param.open_session.reserved2 = 0;
+ param.open_session.dl_head_pad_len = cpu_to_le32(IPC_MEM_DL_ETH_OFFSET);
+
+ /* Finish and transfer ACB. The user thread is suspended.
+ * It is a blocking function call that returns when CP responds or on timeout.
+ */
+ acb->wanted_response = MUX_CMD_OPEN_SESSION_RESP;
+ if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_OPEN_SESSION, if_id, 0,
+ &param, sizeof(param.open_session), true,
+ false) ||
+ acb->got_response != MUX_CMD_OPEN_SESSION_RESP) {
+ dev_err(ipc_mux->dev, "if_id %d: OPEN_SESSION send failed",
+ if_id);
+ return NULL;
+ }
+
+ open_session_resp = &ipc_mux->acb.got_param.open_session_resp;
+ if (open_session_resp->response != cpu_to_le32(MUX_CMD_RESP_SUCCESS)) {
+ dev_err(ipc_mux->dev,
+ "if_id %d,session open failed,response=%d", if_id,
+ open_session_resp->response);
+ return NULL;
+ }
+
+ return open_session_resp;
+}
+
+/* Open the first IP session. */
+static bool ipc_mux_session_open(struct iosm_mux *ipc_mux,
+ struct mux_session_open *session_open)
+{
+ struct mux_cmd_open_session_resp *open_session_resp;
+ int if_id;
+
+ /* Search for a free session interface id. */
+ if_id = le32_to_cpu(session_open->if_id);
+ if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
+ dev_err(ipc_mux->dev, "invalid interface id=%d", if_id);
+ return false;
+ }
+
+ /* Create and send the session open command.
+ * It is a blocking function call that returns when CP responds or on timeout.
+ */
+ open_session_resp = ipc_mux_session_open_send(ipc_mux, if_id);
+ if (!open_session_resp) {
+ ipc_mux_session_free(ipc_mux, if_id);
+ session_open->if_id = cpu_to_le32(-1);
+ return false;
+ }
+
+ /* Initialize the uplink skb accumulator. */
+ skb_queue_head_init(&ipc_mux->session[if_id].ul_list);
+
+ ipc_mux->session[if_id].dl_head_pad_len = IPC_MEM_DL_ETH_OFFSET;
+ ipc_mux->session[if_id].ul_head_pad_len =
+ le32_to_cpu(open_session_resp->ul_head_pad_len);
+ ipc_mux->session[if_id].wwan = ipc_mux->wwan;
+
+ /* Reset the flow ctrl stats of the session */
+ ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
+ ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
+ ipc_mux->session[if_id].ul_flow_credits = 0;
+ ipc_mux->session[if_id].net_tx_stop = false;
+ ipc_mux->session[if_id].flow_ctl_mask = 0;
+
+ /* Save and return the assigned if id. */
+ session_open->if_id = cpu_to_le32(if_id);
+ ipc_mux->nr_sessions++;
+
+ return true;
+}
+
+/* Free pending session UL packet. */
+static void ipc_mux_session_reset(struct iosm_mux *ipc_mux, int if_id)
+{
+ /* Reset the session/if id state. */
+ ipc_mux_session_free(ipc_mux, if_id);
+
+ /* Empty the uplink skb accumulator. */
+ skb_queue_purge(&ipc_mux->session[if_id].ul_list);
+}
+
+static void ipc_mux_session_close(struct iosm_mux *ipc_mux,
+ struct mux_session_close *msg)
+{
+ int if_id;
+
+ /* Copy the session interface id. */
+ if_id = le32_to_cpu(msg->if_id);
+
+ if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
+ dev_err(ipc_mux->dev, "invalid session id %d", if_id);
+ return;
+ }
+
+ /* Create and send the session close command.
+ * It is a blocking function call that returns when CP responds or on timeout.
+ */
+ if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_CLOSE_SESSION, if_id, 0,
+ NULL, 0, true, false))
+ dev_err(ipc_mux->dev, "if_id %d: CLOSE_SESSION send failed",
+ if_id);
+
+ /* Reset the flow ctrl stats of the session */
+ ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
+ ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
+ ipc_mux->session[if_id].flow_ctl_mask = 0;
+
+ ipc_mux_session_reset(ipc_mux, if_id);
+ ipc_mux->nr_sessions--;
+}
+
+static void ipc_mux_channel_close(struct iosm_mux *ipc_mux,
+ struct mux_channel_close *channel_close_p)
+{
+ int i;
+
+ /* Free pending session UL packet. */
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++)
+ if (ipc_mux->session[i].wwan)
+ ipc_mux_session_reset(ipc_mux, i);
+
+ ipc_imem_channel_close(ipc_mux->imem, ipc_mux->channel_id);
+
+ /* Reset the MUX object. */
+ ipc_mux->state = MUX_S_INACTIVE;
+ ipc_mux->event = MUX_E_INACTIVE;
+}
+
+/* CP has interrupted AP. If AP is in IP MUX mode, execute the pending ops. */
+static int ipc_mux_schedule(struct iosm_mux *ipc_mux, union mux_msg *msg)
+{
+ enum mux_event order;
+ bool success;
+ int ret = -EIO;
+
+ if (!ipc_mux->initialized) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ order = msg->common.event;
+
+ switch (ipc_mux->state) {
+ case MUX_S_INACTIVE:
+ if (order != MUX_E_MUX_SESSION_OPEN)
+ goto out; /* Wait for the request to open a session */
+
+ if (ipc_mux->event == MUX_E_INACTIVE)
+ /* Establish the MUX channel and the new state. */
+ ipc_mux->channel_id = ipc_mux_channel_create(ipc_mux);
+
+ if (ipc_mux->state != MUX_S_ACTIVE) {
+ ret = ipc_mux->channel_id; /* Missing the MUX channel */
+ goto out;
+ }
+
+ /* Disable the TD update timer and open the first IP session. */
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
+ ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
+ success = ipc_mux_session_open(ipc_mux, &msg->session_open);
+
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
+ if (success)
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ case MUX_S_ACTIVE:
+ switch (order) {
+ case MUX_E_MUX_SESSION_OPEN:
+ /* Disable the TD update timer and open a session */
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
+ ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
+ success = ipc_mux_session_open(ipc_mux,
+ &msg->session_open);
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
+ if (success)
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ case MUX_E_MUX_SESSION_CLOSE:
+ /* Release an IP session. */
+ ipc_mux->event = MUX_E_MUX_SESSION_CLOSE;
+ ipc_mux_session_close(ipc_mux, &msg->session_close);
+ if (!ipc_mux->nr_sessions) {
+ ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
+ ipc_mux_channel_close(ipc_mux,
+ &msg->channel_close);
+ }
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ case MUX_E_MUX_CHANNEL_CLOSE:
+ /* Close the MUX channel pipes. */
+ ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
+ ipc_mux_channel_close(ipc_mux, &msg->channel_close);
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ default:
+ /* Invalid order. */
+ goto out;
+ }
+
+ default:
+ dev_err(ipc_mux->dev,
+ "unexpected MUX transition: state=%d, event=%d",
+ ipc_mux->state, ipc_mux->event);
+ }
+out:
+ return ret;
+}
+
+struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
+ struct iosm_imem *imem)
+{
+ struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
+ int i, j, ul_tds, ul_td_size;
+ struct sk_buff_head *free_list;
+ struct sk_buff *skb;
+ int qlt_size;
+
+ if (!ipc_mux)
+ return NULL;
+
+ ipc_mux->protocol = mux_cfg->protocol;
+ ipc_mux->ul_flow = mux_cfg->ul_flow;
+ ipc_mux->instance_id = mux_cfg->instance_id;
+ ipc_mux->wwan_q_offset = 0;
+
+ ipc_mux->pcie = imem->pcie;
+ ipc_mux->imem = imem;
+ ipc_mux->ipc_protocol = imem->ipc_protocol;
+ ipc_mux->dev = imem->dev;
+ ipc_mux->wwan = imem->wwan;
+
+ /* Get the reference to the UL ADB list. */
+ free_list = &ipc_mux->ul_adb.free_list;
+
+ /* Initialize the list with free ADB. */
+ skb_queue_head_init(free_list);
+
+ ul_td_size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
+
+ ul_tds = IPC_MEM_MAX_TDS_MUX_LITE_UL;
+
+ ipc_mux->ul_adb.dest_skb = NULL;
+
+ ipc_mux->initialized = true;
+ ipc_mux->adb_prep_ongoing = false;
+ ipc_mux->size_needed = 0;
+ ipc_mux->ul_data_pend_bytes = 0;
+ ipc_mux->state = MUX_S_INACTIVE;
+ ipc_mux->ev_mux_net_transmit_pending = false;
+ ipc_mux->tx_transaction_id = 0;
+ ipc_mux->rr_next_session = 0;
+ ipc_mux->event = MUX_E_INACTIVE;
+ ipc_mux->channel_id = -1;
+ ipc_mux->channel = NULL;
+
+ if (ipc_mux->protocol != MUX_LITE) {
+ qlt_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
+ ipc_mux->ul_adb.pp_qlt[i] = kzalloc(qlt_size,
+ GFP_ATOMIC);
+ if (!ipc_mux->ul_adb.pp_qlt[i]) {
+ for (j = i - 1; j >= 0; j--)
+ kfree(ipc_mux->ul_adb.pp_qlt[j]);
+ kfree(ipc_mux);
+ return NULL;
+ }
+ }
+
+ ul_td_size = IPC_MEM_MAX_UL_ADB_BUF_SIZE;
+ ul_tds = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
+ }
+
+ /* Allocate the list of UL ADB. */
+ for (i = 0; i < ul_tds; i++) {
+ dma_addr_t mapping;
+
+ skb = ipc_pcie_alloc_skb(ipc_mux->pcie, ul_td_size, GFP_ATOMIC,
+ &mapping, DMA_TO_DEVICE, 0);
+ if (!skb) {
+ ipc_mux_deinit(ipc_mux);
+ return NULL;
+ }
+ /* Extend the UL ADB list. */
+ skb_queue_tail(free_list, skb);
+ }
+
+ return ipc_mux;
+}
+
+/* Informs the network stack to restart transmission for all opened sessions if
+ * Flow Control is not ON for that session.
+ */
+static void ipc_mux_restart_tx_for_all_sessions(struct iosm_mux *ipc_mux)
+{
+ struct mux_session *session;
+ int idx;
+
+ for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
+ session = &ipc_mux->session[idx];
+
+ if (!session->wwan)
+ continue;
+
+ /* If flow control of the session is OFF and if there was tx
+ * stop then restart. Inform the network interface to restart
+ * sending data.
+ */
+ if (session->flow_ctl_mask == 0) {
+ session->net_tx_stop = false;
+ ipc_mux_netif_tx_flowctrl(session, idx, false);
+ }
+ }
+}
+
+/* Informs the network stack to stop sending further packets for all opened
+ * sessions.
+ */
+static void ipc_mux_stop_netif_for_all_sessions(struct iosm_mux *ipc_mux)
+{
+ struct mux_session *session;
+ int idx;
+
+ for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
+ session = &ipc_mux->session[idx];
+
+ if (!session->wwan)
+ continue;
+
+ ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
+ }
+}
+
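+/* Restart netif TX for all sessions once the pending UL bytes drop below the
+ * low watermark again.
+ */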
+void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux)
+{
+ if (ipc_mux->ul_flow == MUX_UL) {
+ int low_thresh = IPC_MEM_MUX_UL_FLOWCTRL_LOW_B;
+
+ if (ipc_mux->ul_data_pend_bytes < low_thresh)
+ ipc_mux_restart_tx_for_all_sessions(ipc_mux);
+ }
+}
+
+int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux)
+{
+ return ipc_mux ? IPC_MEM_MUX_IP_SESSION_ENTRIES : -EFAULT;
+}
+
+enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux)
+{
+ return ipc_mux ? ipc_mux->protocol : MUX_UNKNOWN;
+}
+
+int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr)
+{
+ struct mux_session_open *session_open;
+ union mux_msg mux_msg;
+
+ session_open = &mux_msg.session_open;
+ session_open->event = MUX_E_MUX_SESSION_OPEN;
+
+ session_open->if_id = cpu_to_le32(session_nr);
+ ipc_mux->session[session_nr].flags |= IPC_MEM_WWAN_MUX;
+ return ipc_mux_schedule(ipc_mux, &mux_msg);
+}
+
+int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr)
+{
+ struct mux_session_close *session_close;
+ union mux_msg mux_msg;
+ int ret_val;
+
+ session_close = &mux_msg.session_close;
+ session_close->event = MUX_E_MUX_SESSION_CLOSE;
+
+ session_close->if_id = cpu_to_le32(session_nr);
+ ret_val = ipc_mux_schedule(ipc_mux, &mux_msg);
+ ipc_mux->session[session_nr].flags &= ~IPC_MEM_WWAN_MUX;
+
+ return ret_val;
+}
+
+void ipc_mux_deinit(struct iosm_mux *ipc_mux)
+{
+ struct mux_channel_close *channel_close;
+ struct sk_buff_head *free_list;
+ union mux_msg mux_msg;
+ struct sk_buff *skb;
+
+ if (!ipc_mux->initialized)
+ return;
+ ipc_mux_stop_netif_for_all_sessions(ipc_mux);
+
+ if (ipc_mux->state == MUX_S_ACTIVE) {
+ channel_close = &mux_msg.channel_close;
+ channel_close->event = MUX_E_MUX_CHANNEL_CLOSE;
+ ipc_mux_schedule(ipc_mux, &mux_msg);
+ }
+
+ /* Empty the ADB free list. */
+ free_list = &ipc_mux->ul_adb.free_list;
+
+	/* Dequeue and free all skbs on the UL ADB free list. */
+ while ((skb = skb_dequeue(free_list)))
+ ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
+
+ if (ipc_mux->channel) {
+ ipc_mux->channel->ul_pipe.is_open = false;
+ ipc_mux->channel->dl_pipe.is_open = false;
+ }
+
+ kfree(ipc_mux);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h
new file mode 100644
index 0000000000..17ca8d1f93
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_MUX_H
+#define IOSM_IPC_MUX_H
+
+#include "iosm_ipc_protocol.h"
+
+#define IPC_MEM_MAX_UL_DG_ENTRIES 100
+#define IPC_MEM_MAX_TDS_MUX_AGGR_UL 60
+#define IPC_MEM_MAX_TDS_MUX_AGGR_DL 60
+
+#define IPC_MEM_MAX_ADB_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
+#define IPC_MEM_MAX_DL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
+
+/* Size of the IP MUX Lite data buffer. */
+#define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024)
+
+/* TD counts for IP MUX Lite */
+#define IPC_MEM_MAX_TDS_MUX_LITE_UL 800
+#define IPC_MEM_MAX_TDS_MUX_LITE_DL 1200
+
+/* open session request (AP->CP) */
+#define MUX_CMD_OPEN_SESSION 1
+
+/* response to open session request (CP->AP) */
+#define MUX_CMD_OPEN_SESSION_RESP 2
+
+/* close session request (AP->CP) */
+#define MUX_CMD_CLOSE_SESSION 3
+
+/* response to close session request (CP->AP) */
+#define MUX_CMD_CLOSE_SESSION_RESP 4
+
+/* Flow control command with mask of the flow per queue/flow. */
+#define MUX_LITE_CMD_FLOW_CTL 5
+
+/* ACK the flow control command. Shall have the same Transaction ID as the
+ * matching FLOW_CTL command.
+ */
+#define MUX_LITE_CMD_FLOW_CTL_ACK 6
+
+/* Command for report packet indicating link quality metrics. */
+#define MUX_LITE_CMD_LINK_STATUS_REPORT 7
+
+/* Response to a report packet */
+#define MUX_LITE_CMD_LINK_STATUS_REPORT_RESP 8
+
+/* Used to reset a command/response state. */
+#define MUX_CMD_INVALID 255
+
+/* command response : command processed successfully */
+#define MUX_CMD_RESP_SUCCESS 0
+
+/* MUX for route link devices */
+#define IPC_MEM_WWAN_MUX BIT(0)
+
+/* Initiated actions to change the state of the MUX object. */
+enum mux_event {
+ MUX_E_INACTIVE, /* No initiated actions. */
+ MUX_E_MUX_SESSION_OPEN, /* Create the MUX channel and a session. */
+ MUX_E_MUX_SESSION_CLOSE, /* Release a session. */
+ MUX_E_MUX_CHANNEL_CLOSE, /* Release the MUX channel. */
+ MUX_E_NO_ORDERS, /* No MUX order. */
+	MUX_E_NOT_APPLICABLE, /* Defective IP MUX. */
+};
+
+/* MUX session open command. */
+struct mux_session_open {
+ enum mux_event event;
+ __le32 if_id;
+};
+
+/* MUX session close command. */
+struct mux_session_close {
+ enum mux_event event;
+ __le32 if_id;
+};
+
+/* MUX channel close command. */
+struct mux_channel_close {
+ enum mux_event event;
+};
+
+/* Default message type to find out the right message type. */
+struct mux_common {
+ enum mux_event event;
+};
+
+/* List of ops in MUX mode. */
+union mux_msg {
+ struct mux_session_open session_open;
+ struct mux_session_close session_close;
+ struct mux_channel_close channel_close;
+ struct mux_common common;
+};
+
+/* Parameter definition of the open session command. */
+struct mux_cmd_open_session {
+	u8 flow_ctrl; /* 0: Flow control disabled (flow allowed). */
+	/* 1: Flow control enabled (flow not allowed). */
+	u8 ipv4v6_hints; /* 0: IPv4/IPv6 hints not supported. */
+	/* 1: IPv4/IPv6 hints supported. */
+ __le16 reserved2; /* Reserved. Set to zero. */
+ __le32 dl_head_pad_len; /* Maximum length supported */
+ /* for DL head padding on a datagram. */
+};
+
+/* Parameter definition of the open session response. */
+struct mux_cmd_open_session_resp {
+ __le32 response; /* Response code */
+ u8 flow_ctrl; /* 0: Flow control disabled (flow allowed). */
+ /* 1: Flow control enabled (flow not allowed) */
+ u8 ipv4v6_hints; /* 0: IPv4/IPv6 hints not supported */
+ /* 1: IPv4/IPv6 hints supported */
+ __le16 reserved2; /* Reserved. Set to zero. */
+	__le32 ul_head_pad_len; /* Actual length supported for */
+	/* UL head padding on a datagram. */
+};
+
+/* Parameter definition of the close session response code */
+struct mux_cmd_close_session_resp {
+ __le32 response;
+};
+
+/* Parameter definition of the flow control command. */
+struct mux_cmd_flow_ctl {
+ __le32 mask; /* indicating the desired flow control */
+ /* state for various flows/queues */
+};
+
+/* Parameter definition of the link status report code*/
+struct mux_cmd_link_status_report {
+ u8 payload;
+};
+
+/* Parameter definition of the link status report response code. */
+struct mux_cmd_link_status_report_resp {
+ __le32 response;
+};
+
+/**
+ * union mux_cmd_param - Union-definition of the command parameters.
+ * @open_session: Inband command for open session
+ * @open_session_resp: Inband command for open session response
+ * @close_session_resp: Inband command for close session response
+ * @flow_ctl: In-band flow control on the opened interfaces
+ * @link_status: In-band Link Status Report
+ * @link_status_resp: In-band command for link status report response
+ */
+union mux_cmd_param {
+ struct mux_cmd_open_session open_session;
+ struct mux_cmd_open_session_resp open_session_resp;
+ struct mux_cmd_close_session_resp close_session_resp;
+ struct mux_cmd_flow_ctl flow_ctl;
+ struct mux_cmd_link_status_report link_status;
+ struct mux_cmd_link_status_report_resp link_status_resp;
+};
+
+/* States of the MUX object. */
+enum mux_state {
+ MUX_S_INACTIVE, /* IP MUX is unused. */
+ MUX_S_ACTIVE, /* IP MUX channel is available. */
+	MUX_S_ERROR, /* Defective IP MUX. */
+};
+
+/* Supported MUX protocols. */
+enum ipc_mux_protocol {
+ MUX_UNKNOWN,
+ MUX_LITE,
+ MUX_AGGREGATION,
+};
+
+/* Supported UL data transfer methods. */
+enum ipc_mux_ul_flow {
+ MUX_UL_UNKNOWN,
+ MUX_UL, /* Normal UL data transfer */
+ MUX_UL_ON_CREDITS, /* UL data transfer will be based on credits */
+};
+
+/* List of the MUX session. */
+struct mux_session {
+	struct iosm_wwan *wwan; /* Network i/f used for communication */
+ int if_id; /* i/f id for session open message.*/
+ u32 flags;
+ u32 ul_head_pad_len; /* Nr of bytes for UL head padding. */
+ u32 dl_head_pad_len; /* Nr of bytes for DL head padding. */
+ struct sk_buff_head ul_list; /* skb entries for an ADT. */
+ u32 flow_ctl_mask; /* UL flow control */
+ u32 flow_ctl_en_cnt; /* Flow control Enable cmd count */
+ u32 flow_ctl_dis_cnt; /* Flow Control Disable cmd count */
+ int ul_flow_credits; /* UL flow credits */
+ u8 net_tx_stop:1,
+ flush:1; /* flush net interface ? */
+};
+
+/**
+ * struct mux_adth_dg - Structure of the datagram in the Aggregated Datagram
+ * Table Header.
+ * @datagram_index: Index (in bytes) to the k-th datagram in the table.
+ * Index shall count from the start of the block including
+ * the 16-byte header. This value shall be non-zero.
+ * @datagram_length: Length of the k-th datagram including the head padding.
+ * This value shall be non-zero.
+ * @service_class: Service class identifier for the datagram.
+ * @reserved: Reserved bytes. Set to zero
+ */
+struct mux_adth_dg {
+ __le32 datagram_index;
+ __le16 datagram_length;
+ u8 service_class;
+ u8 reserved;
+};
+
+/**
+ * struct mux_qlth_ql - Structure of the queue level in the Aggregated
+ * Datagram Queue Level Table Header.
+ * @nr_of_bytes: Number of bytes available to transmit in the queue.
+ */
+struct mux_qlth_ql {
+ __le32 nr_of_bytes;
+};
+
+/**
+ * struct mux_qlth - Structure of Aggregated Datagram Queue Level Table
+ * Header.
+ * @signature: Signature of the Queue Level Table Header
+ * Value: 0x48544C51 (ASCII characters: 'Q' 'L' 'T' 'H')
+ * @table_length:	Length (in bytes) of the queue level table. This length
+ *			shall include the queue level table header size.
+ *			Minimum value: 0x10
+ * @if_id: ID of the interface the queue levels in the table
+ * belong to.
+ * @reserved: Reserved byte. Set to zero.
+ * @next_table_index: Index (in bytes) to the next table in the buffer. Index
+ * shall count from the start of the block including the
+ * 16-byte header. Value of zero indicates end of the list.
+ * @reserved2: Reserved bytes. Set to zero
+ * @ql: Queue level table with variable length
+ */
+struct mux_qlth {
+ __le32 signature;
+ __le16 table_length;
+ u8 if_id;
+ u8 reserved;
+ __le32 next_table_index;
+ __le32 reserved2;
+ struct mux_qlth_ql ql;
+};
+
+/**
+ * struct mux_adb - Structure of State of a single UL data block.
+ * @dest_skb: Current UL skb for the data block.
+ * @buf: ADB memory
+ * @adgh: ADGH pointer
+ * @qlth_skb: QLTH pointer
+ * @next_table_index: Pointer to next table index.
+ * @free_list: List of allocated ADBs for the UL session.
+ * @size: Size of the ADB memory.
+ * @if_cnt: Statistic counter
+ * @dg_cnt_total: Datagram count total
+ * @payload_size: Payload Size
+ * @dg: Datagram table.
+ * @pp_qlt: Pointers to hold Queue Level Tables of session
+ * @adbh: ADBH pointer
+ * @qlt_updated: Queue level table updated
+ * @dg_count: Datagram count
+ */
+struct mux_adb {
+ struct sk_buff *dest_skb;
+ u8 *buf;
+ struct mux_adgh *adgh;
+ struct sk_buff *qlth_skb;
+ u32 *next_table_index;
+ struct sk_buff_head free_list;
+ int size;
+ u32 if_cnt;
+ u32 dg_cnt_total;
+ u32 payload_size;
+ struct mux_adth_dg
+ dg[IPC_MEM_MUX_IP_SESSION_ENTRIES][IPC_MEM_MAX_UL_DG_ENTRIES];
+ struct mux_qlth *pp_qlt[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ struct mux_adbh *adbh;
+ u32 qlt_updated[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ u32 dg_count[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+};
+
+/**
+ * struct mux_acb - Structure of Temporary ACB state.
+ * @skb: Used UL skb.
+ * @if_id: Session id.
+ * @buf_p: Command buffer.
+ * @wanted_response: Wanted Response
+ * @got_response: Got response
+ * @cmd: command
+ * @got_param: Received command/response parameter
+ */
+struct mux_acb {
+ struct sk_buff *skb; /* Used UL skb. */
+ int if_id; /* Session id. */
+ u8 *buf_p;
+ u32 wanted_response;
+ u32 got_response;
+ u32 cmd;
+ union mux_cmd_param got_param; /* Received command/response parameter */
+};
+
+/**
+ * struct iosm_mux - Structure of the data multiplexing over an IP channel.
+ * @dev: Pointer to device structure
+ * @session: Array of the MUX sessions.
+ * @channel: Reference to the IP MUX channel
+ * @pcie: Pointer to iosm_pcie struct
+ * @imem: Pointer to iosm_imem
+ * @wwan: Pointer to iosm_wwan
+ * @ipc_protocol: Pointer to iosm_protocol
+ * @channel_id: Channel ID for MUX
+ * @protocol: Type of the MUX protocol
+ * @ul_flow: UL Flow type
+ * @nr_sessions: Number of sessions
+ * @instance_id: Instance ID
+ * @state: States of the MUX object
+ * @event: Initiated actions to change the state of the MUX object
+ * @tx_transaction_id: Transaction id for the ACB command.
+ * @rr_next_session: Next session number for round robin.
+ * @ul_adb: State of the UL ADB/ADGH.
+ * @size_needed: Variable to store the size needed during ADB preparation
+ * @ul_data_pend_bytes: Pending UL data to be processed in bytes
+ * @acb: Temporary ACB state
+ * @wwan_q_offset: This will hold the offset of the given instance
+ * Useful while passing or receiving packets from
+ * wwan/imem layer.
+ * @acb_tx_sequence_nr: Sequence number for the ACB header.
+ * @adb_tx_sequence_nr: Sequence number for ADB header
+ * @acc_adb_size: Statistic data for logging
+ * @acc_payload_size: Statistic data for logging
+ * @initialized: MUX object is initialized
+ * @ev_mux_net_transmit_pending:
+ *			Debounce flag; set while a transmit task is queued
+ *			to pass the accumulated uplink ADB to CP.
+ * @adb_prep_ongoing: Flag for ADB preparation status
+ */
+struct iosm_mux {
+ struct device *dev;
+ struct mux_session session[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ struct ipc_mem_channel *channel;
+ struct iosm_pcie *pcie;
+ struct iosm_imem *imem;
+ struct iosm_wwan *wwan;
+ struct iosm_protocol *ipc_protocol;
+ int channel_id;
+ enum ipc_mux_protocol protocol;
+ enum ipc_mux_ul_flow ul_flow;
+ int nr_sessions;
+ int instance_id;
+ enum mux_state state;
+ enum mux_event event;
+ u32 tx_transaction_id;
+ int rr_next_session;
+ struct mux_adb ul_adb;
+ int size_needed;
+ long long ul_data_pend_bytes;
+ struct mux_acb acb;
+ int wwan_q_offset;
+ u16 acb_tx_sequence_nr;
+ u16 adb_tx_sequence_nr;
+ unsigned long long acc_adb_size;
+ unsigned long long acc_payload_size;
+ u8 initialized:1,
+ ev_mux_net_transmit_pending:1,
+ adb_prep_ongoing;
+} __packed;
+
+/* MUX configuration structure */
+struct ipc_mux_config {
+ enum ipc_mux_protocol protocol;
+ enum ipc_mux_ul_flow ul_flow;
+ int instance_id;
+};
+
+/**
+ * ipc_mux_init - Allocates and Init MUX instance
+ * @mux_cfg: Pointer to MUX configuration structure
+ * @ipc_imem: Pointer to imem data-struct
+ *
+ * Returns: Initialized mux pointer on success else NULL
+ */
+struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
+ struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_mux_deinit - Deallocates MUX instance
+ * @ipc_mux: Pointer to the MUX instance.
+ */
+void ipc_mux_deinit(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_check_n_restart_tx - Checks for pending UL data bytes and then
+ *				restarts the net interface tx queue if the
+ *				device has flow control off.
+ * @ipc_mux: Pointer to MUX data-struct
+ */
+void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_get_active_protocol - Returns the active MUX protocol type.
+ * @ipc_mux: Pointer to MUX data-struct
+ *
+ * Returns: enum of type ipc_mux_protocol
+ */
+enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_open_session - Opens a MUX session for IP traffic.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @session_nr: Interface ID or session number
+ *
+ * Returns: channel id on success, failure value on error
+ */
+int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr);
+
+/**
+ * ipc_mux_close_session - Closes a MUX session.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @session_nr: Interface ID or session number
+ *
+ * Returns: channel id on success, failure value on error
+ */
+int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr);
+
+/**
+ * ipc_mux_get_max_sessions - Returns the maximum sessions supported on the
+ *			      provided MUX instance.
+ * @ipc_mux: Pointer to MUX data-struct
+ *
+ * Returns: Number of sessions supported on success and failure value on error
+ */
+int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux);
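+
+/*
+ * Typical call sequence (illustrative sketch only; error handling is omitted
+ * and a valid struct iosm_imem pointer "ipc_imem" is assumed):
+ *
+ *	struct ipc_mux_config cfg = {
+ *		.protocol    = MUX_AGGREGATION,
+ *		.ul_flow     = MUX_UL,
+ *		.instance_id = 0,
+ *	};
+ *	struct iosm_mux *ipc_mux = ipc_mux_init(&cfg, ipc_imem);
+ *
+ *	ipc_mux_open_session(ipc_mux, 0);
+ *	...
+ *	ipc_mux_close_session(ipc_mux, 0);
+ *	ipc_mux_deinit(ipc_mux);
+ */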
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
new file mode 100644
index 0000000000..bff46f7ca5
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -0,0 +1,1553 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/nospec.h>
+
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_mux_codec.h"
+#include "iosm_ipc_task_queue.h"
+
+/* Test the link power state and send a MUX command in blocking mode. */
+static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
+ size_t size)
+{
+ struct iosm_mux *ipc_mux = ipc_imem->mux;
+ const struct mux_acb *acb = msg;
+
+ skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
+ ipc_imem_ul_send(ipc_mux->imem);
+
+ return 0;
+}
+
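+/* Queue the prepared ACB for uplink transfer via the IPC task queue and, if
+ * requested, block until the modem response arrives or the command times out.
+ */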
+static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
+{
+ struct completion *completion = &ipc_mux->channel->ul_sem;
+ int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
+ 0, &ipc_mux->acb,
+ sizeof(ipc_mux->acb), false);
+ if (ret) {
+ dev_err(ipc_mux->dev, "unable to send mux command");
+ return ret;
+ }
+
+	/* If blocking, suspend the caller and wait for the response irq.
+	 * Return -ETIMEDOUT on timeout to indicate failure.
+	 */
+ if (blocking) {
+ u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;
+
+ reinit_completion(completion);
+
+ if (wait_for_completion_interruptible_timeout
+ (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
+ 0) {
+ dev_err(ipc_mux->dev, "ch[%d] timeout",
+ ipc_mux->channel_id);
+ ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/* Initialize the command header. */
+static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ struct mux_acbh *header;
+
+ header = (struct mux_acbh *)(acb->skb)->data;
+ header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
+ header->first_cmd_index = header->block_length;
+ header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
+ header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
+}
+
+/* Add a command to the ACB. */
+static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
+ void *param, u32 param_size)
+{
+ struct mux_acbh *header;
+ struct mux_cmdh *cmdh;
+ struct mux_acb *acb;
+
+ acb = &ipc_mux->acb;
+ header = (struct mux_acbh *)(acb->skb)->data;
+ cmdh = (struct mux_cmdh *)
+ ((acb->skb)->data + le32_to_cpu(header->block_length));
+
+ cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
+ cmdh->command_type = cpu_to_le32(cmd);
+ cmdh->if_id = acb->if_id;
+
+ acb->cmd = cmd;
+ cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
+ param_size);
+ cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
+ if (param)
+ memcpy(&cmdh->param, param, param_size);
+
+ skb_put(acb->skb, le32_to_cpu(header->block_length) +
+ le16_to_cpu(cmdh->cmd_len));
+
+ return cmdh;
+}
+
+/* Prepare mux Command */
+static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
+ u32 cmd, struct mux_acb *acb,
+ void *param, u32 param_size)
+{
+ struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;
+
+ cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
+ cmdh->command_type = cpu_to_le32(cmd);
+ cmdh->if_id = acb->if_id;
+
+ acb->cmd = cmd;
+
+ cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
+ param_size);
+ cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
+
+ if (param)
+ memcpy(&cmdh->param, param, param_size);
+
+ skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));
+
+ return cmdh;
+}
+
+static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ /* Allocate skb memory for the uplink buffer. */
+ skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
+ GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Save the skb address. */
+ acb->skb = skb;
+
+ memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);
+
+ return 0;
+}
+
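+/* Allocate an ACB and encode a MUX command or response (Lite or aggregated),
+ * then hand it to the uplink path.
+ */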
+int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
+ u32 transaction_id, union mux_cmd_param *param,
+ size_t res_size, bool blocking, bool respond)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ union mux_type_cmdh cmdh;
+ int ret = 0;
+
+ acb->if_id = if_id;
+ ret = ipc_mux_acb_alloc(ipc_mux);
+ if (ret)
+ return ret;
+
+ if (ipc_mux->protocol == MUX_LITE) {
+ cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
+ param, res_size);
+
+ if (respond)
+ cmdh.ack_lite->transaction_id =
+ cpu_to_le32(transaction_id);
+ } else {
+ /* Initialize the ACB header. */
+ ipc_mux_acb_init(ipc_mux);
+ cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
+ res_size);
+
+ if (respond)
+ cmdh.ack_aggr->transaction_id =
+ cpu_to_le32(transaction_id);
+ }
+ ret = ipc_mux_acb_send(ipc_mux, blocking);
+
+ return ret;
+}
+
+void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
+{
+ /* Inform the network interface to start/stop flow ctrl */
+ ipc_wwan_tx_flowctrl(session->wwan, idx, on);
+}
+
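+/* Process a DL command response and wake up the thread waiting in
+ * ipc_mux_acb_send().
+ */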
+static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
+ union mux_cmd_param param,
+ __le32 command_type, u8 if_id,
+ __le32 transaction_id)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+
+ switch (le32_to_cpu(command_type)) {
+ case MUX_CMD_OPEN_SESSION_RESP:
+ case MUX_CMD_CLOSE_SESSION_RESP:
+ /* Resume the control application. */
+ acb->got_param = param;
+ break;
+
+ case MUX_LITE_CMD_FLOW_CTL_ACK:
+		/* This command type is not expected as a response for the
+		 * Aggregation version of the protocol, so return non-zero.
+		 */
+ if (ipc_mux->protocol != MUX_LITE)
+ return -EINVAL;
+
+ dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
+ if_id, le32_to_cpu(transaction_id));
+ break;
+
+ case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
+		/* This command type is not expected as a response for the
+		 * Lite version of the protocol, so return non-zero.
+		 */
+ if (ipc_mux->protocol == MUX_LITE)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ acb->wanted_response = MUX_CMD_INVALID;
+ acb->got_response = le32_to_cpu(command_type);
+ complete(&ipc_mux->channel->ul_sem);
+
+ return 0;
+}
+
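+/* Process an unsolicited DL command such as flow control or a link status
+ * report.
+ */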
+static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
+ union mux_cmd_param *param,
+ __le32 command_type, u8 if_id,
+ __le16 cmd_len, int size)
+{
+ struct mux_session *session;
+ struct hrtimer *adb_timer;
+
+ dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
+ if_id, le32_to_cpu(command_type));
+
+ switch (le32_to_cpu(command_type)) {
+ case MUX_LITE_CMD_FLOW_CTL:
+ case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:
+
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
+ dev_err(ipc_mux->dev, "if_id [%d] not valid",
+ if_id);
+ return -EINVAL; /* No session interface id. */
+ }
+
+ session = &ipc_mux->session[if_id];
+ adb_timer = &ipc_mux->imem->adb_timer;
+
+ if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
+ /* Backward Compatibility */
+ if (cmd_len == cpu_to_le16(size))
+ session->flow_ctl_mask =
+ le32_to_cpu(param->flow_ctl.mask);
+ else
+ session->flow_ctl_mask = ~0;
+ /* if CP asks for FLOW CTRL Enable
+ * then set our internal flow control Tx flag
+ * to limit uplink session queueing
+ */
+ session->net_tx_stop = true;
+
+ /* We have to call Finish ADB here.
+ * Otherwise any already queued data
+ * will be sent to CP when ADB is full
+ * for some other sessions.
+ */
+ if (ipc_mux->protocol == MUX_AGGREGATION) {
+ ipc_mux_ul_adb_finish(ipc_mux);
+ ipc_imem_hrtimer_stop(adb_timer);
+ }
+ /* Update the stats */
+ session->flow_ctl_en_cnt++;
+ } else if (param->flow_ctl.mask == 0) {
+ /* Just reset the Flow control mask and let
+ * mux_flow_ctrl_low_thre_b take control on
+ * our internal Tx flag and enabling kernel
+ * flow control
+ */
+ dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
+ if_id, le32_to_cpu(param->flow_ctl.mask));
+ /* Backward Compatibility */
+ if (cmd_len == cpu_to_le16(size))
+ session->flow_ctl_mask =
+ le32_to_cpu(param->flow_ctl.mask);
+ else
+ session->flow_ctl_mask = 0;
+ /* Update the stats */
+ session->flow_ctl_dis_cnt++;
+ } else {
+ break;
+ }
+
+ ipc_mux->acc_adb_size = 0;
+ ipc_mux->acc_payload_size = 0;
+
+ dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
+ le32_to_cpu(param->flow_ctl.mask));
+ break;
+
+ case MUX_LITE_CMD_LINK_STATUS_REPORT:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Decode and Send appropriate response to a command block. */
+static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
+ __le32 trans_id = cmdh->transaction_id;
+ int size;
+
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
+ cmdh->command_type, cmdh->if_id,
+ cmdh->transaction_id)) {
+		/* Failure to decode the command response indicates that the
+		 * cmd_type may be a command instead of a response, so try
+		 * decoding it as a command.
+		 */
+ size = offsetof(struct mux_lite_cmdh, param) +
+ sizeof(cmdh->param.flow_ctl);
+ if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
+ cmdh->command_type,
+ cmdh->if_id,
+ cmdh->cmd_len, size)) {
+ /* Decoded command may need a response. Give the
+ * response according to the command type.
+ */
+ union mux_cmd_param *mux_cmd = NULL;
+ size_t size = 0;
+ u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;
+
+ if (cmdh->command_type ==
+ cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
+ mux_cmd = &cmdh->param;
+ mux_cmd->link_status_resp.response =
+ cpu_to_le32(MUX_CMD_RESP_SUCCESS);
+ /* response field is u32 */
+ size = sizeof(u32);
+ } else if (cmdh->command_type ==
+ cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
+ cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
+ } else {
+ return;
+ }
+
+ if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
+ le32_to_cpu(trans_id),
+ mux_cmd, size, false,
+ true))
+ dev_err(ipc_mux->dev,
+ "if_id %d: cmd send failed",
+ cmdh->if_id);
+ }
+ }
+}
+
+/* Pass the DL packet to the netif layer. */
+static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
+ struct iosm_wwan *wwan, u32 offset,
+ u8 service_class, struct sk_buff *skb,
+ u32 pkt_len)
+{
+ struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);
+
+ if (!dest_skb)
+ return -ENOMEM;
+
+ skb_pull(dest_skb, offset);
+ skb_trim(dest_skb, pkt_len);
+ /* Pass the packet to the netif layer. */
+ dest_skb->priority = service_class;
+
+ return ipc_wwan_receive(wwan, dest_skb, false, if_id);
+}
+
+/* Decode Flow Credit Table in the block */
+static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
+ unsigned char *block)
+{
+ struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
+ struct iosm_wwan *wwan;
+ int ul_credits;
+ int if_id;
+
+ if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
+ dev_err(ipc_mux->dev, "unexpected FCT length: %d",
+ fct->vfl_length);
+ return;
+ }
+
+ if_id = fct->if_id;
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
+ dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
+ return;
+ }
+
+ /* Is the session active ? */
+ if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan) {
+ dev_err(ipc_mux->dev, "session Net ID is NULL");
+ return;
+ }
+
+ ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);
+
+ dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
+ if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
+
+ /* Update the Flow Credit information from ADB */
+ ipc_mux->session[if_id].ul_flow_credits += ul_credits;
+
+ /* Check whether the TX can be started */
+ if (ipc_mux->session[if_id].ul_flow_credits > 0) {
+ ipc_mux->session[if_id].net_tx_stop = false;
+ ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
+ ipc_mux->session[if_id].if_id, false);
+ }
+}
+
+/* Decode non-aggregated datagram */
+static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
+ struct sk_buff *skb)
+{
+ u32 pad_len, packet_offset, adgh_len;
+ struct iosm_wwan *wwan;
+ struct mux_adgh *adgh;
+ u8 *block = skb->data;
+ int rc = 0;
+ u8 if_id;
+
+ adgh = (struct mux_adgh *)block;
+
+ if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
+ dev_err(ipc_mux->dev, "invalid ADGH signature received");
+ return;
+ }
+
+ if_id = adgh->if_id;
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
+ dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
+ return;
+ }
+
+ /* Is the session active ? */
+ if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan) {
+ dev_err(ipc_mux->dev, "session Net ID is NULL");
+ return;
+ }
+
+	/* Store the pad len for the corresponding session.
+	 * The pad bytes are those negotiated in the open session, less the
+	 * header size (see the session management chapter for details).
+	 * If the resulting padding is zero or less, the additional head
+	 * padding is omitted: e.g. for HEAD_PAD_LEN = 16 or less this field
+	 * is omitted; for HEAD_PAD_LEN = 20 this field will have 4 bytes
+	 * set to zero.
+	 */
+ pad_len =
+ ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+ packet_offset = sizeof(*adgh) + pad_len;
+
+ if_id += ipc_mux->wwan_q_offset;
+ adgh_len = le16_to_cpu(adgh->length);
+
+ /* Pass the packet to the netif layer */
+ rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
+ adgh->service_class, skb,
+ adgh_len - packet_offset);
+ if (rc) {
+ dev_err(ipc_mux->dev, "mux adgh decoding error");
+ return;
+ }
+ ipc_mux->session[if_id].flush = 1;
+}
+
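+/* Decode a command received within an ACB and send the matching response
+ * or ACK.
+ */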
+static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
+ struct mux_cmdh *cmdh, int size)
+{
+ u32 link_st = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
+ u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
+ u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
+ u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
+ union mux_cmd_param *cmd_p = NULL;
+ u32 cmd = link_st;
+ u32 trans_id;
+
+ if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
+ cmdh->command_type, cmdh->if_id,
+ cmdh->cmd_len, size)) {
+ size = 0;
+ if (cmdh->command_type == cpu_to_le32(link_st)) {
+ cmd_p = &cmdh->param;
+ cmd_p->link_status_resp.response = MUX_CMD_RESP_SUCCESS;
+ } else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
+ (cmdh->command_type == cpu_to_le32(fctl_dis))) {
+ cmd = fctl_ack;
+ } else {
+ return;
+ }
+ trans_id = le32_to_cpu(cmdh->transaction_id);
+ ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
+ trans_id, cmd_p, size, false, true);
+ }
+}
+
+/* Decode an aggregated command block. */
+static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ struct mux_acbh *acbh;
+ struct mux_cmdh *cmdh;
+ u32 next_cmd_index;
+ u8 *block;
+ int size;
+
+ acbh = (struct mux_acbh *)(skb->data);
+ block = (u8 *)(skb->data);
+
+ next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
+ next_cmd_index = array_index_nospec(next_cmd_index,
+ sizeof(struct mux_cmdh));
+
+ while (next_cmd_index != 0) {
+ cmdh = (struct mux_cmdh *)&block[next_cmd_index];
+ next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
+ cmdh->command_type,
+ cmdh->if_id,
+ cmdh->transaction_id)) {
+ size = offsetof(struct mux_cmdh, param) +
+ sizeof(cmdh->param.flow_ctl);
+ ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
+ }
+ }
+}
+
+/* process datagram */
+static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
+ struct mux_adth_dg *dg, struct sk_buff *skb,
+ int if_id, int nr_of_dg)
+{
+ u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
+ u32 packet_offset, i, rc, dg_len;
+
+ for (i = 0; i < nr_of_dg; i++, dg++) {
+ if (le32_to_cpu(dg->datagram_index)
+ < sizeof(struct mux_adbh))
+ goto dg_error;
+
+ /* Is the packet inside of the ADB */
+ if (le32_to_cpu(dg->datagram_index) >=
+ le32_to_cpu(adbh->block_length)) {
+ goto dg_error;
+ } else {
+ packet_offset =
+ le32_to_cpu(dg->datagram_index) +
+ dl_head_pad_len;
+ dg_len = le16_to_cpu(dg->datagram_length);
+ /* Pass the packet to the netif layer. */
+ rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
+ packet_offset,
+ dg->service_class, skb,
+ dg_len - dl_head_pad_len);
+ if (rc)
+ goto dg_error;
+ }
+ }
+ return 0;
+dg_error:
+ return -1;
+}
+
+/* Decode an aggregated data block. */
+static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
+ struct sk_buff *skb)
+{
+ struct mux_adth_dg *dg;
+ struct iosm_wwan *wwan;
+ struct mux_adbh *adbh;
+ struct mux_adth *adth;
+ int nr_of_dg, if_id;
+ u32 adth_index;
+ u8 *block;
+
+ block = skb->data;
+ adbh = (struct mux_adbh *)block;
+
+ /* Process the aggregated datagram tables. */
+ adth_index = le32_to_cpu(adbh->first_table_index);
+
+ /* Has CP sent an empty ADB ? */
+ if (adth_index < 1) {
+ dev_err(ipc_mux->dev, "unexpected empty ADB");
+ goto adb_decode_err;
+ }
+
+ /* Loop through mixed session tables. */
+ while (adth_index) {
+ /* Get the reference to the table header. */
+ adth = (struct mux_adth *)(block + adth_index);
+
+ /* Get the interface id and map it to the netif id. */
+ if_id = adth->if_id;
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
+ goto adb_decode_err;
+
+ if_id = array_index_nospec(if_id,
+ IPC_MEM_MUX_IP_SESSION_ENTRIES);
+
+ /* Is the session active ? */
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan)
+ goto adb_decode_err;
+
+ /* Consistency checks for aggregated datagram table. */
+ if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
+ goto adb_decode_err;
+
+ if (le16_to_cpu(adth->table_length) < sizeof(struct mux_adth))
+ goto adb_decode_err;
+
+ /* Calculate the number of datagrams. */
+ nr_of_dg = (le16_to_cpu(adth->table_length) -
+ sizeof(struct mux_adth)) /
+ sizeof(struct mux_adth_dg);
+
+ /* Is the datagram table empty ? */
+ if (nr_of_dg < 1) {
+ dev_err(ipc_mux->dev,
+ "adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
+ adth_index, nr_of_dg,
+ le32_to_cpu(adth->next_table_index));
+
+ /* Move to the next aggregated datagram table. */
+ adth_index = le32_to_cpu(adth->next_table_index);
+ continue;
+ }
+
+ /* New aggregated datagram table. */
+ dg = adth->dg;
+ if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
+ nr_of_dg) < 0)
+ goto adb_decode_err;
+
+ /* mark session for final flush */
+ ipc_mux->session[if_id].flush = 1;
+
+ /* Move to the next aggregated datagram table. */
+ adth_index = le32_to_cpu(adth->next_table_index);
+ }
+
+adb_decode_err:
+ return;
+}
+
+/**
+ * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
+ * depending on Header.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @skb: Pointer to ipc_skb.
+ */
+void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ u32 signature;
+
+ if (!skb->data)
+ return;
+
+ /* Decode the MUX header type. */
+ signature = le32_to_cpup((__le32 *)skb->data);
+
+ switch (signature) {
+ case IOSM_AGGR_MUX_SIG_ADBH: /* Aggregated Data Block Header */
+ mux_dl_adb_decode(ipc_mux, skb);
+ break;
+ case IOSM_AGGR_MUX_SIG_ADGH:
+ ipc_mux_dl_adgh_decode(ipc_mux, skb);
+ break;
+ case MUX_SIG_FCTH:
+ ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
+ break;
+ case IOSM_AGGR_MUX_SIG_ACBH: /* Aggregated Command Block Header */
+ ipc_mux_dl_acb_decode(ipc_mux, skb);
+ break;
+ case MUX_SIG_CMDH:
+ ipc_mux_dl_cmd_decode(ipc_mux, skb);
+ break;
+
+ default:
+ dev_err(ipc_mux->dev, "invalid ABH signature");
+ }
+
+ ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
+}
+
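+/* Take an skb from the UL ADB free list and prepare it for the requested
+ * header type (ADBH, ADGH or QLTH).
+ */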
+static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
+ struct mux_adb *ul_adb, u32 type)
+{
+ /* Take the first element of the free list. */
+ struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
+ u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
+ u32 *next_tb_id;
+ int qlt_size;
+ u32 if_id;
+
+ if (!skb)
+ return -EBUSY; /* Wait for a free ADB skb. */
+
+ /* Mark it as UL ADB to select the right free operation. */
+ IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
+
+ switch (type) {
+ case IOSM_AGGR_MUX_SIG_ADBH:
+ /* Save the ADB memory settings. */
+ ul_adb->dest_skb = skb;
+ ul_adb->buf = skb->data;
+ ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;
+
+ /* reset statistic counter */
+ ul_adb->if_cnt = 0;
+ ul_adb->payload_size = 0;
+ ul_adb->dg_cnt_total = 0;
+
+ /* Initialize the ADBH. */
+ ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
+ memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
+ ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
+ ul_adb->adbh->block_length =
+ cpu_to_le32(sizeof(struct mux_adbh));
+ next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
+ ul_adb->next_table_index = next_tb_id;
+
+ /* Clear the local copy of DGs for new ADB */
+ memset(ul_adb->dg, 0, sizeof(ul_adb->dg));
+
+ /* Clear the DG count and QLT updated status for new ADB */
+ for (if_id = 0; if_id < no_if; if_id++) {
+ ul_adb->dg_count[if_id] = 0;
+ ul_adb->qlt_updated[if_id] = 0;
+ }
+ break;
+
+ case IOSM_AGGR_MUX_SIG_ADGH:
+ /* Save the ADB memory settings. */
+ ul_adb->dest_skb = skb;
+ ul_adb->buf = skb->data;
+ ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
+ /* reset statistic counter */
+ ul_adb->if_cnt = 0;
+ ul_adb->payload_size = 0;
+ ul_adb->dg_cnt_total = 0;
+
+ ul_adb->adgh = (struct mux_adgh *)skb->data;
+ memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
+ break;
+
+ case MUX_SIG_QLTH:
+ qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
+ (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));
+
+ if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
+ dev_err(ipc_mux->dev,
+ "can't support. QLT size:%d SKB size: %d",
+ qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
+ return -ERANGE;
+ }
+
+ ul_adb->qlth_skb = skb;
+ memset((ul_adb->qlth_skb)->data, 0, qlt_size);
+ skb_put(skb, qlt_size);
+ break;
+ }
+
+ return 0;
+}
+
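+/* Close the current ADGH and queue it on the channel UL list for transfer. */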
+static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
+{
+ struct mux_adb *ul_adb = &ipc_mux->ul_adb;
+ u16 adgh_len;
+ long long bytes;
+ char *str;
+
+ if (!ul_adb->dest_skb) {
+ dev_err(ipc_mux->dev, "no dest skb");
+ return;
+ }
+
+ adgh_len = le16_to_cpu(ul_adb->adgh->length);
+ skb_put(ul_adb->dest_skb, adgh_len);
+ skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
+ ul_adb->dest_skb = NULL;
+
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
+ struct mux_session *session;
+
+ session = &ipc_mux->session[ul_adb->adgh->if_id];
+ str = "available_credits";
+ bytes = (long long)session->ul_flow_credits;
+
+ } else {
+ str = "pend_bytes";
+ bytes = ipc_mux->ul_data_pend_bytes;
+ ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes +
+ adgh_len;
+ }
+
+ dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
+ adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
+ str, bytes);
+}
+
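+/* Append the per-session datagram tables (ADTH) and queue level tables (QLTH)
+ * to the ADB, chained via the next_table_index fields.
+ */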
+static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
+ struct mux_adb *ul_adb, int *out_offset)
+{
+ int i, qlt_size, offset = *out_offset;
+ struct mux_qlth *p_adb_qlt;
+ struct mux_adth_dg *dg;
+ struct mux_adth *adth;
+ u16 adth_dg_size;
+ u32 *next_tb_id;
+
+ qlt_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+
+ for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ if (ul_adb->dg_count[i] > 0) {
+ adth_dg_size = offsetof(struct mux_adth, dg) +
+ ul_adb->dg_count[i] * sizeof(*dg);
+
+ *ul_adb->next_table_index = offset;
+ adth = (struct mux_adth *)&ul_adb->buf[offset];
+ next_tb_id = (unsigned int *)&adth->next_table_index;
+ ul_adb->next_table_index = next_tb_id;
+ offset += adth_dg_size;
+ adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
+ adth->if_id = i;
+ adth->table_length = cpu_to_le16(adth_dg_size);
+ adth_dg_size -= offsetof(struct mux_adth, dg);
+ memcpy(adth->dg, ul_adb->dg[i], adth_dg_size);
+ ul_adb->if_cnt++;
+ }
+
+ if (ul_adb->qlt_updated[i]) {
+ *ul_adb->next_table_index = offset;
+ p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
+ ul_adb->next_table_index =
+ (u32 *)&p_adb_qlt->next_table_index;
+ memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
+ offset += qlt_size;
+ }
+ }
+ *out_offset = offset;
+}
+
+/**
+ * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
+ * @ipc_mux: Pointer to MUX data-struct.
+ */
+void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
+{
+ bool ul_data_pend = false;
+ struct mux_adb *ul_adb;
+ unsigned long flags;
+ int offset;
+
+ ul_adb = &ipc_mux->ul_adb;
+ if (!ul_adb->dest_skb)
+ return;
+
+ offset = *ul_adb->next_table_index;
+ ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
+ ul_adb->adbh->block_length = cpu_to_le32(offset);
+
+ if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
+ ul_adb->dest_skb = NULL;
+ return;
+ }
+
+ *ul_adb->next_table_index = 0;
+ ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
+ skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));
+
+ spin_lock_irqsave(&(&ipc_mux->channel->ul_list)->lock, flags);
+ __skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
+ spin_unlock_irqrestore(&(&ipc_mux->channel->ul_list)->lock, flags);
+
+ ul_adb->dest_skb = NULL;
+ /* Updates the TDs with ul_list */
+ ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);
+
+ /* Delay the doorbell irq */
+ if (ul_data_pend)
+ ipc_imem_td_update_timer_start(ipc_mux->imem);
+
+ ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
+ ipc_mux->acc_payload_size += ul_adb->payload_size;
+ ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
+}
+
+/* Allocates an ADB from the free list and initializes it with ADBH */
+static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
+ struct mux_adb *adb, int *size_needed,
+ u32 type)
+{
+ bool ret_val = false;
+ int status;
+
+ if (!adb->dest_skb) {
+		/* Allocate memory for the ADB including the
+		 * datagram table header.
+		 */
+ status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
+ if (status)
+ /* Is a pending ADB available ? */
+ ret_val = true; /* None. */
+
+ /* Update size need to zero only for new ADB memory */
+ *size_needed = 0;
+ }
+
+ return ret_val;
+}
+
+/* Informs the network stack to stop sending further packets for all opened
+ * sessions
+ */
+static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
+{
+ struct mux_session *session;
+ int idx;
+
+ for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
+ session = &ipc_mux->session[idx];
+
+ if (!session->wwan)
+ continue;
+
+ session->net_tx_stop = true;
+ }
+}
+
+/* Sends Queue Level Table of all opened sessions */
+static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
+{
+ struct ipc_mem_lite_gen_tbl *qlt;
+ struct mux_session *session;
+ bool qlt_updated = false;
+ int i;
+ int qlt_size;
+
+ if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
+ return qlt_updated;
+
+ qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
+
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
+ session = &ipc_mux->session[i];
+
+ if (!session->wwan || session->flow_ctl_mask)
+ continue;
+
+ if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
+ MUX_SIG_QLTH)) {
+ dev_err(ipc_mux->dev,
+ "no reserved mem to send QLT of if_id: %d", i);
+ break;
+ }
+
+ /* Prepare QLT */
+ qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
+ ->data;
+ qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
+ qlt->length = cpu_to_le16(qlt_size);
+ qlt->if_id = i;
+ qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
+ qlt->reserved[0] = 0;
+ qlt->reserved[1] = 0;
+
+ qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);
+
+ /* Add QLT to the transfer list. */
+ skb_queue_tail(&ipc_mux->channel->ul_list,
+ ipc_mux->ul_adb.qlth_skb);
+
+ qlt_updated = true;
+ ipc_mux->ul_adb.qlth_skb = NULL;
+ }
+
+ if (qlt_updated)
+ /* Updates the TDs with ul_list */
+ (void)ipc_imem_ul_write_td(ipc_mux->imem);
+
+ return qlt_updated;
+}
+
+/* Checks the available credits for the specified session and returns
+ * number of packets for which credits are available.
+ */
+static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list,
+ int max_nr_of_pkts)
+{
+ int pkts_to_send = 0;
+ struct sk_buff *skb;
+ int credits = 0;
+
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
+ credits = session->ul_flow_credits;
+ if (credits <= 0) {
+ dev_dbg(ipc_mux->dev,
+ "FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
+ session->if_id, session->ul_flow_credits,
+ session->ul_list.qlen); /* nr_of_bytes */
+ return 0;
+ }
+ } else {
+ credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
+ ipc_mux->ul_data_pend_bytes;
+ if (credits <= 0) {
+ ipc_mux_stop_tx_for_all_sessions(ipc_mux);
+
+ dev_dbg(ipc_mux->dev,
+ "if_id[%d] encod. fail Bytes: %llu, thresh: %d",
+ session->if_id, ipc_mux->ul_data_pend_bytes,
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
+ return 0;
+ }
+ }
+
+ /* Check if there are enough credits/bytes available to send the
+ * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
+ * depending on available credits.
+ */
+ skb_queue_walk(ul_list, skb)
+ {
+ if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
+ break;
+ credits -= skb->len;
+ pkts_to_send++;
+ }
+
+ return pkts_to_send;
+}
+
+/* Encode the UL IP packet according to Lite spec. */
+static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list,
+ struct mux_adb *adb, int nr_of_pkts)
+{
+ int offset = sizeof(struct mux_adgh);
+ int adb_updated = -EINVAL;
+ struct sk_buff *src_skb;
+ int aligned_size = 0;
+ int nr_of_skb = 0;
+ u32 pad_len = 0;
+
+ /* Re-calculate the number of packets depending on number of bytes to be
+ * processed/available credits.
+ */
+ nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
+ nr_of_pkts);
+
+ /* If calculated nr_of_pkts from available credits is <= 0
+ * then nothing to do.
+ */
+ if (nr_of_pkts <= 0)
+ return 0;
+
+ /* Read configured UL head_pad_length for session.*/
+ if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
+ pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+
+ /* Process all pending UL packets for this session
+ * depending on the allocated datagram table size.
+ */
+ while (nr_of_pkts > 0) {
+ /* get destination skb allocated */
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADGH)) {
+ dev_err(ipc_mux->dev, "no reserved memory for ADGH");
+ return -ENOMEM;
+ }
+
+ /* Peek at the head of the list. */
+ src_skb = skb_peek(ul_list);
+ if (!src_skb) {
+ dev_err(ipc_mux->dev,
+ "skb peek return NULL with count : %d",
+ nr_of_pkts);
+ break;
+ }
+
+ /* Calculate the memory value. */
+ aligned_size = ALIGN((pad_len + src_skb->len), 4);
+
+ ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;
+
+ if (ipc_mux->size_needed > adb->size) {
+ dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
+ ipc_mux->size_needed, adb->size);
+ /* Return 1 if any IP packet is added to the transfer
+ * list.
+ */
+ return nr_of_skb ? 1 : 0;
+ }
+
+		/* Add buffer (without head padding) to next pending transfer */
+ memcpy(adb->buf + offset + pad_len, src_skb->data,
+ src_skb->len);
+
+ adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
+ adb->adgh->if_id = session_id;
+ adb->adgh->length =
+ cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
+ src_skb->len);
+ adb->adgh->service_class = src_skb->priority;
+ adb->adgh->next_count = --nr_of_pkts;
+ adb->dg_cnt_total++;
+ adb->payload_size += src_skb->len;
+
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
+ /* Decrement the credit value as we are processing the
+ * datagram from the UL list.
+ */
+ session->ul_flow_credits -= src_skb->len;
+
+ /* Remove the processed elements and free it. */
+ src_skb = skb_dequeue(ul_list);
+ dev_kfree_skb(src_skb);
+ nr_of_skb++;
+
+ ipc_mux_ul_adgh_finish(ipc_mux);
+ }
+
+ if (nr_of_skb) {
+ /* Send QLT info to modem if pending bytes > high watermark
+ * in case of mux lite
+ */
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
+ ipc_mux->ul_data_pend_bytes >=
+ IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
+ adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
+ else
+ adb_updated = 1;
+
+ /* Updates the TDs with ul_list */
+ (void)ipc_imem_ul_write_td(ipc_mux->imem);
+ }
+
+ return adb_updated;
+}
+
+/**
+ * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
+ * @ipc_mux: pointer to MUX instance data
+ * @p_adb: pointer to UL aggregated data block
+ * @session_id: session id
+ * @qlth_n_ql_size: Length (in bytes) of the queue level table (QLTH + QL)
+ * @ul_list: pointer to skb buffer head
+ */
+void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
+ int session_id, int qlth_n_ql_size,
+ struct sk_buff_head *ul_list)
+{
+ int qlevel = ul_list->qlen;
+ struct mux_qlth *p_qlt;
+
+ p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];
+
+ /* Initialize QLTH if not been done */
+ if (p_adb->qlt_updated[session_id] == 0) {
+ p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
+ p_qlt->if_id = session_id;
+ p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
+ p_qlt->reserved = 0;
+ p_qlt->reserved2 = 0;
+ }
+
+ /* Update Queue Level information always */
+ p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
+ p_adb->qlt_updated[session_id] = 1;
+}
+
+/* Update the next table index. */
+static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
+ int session_id,
+ struct sk_buff_head *ul_list,
+ struct mux_adth_dg *dg,
+ int aligned_size,
+ u32 qlth_n_ql_size,
+ struct mux_adb *adb,
+ struct sk_buff *src_skb)
+{
+ ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
+ qlth_n_ql_size, ul_list);
+ ipc_mux_ul_adb_finish(ipc_mux);
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADBH))
+ return -ENOMEM;
+
+ ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
+
+ ipc_mux->size_needed += offsetof(struct mux_adth, dg);
+ ipc_mux->size_needed += qlth_n_ql_size;
+ ipc_mux->size_needed += sizeof(*dg) + aligned_size;
+ return 0;
+}
+
+/* Encode the pending UL datagrams of one session into the ADB. */
+static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
+ struct mux_adth_dg *dg,
+ struct sk_buff_head *ul_list,
+ struct sk_buff *src_skb, int session_id,
+ int pkt_to_send, u32 qlth_n_ql_size,
+ int *out_offset, int head_pad_len)
+{
+ int aligned_size;
+ int offset = *out_offset;
+ unsigned long flags;
+ int nr_of_skb = 0;
+
+ while (pkt_to_send > 0) {
+ /* Peek at the head of the list. */
+ src_skb = skb_peek(ul_list);
+ if (!src_skb) {
+ dev_err(ipc_mux->dev,
+ "skb peek return NULL with count : %d",
+ pkt_to_send);
+ return -1;
+ }
+ aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
+ ipc_mux->size_needed += sizeof(*dg) + aligned_size;
+
+ if (ipc_mux->size_needed > adb->size ||
+ ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
+ *adb->next_table_index = offset;
+ if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
+ ul_list, dg,
+ aligned_size,
+ qlth_n_ql_size, adb,
+ src_skb) < 0)
+ return -ENOMEM;
+ nr_of_skb = 0;
+ offset = le32_to_cpu(adb->adbh->block_length);
+ /* Load pointer to next available datagram entry */
+ dg = adb->dg[session_id] + adb->dg_count[session_id];
+ }
+ /* Add buffer without head padding to next pending transfer. */
+ memcpy(adb->buf + offset + head_pad_len,
+ src_skb->data, src_skb->len);
+ /* Setup datagram entry. */
+ dg->datagram_index = cpu_to_le32(offset);
+ dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
+ dg->service_class = (((struct sk_buff *)src_skb)->priority);
+ dg->reserved = 0;
+ adb->dg_cnt_total++;
+ adb->payload_size += le16_to_cpu(dg->datagram_length);
+ dg++;
+ adb->dg_count[session_id]++;
+ offset += aligned_size;
+ /* Remove the processed elements and free it. */
+ spin_lock_irqsave(&ul_list->lock, flags);
+ src_skb = __skb_dequeue(ul_list);
+ spin_unlock_irqrestore(&ul_list->lock, flags);
+
+ dev_kfree_skb(src_skb);
+ nr_of_skb++;
+ pkt_to_send--;
+ }
+ *out_offset = offset;
+ return nr_of_skb;
+}
+
+/* Encode one session's pending UL data into an ADB. */
+static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list, struct mux_adb *adb,
+ int pkt_to_send)
+{
+ int adb_updated = -EINVAL;
+ int head_pad_len, offset;
+ struct sk_buff *src_skb = NULL;
+ struct mux_adth_dg *dg;
+ u32 qlth_n_ql_size;
+
+ /* If any of the opened session has set Flow Control ON then limit the
+ * UL data to mux_flow_ctrl_high_thresh_b bytes
+ */
+ if (ipc_mux->ul_data_pend_bytes >=
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
+ ipc_mux_stop_tx_for_all_sessions(ipc_mux);
+ return adb_updated;
+ }
+
+ qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+ head_pad_len = session->ul_head_pad_len;
+
+ if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
+ head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADBH))
+ return -ENOMEM;
+
+ offset = le32_to_cpu(adb->adbh->block_length);
+
+ if (ipc_mux->size_needed == 0)
+ ipc_mux->size_needed = offset;
+
+ /* Calculate the size needed for ADTH, QLTH and QL*/
+ if (adb->dg_count[session_id] == 0) {
+ ipc_mux->size_needed += offsetof(struct mux_adth, dg);
+ ipc_mux->size_needed += qlth_n_ql_size;
+ }
+
+ dg = adb->dg[session_id] + adb->dg_count[session_id];
+
+ if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
+ session_id, pkt_to_send, qlth_n_ql_size, &offset,
+ head_pad_len) > 0) {
+ adb_updated = 1;
+ *adb->next_table_index = offset;
+ ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
+ qlth_n_ql_size, ul_list);
+ adb->adbh->block_length = cpu_to_le32(offset);
+ }
+
+ return adb_updated;
+}
+
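+/* Walk the IP sessions round robin and encode their pending UL data into
+ * ADBs (aggregation) or ADGHs (MUX Lite).
+ */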
+bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
+{
+ struct sk_buff_head *ul_list;
+ struct mux_session *session;
+ int updated = 0;
+ int session_id;
+ int dg_n;
+ int i;
+
+ if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
+ ipc_mux->adb_prep_ongoing)
+ return false;
+
+ ipc_mux->adb_prep_ongoing = true;
+
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
+ session_id = ipc_mux->rr_next_session;
+ session = &ipc_mux->session[session_id];
+
+		/* Go to the next session; handle rr_next_session overflow. */
+ ipc_mux->rr_next_session++;
+ if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
+ ipc_mux->rr_next_session = 0;
+
+ if (!session->wwan || session->flow_ctl_mask ||
+ session->net_tx_stop)
+ continue;
+
+ ul_list = &session->ul_list;
+
+ /* Is something pending in UL and flow ctrl off */
+ dg_n = skb_queue_len(ul_list);
+ if (dg_n > MUX_MAX_UL_DG_ENTRIES)
+ dg_n = MUX_MAX_UL_DG_ENTRIES;
+
+ if (dg_n == 0)
+ /* Nothing to do for ipc_mux session
+ * -> try next session id.
+ */
+ continue;
+ if (ipc_mux->protocol == MUX_LITE)
+ updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
+ session, ul_list,
+ &ipc_mux->ul_adb,
+ dg_n);
+ else
+ updated = mux_ul_adb_encode(ipc_mux, session_id,
+ session, ul_list,
+ &ipc_mux->ul_adb,
+ dg_n);
+ }
+
+ ipc_mux->adb_prep_ongoing = false;
+ return updated == 1;
+}
+
+/* Calculates the Payload from any given ADB. */
+static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
+ struct mux_adbh *p_adbh)
+{
+ struct mux_adth_dg *dg;
+ struct mux_adth *adth;
+ u32 payload_size = 0;
+ u32 next_table_idx;
+ int nr_of_dg, i;
+
+ /* Process the aggregated datagram tables. */
+ next_table_idx = le32_to_cpu(p_adbh->first_table_index);
+
+ if (next_table_idx < sizeof(struct mux_adbh)) {
+ dev_err(ipc_mux->dev, "unexpected empty ADB");
+ return payload_size;
+ }
+
+ while (next_table_idx != 0) {
+ /* Get the reference to the table header. */
+ adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
+
+ if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
+ nr_of_dg = (le16_to_cpu(adth->table_length) -
+ sizeof(struct mux_adth)) /
+ sizeof(struct mux_adth_dg);
+
+ if (nr_of_dg <= 0)
+ return payload_size;
+
+ dg = adth->dg;
+
+ for (i = 0; i < nr_of_dg; i++, dg++) {
+ if (le32_to_cpu(dg->datagram_index) <
+ sizeof(struct mux_adbh)) {
+ return payload_size;
+ }
+ payload_size +=
+ le16_to_cpu(dg->datagram_length);
+ }
+ }
+ next_table_idx = le32_to_cpu(adth->next_table_index);
+ }
+
+ return payload_size;
+}
+
+void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ union mux_type_header hr;
+ u16 adgh_len;
+ int payload;
+
+ if (ipc_mux->protocol == MUX_LITE) {
+ hr.adgh = (struct mux_adgh *)skb->data;
+ adgh_len = le16_to_cpu(hr.adgh->length);
+ if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
+ ipc_mux->ul_flow == MUX_UL)
+ ipc_mux->ul_data_pend_bytes =
+ ipc_mux->ul_data_pend_bytes - adgh_len;
+ } else {
+ hr.adbh = (struct mux_adbh *)(skb->data);
+ payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
+ ipc_mux->ul_data_pend_bytes -= payload;
+ }
+
+ if (ipc_mux->ul_flow == MUX_UL)
+ dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
+ ipc_mux->ul_data_pend_bytes);
+
+ /* Reset the skb settings. */
+ skb_trim(skb, 0);
+
+ /* Add the consumed ADB to the free list. */
+ skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
+}
+
+/* Start the NETIF uplink send transfer in MUX mode. */
+static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct iosm_mux *ipc_mux = ipc_imem->mux;
+ bool ul_data_pend = false;
+
+	/* Add session UL data to an ADB (aggregation) or ADGH (MUX lite). */
+ ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
+ if (ul_data_pend) {
+ if (ipc_mux->protocol == MUX_AGGREGATION)
+ ipc_imem_adb_timer_start(ipc_mux->imem);
+
+ /* Delay the doorbell irq */
+ ipc_imem_td_update_timer_start(ipc_mux->imem);
+ }
+ /* reset the debounce flag */
+ ipc_mux->ev_mux_net_transmit_pending = false;
+
+ return 0;
+}
+
+int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
+ struct sk_buff *skb)
+{
+ struct mux_session *session = &ipc_mux->session[if_id];
+ int ret = -EINVAL;
+
+ if (ipc_mux->channel &&
+ ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
+ dev_err(ipc_mux->dev,
+ "channel state is not IMEM_CHANNEL_ACTIVE");
+ goto out;
+ }
+
+ if (!session->wwan) {
+ dev_err(ipc_mux->dev, "session net ID is NULL");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* Session is under flow control.
+ * Check if packet can be queued in session list, if not
+ * suspend net tx
+ */
+ if (skb_queue_len(&session->ul_list) >=
+ (session->net_tx_stop ?
+ IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
+ (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
+ IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
+ ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Add skb to the uplink skb accumulator. */
+ skb_queue_tail(&session->ul_list, skb);
+
+ /* Inform the IPC kthread to pass uplink IP packets to CP. */
+ if (!ipc_mux->ev_mux_net_transmit_pending) {
+ ipc_mux->ev_mux_net_transmit_pending = true;
+ ret = ipc_task_queue_send_task(ipc_mux->imem,
+ ipc_mux_tq_ul_trigger_encode, 0,
+ NULL, 0, false);
+ if (ret)
+ goto out;
+ }
+ dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
+ if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
+ skb->len, skb->truesize, skb->priority);
+ ret = 0;
+out:
+ return ret;
+}
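The queue-length check in ipc_mux_ul_trigger_encode() above gives the uplink path a simple hysteresis: a session that is still transmitting is throttled only once its list reaches IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD * IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR packets (64 * 4 = 256 with the values in iosm_ipc_mux_codec.h below), while a session that is already flow-controlled keeps rejecting packets from the lower 64-packet mark upwards. The following stand-alone sketch only illustrates that threshold selection; the helper name and macros are hypothetical and not part of the driver.

#include <stdbool.h>

#define UL_SESS_FCON_THRESHOLD	64	/* mirrors IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD */
#define UL_SESS_FCOFF_FACTOR	4	/* mirrors IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR */

/* Decide whether a new uplink packet would have to be rejected with -EBUSY. */
static bool ul_session_queue_full(unsigned int qlen, bool net_tx_stop)
{
	/* Throttled sessions use the low mark, running ones the high mark. */
	unsigned int limit = net_tx_stop ?
			     UL_SESS_FCON_THRESHOLD :
			     UL_SESS_FCON_THRESHOLD * UL_SESS_FCOFF_FACTOR;

	return qlen >= limit;
}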
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
new file mode 100644
index 0000000000..f8df88f816
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_MUX_CODEC_H
+#define IOSM_IPC_MUX_CODEC_H
+
+#include "iosm_ipc_mux.h"
+
+/* Queue level size and reporting:
+ * a value >= 1 enables it, 0 disables it.
+ */
+#define MUX_QUEUE_LEVEL 1
+
+/* ADB finish timer value */
+#define IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC (500 * 1000)
+
+/* Enables the flow control (Flow is not allowed) */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE 5
+
+/* Disables the flow control (Flow is allowed) */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE 6
+
+/* ACK the flow control command. Shall have the same Transaction ID as the
+ * matching FLOW_CTL command
+ */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK 7
+
+/* Aggregation Protocol Command for report packet indicating link quality
+ */
+#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT 8
+
+/* Response to a report packet */
+#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP 9
+
+/* ACBH: Signature of the Aggregated Command Block Header. */
+#define IOSM_AGGR_MUX_SIG_ACBH 0x48424341
+
+/* ADTH: Signature of the Aggregated Datagram Table Header. */
+#define IOSM_AGGR_MUX_SIG_ADTH 0x48544441
+
+/* ADBH: Signature of the Aggregated Data Block Header. */
+#define IOSM_AGGR_MUX_SIG_ADBH 0x48424441
+
+/* ADGH: Signature of the Datagram Header. */
+#define IOSM_AGGR_MUX_SIG_ADGH 0x48474441
+
+/* Size of the buffer for the IP MUX commands. */
+#define MUX_MAX_UL_ACB_BUF_SIZE 256
+
+/* Maximum number of packets per session in one go */
+#define MUX_MAX_UL_DG_ENTRIES 100
+
+/* ADGH: Signature of the Datagram Header. */
+#define MUX_SIG_ADGH 0x48474441
+
+/* CMDH: Signature of the Command Header. */
+#define MUX_SIG_CMDH 0x48444D43
+
+/* QLTH: Signature of the Queue Level Table */
+#define MUX_SIG_QLTH 0x48544C51
+
+/* FCTH: Signature of the Flow Credit Table */
+#define MUX_SIG_FCTH 0x48544346
+
+/* MUX UL session threshold factor */
+#define IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR (4)
+
+/* Size of the IP MUX Lite DL data buffer. */
+#define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024)
+
+/* MUX UL session threshold in number of packets */
+#define IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD (64)
+
+/* Default timeout for sending IPC session commands like
+ * open session, close session etc.
+ * Unit: milliseconds
+ */
+#define IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT 1000 /* 1 second */
+
+/* MUX UL flow control lower threshold in bytes */
+#define IPC_MEM_MUX_UL_FLOWCTRL_LOW_B 10240 /* 10KB */
+
+/* MUX UL flow control higher threshold in bytes (5ms worth of data)*/
+#define IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B (110 * 1024)
+
+/**
+ * struct mux_cmdh - Structure of Command Header.
+ * @signature: Signature of the Command Header.
+ * @cmd_len: Length (in bytes) of the Aggregated Command Block.
+ * @if_id: ID of the interface the commands in the table belong to.
+ * @reserved: Reserved. Set to zero.
+ * @next_cmd_index: Index (in bytes) to the next command in the buffer.
+ * @command_type:	Command Enum. See the Session Management chapter for
+ *			details.
+ * @transaction_id:	The Transaction ID shall be unique to the command.
+ * @param: Optional parameters used with the command.
+ */
+struct mux_cmdh {
+ __le32 signature;
+ __le16 cmd_len;
+ u8 if_id;
+ u8 reserved;
+ __le32 next_cmd_index;
+ __le32 command_type;
+ __le32 transaction_id;
+ union mux_cmd_param param;
+};
+
+/**
+ * struct mux_acbh - Structure of the Aggregated Command Block Header.
+ * @signature: Signature of the Aggregated Command Block Header.
+ * @reserved: Reserved bytes. Set to zero.
+ * @sequence_nr: Block sequence number.
+ * @block_length: Length (in bytes) of the Aggregated Command Block.
+ * @first_cmd_index: Index (in bytes) to the first command in the buffer.
+ */
+struct mux_acbh {
+ __le32 signature;
+ __le16 reserved;
+ __le16 sequence_nr;
+ __le32 block_length;
+ __le32 first_cmd_index;
+};
+
+/**
+ * struct mux_adbh - Structure of the Aggregated Data Block Header.
+ * @signature: Signature of the Aggregated Data Block Header.
+ * @reserved: Reserved bytes. Set to zero.
+ * @sequence_nr: Block sequence number.
+ * @block_length: Length (in bytes) of the Aggregated Data Block.
+ * @first_table_index: Index (in bytes) to the first Datagram Table in
+ * the buffer.
+ */
+struct mux_adbh {
+ __le32 signature;
+ __le16 reserved;
+ __le16 sequence_nr;
+ __le32 block_length;
+ __le32 first_table_index;
+};
+
+/**
+ * struct mux_adth - Structure of the Aggregated Datagram Table Header.
+ * @signature: Signature of the Aggregated Datagram Table Header.
+ * @table_length: Length (in bytes) of the datagram table.
+ * @if_id: ID of the interface the datagrams in the table
+ * belong to.
+ * @opt_ipv4v6: Indicates IPv4(=0)/IPv6(=1) hint.
+ * @reserved: Reserved bits. Set to zero.
+ * @next_table_index: Index (in bytes) to the next Datagram Table in
+ * the buffer.
+ * @reserved2:		Reserved bytes. Set to zero.
+ * @dg:			Datagram table of variable length.
+ */
+struct mux_adth {
+ __le32 signature;
+ __le16 table_length;
+ u8 if_id;
+ u8 opt_ipv4v6;
+ __le32 next_table_index;
+ __le32 reserved2;
+ struct mux_adth_dg dg[];
+};
+
+/**
+ * struct mux_adgh - Aggregated Datagram Header.
+ * @signature: Signature of the Aggregated Datagram Header(0x48474441)
+ * @length: Length (in bytes) of the datagram header. This length
+ * shall include the header size. Min value: 0x10
+ * @if_id: ID of the interface the datagrams belong to
+ * @opt_ipv4v6:		Indicates IPv4 (=0) or IPv6 (=1). It is optional; if
+ *			not used, set it to zero.
+ * @reserved: Reserved bits. Set to zero.
+ * @service_class: Service class identifier for the datagram.
+ * @next_count: Count of the datagrams that shall be following this
+ * datagrams for this interface. A count of zero means
+ * the next datagram may not belong to this interface.
+ * @reserved1:		Reserved bytes. Set to zero.
+ */
+struct mux_adgh {
+ __le32 signature;
+ __le16 length;
+ u8 if_id;
+ u8 opt_ipv4v6;
+ u8 service_class;
+ u8 next_count;
+ u8 reserved1[6];
+};
+
+/**
+ * struct mux_lite_cmdh - MUX Lite Command Header
+ * @signature: Signature of the Command Header(0x48444D43)
+ * @cmd_len: Length (in bytes) of the command. This length shall
+ * include the header size. Minimum value: 0x10
+ * @if_id: ID of the interface the commands in the table belong to.
+ * @reserved:	Reserved. Set to zero.
+ * @command_type: Command Enum.
+ * @transaction_id: 4 byte value shall be generated and sent along with a
+ * command Responses and ACKs shall have the same
+ * Transaction ID as their commands. It shall be unique to
+ * the command transaction on the given interface.
+ * @param: Optional parameters used with the command.
+ */
+struct mux_lite_cmdh {
+ __le32 signature;
+ __le16 cmd_len;
+ u8 if_id;
+ u8 reserved;
+ __le32 command_type;
+ __le32 transaction_id;
+ union mux_cmd_param param;
+};
+
+/**
+ * struct mux_lite_vfl - value field in generic table
+ * @nr_of_bytes: Number of bytes available to transmit in the queue.
+ */
+struct mux_lite_vfl {
+ __le32 nr_of_bytes;
+};
+
+/**
+ * struct ipc_mem_lite_gen_tbl - Generic table format for Queue Level
+ * and Flow Credit
+ * @signature: Signature of the table
+ * @length: Length of the table
+ * @if_id: ID of the interface the table belongs to
+ * @vfl_length: Value field length
+ * @reserved: Reserved
+ * @vfl: Value field of variable length
+ */
+struct ipc_mem_lite_gen_tbl {
+ __le32 signature;
+ __le16 length;
+ u8 if_id;
+ u8 vfl_length;
+ u32 reserved[2];
+ struct mux_lite_vfl vfl;
+};
+
+/**
+ * union mux_type_cmdh - Command header pointers for MUX lite and aggregation
+ * @ack_lite:	MUX Lite Command Header pointer
+ * @ack_aggr:	Aggregation Command Header pointer
+ */
+union mux_type_cmdh {
+ struct mux_lite_cmdh *ack_lite;
+ struct mux_cmdh *ack_aggr;
+};
+
+/**
+ * union mux_type_header - Pointers to the MUX ADGH/ADBH header types
+ * @adgh: Aggregated Datagram Header pointer
+ * @adbh: Aggregated Data Block Header pointer
+ */
+union mux_type_header {
+ struct mux_adgh *adgh;
+ struct mux_adbh *adbh;
+};
+
+void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb);
+
+/**
+ * ipc_mux_dl_acb_send_cmds - Respond to the Command blocks.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @cmd_type: Command
+ * @if_id: Session interface id.
+ * @transaction_id: Command transaction id.
+ * @param: Pointer to command params.
+ * @res_size: Response size
+ * @blocking: True for blocking send
+ * @respond:		If true, return the transaction ID
+ *
+ * Returns:		0 on success and failure value on error
+ */
+int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
+ u32 transaction_id, union mux_cmd_param *param,
+ size_t res_size, bool blocking, bool respond);
+
+/**
+ * ipc_mux_netif_tx_flowctrl - Enable/Disable TX flow control on MUX sessions.
+ * @session: Pointer to mux_session struct
+ * @idx: Session ID
+ * @on:		true to enable and false to disable flow control
+ */
+void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on);
+
+/**
+ * ipc_mux_ul_trigger_encode - Route the UL packet through the IP MUX layer
+ * for encoding.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @if_id: Session ID.
+ * @skb: Pointer to ipc_skb.
+ *
+ * Returns: 0 if successfully encoded
+ * failure value on error
+ * -EBUSY if packet has to be retransmitted.
+ */
+int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
+ struct sk_buff *skb);
+/**
+ * ipc_mux_ul_data_encode - UL encode function for calling from Tasklet context.
+ * @ipc_mux: Pointer to MUX data-struct
+ *
+ * Returns: TRUE if any packet of any session is encoded, FALSE otherwise.
+ */
+bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_ul_encoded_process - Handles the Modem processed UL data by adding
+ * the SKB to the UL free list.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @skb: Pointer to ipc_skb.
+ */
+void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb);
+
+void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux);
+
+void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
+ int session_id, int qlth_n_ql_size,
+ struct sk_buff_head *ul_list);
+
+#endif
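The signature constants above are simply the four ASCII characters of each header tag stored little-endian, e.g. "ADBH" is 'A' = 0x41, 'D' = 0x44, 'B' = 0x42, 'H' = 0x48, which packs to 0x48424441. A compile-time sketch of that encoding; the IOSM_SIG_FROM_TAG macro is hypothetical and exists only for this illustration.

#include <linux/build_bug.h>
#include <linux/types.h>

/* Pack four ASCII characters into a little-endian 32-bit signature. */
#define IOSM_SIG_FROM_TAG(a, b, c, d) \
	((u32)(a) | ((u32)(b) << 8) | ((u32)(c) << 16) | ((u32)(d) << 24))

static_assert(IOSM_SIG_FROM_TAG('A', 'D', 'B', 'H') == 0x48424441); /* ADBH */
static_assert(IOSM_SIG_FROM_TAG('A', 'D', 'G', 'H') == 0x48474441); /* ADGH */
static_assert(IOSM_SIG_FROM_TAG('C', 'M', 'D', 'H') == 0x48444D43); /* CMDH */
static_assert(IOSM_SIG_FROM_TAG('Q', 'L', 'T', 'H') == 0x48544C51); /* QLTH */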
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
new file mode 100644
index 0000000000..04517bd332
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <net/rtnetlink.h>
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_pcie.h"
+#include "iosm_ipc_protocol.h"
+
+MODULE_DESCRIPTION("IOSM Driver");
+MODULE_LICENSE("GPL v2");
+
+/* WWAN GUID */
+static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
+ 0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
+
+static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
+{
+ /* Free the MSI resources. */
+ ipc_release_irq(ipc_pcie);
+
+ /* Free mapped doorbell scratchpad bus memory into CPU space. */
+ iounmap(ipc_pcie->scratchpad);
+
+ /* Free mapped IPC_REGS bus memory into CPU space. */
+ iounmap(ipc_pcie->ipc_regs);
+
+ /* Releases all PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_regions. Call this function only
+ * after all use of the PCI regions has ceased.
+ */
+ pci_release_regions(ipc_pcie->pci);
+}
+
+static void ipc_pcie_cleanup(struct iosm_pcie *ipc_pcie)
+{
+ /* Free the shared memory resources. */
+ ipc_imem_cleanup(ipc_pcie->imem);
+
+ ipc_pcie_resources_release(ipc_pcie);
+
+ /* Signal to the system that the PCI device is not in use. */
+ pci_disable_device(ipc_pcie->pci);
+}
+
+static void ipc_pcie_deinit(struct iosm_pcie *ipc_pcie)
+{
+ kfree(ipc_pcie->imem);
+ kfree(ipc_pcie);
+}
+
+static void ipc_pcie_remove(struct pci_dev *pci)
+{
+ struct iosm_pcie *ipc_pcie = pci_get_drvdata(pci);
+
+ ipc_pcie_cleanup(ipc_pcie);
+
+ ipc_pcie_deinit(ipc_pcie);
+}
+
+static int ipc_pcie_resources_request(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pci = ipc_pcie->pci;
+ u32 cap = 0;
+	int ret;
+
+	/* Reserve PCI I/O and memory resources.
+ * Mark all PCI regions associated with PCI device pci as
+ * being reserved by owner IOSM_IPC.
+ */
+ ret = pci_request_regions(pci, "IOSM_IPC");
+ if (ret) {
+ dev_err(ipc_pcie->dev, "failed pci request regions");
+ goto pci_request_region_fail;
+ }
+
+ /* Reserve the doorbell IPC REGS memory resources.
+ * Remap the memory into CPU space. Arrange for the physical address
+ * (BAR) to be visible from this driver.
+	 * pci_ioremap_bar() ensures that the memory is marked uncacheable.
+ */
+ ipc_pcie->ipc_regs = pci_ioremap_bar(pci, ipc_pcie->ipc_regs_bar_nr);
+
+ if (!ipc_pcie->ipc_regs) {
+ dev_err(ipc_pcie->dev, "IPC REGS ioremap error");
+ ret = -EBUSY;
+ goto ipc_regs_remap_fail;
+ }
+
+ /* Reserve the MMIO scratchpad memory resources.
+ * Remap the memory into CPU space. Arrange for the physical address
+ * (BAR) to be visible from this driver.
+	 * pci_ioremap_bar() ensures that the memory is marked uncacheable.
+ */
+ ipc_pcie->scratchpad =
+ pci_ioremap_bar(pci, ipc_pcie->scratchpad_bar_nr);
+
+ if (!ipc_pcie->scratchpad) {
+ dev_err(ipc_pcie->dev, "doorbell scratchpad ioremap error");
+ ret = -EBUSY;
+ goto scratch_remap_fail;
+ }
+
+ /* Install the irq handler triggered by CP. */
+ ret = ipc_acquire_irq(ipc_pcie);
+ if (ret) {
+ dev_err(ipc_pcie->dev, "acquiring MSI irq failed!");
+ goto irq_acquire_fail;
+ }
+
+ /* Enable bus-mastering for the IOSM IPC device. */
+ pci_set_master(pci);
+
+ /* Enable LTR if possible
+ * This is needed for L1.2!
+ */
+ pcie_capability_read_dword(ipc_pcie->pci, PCI_EXP_DEVCAP2, &cap);
+ if (cap & PCI_EXP_DEVCAP2_LTR)
+ pcie_capability_set_word(ipc_pcie->pci, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+
+ dev_dbg(ipc_pcie->dev, "link between AP and CP is fully on");
+
+ return ret;
+
+irq_acquire_fail:
+ iounmap(ipc_pcie->scratchpad);
+scratch_remap_fail:
+ iounmap(ipc_pcie->ipc_regs);
+ipc_regs_remap_fail:
+ pci_release_regions(pci);
+pci_request_region_fail:
+ return ret;
+}
+
+bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
+ bool parent)
+{
+ struct pci_dev *pdev;
+ u16 value = 0;
+ u32 enabled;
+
+ if (parent)
+ pdev = ipc_pcie->pci->bus->self;
+ else
+ pdev = ipc_pcie->pci;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &value);
+ enabled = value & PCI_EXP_LNKCTL_ASPMC;
+ dev_dbg(ipc_pcie->dev, "ASPM L1: 0x%04X 0x%03X", pdev->device, value);
+
+ return (enabled == PCI_EXP_LNKCTL_ASPM_L1 ||
+ enabled == PCI_EXP_LNKCTL_ASPMC);
+}
+
+bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *parent;
+ u16 link_status = 0;
+
+ if (!ipc_pcie->pci->bus || !ipc_pcie->pci->bus->self) {
+ dev_err(ipc_pcie->dev, "root port not found");
+ return false;
+ }
+
+ parent = ipc_pcie->pci->bus->self;
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &link_status);
+ dev_dbg(ipc_pcie->dev, "Link status: 0x%04X", link_status);
+
+ return link_status & PCI_EXP_LNKSTA_DLLLA;
+}
+
+static bool ipc_pcie_check_aspm_supported(struct iosm_pcie *ipc_pcie,
+ bool parent)
+{
+ struct pci_dev *pdev;
+ u32 support;
+ u32 cap = 0;
+
+ if (parent)
+ pdev = ipc_pcie->pci->bus->self;
+ else
+ pdev = ipc_pcie->pci;
+ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &cap);
+ support = u32_get_bits(cap, PCI_EXP_LNKCAP_ASPMS);
+ if (support < PCI_EXP_LNKCTL_ASPM_L1) {
+ dev_dbg(ipc_pcie->dev, "ASPM L1 not supported: 0x%04X",
+ pdev->device);
+ return false;
+ }
+ return true;
+}
+
+void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie)
+{
+ bool parent_aspm_enabled, dev_aspm_enabled;
+
+ /* check if both root port and child supports ASPM L1 */
+ if (!ipc_pcie_check_aspm_supported(ipc_pcie, true) ||
+ !ipc_pcie_check_aspm_supported(ipc_pcie, false))
+ return;
+
+ parent_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, true);
+ dev_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, false);
+
+ dev_dbg(ipc_pcie->dev, "ASPM parent: %s device: %s",
+ parent_aspm_enabled ? "Enabled" : "Disabled",
+ dev_aspm_enabled ? "Enabled" : "Disabled");
+}
+
+/* Initializes PCIe endpoint configuration */
+static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
+{
+ /* BAR0 is used for doorbell */
+ ipc_pcie->ipc_regs_bar_nr = IPC_DOORBELL_BAR0;
+
+ /* update HW configuration */
+ ipc_pcie->scratchpad_bar_nr = IPC_SCRATCHPAD_BAR2;
+ ipc_pcie->doorbell_reg_offset = IPC_DOORBELL_CH_OFFSET;
+ ipc_pcie->doorbell_write = IPC_WRITE_PTR_REG_0;
+ ipc_pcie->doorbell_capture = IPC_CAPTURE_PTR_REG_0;
+}
+
+/* This will read the BIOS WWAN RTD3 settings:
+ * D0L1.2/D3L2/Disabled
+ */
+static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
+{
+ enum ipc_pcie_sleep_state sleep_state = IPC_PCIE_D0L12;
+ union acpi_object *object;
+ acpi_handle handle_acpi;
+
+ handle_acpi = ACPI_HANDLE(dev);
+ if (!handle_acpi) {
+		pr_debug("PCI device does not support ACPI\n");
+ goto default_ret;
+ }
+
+ object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
+ if (!object)
+ goto default_ret;
+
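+	/* A _DSM value of 3 selects D3L2 as the RTD3 sleep state. */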
+ if (object->integer.value == 3)
+ sleep_state = IPC_PCIE_D3L2;
+
+ ACPI_FREE(object);
+
+default_ret:
+ return sleep_state;
+}
+
+static int ipc_pcie_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL);
+ int ret;
+
+ pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device,
+ pci_id->vendor);
+
+ if (!ipc_pcie)
+ goto ret_fail;
+
+ /* Initialize ipc dbg component for the PCIe device */
+ ipc_pcie->dev = &pci->dev;
+
+ /* Set the driver specific data. */
+ pci_set_drvdata(pci, ipc_pcie);
+
+ /* Save the address of the PCI device configuration. */
+ ipc_pcie->pci = pci;
+
+ /* Update platform configuration */
+ ipc_pcie_config_init(ipc_pcie);
+
+ /* Initialize the device before it is used. Ask low-level code
+ * to enable I/O and memory. Wake up the device if it was suspended.
+ */
+ if (pci_enable_device(pci)) {
+ dev_err(ipc_pcie->dev, "failed to enable the AP PCIe device");
+		/* If enabling the PCIe device has failed, calling
+		 * ipc_pcie_cleanup() would panic the system. Moreover,
+		 * ipc_pcie_cleanup() must only be called after
+		 * ipc_imem_mount().
+		 */
+ goto pci_enable_fail;
+ }
+
+ ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
+ goto set_mask_fail;
+ }
+
+ ipc_pcie_config_aspm(ipc_pcie);
+ dev_dbg(ipc_pcie->dev, "PCIe device enabled.");
+
+	/* Read the WWAN RTD3 BIOS setting. */
+ ipc_pcie->d3l2_support = ipc_pcie_read_bios_cfg(&pci->dev);
+
+ ipc_pcie->suspend = 0;
+
+ if (ipc_pcie_resources_request(ipc_pcie))
+ goto resources_req_fail;
+
+ /* Establish the link to the imem layer. */
+ ipc_pcie->imem = ipc_imem_init(ipc_pcie, pci->device,
+ ipc_pcie->scratchpad, ipc_pcie->dev);
+ if (!ipc_pcie->imem) {
+ dev_err(ipc_pcie->dev, "failed to init imem");
+ goto imem_init_fail;
+ }
+
+ return 0;
+
+imem_init_fail:
+ ipc_pcie_resources_release(ipc_pcie);
+resources_req_fail:
+set_mask_fail:
+ pci_disable_device(pci);
+pci_enable_fail:
+ kfree(ipc_pcie);
+ret_fail:
+ return -EIO;
+}
+
+static const struct pci_device_id iosm_ipc_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7360_ID) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
+
+/* Enter sleep in s2idle case
+ */
+static int __maybe_unused ipc_pcie_suspend_s2idle(struct iosm_pcie *ipc_pcie)
+{
+ ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_SLEEP);
+
+ /* Complete all memory stores before setting bit */
+ smp_mb__before_atomic();
+
+ set_bit(0, &ipc_pcie->suspend);
+
+ /* Complete all memory stores after setting bit */
+ smp_mb__after_atomic();
+
+ ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, true);
+
+ return 0;
+}
+
+/* Resume from sleep in s2idle case
+ */
+static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie)
+{
+ ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_ACTIVE);
+
+ ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, false);
+
+ /* Complete all memory stores before clearing bit. */
+ smp_mb__before_atomic();
+
+ clear_bit(0, &ipc_pcie->suspend);
+
+ /* Complete all memory stores after clearing bit. */
+ smp_mb__after_atomic();
+ return 0;
+}
+
+int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie)
+{
+ /* The HAL shall ask the shared memory layer whether D3 is allowed. */
+ ipc_imem_pm_suspend(ipc_pcie->imem);
+
+ dev_dbg(ipc_pcie->dev, "SUSPEND done");
+ return 0;
+}
+
+int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie)
+{
+ /* The HAL shall inform the shared memory layer that the device is
+ * active.
+ */
+ ipc_imem_pm_resume(ipc_pcie->imem);
+
+ dev_dbg(ipc_pcie->dev, "RESUME done");
+ return 0;
+}
+
+static int __maybe_unused ipc_pcie_suspend_cb(struct device *dev)
+{
+ struct iosm_pcie *ipc_pcie;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ ipc_pcie = pci_get_drvdata(pdev);
+
+ switch (ipc_pcie->d3l2_support) {
+ case IPC_PCIE_D0L12:
+ ipc_pcie_suspend_s2idle(ipc_pcie);
+ break;
+ case IPC_PCIE_D3L2:
+ ipc_pcie_suspend(ipc_pcie);
+ break;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
+{
+ struct iosm_pcie *ipc_pcie;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ ipc_pcie = pci_get_drvdata(pdev);
+
+ switch (ipc_pcie->d3l2_support) {
+ case IPC_PCIE_D0L12:
+ ipc_pcie_resume_s2idle(ipc_pcie);
+ break;
+ case IPC_PCIE_D3L2:
+ ipc_pcie_resume(ipc_pcie);
+ break;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);
+
+static struct pci_driver iosm_ipc_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = ipc_pcie_probe,
+ .remove = ipc_pcie_remove,
+ .driver = {
+ .pm = &iosm_ipc_pm,
+ },
+ .id_table = iosm_ipc_ids,
+};
+module_pci_driver(iosm_ipc_driver);
+
+int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
+ size_t size, dma_addr_t *mapping, int direction)
+{
+ if (ipc_pcie->pci) {
+ *mapping = dma_map_single(&ipc_pcie->pci->dev, data, size,
+ direction);
+ if (dma_mapping_error(&ipc_pcie->pci->dev, *mapping)) {
+ dev_err(ipc_pcie->dev, "dma mapping failed");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
+ dma_addr_t mapping, int direction)
+{
+ if (!mapping)
+ return;
+ if (ipc_pcie->pci)
+ dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction);
+}
+
+struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
+ gfp_t flags, size_t size)
+{
+ struct sk_buff *skb;
+
+ if (!ipc_pcie || !size) {
+ pr_err("invalid pcie object or size");
+ return NULL;
+ }
+
+ skb = __netdev_alloc_skb(NULL, size, flags);
+ if (!skb)
+ return NULL;
+
+ IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
+ IPC_CB(skb)->mapping = 0;
+
+ return skb;
+}
+
+struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
+ gfp_t flags, dma_addr_t *mapping,
+ int direction, size_t headroom)
+{
+ struct sk_buff *skb = ipc_pcie_alloc_local_skb(ipc_pcie, flags,
+ size + headroom);
+ if (!skb)
+ return NULL;
+
+ if (headroom)
+ skb_reserve(skb, headroom);
+
+ if (ipc_pcie_addr_map(ipc_pcie, skb->data, size, mapping, direction)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
+
+ /* Store the mapping address in skb scratch pad for later usage */
+ IPC_CB(skb)->mapping = *mapping;
+ IPC_CB(skb)->direction = direction;
+ IPC_CB(skb)->len = size;
+
+ return skb;
+}
+
+void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
+{
+ if (!skb)
+ return;
+
+ ipc_pcie_addr_unmap(ipc_pcie, IPC_CB(skb)->len, IPC_CB(skb)->mapping,
+ IPC_CB(skb)->direction);
+ IPC_CB(skb)->mapping = 0;
+ dev_kfree_skb(skb);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.h b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
new file mode 100644
index 0000000000..844cf1fed9
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PCIE_H
+#define IOSM_IPC_PCIE_H
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+
+#include "iosm_ipc_irq.h"
+
+/* Device ID */
+#define INTEL_CP_DEVICE_7560_ID 0x7560
+#define INTEL_CP_DEVICE_7360_ID 0x7360
+
+/* Define for BAR area usage */
+#define IPC_DOORBELL_BAR0 0
+#define IPC_SCRATCHPAD_BAR2 2
+
+/* Defines for DOORBELL registers information */
+#define IPC_DOORBELL_CH_OFFSET BIT(5)
+#define IPC_WRITE_PTR_REG_0 BIT(4)
+#define IPC_CAPTURE_PTR_REG_0 BIT(3)
+
+/* Number of MSI used for IPC */
+#define IPC_MSI_VECTORS 1
+
+/* Maximum total number of IPC IRQ vectors used for IPC */
+#define IPC_IRQ_VECTORS IPC_MSI_VECTORS
+
+/**
+ * enum ipc_pcie_sleep_state - Enum type to different sleep state transitions
+ * @IPC_PCIE_D0L12: Put the sleep state in D0L12
+ * @IPC_PCIE_D3L2: Put the sleep state in D3L2
+ */
+enum ipc_pcie_sleep_state {
+ IPC_PCIE_D0L12,
+ IPC_PCIE_D3L2,
+};
+
+/**
+ * struct iosm_pcie - IPC_PCIE struct.
+ * @pci: Address of the device description
+ * @dev: Pointer to generic device structure
+ * @ipc_regs: Remapped CP doorbell address of the irq register
+ * set, to fire the doorbell irq.
+ * @scratchpad: Remapped CP scratchpad address, to send the
+ * configuration. tuple and the IPC descriptors
+ *				configuration tuple and the IPC descriptors
+ *				to CP in the ROM phase. The config tuple
+ *				information is saved on the MSI scratchpad.
+ * @ipc_regs_bar_nr: BAR number to be used for IPC doorbell
+ * @scratchpad_bar_nr: BAR number to be used for Scratchpad
+ * @nvec: number of requested irq vectors
+ * @doorbell_reg_offset:	Offset between doorbell channel registers
+ * @doorbell_write:		Doorbell write register
+ * @doorbell_capture:		Doorbell capture register
+ * @suspend: S2IDLE sleep/active
+ * @d3l2_support: Read WWAN RTD3 BIOS setting for D3L2 support
+ */
+struct iosm_pcie {
+ struct pci_dev *pci;
+ struct device *dev;
+ void __iomem *ipc_regs;
+ void __iomem *scratchpad;
+ struct iosm_imem *imem;
+ int ipc_regs_bar_nr;
+ int scratchpad_bar_nr;
+ int nvec;
+ u32 doorbell_reg_offset;
+ u32 doorbell_write;
+ u32 doorbell_capture;
+ unsigned long suspend;
+ enum ipc_pcie_sleep_state d3l2_support;
+};
+
+/**
+ * struct ipc_skb_cb - Struct definition of the socket buffer which is mapped to
+ *		       the cb field of the skb
+ * @mapping:	Stores the physical or IOVA mapped address of the skb data
+ * @direction: DMA direction
+ * @len: Length of the DMA mapped region
+ * @op_type:	Expected values are defined in enum ipc_ul_usr_op.
+ */
+struct ipc_skb_cb {
+ dma_addr_t mapping;
+ int direction;
+ int len;
+ u8 op_type;
+};
+
+/**
+ * enum ipc_ul_usr_op - Control operation to execute the right action on
+ * the user interface.
+ * @UL_USR_OP_BLOCKED: The uplink app was blocked until CP confirms that the
+ * uplink buffer was consumed triggered by the IRQ.
+ * @UL_MUX_OP_ADB:	In MUX mode the UL ADB shall be added to the free list.
+ * @UL_DEFAULT: SKB in non muxing mode
+ */
+enum ipc_ul_usr_op {
+ UL_USR_OP_BLOCKED,
+ UL_MUX_OP_ADB,
+ UL_DEFAULT,
+};
+
+/**
+ * ipc_pcie_addr_map - Maps the kernel's virtual address to either IOVA
+ * address space or Physical address space, the mapping is
+ * stored in the skb's cb.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @data: Skb mem containing data
+ * @size: Data size
+ * @mapping: Dma mapping address
+ * @direction: Data direction
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
+ size_t size, dma_addr_t *mapping, int direction);
+
+/**
+ * ipc_pcie_addr_unmap - Unmaps the skb memory region from IOVA address space
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @size: Data size
+ * @mapping: Dma mapping address
+ * @direction: Data direction
+ */
+void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
+ dma_addr_t mapping, int direction);
+
+/**
+ * ipc_pcie_alloc_skb - Allocate an uplink SKB for the given size.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @size: Size of the SKB required.
+ * @flags: Allocation flags
+ * @mapping:	Receives either the mapped IOVA or the converted physical address
+ * @direction: DMA data direction
+ * @headroom: Header data offset
+ *
+ * Returns: Pointer to ipc_skb on Success, NULL on failure.
+ */
+struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
+ gfp_t flags, dma_addr_t *mapping,
+ int direction, size_t headroom);
+
+/**
+ * ipc_pcie_alloc_local_skb - Allocate a local SKB for the given size.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @flags: Allocation flags
+ * @size: Size of the SKB required.
+ *
+ * Returns: Pointer to ipc_skb on Success, NULL on failure.
+ */
+struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
+ gfp_t flags, size_t size);
+
+/**
+ * ipc_pcie_kfree_skb - Free skb allocated by ipc_pcie_alloc_*_skb().
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @skb: Pointer to the skb
+ */
+void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb);
+
+/**
+ * ipc_pcie_check_data_link_active - Check Data Link Layer Active
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ *
+ * Returns: true if active, otherwise false
+ */
+bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_pcie_suspend - Callback invoked by pm_runtime_suspend. It decrements
+ *		      the device's usage count and then carries out a suspend,
+ * either synchronous or asynchronous.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_pcie_suspend(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_pcie_resume - Callback invoked by pm_runtime_resume. It increments
+ *		     the device's usage count and then carries out a resume,
+ * either synchronous or asynchronous.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_pcie_resume(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_pcie_check_aspm_enabled - Check if ASPM L1 is already enabled
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @parent: True if checking ASPM L1 for parent else false
+ *
+ * Returns: true if ASPM is already enabled else false
+ */
+bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
+ bool parent);
+/**
+ * ipc_pcie_config_aspm - Configure ASPM L1
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ */
+void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie);
+
+#endif
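A minimal usage sketch of the allocation helpers declared above: it allocates a DMA-mapped uplink buffer and releases it again. The wrapper function name is made up for illustration, error handling is trimmed, and a valid struct iosm_pcie is assumed.

#include <linux/dma-direction.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>

#include "iosm_ipc_pcie.h"

/* Allocate a DMA-mapped 128-byte uplink skb and free it again. */
static int iosm_pcie_skb_example(struct iosm_pcie *ipc_pcie)
{
	dma_addr_t mapping = 0;
	struct sk_buff *skb;

	skb = ipc_pcie_alloc_skb(ipc_pcie, 128, GFP_KERNEL, &mapping,
				 DMA_TO_DEVICE, 0);
	if (!skb)
		return -ENOMEM;

	/* ... fill skb->data and hand "mapping" to the transfer ring ... */

	/* ipc_pcie_kfree_skb() unmaps the buffer before freeing the skb. */
	ipc_pcie_kfree_skb(ipc_pcie, skb);

	return 0;
}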
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pm.c b/drivers/net/wwan/iosm/iosm_ipc_pm.c
new file mode 100644
index 0000000000..413601c72d
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pm.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_protocol.h"
+
+/* Timeout value in MS for the PM to wait for device to reach active state */
+#define IPC_PM_ACTIVE_TIMEOUT_MS (500)
+
+/* Note that here "active" has the value 1, as compared to the enums
+ * ipc_mem_host_pm_state or ipc_mem_dev_pm_state, where "active" is 0
+ */
+#define IPC_PM_SLEEP (0)
+#define CONSUME_STATE (0)
+#define IPC_PM_ACTIVE (1)
+
+void ipc_pm_signal_hpda_doorbell(struct iosm_pm *ipc_pm, u32 identifier,
+ bool host_slp_check)
+{
+ if (host_slp_check && ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE &&
+ ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE_WAIT) {
+ ipc_pm->pending_hpda_update = true;
+ dev_dbg(ipc_pm->dev,
+ "Pend HPDA update set. Host PM_State: %d identifier:%d",
+ ipc_pm->host_pm_state, identifier);
+ return;
+ }
+
+ if (!ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, true)) {
+ ipc_pm->pending_hpda_update = true;
+ dev_dbg(ipc_pm->dev, "Pending HPDA update set. identifier:%d",
+ identifier);
+ return;
+ }
+ ipc_pm->pending_hpda_update = false;
+
+ /* Trigger the irq towards CP */
+ ipc_cp_irq_hpda_update(ipc_pm->pcie, identifier);
+
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, false);
+}
+
+/* Wake up the device if it is in low power mode. */
+static bool ipc_pm_link_activate(struct iosm_pm *ipc_pm)
+{
+ if (ipc_pm->cp_state == IPC_MEM_DEV_PM_ACTIVE)
+ return true;
+
+ if (ipc_pm->cp_state == IPC_MEM_DEV_PM_SLEEP) {
+ if (ipc_pm->ap_state == IPC_MEM_DEV_PM_SLEEP) {
+ /* Wake up the device. */
+ ipc_cp_irq_sleep_control(ipc_pm->pcie,
+ IPC_MEM_DEV_PM_WAKEUP);
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE_WAIT;
+
+ goto not_active;
+ }
+
+ if (ipc_pm->ap_state == IPC_MEM_DEV_PM_ACTIVE_WAIT)
+ goto not_active;
+
+ return true;
+ }
+
+not_active:
+ /* link is not ready */
+ return false;
+}
+
+bool ipc_pm_wait_for_device_active(struct iosm_pm *ipc_pm)
+{
+ bool ret_val = false;
+
+ if (ipc_pm->ap_state != IPC_MEM_DEV_PM_ACTIVE) {
+ /* Complete all memory stores before setting bit */
+ smp_mb__before_atomic();
+
+ /* Wait for IPC_PM_ACTIVE_TIMEOUT_MS for Device sleep state
+ * machine to enter ACTIVE state.
+ */
+ set_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after setting bit */
+ smp_mb__after_atomic();
+
+ if (!wait_for_completion_interruptible_timeout
+ (&ipc_pm->host_sleep_complete,
+ msecs_to_jiffies(IPC_PM_ACTIVE_TIMEOUT_MS))) {
+ dev_err(ipc_pm->dev,
+ "PM timeout. Expected State:%d. Actual: %d",
+ IPC_MEM_DEV_PM_ACTIVE, ipc_pm->ap_state);
+ goto active_timeout;
+ }
+ }
+
+ ret_val = true;
+active_timeout:
+ /* Complete all memory stores before clearing bit */
+ smp_mb__before_atomic();
+
+ /* Reset the atomic variable in any case as device sleep
+ * state machine change is no longer of interest.
+ */
+ clear_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after clearing bit */
+ smp_mb__after_atomic();
+
+ return ret_val;
+}
+
+static void ipc_pm_on_link_sleep(struct iosm_pm *ipc_pm)
+{
+ /* pending sleep ack and all conditions are cleared
+ * -> signal SLEEP__ACK to CP
+ */
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_SLEEP;
+
+ ipc_cp_irq_sleep_control(ipc_pm->pcie, IPC_MEM_DEV_PM_SLEEP);
+}
+
+static void ipc_pm_on_link_wake(struct iosm_pm *ipc_pm, bool ack)
+{
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+
+ if (ack) {
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+
+ ipc_cp_irq_sleep_control(ipc_pm->pcie, IPC_MEM_DEV_PM_ACTIVE);
+
+		/* Check the consume state. */
+ if (test_bit(CONSUME_STATE, &ipc_pm->host_sleep_pend))
+ complete(&ipc_pm->host_sleep_complete);
+ }
+
+ /* Check for pending HPDA update.
+ * Pending HP update could be because of sending message was
+ * put on hold due to Device sleep state or due to TD update
+ * which could be because of Device Sleep and Host Sleep
+ * states.
+ */
+ if (ipc_pm->pending_hpda_update &&
+ ipc_pm->host_pm_state == IPC_MEM_HOST_PM_ACTIVE)
+ ipc_pm_signal_hpda_doorbell(ipc_pm, IPC_HP_PM_TRIGGER, true);
+}
+
+bool ipc_pm_trigger(struct iosm_pm *ipc_pm, enum ipc_pm_unit unit, bool active)
+{
+ union ipc_pm_cond old_cond;
+ union ipc_pm_cond new_cond;
+ bool link_active;
+
+ /* Save the current D3 state. */
+ new_cond = ipc_pm->pm_cond;
+ old_cond = ipc_pm->pm_cond;
+
+ /* Calculate the power state only in the runtime phase. */
+ switch (unit) {
+ case IPC_PM_UNIT_IRQ: /* CP irq */
+ new_cond.irq = active;
+ break;
+
+ case IPC_PM_UNIT_LINK: /* Device link state. */
+ new_cond.link = active;
+ break;
+
+ case IPC_PM_UNIT_HS: /* Host sleep trigger requires Link. */
+ new_cond.hs = active;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Something changed ? */
+ if (old_cond.raw == new_cond.raw) {
+ /* Stay in the current PM state. */
+ link_active = old_cond.link == IPC_PM_ACTIVE;
+ goto ret;
+ }
+
+ ipc_pm->pm_cond = new_cond;
+
+ if (new_cond.link)
+ ipc_pm_on_link_wake(ipc_pm, unit == IPC_PM_UNIT_LINK);
+ else if (unit == IPC_PM_UNIT_LINK)
+ ipc_pm_on_link_sleep(ipc_pm);
+
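+	/* Link was asleep and a condition is now active: try to wake it up. */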
+ if (old_cond.link == IPC_PM_SLEEP && new_cond.raw) {
+ link_active = ipc_pm_link_activate(ipc_pm);
+ goto ret;
+ }
+
+ link_active = old_cond.link == IPC_PM_ACTIVE;
+
+ret:
+ return link_active;
+}
+
+bool ipc_pm_prepare_host_sleep(struct iosm_pm *ipc_pm)
+{
+ /* suspend not allowed if host_pm_state is not IPC_MEM_HOST_PM_ACTIVE */
+ if (ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE) {
+ dev_err(ipc_pm->dev, "host_pm_state=%d\tExpected to be: %d",
+ ipc_pm->host_pm_state, IPC_MEM_HOST_PM_ACTIVE);
+ return false;
+ }
+
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_SLEEP_WAIT_D3;
+
+ return true;
+}
+
+bool ipc_pm_prepare_host_active(struct iosm_pm *ipc_pm)
+{
+ if (ipc_pm->host_pm_state != IPC_MEM_HOST_PM_SLEEP) {
+ dev_err(ipc_pm->dev, "host_pm_state=%d\tExpected to be: %d",
+ ipc_pm->host_pm_state, IPC_MEM_HOST_PM_SLEEP);
+ return false;
+ }
+
+ /* Sending Sleep Exit message to CP. Update the state */
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_ACTIVE_WAIT;
+
+ return true;
+}
+
+void ipc_pm_set_s2idle_sleep(struct iosm_pm *ipc_pm, bool sleep)
+{
+ if (sleep) {
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->device_sleep_notification = IPC_MEM_DEV_PM_SLEEP;
+ } else {
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->device_sleep_notification = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->pm_cond.link = IPC_PM_ACTIVE;
+ }
+}
+
+bool ipc_pm_dev_slp_notification(struct iosm_pm *ipc_pm, u32 cp_pm_req)
+{
+ if (cp_pm_req == ipc_pm->device_sleep_notification)
+ return false;
+
+ ipc_pm->device_sleep_notification = cp_pm_req;
+
+ /* Evaluate the PM request. */
+ switch (ipc_pm->cp_state) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ switch (cp_pm_req) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ /* Inform the PM that the device link can go down. */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_LINK, false);
+ return true;
+
+ default:
+ dev_err(ipc_pm->dev,
+ "loc-pm=%d active: confused req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ switch (cp_pm_req) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ /* Inform the PM that the device link is active. */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_LINK, true);
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ break;
+
+ default:
+ dev_err(ipc_pm->dev,
+ "loc-pm=%d sleep: confused req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+ break;
+
+ default:
+ dev_err(ipc_pm->dev, "confused loc-pm=%d, req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+
+ return false;
+}
+
+void ipc_pm_init(struct iosm_protocol *ipc_protocol)
+{
+ struct iosm_imem *ipc_imem = ipc_protocol->imem;
+ struct iosm_pm *ipc_pm = &ipc_protocol->pm;
+
+ ipc_pm->pcie = ipc_imem->pcie;
+ ipc_pm->dev = ipc_imem->dev;
+
+ ipc_pm->pm_cond.irq = IPC_PM_SLEEP;
+ ipc_pm->pm_cond.hs = IPC_PM_SLEEP;
+ ipc_pm->pm_cond.link = IPC_PM_ACTIVE;
+
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
+
+ /* Create generic wait-for-completion handler for Host Sleep
+ * and device sleep coordination.
+ */
+ init_completion(&ipc_pm->host_sleep_complete);
+
+ /* Complete all memory stores before clearing bit */
+ smp_mb__before_atomic();
+
+ clear_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after clearing bit */
+ smp_mb__after_atomic();
+}
+
+void ipc_pm_deinit(struct iosm_protocol *proto)
+{
+ struct iosm_pm *ipc_pm = &proto->pm;
+
+ complete(&ipc_pm->host_sleep_complete);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pm.h b/drivers/net/wwan/iosm/iosm_ipc_pm.h
new file mode 100644
index 0000000000..e7c00f388c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pm.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PM_H
+#define IOSM_IPC_PM_H
+
+/* Trigger the doorbell interrupt on cp to change the PM sleep/active status */
+#define ipc_cp_irq_sleep_control(ipc_pcie, data) \
+ ipc_doorbell_fire(ipc_pcie, IPC_DOORBELL_IRQ_SLEEP, data)
+
+/* Trigger the doorbell interrupt on CP to do hpda update */
+#define ipc_cp_irq_hpda_update(ipc_pcie, data) \
+ ipc_doorbell_fire(ipc_pcie, IPC_DOORBELL_IRQ_HPDA, 0xFF & (data))
+
+/**
+ * union ipc_pm_cond - Conditions for D3 and the sleep message to CP.
+ * @raw: raw/combined value for faster check
+ * @irq: IRQ towards CP
+ * @hs: Host Sleep
+ * @link: Device link state.
+ */
+union ipc_pm_cond {
+ unsigned int raw;
+
+ struct {
+ unsigned int irq:1,
+ hs:1,
+ link:1;
+ };
+};
+
+/**
+ * enum ipc_mem_host_pm_state - Possible states of the HOST SLEEP finite state
+ * machine.
+ * @IPC_MEM_HOST_PM_ACTIVE: Host is active
+ * @IPC_MEM_HOST_PM_ACTIVE_WAIT: Intermediate state before going to
+ * active
+ * @IPC_MEM_HOST_PM_SLEEP_WAIT_IDLE: Intermediate state to wait for idle
+ * before going into sleep
+ * @IPC_MEM_HOST_PM_SLEEP_WAIT_D3: Intermediate state to wait for D3
+ * before going to sleep
+ * @IPC_MEM_HOST_PM_SLEEP:		   After this state the interface is not
+ *					   accessible; the host is in suspend-to-RAM
+ * @IPC_MEM_HOST_PM_SLEEP_WAIT_EXIT_SLEEP: Intermediate state before exiting
+ * sleep
+ */
+enum ipc_mem_host_pm_state {
+ IPC_MEM_HOST_PM_ACTIVE,
+ IPC_MEM_HOST_PM_ACTIVE_WAIT,
+ IPC_MEM_HOST_PM_SLEEP_WAIT_IDLE,
+ IPC_MEM_HOST_PM_SLEEP_WAIT_D3,
+ IPC_MEM_HOST_PM_SLEEP,
+ IPC_MEM_HOST_PM_SLEEP_WAIT_EXIT_SLEEP,
+};
+
+/**
+ * enum ipc_mem_dev_pm_state - Possible states of the DEVICE SLEEP finite state
+ * machine.
+ * @IPC_MEM_DEV_PM_ACTIVE: IPC_MEM_DEV_PM_ACTIVE is the initial
+ * power management state.
+ * IRQ(struct ipc_mem_device_info:
+ * device_sleep_notification)
+ * and DOORBELL-IRQ-HPDA(data) values.
+ * @IPC_MEM_DEV_PM_SLEEP: IPC_MEM_DEV_PM_SLEEP is PM state for
+ * sleep.
+ * @IPC_MEM_DEV_PM_WAKEUP: DOORBELL-IRQ-DEVICE_WAKE(data).
+ * @IPC_MEM_DEV_PM_HOST_SLEEP: DOORBELL-IRQ-HOST_SLEEP(data).
+ * @IPC_MEM_DEV_PM_ACTIVE_WAIT: Local intermediate states.
+ * @IPC_MEM_DEV_PM_FORCE_SLEEP: DOORBELL-IRQ-FORCE_SLEEP.
+ * @IPC_MEM_DEV_PM_FORCE_ACTIVE: DOORBELL-IRQ-FORCE_ACTIVE.
+ */
+enum ipc_mem_dev_pm_state {
+ IPC_MEM_DEV_PM_ACTIVE,
+ IPC_MEM_DEV_PM_SLEEP,
+ IPC_MEM_DEV_PM_WAKEUP,
+ IPC_MEM_DEV_PM_HOST_SLEEP,
+ IPC_MEM_DEV_PM_ACTIVE_WAIT,
+ IPC_MEM_DEV_PM_FORCE_SLEEP = 7,
+ IPC_MEM_DEV_PM_FORCE_ACTIVE,
+};
+
+/**
+ * struct iosm_pm - Power management instance
+ * @pcie: Pointer to iosm_pcie structure
+ * @dev: Pointer to device structure
+ * @host_pm_state: PM states for host
+ * @host_sleep_pend: Variable to indicate Host Sleep Pending
+ * @host_sleep_complete: Generic wait-for-completion used in
+ * case of Host Sleep
+ * @pm_cond: Conditions for power management
+ * @ap_state: Current power management state, the
+ * initial state is IPC_MEM_DEV_PM_ACTIVE eq. 0.
+ * @cp_state: PM State of CP
+ * @device_sleep_notification:	Last handled device sleep notification
+ * @pending_hpda_update:		Is an HPDA update pending?
+ */
+struct iosm_pm {
+ struct iosm_pcie *pcie;
+ struct device *dev;
+ enum ipc_mem_host_pm_state host_pm_state;
+ unsigned long host_sleep_pend;
+ struct completion host_sleep_complete;
+ union ipc_pm_cond pm_cond;
+ enum ipc_mem_dev_pm_state ap_state;
+ enum ipc_mem_dev_pm_state cp_state;
+ u32 device_sleep_notification;
+ u8 pending_hpda_update:1;
+};
+
+/**
+ * enum ipc_pm_unit - Power management units.
+ * @IPC_PM_UNIT_IRQ: IRQ towards CP
+ * @IPC_PM_UNIT_HS: Host Sleep for converged protocol
+ * @IPC_PM_UNIT_LINK: Link state controlled by CP.
+ */
+enum ipc_pm_unit {
+ IPC_PM_UNIT_IRQ,
+ IPC_PM_UNIT_HS,
+ IPC_PM_UNIT_LINK,
+};
+
+/**
+ * ipc_pm_init - Allocate power management component
+ * @ipc_protocol: Pointer to iosm_protocol structure
+ */
+void ipc_pm_init(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_pm_deinit - Free power management component, invalidating its pointer.
+ * @ipc_protocol: Pointer to iosm_protocol structure
+ */
+void ipc_pm_deinit(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_pm_dev_slp_notification - Handle a sleep notification message from the
+ *				 device. This can be called from interrupt state.
+ *				 This function also handles Host Sleep requests
+ * if the Host Sleep protocol is register based.
+ * @ipc_pm: Pointer to power management component
+ * @sleep_notification: Actual notification from device
+ *
+ * Returns: true if dev sleep state has to be checked, false otherwise.
+ */
+bool ipc_pm_dev_slp_notification(struct iosm_pm *ipc_pm,
+ u32 sleep_notification);
+
+/**
+ * ipc_pm_set_s2idle_sleep - Set PM variables to sleep/active
+ * @ipc_pm: Pointer to power management component
+ * @sleep: true to enter sleep/false to exit sleep
+ */
+void ipc_pm_set_s2idle_sleep(struct iosm_pm *ipc_pm, bool sleep);
+
+/**
+ * ipc_pm_prepare_host_sleep - Prepare the PM for sleep by entering
+ * IPC_MEM_HOST_PM_SLEEP_WAIT_D3 state.
+ * @ipc_pm: Pointer to power management component
+ *
+ * Returns: true on success, false if the host was not active.
+ */
+bool ipc_pm_prepare_host_sleep(struct iosm_pm *ipc_pm);
+
+/**
+ * ipc_pm_prepare_host_active - Prepare the PM for wakeup by entering
+ * IPC_MEM_HOST_PM_ACTIVE_WAIT state.
+ * @ipc_pm: Pointer to power management component
+ *
+ * Returns: true on success, false if the host was not sleeping.
+ */
+bool ipc_pm_prepare_host_active(struct iosm_pm *ipc_pm);
+
+/**
+ * ipc_pm_wait_for_device_active - Wait up to IPC_PM_ACTIVE_TIMEOUT_MS ms
+ * for the device to reach active state
+ * @ipc_pm: Pointer to power management component
+ *
+ * Returns: true if device is active, false on timeout
+ */
+bool ipc_pm_wait_for_device_active(struct iosm_pm *ipc_pm);
+
+/**
+ * ipc_pm_signal_hpda_doorbell - Wake up the device if it is in low power mode
+ * and trigger a head pointer update interrupt.
+ * @ipc_pm: Pointer to power management component
+ * @identifier: specifies what component triggered hpda update irq
+ * @host_slp_check: if set to true then Host Sleep state machine check will
+ * be performed. If Host Sleep state machine allows HP
+ * update then only doorbell is triggered otherwise pending
+ * flag will be set. If set to false then Host Sleep check
+ * will not be performed. This is helpful for Host Sleep
+ * negotiation through message ring.
+ */
+void ipc_pm_signal_hpda_doorbell(struct iosm_pm *ipc_pm, u32 identifier,
+ bool host_slp_check);
+/**
+ * ipc_pm_trigger - Update power manager and wake up the link if needed
+ * @ipc_pm: Pointer to power management component
+ * @unit: Power management units
+ * @active: Device link state
+ *
+ * Returns: true if link is unchanged or active, false otherwise
+ */
+bool ipc_pm_trigger(struct iosm_pm *ipc_pm, enum ipc_pm_unit unit, bool active);
+
+#endif
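Because the irq, hs and link bits of union ipc_pm_cond share their storage with the raw member, ipc_pm_trigger() can detect "any condition changed" with a single integer comparison of the old and new values. A small illustrative sketch of that pattern follows; the helper is hypothetical and not part of the driver, and the includes are only what the sketch itself needs.

#include <linux/completion.h>

#include "iosm_ipc_pm.h"

/* Return true when toggling the IRQ condition actually changed the state. */
static bool pm_cond_irq_toggle_changed(union ipc_pm_cond cond, bool irq_active)
{
	union ipc_pm_cond new_cond = cond;

	new_cond.irq = irq_active;

	/* One compare covers irq, hs and link at once. */
	return new_cond.raw != cond.raw;
}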
diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.c b/drivers/net/wwan/iosm/iosm_ipc_port.c
new file mode 100644
index 0000000000..5d5b4183e1
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_port.h"
+
+/* open logical channel for control communication */
+static int ipc_port_ctrl_start(struct wwan_port *port)
+{
+ struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+ int ret = 0;
+
+ ipc_port->channel = ipc_imem_sys_port_open(ipc_port->ipc_imem,
+ ipc_port->chl_id,
+ IPC_HP_CDEV_OPEN);
+ if (!ipc_port->channel)
+ ret = -EIO;
+
+ return ret;
+}
+
+/* close logical channel */
+static void ipc_port_ctrl_stop(struct wwan_port *port)
+{
+ struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+
+ ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel);
+}
+
+/* transfer control data to modem */
+static int ipc_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+
+ return ipc_imem_sys_cdev_write(ipc_port, skb);
+}
+
+static const struct wwan_port_ops ipc_wwan_ctrl_ops = {
+ .start = ipc_port_ctrl_start,
+ .stop = ipc_port_ctrl_stop,
+ .tx = ipc_port_ctrl_tx,
+};
+
+/* Port init func */
+struct iosm_cdev *ipc_port_init(struct iosm_imem *ipc_imem,
+ struct ipc_chnl_cfg ipc_port_cfg)
+{
+ struct iosm_cdev *ipc_port = kzalloc(sizeof(*ipc_port), GFP_KERNEL);
+ enum wwan_port_type port_type = ipc_port_cfg.wwan_port_type;
+ enum ipc_channel_id chl_id = ipc_port_cfg.id;
+
+ if (!ipc_port)
+ return NULL;
+
+ ipc_port->dev = ipc_imem->dev;
+ ipc_port->pcie = ipc_imem->pcie;
+
+ ipc_port->port_type = port_type;
+ ipc_port->chl_id = chl_id;
+ ipc_port->ipc_imem = ipc_imem;
+
+ ipc_port->iosm_port = wwan_create_port(ipc_port->dev, port_type,
+ &ipc_wwan_ctrl_ops, NULL,
+ ipc_port);
+
+ return ipc_port;
+}
+
+/* Port deinit func */
+void ipc_port_deinit(struct iosm_cdev *port[])
+{
+ struct iosm_cdev *ipc_port;
+ u8 ctrl_chl_nr;
+
+ for (ctrl_chl_nr = 0; ctrl_chl_nr < IPC_MEM_MAX_CHANNELS;
+ ctrl_chl_nr++) {
+ if (port[ctrl_chl_nr]) {
+ ipc_port = port[ctrl_chl_nr];
+ wwan_remove_port(ipc_port->iosm_port);
+ kfree(ipc_port);
+ }
+ }
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.h b/drivers/net/wwan/iosm/iosm_ipc_port.h
new file mode 100644
index 0000000000..11bc8ed216
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PORT_H
+#define IOSM_IPC_PORT_H
+
+#include <linux/wwan.h>
+
+#include "iosm_ipc_imem_ops.h"
+
+/**
+ * struct iosm_cdev - State of the char driver layer.
+ * @iosm_port: Pointer of type wwan_port
+ * @ipc_imem: imem instance
+ * @dev: Pointer to device struct
+ * @pcie: PCIe component
+ * @port_type: WWAN port type
+ * @channel: Channel instance
+ * @chl_id:		Channel Identifier
+ */
+struct iosm_cdev {
+ struct wwan_port *iosm_port;
+ struct iosm_imem *ipc_imem;
+ struct device *dev;
+ struct iosm_pcie *pcie;
+ enum wwan_port_type port_type;
+ struct ipc_mem_channel *channel;
+ enum ipc_channel_id chl_id;
+};
+
+/**
+ * ipc_port_init - Allocate IPC port & register to wwan subsystem for AT/MBIM
+ * communication.
+ * @ipc_imem: Pointer to iosm_imem structure
+ * @ipc_port_cfg: IPC Port Config
+ *
+ * Returns: Pointer to iosm_cdev on success & NULL on failure
+ */
+struct iosm_cdev *ipc_port_init(struct iosm_imem *ipc_imem,
+ struct ipc_chnl_cfg ipc_port_cfg);
+
+/**
+ * ipc_port_deinit - Free IPC port & unregister port with wwan subsystem.
+ * @ipc_port: Array of pointer to the ipc port data-struct
+ */
+void ipc_port_deinit(struct iosm_cdev *ipc_port[]);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol.c b/drivers/net/wwan/iosm/iosm_ipc_protocol.c
new file mode 100644
index 0000000000..63fc7012f0
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_protocol.h"
+#include "iosm_ipc_protocol_ops.h"
+#include "iosm_ipc_pm.h"
+#include "iosm_ipc_task_queue.h"
+
+int ipc_protocol_tq_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *prep_args,
+ struct ipc_rsp *response)
+{
+ int index = ipc_protocol_msg_prep(ipc_protocol->imem, msg_type,
+ prep_args);
+
+ /* Store reference towards caller specified response in response ring
+ * and signal CP
+ */
+ if (index >= 0 && index < IPC_MEM_MSG_ENTRIES) {
+ ipc_protocol->rsp_ring[index] = response;
+ ipc_protocol_msg_hp_update(ipc_protocol->imem);
+ }
+
+ return index;
+}
+
+/* Callback for message send */
+static int ipc_protocol_tq_msg_send_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct ipc_call_msg_send_args *send_args = msg;
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+
+ return ipc_protocol_tq_msg_send(ipc_protocol, send_args->msg_type,
+ send_args->prep_args,
+ send_args->response);
+}
+
+/* Remove reference to a response. This is typically used when a requestor timed
+ * out and is no longer interested in the response.
+ */
+static int ipc_protocol_tq_msg_remove(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+
+ ipc_protocol->rsp_ring[arg] = NULL;
+ return 0;
+}
+
+int ipc_protocol_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type prep,
+ union ipc_msg_prep_args *prep_args)
+{
+ struct ipc_call_msg_send_args send_args;
+ unsigned int exec_timeout;
+ struct ipc_rsp response;
+ int index;
+
+ exec_timeout = (ipc_protocol_get_ap_exec_stage(ipc_protocol) ==
+ IPC_MEM_EXEC_STAGE_RUN ?
+ IPC_MSG_COMPLETE_RUN_DEFAULT_TIMEOUT :
+ IPC_MSG_COMPLETE_BOOT_DEFAULT_TIMEOUT);
+
+ /* Trap if called from non-preemptible context */
+ might_sleep();
+
+ response.status = IPC_MEM_MSG_CS_INVALID;
+ init_completion(&response.completion);
+
+ send_args.msg_type = prep;
+ send_args.prep_args = prep_args;
+ send_args.response = &response;
+
+ /* Allocate and prepare message to be sent in tasklet context.
+	 * A positive index returned from tasklet_call references the message
+ * in case it needs to be cancelled when there is a timeout.
+ */
+ index = ipc_task_queue_send_task(ipc_protocol->imem,
+ ipc_protocol_tq_msg_send_cb, 0,
+ &send_args, 0, true);
+
+ if (index < 0) {
+ dev_err(ipc_protocol->dev, "msg %d failed", prep);
+ return index;
+ }
+
+ /* Wait for the device to respond to the message */
+ switch (wait_for_completion_timeout(&response.completion,
+ msecs_to_jiffies(exec_timeout))) {
+ case 0:
+ /* Timeout, there was no response from the device.
+ * Remove the reference to the local response completion
+ * object as we are no longer interested in the response.
+ */
+ ipc_task_queue_send_task(ipc_protocol->imem,
+ ipc_protocol_tq_msg_remove, index,
+ NULL, 0, true);
+ dev_err(ipc_protocol->dev, "msg timeout");
+ ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_TIMEOUT);
+ break;
+ default:
+ /* We got a response in time; check completion status: */
+ if (response.status != IPC_MEM_MSG_CS_SUCCESS) {
+ dev_err(ipc_protocol->dev,
+ "msg completion status error %d",
+ response.status);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
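
A minimal illustrative sketch (not part of this patch) of how a caller might drive ipc_protocol_msg_send(); the helper name example_pipe_open() and its call site are assumptions, while the prep type and argument union come from iosm_ipc_protocol_ops.h below:

static int example_pipe_open(struct iosm_protocol *ipc_protocol,
			     struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args args = {
		.pipe_open.pipe = pipe,
	};

	/* Blocks until CP acknowledges the OPEN_PIPE message or the
	 * run/boot timeout expires.
	 */
	return ipc_protocol_msg_send(ipc_protocol, IPC_MSG_PREP_PIPE_OPEN,
				     &args);
}
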
+
+static int ipc_protocol_msg_send_host_sleep(struct iosm_protocol *ipc_protocol,
+ u32 state)
+{
+ union ipc_msg_prep_args prep_args = {
+ .sleep.target = 0,
+ .sleep.state = state,
+ };
+
+ return ipc_protocol_msg_send(ipc_protocol, IPC_MSG_PREP_SLEEP,
+ &prep_args);
+}
+
+void ipc_protocol_doorbell_trigger(struct iosm_protocol *ipc_protocol,
+ u32 identifier)
+{
+ ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, identifier, true);
+}
+
+bool ipc_protocol_pm_dev_sleep_handle(struct iosm_protocol *ipc_protocol)
+{
+ u32 ipc_status = ipc_protocol_get_ipc_status(ipc_protocol);
+ u32 requested;
+
+ if (ipc_status != IPC_MEM_DEVICE_IPC_RUNNING) {
+ dev_err(ipc_protocol->dev,
+ "irq ignored, CP IPC state is %d, should be RUNNING",
+ ipc_status);
+
+ /* Stop further processing. */
+ return false;
+ }
+
+ /* Get a copy of the requested PM state by the device and the local
+ * device PM state.
+ */
+ requested = ipc_protocol_pm_dev_get_sleep_notification(ipc_protocol);
+
+ return ipc_pm_dev_slp_notification(&ipc_protocol->pm, requested);
+}
+
+static int ipc_protocol_tq_wakeup_dev_slp(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct iosm_pm *ipc_pm = &ipc_imem->ipc_protocol->pm;
+
+ /* Wakeup from device sleep if it is not ACTIVE */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_HS, true);
+
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_HS, false);
+
+ return 0;
+}
+
+void ipc_protocol_s2idle_sleep(struct iosm_protocol *ipc_protocol, bool sleep)
+{
+ ipc_pm_set_s2idle_sleep(&ipc_protocol->pm, sleep);
+}
+
+bool ipc_protocol_suspend(struct iosm_protocol *ipc_protocol)
+{
+ if (!ipc_pm_prepare_host_sleep(&ipc_protocol->pm))
+ goto err;
+
+ ipc_task_queue_send_task(ipc_protocol->imem,
+ ipc_protocol_tq_wakeup_dev_slp, 0, NULL, 0,
+ true);
+
+ if (!ipc_pm_wait_for_device_active(&ipc_protocol->pm)) {
+ ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_TIMEOUT);
+ goto err;
+ }
+
+ /* Send the sleep message for sync sys calls. */
+ dev_dbg(ipc_protocol->dev, "send TARGET_HOST, ENTER_SLEEP");
+ if (ipc_protocol_msg_send_host_sleep(ipc_protocol,
+ IPC_HOST_SLEEP_ENTER_SLEEP)) {
+ /* Sending ENTER_SLEEP message failed, we are still active */
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
+ goto err;
+ }
+
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_SLEEP;
+ return true;
+err:
+ return false;
+}
+
+bool ipc_protocol_resume(struct iosm_protocol *ipc_protocol)
+{
+ if (!ipc_pm_prepare_host_active(&ipc_protocol->pm))
+ return false;
+
+ dev_dbg(ipc_protocol->dev, "send TARGET_HOST, EXIT_SLEEP");
+ if (ipc_protocol_msg_send_host_sleep(ipc_protocol,
+ IPC_HOST_SLEEP_EXIT_SLEEP)) {
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_SLEEP;
+ return false;
+ }
+
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
+
+ return true;
+}
+
+struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem)
+{
+ struct iosm_protocol *ipc_protocol =
+ kzalloc(sizeof(*ipc_protocol), GFP_KERNEL);
+ struct ipc_protocol_context_info *p_ci;
+ u64 addr;
+
+ if (!ipc_protocol)
+ return NULL;
+
+ ipc_protocol->dev = ipc_imem->dev;
+ ipc_protocol->pcie = ipc_imem->pcie;
+ ipc_protocol->imem = ipc_imem;
+ ipc_protocol->p_ap_shm = NULL;
+ ipc_protocol->phy_ap_shm = 0;
+
+ ipc_protocol->old_msg_tail = 0;
+
+ ipc_protocol->p_ap_shm =
+ dma_alloc_coherent(&ipc_protocol->pcie->pci->dev,
+ sizeof(*ipc_protocol->p_ap_shm),
+ &ipc_protocol->phy_ap_shm, GFP_KERNEL);
+
+ if (!ipc_protocol->p_ap_shm) {
+ dev_err(ipc_protocol->dev, "pci shm alloc error");
+ kfree(ipc_protocol);
+ return NULL;
+ }
+
+ /* Prepare the context info for CP. */
+ addr = ipc_protocol->phy_ap_shm;
+ p_ci = &ipc_protocol->p_ap_shm->ci;
+ p_ci->device_info_addr =
+ addr + offsetof(struct ipc_protocol_ap_shm, device_info);
+ p_ci->head_array =
+ addr + offsetof(struct ipc_protocol_ap_shm, head_array);
+ p_ci->tail_array =
+ addr + offsetof(struct ipc_protocol_ap_shm, tail_array);
+ p_ci->msg_head = addr + offsetof(struct ipc_protocol_ap_shm, msg_head);
+ p_ci->msg_tail = addr + offsetof(struct ipc_protocol_ap_shm, msg_tail);
+ p_ci->msg_ring_addr =
+ addr + offsetof(struct ipc_protocol_ap_shm, msg_ring);
+ p_ci->msg_ring_entries = cpu_to_le16(IPC_MEM_MSG_ENTRIES);
+ p_ci->msg_irq_vector = IPC_MSG_IRQ_VECTOR;
+ p_ci->device_info_irq_vector = IPC_DEVICE_IRQ_VECTOR;
+
+ ipc_mmio_set_contex_info_addr(ipc_imem->mmio, addr);
+
+ ipc_pm_init(ipc_protocol);
+
+ return ipc_protocol;
+}
+
+void ipc_protocol_deinit(struct iosm_protocol *proto)
+{
+ dma_free_coherent(&proto->pcie->pci->dev, sizeof(*proto->p_ap_shm),
+ proto->p_ap_shm, proto->phy_ap_shm);
+
+ ipc_pm_deinit(proto);
+ kfree(proto);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol.h b/drivers/net/wwan/iosm/iosm_ipc_protocol.h
new file mode 100644
index 0000000000..289397c4ea
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PROTOCOL_H
+#define IOSM_IPC_PROTOCOL_H
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_pm.h"
+#include "iosm_ipc_protocol_ops.h"
+
+/* Trigger the doorbell interrupt on CP. */
+#define IPC_DOORBELL_IRQ_HPDA 0
+#define IPC_DOORBELL_IRQ_IPC 1
+#define IPC_DOORBELL_IRQ_SLEEP 2
+
+/* IRQ vector number. */
+#define IPC_DEVICE_IRQ_VECTOR 0
+#define IPC_MSG_IRQ_VECTOR 0
+#define IPC_UL_PIPE_IRQ_VECTOR 0
+#define IPC_DL_PIPE_IRQ_VECTOR 0
+
+#define IPC_MEM_MSG_ENTRIES 128
+
+/* Default timeout for sending IPC messages like open pipe, close pipe etc.
+ * during run mode.
+ *
+ * If the message interface lock to CP times out, the link to CP is broken.
+ * mode : run mode (IPC_MEM_EXEC_STAGE_RUN)
+ * unit : milliseconds
+ */
+#define IPC_MSG_COMPLETE_RUN_DEFAULT_TIMEOUT 500 /* 0.5 seconds */
+
+/* Default timeout for sending IPC messages like open pipe, close pipe etc.
+ * during boot mode.
+ *
+ * If the message interface lock to CP times out, the link to CP is broken.
+ * mode : boot mode
+ * (IPC_MEM_EXEC_STAGE_BOOT | IPC_MEM_EXEC_STAGE_PSI | IPC_MEM_EXEC_STAGE_EBL)
+ * unit : milliseconds
+ */
+#define IPC_MSG_COMPLETE_BOOT_DEFAULT_TIMEOUT 500 /* 0.5 seconds */
+
+/**
+ * struct ipc_protocol_context_info - Structure of the context info
+ * @device_info_addr: 64 bit address to device info
+ * @head_array: 64 bit address to head pointer arr for the pipes
+ * @tail_array: 64 bit address to tail pointer arr for the pipes
+ * @msg_head: 64 bit address to message head pointer
+ * @msg_tail: 64 bit address to message tail pointer
+ * @msg_ring_addr: 64 bit pointer to the message ring buffer
+ * @msg_ring_entries: This field provides the number of entries which
+ * the message ring (MR) can hold
+ * @msg_irq_vector: This field provides the IRQ which shall be
+ * generated by the EP device when generating
+ * completion for Messages.
+ * @device_info_irq_vector: This field provides the IRQ which shall be
+ * generated by the EP device after updating the device info
+ */
+struct ipc_protocol_context_info {
+ phys_addr_t device_info_addr;
+ phys_addr_t head_array;
+ phys_addr_t tail_array;
+ phys_addr_t msg_head;
+ phys_addr_t msg_tail;
+ phys_addr_t msg_ring_addr;
+ __le16 msg_ring_entries;
+ u8 msg_irq_vector;
+ u8 device_info_irq_vector;
+};
+
+/**
+ * struct ipc_protocol_device_info - Structure for the device information
+ * @execution_stage: CP execution stage
+ * @ipc_status: IPC states
+ * @device_sleep_notification: Requested device pm states
+ */
+struct ipc_protocol_device_info {
+ __le32 execution_stage;
+ __le32 ipc_status;
+ __le32 device_sleep_notification;
+};
+
+/**
+ * struct ipc_protocol_ap_shm - Protocol Shared Memory Structure
+ * @ci: Context information struct
+ * @device_info: Device information struct
+ * @msg_head: Points to the msg head
+ * @head_array: Array of head pointers
+ * @msg_tail: Points to the msg tail
+ * @tail_array: Array of tail pointers
+ * @msg_ring: Circular buffer for the read/tail and write/head
+ * indices.
+ */
+struct ipc_protocol_ap_shm {
+ struct ipc_protocol_context_info ci;
+ struct ipc_protocol_device_info device_info;
+ __le32 msg_head;
+ __le32 head_array[IPC_MEM_MAX_PIPES];
+ __le32 msg_tail;
+ __le32 tail_array[IPC_MEM_MAX_PIPES];
+ union ipc_mem_msg_entry msg_ring[IPC_MEM_MSG_ENTRIES];
+};
+
+/**
+ * struct iosm_protocol - Structure for IPC protocol.
+ * @p_ap_shm: Pointer to Protocol Shared Memory Structure
+ * @pm: Instance to struct iosm_pm
+ * @pcie: Pointer to struct iosm_pcie
+ * @imem: Pointer to struct iosm_imem
+ * @rsp_ring: Array of OS completion objects to be triggered once CP
+ * acknowledges a request in the message ring
+ * @dev: Pointer to device structure
+ * @phy_ap_shm: Physical/Mapped representation of the shared memory info
+ * @old_msg_tail: Old msg tail ptr, until AP has handled ACK's from CP
+ */
+struct iosm_protocol {
+ struct ipc_protocol_ap_shm *p_ap_shm;
+ struct iosm_pm pm;
+ struct iosm_pcie *pcie;
+ struct iosm_imem *imem;
+ struct ipc_rsp *rsp_ring[IPC_MEM_MSG_ENTRIES];
+ struct device *dev;
+ dma_addr_t phy_ap_shm;
+ u32 old_msg_tail;
+};
+
+/**
+ * struct ipc_call_msg_send_args - Structure for message argument for
+ * tasklet function.
+ * @prep_args: Arguments for message preparation function
+ * @response: Can be NULL if result can be ignored
+ * @msg_type: Message Type
+ */
+struct ipc_call_msg_send_args {
+ union ipc_msg_prep_args *prep_args;
+ struct ipc_rsp *response;
+ enum ipc_msg_prep_type msg_type;
+};
+
+/**
+ * ipc_protocol_tq_msg_send - prepare the msg and send to CP
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ * @msg_type: Message type
+ * @prep_args: Message arguments
+ * @response: Pointer to a response object which has a
+ * completion object and return code.
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_protocol_tq_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *prep_args,
+ struct ipc_rsp *response);
+
+/**
+ * ipc_protocol_msg_send - Send ipc control message to CP and wait for response
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ * @prep: Message type
+ * @prep_args: Message arguments
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_protocol_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type prep,
+ union ipc_msg_prep_args *prep_args);
+
+/**
+ * ipc_protocol_suspend - Signal to CP that host wants to go to sleep (suspend).
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ *
+ * Returns: true if host can suspend, false if suspend must be aborted.
+ */
+bool ipc_protocol_suspend(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_s2idle_sleep - Call PM function to set PM variables in s2idle
+ * sleep/active case
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ * @sleep: True for sleep/False for active
+ */
+void ipc_protocol_s2idle_sleep(struct iosm_protocol *ipc_protocol, bool sleep);
+
+/**
+ * ipc_protocol_resume - Signal to CP that host wants to resume operation.
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ *
+ * Returns: true if host can resume, false if there is a problem.
+ */
+bool ipc_protocol_resume(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_pm_dev_sleep_handle - Handles the Device Sleep state change
+ * notification.
+ * @ipc_protocol: Pointer to ipc_protocol instance.
+ *
+ * Returns: true if sleep notification handled, false otherwise.
+ */
+bool ipc_protocol_pm_dev_sleep_handle(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_doorbell_trigger - Wrapper for PM function which wake up the
+ * device if it is in low power mode
+ * and trigger a head pointer update interrupt.
+ * @ipc_protocol: Pointer to ipc_protocol instance.
+ * @identifier: Specifies what component triggered hpda
+ * update irq
+ */
+void ipc_protocol_doorbell_trigger(struct iosm_protocol *ipc_protocol,
+ u32 identifier);
+
+/**
+ * ipc_protocol_sleep_notification_string - Returns last Sleep Notification as
+ * string.
+ * @ipc_protocol: Instance pointer of Protocol module.
+ *
+ * Returns: Pointer to string.
+ */
+const char *
+ipc_protocol_sleep_notification_string(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_init - Allocates IPC protocol instance
+ * @ipc_imem: Pointer to iosm_imem structure
+ *
+ * Returns: Address of IPC protocol instance on success & NULL on failure.
+ */
+struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_protocol_deinit - Deallocates IPC protocol instance
+ * @ipc_protocol: pointer to the IPC protocol instance
+ */
+void ipc_protocol_deinit(struct iosm_protocol *ipc_protocol);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
new file mode 100644
index 0000000000..4627847c6d
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_protocol.h"
+#include "iosm_ipc_protocol_ops.h"
+
+/* Get the next free message element.*/
+static union ipc_mem_msg_entry *
+ipc_protocol_free_msg_get(struct iosm_protocol *ipc_protocol, int *index)
+{
+ u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
+ u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;
+ union ipc_mem_msg_entry *msg;
+
+ if (new_head == le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail)) {
+ dev_err(ipc_protocol->dev, "message ring is full");
+ return NULL;
+ }
+
+ /* Get the pointer to the next free message element,
+ * reset the fields and mark it as invalid.
+ */
+ msg = &ipc_protocol->p_ap_shm->msg_ring[head];
+ memset(msg, 0, sizeof(*msg));
+
+ /* return index in message ring */
+ *index = head;
+
+ return msg;
+}
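
For clarity, a small illustrative sketch (not part of this patch) of the ring-full test used above; example_msg_ring_full() is a hypothetical name:

static bool example_msg_ring_full(u32 head, u32 tail)
{
	/* One slot is deliberately left unused so that head == tail can
	 * only mean "empty"; the ring therefore holds at most
	 * IPC_MEM_MSG_ENTRIES - 1 outstanding messages.
	 */
	return ((head + 1) % IPC_MEM_MSG_ENTRIES) == tail;
}
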
+
+/* Updates the message ring Head pointer */
+void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+ u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
+ u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;
+
+ /* Update head pointer and fire doorbell. */
+ ipc_protocol->p_ap_shm->msg_head = cpu_to_le32(new_head);
+ ipc_protocol->old_msg_tail =
+ le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);
+
+ ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, IPC_HP_MR, false);
+}
+
+/* Allocate and prepare an OPEN_PIPE message.
+ * This also allocates the memory for the new TDR structure and
+ * updates the pipe structure referenced in the preparation arguments.
+ */
+static int ipc_protocol_msg_prepipe_open(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+ struct ipc_pipe *pipe = args->pipe_open.pipe;
+ struct ipc_protocol_td *tdr;
+ struct sk_buff **skbr;
+
+ if (!msg) {
+ dev_err(ipc_protocol->dev, "failed to get free message");
+ return -EIO;
+ }
+
+ /* Allocate the skb ring elements that track the buffers in flight.
+ * The SKB ring is an internal driver allocation, so there is no need
+ * to re-calculate the start and end addresses.
+ */
+ skbr = kcalloc(pipe->nr_of_entries, sizeof(*skbr), GFP_ATOMIC);
+ if (!skbr)
+ return -ENOMEM;
+
+ /* Allocate the transfer descriptors for the pipe. */
+ tdr = dma_alloc_coherent(&ipc_protocol->pcie->pci->dev,
+ pipe->nr_of_entries * sizeof(*tdr),
+ &pipe->phy_tdr_start, GFP_ATOMIC);
+ if (!tdr) {
+ kfree(skbr);
+ dev_err(ipc_protocol->dev, "tdr alloc error");
+ return -ENOMEM;
+ }
+
+ pipe->max_nr_of_queued_entries = pipe->nr_of_entries - 1;
+ pipe->nr_of_queued_entries = 0;
+ pipe->tdr_start = tdr;
+ pipe->skbr_start = skbr;
+ pipe->old_tail = 0;
+
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;
+
+ msg->open_pipe.type_of_message = IPC_MEM_MSG_OPEN_PIPE;
+ msg->open_pipe.pipe_nr = pipe->pipe_nr;
+ msg->open_pipe.tdr_addr = cpu_to_le64(pipe->phy_tdr_start);
+ msg->open_pipe.tdr_entries = cpu_to_le16(pipe->nr_of_entries);
+ msg->open_pipe.accumulation_backoff =
+ cpu_to_le32(pipe->accumulation_backoff);
+ msg->open_pipe.irq_vector = cpu_to_le32(pipe->irq);
+
+ return index;
+}
+
+static int ipc_protocol_msg_prepipe_close(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index = -1;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+ struct ipc_pipe *pipe = args->pipe_close.pipe;
+
+ if (!msg)
+ return -EIO;
+
+ msg->close_pipe.type_of_message = IPC_MEM_MSG_CLOSE_PIPE;
+ msg->close_pipe.pipe_nr = pipe->pipe_nr;
+
+ dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_CLOSE_PIPE(pipe_nr=%d)",
+ msg->close_pipe.pipe_nr);
+
+ return index;
+}
+
+static int ipc_protocol_msg_prep_sleep(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index = -1;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+
+ if (!msg) {
+ dev_err(ipc_protocol->dev, "failed to get free message");
+ return -EIO;
+ }
+
+ /* Prepare and send the host sleep message to CP to enter or exit D3. */
+ msg->host_sleep.type_of_message = IPC_MEM_MSG_SLEEP;
+ msg->host_sleep.target = args->sleep.target; /* 0=host, 1=device */
+
+ /* state; 0=enter, 1=exit 2=enter w/o protocol */
+ msg->host_sleep.state = args->sleep.state;
+
+ dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_SLEEP(target=%d; state=%d)",
+ msg->host_sleep.target, msg->host_sleep.state);
+
+ return index;
+}
+
+static int ipc_protocol_msg_prep_feature_set(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index = -1;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+
+ if (!msg) {
+ dev_err(ipc_protocol->dev, "failed to get free message");
+ return -EIO;
+ }
+
+ msg->feature_set.type_of_message = IPC_MEM_MSG_FEATURE_SET;
+ msg->feature_set.reset_enable = args->feature_set.reset_enable <<
+ RESET_BIT;
+
+ dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_FEATURE_SET(reset_enable=%d)",
+ msg->feature_set.reset_enable >> RESET_BIT);
+
+ return index;
+}
+
+/* Processes the message consumed by CP. */
+bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+ struct ipc_rsp **rsp_ring = ipc_protocol->rsp_ring;
+ bool msg_processed = false;
+ u32 i;
+
+ if (le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail) >=
+ IPC_MEM_MSG_ENTRIES) {
+ dev_err(ipc_protocol->dev, "msg_tail out of range: %d",
+ le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail));
+ return msg_processed;
+ }
+
+ if (irq != IMEM_IRQ_DONT_CARE &&
+ irq != ipc_protocol->p_ap_shm->ci.msg_irq_vector)
+ return msg_processed;
+
+ for (i = ipc_protocol->old_msg_tail;
+ i != le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);
+ i = (i + 1) % IPC_MEM_MSG_ENTRIES) {
+ union ipc_mem_msg_entry *msg =
+ &ipc_protocol->p_ap_shm->msg_ring[i];
+
+ dev_dbg(ipc_protocol->dev, "msg[%d]: type=%u status=%d", i,
+ msg->common.type_of_message,
+ msg->common.completion_status);
+
+ /* Update response with status and wake up waiting requestor */
+ if (rsp_ring[i]) {
+ rsp_ring[i]->status =
+ le32_to_cpu(msg->common.completion_status);
+ complete(&rsp_ring[i]->completion);
+ rsp_ring[i] = NULL;
+ }
+ msg_processed = true;
+ }
+
+ ipc_protocol->old_msg_tail = i;
+ return msg_processed;
+}
+
+/* Sends data from UL list to CP for the provided pipe by updating the Head
+ * pointer of the given pipe.
+ */
+bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe,
+ struct sk_buff_head *p_ul_list)
+{
+ struct ipc_protocol_td *td;
+ bool hpda_pending = false;
+ struct sk_buff *skb;
+ s32 free_elements;
+ u32 head;
+ u32 tail;
+
+ if (!ipc_protocol->p_ap_shm) {
+ dev_err(ipc_protocol->dev, "driver is not initialized");
+ return false;
+ }
+
+ /* Get head and tail of the td list and calculate
+ * the number of free elements.
+ */
+ head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
+ tail = pipe->old_tail;
+
+ while (!skb_queue_empty(p_ul_list)) {
+ if (head < tail)
+ free_elements = tail - head - 1;
+ else
+ free_elements =
+ pipe->nr_of_entries - head + ((s32)tail - 1);
+
+ if (free_elements <= 0) {
+ dev_dbg(ipc_protocol->dev,
+ "no free td elements for UL pipe %d",
+ pipe->pipe_nr);
+ break;
+ }
+
+ /* Get the td address. */
+ td = &pipe->tdr_start[head];
+
+ /* Take the first element of the uplink list and add it
+ * to the td list.
+ */
+ skb = skb_dequeue(p_ul_list);
+ if (WARN_ON(!skb))
+ break;
+
+ /* Save the reference to the uplink skbuf. */
+ pipe->skbr_start[head] = skb;
+
+ td->buffer.address = IPC_CB(skb)->mapping;
+ td->scs = cpu_to_le32(skb->len) & cpu_to_le32(SIZE_MASK);
+ td->next = 0;
+
+ pipe->nr_of_queued_entries++;
+
+ /* Calculate the new head and save it. */
+ head++;
+ if (head >= pipe->nr_of_entries)
+ head = 0;
+
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
+ cpu_to_le32(head);
+ }
+
+ if (pipe->old_head != head) {
+ dev_dbg(ipc_protocol->dev, "New UL TDs Pipe:%d", pipe->pipe_nr);
+
+ pipe->old_head = head;
+ /* Trigger doorbell because of pending UL packets. */
+ hpda_pending = true;
+ }
+
+ return hpda_pending;
+}
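
The free-element computation above can be isolated as follows; a minimal sketch (not part of this patch), with example_free_td_slots() being a hypothetical helper:

static s32 example_free_td_slots(u32 head, u32 tail, u32 nr_of_entries)
{
	/* Same arithmetic as in the loop above: one slot always stays free
	 * so that a full ring can be distinguished from an empty one.
	 */
	if (head < tail)
		return tail - head - 1;

	return nr_of_entries - head + ((s32)tail - 1);
}
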
+
+/* Checks for Tail pointer update from CP and returns the data as SKB. */
+struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ struct ipc_protocol_td *p_td = &pipe->tdr_start[pipe->old_tail];
+ struct sk_buff *skb = pipe->skbr_start[pipe->old_tail];
+
+ pipe->nr_of_queued_entries--;
+ pipe->old_tail++;
+ if (pipe->old_tail >= pipe->nr_of_entries)
+ pipe->old_tail = 0;
+
+ if (!p_td->buffer.address) {
+ dev_err(ipc_protocol->dev, "Td buffer address is NULL");
+ return NULL;
+ }
+
+ if (p_td->buffer.address != IPC_CB(skb)->mapping) {
+ dev_err(ipc_protocol->dev,
+ "pipe %d: invalid buf_addr or skb_data",
+ pipe->pipe_nr);
+ return NULL;
+ }
+
+ return skb;
+}
+
+/* Allocates an SKB for CP to send data and updates the Head Pointer
+ * of the given Pipe#.
+ */
+bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ struct ipc_protocol_td *td;
+ dma_addr_t mapping = 0;
+ u32 head, new_head;
+ struct sk_buff *skb;
+ u32 tail;
+
+ /* Get head and tail of the td list and calculate
+ * the number of free elements.
+ */
+ head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
+ tail = le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);
+
+ new_head = head + 1;
+ if (new_head >= pipe->nr_of_entries)
+ new_head = 0;
+
+ if (new_head == tail)
+ return false;
+
+ /* Get the td address. */
+ td = &pipe->tdr_start[head];
+
+ /* Allocate the skbuf for the descriptor. */
+ skb = ipc_pcie_alloc_skb(ipc_protocol->pcie, pipe->buf_size, GFP_ATOMIC,
+ &mapping, DMA_FROM_DEVICE,
+ IPC_MEM_DL_ETH_OFFSET);
+ if (!skb)
+ return false;
+
+ td->buffer.address = mapping;
+ td->scs = cpu_to_le32(pipe->buf_size) & cpu_to_le32(SIZE_MASK);
+ td->next = 0;
+
+ /* store the new head value. */
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
+ cpu_to_le32(new_head);
+
+ /* Save the reference to the skbuf. */
+ pipe->skbr_start[head] = skb;
+
+ pipe->nr_of_queued_entries++;
+
+ return true;
+}
+
+/* Processes DL TDs */
+struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ struct ipc_protocol_td *p_td;
+ struct sk_buff *skb;
+
+ if (!pipe->tdr_start)
+ return NULL;
+
+ /* Copy the reference to the downlink buffer. */
+ p_td = &pipe->tdr_start[pipe->old_tail];
+ skb = pipe->skbr_start[pipe->old_tail];
+
+ /* Reset the ring elements. */
+ pipe->skbr_start[pipe->old_tail] = NULL;
+
+ pipe->nr_of_queued_entries--;
+
+ pipe->old_tail++;
+ if (pipe->old_tail >= pipe->nr_of_entries)
+ pipe->old_tail = 0;
+
+ if (!skb) {
+ dev_err(ipc_protocol->dev, "skb is null");
+ goto ret;
+ } else if (!p_td->buffer.address) {
+ dev_err(ipc_protocol->dev, "td/buffer address is null");
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ }
+
+ if (p_td->buffer.address != IPC_CB(skb)->mapping) {
+ dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
+ (unsigned long long)p_td->buffer.address, skb->data);
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ } else if ((le32_to_cpu(p_td->scs) & SIZE_MASK) > pipe->buf_size) {
+ dev_err(ipc_protocol->dev, "invalid buffer size %d > %d",
+ le32_to_cpu(p_td->scs) & SIZE_MASK,
+ pipe->buf_size);
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ } else if (le32_to_cpu(p_td->scs) >> COMPLETION_STATUS ==
+ IPC_MEM_TD_CS_ABORT) {
+ /* Discard aborted buffers. */
+ dev_dbg(ipc_protocol->dev, "discard 'aborted' buffers");
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ }
+
+ /* Set the length field in skbuf. */
+ skb_put(skb, le32_to_cpu(p_td->scs) & SIZE_MASK);
+
+ret:
+ return skb;
+}
+
+void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe, u32 *head,
+ u32 *tail)
+{
+ struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;
+
+ if (head)
+ *head = le32_to_cpu(ipc_ap_shm->head_array[pipe->pipe_nr]);
+
+ if (tail)
+ *tail = le32_to_cpu(ipc_ap_shm->tail_array[pipe->pipe_nr]);
+}
+
+/* Frees the TDs given to CP. */
+void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ struct sk_buff *skb;
+ u32 head;
+ u32 tail;
+
+ /* Get the start and the end of the buffer list. */
+ head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
+ tail = pipe->old_tail;
+
+ /* Reset tail and head to 0. */
+ ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr] = 0;
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;
+
+ /* Free pending uplink and downlink buffers. */
+ if (pipe->skbr_start) {
+ while (head != tail) {
+ /* Get the reference to the skbuf,
+ * which is on the way and free it.
+ */
+ skb = pipe->skbr_start[tail];
+ if (skb)
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+
+ tail++;
+ if (tail >= pipe->nr_of_entries)
+ tail = 0;
+ }
+
+ kfree(pipe->skbr_start);
+ pipe->skbr_start = NULL;
+ }
+
+ pipe->old_tail = 0;
+
+ /* Free and reset the td and skbuf circular buffers. kfree is safe here. */
+ if (pipe->tdr_start) {
+ dma_free_coherent(&ipc_protocol->pcie->pci->dev,
+ sizeof(*pipe->tdr_start) * pipe->nr_of_entries,
+ pipe->tdr_start, pipe->phy_tdr_start);
+
+ pipe->tdr_start = NULL;
+ }
+}
+
+enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol
+ *ipc_protocol)
+{
+ return (enum ipc_mem_device_ipc_state)
+ le32_to_cpu(ipc_protocol->p_ap_shm->device_info.ipc_status);
+}
+
+enum ipc_mem_exec_stage
+ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol)
+{
+ return le32_to_cpu(ipc_protocol->p_ap_shm->device_info.execution_stage);
+}
+
+int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *args)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+
+ switch (msg_type) {
+ case IPC_MSG_PREP_SLEEP:
+ return ipc_protocol_msg_prep_sleep(ipc_protocol, args);
+
+ case IPC_MSG_PREP_PIPE_OPEN:
+ return ipc_protocol_msg_prepipe_open(ipc_protocol, args);
+
+ case IPC_MSG_PREP_PIPE_CLOSE:
+ return ipc_protocol_msg_prepipe_close(ipc_protocol, args);
+
+ case IPC_MSG_PREP_FEATURE_SET:
+ return ipc_protocol_msg_prep_feature_set(ipc_protocol, args);
+
+ /* Unsupported messages in protocol */
+ case IPC_MSG_PREP_MAP:
+ case IPC_MSG_PREP_UNMAP:
+ default:
+ dev_err(ipc_protocol->dev,
+ "unsupported message type: %d in protocol", msg_type);
+ return -EINVAL;
+ }
+}
+
+u32
+ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol *ipc_protocol)
+{
+ struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;
+
+ return le32_to_cpu(ipc_ap_shm->device_info.device_sleep_notification);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h
new file mode 100644
index 0000000000..35aa138730
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PROTOCOL_OPS_H
+#define IOSM_IPC_PROTOCOL_OPS_H
+
+#define SIZE_MASK 0x00FFFFFF
+#define COMPLETION_STATUS 24
+#define RESET_BIT 7
+
+/**
+ * enum ipc_mem_td_cs - Completion status of a TD
+ * @IPC_MEM_TD_CS_INVALID: Initial status - td not yet used.
+ * @IPC_MEM_TD_CS_PARTIAL_TRANSFER: More data pending -> next TD used for this
+ * @IPC_MEM_TD_CS_END_TRANSFER: IO transfer is complete.
+ * @IPC_MEM_TD_CS_OVERFLOW: IO transfer too small for the buffer to write
+ * @IPC_MEM_TD_CS_ABORT: TD marked as abort and shall be discarded
+ * by AP.
+ * @IPC_MEM_TD_CS_ERROR: General error.
+ */
+enum ipc_mem_td_cs {
+ IPC_MEM_TD_CS_INVALID,
+ IPC_MEM_TD_CS_PARTIAL_TRANSFER,
+ IPC_MEM_TD_CS_END_TRANSFER,
+ IPC_MEM_TD_CS_OVERFLOW,
+ IPC_MEM_TD_CS_ABORT,
+ IPC_MEM_TD_CS_ERROR,
+};
+
+/**
+ * enum ipc_mem_msg_cs - Completion status of IPC Message
+ * @IPC_MEM_MSG_CS_INVALID: Initial status.
+ * @IPC_MEM_MSG_CS_SUCCESS: IPC Message completion success.
+ * @IPC_MEM_MSG_CS_ERROR: Message send error.
+ */
+enum ipc_mem_msg_cs {
+ IPC_MEM_MSG_CS_INVALID,
+ IPC_MEM_MSG_CS_SUCCESS,
+ IPC_MEM_MSG_CS_ERROR,
+};
+
+/**
+ * struct ipc_msg_prep_args_pipe - struct for pipe args for message preparation
+ * @pipe: Pipe to open/close
+ */
+struct ipc_msg_prep_args_pipe {
+ struct ipc_pipe *pipe;
+};
+
+/**
+ * struct ipc_msg_prep_args_sleep - struct for sleep args for message
+ * preparation
+ * @target: 0=host, 1=device
+ * @state: 0=enter sleep, 1=exit sleep
+ */
+struct ipc_msg_prep_args_sleep {
+ unsigned int target;
+ unsigned int state;
+};
+
+/**
+ * struct ipc_msg_prep_feature_set - struct for feature set argument for
+ * message preparation
+ * @reset_enable: 0=out-of-band, 1=in-band-crash notification
+ */
+struct ipc_msg_prep_feature_set {
+ u8 reset_enable;
+};
+
+/**
+ * struct ipc_msg_prep_map - struct for map argument for message preparation
+ * @region_id: Region to map
+ * @addr: PCIe address of region to map
+ * @size: Size of the region to map
+ */
+struct ipc_msg_prep_map {
+ unsigned int region_id;
+ unsigned long addr;
+ size_t size;
+};
+
+/**
+ * struct ipc_msg_prep_unmap - struct for unmap argument for message preparation
+ * @region_id: Region to unmap
+ */
+struct ipc_msg_prep_unmap {
+ unsigned int region_id;
+};
+
+/**
+ * struct ipc_msg_prep_args - Union to handle different message types
+ * @pipe_open: Pipe open message preparation struct
+ * @pipe_close: Pipe close message preparation struct
+ * @sleep: Sleep message preparation struct
+ * @feature_set: Feature set message preparation struct
+ * @map: Memory map message preparation struct
+ * @unmap: Memory unmap message preparation struct
+ */
+union ipc_msg_prep_args {
+ struct ipc_msg_prep_args_pipe pipe_open;
+ struct ipc_msg_prep_args_pipe pipe_close;
+ struct ipc_msg_prep_args_sleep sleep;
+ struct ipc_msg_prep_feature_set feature_set;
+ struct ipc_msg_prep_map map;
+ struct ipc_msg_prep_unmap unmap;
+};
+
+/**
+ * enum ipc_msg_prep_type - Enum for message prepare actions
+ * @IPC_MSG_PREP_SLEEP: Sleep message preparation type
+ * @IPC_MSG_PREP_PIPE_OPEN: Pipe open message preparation type
+ * @IPC_MSG_PREP_PIPE_CLOSE: Pipe close message preparation type
+ * @IPC_MSG_PREP_FEATURE_SET: Feature set message preparation type
+ * @IPC_MSG_PREP_MAP: Memory map message preparation type
+ * @IPC_MSG_PREP_UNMAP: Memory unmap message preparation type
+ */
+enum ipc_msg_prep_type {
+ IPC_MSG_PREP_SLEEP,
+ IPC_MSG_PREP_PIPE_OPEN,
+ IPC_MSG_PREP_PIPE_CLOSE,
+ IPC_MSG_PREP_FEATURE_SET,
+ IPC_MSG_PREP_MAP,
+ IPC_MSG_PREP_UNMAP,
+};
+
+/**
+ * struct ipc_rsp - Response to sent message
+ * @completion: For waking up requestor
+ * @status: Completion status
+ */
+struct ipc_rsp {
+ struct completion completion;
+ enum ipc_mem_msg_cs status;
+};
+
+/**
+ * enum ipc_mem_msg - Type-definition of the messages.
+ * @IPC_MEM_MSG_OPEN_PIPE: AP ->CP: Open a pipe
+ * @IPC_MEM_MSG_CLOSE_PIPE: AP ->CP: Close a pipe
+ * @IPC_MEM_MSG_ABORT_PIPE: AP ->CP: wait for completion of the
+ * running transfer and abort all pending
+ * IO-transfers for the pipe
+ * @IPC_MEM_MSG_SLEEP: AP ->CP: host enter or exit sleep
+ * @IPC_MEM_MSG_FEATURE_SET: AP ->CP: Intel feature configuration
+ */
+enum ipc_mem_msg {
+ IPC_MEM_MSG_OPEN_PIPE = 0x01,
+ IPC_MEM_MSG_CLOSE_PIPE = 0x02,
+ IPC_MEM_MSG_ABORT_PIPE = 0x03,
+ IPC_MEM_MSG_SLEEP = 0x04,
+ IPC_MEM_MSG_FEATURE_SET = 0xF0,
+};
+
+/**
+ * struct ipc_mem_msg_open_pipe - Message structure for open pipe
+ * @tdr_addr: Tdr address
+ * @tdr_entries: Tdr entries
+ * @pipe_nr: Pipe number
+ * @type_of_message: Message type
+ * @irq_vector: MSI vector number
+ * @accumulation_backoff: Time in usec for data accumulation
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_open_pipe {
+ __le64 tdr_addr;
+ __le16 tdr_entries;
+ u8 pipe_nr;
+ u8 type_of_message;
+ __le32 irq_vector;
+ __le32 accumulation_backoff;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_close_pipe - Message structure for close pipe
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @pipe_nr: Pipe number
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_close_pipe {
+ __le32 reserved1[2];
+ __le16 reserved2;
+ u8 pipe_nr;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_abort_pipe - Message structure for abort pipe
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @pipe_nr: Pipe number
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_abort_pipe {
+ __le32 reserved1[2];
+ __le16 reserved2;
+ u8 pipe_nr;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_host_sleep - Message structure for sleep message.
+ * @reserved1: Reserved
+ * @target: 0=host, 1=device, host or EP device
+ * is the message target
+ * @state: 0=enter sleep, 1=exit sleep,
+ * 2=enter sleep no protocol
+ * @reserved2: Reserved
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_host_sleep {
+ __le32 reserved1[2];
+ u8 target;
+ u8 state;
+ u8 reserved2;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_feature_set - Message structure for feature_set message
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @reset_enable: 0=out-of-band, 1=in-band-crash notification
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_feature_set {
+ __le32 reserved1[2];
+ __le16 reserved2;
+ u8 reset_enable;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_common - Message structure for completion status update.
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_common {
+ __le32 reserved1[2];
+ u8 reserved2[3];
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * union ipc_mem_msg_entry - Union with all possible messages.
+ * @open_pipe: Open pipe message struct
+ * @close_pipe: Close pipe message struct
+ * @abort_pipe: Abort pipe message struct
+ * @host_sleep: Host sleep message struct
+ * @feature_set: Feature set message struct
+ * @common: Used to access msg_type and to set the completion status
+ */
+union ipc_mem_msg_entry {
+ struct ipc_mem_msg_open_pipe open_pipe;
+ struct ipc_mem_msg_close_pipe close_pipe;
+ struct ipc_mem_msg_abort_pipe abort_pipe;
+ struct ipc_mem_msg_host_sleep host_sleep;
+ struct ipc_mem_msg_feature_set feature_set;
+ struct ipc_mem_msg_common common;
+};
+
+/* Transfer descriptor definition. */
+struct ipc_protocol_td {
+ union {
+ /* 0 : 63 - 64-bit address of a buffer in host memory. */
+ dma_addr_t address;
+ struct {
+ /* 0 : 31 - 32 bit address */
+ __le32 address;
+ /* 32 : 63 - corresponding descriptor */
+ __le32 desc;
+ } __packed shm;
+ } buffer;
+
+ /* 0 - 2nd byte - Size of the buffer.
+ * The host provides the size of the buffer queued.
+ * The EP device reads this value and shall update
+ * it for downlink transfers to indicate the
+ * amount of data written in buffer.
+ * 3rd byte - This field provides the completion status
+ * of the TD. When queuing the TD, the host sets
+ * the status to 0. The EP device updates this
+ * field when completing the TD.
+ */
+ __le32 scs;
+
+ /* 0th - nr of following descriptors
+ * 1 - 3rd byte - reserved
+ */
+ __le32 next;
+} __packed;
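
A brief illustrative sketch (not part of this patch) of how the scs field is decoded, mirroring the masks used in iosm_ipc_protocol_ops.c; example_td_decode_scs() is a hypothetical name:

static void example_td_decode_scs(const struct ipc_protocol_td *td,
				  u32 *size, u32 *status)
{
	/* Bytes 0..2 carry the buffer size, byte 3 carries the completion
	 * status (enum ipc_mem_td_cs).
	 */
	*size = le32_to_cpu(td->scs) & SIZE_MASK;
	*status = le32_to_cpu(td->scs) >> COMPLETION_STATUS;
}
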
+
+/**
+ * ipc_protocol_msg_prep - Prepare message based upon message type
+ * @ipc_imem: iosm_imem instance
+ * @msg_type: message prepare type
+ * @args: message arguments
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *args);
+
+/**
+ * ipc_protocol_msg_hp_update - Function for head pointer update
+ * of message ring
+ * @ipc_imem: iosm_imem instance
+ */
+void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_protocol_msg_process - Function for processing responses
+ * to IPC messages
+ * @ipc_imem: iosm_imem instance
+ * @irq: IRQ vector
+ *
+ * Return: True on success, false if error
+ */
+bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq);
+
+/**
+ * ipc_protocol_ul_td_send - Function for sending the data to CP
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ * @p_ul_list: uplink sk_buff list
+ *
+ * Return: true in success, false in case of error
+ */
+bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe,
+ struct sk_buff_head *p_ul_list);
+
+/**
+ * ipc_protocol_ul_td_process - Function for processing the sent data
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ *
+ * Return: sk_buff instance
+ */
+struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_dl_td_prepare - Function for providing DL TDs to CP
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ *
+ * Return: true in success, false in case of error
+ */
+bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_dl_td_process - Function for processing the DL data
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ *
+ * Return: sk_buff instance
+ */
+struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_get_head_tail_index - Function for getting Head and Tail
+ * pointer index of given pipe
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe Instance
+ * @head: head pointer index of the given pipe
+ * @tail: tail pointer index of the given pipe
+ */
+void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe, u32 *head,
+ u32 *tail);
+/**
+ * ipc_protocol_get_ipc_status - Function for getting the IPC Status
+ * @ipc_protocol: iosm_protocol instance
+ *
+ * Return: Returns IPC State
+ */
+enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol
+ *ipc_protocol);
+
+/**
+ * ipc_protocol_pipe_cleanup - Function to cleanup pipe resources
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ */
+void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_get_ap_exec_stage - Function for getting AP Exec Stage
+ * @ipc_protocol: pointer to struct iosm protocol
+ *
+ * Return: returns BOOT Stages
+ */
+enum ipc_mem_exec_stage
+ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_pm_dev_get_sleep_notification - Function for getting Dev Sleep
+ * notification
+ * @ipc_protocol: iosm_protocol instance
+ *
+ * Return: Returns dev PM State
+ */
+u32 ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol
+ *ipc_protocol);
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_task_queue.c b/drivers/net/wwan/iosm/iosm_ipc_task_queue.c
new file mode 100644
index 0000000000..852a991661
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_task_queue.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_task_queue.h"
+
+/* Actual tasklet function, called whenever the tasklet is scheduled.
+ * Invokes the event handler callback of each element in the message queue.
+ */
+static void ipc_task_queue_handler(unsigned long data)
+{
+ struct ipc_task_queue *ipc_task = (struct ipc_task_queue *)data;
+ unsigned int q_rpos = ipc_task->q_rpos;
+
+ /* Loop over the input queue contents. */
+ while (q_rpos != ipc_task->q_wpos) {
+ /* Get the current first queue element. */
+ struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
+
+ /* Process the input message. */
+ if (args->func)
+ args->response = args->func(args->ipc_imem, args->arg,
+ args->msg, args->size);
+
+ /* Signal completion for synchronous calls */
+ if (args->completion)
+ complete(args->completion);
+
+ /* Free message if copy was allocated. */
+ if (args->is_copy)
+ kfree(args->msg);
+
+ /* Invalidate the queue element. Technically,
+ * spin_lock_irqsave is not required here as
+ * the array element has already been processed,
+ * so we can assume that the queue will not wrap
+ * around to this same element again within such
+ * a short time.
+ */
+ args->completion = NULL;
+ args->func = NULL;
+ args->msg = NULL;
+ args->size = 0;
+ args->is_copy = false;
+
+ /* calculate the new read ptr and update the volatile read
+ * ptr
+ */
+ q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
+ ipc_task->q_rpos = q_rpos;
+ }
+}
+
+/* Free allocated memory and trigger completions left in the queue during deinit */
+static void ipc_task_queue_cleanup(struct ipc_task_queue *ipc_task)
+{
+ unsigned int q_rpos = ipc_task->q_rpos;
+
+ while (q_rpos != ipc_task->q_wpos) {
+ struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
+
+ if (args->completion)
+ complete(args->completion);
+
+ if (args->is_copy)
+ kfree(args->msg);
+
+ q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
+ ipc_task->q_rpos = q_rpos;
+ }
+}
+
+/* Add a message to the queue and trigger the ipc_task. */
+static int
+ipc_task_queue_add_task(struct iosm_imem *ipc_imem,
+ int arg, void *msg,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ size_t size, bool is_copy, bool wait)
+{
+ struct tasklet_struct *ipc_tasklet = ipc_imem->ipc_task->ipc_tasklet;
+ struct ipc_task_queue *ipc_task = &ipc_imem->ipc_task->ipc_queue;
+ struct completion completion;
+ unsigned int pos, nextpos;
+ unsigned long flags;
+ int result = -EIO;
+
+ init_completion(&completion);
+
+ /* tasklet send may be called from both interrupt or thread
+ * context, therefore protect queue operation by spinlock
+ */
+ spin_lock_irqsave(&ipc_task->q_lock, flags);
+
+ pos = ipc_task->q_wpos;
+ nextpos = (pos + 1) % IPC_THREAD_QUEUE_SIZE;
+
+ /* Get next queue position. */
+ if (nextpos != ipc_task->q_rpos) {
+ /* Get the reference to the queue element and save the passed
+ * values.
+ */
+ ipc_task->args[pos].arg = arg;
+ ipc_task->args[pos].msg = msg;
+ ipc_task->args[pos].func = func;
+ ipc_task->args[pos].ipc_imem = ipc_imem;
+ ipc_task->args[pos].size = size;
+ ipc_task->args[pos].is_copy = is_copy;
+ ipc_task->args[pos].completion = wait ? &completion : NULL;
+ ipc_task->args[pos].response = -1;
+
+ /* Apply a write barrier so that the queue element at 'pos' is
+ * fully written before ipc_task->q_wpos is updated.
+ */
+ smp_wmb();
+
+ /* Update the status of the free queue space. */
+ ipc_task->q_wpos = nextpos;
+ result = 0;
+ }
+
+ spin_unlock_irqrestore(&ipc_task->q_lock, flags);
+
+ if (result == 0) {
+ tasklet_schedule(ipc_tasklet);
+
+ if (wait) {
+ wait_for_completion(&completion);
+ result = ipc_task->args[pos].response;
+ }
+ } else {
+ dev_err(ipc_imem->ipc_task->dev, "queue is full");
+ }
+
+ return result;
+}
+
+int ipc_task_queue_send_task(struct iosm_imem *imem,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ int arg, void *msg, size_t size, bool wait)
+{
+ bool is_copy = false;
+ void *copy = msg;
+ int ret = -ENOMEM;
+
+ if (size > 0) {
+ copy = kmemdup(msg, size, GFP_ATOMIC);
+ if (!copy)
+ goto out;
+
+ is_copy = true;
+ }
+
+ ret = ipc_task_queue_add_task(imem, arg, copy, func,
+ size, is_copy, wait);
+ if (ret < 0) {
+ dev_err(imem->ipc_task->dev,
+ "add task failed for %ps %d, %p, %zu, %d", func, arg,
+ copy, size, is_copy);
+ if (is_copy)
+ kfree(copy);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
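
A minimal usage sketch (not part of this patch); example_tq_cb() and example_sync_call() are hypothetical, the calling convention follows how ipc_protocol_msg_send() above drives the task queue:

static int example_tq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			 size_t size)
{
	/* Runs in tasklet context; the return value is handed back to the
	 * waiting caller when wait == true.
	 */
	return 0;
}

static int example_sync_call(struct iosm_imem *ipc_imem, u32 data)
{
	/* Because size > 0 a copy of 'data' is queued, and the caller
	 * blocks until example_tq_cb() has run.
	 */
	return ipc_task_queue_send_task(ipc_imem, example_tq_cb, 0, &data,
					sizeof(data), true);
}
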
+
+int ipc_task_init(struct ipc_task *ipc_task)
+{
+ struct ipc_task_queue *ipc_queue = &ipc_task->ipc_queue;
+
+ ipc_task->ipc_tasklet = kzalloc(sizeof(*ipc_task->ipc_tasklet),
+ GFP_KERNEL);
+
+ if (!ipc_task->ipc_tasklet)
+ return -ENOMEM;
+
+ /* Initialize the spinlock needed to protect the message queue of the
+ * ipc_task
+ */
+ spin_lock_init(&ipc_queue->q_lock);
+
+ tasklet_init(ipc_task->ipc_tasklet, ipc_task_queue_handler,
+ (unsigned long)ipc_queue);
+ return 0;
+}
+
+void ipc_task_deinit(struct ipc_task *ipc_task)
+{
+ tasklet_kill(ipc_task->ipc_tasklet);
+
+ kfree(ipc_task->ipc_tasklet);
+ /* This will free/complete any outstanding messages,
+ * without calling the actual handler
+ */
+ ipc_task_queue_cleanup(&ipc_task->ipc_queue);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_task_queue.h b/drivers/net/wwan/iosm/iosm_ipc_task_queue.h
new file mode 100644
index 0000000000..df6e9cd925
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_task_queue.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_TASK_QUEUE_H
+#define IOSM_IPC_TASK_QUEUE_H
+
+/* Number of available elements for the input message queue of the IPC
+ * ipc_task
+ */
+#define IPC_THREAD_QUEUE_SIZE 256
+
+/**
+ * struct ipc_task_queue_args - Struct for Task queue elements
+ * @ipc_imem: Pointer to struct iosm_imem
+ * @msg: Message argument for tasklet function. (optional, can be NULL)
+ * @completion: OS object used to wait for the tasklet function to finish for
+ * synchronous calls
+ * @func: Function to be called in tasklet (tl) context
+ * @arg: Generic integer argument for tasklet function (optional)
+ * @size: Message size argument for tasklet function (optional)
+ * @response: Return code of tasklet function for synchronous calls
+ * @is_copy: Is true if msg contains a pointer to a copy of the original msg
+ * for async. calls that needs to be freed once the tasklet returns
+ */
+struct ipc_task_queue_args {
+ struct iosm_imem *ipc_imem;
+ void *msg;
+ struct completion *completion;
+ int (*func)(struct iosm_imem *ipc_imem, int arg, void *msg,
+ size_t size);
+ int arg;
+ size_t size;
+ int response;
+ u8 is_copy:1;
+};
+
+/**
+ * struct ipc_task_queue - Struct for Task queue
+ * @q_lock: Protect the message queue of the ipc ipc_task
+ * @args: Message queue of the IPC ipc_task
+ * @q_rpos: First queue element to process.
+ * @q_wpos: First free element of the input queue.
+ */
+struct ipc_task_queue {
+ spinlock_t q_lock; /* for atomic operation on queue */
+ struct ipc_task_queue_args args[IPC_THREAD_QUEUE_SIZE];
+ unsigned int q_rpos;
+ unsigned int q_wpos;
+};
+
+/**
+ * struct ipc_task - Struct for Task
+ * @dev: Pointer to device structure
+ * @ipc_tasklet: Tasklet for serialized work offload
+ * from interrupts and OS callbacks
+ * @ipc_queue: Task for entry into ipc task queue
+ */
+struct ipc_task {
+ struct device *dev;
+ struct tasklet_struct *ipc_tasklet;
+ struct ipc_task_queue ipc_queue;
+};
+
+/**
+ * ipc_task_init - Allocate a tasklet
+ * @ipc_task: Pointer to ipc_task structure
+ * Returns: 0 on success and failure value on error.
+ */
+int ipc_task_init(struct ipc_task *ipc_task);
+
+/**
+ * ipc_task_deinit - Free a tasklet, invalidating its pointer.
+ * @ipc_task: Pointer to ipc_task structure
+ */
+void ipc_task_deinit(struct ipc_task *ipc_task);
+
+/**
+ * ipc_task_queue_send_task - Synchronously/Asynchronously call a function in
+ * tasklet context.
+ * @imem: Pointer to iosm_imem struct
+ * @func: Function to be called in tasklet context
+ * @arg: Integer argument for func
+ * @msg: Message pointer argument for func
+ * @size: Size argument for func
+ * @wait: if true wait for result
+ *
+ * Returns: Result value returned by func or failure value if func could not
+ * be called.
+ */
+int ipc_task_queue_send_task(struct iosm_imem *imem,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ int arg, void *msg, size_t size, bool wait);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c
new file mode 100644
index 0000000000..eeecfa3d10
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include <linux/wwan.h>
+#include "iosm_ipc_trace.h"
+
+/* sub buffer size and number of sub buffers */
+#define IOSM_TRC_SUB_BUFF_SIZE 131072
+#define IOSM_TRC_N_SUB_BUFF 32
+
+#define IOSM_TRC_FILE_PERM 0600
+
+#define IOSM_TRC_DEBUGFS_TRACE "trace"
+#define IOSM_TRC_DEBUGFS_TRACE_CTRL "trace_ctrl"
+
+/**
+ * ipc_trace_port_rx - Receive trace packet from CP and write it to the relay buffer
+ * @ipc_imem: Pointer to iosm_imem structure
+ * @skb: Pointer to struct sk_buff
+ */
+void ipc_trace_port_rx(struct iosm_imem *ipc_imem, struct sk_buff *skb)
+{
+ struct iosm_trace *ipc_trace = ipc_imem->trace;
+
+ if (ipc_trace->ipc_rchan)
+ relay_write(ipc_trace->ipc_rchan, skb->data, skb->len);
+
+ dev_kfree_skb(skb);
+}
+
+/* Creates relay file in debugfs. */
+static struct dentry *
+ipc_trace_create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ *is_global = 1;
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+/* Removes relay file from debugfs. */
+static int ipc_trace_remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+static int ipc_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding)
+{
+ if (relay_buf_full(buf)) {
+ pr_err_ratelimited("Relay_buf full dropping traces");
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Relay interface callbacks */
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = ipc_trace_subbuf_start_handler,
+ .create_buf_file = ipc_trace_create_buf_file_handler,
+ .remove_buf_file = ipc_trace_remove_buf_file_handler,
+};
+
+/* Copy the trace control mode to user buffer */
+static ssize_t ipc_trace_ctrl_file_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct iosm_trace *ipc_trace = filp->private_data;
+ char buf[16];
+ int len;
+
+ mutex_lock(&ipc_trace->trc_mutex);
+ len = snprintf(buf, sizeof(buf), "%d\n", ipc_trace->mode);
+ mutex_unlock(&ipc_trace->trc_mutex);
+
+ return simple_read_from_buffer(buffer, count, ppos, buf, len);
+}
+
+/* Open and close the trace channel depending on user input */
+static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct iosm_trace *ipc_trace = filp->private_data;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul_from_user(buffer, count, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ipc_trace->trc_mutex);
+ if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) {
+ ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem,
+ ipc_trace->chl_id,
+ IPC_HP_CDEV_OPEN);
+ if (!ipc_trace->channel) {
+ ret = -EIO;
+ goto unlock;
+ }
+ ipc_trace->mode = TRACE_ENABLE;
+ } else if (val == TRACE_DISABLE && ipc_trace->mode != TRACE_DISABLE) {
+ ipc_trace->mode = TRACE_DISABLE;
+ /* close trace channel */
+ ipc_imem_sys_port_close(ipc_trace->ipc_imem,
+ ipc_trace->channel);
+ relay_flush(ipc_trace->ipc_rchan);
+ }
+ ret = count;
+unlock:
+ mutex_unlock(&ipc_trace->trc_mutex);
+ return ret;
+}
+
+static const struct file_operations ipc_trace_fops = {
+ .open = simple_open,
+ .write = ipc_trace_ctrl_file_write,
+ .read = ipc_trace_ctrl_file_read,
+};
+
+/**
+ * ipc_trace_init - Create trace interface & debugfs entries
+ * @ipc_imem: Pointer to iosm_imem structure
+ *
+ * Returns: Pointer to trace instance on success else NULL
+ */
+struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem)
+{
+ struct ipc_chnl_cfg chnl_cfg = { 0 };
+ struct iosm_trace *ipc_trace;
+
+ ipc_chnl_cfg_get(&chnl_cfg, IPC_MEM_CTRL_CHL_ID_3);
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, chnl_cfg,
+ IRQ_MOD_OFF);
+
+ ipc_trace = kzalloc(sizeof(*ipc_trace), GFP_KERNEL);
+ if (!ipc_trace)
+ return NULL;
+
+ ipc_trace->mode = TRACE_DISABLE;
+ ipc_trace->dev = ipc_imem->dev;
+ ipc_trace->ipc_imem = ipc_imem;
+ ipc_trace->chl_id = IPC_MEM_CTRL_CHL_ID_3;
+
+ mutex_init(&ipc_trace->trc_mutex);
+
+ ipc_trace->ctrl_file = debugfs_create_file(IOSM_TRC_DEBUGFS_TRACE_CTRL,
+ IOSM_TRC_FILE_PERM,
+ ipc_imem->debugfs_dir,
+ ipc_trace, &ipc_trace_fops);
+
+ ipc_trace->ipc_rchan = relay_open(IOSM_TRC_DEBUGFS_TRACE,
+ ipc_imem->debugfs_dir,
+ IOSM_TRC_SUB_BUFF_SIZE,
+ IOSM_TRC_N_SUB_BUFF,
+ &relay_callbacks, NULL);
+
+ return ipc_trace;
+}
+
+/**
+ * ipc_trace_deinit - Closing relayfs, removing debugfs entries
+ * @ipc_trace: Pointer to the iosm_trace data struct
+ */
+void ipc_trace_deinit(struct iosm_trace *ipc_trace)
+{
+ if (!ipc_trace)
+ return;
+
+ debugfs_remove(ipc_trace->ctrl_file);
+ relay_close(ipc_trace->ipc_rchan);
+ mutex_destroy(&ipc_trace->trc_mutex);
+ kfree(ipc_trace);
+}
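
A hypothetical user-space sketch (not part of this patch) for toggling the trace channel through the trace_ctrl debugfs file created above; the exact debugfs path depends on the WWAN debugfs root and the device name, so the example path is an assumption:

#include <fcntl.h>
#include <unistd.h>

static int example_enable_trace(const char *ctrl_path)
{
	/* e.g. ctrl_path = "/sys/kernel/debug/wwan/wwan0/trace_ctrl" */
	int fd = open(ctrl_path, O_WRONLY);

	if (fd < 0)
		return -1;

	/* "1" == TRACE_ENABLE, "0" == TRACE_DISABLE */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}

	return close(fd);
}
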
diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.h b/drivers/net/wwan/iosm/iosm_ipc_trace.h
new file mode 100644
index 0000000000..5ebe779058
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_trace.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_TRACE_H
+#define IOSM_IPC_TRACE_H
+
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem_ops.h"
+
+/**
+ * enum trace_ctrl_mode - State of trace channel
+ * @TRACE_DISABLE: mode to disable tracing
+ * @TRACE_ENABLE: mode to enable tracing
+ */
+enum trace_ctrl_mode {
+ TRACE_DISABLE = 0,
+ TRACE_ENABLE,
+};
+
+/**
+ * struct iosm_trace - Struct for trace interface
+ * @ipc_rchan: Pointer to relay channel
+ * @ctrl_file: Pointer to trace control file
+ * @ipc_imem: Imem instance
+ * @dev: Pointer to device struct
+ * @channel: Channel instance
+ * @chl_id: Channel identifier
+ * @trc_mutex: Mutex used for read and write mode
+ * @mode: Mode for enable and disable trace
+ */
+
+struct iosm_trace {
+ struct rchan *ipc_rchan;
+ struct dentry *ctrl_file;
+ struct iosm_imem *ipc_imem;
+ struct device *dev;
+ struct ipc_mem_channel *channel;
+ enum ipc_channel_id chl_id;
+ struct mutex trc_mutex; /* Mutex used for read and write mode */
+ enum trace_ctrl_mode mode;
+};
+
+#ifdef CONFIG_WWAN_DEBUGFS
+
+static inline bool ipc_is_trace_channel(struct iosm_imem *ipc_mem, u16 chl_id)
+{
+ return ipc_mem->trace && ipc_mem->trace->chl_id == chl_id;
+}
+
+struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem);
+void ipc_trace_deinit(struct iosm_trace *ipc_trace);
+void ipc_trace_port_rx(struct iosm_imem *ipc_imem, struct sk_buff *skb);
+
+#else
+
+static inline bool ipc_is_trace_channel(struct iosm_imem *ipc_mem, u16 chl_id)
+{
+ return false;
+}
+
+static inline void ipc_trace_port_rx(struct iosm_imem *ipc_imem,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.c b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
new file mode 100644
index 0000000000..d12188ffed
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+
+#include "iosm_ipc_uevent.h"
+
+/* Update the uevent in work queue context */
+static void ipc_uevent_work(struct work_struct *data)
+{
+ struct ipc_uevent_info *info;
+ char *envp[2] = { NULL, NULL };
+
+ info = container_of(data, struct ipc_uevent_info, work);
+
+ envp[0] = info->uevent;
+
+ if (kobject_uevent_env(&info->dev->kobj, KOBJ_CHANGE, envp))
+ pr_err("uevent %s failed to sent", info->uevent);
+
+ kfree(info);
+}
+
+void ipc_uevent_send(struct device *dev, char *uevent)
+{
+ struct ipc_uevent_info *info = kzalloc(sizeof(*info), GFP_ATOMIC);
+
+ if (!info)
+ return;
+
+ /* Initialize the kernel work queue */
+ INIT_WORK(&info->work, ipc_uevent_work);
+
+ /* Store the device and event information */
+ info->dev = dev;
+ snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent);
+
+ /* Schedule uevent in process context using work queue */
+ schedule_work(&info->work);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.h b/drivers/net/wwan/iosm/iosm_ipc_uevent.h
new file mode 100644
index 0000000000..2e45c051b5
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_UEVENT_H
+#define IOSM_IPC_UEVENT_H
+
+/* Baseband event strings */
+#define UEVENT_MDM_NOT_READY "MDM_NOT_READY"
+#define UEVENT_ROM_READY "ROM_READY"
+#define UEVENT_MDM_READY "MDM_READY"
+#define UEVENT_CRASH "CRASH"
+#define UEVENT_CD_READY "CD_READY"
+#define UEVENT_CD_READY_LINK_DOWN "CD_READY_LINK_DOWN"
+#define UEVENT_MDM_TIMEOUT "MDM_TIMEOUT"
+
+/* Maximum length of user events */
+#define MAX_UEVENT_LEN 64
+
+/**
+ * struct ipc_uevent_info - Uevent information structure.
+ * @dev: Pointer to device structure
+ * @uevent: Uevent information
+ * @work: Uevent work struct
+ */
+struct ipc_uevent_info {
+ struct device *dev;
+ char uevent[MAX_UEVENT_LEN];
+ struct work_struct work;
+};
+
+/**
+ * ipc_uevent_send - Send modem event to user space.
+ * @dev: Generic device pointer
+ * @uevent: Uevent information
+ *
+ */
+void ipc_uevent_send(struct device *dev, char *uevent);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
new file mode 100644
index 0000000000..ff747fc79a
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_link.h>
+#include <linux/rtnetlink.h>
+#include <linux/wwan.h>
+#include <net/pkt_sched.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_wwan.h"
+
+#define IOSM_IP_TYPE_MASK 0xF0
+#define IOSM_IP_TYPE_IPV4 0x40
+#define IOSM_IP_TYPE_IPV6 0x60
+
+/**
+ * struct iosm_netdev_priv - netdev WWAN driver specific private data
+ * @ipc_wwan: Pointer to iosm_wwan struct
+ * @netdev: Pointer to network interface device structure
+ * @if_id: Interface id for device.
+ * @ch_id: IPC channel number for which interface device is created.
+ */
+struct iosm_netdev_priv {
+ struct iosm_wwan *ipc_wwan;
+ struct net_device *netdev;
+ int if_id;
+ int ch_id;
+};
+
+/**
+ * struct iosm_wwan - This structure contains information about WWAN root device
+ * and interface to the IPC layer.
+ * @ipc_imem: Pointer to imem data-struct
+ * @sub_netlist: List of active netdevs
+ * @dev: Pointer to device structure
+ */
+struct iosm_wwan {
+ struct iosm_imem *ipc_imem;
+ struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
+ struct device *dev;
+};
+
+/* Bring-up the wwan net link */
+static int ipc_wwan_link_open(struct net_device *netdev)
+{
+ struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
+ struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+ int if_id = priv->if_id;
+
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ return -EINVAL;
+
+ /* get channel id */
+ priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
+
+ if (priv->ch_id < 0) {
+ dev_err(ipc_wwan->dev,
+ "cannot connect wwan0 & id %d to the IPC mem layer",
+ if_id);
+ return -ENODEV;
+ }
+
+ /* enable tx path, DL data may follow */
+ netif_start_queue(netdev);
+
+ dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
+ priv->ch_id, priv->if_id);
+
+ return 0;
+}
+
+/* Bring-down the wwan net link */
+static int ipc_wwan_link_stop(struct net_device *netdev)
+{
+ struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
+
+ netif_stop_queue(netdev);
+
+ ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
+ priv->ch_id);
+ priv->ch_id = -1;
+
+ return 0;
+}
+
+/* Transmit a packet */
+static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
+ struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+ unsigned int len = skb->len;
+ int if_id = priv->if_id;
+ int ret;
+
+ /* Interface IDs from 1 to 8 are for IP data
+ * & from 257 to 261 are for non-IP data
+ */
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ return -EINVAL;
+
+ /* Send the SKB to device for transmission */
+ ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
+ if_id, priv->ch_id, skb);
+
+ /* Return code of zero is success */
+ if (ret == 0) {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += len;
+ ret = NETDEV_TX_OK;
+ } else if (ret == -EBUSY) {
+ ret = NETDEV_TX_BUSY;
+ dev_err(ipc_wwan->dev, "unable to push packets");
+ } else {
+ goto exit;
+ }
+
+ return ret;
+
+exit:
+ /* Log any skb drop */
+ if (if_id)
+ dev_dbg(ipc_wwan->dev, "skb dropped. IF_ID: %d, ret: %d", if_id,
+ ret);
+
+ dev_kfree_skb_any(skb);
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/* Ops structure for wwan net link */
+static const struct net_device_ops ipc_inm_ops = {
+ .ndo_open = ipc_wwan_link_open,
+ .ndo_stop = ipc_wwan_link_stop,
+ .ndo_start_xmit = ipc_wwan_link_transmit,
+};
+
+/* Setup function for creating new net link */
+static void ipc_wwan_setup(struct net_device *iosm_dev)
+{
+ iosm_dev->header_ops = NULL;
+ iosm_dev->hard_header_len = 0;
+ iosm_dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+
+ iosm_dev->type = ARPHRD_NONE;
+ iosm_dev->mtu = ETH_DATA_LEN;
+ iosm_dev->min_mtu = ETH_MIN_MTU;
+ iosm_dev->max_mtu = ETH_MAX_MTU;
+
+ iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ iosm_dev->needs_free_netdev = true;
+
+ iosm_dev->netdev_ops = &ipc_inm_ops;
+}
+
+/* Create new wwan net link */
+static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
+ u32 if_id, struct netlink_ext_ack *extack)
+{
+ struct iosm_wwan *ipc_wwan = ctxt;
+ struct iosm_netdev_priv *priv;
+ int err;
+
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ return -EINVAL;
+
+ priv = wwan_netdev_drvpriv(dev);
+ priv->if_id = if_id;
+ priv->netdev = dev;
+ priv->ipc_wwan = ipc_wwan;
+
+ if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id]))
+ return -EBUSY;
+
+ err = register_netdevice(dev);
+ if (err)
+ return err;
+
+ rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
+ netif_device_attach(dev);
+
+ return 0;
+}
+
+static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
+ struct list_head *head)
+{
+ struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(dev);
+ struct iosm_wwan *ipc_wwan = ctxt;
+ int if_id = priv->if_id;
+
+ if (WARN_ON(if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
+ return;
+
+ if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
+ return;
+
+ RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
+ /* unregistering includes synchronize_net() */
+ unregister_netdevice_queue(dev, head);
+}
+
+static const struct wwan_ops iosm_wwan_ops = {
+ .priv_size = sizeof(struct iosm_netdev_priv),
+ .setup = ipc_wwan_setup,
+ .newlink = ipc_wwan_newlink,
+ .dellink = ipc_wwan_dellink,
+};
+
+int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
+ bool dss, int if_id)
+{
+ struct sk_buff *skb = skb_arg;
+ struct net_device_stats *stats;
+ struct iosm_netdev_priv *priv;
+ int ret;
+
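+ /* Derive skb->protocol from the IP version nibble of the first payload byte */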
+ if ((skb->data[0] & IOSM_IP_TYPE_MASK) == IOSM_IP_TYPE_IPV4)
+ skb->protocol = htons(ETH_P_IP);
+ else if ((skb->data[0] & IOSM_IP_TYPE_MASK) ==
+ IOSM_IP_TYPE_IPV6)
+ skb->protocol = htons(ETH_P_IPV6);
+
+ skb->pkt_type = PACKET_HOST;
+
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id > IP_MUX_SESSION_END) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ rcu_read_lock();
+ priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
+ if (!priv) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ skb->dev = priv->netdev;
+ stats = &priv->netdev->stats;
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+
+ ret = netif_rx(skb);
+ skb = NULL;
+unlock:
+ rcu_read_unlock();
+free:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int if_id, bool on)
+{
+ struct net_device *netdev;
+ struct iosm_netdev_priv *priv;
+ bool is_tx_blk;
+
+ rcu_read_lock();
+ priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
+ if (!priv) {
+ rcu_read_unlock();
+ return;
+ }
+
+ netdev = priv->netdev;
+
+ is_tx_blk = netif_queue_stopped(netdev);
+
+ if (on)
+ dev_dbg(ipc_wwan->dev, "session id[%d]: flowctrl enable",
+ if_id);
+
+ if (on && !is_tx_blk)
+ netif_stop_queue(netdev);
+ else if (!on && is_tx_blk)
+ netif_wake_queue(netdev);
+ rcu_read_unlock();
+}
+
+struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
+{
+ struct iosm_wwan *ipc_wwan;
+
+ ipc_wwan = kzalloc(sizeof(*ipc_wwan), GFP_KERNEL);
+ if (!ipc_wwan)
+ return NULL;
+
+ ipc_wwan->dev = dev;
+ ipc_wwan->ipc_imem = ipc_imem;
+
+ /* WWAN core will create a netdev for the default IP MUX channel */
+ if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
+ IP_MUX_SESSION_DEFAULT)) {
+ kfree(ipc_wwan);
+ return NULL;
+ }
+
+ return ipc_wwan;
+}
+
+void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
+{
+ /* This call will remove all child netdev(s) */
+ wwan_unregister_ops(ipc_wwan->dev);
+
+ kfree(ipc_wwan);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.h b/drivers/net/wwan/iosm/iosm_ipc_wwan.h
new file mode 100644
index 0000000000..a23e926398
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_WWAN_H
+#define IOSM_IPC_WWAN_H
+
+/**
+ * ipc_wwan_init - Allocate, Init and register WWAN device
+ * @ipc_imem: Pointer to imem data-struct
+ * @dev: Pointer to device structure
+ *
+ * Returns: Pointer to instance on success else NULL
+ */
+struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev);
+
+/**
+ * ipc_wwan_deinit - Unregister and free WWAN device, clear pointer
+ * @ipc_wwan: Pointer to wwan instance data
+ */
+void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan);
+
+/**
+ * ipc_wwan_receive - Receive a downlink packet from CP.
+ * @ipc_wwan: Pointer to wwan instance
+ * @skb_arg: Pointer to struct sk_buff
+ * @dss: Set to true if interface id is from 257 to 261,
+ * else false
+ * @if_id: Interface ID
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
+ bool dss, int if_id);
+
+/**
+ * ipc_wwan_tx_flowctrl - Enable/Disable TX flow control
+ * @ipc_wwan: Pointer to wwan instance
+ * @id: IPC mux channel session id
+ * @on: if true, enable TX flow control, else disable it
+ *
+ */
+void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int id, bool on);
+#endif
diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c
new file mode 100644
index 0000000000..e9f979d2d8
--- /dev/null
+++ b/drivers/net/wwan/mhi_wwan_ctrl.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/wwan.h>
+
+/* MHI wwan flags */
+enum mhi_wwan_flags {
+ MHI_WWAN_DL_CAP,
+ MHI_WWAN_UL_CAP,
+ MHI_WWAN_RX_REFILL,
+};
+
+#define MHI_WWAN_MAX_MTU 0x8000
+
+struct mhi_wwan_dev {
+ /* Lower level is a mhi dev, upper level is a wwan port */
+ struct mhi_device *mhi_dev;
+ struct wwan_port *wwan_port;
+
+ /* State and capabilities */
+ unsigned long flags;
+ size_t mtu;
+
+ /* Protect against concurrent TX and TX-completion (bh) */
+ spinlock_t tx_lock;
+
+ /* Protect RX budget and rx_refill scheduling */
+ spinlock_t rx_lock;
+ struct work_struct rx_refill;
+
+ /* RX budget is initially set to the size of the MHI RX queue and is
+ * used to limit the number of allocated and queued packets. It is
+ * decremented on data queueing and incremented on data release.
+ */
+ unsigned int rx_budget;
+};
+
+/* Increment RX budget and schedule RX refill if necessary */
+static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
+{
+ spin_lock_bh(&mhiwwan->rx_lock);
+
+ mhiwwan->rx_budget++;
+
+ if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
+ schedule_work(&mhiwwan->rx_refill);
+
+ spin_unlock_bh(&mhiwwan->rx_lock);
+}
+
+/* Decrement RX budget if non-zero and return true on success */
+static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
+{
+ bool ret = false;
+
+ spin_lock_bh(&mhiwwan->rx_lock);
+
+ if (mhiwwan->rx_budget) {
+ mhiwwan->rx_budget--;
+ if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
+ ret = true;
+ }
+
+ spin_unlock_bh(&mhiwwan->rx_lock);
+
+ return ret;
+}
+
+static void __mhi_skb_destructor(struct sk_buff *skb)
+{
+ /* RX buffer has been consumed, increase the allowed budget */
+ mhi_wwan_rx_budget_inc(skb_shinfo(skb)->destructor_arg);
+}
+
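+/* Allocate and queue RX buffers as long as budget is available. The budget
+ * is given back from __mhi_skb_destructor() once a delivered skb is freed,
+ * which re-schedules this work.
+ */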
+static void mhi_wwan_ctrl_refill_work(struct work_struct *work)
+{
+ struct mhi_wwan_dev *mhiwwan = container_of(work, struct mhi_wwan_dev, rx_refill);
+ struct mhi_device *mhi_dev = mhiwwan->mhi_dev;
+
+ while (mhi_wwan_rx_budget_dec(mhiwwan)) {
+ struct sk_buff *skb;
+
+ skb = alloc_skb(mhiwwan->mtu, GFP_KERNEL);
+ if (!skb) {
+ mhi_wwan_rx_budget_inc(mhiwwan);
+ break;
+ }
+
+ /* To prevent unlimited buffer allocation if nothing consumes
+ * the RX buffers (passed to WWAN core), track their lifespan
+ * so as not to allocate more than the allowed budget.
+ */
+ skb->destructor = __mhi_skb_destructor;
+ skb_shinfo(skb)->destructor_arg = mhiwwan;
+
+ if (mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, mhiwwan->mtu, MHI_EOT)) {
+ dev_err(&mhi_dev->dev, "Failed to queue buffer\n");
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
+static int mhi_wwan_ctrl_start(struct wwan_port *port)
+{
+ struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ /* Start mhi device's channel(s) */
+ ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev);
+ if (ret)
+ return ret;
+
+ /* Don't allocate more buffers than MHI channel queue size */
+ mhiwwan->rx_budget = mhi_get_free_desc_count(mhiwwan->mhi_dev, DMA_FROM_DEVICE);
+
+ /* Add buffers to the MHI inbound queue */
+ if (test_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags)) {
+ set_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
+ mhi_wwan_ctrl_refill_work(&mhiwwan->rx_refill);
+ }
+
+ return 0;
+}
+
+static void mhi_wwan_ctrl_stop(struct wwan_port *port)
+{
+ struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
+
+ spin_lock_bh(&mhiwwan->rx_lock);
+ clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
+ spin_unlock_bh(&mhiwwan->rx_lock);
+
+ cancel_work_sync(&mhiwwan->rx_refill);
+
+ mhi_unprepare_from_transfer(mhiwwan->mhi_dev);
+}
+
+static int mhi_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ if (skb->len > mhiwwan->mtu)
+ return -EMSGSIZE;
+
+ if (!test_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags))
+ return -EOPNOTSUPP;
+
+ /* Queue the packet for MHI transfer and check fullness of the queue */
+ spin_lock_bh(&mhiwwan->tx_lock);
+ ret = mhi_queue_skb(mhiwwan->mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+ if (mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
+ wwan_port_txoff(port);
+ spin_unlock_bh(&mhiwwan->tx_lock);
+
+ return ret;
+}
+
+static const struct wwan_port_ops wwan_pops = {
+ .start = mhi_wwan_ctrl_start,
+ .stop = mhi_wwan_ctrl_stop,
+ .tx = mhi_wwan_ctrl_tx,
+};
+
+static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
+ struct wwan_port *port = mhiwwan->wwan_port;
+ struct sk_buff *skb = mhi_result->buf_addr;
+
+ dev_dbg(&mhi_dev->dev, "%s: status: %d xfer_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+ /* MHI core is done with the buffer, release it */
+ consume_skb(skb);
+
+ /* There is likely a new slot available in the MHI queue, re-allow TX */
+ spin_lock_bh(&mhiwwan->tx_lock);
+ if (!mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
+ wwan_port_txon(port);
+ spin_unlock_bh(&mhiwwan->tx_lock);
+}
+
+static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
+ struct wwan_port *port = mhiwwan->wwan_port;
+ struct sk_buff *skb = mhi_result->buf_addr;
+
+ dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+ if (mhi_result->transaction_status &&
+ mhi_result->transaction_status != -EOVERFLOW) {
+ kfree_skb(skb);
+ return;
+ }
+
+ /* MHI core does not update skb->len, do it before forwarding */
+ skb_put(skb, mhi_result->bytes_xferd);
+ wwan_port_rx(port, skb);
+
+ /* Do not increment rx budget nor refill RX buffers now, wait for the
+ * buffer to be consumed. Done from __mhi_skb_destructor().
+ */
+}
+
+static int mhi_wwan_ctrl_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_wwan_dev *mhiwwan;
+ struct wwan_port *port;
+
+ mhiwwan = kzalloc(sizeof(*mhiwwan), GFP_KERNEL);
+ if (!mhiwwan)
+ return -ENOMEM;
+
+ mhiwwan->mhi_dev = mhi_dev;
+ mhiwwan->mtu = MHI_WWAN_MAX_MTU;
+ INIT_WORK(&mhiwwan->rx_refill, mhi_wwan_ctrl_refill_work);
+ spin_lock_init(&mhiwwan->tx_lock);
+ spin_lock_init(&mhiwwan->rx_lock);
+
+ if (mhi_dev->dl_chan)
+ set_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags);
+ if (mhi_dev->ul_chan)
+ set_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags);
+
+ dev_set_drvdata(&mhi_dev->dev, mhiwwan);
+
+ /* Register as a wwan port, id->driver_data contains wwan port type */
+ port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data,
+ &wwan_pops, NULL, mhiwwan);
+ if (IS_ERR(port)) {
+ kfree(mhiwwan);
+ return PTR_ERR(port);
+ }
+
+ mhiwwan->wwan_port = port;
+
+ return 0;
+};
+
+static void mhi_wwan_ctrl_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
+
+ wwan_remove_port(mhiwwan->wwan_port);
+ kfree(mhiwwan);
+}
+
+static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = {
+ { .chan = "DUN", .driver_data = WWAN_PORT_AT },
+ { .chan = "DUN2", .driver_data = WWAN_PORT_AT },
+ { .chan = "MBIM", .driver_data = WWAN_PORT_MBIM },
+ { .chan = "QMI", .driver_data = WWAN_PORT_QMI },
+ { .chan = "DIAG", .driver_data = WWAN_PORT_QCDM },
+ { .chan = "FIREHOSE", .driver_data = WWAN_PORT_FIREHOSE },
+ {},
+};
+MODULE_DEVICE_TABLE(mhi, mhi_wwan_ctrl_match_table);
+
+static struct mhi_driver mhi_wwan_ctrl_driver = {
+ .id_table = mhi_wwan_ctrl_match_table,
+ .remove = mhi_wwan_ctrl_remove,
+ .probe = mhi_wwan_ctrl_probe,
+ .ul_xfer_cb = mhi_ul_xfer_cb,
+ .dl_xfer_cb = mhi_dl_xfer_cb,
+ .driver = {
+ .name = "mhi_wwan_ctrl",
+ },
+};
+
+module_mhi_driver(mhi_wwan_ctrl_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI WWAN CTRL Driver");
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
new file mode 100644
index 0000000000..3f72ae943b
--- /dev/null
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* MHI MBIM Network driver - Network/MBIM over MHI bus
+ *
+ * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org>
+ *
+ * This driver copies some code from cdc_ncm, which is:
+ * Copyright (C) ST-Ericsson 2010-2012
+ * and cdc_mbim, which is:
+ * Copyright (c) 2012 Smith Micro Software, Inc.
+ * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
+ *
+ */
+
+#include <linux/ethtool.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/mhi.h>
+#include <linux/mii.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc_ncm.h>
+#include <linux/wwan.h>
+
+/* An MRU of 3500 allows optimizing skb allocation: the skbs will basically
+ * fit in one 4K page. Large MBIM packets will simply be split over several
+ * MHI transfers and chained by the MHI net layer (zerocopy).
+ */
+#define MHI_DEFAULT_MRU 3500
+
+#define MHI_MBIM_DEFAULT_MTU 1500
+#define MHI_MAX_BUF_SZ 0xffff
+
+#define MBIM_NDP16_SIGN_MASK 0x00ffffff
+
+#define MHI_MBIM_LINK_HASH_SIZE 8
+#define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE)
+
+struct mhi_mbim_link {
+ struct mhi_mbim_context *mbim;
+ struct net_device *ndev;
+ unsigned int session;
+
+ /* stats */
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+
+ struct hlist_node hlnode;
+};
+
+struct mhi_mbim_context {
+ struct mhi_device *mdev;
+ struct sk_buff *skbagg_head;
+ struct sk_buff *skbagg_tail;
+ unsigned int mru;
+ u32 rx_queue_sz;
+ u16 rx_seq;
+ u16 tx_seq;
+ struct delayed_work rx_refill;
+ spinlock_t tx_lock;
+ struct hlist_head link_list[MHI_MBIM_LINK_HASH_SIZE];
+};
+
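+/* Header prepended to each TX datagram by mbim_tx_fixup(): a single NTB
+ * (NTH16) carrying one NDP16 with two DPE16 entries, the second being the
+ * NULL terminator, followed by the IP datagram itself.
+ */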
+struct mbim_tx_hdr {
+ struct usb_cdc_ncm_nth16 nth16;
+ struct usb_cdc_ncm_ndp16 ndp16;
+ struct usb_cdc_ncm_dpe16 dpe16[2];
+} __packed;
+
+static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim,
+ unsigned int session)
+{
+ struct mhi_mbim_link *link;
+
+ hlist_for_each_entry_rcu(link, &mbim->link_list[LINK_HASH(session)], hlnode) {
+ if (link->session == session)
+ return link;
+ }
+
+ return NULL;
+}
+
+static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
+ u16 tx_seq)
+{
+ unsigned int dgram_size = skb->len;
+ struct usb_cdc_ncm_nth16 *nth16;
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct mbim_tx_hdr *mbim_hdr;
+
+ /* Only one NDP is sent, containing the IP packet (no aggregation) */
+
+ /* Ensure we have enough headroom for crafting MBIM header */
+ if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));
+
+ /* Fill NTB header */
+ nth16 = &mbim_hdr->nth16;
+ nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+ nth16->wSequence = cpu_to_le16(tx_seq);
+ nth16->wBlockLength = cpu_to_le16(skb->len);
+ nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+
+ /* Fill the unique NDP */
+ ndp16 = &mbim_hdr->ndp16;
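+ /* Session ID is carried in the most significant byte of the IPS signature */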
+ ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24));
+ ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
+ + sizeof(struct usb_cdc_ncm_dpe16) * 2);
+ ndp16->wNextNdpIndex = 0;
+
+ /* Datagram follows the mbim header */
+ ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
+ ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);
+
+ /* null termination */
+ ndp16->dpe16[1].wDatagramIndex = 0;
+ ndp16->dpe16[1].wDatagramLength = 0;
+
+ return skb;
+}
+
+static netdev_tx_t mhi_mbim_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+ struct mhi_mbim_context *mbim = link->mbim;
+ unsigned long flags;
+ int err = -ENOMEM;
+
+ /* Serialize MHI channel queuing and MBIM seq */
+ spin_lock_irqsave(&mbim->tx_lock, flags);
+
+ skb = mbim_tx_fixup(skb, link->session, mbim->tx_seq);
+ if (unlikely(!skb))
+ goto exit_unlock;
+
+ err = mhi_queue_skb(mbim->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+
+ if (mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
+ netif_stop_queue(ndev);
+
+ if (!err)
+ mbim->tx_seq++;
+
+exit_unlock:
+ spin_unlock_irqrestore(&mbim->tx_lock, flags);
+
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
+ ndev->name, err);
+ dev_kfree_skb_any(skb);
+ goto exit_drop;
+ }
+
+ return NETDEV_TX_OK;
+
+exit_drop:
+ u64_stats_update_begin(&link->tx_syncp);
+ u64_stats_inc(&link->tx_dropped);
+ u64_stats_update_end(&link->tx_syncp);
+
+ return NETDEV_TX_OK;
+}
+
+static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *skb)
+{
+ struct usb_cdc_ncm_nth16 *nth16;
+ int len;
+
+ if (skb->len < sizeof(struct usb_cdc_ncm_nth16) +
+ sizeof(struct usb_cdc_ncm_ndp16)) {
+ net_err_ratelimited("frame too short\n");
+ return -EINVAL;
+ }
+
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb->data;
+
+ if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
+ net_err_ratelimited("invalid NTH16 signature <%#010x>\n",
+ le32_to_cpu(nth16->dwSignature));
+ return -EINVAL;
+ }
+
+ /* No limit on the block length, except the size of the data pkt */
+ len = le16_to_cpu(nth16->wBlockLength);
+ if (len > skb->len) {
+ net_err_ratelimited("NTB does not fit into the skb %u/%u\n",
+ len, skb->len);
+ return -EINVAL;
+ }
+
+ if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
+ (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) &&
+ !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
+ net_err_ratelimited("sequence number glitch prev=%d curr=%d\n",
+ mbim->rx_seq, le16_to_cpu(nth16->wSequence));
+ }
+ mbim->rx_seq = le16_to_cpu(nth16->wSequence);
+
+ return le16_to_cpu(nth16->wNdpIndex);
+}
+
+static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16)
+{
+ int ret;
+
+ if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
+ net_err_ratelimited("invalid DPT16 length <%u>\n",
+ le16_to_cpu(ndp16->wLength));
+ return -EINVAL;
+ }
+
+ ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16))
+ / sizeof(struct usb_cdc_ncm_dpe16));
+ ret--; /* Last entry is always a NULL terminator */
+
+ if (sizeof(struct usb_cdc_ncm_ndp16) +
+ ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) {
+ net_err_ratelimited("Invalid nframes = %d\n", ret);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void mhi_mbim_rx(struct mhi_mbim_context *mbim, struct sk_buff *skb)
+{
+ int ndpoffset;
+
+ /* Check NTB header and retrieve first NDP offset */
+ ndpoffset = mbim_rx_verify_nth16(mbim, skb);
+ if (ndpoffset < 0) {
+ net_err_ratelimited("mbim: Incorrect NTB header\n");
+ goto error;
+ }
+
+ /* Process each NDP */
+ while (1) {
+ struct usb_cdc_ncm_ndp16 ndp16;
+ struct usb_cdc_ncm_dpe16 dpe16;
+ struct mhi_mbim_link *link;
+ int nframes, n, dpeoffset;
+ unsigned int session;
+
+ if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) {
+ net_err_ratelimited("mbim: Incorrect NDP offset (%u)\n",
+ ndpoffset);
+ goto error;
+ }
+
+ /* Check NDP header and retrieve number of datagrams */
+ nframes = mbim_rx_verify_ndp16(skb, &ndp16);
+ if (nframes < 0) {
+ net_err_ratelimited("mbim: Incorrect NDP16\n");
+ goto error;
+ }
+
+ /* Only IP data type supported, no DSS in MHI context */
+ if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK))
+ != cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) {
+ net_err_ratelimited("mbim: Unsupported NDP type\n");
+ goto next_ndp;
+ }
+
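+ /* Recover the data session ID from the top byte of the NDP signature */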
+ session = (le32_to_cpu(ndp16.dwSignature) & ~MBIM_NDP16_SIGN_MASK) >> 24;
+
+ rcu_read_lock();
+
+ link = mhi_mbim_get_link_rcu(mbim, session);
+ if (!link) {
+ net_err_ratelimited("mbim: bad packet session (%u)\n", session);
+ goto unlock;
+ }
+
+ /* de-aggregate and deliver IP packets */
+ dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16);
+ for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) {
+ u16 dgram_offset, dgram_len;
+ struct sk_buff *skbn;
+
+ if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16)))
+ break;
+
+ dgram_offset = le16_to_cpu(dpe16.wDatagramIndex);
+ dgram_len = le16_to_cpu(dpe16.wDatagramLength);
+
+ if (!dgram_offset || !dgram_len)
+ break; /* null terminator */
+
+ skbn = netdev_alloc_skb(link->ndev, dgram_len);
+ if (!skbn)
+ continue;
+
+ skb_put(skbn, dgram_len);
+ skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len);
+
+ switch (skbn->data[0] & 0xf0) {
+ case 0x40:
+ skbn->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skbn->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ net_err_ratelimited("%s: unknown protocol\n",
+ link->ndev->name);
+ dev_kfree_skb_any(skbn);
+ u64_stats_update_begin(&link->rx_syncp);
+ u64_stats_inc(&link->rx_errors);
+ u64_stats_update_end(&link->rx_syncp);
+ continue;
+ }
+
+ u64_stats_update_begin(&link->rx_syncp);
+ u64_stats_inc(&link->rx_packets);
+ u64_stats_add(&link->rx_bytes, skbn->len);
+ u64_stats_update_end(&link->rx_syncp);
+
+ netif_rx(skbn);
+ }
+unlock:
+ rcu_read_unlock();
+next_ndp:
+ /* Other NDP to process? */
+ ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex);
+ if (!ndpoffset)
+ break;
+ }
+
+ /* free skb */
+ dev_consume_skb_any(skb);
+ return;
+error:
+ dev_kfree_skb_any(skb);
+}
+
+static struct sk_buff *mhi_net_skb_agg(struct mhi_mbim_context *mbim,
+ struct sk_buff *skb)
+{
+ struct sk_buff *head = mbim->skbagg_head;
+ struct sk_buff *tail = mbim->skbagg_tail;
+
+ /* This is non-paged skb chaining using frag_list */
+ if (!head) {
+ mbim->skbagg_head = skb;
+ return skb;
+ }
+
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = skb;
+ else
+ tail->next = skb;
+
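+ /* Account the chained fragment in the head skb so that the aggregate
+ * presents as a single buffer to mhi_mbim_rx()
+ */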
+ head->len += skb->len;
+ head->data_len += skb->len;
+ head->truesize += skb->truesize;
+
+ mbim->skbagg_tail = skb;
+
+ return mbim->skbagg_head;
+}
+
+static void mhi_net_rx_refill_work(struct work_struct *work)
+{
+ struct mhi_mbim_context *mbim = container_of(work, struct mhi_mbim_context,
+ rx_refill.work);
+ struct mhi_device *mdev = mbim->mdev;
+ int err;
+
+ while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
+ struct sk_buff *skb = alloc_skb(mbim->mru, GFP_KERNEL);
+
+ if (unlikely(!skb))
+ break;
+
+ err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
+ mbim->mru, MHI_EOT);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ break;
+ }
+
+ /* Do not hog the CPU if rx buffers are consumed faster than
+ * queued (unlikely).
+ */
+ cond_resched();
+ }
+
+ /* If we're still starved of rx buffers, reschedule later */
+ if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mbim->rx_queue_sz)
+ schedule_delayed_work(&mbim->rx_refill, HZ / 2);
+}
+
+static void mhi_mbim_dl_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
+ struct sk_buff *skb = mhi_res->buf_addr;
+ int free_desc_count;
+
+ free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
+
+ if (unlikely(mhi_res->transaction_status)) {
+ switch (mhi_res->transaction_status) {
+ case -EOVERFLOW:
+ /* Packet has been split over multiple transfers */
+ skb_put(skb, mhi_res->bytes_xferd);
+ mhi_net_skb_agg(mbim, skb);
+ break;
+ case -ENOTCONN:
+ /* MHI layer stopping/resetting the DL channel */
+ dev_kfree_skb_any(skb);
+ return;
+ default:
+ /* Unknown error, simply drop */
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ skb_put(skb, mhi_res->bytes_xferd);
+
+ if (mbim->skbagg_head) {
+ /* Aggregate the final fragment */
+ skb = mhi_net_skb_agg(mbim, skb);
+ mbim->skbagg_head = NULL;
+ }
+
+ mhi_mbim_rx(mbim, skb);
+ }
+
+ /* Refill if RX buffers queue becomes low */
+ if (free_desc_count >= mbim->rx_queue_sz / 2)
+ schedule_delayed_work(&mbim->rx_refill, 0);
+}
+
+static void mhi_mbim_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&link->rx_syncp);
+ stats->rx_packets = u64_stats_read(&link->rx_packets);
+ stats->rx_bytes = u64_stats_read(&link->rx_bytes);
+ stats->rx_errors = u64_stats_read(&link->rx_errors);
+ } while (u64_stats_fetch_retry(&link->rx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin(&link->tx_syncp);
+ stats->tx_packets = u64_stats_read(&link->tx_packets);
+ stats->tx_bytes = u64_stats_read(&link->tx_bytes);
+ stats->tx_errors = u64_stats_read(&link->tx_errors);
+ stats->tx_dropped = u64_stats_read(&link->tx_dropped);
+ } while (u64_stats_fetch_retry(&link->tx_syncp, start));
+}
+
+static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
+ struct sk_buff *skb = mhi_res->buf_addr;
+ struct net_device *ndev = skb->dev;
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+
+ /* Hardware has consumed the buffer, so free the skb (which is not
+ * freed by the MHI stack) and perform accounting.
+ */
+ dev_consume_skb_any(skb);
+
+ u64_stats_update_begin(&link->tx_syncp);
+ if (unlikely(mhi_res->transaction_status)) {
+ /* MHI layer stopping/resetting the UL channel */
+ if (mhi_res->transaction_status == -ENOTCONN) {
+ u64_stats_update_end(&link->tx_syncp);
+ return;
+ }
+
+ u64_stats_inc(&link->tx_errors);
+ } else {
+ u64_stats_inc(&link->tx_packets);
+ u64_stats_add(&link->tx_bytes, mhi_res->bytes_xferd);
+ }
+ u64_stats_update_end(&link->tx_syncp);
+
+ if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
+ netif_wake_queue(ndev);
+}
+
+static int mhi_mbim_ndo_open(struct net_device *ndev)
+{
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+
+ /* Feed the MHI rx buffer pool */
+ schedule_delayed_work(&link->mbim->rx_refill, 0);
+
+ /* Carrier is established via out-of-band channel (e.g. qmi) */
+ netif_carrier_on(ndev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int mhi_mbim_ndo_stop(struct net_device *ndev)
+{
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ return 0;
+}
+
+static const struct net_device_ops mhi_mbim_ndo = {
+ .ndo_open = mhi_mbim_ndo_open,
+ .ndo_stop = mhi_mbim_ndo_stop,
+ .ndo_start_xmit = mhi_mbim_ndo_xmit,
+ .ndo_get_stats64 = mhi_mbim_ndo_get_stats64,
+};
+
+static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+ struct mhi_mbim_context *mbim = ctxt;
+
+ link->session = if_id;
+ link->mbim = mbim;
+ link->ndev = ndev;
+ u64_stats_init(&link->rx_syncp);
+ u64_stats_init(&link->tx_syncp);
+
+ rcu_read_lock();
+ if (mhi_mbim_get_link_rcu(mbim, if_id)) {
+ rcu_read_unlock();
+ return -EEXIST;
+ }
+ rcu_read_unlock();
+
+ /* Already protected by RTNL lock */
+ hlist_add_head_rcu(&link->hlnode, &mbim->link_list[LINK_HASH(if_id)]);
+
+ return register_netdevice(ndev);
+}
+
+static void mhi_mbim_dellink(void *ctxt, struct net_device *ndev,
+ struct list_head *head)
+{
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+
+ hlist_del_init_rcu(&link->hlnode);
+ synchronize_rcu();
+
+ unregister_netdevice_queue(ndev, head);
+}
+
+static void mhi_mbim_setup(struct net_device *ndev)
+{
+ ndev->header_ops = NULL; /* No header */
+ ndev->type = ARPHRD_RAWIP;
+ ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ ndev->netdev_ops = &mhi_mbim_ndo;
+ ndev->mtu = MHI_MBIM_DEFAULT_MTU;
+ ndev->min_mtu = ETH_MIN_MTU;
+ ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom;
+ ndev->tx_queue_len = 1000;
+ ndev->needs_free_netdev = true;
+}
+
+static const struct wwan_ops mhi_mbim_wwan_ops = {
+ .priv_size = sizeof(struct mhi_mbim_link),
+ .setup = mhi_mbim_setup,
+ .newlink = mhi_mbim_newlink,
+ .dellink = mhi_mbim_dellink,
+};
+
+static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_mbim_context *mbim;
+ int err;
+
+ mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
+ if (!mbim)
+ return -ENOMEM;
+
+ spin_lock_init(&mbim->tx_lock);
+ dev_set_drvdata(&mhi_dev->dev, mbim);
+ mbim->mdev = mhi_dev;
+ mbim->mru = mhi_dev->mhi_cntrl->mru ? mhi_dev->mhi_cntrl->mru : MHI_DEFAULT_MRU;
+
+ INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work);
+
+ /* Start MHI channels */
+ err = mhi_prepare_for_transfer(mhi_dev);
+ if (err)
+ return err;
+
+ /* Number of transfer descriptors determines size of the queue */
+ mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
+
+ /* Register wwan link ops with MHI controller representing WWAN instance */
+ return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
+}
+
+static void mhi_mbim_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_unprepare_from_transfer(mhi_dev);
+ cancel_delayed_work_sync(&mbim->rx_refill);
+ wwan_unregister_ops(&cntrl->mhi_dev->dev);
+ kfree_skb(mbim->skbagg_head);
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+}
+
+static const struct mhi_device_id mhi_mbim_id_table[] = {
+ /* Hardware accelerated data PATH (to modem IPA), MBIM protocol */
+ { .chan = "IP_HW0_MBIM", .driver_data = 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, mhi_mbim_id_table);
+
+static struct mhi_driver mhi_mbim_driver = {
+ .probe = mhi_mbim_probe,
+ .remove = mhi_mbim_remove,
+ .dl_xfer_cb = mhi_mbim_dl_callback,
+ .ul_xfer_cb = mhi_mbim_ul_callback,
+ .id_table = mhi_mbim_id_table,
+ .driver = {
+ .name = "mhi_wwan_mbim",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_mhi_driver(mhi_mbim_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Network/MBIM over MHI");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c
new file mode 100644
index 0000000000..17d46f4d29
--- /dev/null
+++ b/drivers/net/wwan/qcom_bam_dmux.c
@@ -0,0 +1,907 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm BAM-DMUX WWAN network driver
+ * Copyright (c) 2020, Stephan Gerhold <stephan@gerhold.net>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/if_arp.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <net/pkt_sched.h>
+
+#define BAM_DMUX_BUFFER_SIZE SZ_2K
+#define BAM_DMUX_HDR_SIZE sizeof(struct bam_dmux_hdr)
+#define BAM_DMUX_MAX_DATA_SIZE (BAM_DMUX_BUFFER_SIZE - BAM_DMUX_HDR_SIZE)
+#define BAM_DMUX_NUM_SKB 32
+
+#define BAM_DMUX_HDR_MAGIC 0x33fc
+
+#define BAM_DMUX_AUTOSUSPEND_DELAY 1000
+#define BAM_DMUX_REMOTE_TIMEOUT msecs_to_jiffies(2000)
+
+enum {
+ BAM_DMUX_CMD_DATA,
+ BAM_DMUX_CMD_OPEN,
+ BAM_DMUX_CMD_CLOSE,
+};
+
+enum {
+ BAM_DMUX_CH_DATA_0,
+ BAM_DMUX_CH_DATA_1,
+ BAM_DMUX_CH_DATA_2,
+ BAM_DMUX_CH_DATA_3,
+ BAM_DMUX_CH_DATA_4,
+ BAM_DMUX_CH_DATA_5,
+ BAM_DMUX_CH_DATA_6,
+ BAM_DMUX_CH_DATA_7,
+ BAM_DMUX_NUM_CH
+};
+
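+/* Multiplexing header prepended to every frame exchanged over the BAM
+ * channels; len covers the payload only, and pad bytes are appended so
+ * that the overall frame stays 32-bit aligned.
+ */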
+struct bam_dmux_hdr {
+ u16 magic;
+ u8 signal;
+ u8 cmd;
+ u8 pad;
+ u8 ch;
+ u16 len;
+};
+
+struct bam_dmux_skb_dma {
+ struct bam_dmux *dmux;
+ struct sk_buff *skb;
+ dma_addr_t addr;
+};
+
+struct bam_dmux {
+ struct device *dev;
+
+ int pc_irq;
+ bool pc_state, pc_ack_state;
+ struct qcom_smem_state *pc, *pc_ack;
+ u32 pc_mask, pc_ack_mask;
+ wait_queue_head_t pc_wait;
+ struct completion pc_ack_completion;
+
+ struct dma_chan *rx, *tx;
+ struct bam_dmux_skb_dma rx_skbs[BAM_DMUX_NUM_SKB];
+ struct bam_dmux_skb_dma tx_skbs[BAM_DMUX_NUM_SKB];
+ spinlock_t tx_lock; /* Protect tx_skbs, tx_next_skb */
+ unsigned int tx_next_skb;
+ atomic_long_t tx_deferred_skb;
+ struct work_struct tx_wakeup_work;
+
+ DECLARE_BITMAP(remote_channels, BAM_DMUX_NUM_CH);
+ struct work_struct register_netdev_work;
+ struct net_device *netdevs[BAM_DMUX_NUM_CH];
+};
+
+struct bam_dmux_netdev {
+ struct bam_dmux *dmux;
+ u8 ch;
+};
+
+static void bam_dmux_pc_vote(struct bam_dmux *dmux, bool enable)
+{
+ reinit_completion(&dmux->pc_ack_completion);
+ qcom_smem_state_update_bits(dmux->pc, dmux->pc_mask,
+ enable ? dmux->pc_mask : 0);
+}
+
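+/* The ack is signalled by toggling the pc-ack smem state bit, so track the
+ * value that was last written.
+ */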
+static void bam_dmux_pc_ack(struct bam_dmux *dmux)
+{
+ qcom_smem_state_update_bits(dmux->pc_ack, dmux->pc_ack_mask,
+ dmux->pc_ack_state ? 0 : dmux->pc_ack_mask);
+ dmux->pc_ack_state = !dmux->pc_ack_state;
+}
+
+static bool bam_dmux_skb_dma_map(struct bam_dmux_skb_dma *skb_dma,
+ enum dma_data_direction dir)
+{
+ struct device *dev = skb_dma->dmux->dev;
+
+ skb_dma->addr = dma_map_single(dev, skb_dma->skb->data, skb_dma->skb->len, dir);
+ if (dma_mapping_error(dev, skb_dma->addr)) {
+ dev_err(dev, "Failed to DMA map buffer\n");
+ skb_dma->addr = 0;
+ return false;
+ }
+
+ return true;
+}
+
+static void bam_dmux_skb_dma_unmap(struct bam_dmux_skb_dma *skb_dma,
+ enum dma_data_direction dir)
+{
+ dma_unmap_single(skb_dma->dmux->dev, skb_dma->addr, skb_dma->skb->len, dir);
+ skb_dma->addr = 0;
+}
+
+static void bam_dmux_tx_wake_queues(struct bam_dmux *dmux)
+{
+ int i;
+
+ dev_dbg(dmux->dev, "wake queues\n");
+
+ for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
+ struct net_device *netdev = dmux->netdevs[i];
+
+ if (netdev && netif_running(netdev))
+ netif_wake_queue(netdev);
+ }
+}
+
+static void bam_dmux_tx_stop_queues(struct bam_dmux *dmux)
+{
+ int i;
+
+ dev_dbg(dmux->dev, "stop queues\n");
+
+ for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
+ struct net_device *netdev = dmux->netdevs[i];
+
+ if (netdev)
+ netif_stop_queue(netdev);
+ }
+}
+
+static void bam_dmux_tx_done(struct bam_dmux_skb_dma *skb_dma)
+{
+ struct bam_dmux *dmux = skb_dma->dmux;
+ unsigned long flags;
+
+ pm_runtime_mark_last_busy(dmux->dev);
+ pm_runtime_put_autosuspend(dmux->dev);
+
+ if (skb_dma->addr)
+ bam_dmux_skb_dma_unmap(skb_dma, DMA_TO_DEVICE);
+
+ spin_lock_irqsave(&dmux->tx_lock, flags);
+ skb_dma->skb = NULL;
+ if (skb_dma == &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB])
+ bam_dmux_tx_wake_queues(dmux);
+ spin_unlock_irqrestore(&dmux->tx_lock, flags);
+}
+
+static void bam_dmux_tx_callback(void *data)
+{
+ struct bam_dmux_skb_dma *skb_dma = data;
+ struct sk_buff *skb = skb_dma->skb;
+
+ bam_dmux_tx_done(skb_dma);
+ dev_consume_skb_any(skb);
+}
+
+static bool bam_dmux_skb_dma_submit_tx(struct bam_dmux_skb_dma *skb_dma)
+{
+ struct bam_dmux *dmux = skb_dma->dmux;
+ struct dma_async_tx_descriptor *desc;
+
+ desc = dmaengine_prep_slave_single(dmux->tx, skb_dma->addr,
+ skb_dma->skb->len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dmux->dev, "Failed to prepare TX DMA buffer\n");
+ return false;
+ }
+
+ desc->callback = bam_dmux_tx_callback;
+ desc->callback_param = skb_dma;
+ desc->cookie = dmaengine_submit(desc);
+ return true;
+}
+
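+/* Reserve the next free slot in the tx_skbs ring. Queues are stopped when
+ * the current slot (or the one following it) is still in flight, and woken
+ * again from bam_dmux_tx_done().
+ */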
+static struct bam_dmux_skb_dma *
+bam_dmux_tx_queue(struct bam_dmux *dmux, struct sk_buff *skb)
+{
+ struct bam_dmux_skb_dma *skb_dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dmux->tx_lock, flags);
+
+ skb_dma = &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB];
+ if (skb_dma->skb) {
+ bam_dmux_tx_stop_queues(dmux);
+ spin_unlock_irqrestore(&dmux->tx_lock, flags);
+ return NULL;
+ }
+ skb_dma->skb = skb;
+
+ dmux->tx_next_skb++;
+ if (dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB].skb)
+ bam_dmux_tx_stop_queues(dmux);
+
+ spin_unlock_irqrestore(&dmux->tx_lock, flags);
+ return skb_dma;
+}
+
+static int bam_dmux_send_cmd(struct bam_dmux_netdev *bndev, u8 cmd)
+{
+ struct bam_dmux *dmux = bndev->dmux;
+ struct bam_dmux_skb_dma *skb_dma;
+ struct bam_dmux_hdr *hdr;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = alloc_skb(sizeof(*hdr), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = skb_put_zero(skb, sizeof(*hdr));
+ hdr->magic = BAM_DMUX_HDR_MAGIC;
+ hdr->cmd = cmd;
+ hdr->ch = bndev->ch;
+
+ skb_dma = bam_dmux_tx_queue(dmux, skb);
+ if (!skb_dma) {
+ ret = -EAGAIN;
+ goto free_skb;
+ }
+
+ ret = pm_runtime_get_sync(dmux->dev);
+ if (ret < 0)
+ goto tx_fail;
+
+ if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE)) {
+ ret = -ENOMEM;
+ goto tx_fail;
+ }
+
+ if (!bam_dmux_skb_dma_submit_tx(skb_dma)) {
+ ret = -EIO;
+ goto tx_fail;
+ }
+
+ dma_async_issue_pending(dmux->tx);
+ return 0;
+
+tx_fail:
+ bam_dmux_tx_done(skb_dma);
+free_skb:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int bam_dmux_netdev_open(struct net_device *netdev)
+{
+ struct bam_dmux_netdev *bndev = netdev_priv(netdev);
+ int ret;
+
+ ret = bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_OPEN);
+ if (ret)
+ return ret;
+
+ netif_start_queue(netdev);
+ return 0;
+}
+
+static int bam_dmux_netdev_stop(struct net_device *netdev)
+{
+ struct bam_dmux_netdev *bndev = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_CLOSE);
+ return 0;
+}
+
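+/* Extra bytes required to reach 'needed' when only 'avail' are available */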
+static unsigned int needed_room(unsigned int avail, unsigned int needed)
+{
+ if (avail >= needed)
+ return 0;
+ return needed - avail;
+}
+
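+/* Prepend the BAM DMUX header and pad the payload up to the next 32-bit
+ * boundary (a full word of padding is added when the length is already
+ * aligned), expanding the skb head/tail if there is not enough room.
+ */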
+static int bam_dmux_tx_prepare_skb(struct bam_dmux_netdev *bndev,
+ struct sk_buff *skb)
+{
+ unsigned int head = needed_room(skb_headroom(skb), BAM_DMUX_HDR_SIZE);
+ unsigned int pad = sizeof(u32) - skb->len % sizeof(u32);
+ unsigned int tail = needed_room(skb_tailroom(skb), pad);
+ struct bam_dmux_hdr *hdr;
+ int ret;
+
+ if (head || tail || skb_cloned(skb)) {
+ ret = pskb_expand_head(skb, head, tail, GFP_ATOMIC);
+ if (ret)
+ return ret;
+ }
+
+ hdr = skb_push(skb, sizeof(*hdr));
+ hdr->magic = BAM_DMUX_HDR_MAGIC;
+ hdr->signal = 0;
+ hdr->cmd = BAM_DMUX_CMD_DATA;
+ hdr->pad = pad;
+ hdr->ch = bndev->ch;
+ hdr->len = skb->len - sizeof(*hdr);
+ if (pad)
+ skb_put_zero(skb, pad);
+
+ return 0;
+}
+
+static netdev_tx_t bam_dmux_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct bam_dmux_netdev *bndev = netdev_priv(netdev);
+ struct bam_dmux *dmux = bndev->dmux;
+ struct bam_dmux_skb_dma *skb_dma;
+ int active, ret;
+
+ skb_dma = bam_dmux_tx_queue(dmux, skb);
+ if (!skb_dma)
+ return NETDEV_TX_BUSY;
+
+ active = pm_runtime_get(dmux->dev);
+ if (active < 0 && active != -EINPROGRESS)
+ goto drop;
+
+ ret = bam_dmux_tx_prepare_skb(bndev, skb);
+ if (ret)
+ goto drop;
+
+ if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE))
+ goto drop;
+
+ if (active <= 0) {
+ /* Cannot sleep here so mark skb for wakeup handler and return */
+ if (!atomic_long_fetch_or(BIT(skb_dma - dmux->tx_skbs),
+ &dmux->tx_deferred_skb))
+ queue_pm_work(&dmux->tx_wakeup_work);
+ return NETDEV_TX_OK;
+ }
+
+ if (!bam_dmux_skb_dma_submit_tx(skb_dma))
+ goto drop;
+
+ dma_async_issue_pending(dmux->tx);
+ return NETDEV_TX_OK;
+
+drop:
+ bam_dmux_tx_done(skb_dma);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void bam_dmux_tx_wakeup_work(struct work_struct *work)
+{
+ struct bam_dmux *dmux = container_of(work, struct bam_dmux, tx_wakeup_work);
+ unsigned long pending;
+ int ret, i;
+
+ ret = pm_runtime_resume_and_get(dmux->dev);
+ if (ret < 0) {
+ dev_err(dmux->dev, "Failed to resume: %d\n", ret);
+ return;
+ }
+
+ pending = atomic_long_xchg(&dmux->tx_deferred_skb, 0);
+ if (!pending)
+ goto out;
+
+ dev_dbg(dmux->dev, "pending skbs after wakeup: %#lx\n", pending);
+ for_each_set_bit(i, &pending, BAM_DMUX_NUM_SKB) {
+ bam_dmux_skb_dma_submit_tx(&dmux->tx_skbs[i]);
+ }
+ dma_async_issue_pending(dmux->tx);
+
+out:
+ pm_runtime_mark_last_busy(dmux->dev);
+ pm_runtime_put_autosuspend(dmux->dev);
+}
+
+static const struct net_device_ops bam_dmux_ops = {
+ .ndo_open = bam_dmux_netdev_open,
+ .ndo_stop = bam_dmux_netdev_stop,
+ .ndo_start_xmit = bam_dmux_netdev_start_xmit,
+};
+
+static const struct device_type wwan_type = {
+ .name = "wwan",
+};
+
+static void bam_dmux_netdev_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &bam_dmux_ops;
+
+ dev->type = ARPHRD_RAWIP;
+ SET_NETDEV_DEVTYPE(dev, &wwan_type);
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+ dev->mtu = ETH_DATA_LEN;
+ dev->max_mtu = BAM_DMUX_MAX_DATA_SIZE;
+ dev->needed_headroom = sizeof(struct bam_dmux_hdr);
+ dev->needed_tailroom = sizeof(u32); /* word-aligned */
+ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+
+ /* This perm addr will be used as an interface identifier by IPv6 */
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->perm_addr);
+}
+
+static void bam_dmux_register_netdev_work(struct work_struct *work)
+{
+ struct bam_dmux *dmux = container_of(work, struct bam_dmux, register_netdev_work);
+ struct bam_dmux_netdev *bndev;
+ struct net_device *netdev;
+ int ch, ret;
+
+ for_each_set_bit(ch, dmux->remote_channels, BAM_DMUX_NUM_CH) {
+ if (dmux->netdevs[ch])
+ continue;
+
+ netdev = alloc_netdev(sizeof(*bndev), "wwan%d", NET_NAME_ENUM,
+ bam_dmux_netdev_setup);
+ if (!netdev)
+ return;
+
+ SET_NETDEV_DEV(netdev, dmux->dev);
+ netdev->dev_port = ch;
+
+ bndev = netdev_priv(netdev);
+ bndev->dmux = dmux;
+ bndev->ch = ch;
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dmux->dev, "Failed to register netdev for channel %u: %d\n",
+ ch, ret);
+ free_netdev(netdev);
+ return;
+ }
+
+ dmux->netdevs[ch] = netdev;
+ }
+}
+
+static void bam_dmux_rx_callback(void *data);
+
+static bool bam_dmux_skb_dma_submit_rx(struct bam_dmux_skb_dma *skb_dma)
+{
+ struct bam_dmux *dmux = skb_dma->dmux;
+ struct dma_async_tx_descriptor *desc;
+
+ desc = dmaengine_prep_slave_single(dmux->rx, skb_dma->addr,
+ skb_dma->skb->len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dmux->dev, "Failed to prepare RX DMA buffer\n");
+ return false;
+ }
+
+ desc->callback = bam_dmux_rx_callback;
+ desc->callback_param = skb_dma;
+ desc->cookie = dmaengine_submit(desc);
+ return true;
+}
+
+static bool bam_dmux_skb_dma_queue_rx(struct bam_dmux_skb_dma *skb_dma, gfp_t gfp)
+{
+ if (!skb_dma->skb) {
+ skb_dma->skb = __netdev_alloc_skb(NULL, BAM_DMUX_BUFFER_SIZE, gfp);
+ if (!skb_dma->skb)
+ return false;
+ skb_put(skb_dma->skb, BAM_DMUX_BUFFER_SIZE);
+ }
+
+ return bam_dmux_skb_dma_map(skb_dma, DMA_FROM_DEVICE) &&
+ bam_dmux_skb_dma_submit_rx(skb_dma);
+}
+
+static void bam_dmux_cmd_data(struct bam_dmux_skb_dma *skb_dma)
+{
+ struct bam_dmux *dmux = skb_dma->dmux;
+ struct sk_buff *skb = skb_dma->skb;
+ struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
+ struct net_device *netdev = dmux->netdevs[hdr->ch];
+
+ if (!netdev || !netif_running(netdev)) {
+ dev_warn(dmux->dev, "Data for inactive channel %u\n", hdr->ch);
+ return;
+ }
+
+ if (hdr->len > BAM_DMUX_MAX_DATA_SIZE) {
+ dev_err(dmux->dev, "Data larger than buffer? (%u > %u)\n",
+ hdr->len, (u16)BAM_DMUX_MAX_DATA_SIZE);
+ return;
+ }
+
+ skb_dma->skb = NULL; /* Hand over to network stack */
+
+ skb_pull(skb, sizeof(*hdr));
+ skb_trim(skb, hdr->len);
+ skb->dev = netdev;
+
+ /* Only Raw-IP/QMAP is supported by this driver */
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+
+ netif_receive_skb(skb);
+}
+
+static void bam_dmux_cmd_open(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
+{
+ struct net_device *netdev = dmux->netdevs[hdr->ch];
+
+ dev_dbg(dmux->dev, "open channel: %u\n", hdr->ch);
+
+ if (__test_and_set_bit(hdr->ch, dmux->remote_channels)) {
+ dev_warn(dmux->dev, "Channel already open: %u\n", hdr->ch);
+ return;
+ }
+
+ if (netdev) {
+ netif_device_attach(netdev);
+ } else {
+ /* Cannot sleep here, schedule work to register the netdev */
+ schedule_work(&dmux->register_netdev_work);
+ }
+}
+
+static void bam_dmux_cmd_close(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
+{
+ struct net_device *netdev = dmux->netdevs[hdr->ch];
+
+ dev_dbg(dmux->dev, "close channel: %u\n", hdr->ch);
+
+ if (!__test_and_clear_bit(hdr->ch, dmux->remote_channels)) {
+ dev_err(dmux->dev, "Channel not open: %u\n", hdr->ch);
+ return;
+ }
+
+ if (netdev)
+ netif_device_detach(netdev);
+}
+
+static void bam_dmux_rx_callback(void *data)
+{
+ struct bam_dmux_skb_dma *skb_dma = data;
+ struct bam_dmux *dmux = skb_dma->dmux;
+ struct sk_buff *skb = skb_dma->skb;
+ struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
+
+ bam_dmux_skb_dma_unmap(skb_dma, DMA_FROM_DEVICE);
+
+ if (hdr->magic != BAM_DMUX_HDR_MAGIC) {
+ dev_err(dmux->dev, "Invalid magic in header: %#x\n", hdr->magic);
+ goto out;
+ }
+
+ if (hdr->ch >= BAM_DMUX_NUM_CH) {
+ dev_dbg(dmux->dev, "Unsupported channel: %u\n", hdr->ch);
+ goto out;
+ }
+
+ switch (hdr->cmd) {
+ case BAM_DMUX_CMD_DATA:
+ bam_dmux_cmd_data(skb_dma);
+ break;
+ case BAM_DMUX_CMD_OPEN:
+ bam_dmux_cmd_open(dmux, hdr);
+ break;
+ case BAM_DMUX_CMD_CLOSE:
+ bam_dmux_cmd_close(dmux, hdr);
+ break;
+ default:
+ dev_err(dmux->dev, "Unsupported command %u on channel %u\n",
+ hdr->cmd, hdr->ch);
+ break;
+ }
+
+out:
+ if (bam_dmux_skb_dma_queue_rx(skb_dma, GFP_ATOMIC))
+ dma_async_issue_pending(dmux->rx);
+}
+
+static bool bam_dmux_power_on(struct bam_dmux *dmux)
+{
+ struct device *dev = dmux->dev;
+ struct dma_slave_config dma_rx_conf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_maxburst = BAM_DMUX_BUFFER_SIZE,
+ };
+ int i;
+
+ dmux->rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dmux->rx)) {
+ dev_err(dev, "Failed to request RX DMA channel: %pe\n", dmux->rx);
+ dmux->rx = NULL;
+ return false;
+ }
+ dmaengine_slave_config(dmux->rx, &dma_rx_conf);
+
+ for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
+ if (!bam_dmux_skb_dma_queue_rx(&dmux->rx_skbs[i], GFP_KERNEL))
+ return false;
+ }
+ dma_async_issue_pending(dmux->rx);
+
+ return true;
+}
+
+static void bam_dmux_free_skbs(struct bam_dmux_skb_dma skbs[],
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
+ struct bam_dmux_skb_dma *skb_dma = &skbs[i];
+
+ if (skb_dma->addr)
+ bam_dmux_skb_dma_unmap(skb_dma, dir);
+ if (skb_dma->skb) {
+ dev_kfree_skb(skb_dma->skb);
+ skb_dma->skb = NULL;
+ }
+ }
+}
+
+static void bam_dmux_power_off(struct bam_dmux *dmux)
+{
+ if (dmux->tx) {
+ dmaengine_terminate_sync(dmux->tx);
+ dma_release_channel(dmux->tx);
+ dmux->tx = NULL;
+ }
+
+ if (dmux->rx) {
+ dmaengine_terminate_sync(dmux->rx);
+ dma_release_channel(dmux->rx);
+ dmux->rx = NULL;
+ }
+
+ bam_dmux_free_skbs(dmux->rx_skbs, DMA_FROM_DEVICE);
+}
+
+static irqreturn_t bam_dmux_pc_irq(int irq, void *data)
+{
+ struct bam_dmux *dmux = data;
+ bool new_state = !dmux->pc_state;
+
+ dev_dbg(dmux->dev, "pc: %u\n", new_state);
+
+ if (new_state) {
+ if (bam_dmux_power_on(dmux))
+ bam_dmux_pc_ack(dmux);
+ else
+ bam_dmux_power_off(dmux);
+ } else {
+ bam_dmux_power_off(dmux);
+ bam_dmux_pc_ack(dmux);
+ }
+
+ dmux->pc_state = new_state;
+ wake_up_all(&dmux->pc_wait);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bam_dmux_pc_ack_irq(int irq, void *data)
+{
+ struct bam_dmux *dmux = data;
+
+ dev_dbg(dmux->dev, "pc ack\n");
+ complete_all(&dmux->pc_ack_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int bam_dmux_runtime_suspend(struct device *dev)
+{
+ struct bam_dmux *dmux = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "runtime suspend\n");
+ bam_dmux_pc_vote(dmux, false);
+
+ return 0;
+}
+
+static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
+{
+ struct bam_dmux *dmux = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "runtime resume\n");
+
+ /* Wait until previous power down was acked */
+ if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
+ BAM_DMUX_REMOTE_TIMEOUT))
+ return -ETIMEDOUT;
+
+ /* Vote for power state */
+ bam_dmux_pc_vote(dmux, true);
+
+ /* Wait for ack */
+ if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
+ BAM_DMUX_REMOTE_TIMEOUT)) {
+ bam_dmux_pc_vote(dmux, false);
+ return -ETIMEDOUT;
+ }
+
+ /* Wait until we're up */
+ if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state,
+ BAM_DMUX_REMOTE_TIMEOUT)) {
+ bam_dmux_pc_vote(dmux, false);
+ return -ETIMEDOUT;
+ }
+
+ /* Ensure that we actually initialized successfully */
+ if (!dmux->rx) {
+ bam_dmux_pc_vote(dmux, false);
+ return -ENXIO;
+ }
+
+ /* Request TX channel if necessary */
+ if (dmux->tx)
+ return 0;
+
+ dmux->tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dmux->tx)) {
+ dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
+ dmux->tx = NULL;
+ bam_dmux_runtime_suspend(dev);
+ return -ENXIO;
+ }
+
+ return 0;
+}
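+
+/*
+ * Illustrative sketch (not part of the driver): a TX path would typically
+ * bracket hardware access with runtime PM references, which is what drives
+ * the vote/ack handshake implemented above:
+ *
+ *	ret = pm_runtime_get_sync(dmux->dev);
+ *	if (ret < 0) {
+ *		pm_runtime_put_noidle(dmux->dev);
+ *		return ret;
+ *	}
+ *	... queue the skb on dmux->tx ...
+ *	pm_runtime_mark_last_busy(dmux->dev);
+ *	pm_runtime_put_autosuspend(dmux->dev);
+ *
+ * This only shows the standard runtime PM pattern these callbacks are written
+ * for; the driver's actual TX helpers are defined earlier in this file.
+ */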
+
+static int bam_dmux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bam_dmux *dmux;
+ int ret, pc_ack_irq, i;
+ unsigned int bit;
+
+ dmux = devm_kzalloc(dev, sizeof(*dmux), GFP_KERNEL);
+ if (!dmux)
+ return -ENOMEM;
+
+ dmux->dev = dev;
+ platform_set_drvdata(pdev, dmux);
+
+ dmux->pc_irq = platform_get_irq_byname(pdev, "pc");
+ if (dmux->pc_irq < 0)
+ return dmux->pc_irq;
+
+ pc_ack_irq = platform_get_irq_byname(pdev, "pc-ack");
+ if (pc_ack_irq < 0)
+ return pc_ack_irq;
+
+ dmux->pc = devm_qcom_smem_state_get(dev, "pc", &bit);
+ if (IS_ERR(dmux->pc))
+ return dev_err_probe(dev, PTR_ERR(dmux->pc),
+ "Failed to get pc state\n");
+ dmux->pc_mask = BIT(bit);
+
+ dmux->pc_ack = devm_qcom_smem_state_get(dev, "pc-ack", &bit);
+ if (IS_ERR(dmux->pc_ack))
+ return dev_err_probe(dev, PTR_ERR(dmux->pc_ack),
+ "Failed to get pc-ack state\n");
+ dmux->pc_ack_mask = BIT(bit);
+
+ init_waitqueue_head(&dmux->pc_wait);
+ init_completion(&dmux->pc_ack_completion);
+ complete_all(&dmux->pc_ack_completion);
+
+ spin_lock_init(&dmux->tx_lock);
+ INIT_WORK(&dmux->tx_wakeup_work, bam_dmux_tx_wakeup_work);
+ INIT_WORK(&dmux->register_netdev_work, bam_dmux_register_netdev_work);
+
+ for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
+ dmux->rx_skbs[i].dmux = dmux;
+ dmux->tx_skbs[i].dmux = dmux;
+ }
+
+ /* Runtime PM manages our own power vote.
+ * Note that the RX path may be active even if we are runtime suspended,
+ * since it is controlled by the remote side.
+ */
+ pm_runtime_set_autosuspend_delay(dev, BAM_DMUX_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq,
+ IRQF_ONESHOT, NULL, dmux);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,
+ IRQF_ONESHOT, NULL, dmux);
+ if (ret)
+ return ret;
+
+ ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
+ &dmux->pc_state);
+ if (ret)
+ return ret;
+
+ /* Check if remote finished initialization before us */
+ if (dmux->pc_state) {
+ if (bam_dmux_power_on(dmux))
+ bam_dmux_pc_ack(dmux);
+ else
+ bam_dmux_power_off(dmux);
+ }
+
+ return 0;
+}
+
+static int bam_dmux_remove(struct platform_device *pdev)
+{
+ struct bam_dmux *dmux = platform_get_drvdata(pdev);
+ struct device *dev = dmux->dev;
+ LIST_HEAD(list);
+ int i;
+
+ /* Unregister network interfaces */
+ cancel_work_sync(&dmux->register_netdev_work);
+ rtnl_lock();
+ for (i = 0; i < BAM_DMUX_NUM_CH; ++i)
+ if (dmux->netdevs[i])
+ unregister_netdevice_queue(dmux->netdevs[i], &list);
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+ cancel_work_sync(&dmux->tx_wakeup_work);
+
+ /* Drop our own power vote */
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ bam_dmux_runtime_suspend(dev);
+ pm_runtime_set_suspended(dev);
+
+ /* Try to wait for remote side to drop power vote */
+ if (!wait_event_timeout(dmux->pc_wait, !dmux->rx, BAM_DMUX_REMOTE_TIMEOUT))
+ dev_err(dev, "Timed out waiting for remote side to suspend\n");
+
+ /* Make sure everything is cleaned up before we return */
+ disable_irq(dmux->pc_irq);
+ bam_dmux_power_off(dmux);
+ bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static const struct dev_pm_ops bam_dmux_pm_ops = {
+ SET_RUNTIME_PM_OPS(bam_dmux_runtime_suspend, bam_dmux_runtime_resume, NULL)
+};
+
+static const struct of_device_id bam_dmux_of_match[] = {
+ { .compatible = "qcom,bam-dmux" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bam_dmux_of_match);
+
+static struct platform_driver bam_dmux_driver = {
+ .probe = bam_dmux_probe,
+ .remove = bam_dmux_remove,
+ .driver = {
+ .name = "bam-dmux",
+ .pm = &bam_dmux_pm_ops,
+ .of_match_table = bam_dmux_of_match,
+ },
+};
+module_platform_driver(bam_dmux_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm BAM-DMUX WWAN Network Driver");
+MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");
diff --git a/drivers/net/wwan/rpmsg_wwan_ctrl.c b/drivers/net/wwan/rpmsg_wwan_ctrl.c
new file mode 100644
index 0000000000..86b60aadfa
--- /dev/null
+++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, Stephan Gerhold <stephan@gerhold.net> */
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/wwan.h>
+
+struct rpmsg_wwan_dev {
+ /* Lower level is a rpmsg dev, upper level is a wwan port */
+ struct rpmsg_device *rpdev;
+ struct wwan_port *wwan_port;
+ struct rpmsg_endpoint *ept;
+};
+
+static int rpmsg_wwan_ctrl_callback(struct rpmsg_device *rpdev,
+ void *buf, int len, void *priv, u32 src)
+{
+ struct rpmsg_wwan_dev *rpwwan = priv;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, buf, len);
+ wwan_port_rx(rpwwan->wwan_port, skb);
+ return 0;
+}
+
+static int rpmsg_wwan_ctrl_start(struct wwan_port *port)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+ struct rpmsg_channel_info chinfo = {
+ .src = rpwwan->rpdev->src,
+ .dst = RPMSG_ADDR_ANY,
+ };
+
+ strncpy(chinfo.name, rpwwan->rpdev->id.name, RPMSG_NAME_SIZE);
+ rpwwan->ept = rpmsg_create_ept(rpwwan->rpdev, rpmsg_wwan_ctrl_callback,
+ rpwwan, chinfo);
+ if (!rpwwan->ept)
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static void rpmsg_wwan_ctrl_stop(struct wwan_port *port)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+
+ rpmsg_destroy_ept(rpwwan->ept);
+ rpwwan->ept = NULL;
+}
+
+static int rpmsg_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ ret = rpmsg_trysend(rpwwan->ept, skb->data, skb->len);
+ if (ret)
+ return ret;
+
+ consume_skb(skb);
+ return 0;
+}
+
+static int rpmsg_wwan_ctrl_tx_blocking(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ ret = rpmsg_send(rpwwan->ept, skb->data, skb->len);
+ if (ret)
+ return ret;
+
+ consume_skb(skb);
+ return 0;
+}
+
+static __poll_t rpmsg_wwan_ctrl_tx_poll(struct wwan_port *port,
+ struct file *filp, poll_table *wait)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+
+ return rpmsg_poll(rpwwan->ept, filp, wait);
+}
+
+static const struct wwan_port_ops rpmsg_wwan_pops = {
+ .start = rpmsg_wwan_ctrl_start,
+ .stop = rpmsg_wwan_ctrl_stop,
+ .tx = rpmsg_wwan_ctrl_tx,
+ .tx_blocking = rpmsg_wwan_ctrl_tx_blocking,
+ .tx_poll = rpmsg_wwan_ctrl_tx_poll,
+};
+
+static struct device *rpmsg_wwan_find_parent(struct device *dev)
+{
+ /* Select first platform device as parent for the WWAN ports.
+ * On Qualcomm platforms this is usually the platform device that
+ * represents the modem remote processor. This might need to be
+ * adjusted when adding device IDs for other platforms.
+ */
+ for (dev = dev->parent; dev; dev = dev->parent) {
+ if (dev_is_platform(dev))
+ return dev;
+ }
+ return NULL;
+}
+
+static int rpmsg_wwan_ctrl_probe(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_wwan_dev *rpwwan;
+ struct wwan_port *port;
+ struct device *parent;
+
+ parent = rpmsg_wwan_find_parent(&rpdev->dev);
+ if (!parent)
+ return -ENODEV;
+
+ rpwwan = devm_kzalloc(&rpdev->dev, sizeof(*rpwwan), GFP_KERNEL);
+ if (!rpwwan)
+ return -ENOMEM;
+
+ rpwwan->rpdev = rpdev;
+ dev_set_drvdata(&rpdev->dev, rpwwan);
+
+ /* Register as a wwan port, id.driver_data contains wwan port type */
+ port = wwan_create_port(parent, rpdev->id.driver_data,
+ &rpmsg_wwan_pops, NULL, rpwwan);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ rpwwan->wwan_port = port;
+
+ return 0;
+}
+
+static void rpmsg_wwan_ctrl_remove(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_wwan_dev *rpwwan = dev_get_drvdata(&rpdev->dev);
+
+ wwan_remove_port(rpwwan->wwan_port);
+}
+
+static const struct rpmsg_device_id rpmsg_wwan_ctrl_id_table[] = {
+ /* RPMSG channels for Qualcomm SoCs with integrated modem */
+ { .name = "DATA5_CNTL", .driver_data = WWAN_PORT_QMI },
+ { .name = "DATA4", .driver_data = WWAN_PORT_AT },
+ { .name = "DATA1", .driver_data = WWAN_PORT_AT },
+ {},
+};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_wwan_ctrl_id_table);
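+
+/*
+ * Usage note (illustrative, not part of the driver): for the entries in the
+ * ID table above, the WWAN core exposes each registered port as a character
+ * device named after the port type, e.g. something like /dev/wwan0qmi0 for
+ * "DATA5_CNTL" and /dev/wwan0at0 for the AT channels, which userspace tools
+ * such as ModemManager can then open directly.
+ */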
+
+static struct rpmsg_driver rpmsg_wwan_ctrl_driver = {
+ .drv.name = "rpmsg_wwan_ctrl",
+ .id_table = rpmsg_wwan_ctrl_id_table,
+ .probe = rpmsg_wwan_ctrl_probe,
+ .remove = rpmsg_wwan_ctrl_remove,
+};
+module_rpmsg_driver(rpmsg_wwan_ctrl_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RPMSG WWAN CTRL Driver");
+MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");
diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile
new file mode 100644
index 0000000000..2652cd0050
--- /dev/null
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o
+mtk_t7xx-y:= t7xx_pci.o \
+ t7xx_pcie_mac.o \
+ t7xx_mhccif.o \
+ t7xx_state_monitor.o \
+ t7xx_modem_ops.o \
+ t7xx_cldma.o \
+ t7xx_hif_cldma.o \
+ t7xx_port_proxy.o \
+ t7xx_port_ctrl_msg.o \
+ t7xx_port_wwan.o \
+ t7xx_hif_dpmaif.o \
+ t7xx_hif_dpmaif_tx.o \
+ t7xx_hif_dpmaif_rx.o \
+ t7xx_dpmaif.o \
+ t7xx_netdev.o
+
+mtk_t7xx-$(CONFIG_WWAN_DEBUGFS) += \
+ t7xx_port_trace.o
diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c
new file mode 100644
index 0000000000..9f43f256db
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_cldma.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/types.h>
+
+#include "t7xx_cldma.h"
+
+#define ADDR_SIZE 8
+
+void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info)
+{
+ u32 val;
+
+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
+ val |= IP_BUSY_WAKEUP;
+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
+}
+
+/**
+ * t7xx_cldma_hw_restore() - Restore CLDMA HW registers.
+ * @hw_info: Pointer to struct t7xx_cldma_hw.
+ *
+ * Restore HW after resume. Writes uplink configuration for CLDMA HW.
+ */
+void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info)
+{
+ u32 ul_cfg;
+
+ ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
+ ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
+
+ if (hw_info->hw_mode == MODE_BIT_64)
+ ul_cfg |= UL_CFG_BIT_MODE_64;
+ else if (hw_info->hw_mode == MODE_BIT_40)
+ ul_cfg |= UL_CFG_BIT_MODE_40;
+ else if (hw_info->hw_mode == MODE_BIT_36)
+ ul_cfg |= UL_CFG_BIT_MODE_36;
+
+ iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
+ /* Disable TX and RX invalid address check */
+ iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
+ iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
+}
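+
+/*
+ * Illustrative sketch (not part of the driver): a resume path built only from
+ * the helpers in this file could look roughly like this (assumed ordering):
+ *
+ *	t7xx_cldma_hw_restore(hw_info);
+ *	t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
+ *	t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
+ *	t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
+ *
+ * The driver's real resume handling, including descriptor state, lives in
+ * t7xx_hif_cldma.c.
+ */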
+
+void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD :
+ hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD;
+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ iowrite32(val, reg);
+}
+
+void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info)
+{
+ /* Enable the TX & RX interrupts */
+ iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
+ iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
+ /* Enable the empty queue interrupt */
+ iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
+ iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
+}
+
+void t7xx_cldma_hw_reset(void __iomem *ao_base)
+{
+ u32 val;
+
+ val = ioread32(ao_base + REG_INFRA_RST2_SET);
+ val |= RST2_PMIC_SW_RST_SET;
+ iowrite32(val, ao_base + REG_INFRA_RST2_SET);
+ val = ioread32(ao_base + REG_INFRA_RST4_SET);
+ val |= RST4_CLDMA1_SW_RST_SET;
+ iowrite32(val, ao_base + REG_INFRA_RST4_SET);
+ udelay(1);
+
+ val = ioread32(ao_base + REG_INFRA_RST4_CLR);
+ val |= RST4_CLDMA1_SW_RST_CLR;
+ iowrite32(val, ao_base + REG_INFRA_RST4_CLR);
+ val = ioread32(ao_base + REG_INFRA_RST2_CLR);
+ val |= RST2_PMIC_SW_RST_CLR;
+ iowrite32(val, ao_base + REG_INFRA_RST2_CLR);
+}
+
+bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
+{
+ u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
+
+ return ioread64(hw_info->ap_pdn_base + offset);
+}
+
+void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
+ enum mtk_txrx tx_rx)
+{
+ u32 offset = qno * ADDR_SIZE;
+ void __iomem *reg;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
+ hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
+ iowrite64(address, reg + offset);
+}
+
+void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *base = hw_info->ap_pdn_base;
+
+ if (tx_rx == MTK_RX)
+ iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD);
+ else
+ iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD);
+}
+
+unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 mask, val;
+
+ mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS :
+ hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS;
+ val = ioread32(reg);
+
+ return val & mask;
+}
+
+void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
+{
+ unsigned int ch_id;
+
+ ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
+ ch_id &= bitmask;
+ /* Clear the ch IDs in the TX interrupt status register */
+ iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
+ ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
+}
+
+void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
+{
+ unsigned int ch_id;
+
+ ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
+ ch_id &= bitmask;
+ /* Clear the ch IDs in the RX interrupt status register */
+ iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
+ ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
+}
+
+unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0;
+ val = ioread32(reg);
+ return val & bitmask;
+}
+
+void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ iowrite32(val, reg);
+}
+
+void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ iowrite32(val << EQ_STA_BIT_OFFSET, reg);
+}
+
+void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ iowrite32(val, reg);
+}
+
+void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ iowrite32(val << EQ_STA_BIT_OFFSET, reg);
+}
+
+/**
+ * t7xx_cldma_hw_init() - Initialize CLDMA HW.
+ * @hw_info: Pointer to struct t7xx_cldma_hw.
+ *
+ * Write uplink and downlink configuration to CLDMA HW.
+ */
+void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info)
+{
+ u32 ul_cfg, dl_cfg;
+
+ ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
+ dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
+ /* Configure the DRAM address mode */
+ ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
+ dl_cfg &= ~DL_CFG_BIT_MODE_MASK;
+
+ if (hw_info->hw_mode == MODE_BIT_64) {
+ ul_cfg |= UL_CFG_BIT_MODE_64;
+ dl_cfg |= DL_CFG_BIT_MODE_64;
+ } else if (hw_info->hw_mode == MODE_BIT_40) {
+ ul_cfg |= UL_CFG_BIT_MODE_40;
+ dl_cfg |= DL_CFG_BIT_MODE_40;
+ } else if (hw_info->hw_mode == MODE_BIT_36) {
+ ul_cfg |= UL_CFG_BIT_MODE_36;
+ dl_cfg |= DL_CFG_BIT_MODE_36;
+ }
+
+ iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
+ dl_cfg |= DL_CFG_UP_HW_LAST;
+ iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
+ iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK);
+ iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK);
+ iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
+ iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
+}
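+
+/*
+ * Illustrative sketch (not part of the driver): a minimal bring-up of a single
+ * TX queue using only the helpers in this file could look like (assumed
+ * ordering; gpd_dma_addr is a hypothetical DMA address of the queue's first
+ * descriptor):
+ *
+ *	t7xx_cldma_hw_init(hw_info);
+ *	t7xx_cldma_hw_set_start_addr(hw_info, qno, gpd_dma_addr, MTK_TX);
+ *	t7xx_cldma_hw_irq_en_txrx(hw_info, qno, MTK_TX);
+ *	t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);
+ *	t7xx_cldma_hw_start(hw_info);
+ *
+ * Descriptor ring management is handled by t7xx_hif_cldma.c.
+ */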
+
+void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD :
+ hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD;
+ iowrite32(CLDMA_ALL_Q, reg);
+}
+
+void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
+ iowrite32(TXRX_STATUS_BITMASK, reg);
+ iowrite32(EMPTY_STATUS_BITMASK, reg);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.h b/drivers/net/wwan/t7xx/t7xx_cldma.h
new file mode 100644
index 0000000000..8949e8377f
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_cldma.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_CLDMA_H__
+#define __T7XX_CLDMA_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define CLDMA_TXQ_NUM 8
+#define CLDMA_RXQ_NUM 8
+#define CLDMA_ALL_Q GENMASK(7, 0)
+
+/* Interrupt status bits */
+#define EMPTY_STATUS_BITMASK GENMASK(15, 8)
+#define TXRX_STATUS_BITMASK GENMASK(7, 0)
+#define EQ_STA_BIT_OFFSET 8
+#define L2_INT_BIT_COUNT 16
+#define EQ_STA_BIT(index) (BIT((index) + EQ_STA_BIT_OFFSET) & EMPTY_STATUS_BITMASK)
+
+#define TQ_ERR_INT_BITMASK GENMASK(23, 16)
+#define TQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24)
+
+#define RQ_ERR_INT_BITMASK GENMASK(23, 16)
+#define RQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24)
+
+#define CLDMA0_AO_BASE 0x10049000
+#define CLDMA0_PD_BASE 0x1021d000
+#define CLDMA1_AO_BASE 0x1004b000
+#define CLDMA1_PD_BASE 0x1021f000
+
+#define CLDMA_R_AO_BASE 0x10023000
+#define CLDMA_R_PD_BASE 0x1023d000
+
+/* CLDMA TX */
+#define REG_CLDMA_UL_START_ADDRL_0 0x0004
+#define REG_CLDMA_UL_START_ADDRH_0 0x0008
+#define REG_CLDMA_UL_CURRENT_ADDRL_0 0x0044
+#define REG_CLDMA_UL_CURRENT_ADDRH_0 0x0048
+#define REG_CLDMA_UL_STATUS 0x0084
+#define REG_CLDMA_UL_START_CMD 0x0088
+#define REG_CLDMA_UL_RESUME_CMD 0x008c
+#define REG_CLDMA_UL_STOP_CMD 0x0090
+#define REG_CLDMA_UL_ERROR 0x0094
+#define REG_CLDMA_UL_CFG 0x0098
+#define UL_CFG_BIT_MODE_36 BIT(5)
+#define UL_CFG_BIT_MODE_40 BIT(6)
+#define UL_CFG_BIT_MODE_64 BIT(7)
+#define UL_CFG_BIT_MODE_MASK GENMASK(7, 5)
+
+#define REG_CLDMA_UL_MEM 0x009c
+#define UL_MEM_CHECK_DIS BIT(0)
+
+/* CLDMA RX */
+#define REG_CLDMA_DL_START_CMD 0x05bc
+#define REG_CLDMA_DL_RESUME_CMD 0x05c0
+#define REG_CLDMA_DL_STOP_CMD 0x05c4
+#define REG_CLDMA_DL_MEM 0x0508
+#define DL_MEM_CHECK_DIS BIT(0)
+
+#define REG_CLDMA_DL_CFG 0x0404
+#define DL_CFG_UP_HW_LAST BIT(2)
+#define DL_CFG_BIT_MODE_36 BIT(10)
+#define DL_CFG_BIT_MODE_40 BIT(11)
+#define DL_CFG_BIT_MODE_64 BIT(12)
+#define DL_CFG_BIT_MODE_MASK GENMASK(12, 10)
+
+#define REG_CLDMA_DL_START_ADDRL_0 0x0478
+#define REG_CLDMA_DL_START_ADDRH_0 0x047c
+#define REG_CLDMA_DL_CURRENT_ADDRL_0 0x04b8
+#define REG_CLDMA_DL_CURRENT_ADDRH_0 0x04bc
+#define REG_CLDMA_DL_STATUS 0x04f8
+
+/* CLDMA MISC */
+#define REG_CLDMA_L2TISAR0 0x0810
+#define REG_CLDMA_L2TISAR1 0x0814
+#define REG_CLDMA_L2TIMR0 0x0818
+#define REG_CLDMA_L2TIMR1 0x081c
+#define REG_CLDMA_L2TIMCR0 0x0820
+#define REG_CLDMA_L2TIMCR1 0x0824
+#define REG_CLDMA_L2TIMSR0 0x0828
+#define REG_CLDMA_L2TIMSR1 0x082c
+#define REG_CLDMA_L3TISAR0 0x0830
+#define REG_CLDMA_L3TISAR1 0x0834
+#define REG_CLDMA_L2RISAR0 0x0850
+#define REG_CLDMA_L2RISAR1 0x0854
+#define REG_CLDMA_L3RISAR0 0x0870
+#define REG_CLDMA_L3RISAR1 0x0874
+#define REG_CLDMA_IP_BUSY 0x08b4
+#define IP_BUSY_WAKEUP BIT(0)
+#define CLDMA_L2TISAR0_ALL_INT_MASK GENMASK(15, 0)
+#define CLDMA_L2RISAR0_ALL_INT_MASK GENMASK(15, 0)
+
+/* CLDMA MISC */
+#define REG_CLDMA_L2RIMR0 0x0858
+#define REG_CLDMA_L2RIMR1 0x085c
+#define REG_CLDMA_L2RIMCR0 0x0860
+#define REG_CLDMA_L2RIMCR1 0x0864
+#define REG_CLDMA_L2RIMSR0 0x0868
+#define REG_CLDMA_L2RIMSR1 0x086c
+#define REG_CLDMA_BUSY_MASK 0x0954
+#define BUSY_MASK_PCIE BIT(0)
+#define BUSY_MASK_AP BIT(1)
+#define BUSY_MASK_MD BIT(2)
+
+#define REG_CLDMA_INT_MASK 0x0960
+
+/* CLDMA RESET */
+#define REG_INFRA_RST4_SET 0x0730
+#define RST4_CLDMA1_SW_RST_SET BIT(20)
+
+#define REG_INFRA_RST4_CLR 0x0734
+#define RST4_CLDMA1_SW_RST_CLR BIT(20)
+
+#define REG_INFRA_RST2_SET 0x0140
+#define RST2_PMIC_SW_RST_SET BIT(18)
+
+#define REG_INFRA_RST2_CLR 0x0144
+#define RST2_PMIC_SW_RST_CLR BIT(18)
+
+enum mtk_txrx {
+ MTK_TX,
+ MTK_RX,
+};
+
+enum t7xx_hw_mode {
+ MODE_BIT_32,
+ MODE_BIT_36,
+ MODE_BIT_40,
+ MODE_BIT_64,
+};
+
+struct t7xx_cldma_hw {
+ enum t7xx_hw_mode hw_mode;
+ void __iomem *ap_ao_base;
+ void __iomem *ap_pdn_base;
+ u32 phy_interrupt_id;
+};
+
+void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx);
+unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info);
+void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info);
+void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask);
+void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask);
+void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info,
+ unsigned int qno, u64 address, enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_reset(void __iomem *ao_base);
+void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx);
+unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info);
+void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info);
+bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno);
+#endif
diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_dpmaif.c
new file mode 100644
index 0000000000..6d3edadecb
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.c
@@ -0,0 +1,1281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_reg.h"
+
+#define ioread32_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(ioread32, addr, val, cond, delay_us, timeout_us)
+
+static int t7xx_dpmaif_init_intr(struct dpmaif_hw_info *hw_info)
+{
+ struct dpmaif_isr_en_mask *isr_en_msk = &hw_info->isr_en_mask;
+ u32 value, ul_intr_enable, dl_intr_enable;
+ int ret;
+
+ ul_intr_enable = DP_UL_INT_ERR_MSK | DP_UL_INT_QDONE_MSK;
+ isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable;
+ iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+
+ /* Set interrupt enable mask */
+ iowrite32(ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0);
+ iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0);
+
+ /* Check mask status */
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
+ value, (value & ul_intr_enable) != ul_intr_enable, 0,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ dl_intr_enable = DP_DL_INT_PITCNT_LEN_ERR | DP_DL_INT_BATCNT_LEN_ERR;
+ isr_en_msk->ap_dl_l2intr_err_en_msk = dl_intr_enable;
+ ul_intr_enable = DPMAIF_DL_INT_DLQ0_QDONE | DPMAIF_DL_INT_DLQ0_PITCNT_LEN |
+ DPMAIF_DL_INT_DLQ1_QDONE | DPMAIF_DL_INT_DLQ1_PITCNT_LEN;
+ isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable;
+ iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
+
+ /* Set DL ISR PD enable mask */
+ iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0,
+ value, (value & ul_intr_enable) != ul_intr_enable, 0,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ isr_en_msk->ap_udl_ip_busy_en_msk = DPMAIF_UDL_IP_BUSY;
+ iowrite32(DPMAIF_AP_IP_BUSY_MASK, hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
+ iowrite32(isr_en_msk->ap_udl_ip_busy_en_msk,
+ hw_info->pcie_base + DPMAIF_AO_AP_DLUL_IP_BUSY_MASK);
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0);
+ value |= DPMAIF_DL_INT_Q2APTOP | DPMAIF_DL_INT_Q2TOQ1;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0);
+ iowrite32(DPMA_HPC_ALL_INT_MASK, hw_info->pcie_base + DPMAIF_HPC_INTR_MASK);
+
+ return 0;
+}
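+
+/*
+ * Masking convention (illustrative note, not part of the driver): the
+ * interrupt mask helpers below all follow the same write-then-verify pattern.
+ * Writing a bit to the "SR0" (set) register masks that interrupt, writing it
+ * to the "CR0" (clear) register unmasks it, and the "R0" register is polled
+ * afterwards to confirm that the hardware has latched the change, e.g.:
+ *
+ *	iowrite32(bit, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0);
+ *	ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
+ *				     value, (value & bit) == bit, 0,
+ *				     DPMAIF_CHECK_TIMEOUT_US);
+ */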
+
+static void t7xx_dpmaif_mask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ struct dpmaif_isr_en_mask *isr_en_msk;
+ u32 value, ul_int_que_done;
+ int ret;
+
+ isr_en_msk = &hw_info->isr_en_mask;
+ ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK;
+ isr_en_msk->ap_ul_l2intr_en_msk &= ~ul_int_que_done;
+ iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
+ value, (value & ul_int_que_done) == ul_int_que_done, 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (ret)
+ dev_err(hw_info->dev,
+ "Could not mask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
+ value);
+}
+
+void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ struct dpmaif_isr_en_mask *isr_en_msk;
+ u32 value, ul_int_que_done;
+ int ret;
+
+ isr_en_msk = &hw_info->isr_en_mask;
+ ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK;
+ isr_en_msk->ap_ul_l2intr_en_msk |= ul_int_que_done;
+ iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
+ value, (value & ul_int_que_done) != ul_int_que_done, 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (ret)
+ dev_err(hw_info->dev,
+ "Could not unmask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
+ value);
+}
+
+void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info)
+{
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_BATCNT_LEN_ERR;
+ iowrite32(DP_DL_INT_BATCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
+}
+
+void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info)
+{
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_PITCNT_LEN_ERR;
+ iowrite32(DP_DL_INT_PITCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
+}
+
+static u32 t7xx_update_dlq_intr(struct dpmaif_hw_info *hw_info, u32 q_done)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0);
+ iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ return value;
+}
+
+static int t7xx_mask_dlq_intr(struct dpmaif_hw_info *hw_info, unsigned int qno)
+{
+ u32 value, q_done;
+ int ret;
+
+ q_done = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE;
+ iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+
+ ret = read_poll_timeout_atomic(t7xx_update_dlq_intr, value, value & q_done,
+ 0, DPMAIF_CHECK_TIMEOUT_US, false, hw_info, q_done);
+ if (ret) {
+ dev_err(hw_info->dev,
+ "Could not mask the DL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
+ value);
+ return -ETIMEDOUT;
+ }
+
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~q_done;
+ return 0;
+}
+
+void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno)
+{
+ u32 mask;
+
+ mask = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE;
+ iowrite32(mask, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= mask;
+}
+
+void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info)
+{
+ u32 ip_busy_sts;
+
+ ip_busy_sts = ioread32(hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
+ iowrite32(ip_busy_sts, hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
+}
+
+static void t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int qno)
+{
+ if (qno == DPF_RX_QNO0)
+ iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ else
+ iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+}
+
+void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int qno)
+{
+ if (qno == DPF_RX_QNO0)
+ iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
+ else
+ iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
+}
+
+void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+}
+
+void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
+}
+
+static void t7xx_dpmaif_set_intr_para(struct dpmaif_hw_intr_st_para *para,
+ enum dpmaif_hw_intr_type intr_type, unsigned int intr_queue)
+{
+ para->intr_types[para->intr_cnt] = intr_type;
+ para->intr_queues[para->intr_cnt] = intr_queue;
+ para->intr_cnt++;
+}
+
+/* The para->intr_cnt counter is set to zero before this function is called.
+ * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues.
+ */
+static void t7xx_dpmaif_hw_check_tx_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int intr_status,
+ struct dpmaif_hw_intr_st_para *para)
+{
+ unsigned long value;
+
+ value = FIELD_GET(DP_UL_INT_QDONE_MSK, intr_status);
+ if (value) {
+ unsigned int index;
+
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DONE, value);
+
+ for_each_set_bit(index, &value, DPMAIF_TXQ_NUM)
+ t7xx_dpmaif_mask_ulq_intr(hw_info, index);
+ }
+
+ value = FIELD_GET(DP_UL_INT_EMPTY_MSK, intr_status);
+ if (value)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DRB_EMPTY, value);
+
+ value = FIELD_GET(DP_UL_INT_MD_NOTREADY_MSK, intr_status);
+ if (value)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_NOTREADY, value);
+
+ value = FIELD_GET(DP_UL_INT_MD_PWR_NOTREADY_MSK, intr_status);
+ if (value)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_PWR_NOTREADY, value);
+
+ value = FIELD_GET(DP_UL_INT_ERR_MSK, intr_status);
+ if (value)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_LEN_ERR, value);
+
+ /* Clear interrupt status */
+ iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+}
+
+/* The para->intr_cnt counter is set to zero before this function is called.
+ * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues.
+ */
+static void t7xx_dpmaif_hw_check_rx_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int intr_status,
+ struct dpmaif_hw_intr_st_para *para, int qno)
+{
+ if (qno == DPF_RX_QNO_DFT) {
+ if (intr_status & DP_DL_INT_SKB_LEN_ERR)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_SKB_LEN_ERR, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_BATCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_BATCNT_LEN_ERR, DPF_RX_QNO_DFT);
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_BATCNT_LEN_ERR;
+ iowrite32(DP_DL_INT_BATCNT_LEN_ERR,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ }
+
+ if (intr_status & DP_DL_INT_PITCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PITCNT_LEN_ERR, DPF_RX_QNO_DFT);
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_PITCNT_LEN_ERR;
+ iowrite32(DP_DL_INT_PITCNT_LEN_ERR,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ }
+
+ if (intr_status & DP_DL_INT_PKT_EMPTY_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PKT_EMPTY_SET, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_FRG_EMPTY_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRG_EMPTY_SET, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_MTU_ERR_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_MTU_ERR, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_FRG_LEN_ERR_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRGCNT_LEN_ERR, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_Q0_PITCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_PITCNT_LEN_ERR, BIT(qno));
+ t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno);
+ }
+
+ if (intr_status & DP_DL_INT_HPC_ENT_TYPE_ERR)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_HPC_ENT_TYPE_ERR,
+ DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_Q0_DONE) {
+ /* Mask the RX done interrupt immediately after it occurs; do not clear
+ * the interrupt status if the mask operation fails.
+ */
+ if (!t7xx_mask_dlq_intr(hw_info, qno))
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_DONE, BIT(qno));
+ else
+ intr_status &= ~DP_DL_INT_Q0_DONE;
+ }
+ } else {
+ if (intr_status & DP_DL_INT_Q1_PITCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_PITCNT_LEN_ERR, BIT(qno));
+ t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno);
+ }
+
+ if (intr_status & DP_DL_INT_Q1_DONE) {
+ if (!t7xx_mask_dlq_intr(hw_info, qno))
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_DONE, BIT(qno));
+ else
+ intr_status &= ~DP_DL_INT_Q1_DONE;
+ }
+ }
+
+ intr_status |= DP_DL_INT_BATCNT_LEN_ERR;
+ /* Clear interrupt status */
+ iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
+}
+
+/**
+ * t7xx_dpmaif_hw_get_intr_cnt() - Reads interrupt status and count from HW.
+ * @hw_info: Pointer to struct dpmaif_hw_info.
+ * @para: Pointer to struct dpmaif_hw_intr_st_para.
+ * @qno: Queue number.
+ *
+ * Reads RX/TX interrupt status from HW and clears UL/DL status as needed.
+ *
+ * Return: Interrupt count.
+ */
+int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info,
+ struct dpmaif_hw_intr_st_para *para, int qno)
+{
+ u32 rx_intr_status, tx_intr_status = 0;
+ u32 rx_intr_qdone, tx_intr_qdone = 0;
+
+ rx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
+ rx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0);
+
+ /* TX interrupt status */
+ if (qno == DPF_RX_QNO_DFT) {
+ /* All ULQ and DLQ0 interrupts share the same source, so there is no need to
+ * check the ULQ interrupts when a DLQ1 interrupt has occurred.
+ */
+ tx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+ tx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0);
+ }
+
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+
+ if (qno == DPF_RX_QNO_DFT) {
+ /* Do not schedule bottom half again or clear UL interrupt status when we
+ * have already masked it.
+ */
+ tx_intr_status &= ~tx_intr_qdone;
+ if (tx_intr_status)
+ t7xx_dpmaif_hw_check_tx_intr(hw_info, tx_intr_status, para);
+ }
+
+ if (rx_intr_status) {
+ if (qno == DPF_RX_QNO0) {
+ rx_intr_status &= DP_DL_Q0_STATUS_MASK;
+ if (rx_intr_qdone & DPMAIF_DL_INT_DLQ0_QDONE)
+ /* Do not schedule bottom half again or clear DL
+ * queue done interrupt status when we have already masked it.
+ */
+ rx_intr_status &= ~DP_DL_INT_Q0_DONE;
+ } else {
+ rx_intr_status &= DP_DL_Q1_STATUS_MASK;
+ if (rx_intr_qdone & DPMAIF_DL_INT_DLQ1_QDONE)
+ rx_intr_status &= ~DP_DL_INT_Q1_DONE;
+ }
+
+ if (rx_intr_status)
+ t7xx_dpmaif_hw_check_rx_intr(hw_info, rx_intr_status, para, qno);
+ }
+
+ return para->intr_cnt;
+}
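+
+/*
+ * Illustrative sketch (not part of the driver): an interrupt handler is
+ * expected to consume the collected parameters roughly like this (assumed):
+ *
+ *	memset(&para, 0, sizeof(para));
+ *	cnt = t7xx_dpmaif_hw_get_intr_cnt(hw_info, &para, qno);
+ *	for (i = 0; i < cnt; i++) {
+ *		switch (para.intr_types[i]) {
+ *		case DPF_INTR_UL_DONE:
+ *			... release TX resources for para.intr_queues[i] ...
+ *			break;
+ *		case DPF_INTR_DL_Q0_DONE:
+ *		case DPF_INTR_DL_Q1_DONE:
+ *			... schedule the RX bottom half for the queue ...
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *	}
+ *
+ * The driver's actual dispatch logic is implemented in t7xx_hif_dpmaif.c.
+ */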
+
+static int t7xx_dpmaif_sram_init(struct dpmaif_hw_info *hw_info)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AP_MEM_CLR);
+ value |= DPMAIF_MEM_CLR;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AP_MEM_CLR);
+
+ return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_MEM_CLR,
+ value, !(value & DPMAIF_MEM_CLR), 0,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+}
+
+static void t7xx_dpmaif_hw_reset(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_ASSERT);
+ udelay(2);
+ iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_ASSERT);
+ udelay(2);
+ iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_DEASSERT);
+ udelay(2);
+ iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_DEASSERT);
+ udelay(2);
+}
+
+static int t7xx_dpmaif_hw_config(struct dpmaif_hw_info *hw_info)
+{
+ u32 ap_port_mode;
+ int ret;
+
+ t7xx_dpmaif_hw_reset(hw_info);
+
+ ret = t7xx_dpmaif_sram_init(hw_info);
+ if (ret)
+ return ret;
+
+ ap_port_mode = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ ap_port_mode |= DPMAIF_PORT_MODE_PCIE;
+ iowrite32(ap_port_mode, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ iowrite32(DPMAIF_CG_EN, hw_info->pcie_base + DPMAIF_AP_CG_EN);
+ return 0;
+}
+
+static void t7xx_dpmaif_pcie_dpmaif_sign(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_PCIE_MODE_SET_VALUE, hw_info->pcie_base + DPMAIF_UL_RESERVE_AO_RW);
+}
+
+static void t7xx_dpmaif_dl_performance(struct dpmaif_hw_info *hw_info)
+{
+ u32 enable_bat_cache, enable_pit_burst;
+
+ enable_bat_cache = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+ enable_bat_cache |= DPMAIF_DL_BAT_CACHE_PRI;
+ iowrite32(enable_bat_cache, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+
+ enable_pit_burst = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ enable_pit_burst |= DPMAIF_DL_BURST_PIT_EN;
+ iowrite32(enable_pit_burst, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+}
+
+/* DPMAIF DL DLQ HW configuration */
+
+static void t7xx_dpmaif_hw_hpc_cntl_set(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = DPMAIF_HPC_DLQ_PATH_MODE | DPMAIF_HPC_ADD_MODE_DF << 2;
+ value |= DPMAIF_HASH_PRIME_DF << 4;
+ value |= DPMAIF_HPC_TOTAL_NUM << 8;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_HPC_CNTL);
+}
+
+static void t7xx_dpmaif_hw_agg_cfg_set(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = DPMAIF_AGG_MAX_LEN_DF | DPMAIF_AGG_TBL_ENT_NUM_DF << 16;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_DLQ_AGG_CFG);
+}
+
+static void t7xx_dpmaif_hw_hash_bit_choose_set(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_DLQ_HASH_BIT_CHOOSE_DF,
+ hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_INIT_CON5);
+}
+
+static void t7xx_dpmaif_hw_mid_pit_timeout_thres_set(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_MID_TIMEOUT_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT0);
+}
+
+static void t7xx_dpmaif_hw_dlq_timeout_thres_set(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value, i;
+
+ /* Each register holds two DLQ threshold timeout values */
+ for (i = 0; i < DPMAIF_HPC_MAX_TOTAL_NUM / 2; i++) {
+ value = FIELD_PREP(DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS, DPMAIF_DLQ_TIMEOUT_THRES_DF);
+ value |= FIELD_PREP(DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK,
+ DPMAIF_DLQ_TIMEOUT_THRES_DF);
+ iowrite32(value,
+ hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT1 + sizeof(u32) * i);
+ }
+}
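+
+/*
+ * Layout note (illustrative): each 32-bit DLQPIT timeout register packs two
+ * 16-bit thresholds, so DPMAIF_HPC_MAX_TOTAL_NUM entries fit in half as many
+ * registers.  Assuming the mask names reflect the bit ranges, every register
+ * written above ends up as:
+ *
+ *	bits [15:0]  = DPMAIF_DLQ_TIMEOUT_THRES_DF
+ *	bits [31:16] = DPMAIF_DLQ_TIMEOUT_THRES_DF
+ */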
+
+static void t7xx_dpmaif_hw_dlq_start_prs_thres_set(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_DLQ_PRS_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TRIG_THRES);
+}
+
+static void t7xx_dpmaif_dl_dlq_hpc_hw_init(struct dpmaif_hw_info *hw_info)
+{
+ t7xx_dpmaif_hw_hpc_cntl_set(hw_info);
+ t7xx_dpmaif_hw_agg_cfg_set(hw_info);
+ t7xx_dpmaif_hw_hash_bit_choose_set(hw_info);
+ t7xx_dpmaif_hw_mid_pit_timeout_thres_set(hw_info);
+ t7xx_dpmaif_hw_dlq_timeout_thres_set(hw_info);
+ t7xx_dpmaif_hw_dlq_start_prs_thres_set(hw_info);
+}
+
+static int t7xx_dpmaif_dl_bat_init_done(struct dpmaif_hw_info *hw_info, bool frg_en)
+{
+ u32 value, dl_bat_init = 0;
+ int ret;
+
+ if (frg_en)
+ dl_bat_init = DPMAIF_DL_BAT_FRG_INIT;
+
+ dl_bat_init |= DPMAIF_DL_BAT_INIT_ALLSET;
+ dl_bat_init |= DPMAIF_DL_BAT_INIT_EN;
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
+ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (ret) {
+ dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n");
+ return ret;
+ }
+
+ iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
+ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (ret)
+ dev_err(hw_info->dev, "Data plane modem DL BAT initialization failed\n");
+
+ return ret;
+}
+
+static void t7xx_dpmaif_dl_set_bat_base_addr(struct dpmaif_hw_info *hw_info,
+ dma_addr_t addr)
+{
+ iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON0);
+ iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON3);
+}
+
+static void t7xx_dpmaif_dl_set_bat_size(struct dpmaif_hw_info *hw_info, unsigned int size)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+ value &= ~DPMAIF_BAT_SIZE_MSK;
+ value |= size & DPMAIF_BAT_SIZE_MSK;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+}
+
+static void t7xx_dpmaif_dl_bat_en(struct dpmaif_hw_info *hw_info, bool enable)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+
+ if (enable)
+ value |= DPMAIF_BAT_EN_MSK;
+ else
+ value &= ~DPMAIF_BAT_EN_MSK;
+
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+}
+
+static void t7xx_dpmaif_dl_set_ao_bid_maxcnt(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
+ value &= ~DPMAIF_BAT_BID_MAXCNT_MSK;
+ value |= FIELD_PREP(DPMAIF_BAT_BID_MAXCNT_MSK, DPMAIF_HW_PKT_BIDCNT);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
+}
+
+static void t7xx_dpmaif_dl_set_ao_mtu(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_HW_MTU_SIZE, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON1);
+}
+
+static void t7xx_dpmaif_dl_set_ao_pit_chknum(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+ value &= ~DPMAIF_PIT_CHK_NUM_MSK;
+ value |= FIELD_PREP(DPMAIF_PIT_CHK_NUM_MSK, DPMAIF_HW_CHK_PIT_NUM);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+}
+
+static void t7xx_dpmaif_dl_set_ao_remain_minsz(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
+ value &= ~DPMAIF_BAT_REMAIN_MINSZ_MSK;
+ value |= FIELD_PREP(DPMAIF_BAT_REMAIN_MINSZ_MSK,
+ DPMAIF_HW_BAT_REMAIN / DPMAIF_BAT_REMAIN_SZ_BASE);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
+}
+
+static void t7xx_dpmaif_dl_set_ao_bat_bufsz(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+ value &= ~DPMAIF_BAT_BUF_SZ_MSK;
+ value |= FIELD_PREP(DPMAIF_BAT_BUF_SZ_MSK,
+ DPMAIF_HW_BAT_PKTBUF / DPMAIF_BAT_BUFFER_SZ_BASE);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+}
+
+static void t7xx_dpmaif_dl_set_ao_bat_rsv_length(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+ value &= ~DPMAIF_BAT_RSV_LEN_MSK;
+ value |= DPMAIF_HW_BAT_RSVLEN;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+}
+
+static void t7xx_dpmaif_dl_set_pkt_alignment(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ value &= ~DPMAIF_PKT_ALIGN_MSK;
+ value |= DPMAIF_PKT_ALIGN_EN;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+}
+
+static void t7xx_dpmaif_dl_set_pkt_checksum(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ value |= DPMAIF_DL_PKT_CHECKSUM_EN;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+}
+
+static void t7xx_dpmaif_dl_set_ao_frg_check_thres(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+ value &= ~DPMAIF_FRG_CHECK_THRES_MSK;
+ value |= DPMAIF_HW_CHK_FRG_NUM;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+}
+
+static void t7xx_dpmaif_dl_set_ao_frg_bufsz(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+ value &= ~DPMAIF_FRG_BUF_SZ_MSK;
+ value |= FIELD_PREP(DPMAIF_FRG_BUF_SZ_MSK,
+ DPMAIF_HW_FRG_PKTBUF / DPMAIF_FRG_BUFFER_SZ_BASE);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+}
+
+static void t7xx_dpmaif_dl_frg_ao_en(struct dpmaif_hw_info *hw_info, bool enable)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+
+ if (enable)
+ value |= DPMAIF_FRG_EN_MSK;
+ else
+ value &= ~DPMAIF_FRG_EN_MSK;
+
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+}
+
+static void t7xx_dpmaif_dl_set_ao_bat_check_thres(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ value &= ~DPMAIF_BAT_CHECK_THRES_MSK;
+ value |= FIELD_PREP(DPMAIF_BAT_CHECK_THRES_MSK, DPMAIF_HW_CHK_BAT_NUM);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+}
+
+static void t7xx_dpmaif_dl_set_pit_seqnum(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END);
+ value &= ~DPMAIF_DL_PIT_SEQ_MSK;
+ value |= DPMAIF_DL_PIT_SEQ_VALUE;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END);
+}
+
+static void t7xx_dpmaif_dl_set_dlq_pit_base_addr(struct dpmaif_hw_info *hw_info,
+ dma_addr_t addr)
+{
+ iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON0);
+ iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON4);
+}
+
+static void t7xx_dpmaif_dl_set_dlq_pit_size(struct dpmaif_hw_info *hw_info, unsigned int size)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1);
+ value &= ~DPMAIF_PIT_SIZE_MSK;
+ value |= size & DPMAIF_PIT_SIZE_MSK;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1);
+ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON2);
+ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
+ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON5);
+ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON6);
+}
+
+static void t7xx_dpmaif_dl_dlq_pit_en(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
+ value |= DPMAIF_DLQPIT_EN_MSK;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
+}
+
+static void t7xx_dpmaif_dl_dlq_pit_init_done(struct dpmaif_hw_info *hw_info,
+ unsigned int pit_idx)
+{
+ unsigned int dl_pit_init;
+ int timeout;
+ u32 value;
+
+ dl_pit_init = DPMAIF_DL_PIT_INIT_ALLSET;
+ dl_pit_init |= (pit_idx << DPMAIF_DLQPIT_CHAN_OFS);
+ dl_pit_init |= DPMAIF_DL_PIT_INIT_EN;
+
+ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT,
+ value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY),
+ DPMAIF_CHECK_DELAY_US,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (timeout) {
+ dev_err(hw_info->dev, "Data plane modem DL PIT is not ready\n");
+ return;
+ }
+
+ iowrite32(dl_pit_init, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT);
+ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT,
+ value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY),
+ DPMAIF_CHECK_DELAY_US,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (timeout)
+ dev_err(hw_info->dev, "Data plane modem DL PIT initialization failed\n");
+}
+
+static void t7xx_dpmaif_config_dlq_pit_hw(struct dpmaif_hw_info *hw_info, unsigned int q_num,
+ struct dpmaif_dl *dl_que)
+{
+ t7xx_dpmaif_dl_set_dlq_pit_base_addr(hw_info, dl_que->pit_base);
+ t7xx_dpmaif_dl_set_dlq_pit_size(hw_info, dl_que->pit_size_cnt);
+ t7xx_dpmaif_dl_dlq_pit_en(hw_info);
+ t7xx_dpmaif_dl_dlq_pit_init_done(hw_info, q_num);
+}
+
+static void t7xx_dpmaif_config_all_dlq_hw(struct dpmaif_hw_info *hw_info)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++)
+ t7xx_dpmaif_config_dlq_pit_hw(hw_info, i, &hw_info->dl_que[i]);
+}
+
+static void t7xx_dpmaif_dl_all_q_en(struct dpmaif_hw_info *hw_info, bool enable)
+{
+ u32 dl_bat_init, value;
+ int timeout;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+
+ if (enable)
+ value |= DPMAIF_BAT_EN_MSK;
+ else
+ value &= ~DPMAIF_BAT_EN_MSK;
+
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+ dl_bat_init = DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT;
+ dl_bat_init |= DPMAIF_DL_BAT_INIT_EN;
+
+ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
+ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (timeout)
+ dev_err(hw_info->dev, "Timeout updating BAT setting to HW\n");
+
+ iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT);
+ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
+ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (timeout)
+ dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n");
+}
+
+static int t7xx_dpmaif_config_dlq_hw(struct dpmaif_hw_info *hw_info)
+{
+ struct dpmaif_dl *dl_que;
+ int ret;
+
+ t7xx_dpmaif_dl_dlq_hpc_hw_init(hw_info);
+
+ dl_que = &hw_info->dl_que[0]; /* All queues share one BAT/frag BAT table */
+ if (!dl_que->que_started)
+ return -EBUSY;
+
+ t7xx_dpmaif_dl_set_ao_remain_minsz(hw_info);
+ t7xx_dpmaif_dl_set_ao_bat_bufsz(hw_info);
+ t7xx_dpmaif_dl_set_ao_frg_bufsz(hw_info);
+ t7xx_dpmaif_dl_set_ao_bat_rsv_length(hw_info);
+ t7xx_dpmaif_dl_set_ao_bid_maxcnt(hw_info);
+ t7xx_dpmaif_dl_set_pkt_alignment(hw_info);
+ t7xx_dpmaif_dl_set_pit_seqnum(hw_info);
+ t7xx_dpmaif_dl_set_ao_mtu(hw_info);
+ t7xx_dpmaif_dl_set_ao_pit_chknum(hw_info);
+ t7xx_dpmaif_dl_set_ao_bat_check_thres(hw_info);
+ t7xx_dpmaif_dl_set_ao_frg_check_thres(hw_info);
+ t7xx_dpmaif_dl_frg_ao_en(hw_info, true);
+
+ t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->frg_base);
+ t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->frg_size_cnt);
+ t7xx_dpmaif_dl_bat_en(hw_info, true);
+
+ ret = t7xx_dpmaif_dl_bat_init_done(hw_info, true);
+ if (ret)
+ return ret;
+
+ t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->bat_base);
+ t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->bat_size_cnt);
+ t7xx_dpmaif_dl_bat_en(hw_info, false);
+
+ ret = t7xx_dpmaif_dl_bat_init_done(hw_info, false);
+ if (ret)
+ return ret;
+
+ /* Init PIT (two PIT tables) */
+ t7xx_dpmaif_config_all_dlq_hw(hw_info);
+ t7xx_dpmaif_dl_all_q_en(hw_info, true);
+ t7xx_dpmaif_dl_set_pkt_checksum(hw_info);
+ return 0;
+}
+
+static void t7xx_dpmaif_ul_update_drb_size(struct dpmaif_hw_info *hw_info,
+ unsigned int q_num, unsigned int size)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num));
+ value &= ~DPMAIF_DRB_SIZE_MSK;
+ value |= size & DPMAIF_DRB_SIZE_MSK;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num));
+}
+
+static void t7xx_dpmaif_ul_update_drb_base_addr(struct dpmaif_hw_info *hw_info,
+ unsigned int q_num, dma_addr_t addr)
+{
+ iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_ULQSAR_n(q_num));
+ iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_UL_DRB_ADDRH_n(q_num));
+}
+
+static void t7xx_dpmaif_ul_rdy_en(struct dpmaif_hw_info *hw_info,
+ unsigned int q_num, bool ready)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+
+ if (ready)
+ value |= BIT(q_num);
+ else
+ value &= ~BIT(q_num);
+
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+}
+
+static void t7xx_dpmaif_ul_arb_en(struct dpmaif_hw_info *hw_info,
+ unsigned int q_num, bool enable)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+
+ if (enable)
+ value |= BIT(q_num + 8);
+ else
+ value &= ~BIT(q_num + 8);
+
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+}
+
+static void t7xx_dpmaif_config_ulq_hw(struct dpmaif_hw_info *hw_info)
+{
+ struct dpmaif_ul *ul_que;
+ int i;
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ ul_que = &hw_info->ul_que[i];
+ if (ul_que->que_started) {
+ t7xx_dpmaif_ul_update_drb_size(hw_info, i, ul_que->drb_size_cnt *
+ DPMAIF_UL_DRB_SIZE_WORD);
+ t7xx_dpmaif_ul_update_drb_base_addr(hw_info, i, ul_que->drb_base);
+ t7xx_dpmaif_ul_rdy_en(hw_info, i, true);
+ t7xx_dpmaif_ul_arb_en(hw_info, i, true);
+ } else {
+ t7xx_dpmaif_ul_arb_en(hw_info, i, false);
+ }
+ }
+}
+
+static int t7xx_dpmaif_hw_init_done(struct dpmaif_hw_info *hw_info)
+{
+ u32 ap_cfg;
+ int ret;
+
+ ap_cfg = ioread32(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG);
+ ap_cfg |= DPMAIF_SRAM_SYNC;
+ iowrite32(ap_cfg, hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG,
+ ap_cfg, !(ap_cfg & DPMAIF_SRAM_SYNC), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ iowrite32(DPMAIF_UL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_UL_INIT_SET);
+ iowrite32(DPMAIF_DL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_DL_INIT_SET);
+ return 0;
+}
+
+static bool t7xx_dpmaif_dl_idle_check(struct dpmaif_hw_info *hw_info)
+{
+ u32 dpmaif_dl_is_busy = ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY);
+
+ return !(dpmaif_dl_is_busy & DPMAIF_DL_IDLE_STS);
+}
+
+static void t7xx_dpmaif_ul_all_q_en(struct dpmaif_hw_info *hw_info, bool enable)
+{
+ u32 ul_arb_en = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+
+ if (enable)
+ ul_arb_en |= DPMAIF_UL_ALL_QUE_ARB_EN;
+ else
+ ul_arb_en &= ~DPMAIF_UL_ALL_QUE_ARB_EN;
+
+ iowrite32(ul_arb_en, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+}
+
+static bool t7xx_dpmaif_ul_idle_check(struct dpmaif_hw_info *hw_info)
+{
+ u32 dpmaif_ul_is_busy = ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY);
+
+ return !(dpmaif_ul_is_busy & DPMAIF_UL_IDLE_STS);
+}
+
+void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num,
+ unsigned int drb_entry_cnt)
+{
+ u32 ul_update, value;
+ int err;
+
+ ul_update = drb_entry_cnt & DPMAIF_UL_ADD_COUNT_MASK;
+ ul_update |= DPMAIF_UL_ADD_UPDATE;
+
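+ /* Wait for any previous add to complete (DPMAIF_UL_ADD_NOT_READY clear),
+ * write the new DRB count, then poll again until the HW accepts it.
+ */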
+ err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num),
+ value, !(value & DPMAIF_UL_ADD_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (err) {
+ dev_err(hw_info->dev, "UL add is not ready\n");
+ return;
+ }
+
+ iowrite32(ul_update, hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num));
+
+ err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num),
+ value, !(value & DPMAIF_UL_ADD_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (err)
+ dev_err(hw_info->dev, "Timeout updating UL add\n");
+}
+
+unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ unsigned int value = ioread32(hw_info->pcie_base + DPMAIF_ULQ_STA0_n(q_num));
+
+ return FIELD_GET(DPMAIF_UL_DRB_RIDX_MSK, value) / DPMAIF_UL_DRB_SIZE_WORD;
+}
+
+int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx,
+ unsigned int pit_remain_cnt)
+{
+ u32 dl_update, value;
+ int ret;
+
+ dl_update = pit_remain_cnt & DPMAIF_PIT_REM_CNT_MSK;
+ dl_update |= DPMAIF_DL_ADD_UPDATE | (dlq_pit_idx << DPMAIF_ADD_DLQ_PIT_CHAN_OFS);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD,
+ value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (ret) {
+ dev_err(hw_info->dev, "Data plane modem is not ready to add dlq\n");
+ return ret;
+ }
+
+ iowrite32(dl_update, hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD,
+ value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (ret) {
+ dev_err(hw_info->dev, "Data plane modem add dlq failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info,
+ unsigned int dlq_pit_idx)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_DLQ_WR_IDX +
+ dlq_pit_idx * DLQ_PIT_IDX_SIZE);
+ return value & DPMAIF_DL_RD_WR_IDX_MSK;
+}
+
+static int t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info)
+{
+ u32 value;
+
+ return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD,
+ value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+}
+
+int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt)
+{
+ unsigned int value;
+
+ if (t7xx_dl_add_timedout(hw_info)) {
+ dev_err(hw_info->dev, "DL add BAT not ready\n");
+ return -EBUSY;
+ }
+
+ value = bat_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK;
+ value |= DPMAIF_DL_ADD_UPDATE;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD);
+
+ if (t7xx_dl_add_timedout(hw_info)) {
+ dev_err(hw_info->dev, "DL add BAT timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_RD_IDX);
+ return value & DPMAIF_DL_RD_WR_IDX_MSK;
+}
+
+unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_WR_IDX);
+ return value & DPMAIF_DL_RD_WR_IDX_MSK;
+}
+
+int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt)
+{
+ unsigned int value;
+
+ if (t7xx_dl_add_timedout(hw_info)) {
+ dev_err(hw_info->dev, "Data plane modem is not ready to add frag DLQ\n");
+ return -EBUSY;
+ }
+
+ value = frg_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK;
+ value |= DPMAIF_DL_FRG_ADD_UPDATE | DPMAIF_DL_ADD_UPDATE;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD);
+
+ if (t7xx_dl_add_timedout(hw_info)) {
+ dev_err(hw_info->dev, "Data plane modem add frag DLQ failed");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_FRGBAT_RD_IDX);
+ return value & DPMAIF_DL_RD_WR_IDX_MSK;
+}
+
+static void t7xx_dpmaif_set_queue_property(struct dpmaif_hw_info *hw_info,
+ struct dpmaif_hw_params *init_para)
+{
+ struct dpmaif_dl *dl_que;
+ struct dpmaif_ul *ul_que;
+ int i;
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ dl_que = &hw_info->dl_que[i];
+ dl_que->bat_base = init_para->pkt_bat_base_addr[i];
+ dl_que->bat_size_cnt = init_para->pkt_bat_size_cnt[i];
+ dl_que->pit_base = init_para->pit_base_addr[i];
+ dl_que->pit_size_cnt = init_para->pit_size_cnt[i];
+ dl_que->frg_base = init_para->frg_bat_base_addr[i];
+ dl_que->frg_size_cnt = init_para->frg_bat_size_cnt[i];
+ dl_que->que_started = true;
+ }
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ ul_que = &hw_info->ul_que[i];
+ ul_que->drb_base = init_para->drb_base_addr[i];
+ ul_que->drb_size_cnt = init_para->drb_size_cnt[i];
+ ul_que->que_started = true;
+ }
+}
+
+/**
+ * t7xx_dpmaif_hw_stop_all_txq() - Stop all TX queues.
+ * @hw_info: Pointer to struct dpmaif_hw_info.
+ *
+ * Disable the HW UL queues and poll the busy UL queues until they go idle,
+ * for up to DPMAIF_MAX_CHECK_COUNT (1000000) attempts.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ETIMEDOUT - Timed out checking busy queues.
+ */
+int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info)
+{
+ int count = 0;
+
+ t7xx_dpmaif_ul_all_q_en(hw_info, false);
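+ /* Poll until all UL queues report idle, up to DPMAIF_MAX_CHECK_COUNT attempts */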
+ while (t7xx_dpmaif_ul_idle_check(hw_info)) {
+ if (++count >= DPMAIF_MAX_CHECK_COUNT) {
+ dev_err(hw_info->dev, "Failed to stop TX, status: 0x%x\n",
+ ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY));
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * t7xx_dpmaif_hw_stop_all_rxq() - Stop all RX queues.
+ * @hw_info: Pointer to struct dpmaif_hw_info.
+ *
+ * Disable the HW DL queues and poll the busy DL queues until they go idle,
+ * for up to DPMAIF_MAX_CHECK_COUNT (1000000) attempts.
+ * Then, with the same attempt count, poll until the HW PIT write index
+ * equals the read index.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ETIMEDOUT - Timed out checking busy queues.
+ */
+int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int wr_idx, rd_idx;
+ int count = 0;
+
+ t7xx_dpmaif_dl_all_q_en(hw_info, false);
+ while (t7xx_dpmaif_dl_idle_check(hw_info)) {
+ if (++count >= DPMAIF_MAX_CHECK_COUNT) {
+ dev_err(hw_info->dev, "Failed to stop RX, status: 0x%x\n",
+ ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY));
+ return -ETIMEDOUT;
+ }
+ }
+
+ /* Check middle PIT sync done */
+ count = 0;
+ do {
+ wr_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_WR_IDX);
+ wr_idx &= DPMAIF_DL_RD_WR_IDX_MSK;
+ rd_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_RD_IDX);
+ rd_idx &= DPMAIF_DL_RD_WR_IDX_MSK;
+
+ if (wr_idx == rd_idx)
+ return 0;
+ } while (++count < DPMAIF_MAX_CHECK_COUNT);
+
+ dev_err(hw_info->dev, "Check middle PIT sync fail\n");
+ return -ETIMEDOUT;
+}
+
+void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info)
+{
+ t7xx_dpmaif_ul_all_q_en(hw_info, true);
+ t7xx_dpmaif_dl_all_q_en(hw_info, true);
+}
+
+/**
+ * t7xx_dpmaif_hw_init() - Initialize HW data path API.
+ * @hw_info: Pointer to struct dpmaif_hw_info.
+ * @init_param: Pointer to struct dpmaif_hw_params.
+ *
+ * Configures the port mode and clocks, initializes the HW interrupts, and
+ * sets up the HW queues.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code from a failed sub-initialization.
+ */
+int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param)
+{
+ int ret;
+
+ ret = t7xx_dpmaif_hw_config(hw_info);
+ if (ret) {
+ dev_err(hw_info->dev, "DPMAIF HW config failed\n");
+ return ret;
+ }
+
+ ret = t7xx_dpmaif_init_intr(hw_info);
+ if (ret) {
+ dev_err(hw_info->dev, "DPMAIF HW interrupts init failed\n");
+ return ret;
+ }
+
+ t7xx_dpmaif_set_queue_property(hw_info, init_param);
+ t7xx_dpmaif_pcie_dpmaif_sign(hw_info);
+ t7xx_dpmaif_dl_performance(hw_info);
+
+ ret = t7xx_dpmaif_config_dlq_hw(hw_info);
+ if (ret) {
+ dev_err(hw_info->dev, "DPMAIF HW dlq config failed\n");
+ return ret;
+ }
+
+ t7xx_dpmaif_config_ulq_hw(hw_info);
+
+ ret = t7xx_dpmaif_hw_init_done(hw_info);
+ if (ret)
+ dev_err(hw_info->dev, "DPMAIF HW queue init failed\n");
+
+ return ret;
+}
+
+bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno)
+{
+ u32 intr_status;
+
+ intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+ intr_status &= BIT(DP_UL_INT_DONE_OFFSET + qno);
+ if (intr_status) {
+ iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_dpmaif.h
new file mode 100644
index 0000000000..ae292355a3
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_DPMAIF_H__
+#define __T7XX_DPMAIF_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define DPMAIF_DL_PIT_SEQ_VALUE 251
+#define DPMAIF_UL_DRB_SIZE_WORD 4
+
+#define DPMAIF_MAX_CHECK_COUNT 1000000
+#define DPMAIF_CHECK_TIMEOUT_US 10000
+#define DPMAIF_CHECK_INIT_TIMEOUT_US 100000
+#define DPMAIF_CHECK_DELAY_US 10
+
+#define DPMAIF_RXQ_NUM 2
+#define DPMAIF_TXQ_NUM 5
+
+struct dpmaif_isr_en_mask {
+ unsigned int ap_ul_l2intr_en_msk;
+ unsigned int ap_dl_l2intr_en_msk;
+ unsigned int ap_udl_ip_busy_en_msk;
+ unsigned int ap_dl_l2intr_err_en_msk;
+};
+
+struct dpmaif_ul {
+ bool que_started;
+ unsigned char reserved[3];
+ dma_addr_t drb_base;
+ unsigned int drb_size_cnt;
+};
+
+struct dpmaif_dl {
+ bool que_started;
+ unsigned char reserved[3];
+ dma_addr_t pit_base;
+ unsigned int pit_size_cnt;
+ dma_addr_t bat_base;
+ unsigned int bat_size_cnt;
+ dma_addr_t frg_base;
+ unsigned int frg_size_cnt;
+ unsigned int pit_seq;
+};
+
+struct dpmaif_hw_info {
+ struct device *dev;
+ void __iomem *pcie_base;
+ struct dpmaif_dl dl_que[DPMAIF_RXQ_NUM];
+ struct dpmaif_ul ul_que[DPMAIF_TXQ_NUM];
+ struct dpmaif_isr_en_mask isr_en_mask;
+};
+
+/* DPMAIF HW Initialization parameter structure */
+struct dpmaif_hw_params {
+ /* UL part */
+ dma_addr_t drb_base_addr[DPMAIF_TXQ_NUM];
+ unsigned int drb_size_cnt[DPMAIF_TXQ_NUM];
+ /* DL part */
+ dma_addr_t pkt_bat_base_addr[DPMAIF_RXQ_NUM];
+ unsigned int pkt_bat_size_cnt[DPMAIF_RXQ_NUM];
+ dma_addr_t frg_bat_base_addr[DPMAIF_RXQ_NUM];
+ unsigned int frg_bat_size_cnt[DPMAIF_RXQ_NUM];
+ dma_addr_t pit_base_addr[DPMAIF_RXQ_NUM];
+ unsigned int pit_size_cnt[DPMAIF_RXQ_NUM];
+};
+
+enum dpmaif_hw_intr_type {
+ DPF_INTR_INVALID_MIN,
+ DPF_INTR_UL_DONE,
+ DPF_INTR_UL_DRB_EMPTY,
+ DPF_INTR_UL_MD_NOTREADY,
+ DPF_INTR_UL_MD_PWR_NOTREADY,
+ DPF_INTR_UL_LEN_ERR,
+ DPF_INTR_DL_DONE,
+ DPF_INTR_DL_SKB_LEN_ERR,
+ DPF_INTR_DL_BATCNT_LEN_ERR,
+ DPF_INTR_DL_PITCNT_LEN_ERR,
+ DPF_INTR_DL_PKT_EMPTY_SET,
+ DPF_INTR_DL_FRG_EMPTY_SET,
+ DPF_INTR_DL_MTU_ERR,
+ DPF_INTR_DL_FRGCNT_LEN_ERR,
+ DPF_INTR_DL_Q0_PITCNT_LEN_ERR,
+ DPF_INTR_DL_Q1_PITCNT_LEN_ERR,
+ DPF_INTR_DL_HPC_ENT_TYPE_ERR,
+ DPF_INTR_DL_Q0_DONE,
+ DPF_INTR_DL_Q1_DONE,
+ DPF_INTR_INVALID_MAX
+};
+
+#define DPF_RX_QNO0 0
+#define DPF_RX_QNO1 1
+#define DPF_RX_QNO_DFT DPF_RX_QNO0
+
+struct dpmaif_hw_intr_st_para {
+ unsigned int intr_cnt;
+ enum dpmaif_hw_intr_type intr_types[DPF_INTR_INVALID_MAX - 1];
+ unsigned int intr_queues[DPF_INTR_INVALID_MAX - 1];
+};
+
+#define DPMAIF_HW_BAT_REMAIN 64
+#define DPMAIF_HW_BAT_PKTBUF (128 * 28)
+#define DPMAIF_HW_FRG_PKTBUF 128
+#define DPMAIF_HW_BAT_RSVLEN 64
+#define DPMAIF_HW_PKT_BIDCNT 1
+#define DPMAIF_HW_MTU_SIZE (3 * 1024 + 8)
+#define DPMAIF_HW_CHK_BAT_NUM 62
+#define DPMAIF_HW_CHK_FRG_NUM 3
+#define DPMAIF_HW_CHK_PIT_NUM (2 * DPMAIF_HW_CHK_BAT_NUM)
+
+#define DP_UL_INT_DONE_OFFSET 0
+#define DP_UL_INT_QDONE_MSK GENMASK(4, 0)
+#define DP_UL_INT_EMPTY_MSK GENMASK(9, 5)
+#define DP_UL_INT_MD_NOTREADY_MSK GENMASK(14, 10)
+#define DP_UL_INT_MD_PWR_NOTREADY_MSK GENMASK(19, 15)
+#define DP_UL_INT_ERR_MSK GENMASK(24, 20)
+
+#define DP_DL_INT_QDONE_MSK BIT(0)
+#define DP_DL_INT_SKB_LEN_ERR BIT(1)
+#define DP_DL_INT_BATCNT_LEN_ERR BIT(2)
+#define DP_DL_INT_PITCNT_LEN_ERR BIT(3)
+#define DP_DL_INT_PKT_EMPTY_MSK BIT(4)
+#define DP_DL_INT_FRG_EMPTY_MSK BIT(5)
+#define DP_DL_INT_MTU_ERR_MSK BIT(6)
+#define DP_DL_INT_FRG_LEN_ERR_MSK BIT(7)
+#define DP_DL_INT_Q0_PITCNT_LEN_ERR BIT(8)
+#define DP_DL_INT_Q1_PITCNT_LEN_ERR BIT(9)
+#define DP_DL_INT_HPC_ENT_TYPE_ERR BIT(10)
+#define DP_DL_INT_Q0_DONE BIT(13)
+#define DP_DL_INT_Q1_DONE BIT(14)
+
+#define DP_DL_Q0_STATUS_MASK (DP_DL_INT_Q0_PITCNT_LEN_ERR | DP_DL_INT_Q0_DONE)
+#define DP_DL_Q1_STATUS_MASK (DP_DL_INT_Q1_PITCNT_LEN_ERR | DP_DL_INT_Q1_DONE)
+
+int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param);
+int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info);
+int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info);
+void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info);
+int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info,
+ struct dpmaif_hw_intr_st_para *para, int qno);
+void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num);
+void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num,
+ unsigned int drb_entry_cnt);
+int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt);
+int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt);
+int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx,
+ unsigned int pit_remain_cnt);
+void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int qno);
+void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno);
+bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno);
+void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info);
+void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info);
+void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info);
+void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info);
+void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info);
+unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num);
+unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num);
+unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num);
+unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num);
+unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info,
+ unsigned int dlq_pit_idx);
+
+#endif /* __T7XX_DPMAIF_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
new file mode 100644
index 0000000000..cc70360364
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_cldma.h"
+#include "t7xx_hif_cldma.h"
+#include "t7xx_mhccif.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_reg.h"
+#include "t7xx_state_monitor.h"
+
+#define MAX_TX_BUDGET 16
+#define MAX_RX_BUDGET 16
+
+#define CHECK_Q_STOP_TIMEOUT_US 1000000
+#define CHECK_Q_STOP_STEP_US 10000
+
+#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
+
+static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
+ enum mtk_txrx tx_rx, unsigned int index)
+{
+ queue->dir = tx_rx;
+ queue->index = index;
+ queue->md_ctrl = md_ctrl;
+ queue->tr_ring = NULL;
+ queue->tr_done = NULL;
+ queue->tx_next = NULL;
+}
+
+static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
+ enum mtk_txrx tx_rx, unsigned int index)
+{
+ md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
+ init_waitqueue_head(&queue->req_wq);
+ spin_lock_init(&queue->ring_lock);
+}
+
+static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
+{
+ gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
+ gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
+}
+
+static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
+{
+ gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
+ gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
+}
+
+static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
+ size_t size, gfp_t gfp_mask)
+{
+ req->skb = __dev_alloc_skb(size, gfp_mask);
+ if (!req->skb)
+ return -ENOMEM;
+
+ req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
+ dev_kfree_skb_any(req->skb);
+ req->skb = NULL;
+ req->mapped_buff = 0;
+ dev_err(md_ctrl->dev, "DMA mapping failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ unsigned int hwo_polling_count = 0;
+ struct t7xx_cldma_hw *hw_info;
+ bool rx_not_done = true;
+ unsigned long flags;
+ int count = 0;
+
+ hw_info = &md_ctrl->hw_info;
+
+ do {
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ struct sk_buff *skb;
+ int ret;
+
+ req = queue->tr_done;
+ if (!req)
+ return -ENODATA;
+
+ gpd = req->gpd;
+ if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
+ dma_addr_t gpd_addr;
+
+ if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
+ dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
+ return -ENODEV;
+ }
+
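+ /* The HW has not released this GPD yet. If the HW's current DL
+ * pointer is still at this GPD there is nothing more to collect;
+ * otherwise poll briefly (up to 100 times) for the HWO flag to clear.
+ */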
+ gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
+ if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
+ return 0;
+
+ udelay(1);
+ continue;
+ }
+
+ hwo_polling_count = 0;
+ skb = req->skb;
+
+ if (req->mapped_buff) {
+ dma_unmap_single(md_ctrl->dev, req->mapped_buff,
+ queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
+ req->mapped_buff = 0;
+ }
+
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ skb_put(skb, le16_to_cpu(gpd->data_buff_len));
+
+ ret = md_ctrl->recv_skb(queue, skb);
+ /* Break processing, will try again later */
+ if (ret < 0)
+ return ret;
+
+ req->skb = NULL;
+ t7xx_cldma_gpd_set_data_ptr(gpd, 0);
+
+ spin_lock_irqsave(&queue->ring_lock, flags);
+ queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
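+ /* Refill the ring: map a fresh skb into the request at the refill
+ * cursor and hand its GPD back to the HW with HWO set.
+ */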
+ req = queue->rx_refill;
+
+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ gpd = req->gpd;
+ t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
+ gpd->data_buff_len = 0;
+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
+
+ spin_lock_irqsave(&queue->ring_lock, flags);
+ queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+
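+ /* Stop the loop only once the budget is exhausted and a reschedule is pending */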
+ rx_not_done = ++count < budget || !need_resched();
+ } while (rx_not_done);
+
+ *over_budget = true;
+ return 0;
+}
+
+static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned int pending_rx_int;
+ bool over_budget = false;
+ unsigned long flags;
+ int ret;
+
+ hw_info = &md_ctrl->hw_info;
+
+ do {
+ ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
+ if (ret == -ENODATA)
+ return 0;
+ else if (ret)
+ return ret;
+
+ pending_rx_int = 0;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (md_ctrl->rxq_active & BIT(queue->index)) {
+ if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
+ t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);
+
+ pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
+ MTK_RX);
+ if (pending_rx_int) {
+ t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);
+
+ if (over_budget) {
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ return -EAGAIN;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ } while (pending_rx_int);
+
+ return 0;
+}
+
+static void t7xx_cldma_rx_done(struct work_struct *work)
+{
+ struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ int value;
+
+ value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
+ if (value && md_ctrl->rxq_active & BIT(queue->index)) {
+ queue_work(queue->worker, &queue->cldma_work);
+ return;
+ }
+
+ t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
+ t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
+ t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
+ pm_runtime_mark_last_busy(md_ctrl->dev);
+ pm_runtime_put_autosuspend(md_ctrl->dev);
+}
+
+static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ unsigned int dma_len, count = 0;
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ unsigned long flags;
+ dma_addr_t dma_free;
+ struct sk_buff *skb;
+
+ while (!kthread_should_stop()) {
+ spin_lock_irqsave(&queue->ring_lock, flags);
+ req = queue->tr_done;
+ if (!req) {
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+ break;
+ }
+ gpd = req->gpd;
+ if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+ break;
+ }
+ queue->budget++;
+ dma_free = req->mapped_buff;
+ dma_len = le16_to_cpu(gpd->data_buff_len);
+ skb = req->skb;
+ req->skb = NULL;
+ queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+
+ count++;
+ dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ if (count)
+ wake_up_nr(&queue->req_wq, count);
+
+ return count;
+}
+
+static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ struct cldma_request *req;
+ dma_addr_t ul_curr_addr;
+ unsigned long flags;
+ bool pending_gpd;
+
+ if (!(md_ctrl->txq_active & BIT(queue->index)))
+ return;
+
+ spin_lock_irqsave(&queue->ring_lock, flags);
+ req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+
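+ /* tx_next points at the next free slot, so its previous entry is the last
+ * request handed to the HW. If that request still has HWO set, the queue
+ * stopped with data pending: resume it, but only after verifying that the
+ * HW's current TGPD address actually points at that request.
+ */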
+ pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (pending_gpd) {
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+
+ /* Check current processing TGPD, 64-bit address is in a table by Q index */
+ ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
+ if (req->gpd_addr != ul_curr_addr) {
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
+ md_ctrl->hif_id, queue->index);
+ return;
+ }
+
+ t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
+ }
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static void t7xx_cldma_tx_done(struct work_struct *work)
+{
+ struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned int l2_tx_int;
+ unsigned long flags;
+
+ hw_info = &md_ctrl->hw_info;
+ t7xx_cldma_gpd_tx_collect(queue);
+ l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
+ MTK_TX);
+ if (l2_tx_int & EQ_STA_BIT(queue->index)) {
+ t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
+ t7xx_cldma_txq_empty_hndl(queue);
+ }
+
+ if (l2_tx_int & BIT(queue->index)) {
+ t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
+ queue_work(queue->worker, &queue->cldma_work);
+ return;
+ }
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (md_ctrl->txq_active & BIT(queue->index)) {
+ t7xx_cldma_clear_ip_busy(hw_info);
+ t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
+ t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
+ }
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ pm_runtime_mark_last_busy(md_ctrl->dev);
+ pm_runtime_put_autosuspend(md_ctrl->dev);
+}
+
+static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
+ struct cldma_ring *ring, enum dma_data_direction tx_rx)
+{
+ struct cldma_request *req_cur, *req_next;
+
+ list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
+ if (req_cur->mapped_buff && req_cur->skb) {
+ dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
+ ring->pkt_size, tx_rx);
+ req_cur->mapped_buff = 0;
+ }
+
+ dev_kfree_skb_any(req_cur->skb);
+
+ if (req_cur->gpd)
+ dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);
+
+ list_del(&req_cur->entry);
+ kfree(req_cur);
+ }
+}
+
+static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
+{
+ struct cldma_request *req;
+ int val;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
+ if (!req->gpd)
+ goto err_free_req;
+
+ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
+ if (val)
+ goto err_free_pool;
+
+ return req;
+
+err_free_pool:
+ dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);
+
+err_free_req:
+ kfree(req);
+
+ return NULL;
+}
+
+static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
+{
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ int i;
+
+ INIT_LIST_HEAD(&ring->gpd_ring);
+ ring->length = MAX_RX_BUDGET;
+
+ for (i = 0; i < ring->length; i++) {
+ req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
+ if (!req) {
+ t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
+ return -ENOMEM;
+ }
+
+ gpd = req->gpd;
+ t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
+ gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
+ INIT_LIST_HEAD(&req->entry);
+ list_add_tail(&req->entry, &ring->gpd_ring);
+ }
+
+ /* Link previous GPD to next GPD, circular */
+ list_for_each_entry(req, &ring->gpd_ring, entry) {
+ t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
+ gpd = req->gpd;
+ }
+
+ return 0;
+}
+
+static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
+{
+ struct cldma_request *req;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
+ if (!req->gpd) {
+ kfree(req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
+{
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ int i;
+
+ INIT_LIST_HEAD(&ring->gpd_ring);
+ ring->length = MAX_TX_BUDGET;
+
+ for (i = 0; i < ring->length; i++) {
+ req = t7xx_alloc_tx_request(md_ctrl);
+ if (!req) {
+ t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ gpd = req->gpd;
+ gpd->flags = GPD_FLAGS_IOC;
+ INIT_LIST_HEAD(&req->entry);
+ list_add_tail(&req->entry, &ring->gpd_ring);
+ }
+
+ /* Link previous GPD to next GPD, circular */
+ list_for_each_entry(req, &ring->gpd_ring, entry) {
+ t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
+ gpd = req->gpd;
+ }
+
+ return 0;
+}
+
+/**
+ * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
+ * @queue: Pointer to the queue structure.
+ *
+ * Called with ring_lock held (unless called during the initialization phase).
+ */
+static void t7xx_cldma_q_reset(struct cldma_queue *queue)
+{
+ struct cldma_request *req;
+
+ req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
+ queue->tr_done = req;
+ queue->budget = queue->tr_ring->length;
+
+ if (queue->dir == MTK_TX)
+ queue->tx_next = req;
+ else
+ queue->rx_refill = req;
+}
+
+static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+
+ queue->dir = MTK_RX;
+ queue->tr_ring = &md_ctrl->rx_ring[queue->index];
+ t7xx_cldma_q_reset(queue);
+}
+
+static void t7xx_cldma_txq_init(struct cldma_queue *queue)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+
+ queue->dir = MTK_TX;
+ queue->tr_ring = &md_ctrl->tx_ring[queue->index];
+ t7xx_cldma_q_reset(queue);
+}
+
+static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
+{
+ t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
+}
+
+static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
+{
+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
+}
+
+static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
+{
+ unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ int i;
+
+ /* L2 raw interrupt status */
+ l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
+ l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
+ l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
+ l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
+ l2_tx_int &= ~l2_tx_int_msk;
+ l2_rx_int &= ~l2_rx_int_msk;
+
+ if (l2_tx_int) {
+ if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
+ /* Read and clear L3 TX interrupt status */
+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
+ }
+
+ t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
+ if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
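+ /* Bits 0..CLDMA_TXQ_NUM-1 are per-queue TX-done interrupts;
+ * the bits above them are the corresponding queue-empty
+ * interrupts, which are handled inline here.
+ */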
+ for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
+ if (i < CLDMA_TXQ_NUM) {
+ pm_runtime_get(md_ctrl->dev);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
+ queue_work(md_ctrl->txq[i].worker,
+ &md_ctrl->txq[i].cldma_work);
+ } else {
+ t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
+ }
+ }
+ }
+ }
+
+ if (l2_rx_int) {
+ if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
+ /* Read and clear L3 RX interrupt status */
+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
+ }
+
+ t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
+ if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
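+ /* Fold the queue-empty bits onto the RX-done bits so one work
+ * item per queue services both conditions.
+ */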
+ l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
+ for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
+ pm_runtime_get(md_ctrl->dev);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
+ queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
+ }
+ }
+ }
+}
+
+static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ unsigned int tx_active;
+ unsigned int rx_active;
+
+ if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
+ return false;
+
+ tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
+ rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);
+
+ return tx_active || rx_active;
+}
+
+/**
+ * t7xx_cldma_stop() - Stop CLDMA.
+ * @md_ctrl: CLDMA context structure.
+ *
+ * Stop TX and RX queues. Disable L1 and L2 interrupts.
+ * Clear status registers.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code from polling t7xx_cldma_qs_are_active().
+ */
+int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ bool active;
+ int i, ret;
+
+ md_ctrl->rxq_active = 0;
+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
+ md_ctrl->txq_active = 0;
+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
+ md_ctrl->txq_started = 0;
+ t7xx_cldma_disable_irq(md_ctrl);
+ t7xx_cldma_hw_stop(hw_info, MTK_RX);
+ t7xx_cldma_hw_stop(hw_info, MTK_TX);
+ t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
+ t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);
+
+ if (md_ctrl->is_late_init) {
+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
+ flush_work(&md_ctrl->txq[i].cldma_work);
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
+ flush_work(&md_ctrl->rxq[i].cldma_work);
+ }
+
+ ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
+ CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
+ if (ret)
+ dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);
+
+ return ret;
+}
+
+static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
+{
+ int i;
+
+ if (!md_ctrl->is_late_init)
+ return;
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);
+
+ dma_pool_destroy(md_ctrl->gpd_dmapool);
+ md_ctrl->gpd_dmapool = NULL;
+ md_ctrl->is_late_init = false;
+}
+
+void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ md_ctrl->txq_active = 0;
+ md_ctrl->rxq_active = 0;
+ t7xx_cldma_disable_irq(md_ctrl);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
+ cancel_work_sync(&md_ctrl->txq[i].cldma_work);
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ }
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
+ cancel_work_sync(&md_ctrl->rxq[i].cldma_work);
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ }
+
+ t7xx_cldma_late_release(md_ctrl);
+}
+
+/**
+ * t7xx_cldma_start() - Start CLDMA.
+ * @md_ctrl: CLDMA context structure.
+ *
+ * Set TX/RX start address.
+ * Start all RX queues and enable L2 interrupt.
+ */
+void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (md_ctrl->is_late_init) {
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ int i;
+
+ t7xx_cldma_enable_irq(md_ctrl);
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
+ if (md_ctrl->txq[i].tr_done)
+ t7xx_cldma_hw_set_start_addr(hw_info, i,
+ md_ctrl->txq[i].tr_done->gpd_addr,
+ MTK_TX);
+ }
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
+ if (md_ctrl->rxq[i].tr_done)
+ t7xx_cldma_hw_set_start_addr(hw_info, i,
+ md_ctrl->rxq[i].tr_done->gpd_addr,
+ MTK_RX);
+ }
+
+ /* Enable L2 interrupt */
+ t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
+ t7xx_cldma_hw_start(hw_info);
+ md_ctrl->txq_started = 0;
+ md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
+ md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
+ }
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
+{
+ struct cldma_queue *txq = &md_ctrl->txq[qnum];
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&txq->ring_lock, flags);
+ t7xx_cldma_q_reset(txq);
+ list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
+ gpd = req->gpd;
+ gpd->flags &= ~GPD_FLAGS_HWO;
+ t7xx_cldma_gpd_set_data_ptr(gpd, 0);
+ gpd->data_buff_len = 0;
+ dev_kfree_skb_any(req->skb);
+ req->skb = NULL;
+ }
+ spin_unlock_irqrestore(&txq->ring_lock, flags);
+}
+
+static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
+{
+ struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&rxq->ring_lock, flags);
+ t7xx_cldma_q_reset(rxq);
+ list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
+ gpd = req->gpd;
+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
+ gpd->data_buff_len = 0;
+
+ if (req->skb) {
+ req->skb->len = 0;
+ skb_reset_tail_pointer(req->skb);
+ }
+ }
+
+ list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
+ if (req->skb)
+ continue;
+
+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
+ if (ret)
+ break;
+
+ t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
+ }
+ spin_unlock_irqrestore(&rxq->ring_lock, flags);
+
+ return ret;
+}
+
+void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
+{
+ int i;
+
+ if (tx_rx == MTK_TX) {
+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
+ t7xx_cldma_clear_txq(md_ctrl, i);
+ } else {
+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
+ t7xx_cldma_clear_rxq(md_ctrl, i);
+ }
+}
+
+void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
+ if (tx_rx == MTK_RX)
+ md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
+ else
+ md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
+ struct sk_buff *skb)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ struct cldma_gpd *gpd = tx_req->gpd;
+ unsigned long flags;
+
+ /* Update GPD */
+ tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
+ dev_err(md_ctrl->dev, "DMA mapping failed\n");
+ return -ENOMEM;
+ }
+
+ t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
+ gpd->data_buff_len = cpu_to_le16(skb->len);
+
+ /* This lock must cover TGPD setting, as even without a resume operation,
+ * CLDMA can send next HWO=1 if last TGPD just finished.
+ */
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (md_ctrl->txq_active & BIT(queue->index))
+ gpd->flags |= GPD_FLAGS_HWO;
+
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ tx_req->skb = skb;
+ return 0;
+}
+
+/* Called with cldma_lock */
+static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
+ struct cldma_request *prev_req)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+
+ /* Check whether the device was powered off (CLDMA start address is not set) */
+ if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
+ t7xx_cldma_hw_init(hw_info);
+ t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
+ md_ctrl->txq_started &= ~BIT(qno);
+ }
+
+ if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
+ if (md_ctrl->txq_started & BIT(qno))
+ t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
+ else
+ t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);
+
+ md_ctrl->txq_started |= BIT(qno);
+ }
+}
+
+/**
+ * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
+ * @md_ctrl: CLDMA context structure.
+ * @recv_skb: Receiving skb callback.
+ */
+void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
+{
+ md_ctrl->recv_skb = recv_skb;
+}
+
+/**
+ * t7xx_cldma_send_skb() - Send control data to modem.
+ * @md_ctrl: CLDMA context structure.
+ * @qno: Queue number.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ENOMEM - Allocation failure.
+ * * -EINVAL - Invalid queue request.
+ * * -EIO - Queue is not active.
+ * * -ETIMEDOUT - Timeout waiting for the device to wake up.
+ */
+int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
+{
+ struct cldma_request *tx_req;
+ struct cldma_queue *queue;
+ unsigned long flags;
+ int ret;
+
+ if (qno >= CLDMA_TXQ_NUM)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(md_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
+ queue = &md_ctrl->txq[qno];
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (!(md_ctrl->txq_active & BIT(qno))) {
+ ret = -EIO;
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ goto allow_sleep;
+ }
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
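+ /* Take the next free TGPD and hand it to the HW; if the queue has no
+ * budget left, resume the HW queue and sleep on req_wq until a TX
+ * completion frees an entry.
+ */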
+ do {
+ spin_lock_irqsave(&queue->ring_lock, flags);
+ tx_req = queue->tx_next;
+ if (queue->budget > 0 && !tx_req->skb) {
+ struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;
+
+ queue->budget--;
+ t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
+ queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+
+ if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ /* Protect the access to the modem for queues operations (resume/start)
+ * which access shared locations by all the queues.
+ * cldma_lock is independent of ring_lock which is per queue.
+ */
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ break;
+ }
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+
+ if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ }
+
+ ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
+ } while (!ret);
+
+allow_sleep:
+ t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(md_ctrl->dev);
+ pm_runtime_put_autosuspend(md_ctrl->dev);
+ return ret;
+}
+
+static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
+{
+ char dma_pool_name[32];
+ int i, j, ret;
+
+ if (md_ctrl->is_late_init) {
+ dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
+ return -EALREADY;
+ }
+
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);
+
+ md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
+ sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
+ if (!md_ctrl->gpd_dmapool) {
+ dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
+ ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
+ if (ret) {
+ dev_err(md_ctrl->dev, "control TX ring init fail\n");
+ goto err_free_tx_ring;
+ }
+
+ md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
+ }
+
+ for (j = 0; j < CLDMA_RXQ_NUM; j++) {
+ md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
+
+ if (j == CLDMA_RXQ_NUM - 1)
+ md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
+
+ ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
+ if (ret) {
+ dev_err(md_ctrl->dev, "Control RX ring init fail\n");
+ goto err_free_rx_ring;
+ }
+ }
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
+ t7xx_cldma_txq_init(&md_ctrl->txq[i]);
+
+ for (j = 0; j < CLDMA_RXQ_NUM; j++)
+ t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);
+
+ md_ctrl->is_late_init = true;
+ return 0;
+
+err_free_rx_ring:
+ while (j--)
+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);
+
+err_free_tx_ring:
+ while (i--)
+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
+
+ return ret;
+}
+
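+/* Translate a modem-side physical register address into the matching AP-side
+ * pointer within the ioremapped PCIe window, using addr_trs1 as the translation base.
+ */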
+static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
+{
+ return addr + phy_addr - addr_trs1;
+}
+
+static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
+{
+ struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ u32 phy_ao_base, phy_pd_base;
+
+ hw_info->hw_mode = MODE_BIT_64;
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD) {
+ phy_ao_base = CLDMA1_AO_BASE;
+ phy_pd_base = CLDMA1_PD_BASE;
+ hw_info->phy_interrupt_id = CLDMA1_INT;
+ } else {
+ phy_ao_base = CLDMA0_AO_BASE;
+ phy_pd_base = CLDMA0_PD_BASE;
+ hw_info->phy_interrupt_id = CLDMA0_INT;
+ }
+
+ hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
+ pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
+ hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
+ pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
+}
+
+static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ struct cldma_ctrl *md_ctrl;
+
+ md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
+ if (!md_ctrl)
+ return -ENOMEM;
+
+ md_ctrl->t7xx_dev = t7xx_dev;
+ md_ctrl->dev = dev;
+ md_ctrl->hif_id = hif_id;
+ md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
+ t7xx_hw_info_init(md_ctrl);
+ t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
+ return 0;
+}
+
+static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned long flags;
+ int qno_t;
+
+ hw_info = &md_ctrl->hw_info;
+
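+ /* Restore the HW configuration and queue start addresses, then re-enable
+ * only the RX side; the TX side is re-enabled later in t7xx_cldma_resume().
+ */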
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_restore(hw_info);
+ for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
+ t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
+ MTK_TX);
+ t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
+ MTK_RX);
+ }
+ t7xx_cldma_enable_irq(md_ctrl);
+ t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
+ md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
+ t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ unsigned long flags;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
+ t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);
+
+ return 0;
+}
+
+static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned long flags;
+
+ hw_info = &md_ctrl->hw_info;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
+ md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
+ t7xx_cldma_clear_ip_busy(hw_info);
+ t7xx_cldma_disable_irq(md_ctrl);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned long flags;
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
+
+ hw_info = &md_ctrl->hw_info;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
+ md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
+ md_ctrl->txq_started = 0;
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ return 0;
+}
+
+static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
+{
+ md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
+ if (!md_ctrl->pm_entity)
+ return -ENOMEM;
+
+ md_ctrl->pm_entity->entity_param = md_ctrl;
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
+ else
+ md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;
+
+ md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
+ md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
+ md_ctrl->pm_entity->resume = t7xx_cldma_resume;
+ md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;
+
+ return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
+}
+
+static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
+{
+ if (!md_ctrl->pm_entity)
+ return -EINVAL;
+
+ t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
+ kfree(md_ctrl->pm_entity);
+ md_ctrl->pm_entity = NULL;
+ return 0;
+}
+
+void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_stop(hw_info, MTK_TX);
+ t7xx_cldma_hw_stop(hw_info, MTK_RX);
+ t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
+ t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
+ t7xx_cldma_hw_init(hw_info);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
+{
+ struct cldma_ctrl *md_ctrl = data;
+ u32 interrupt;
+
+ interrupt = md_ctrl->hw_info.phy_interrupt_id;
+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
+ t7xx_cldma_irq_work_cb(md_ctrl);
+ t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
+ t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
+ return IRQ_HANDLED;
+}
+
+static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
+{
+ int i;
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
+ if (md_ctrl->txq[i].worker) {
+ destroy_workqueue(md_ctrl->txq[i].worker);
+ md_ctrl->txq[i].worker = NULL;
+ }
+ }
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
+ if (md_ctrl->rxq[i].worker) {
+ destroy_workqueue(md_ctrl->rxq[i].worker);
+ md_ctrl->rxq[i].worker = NULL;
+ }
+ }
+}
+
+/**
+ * t7xx_cldma_init() - Initialize CLDMA.
+ * @md_ctrl: CLDMA context structure.
+ *
+ * Allocate and initialize device power management entity.
+ * Initialize HIF TX/RX queue structure.
+ * Register CLDMA callback ISR with PCIe driver.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code from a failed sub-initialization.
+ */
+int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ int ret, i;
+
+ md_ctrl->txq_active = 0;
+ md_ctrl->rxq_active = 0;
+ md_ctrl->is_late_init = false;
+
+ ret = t7xx_cldma_pm_init(md_ctrl);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&md_ctrl->cldma_lock);
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
+ md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
+ md_ctrl->txq[i].worker =
+ alloc_ordered_workqueue("md_hif%d_tx%d_worker",
+ WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
+ md_ctrl->hif_id, i);
+ if (!md_ctrl->txq[i].worker)
+ goto err_workqueue;
+
+ INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
+ }
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
+ md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
+ INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
+
+ md_ctrl->rxq[i].worker =
+ alloc_ordered_workqueue("md_hif%d_rx%d_worker",
+ WQ_MEM_RECLAIM,
+ md_ctrl->hif_id, i);
+ if (!md_ctrl->rxq[i].worker)
+ goto err_workqueue;
+ }
+
+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
+ md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
+ md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
+ md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
+ t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
+ return 0;
+
+err_workqueue:
+ t7xx_cldma_destroy_wqs(md_ctrl);
+ t7xx_cldma_pm_uninit(md_ctrl);
+ return -ENOMEM;
+}
+
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
+{
+ t7xx_cldma_late_release(md_ctrl);
+ t7xx_cldma_late_init(md_ctrl);
+}
+
+void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
+{
+ t7xx_cldma_stop(md_ctrl);
+ t7xx_cldma_late_release(md_ctrl);
+ t7xx_cldma_destroy_wqs(md_ctrl);
+ t7xx_cldma_pm_uninit(md_ctrl);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
new file mode 100644
index 0000000000..4410bac699
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ */
+
+#ifndef __T7XX_HIF_CLDMA_H__
+#define __T7XX_HIF_CLDMA_H__
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_cldma.h"
+#include "t7xx_pci.h"
+
+/**
+ * enum cldma_id - Identifiers for CLDMA HW units.
+ * @CLDMA_ID_MD: Modem control channel.
+ * @CLDMA_ID_AP: Application Processor control channel.
+ * @CLDMA_NUM: Number of CLDMA HW units available.
+ */
+enum cldma_id {
+ CLDMA_ID_MD,
+ CLDMA_ID_AP,
+ CLDMA_NUM
+};
+
+struct cldma_gpd {
+ u8 flags;
+ u8 not_used1;
+ __le16 rx_data_allow_len;
+ __le32 next_gpd_ptr_h;
+ __le32 next_gpd_ptr_l;
+ __le32 data_buff_bd_ptr_h;
+ __le32 data_buff_bd_ptr_l;
+ __le16 data_buff_len;
+ __le16 not_used2;
+};
+
+struct cldma_request {
+ struct cldma_gpd *gpd; /* Virtual address for CPU */
+ dma_addr_t gpd_addr; /* Physical address for DMA */
+ struct sk_buff *skb;
+ dma_addr_t mapped_buff;
+ struct list_head entry;
+};
+
+struct cldma_ring {
+ struct list_head gpd_ring; /* Ring of struct cldma_request */
+ unsigned int length; /* Number of struct cldma_request */
+ int pkt_size;
+};
+
+struct cldma_queue {
+ struct cldma_ctrl *md_ctrl;
+ enum mtk_txrx dir;
+ unsigned int index;
+ struct cldma_ring *tr_ring;
+ struct cldma_request *tr_done;
+ struct cldma_request *rx_refill;
+ struct cldma_request *tx_next;
+ int budget; /* Same as ring buffer size by default */
+ spinlock_t ring_lock;
+ wait_queue_head_t req_wq; /* Only for TX */
+ struct workqueue_struct *worker;
+ struct work_struct cldma_work;
+};
+
+struct cldma_ctrl {
+ enum cldma_id hif_id;
+ struct device *dev;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct cldma_queue txq[CLDMA_TXQ_NUM];
+ struct cldma_queue rxq[CLDMA_RXQ_NUM];
+ unsigned short txq_active;
+ unsigned short rxq_active;
+ unsigned short txq_started;
+ spinlock_t cldma_lock; /* Protects CLDMA structure */
+ /* Assumes T/R GPD/BD/SPD have the same size */
+ struct dma_pool *gpd_dmapool;
+ struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
+ struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
+ struct md_pm_entity *pm_entity;
+ struct t7xx_cldma_hw hw_info;
+ bool is_late_init;
+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
+};
+
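+/* GPD flags: HWO marks the descriptor as owned by the HW,
+ * IOC requests an interrupt on completion.
+ */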
+#define GPD_FLAGS_HWO BIT(0)
+#define GPD_FLAGS_IOC BIT(7)
+#define GPD_DMAPOOL_ALIGN 16
+
+#define CLDMA_MTU 3584 /* 3.5 KiB */
+
+int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
+void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
+int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
+int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
+int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
+void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
+void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
+
+#endif /* __T7XX_HIF_CLDMA_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
new file mode 100644
index 0000000000..7ff33c1d6a
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_hif_dpmaif.h"
+#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_hif_dpmaif_tx.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_state_monitor.h"
+
+unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx)
+{
+ buf_idx++;
+
+ return buf_idx < buf_len ? buf_idx : 0;
+}
+
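+/* Number of used (DPMAIF_READ) or free (DPMAIF_WRITE) slots between the read
+ * and write indices of a ring buffer. One slot is kept unused in the write
+ * case so that a full ring can be told apart from an empty one.
+ *
+ * Worked example with total_cnt = 8, rd_idx = 6, wr_idx = 2:
+ *   DPMAIF_READ:  2 - 6 = -4, wraps to 4 entries ready to be read.
+ *   DPMAIF_WRITE: 6 - 2 - 1 = 3 slots free for writing.
+ */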
+unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
+ unsigned int wr_idx, enum dpmaif_rdwr rd_wr)
+{
+ int pkt_cnt;
+
+ if (rd_wr == DPMAIF_READ)
+ pkt_cnt = wr_idx - rd_idx;
+ else
+ pkt_cnt = rd_idx - wr_idx - 1;
+
+ if (pkt_cnt < 0)
+ pkt_cnt += total_cnt;
+
+ return (unsigned int)pkt_cnt;
+}
+
+static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_isr_para *isr_para;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
+ isr_para = &dpmaif_ctrl->isr_para[i];
+ t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+ }
+}
+
+static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_isr_para *isr_para;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
+ isr_para = &dpmaif_ctrl->isr_para[i];
+ t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+ }
+}
+
+static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
+ struct dpmaif_hw_intr_st_para intr_status;
+ struct device *dev = dpmaif_ctrl->dev;
+ struct dpmaif_hw_info *hw_info;
+ int i;
+
+ memset(&intr_status, 0, sizeof(intr_status));
+ hw_info = &dpmaif_ctrl->hw_info;
+
+ if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) {
+ dev_err(dev, "Failed to get HW interrupt count\n");
+ return;
+ }
+
+ t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+
+ for (i = 0; i < intr_status.intr_cnt; i++) {
+ switch (intr_status.intr_types[i]) {
+ case DPF_INTR_UL_DONE:
+ t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
+ break;
+
+ case DPF_INTR_UL_DRB_EMPTY:
+ case DPF_INTR_UL_MD_NOTREADY:
+ case DPF_INTR_UL_MD_PWR_NOTREADY:
+ /* No need to log an error for these */
+ break;
+
+ case DPF_INTR_DL_BATCNT_LEN_ERR:
+ dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n");
+ t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info);
+ break;
+
+ case DPF_INTR_DL_PITCNT_LEN_ERR:
+ dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n");
+ t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info);
+ break;
+
+ case DPF_INTR_DL_Q0_PITCNT_LEN_ERR:
+ dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n");
+ t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT);
+ break;
+
+ case DPF_INTR_DL_Q1_PITCNT_LEN_ERR:
+ dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n");
+ t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1);
+ break;
+
+ case DPF_INTR_DL_DONE:
+ case DPF_INTR_DL_Q0_DONE:
+ case DPF_INTR_DL_Q1_DONE:
+ t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
+ break;
+
+ default:
+ dev_err_ratelimited(dev, "DL interrupt error: unknown type : %d\n",
+ intr_status.intr_types[i]);
+ }
+ }
+}
+
+static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
+{
+ struct dpmaif_isr_para *isr_para = data;
+ struct dpmaif_ctrl *dpmaif_ctrl;
+
+ dpmaif_ctrl = isr_para->dpmaif_ctrl;
+ if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
+ dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n");
+ return IRQ_HANDLED;
+ }
+
+ t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data)
+{
+ struct dpmaif_isr_para *isr_para = data;
+ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
+
+ t7xx_dpmaif_irq_cb(isr_para);
+ t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+ return IRQ_HANDLED;
+}
+
+static void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_isr_para *isr_para;
+ unsigned char i;
+
+ dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT;
+ dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT;
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ isr_para = &dpmaif_ctrl->isr_para[i];
+ isr_para->dpmaif_ctrl = dpmaif_ctrl;
+ isr_para->dlq_id = i;
+ isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i];
+ }
+}
+
+static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev;
+ struct dpmaif_isr_para *isr_para;
+ enum t7xx_int int_type;
+ int i;
+
+ t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl);
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ isr_para = &dpmaif_ctrl->isr_para[i];
+ int_type = isr_para->pcie_int;
+ t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
+
+ t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
+ t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread;
+ t7xx_dev->callback_param[int_type] = isr_para;
+
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
+ t7xx_pcie_mac_set_int(t7xx_dev, int_type);
+ }
+}
+
+static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_rx_queue *rx_q;
+ struct dpmaif_tx_queue *tx_q;
+ int ret, rx_idx, tx_idx, i;
+
+ ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret);
+ return ret;
+ }
+
+ ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret);
+ goto err_free_normal_bat;
+ }
+
+ for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) {
+ rx_q = &dpmaif_ctrl->rxq[rx_idx];
+ rx_q->index = rx_idx;
+ rx_q->dpmaif_ctrl = dpmaif_ctrl;
+ ret = t7xx_dpmaif_rxq_init(rx_q);
+ if (ret)
+ goto err_free_rxq;
+ }
+
+ for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) {
+ tx_q = &dpmaif_ctrl->txq[tx_idx];
+ tx_q->index = tx_idx;
+ tx_q->dpmaif_ctrl = dpmaif_ctrl;
+ ret = t7xx_dpmaif_txq_init(tx_q);
+ if (ret)
+ goto err_free_txq;
+ }
+
+ ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n");
+ goto err_free_txq;
+ }
+
+ ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl);
+ if (ret)
+ goto err_thread_rel;
+
+ return 0;
+
+err_thread_rel:
+ t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
+
+err_free_txq:
+ for (i = 0; i < tx_idx; i++) {
+ tx_q = &dpmaif_ctrl->txq[i];
+ t7xx_dpmaif_txq_free(tx_q);
+ }
+
+err_free_rxq:
+ for (i = 0; i < rx_idx; i++) {
+ rx_q = &dpmaif_ctrl->rxq[i];
+ t7xx_dpmaif_rxq_free(rx_q);
+ }
+
+ t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag);
+
+err_free_normal_bat:
+ t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req);
+
+ return ret;
+}
+
+static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_rx_queue *rx_q;
+ struct dpmaif_tx_queue *tx_q;
+ int i;
+
+ t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
+ t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl);
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ tx_q = &dpmaif_ctrl->txq[i];
+ t7xx_dpmaif_txq_free(tx_q);
+ }
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ rx_q = &dpmaif_ctrl->rxq[i];
+ t7xx_dpmaif_rxq_free(rx_q);
+ }
+}
+
+static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
+ struct dpmaif_hw_params hw_init_para;
+ struct dpmaif_rx_queue *rxq;
+ struct dpmaif_tx_queue *txq;
+ unsigned int buf_cnt;
+ int i, ret = 0;
+
+ if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)
+ return -EFAULT;
+
+ memset(&hw_init_para, 0, sizeof(hw_init_para));
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ rxq = &dpmaif_ctrl->rxq[i];
+ rxq->que_started = true;
+ rxq->index = i;
+ rxq->budget = rxq->bat_req->bat_size_cnt - 1;
+
+ hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr;
+ hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt;
+ hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr;
+ hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt;
+ hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr;
+ hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt;
+ }
+
+ bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt);
+ buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1;
+ ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret);
+ return ret;
+ }
+
+ buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1;
+ ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret);
+ goto err_free_normal_bat;
+ }
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ txq = &dpmaif_ctrl->txq[i];
+ txq->que_started = true;
+
+ hw_init_para.drb_base_addr[i] = txq->drb_bus_addr;
+ hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt;
+ }
+
+ ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret);
+ goto err_free_frag_bat;
+ }
+
+ ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1);
+ if (ret)
+ goto err_free_frag_bat;
+
+ ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1);
+ if (ret)
+ goto err_free_frag_bat;
+
+ t7xx_dpmaif_ul_clr_all_intr(hw_info);
+ t7xx_dpmaif_dl_clr_all_intr(hw_info);
+ dpmaif_ctrl->state = DPMAIF_STATE_PWRON;
+ t7xx_dpmaif_enable_irq(dpmaif_ctrl);
+ wake_up(&dpmaif_ctrl->tx_wq);
+ return 0;
+
+err_free_frag_bat:
+ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
+
+err_free_normal_bat:
+ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
+
+ return ret;
+}
+
+static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ t7xx_dpmaif_tx_stop(dpmaif_ctrl);
+ t7xx_dpmaif_rx_stop(dpmaif_ctrl);
+}
+
+static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
+}
+
+static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ if (!dpmaif_ctrl->dpmaif_sw_init_done) {
+ dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n");
+ return -EFAULT;
+ }
+
+ if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF)
+ return -EFAULT;
+
+ t7xx_dpmaif_disable_irq(dpmaif_ctrl);
+ dpmaif_ctrl->state = DPMAIF_STATE_PWROFF;
+ t7xx_dpmaif_stop_sw(dpmaif_ctrl);
+ t7xx_dpmaif_tx_clear(dpmaif_ctrl);
+ t7xx_dpmaif_rx_clear(dpmaif_ctrl);
+ return 0;
+}
+
+static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = param;
+
+ t7xx_dpmaif_tx_stop(dpmaif_ctrl);
+ t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_disable_irq(dpmaif_ctrl);
+ t7xx_dpmaif_rx_stop(dpmaif_ctrl);
+ return 0;
+}
+
+static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int qno;
+
+ for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++)
+ t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno);
+}
+
+static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_rx_queue *rxq;
+ struct dpmaif_tx_queue *txq;
+ unsigned int que_cnt;
+
+ for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) {
+ txq = &dpmaif_ctrl->txq[que_cnt];
+ txq->que_started = true;
+ }
+
+ for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) {
+ rxq = &dpmaif_ctrl->rxq[que_cnt];
+ rxq->que_started = true;
+ }
+}
+
+static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = param;
+
+ if (!dpmaif_ctrl)
+ return 0;
+
+ t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl);
+ t7xx_dpmaif_enable_irq(dpmaif_ctrl);
+ t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl);
+ t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info);
+ wake_up(&dpmaif_ctrl->tx_wq);
+ return 0;
+}
+
+static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
+ int ret;
+
+ INIT_LIST_HEAD(&dpmaif_pm_entity->entity);
+ dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend;
+ dpmaif_pm_entity->suspend_late = NULL;
+ dpmaif_pm_entity->resume_early = NULL;
+ dpmaif_pm_entity->resume = &t7xx_dpmaif_resume;
+ dpmaif_pm_entity->id = PM_ENTITY_ID_DATA;
+ dpmaif_pm_entity->entity_param = dpmaif_ctrl;
+
+ ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
+ if (ret)
+ dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
+
+ return ret;
+}
+
+static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
+ int ret;
+
+ ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
+ if (ret < 0)
+ dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
+
+ return ret;
+}
+
+int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state)
+{
+ int ret = 0;
+
+ switch (state) {
+ case MD_STATE_WAITING_FOR_HS1:
+ ret = t7xx_dpmaif_start(dpmaif_ctrl);
+ break;
+
+ case MD_STATE_EXCEPTION:
+ ret = t7xx_dpmaif_stop(dpmaif_ctrl);
+ break;
+
+ case MD_STATE_STOPPED:
+ ret = t7xx_dpmaif_stop(dpmaif_ctrl);
+ break;
+
+ case MD_STATE_WAITING_TO_STOP:
+ t7xx_dpmaif_stop_hw(dpmaif_ctrl);
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * t7xx_dpmaif_hif_init() - Initialize data path.
+ * @t7xx_dev: MTK context structure.
+ * @callbacks: Callbacks implemented by the network layer to handle RX skb and
+ * event notifications.
+ *
+ * Allocate and initialize datapath control block.
+ * Register datapath ISR, TX and RX resources.
+ *
+ * Return:
+ * * dpmaif_ctrl pointer - Pointer to DPMAIF context structure.
+ * * NULL - In case of error.
+ */
+struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
+ struct dpmaif_callbacks *callbacks)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ struct dpmaif_ctrl *dpmaif_ctrl;
+ int ret;
+
+ if (!callbacks)
+ return NULL;
+
+ dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL);
+ if (!dpmaif_ctrl)
+ return NULL;
+
+ dpmaif_ctrl->t7xx_dev = t7xx_dev;
+ dpmaif_ctrl->callbacks = callbacks;
+ dpmaif_ctrl->dev = dev;
+ dpmaif_ctrl->dpmaif_sw_init_done = false;
+ dpmaif_ctrl->hw_info.dev = dev;
+ dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base -
+ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
+
+ ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl);
+ if (ret)
+ return NULL;
+
+ t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl);
+ t7xx_dpmaif_disable_irq(dpmaif_ctrl);
+
+ ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl);
+ if (ret) {
+ t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
+ dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret);
+ return NULL;
+ }
+
+ dpmaif_ctrl->dpmaif_sw_init_done = true;
+ return dpmaif_ctrl;
+}
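+
+/* Illustrative bring-up/teardown sketch for t7xx_dpmaif_hif_init() and
+ * t7xx_dpmaif_hif_exit(); the callback functions named below are placeholders
+ * rather than the driver's real network-layer handlers:
+ *
+ *   static struct dpmaif_callbacks cb = {
+ *           .state_notify = my_txq_state_notify,
+ *           .recv_skb = my_recv_skb,
+ *   };
+ *
+ *   dpmaif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &cb);
+ *   if (!dpmaif_ctrl)
+ *           return -ENOMEM;
+ *   ...
+ *   t7xx_dpmaif_md_state_callback(dpmaif_ctrl, MD_STATE_WAITING_FOR_HS1);
+ *   ...
+ *   t7xx_dpmaif_hif_exit(dpmaif_ctrl);
+ */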
+
+void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ if (dpmaif_ctrl->dpmaif_sw_init_done) {
+ t7xx_dpmaif_stop(dpmaif_ctrl);
+ t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
+ t7xx_dpmaif_sw_release(dpmaif_ctrl);
+ dpmaif_ctrl->dpmaif_sw_init_done = false;
+ }
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
new file mode 100644
index 0000000000..0ce4505e81
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_HIF_DPMAIF_H__
+#define __T7XX_HIF_DPMAIF_H__
+
+#include <linux/bitmap.h>
+#include <linux/mm_types.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_pci.h"
+#include "t7xx_state_monitor.h"
+
+/* SKB control buffer */
+struct t7xx_skb_cb {
+ u8 netif_idx;
+ u8 txq_number;
+ u8 rx_pkt_type;
+};
+
+#define T7XX_SKB_CB(__skb) ((struct t7xx_skb_cb *)(__skb)->cb)
+
+enum dpmaif_rdwr {
+ DPMAIF_READ,
+ DPMAIF_WRITE,
+};
+
+/* Structure of DL BAT */
+struct dpmaif_cur_rx_skb_info {
+ bool msg_pit_received;
+ struct sk_buff *cur_skb;
+ unsigned int cur_chn_idx;
+ unsigned int check_sum;
+ unsigned int pit_dp;
+ unsigned int pkt_type;
+ int err_payload;
+};
+
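+/* HW BAT entry: 64-bit bus address of an RX data buffer, split into the low
+ * (p_buffer_addr) and high (buffer_addr_ext) 32-bit words.
+ */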
+struct dpmaif_bat {
+ unsigned int p_buffer_addr;
+ unsigned int buffer_addr_ext;
+};
+
+struct dpmaif_bat_skb {
+ struct sk_buff *skb;
+ dma_addr_t data_bus_addr;
+ unsigned int data_len;
+};
+
+struct dpmaif_bat_page {
+ struct page *page;
+ dma_addr_t data_bus_addr;
+ unsigned int offset;
+ unsigned int data_len;
+};
+
+enum bat_type {
+ BAT_TYPE_NORMAL,
+ BAT_TYPE_FRAG,
+};
+
+struct dpmaif_bat_request {
+ void *bat_base;
+ dma_addr_t bat_bus_addr;
+ unsigned int bat_size_cnt;
+ unsigned int bat_wr_idx;
+ unsigned int bat_release_rd_idx;
+ void *bat_skb;
+ unsigned int pkt_buf_sz;
+ unsigned long *bat_bitmap;
+ atomic_t refcnt;
+ spinlock_t mask_lock; /* Protects BAT mask */
+ enum bat_type type;
+};
+
+struct dpmaif_rx_queue {
+ unsigned int index;
+ bool que_started;
+ unsigned int budget;
+
+ void *pit_base;
+ dma_addr_t pit_bus_addr;
+ unsigned int pit_size_cnt;
+
+ unsigned int pit_rd_idx;
+ unsigned int pit_wr_idx;
+ unsigned int pit_release_rd_idx;
+
+ struct dpmaif_bat_request *bat_req;
+ struct dpmaif_bat_request *bat_frag;
+
+ atomic_t rx_processing;
+
+ struct dpmaif_ctrl *dpmaif_ctrl;
+ unsigned int expect_pit_seq;
+ unsigned int pit_remain_release_cnt;
+ struct dpmaif_cur_rx_skb_info rx_data_info;
+ struct napi_struct napi;
+ bool sleep_lock_pending;
+};
+
+struct dpmaif_tx_queue {
+ unsigned int index;
+ bool que_started;
+ atomic_t tx_budget;
+ void *drb_base;
+ dma_addr_t drb_bus_addr;
+ unsigned int drb_size_cnt;
+ unsigned int drb_wr_idx;
+ unsigned int drb_rd_idx;
+ unsigned int drb_release_rd_idx;
+ void *drb_skb_base;
+ wait_queue_head_t req_wq;
+ struct workqueue_struct *worker;
+ struct work_struct dpmaif_tx_work;
+ spinlock_t tx_lock; /* Protects txq DRB */
+ atomic_t tx_processing;
+
+ struct dpmaif_ctrl *dpmaif_ctrl;
+ struct sk_buff_head tx_skb_head;
+};
+
+struct dpmaif_isr_para {
+ struct dpmaif_ctrl *dpmaif_ctrl;
+ unsigned char pcie_int;
+ unsigned char dlq_id;
+};
+
+enum dpmaif_state {
+ DPMAIF_STATE_MIN,
+ DPMAIF_STATE_PWROFF,
+ DPMAIF_STATE_PWRON,
+ DPMAIF_STATE_EXCEPTION,
+ DPMAIF_STATE_MAX
+};
+
+enum dpmaif_txq_state {
+ DMPAIF_TXQ_STATE_IRQ,
+ DMPAIF_TXQ_STATE_FULL,
+};
+
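+/* Callbacks provided by the network layer when the data path is initialized:
+ * state_notify reports TX queue state transitions (e.g. DMPAIF_TXQ_STATE_FULL
+ * when a DRB ring runs out of space) and recv_skb hands received packets up,
+ * together with the NAPI instance of the RX queue they arrived on.
+ */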
+struct dpmaif_callbacks {
+ void (*state_notify)(struct t7xx_pci_dev *t7xx_dev,
+ enum dpmaif_txq_state state, int txq_number);
+ void (*recv_skb)(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
+ struct napi_struct *napi);
+};
+
+struct dpmaif_ctrl {
+ struct device *dev;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct md_pm_entity dpmaif_pm_entity;
+ enum dpmaif_state state;
+ bool dpmaif_sw_init_done;
+ struct dpmaif_hw_info hw_info;
+ struct dpmaif_tx_queue txq[DPMAIF_TXQ_NUM];
+ struct dpmaif_rx_queue rxq[DPMAIF_RXQ_NUM];
+
+ unsigned char rxq_int_mapping[DPMAIF_RXQ_NUM];
+ struct dpmaif_isr_para isr_para[DPMAIF_RXQ_NUM];
+
+ struct dpmaif_bat_request bat_req;
+ struct dpmaif_bat_request bat_frag;
+ struct workqueue_struct *bat_release_wq;
+ struct work_struct bat_release_work;
+
+ wait_queue_head_t tx_wq;
+ struct task_struct *tx_thread;
+
+ struct dpmaif_callbacks *callbacks;
+};
+
+struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
+ struct dpmaif_callbacks *callbacks);
+void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl);
+int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state);
+unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx);
+unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
+ unsigned int wr_idx, enum dpmaif_rdwr);
+
+#endif /* __T7XX_HIF_DPMAIF_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
new file mode 100644
index 0000000000..f4ff2198b5
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -0,0 +1,1168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_hif_dpmaif.h"
+#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_netdev.h"
+#include "t7xx_pci.h"
+
+#define DPMAIF_BAT_COUNT 8192
+#define DPMAIF_FRG_COUNT 4814
+#define DPMAIF_PIT_COUNT (DPMAIF_BAT_COUNT * 2)
+
+#define DPMAIF_BAT_CNT_THRESHOLD 30
+#define DPMAIF_PIT_CNT_THRESHOLD 60
+#define DPMAIF_RX_PUSH_THRESHOLD_MASK GENMASK(2, 0)
+#define DPMAIF_NOTIFY_RELEASE_COUNT 128
+#define DPMAIF_POLL_PIT_TIME_US 20
+#define DPMAIF_POLL_PIT_MAX_TIME_US 2000
+#define DPMAIF_WQ_TIME_LIMIT_MS 2
+#define DPMAIF_CS_RESULT_PASS 0
+
+/* Packet type */
+#define DES_PT_PD 0
+#define DES_PT_MSG 1
+/* Buffer type */
+#define PKT_BUF_FRAG 1
+
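+/* Reassemble the BAT buffer ID from the two PIT fields that carry it: the low
+ * 13 bits come from PD_PIT_BUFFER_ID in the header, the high bits from
+ * PD_PIT_H_BID in the footer. For example, H_BID = 1 and BUFFER_ID = 165 give
+ * BID = (1 << 13) + 165 = 8357.
+ */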
+static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
+{
+ u32 value;
+
+ value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer));
+ value <<= 13;
+ value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header));
+ return value;
+}
+
+static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
+ const unsigned int q_num, const unsigned int bat_cnt)
+{
+ struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
+ struct dpmaif_bat_request *bat_req = rxq->bat_req;
+ unsigned int old_rl_idx, new_wr_idx, old_wr_idx;
+
+ if (!rxq->que_started) {
+ dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
+ return -EINVAL;
+ }
+
+ old_rl_idx = bat_req->bat_release_rd_idx;
+ old_wr_idx = bat_req->bat_wr_idx;
+ new_wr_idx = old_wr_idx + bat_cnt;
+
+ if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx)
+ goto err_flow;
+
+ if (new_wr_idx >= bat_req->bat_size_cnt) {
+ new_wr_idx -= bat_req->bat_size_cnt;
+ if (new_wr_idx >= old_rl_idx)
+ goto err_flow;
+ }
+
+ bat_req->bat_wr_idx = new_wr_idx;
+ return 0;
+
+err_flow:
+ dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n");
+ return -EINVAL;
+}
+
+static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
+ const unsigned int size, struct dpmaif_bat_skb *cur_skb)
+{
+ dma_addr_t data_bus_addr;
+ struct sk_buff *skb;
+
+ skb = __dev_alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return false;
+
+ data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
+ dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+
+ cur_skb->skb = skb;
+ cur_skb->data_bus_addr = data_bus_addr;
+ cur_skb->data_len = size;
+
+ return true;
+}
+
+static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
+ unsigned int index)
+{
+ struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;
+
+ if (bat_skb->skb) {
+ dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
+ dev_kfree_skb(bat_skb->skb);
+ bat_skb->skb = NULL;
+ }
+}
+
+/**
+ * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
+ * @dpmaif_ctrl: Pointer to DPMAIF context structure.
+ * @bat_req: Pointer to BAT request structure.
+ * @q_num: Queue number.
+ * @buf_cnt: Number of buffers to allocate.
+ * @initial: Indicates if the ring is being populated for the first time.
+ *
+ * Allocate skb and store the start address of the data buffer into the BAT ring.
+ * If this is not the initial call, notify the HW about the new entries.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code.
+ */
+int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
+ const struct dpmaif_bat_request *bat_req,
+ const unsigned int q_num, const unsigned int buf_cnt,
+ const bool initial)
+{
+ unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
+ int ret;
+
+ if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
+ return -EINVAL;
+
+ /* Check BAT buffer space */
+ bat_max_cnt = bat_req->bat_size_cnt;
+
+ bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
+ bat_req->bat_wr_idx, DPMAIF_WRITE);
+ if (buf_cnt > bat_cnt)
+ return -ENOMEM;
+
+ bat_start_idx = bat_req->bat_wr_idx;
+
+ for (i = 0; i < buf_cnt; i++) {
+ unsigned int cur_bat_idx = bat_start_idx + i;
+ struct dpmaif_bat_skb *cur_skb;
+ struct dpmaif_bat *cur_bat;
+
+ if (cur_bat_idx >= bat_max_cnt)
+ cur_bat_idx -= bat_max_cnt;
+
+ cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
+ if (!cur_skb->skb &&
+ !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
+ break;
+
+ cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
+ cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
+ cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
+ }
+
+ if (!i)
+ return -ENOMEM;
+
+ ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
+ if (ret)
+ goto err_unmap_skbs;
+
+ if (!initial) {
+ unsigned int hw_wr_idx;
+
+ ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
+ if (ret)
+ goto err_unmap_skbs;
+
+ hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
+ DPF_RX_QNO_DFT);
+ if (hw_wr_idx != bat_req->bat_wr_idx) {
+ ret = -EFAULT;
+ dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
+ goto err_unmap_skbs;
+ }
+ }
+
+ return 0;
+
+err_unmap_skbs:
+ while (i--)
+ t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
+
+ return ret;
+}
+
+static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
+ const unsigned int rel_entry_num)
+{
+ struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
+ unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
+ int ret;
+
+ if (!rxq->que_started)
+ return 0;
+
+ if (rel_entry_num >= rxq->pit_size_cnt) {
+ dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
+ return -EINVAL;
+ }
+
+ old_rel_idx = rxq->pit_release_rd_idx;
+ new_rel_idx = old_rel_idx + rel_entry_num;
+ hw_wr_idx = rxq->pit_wr_idx;
+ if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
+ new_rel_idx -= rxq->pit_size_cnt;
+
+ ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
+ if (ret) {
+ dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
+ return ret;
+ }
+
+ rxq->pit_release_rd_idx = new_rel_idx;
+ return 0;
+}
+
+static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bat_req->mask_lock, flags);
+ set_bit(idx, bat_req->bat_bitmap);
+ spin_unlock_irqrestore(&bat_req->mask_lock, flags);
+}
+
+static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
+ const unsigned int cur_bid)
+{
+ struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
+ struct dpmaif_bat_page *bat_page;
+
+ if (cur_bid >= DPMAIF_FRG_COUNT)
+ return -EINVAL;
+
+ bat_page = bat_frag->bat_skb + cur_bid;
+ if (!bat_page->page)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base,
+ unsigned int index)
+{
+ struct dpmaif_bat_page *bat_page = bat_page_base + index;
+
+ if (bat_page->page) {
+ dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE);
+ put_page(bat_page->page);
+ bat_page->page = NULL;
+ }
+}
+
+/**
+ * t7xx_dpmaif_rx_frag_alloc() - Allocate buffers for the Fragment BAT ring.
+ * @dpmaif_ctrl: Pointer to DPMAIF context structure.
+ * @bat_req: Pointer to BAT request structure.
+ * @buf_cnt: Number of buffers to allocate.
+ * @initial: Indicates if the ring is being populated for the first time.
+ *
+ * Fragment BAT is used when the received packet does not fit in a normal BAT entry.
+ * This function allocates a page fragment and stores the start address of the page
+ * into the Fragment BAT ring.
+ * If this is not the initial call, notify the HW about the new entries.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code.
+ */
+int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
+ const unsigned int buf_cnt, const bool initial)
+{
+ unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
+ struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
+ int ret = 0, i;
+
+ if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
+ return -EINVAL;
+
+ buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
+ bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
+ DPMAIF_WRITE);
+ if (buf_cnt > buf_space) {
+ dev_err(dpmaif_ctrl->dev,
+ "Requested more buffers than the space available in RX frag ring\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < buf_cnt; i++) {
+ struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
+ struct dpmaif_bat *cur_bat;
+ dma_addr_t data_base_addr;
+
+ if (!cur_page->page) {
+ unsigned long offset;
+ struct page *page;
+ void *data;
+
+ data = netdev_alloc_frag(bat_req->pkt_buf_sz);
+ if (!data)
+ break;
+
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+
+ data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
+ bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
+ put_page(virt_to_head_page(data));
+ dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
+ break;
+ }
+
+ cur_page->page = page;
+ cur_page->data_bus_addr = data_base_addr;
+ cur_page->offset = offset;
+ cur_page->data_len = bat_req->pkt_buf_sz;
+ }
+
+ data_base_addr = cur_page->data_bus_addr;
+ cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
+ cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
+ cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
+ cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
+ }
+
+ bat_req->bat_wr_idx = cur_bat_idx;
+
+ if (!initial)
+ t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);
+
+ if (i < buf_cnt) {
+ ret = -ENOMEM;
+ if (initial) {
+ while (i--)
+ t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
+ }
+ }
+
+ return ret;
+}
+
+static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *pkt_info,
+ struct sk_buff *skb)
+{
+ unsigned long long data_bus_addr, data_base_addr;
+ struct device *dev = rxq->dpmaif_ctrl->dev;
+ struct dpmaif_bat_page *page_info;
+ unsigned int data_len;
+ int data_offset;
+
+ page_info = rxq->bat_frag->bat_skb;
+ page_info += t7xx_normal_pit_bid(pkt_info);
+ dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
+
+ if (!page_info->page)
+ return -EINVAL;
+
+ data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
+ data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
+ data_base_addr = page_info->data_bus_addr;
+ data_offset = data_bus_addr - data_base_addr;
+ data_offset += page_info->offset;
+ data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
+ data_offset, data_len, page_info->data_len);
+
+ page_info->page = NULL;
+ page_info->offset = 0;
+ page_info->data_len = 0;
+ return 0;
+}
+
+static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *pkt_info,
+ const struct dpmaif_cur_rx_skb_info *skb_info)
+{
+ unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
+ int ret;
+
+ ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
+ if (ret < 0)
+ return ret;
+
+ ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
+ if (ret < 0) {
+ dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
+ return ret;
+ }
+
+ t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
+ return 0;
+}
+
+static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
+{
+ struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;
+
+ bat_skb += cur_bid;
+ if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit)
+{
+ return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer));
+}
+
+static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *pit)
+{
+ unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;
+
+ if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq,
+ cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US,
+ DPMAIF_POLL_PIT_MAX_TIME_US, false, pit))
+ return -EFAULT;
+
+ rxq->expect_pit_seq++;
+ if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
+ rxq->expect_pit_seq = 0;
+
+ return 0;
+}
+
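+/* Count how many consecutive BAT entries, starting at bat_release_rd_idx, have
+ * their bit set in bat_bitmap (i.e. their buffer has been consumed) and can
+ * therefore be released back to the HW. The search wraps around the ring.
+ *
+ * Worked example with bat_size_cnt = 8, bat_release_rd_idx = 6 and bits
+ * 6, 7 and 0 set: the first search hits the end of the bitmap, the wrap-around
+ * search finds the first zero bit at index 1, so 8 - 6 + 1 = 3 entries are
+ * releasable.
+ */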
+static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req)
+{
+ unsigned int zero_index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bat_req->mask_lock, flags);
+
+ zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt,
+ bat_req->bat_release_rd_idx);
+
+ if (zero_index < bat_req->bat_size_cnt) {
+ spin_unlock_irqrestore(&bat_req->mask_lock, flags);
+ return zero_index - bat_req->bat_release_rd_idx;
+ }
+
+ /* Limit the wrap-around search to the entries before bat_release_rd_idx */
+ zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx);
+ spin_unlock_irqrestore(&bat_req->mask_lock, flags);
+ return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index;
+}
+
+static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
+ const unsigned int rel_entry_num,
+ const enum bat_type buf_type)
+{
+ struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
+ unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i;
+ struct dpmaif_bat_request *bat;
+ unsigned long flags;
+
+ if (!rxq->que_started || !rel_entry_num)
+ return -EINVAL;
+
+ if (buf_type == BAT_TYPE_FRAG) {
+ bat = rxq->bat_frag;
+ hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
+ } else {
+ bat = rxq->bat_req;
+ hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
+ }
+
+ if (rel_entry_num >= bat->bat_size_cnt)
+ return -EINVAL;
+
+ old_rel_idx = bat->bat_release_rd_idx;
+ new_rel_idx = old_rel_idx + rel_entry_num;
+
+ /* Do not need to release if the queue is empty */
+ if (bat->bat_wr_idx == old_rel_idx)
+ return 0;
+
+ if (hw_rd_idx >= old_rel_idx) {
+ if (new_rel_idx > hw_rd_idx)
+ return -EINVAL;
+ }
+
+ if (new_rel_idx >= bat->bat_size_cnt) {
+ new_rel_idx -= bat->bat_size_cnt;
+ if (new_rel_idx > hw_rd_idx)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bat->mask_lock, flags);
+ for (i = 0; i < rel_entry_num; i++) {
+ unsigned int index = bat->bat_release_rd_idx + i;
+
+ if (index >= bat->bat_size_cnt)
+ index -= bat->bat_size_cnt;
+
+ clear_bit(index, bat->bat_bitmap);
+ }
+ spin_unlock_irqrestore(&bat->mask_lock, flags);
+
+ bat->bat_release_rd_idx = new_rel_idx;
+ return rel_entry_num;
+}
+
+static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
+{
+ int ret;
+
+ if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
+ return 0;
+
+ ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
+ if (ret)
+ return ret;
+
+ rxq->pit_remain_release_cnt = 0;
+ return 0;
+}
+
+static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
+{
+ unsigned int bid_cnt;
+ int ret;
+
+ bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
+ if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
+ return 0;
+
+ ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
+ if (ret <= 0) {
+ dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
+ if (ret < 0)
+ dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);
+
+ return ret;
+}
+
+static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
+{
+ unsigned int bid_cnt;
+ int ret;
+
+ bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
+ if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
+ return 0;
+
+ ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
+ if (ret <= 0) {
+ dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
+ return ret;
+ }
+
+ return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
+}
+
+static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *msg_pit,
+ struct dpmaif_cur_rx_skb_info *skb_info)
+{
+ int header = le32_to_cpu(msg_pit->header);
+
+ skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header);
+ skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header);
+ skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header);
+ skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3));
+}
+
+static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *pkt_info,
+ struct dpmaif_cur_rx_skb_info *skb_info)
+{
+ unsigned long long data_bus_addr, data_base_addr;
+ struct device *dev = rxq->dpmaif_ctrl->dev;
+ struct dpmaif_bat_skb *bat_skb;
+ unsigned int data_len;
+ struct sk_buff *skb;
+ int data_offset;
+
+ bat_skb = rxq->bat_req->bat_skb;
+ bat_skb += t7xx_normal_pit_bid(pkt_info);
+ dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
+
+ data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
+ data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
+ data_base_addr = bat_skb->data_bus_addr;
+ data_offset = data_bus_addr - data_base_addr;
+ data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
+ skb = bat_skb->skb;
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ skb_reserve(skb, data_offset);
+
+ if (skb->tail + data_len > skb->end) {
+ dev_err(dev, "No buffer space available\n");
+ return -ENOBUFS;
+ }
+
+ skb_put(skb, data_len);
+ skb_info->cur_skb = skb;
+ bat_skb->skb = NULL;
+ return 0;
+}
+
+static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *pkt_info,
+ struct dpmaif_cur_rx_skb_info *skb_info)
+{
+ unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
+ int ret;
+
+ ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
+ if (ret < 0)
+ return ret;
+
+ ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
+ if (ret < 0) {
+ dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
+ return ret;
+ }
+
+ t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
+ return 0;
+}
+
+static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
+ int ret;
+
+ queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work);
+
+ ret = t7xx_dpmaif_pit_release_and_add(rxq);
+ if (ret < 0)
+ dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);
+
+ return ret;
+}
+
+static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
+ struct dpmaif_cur_rx_skb_info *skb_info)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
+ struct sk_buff *skb = skb_info->cur_skb;
+ struct t7xx_skb_cb *skb_cb;
+ u8 netif_id;
+
+ skb_info->cur_skb = NULL;
+
+ if (skb_info->pit_dp) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
+ CHECKSUM_NONE;
+ netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
+ skb_cb = T7XX_SKB_CB(skb);
+ skb_cb->netif_idx = netif_id;
+ skb_cb->rx_pkt_type = skb_info->pkt_type;
+ dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi);
+}
+
+static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
+ const unsigned int budget, int *once_more)
+{
+ unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
+ struct device *dev = rxq->dpmaif_ctrl->dev;
+ struct dpmaif_cur_rx_skb_info *skb_info;
+ int ret = 0;
+
+ pit_len = rxq->pit_size_cnt;
+ skb_info = &rxq->rx_data_info;
+ cur_pit = rxq->pit_rd_idx;
+
+ for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
+ struct dpmaif_pit *pkt_info;
+ u32 val;
+
+ if (!skb_info->msg_pit_received && recv_skb_cnt >= budget)
+ break;
+
+ pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
+ if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
+ dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
+ *once_more = 1;
+ return recv_skb_cnt;
+ }
+
+ val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
+ if (val == DES_PT_MSG) {
+ if (skb_info->msg_pit_received)
+ dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);
+
+ skb_info->msg_pit_received = true;
+ t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
+ } else { /* DES_PT_PD */
+ val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
+ if (val != PKT_BUF_FRAG)
+ ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
+ else if (!skb_info->cur_skb)
+ ret = -EINVAL;
+ else
+ ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);
+
+ if (ret < 0) {
+ skb_info->err_payload = 1;
+ dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
+ }
+
+ val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
+ if (!val) {
+ if (!skb_info->err_payload) {
+ t7xx_dpmaif_rx_skb(rxq, skb_info);
+ } else if (skb_info->cur_skb) {
+ dev_kfree_skb_any(skb_info->cur_skb);
+ skb_info->cur_skb = NULL;
+ }
+
+ memset(skb_info, 0, sizeof(*skb_info));
+ recv_skb_cnt++;
+ }
+ }
+
+ cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
+ rxq->pit_rd_idx = cur_pit;
+ rxq->pit_remain_release_cnt++;
+
+ if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
+ ret = t7xx_dpmaifq_rx_notify_hw(rxq);
+ if (ret < 0)
+ break;
+ }
+ }
+
+ if (!ret)
+ ret = t7xx_dpmaifq_rx_notify_hw(rxq);
+
+ if (ret)
+ return ret;
+
+ return recv_skb_cnt;
+}
+
+static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
+{
+ unsigned int hw_wr_idx, pit_cnt;
+
+ if (!rxq->que_started)
+ return 0;
+
+ hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
+ DPMAIF_READ);
+ rxq->pit_wr_idx = hw_wr_idx;
+ return pit_cnt;
+}
+
+static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
+ const unsigned int q_num,
+ const unsigned int budget, int *once_more)
+{
+ struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
+ unsigned int cnt;
+ int ret = 0;
+
+ cnt = t7xx_dpmaifq_poll_pit(rxq);
+ if (!cnt)
+ return ret;
+
+ ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more);
+ if (ret < 0)
+ dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret);
+
+ return ret;
+}
+
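+/* NAPI poll callback for one RX queue. Device sleep is disabled while the HW
+ * rings are accessed; if the sleep-lock handshake has not completed yet, the
+ * poll completes early and reschedules itself instead of blocking in softirq
+ * context. PIT entries are then drained up to @budget, and interrupts plus
+ * device sleep are re-enabled only when less than the full budget was used.
+ */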
+int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
+{
+ struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
+ struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev;
+ int ret, once_more = 0, work_done = 0;
+
+ atomic_set(&rxq->rx_processing, 1);
+ /* Ensure rx_processing is changed to 1 before actually begin RX flow */
+ smp_mb();
+
+ if (!rxq->que_started) {
+ atomic_set(&rxq->rx_processing, 0);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
+ dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
+ return work_done;
+ }
+
+ if (!rxq->sleep_lock_pending)
+ t7xx_pci_disable_sleep(t7xx_dev);
+
+ ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
+ if (!ret) {
+ napi_complete_done(napi, work_done);
+ rxq->sleep_lock_pending = true;
+ napi_reschedule(napi);
+ return work_done;
+ }
+
+ rxq->sleep_lock_pending = false;
+ while (work_done < budget) {
+ int each_budget = budget - work_done;
+ int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index,
+ each_budget, &once_more);
+ if (rx_cnt > 0)
+ work_done += rx_cnt;
+ else
+ break;
+ }
+
+ if (once_more) {
+ napi_gro_flush(napi, false);
+ work_done = budget;
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ } else if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
+ atomic_set(&rxq->rx_processing, 0);
+ } else {
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ }
+
+ return work_done;
+}
+
+void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
+{
+ struct dpmaif_rx_queue *rxq;
+ struct dpmaif_ctrl *ctrl;
+ int qno, ret;
+
+ qno = ffs(que_mask) - 1;
+ if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
+ dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno);
+ return;
+ }
+
+ rxq = &dpmaif_ctrl->rxq[qno];
+ ctrl = rxq->dpmaif_ctrl;
+ /* We need to make sure that the modem has been resumed before
+ * calling napi. This can't be done inside the polling function
+ * as we could be blocked waiting for device to be resumed,
+ * which can't be done from softirq context the poll function
+ * is running in.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
+ return;
+ }
+ napi_schedule(&rxq->napi);
+}
+
+static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
+ const struct dpmaif_bat_request *bat_req)
+{
+ if (bat_req->bat_base)
+ dma_free_coherent(dpmaif_ctrl->dev,
+ bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
+ bat_req->bat_base, bat_req->bat_bus_addr);
+}
+
+/**
+ * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer.
+ * @dpmaif_ctrl: Pointer to DPMAIF context structure.
+ * @bat_req: Pointer to BAT request structure.
+ * @buf_type: BAT ring type.
+ *
+ * This function allocates the BAT ring buffer shared with the HW device, also allocates
+ * a buffer used to store information about the BAT skbs for further release.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code.
+ */
+int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
+ const enum bat_type buf_type)
+{
+ int sw_buf_size;
+
+ if (buf_type == BAT_TYPE_FRAG) {
+ sw_buf_size = sizeof(struct dpmaif_bat_page);
+ bat_req->bat_size_cnt = DPMAIF_FRG_COUNT;
+ bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF;
+ } else {
+ sw_buf_size = sizeof(struct dpmaif_bat_skb);
+ bat_req->bat_size_cnt = DPMAIF_BAT_COUNT;
+ bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF;
+ }
+
+ bat_req->type = buf_type;
+ bat_req->bat_wr_idx = 0;
+ bat_req->bat_release_rd_idx = 0;
+
+ bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev,
+ bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
+ &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!bat_req->bat_base)
+ return -ENOMEM;
+
+ /* For AP SW to record skb information */
+ bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size,
+ GFP_KERNEL);
+ if (!bat_req->bat_skb)
+ goto err_free_dma_mem;
+
+ bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL);
+ if (!bat_req->bat_bitmap)
+ goto err_free_dma_mem;
+
+ spin_lock_init(&bat_req->mask_lock);
+ atomic_set(&bat_req->refcnt, 0);
+ return 0;
+
+err_free_dma_mem:
+ t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
+
+ return -ENOMEM;
+}
+
+void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req)
+{
+ if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt))
+ return;
+
+ bitmap_free(bat_req->bat_bitmap);
+ bat_req->bat_bitmap = NULL;
+
+ if (bat_req->bat_skb) {
+ unsigned int i;
+
+ for (i = 0; i < bat_req->bat_size_cnt; i++) {
+ if (bat_req->type == BAT_TYPE_FRAG)
+ t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
+ else
+ t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
+ }
+ }
+
+ t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
+}
+
+static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
+{
+ rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
+ rxq->pit_rd_idx = 0;
+ rxq->pit_wr_idx = 0;
+ rxq->pit_release_rd_idx = 0;
+ rxq->expect_pit_seq = 0;
+ rxq->pit_remain_release_cnt = 0;
+ memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
+
+ rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
+ rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
+ &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!rxq->pit_base)
+ return -ENOMEM;
+
+ rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
+ atomic_inc(&rxq->bat_req->refcnt);
+
+ rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
+ atomic_inc(&rxq->bat_frag->refcnt);
+ return 0;
+}
+
+static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
+{
+ if (!rxq->dpmaif_ctrl)
+ return;
+
+ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
+ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
+
+ if (rxq->pit_base)
+ dma_free_coherent(rxq->dpmaif_ctrl->dev,
+ rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
+ rxq->pit_base, rxq->pit_bus_addr);
+}
+
+int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
+{
+ int ret;
+
+ ret = t7xx_dpmaif_rx_alloc(queue);
+ if (ret < 0)
+ dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
+
+ return ret;
+}
+
+void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
+{
+ t7xx_dpmaif_rx_buf_free(queue);
+}
+
+static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
+ struct dpmaif_rx_queue *rxq;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return;
+
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+
+ /* All RXQs share one BAT table, so use DPF_RX_QNO_DFT */
+ rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
+ t7xx_dpmaif_bat_release_and_add(rxq);
+ t7xx_dpmaif_frag_bat_release_and_add(rxq);
+ }
+
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+}
+
+int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
+ WQ_MEM_RECLAIM, 1);
+ if (!dpmaif_ctrl->bat_release_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work);
+ return 0;
+}
+
+void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ flush_work(&dpmaif_ctrl->bat_release_work);
+
+ if (dpmaif_ctrl->bat_release_wq) {
+ destroy_workqueue(dpmaif_ctrl->bat_release_wq);
+ dpmaif_ctrl->bat_release_wq = NULL;
+ }
+}
+
+/**
+ * t7xx_dpmaif_rx_stop() - Suspend RX flow.
+ * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl.
+ *
+ * Wait for all the RX work to finish executing and mark the RX queue as paused.
+ */
+void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ unsigned int i;
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
+ int timeout, value;
+
+ timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
+ !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (timeout)
+ dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n");
+
+ /* Ensure RX processing has stopped before we set rxq->que_started to false */
+ smp_mb();
+ rxq->que_started = false;
+ }
+}
+
+static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
+{
+ int cnt, j = 0;
+
+ rxq->que_started = false;
+
+ do {
+ cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
+ rxq->pit_wr_idx, DPMAIF_READ);
+
+ if (++j >= DPMAIF_MAX_CHECK_COUNT) {
+ dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
+ break;
+ }
+ } while (cnt);
+
+ memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
+ memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
+ bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
+ memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
+
+ rxq->pit_rd_idx = 0;
+ rxq->pit_wr_idx = 0;
+ rxq->pit_release_rd_idx = 0;
+ rxq->expect_pit_seq = 0;
+ rxq->pit_remain_release_cnt = 0;
+ rxq->bat_req->bat_release_rd_idx = 0;
+ rxq->bat_req->bat_wr_idx = 0;
+ rxq->bat_frag->bat_release_rd_idx = 0;
+ rxq->bat_frag->bat_wr_idx = 0;
+}
+
+void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++)
+ t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
new file mode 100644
index 0000000000..f4e1b69ad4
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_HIF_DPMA_RX_H__
+#define __T7XX_HIF_DPMA_RX_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#include "t7xx_hif_dpmaif.h"
+
+#define NETIF_MASK GENMASK(4, 0)
+
+#define PKT_TYPE_IP4 0
+#define PKT_TYPE_IP6 1
+
+/* Structure of DL PIT */
+struct dpmaif_pit {
+ __le32 header;
+ union {
+ struct {
+ __le32 data_addr_l;
+ __le32 data_addr_h;
+ __le32 footer;
+ } pd;
+ struct {
+ __le32 params_1;
+ __le32 params_2;
+ __le32 params_3;
+ } msg;
+ };
+};
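+
+/* Bit 0 of @header (PD_PIT_PACKET_TYPE / MSG_PIT_PACKET_TYPE) selects which
+ * union member is valid: a payload descriptor (pd) or a message entry (msg),
+ * matching DES_PT_PD / DES_PT_MSG in t7xx_hif_dpmaif_rx.c.
+ */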
+
+/* PIT header fields */
+#define PD_PIT_DATA_LEN GENMASK(31, 16)
+#define PD_PIT_BUFFER_ID GENMASK(15, 3)
+#define PD_PIT_BUFFER_TYPE BIT(2)
+#define PD_PIT_CONT BIT(1)
+#define PD_PIT_PACKET_TYPE BIT(0)
+/* PIT footer fields */
+#define PD_PIT_DLQ_DONE GENMASK(31, 30)
+#define PD_PIT_ULQ_DONE GENMASK(29, 24)
+#define PD_PIT_HEADER_OFFSET GENMASK(23, 19)
+#define PD_PIT_BI_F GENMASK(18, 17)
+#define PD_PIT_IG BIT(16)
+#define PD_PIT_RES GENMASK(15, 11)
+#define PD_PIT_H_BID GENMASK(10, 8)
+#define PD_PIT_PIT_SEQ GENMASK(7, 0)
+
+#define MSG_PIT_DP BIT(31)
+#define MSG_PIT_RES GENMASK(30, 27)
+#define MSG_PIT_NETWORK_TYPE GENMASK(26, 24)
+#define MSG_PIT_CHANNEL_ID GENMASK(23, 16)
+#define MSG_PIT_RES2 GENMASK(15, 12)
+#define MSG_PIT_HPC_IDX GENMASK(11, 8)
+#define MSG_PIT_SRC_QID GENMASK(7, 5)
+#define MSG_PIT_ERROR_BIT BIT(4)
+#define MSG_PIT_CHECKSUM GENMASK(3, 2)
+#define MSG_PIT_CONT BIT(1)
+#define MSG_PIT_PACKET_TYPE BIT(0)
+
+#define MSG_PIT_HP_IDX GENMASK(31, 27)
+#define MSG_PIT_CMD GENMASK(26, 24)
+#define MSG_PIT_RES3 GENMASK(23, 21)
+#define MSG_PIT_FLOW GENMASK(20, 16)
+#define MSG_PIT_COUNT GENMASK(15, 0)
+
+#define MSG_PIT_HASH GENMASK(31, 24)
+#define MSG_PIT_RES4 GENMASK(23, 18)
+#define MSG_PIT_PRO GENMASK(17, 16)
+#define MSG_PIT_VBID GENMASK(15, 3)
+#define MSG_PIT_RES5 GENMASK(2, 0)
+
+#define MSG_PIT_DLQ_DONE GENMASK(31, 30)
+#define MSG_PIT_ULQ_DONE GENMASK(29, 24)
+#define MSG_PIT_IP BIT(23)
+#define MSG_PIT_RES6 BIT(22)
+#define MSG_PIT_MR GENMASK(21, 20)
+#define MSG_PIT_RES7 GENMASK(19, 17)
+#define MSG_PIT_IG BIT(16)
+#define MSG_PIT_RES8 GENMASK(15, 11)
+#define MSG_PIT_H_BID GENMASK(10, 8)
+#define MSG_PIT_PIT_SEQ GENMASK(7, 0)
+
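+/* Illustrative sketch only (not part of the driver): how the PD PIT header
+ * fields defined above could be unpacked with FIELD_GET() from
+ * <linux/bitfield.h>. The helper name and the debug print are hypothetical
+ * and kept under #if 0 so they are never built.
+ */
+#if 0
+static void example_decode_pd_pit(const struct dpmaif_pit *pit)
+{
+	u32 header = le32_to_cpu(pit->header);
+
+	pr_debug("PIT: len=%lu buf_id=%lu buf_type=%lu cont=%lu\n",
+		 FIELD_GET(PD_PIT_DATA_LEN, header),
+		 FIELD_GET(PD_PIT_BUFFER_ID, header),
+		 FIELD_GET(PD_PIT_BUFFER_TYPE, header),
+		 FIELD_GET(PD_PIT_CONT, header));
+}
+#endif
+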
+int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue);
+void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl);
+int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl);
+int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
+ const struct dpmaif_bat_request *bat_req,
+ const unsigned int q_num, const unsigned int buf_cnt,
+ const bool initial);
+int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
+ const unsigned int buf_cnt, const bool first_time);
+void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl);
+void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask);
+void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue);
+void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl);
+int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
+ const enum bat_type buf_type);
+void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl,
+ struct dpmaif_bat_request *bat_req);
+int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget);
+
+#endif /* __T7XX_HIF_DPMA_RX_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
new file mode 100644
index 0000000000..8dab025a08
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
@@ -0,0 +1,684 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_hif_dpmaif.h"
+#include "t7xx_hif_dpmaif_tx.h"
+#include "t7xx_pci.h"
+
+#define DPMAIF_SKB_TX_BURST_CNT 5
+#define DPMAIF_DRB_LIST_LEN 6144
+
+/* DRB dtype */
+#define DES_DTYP_PD 0
+#define DES_DTYP_MSG 1
+
+static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl,
+ unsigned int q_num)
+{
+ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
+ unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt;
+ unsigned long flags;
+
+ if (!txq->que_started)
+ return 0;
+
+ old_sw_rd_idx = txq->drb_rd_idx;
+ new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num);
+ if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) {
+ dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx);
+ return 0;
+ }
+
+ if (old_sw_rd_idx <= new_hw_rd_idx)
+ drb_cnt = new_hw_rd_idx - old_sw_rd_idx;
+ else
+ drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx;
+
+ spin_lock_irqsave(&txq->tx_lock, flags);
+ txq->drb_rd_idx = new_hw_rd_idx;
+ spin_unlock_irqrestore(&txq->tx_lock, flags);
+
+ return drb_cnt;
+}
+
+static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl,
+ unsigned int q_num, unsigned int release_cnt)
+{
+ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
+ struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
+ struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base;
+ struct dpmaif_drb *cur_drb, *drb_base;
+ unsigned int drb_cnt, i, cur_idx;
+ unsigned long flags;
+
+ drb_skb_base = txq->drb_skb_base;
+ drb_base = txq->drb_base;
+
+ spin_lock_irqsave(&txq->tx_lock, flags);
+ drb_cnt = txq->drb_size_cnt;
+ cur_idx = txq->drb_release_rd_idx;
+ spin_unlock_irqrestore(&txq->tx_lock, flags);
+
+ for (i = 0; i < release_cnt; i++) {
+ cur_drb = drb_base + cur_idx;
+ if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) {
+ cur_drb_skb = drb_skb_base + cur_idx;
+ if (!cur_drb_skb->is_msg)
+ dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr,
+ cur_drb_skb->data_len, DMA_TO_DEVICE);
+
+ if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) {
+ if (!cur_drb_skb->skb) {
+ dev_err(dpmaif_ctrl->dev,
+ "txq%u: DRB check fail, invalid skb\n", q_num);
+ continue;
+ }
+
+ dev_kfree_skb_any(cur_drb_skb->skb);
+ }
+
+ cur_drb_skb->skb = NULL;
+ }
+
+ spin_lock_irqsave(&txq->tx_lock, flags);
+ cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx);
+ txq->drb_release_rd_idx = cur_idx;
+ spin_unlock_irqrestore(&txq->tx_lock, flags);
+
+ if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8)
+ cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index);
+ }
+
+ if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header)))
+ dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num);
+
+ return i;
+}
+
+static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl,
+ unsigned int q_num, unsigned int budget)
+{
+ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
+ unsigned int rel_cnt, real_rel_cnt;
+
+ /* Update read index from HW */
+ t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num);
+
+ rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
+ txq->drb_rd_idx, DPMAIF_READ);
+
+ real_rel_cnt = min_not_zero(budget, rel_cnt);
+ if (real_rel_cnt)
+ real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt);
+
+ return real_rel_cnt < rel_cnt ? -EAGAIN : 0;
+}
+
+static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq)
+{
+ return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index);
+}
+
+static void t7xx_dpmaif_tx_done(struct work_struct *work)
+{
+ struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
+ struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl;
+ struct dpmaif_hw_info *hw_info;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return;
+
+ /* The device may be in low power state. Disable sleep if needed */
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
+ hw_info = &dpmaif_ctrl->hw_info;
+ ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
+ if (ret == -EAGAIN ||
+ (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
+ t7xx_dpmaif_drb_ring_not_empty(txq))) {
+ queue_work(dpmaif_ctrl->txq[txq->index].worker,
+ &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
+ /* Give the device time to enter the low power state */
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+ } else {
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+ t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
+ }
+ }
+
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+}
+
+static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
+ unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l,
+ unsigned int channel_id)
+{
+ struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
+ struct dpmaif_drb *drb = drb_base + cur_idx;
+
+ drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) |
+ FIELD_PREP(DRB_HDR_CONT, 1) |
+ FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len));
+
+ drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) |
+ FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) |
+ FIELD_PREP(DRB_MSG_L4_CHK, 1));
+}
+
+static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
+ unsigned int cur_idx, dma_addr_t data_addr,
+ unsigned int pkt_size, bool last_one)
+{
+ struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
+ struct dpmaif_drb *drb = drb_base + cur_idx;
+ u32 header;
+
+ header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size);
+ if (!last_one)
+ header |= FIELD_PREP(DRB_HDR_CONT, 1);
+
+ drb->header = cpu_to_le32(header);
+ drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr));
+ drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr));
+}
+
+static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
+ unsigned int cur_idx, struct sk_buff *skb, bool is_msg,
+ bool is_frag, bool is_last_one, dma_addr_t bus_addr,
+ unsigned int data_len)
+{
+ struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base;
+ struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx;
+
+ drb_skb->skb = skb;
+ drb_skb->bus_addr = bus_addr;
+ drb_skb->data_len = data_len;
+ drb_skb->index = cur_idx;
+ drb_skb->is_msg = is_msg;
+ drb_skb->is_frag = is_frag;
+ drb_skb->is_last = is_last_one;
+}
+
+static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb)
+{
+ struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
+ unsigned int wr_cnt, send_cnt, payload_cnt;
+ unsigned int cur_idx, drb_wr_idx_backup;
+ struct skb_shared_info *shinfo;
+ struct dpmaif_tx_queue *txq;
+ struct t7xx_skb_cb *skb_cb;
+ unsigned long flags;
+
+ skb_cb = T7XX_SKB_CB(skb);
+ txq = &dpmaif_ctrl->txq[skb_cb->txq_number];
+ if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON)
+ return -ENODEV;
+
+ atomic_set(&txq->tx_processing, 1);
+ /* Ensure tx_processing is changed to 1 before actually begin TX flow */
+ smp_mb();
+
+ shinfo = skb_shinfo(skb);
+ if (shinfo->frag_list)
+ dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n");
+
+ payload_cnt = shinfo->nr_frags + 1;
+	/* payload_cnt covers the frags plus skb->data; add one more DRB for the msg header */
+ send_cnt = payload_cnt + 1;
+
+ spin_lock_irqsave(&txq->tx_lock, flags);
+ cur_idx = txq->drb_wr_idx;
+ drb_wr_idx_backup = cur_idx;
+ txq->drb_wr_idx += send_cnt;
+ if (txq->drb_wr_idx >= txq->drb_size_cnt)
+ txq->drb_wr_idx -= txq->drb_size_cnt;
+ t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx);
+ t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0);
+ spin_unlock_irqrestore(&txq->tx_lock, flags);
+
+ for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) {
+ bool is_frag, is_last_one = wr_cnt == payload_cnt - 1;
+ unsigned int data_len;
+ dma_addr_t bus_addr;
+ void *data_addr;
+
+ if (!wr_cnt) {
+ data_len = skb_headlen(skb);
+ data_addr = skb->data;
+ is_frag = false;
+ } else {
+ skb_frag_t *frag = shinfo->frags + wr_cnt - 1;
+
+ data_len = skb_frag_size(frag);
+ data_addr = skb_frag_address(frag);
+ is_frag = true;
+ }
+
+ bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr))
+ goto unmap_buffers;
+
+ cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx);
+
+ spin_lock_irqsave(&txq->tx_lock, flags);
+ t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len,
+ is_last_one);
+ t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag,
+ is_last_one, bus_addr, data_len);
+ spin_unlock_irqrestore(&txq->tx_lock, flags);
+ }
+
+ if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2))
+ cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index);
+
+ atomic_set(&txq->tx_processing, 0);
+
+ return 0;
+
+unmap_buffers:
+ while (wr_cnt--) {
+ struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base;
+
+ cur_idx = cur_idx ? cur_idx - 1 : txq->drb_size_cnt - 1;
+ drb_skb += cur_idx;
+ dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr,
+ drb_skb->data_len, DMA_TO_DEVICE);
+ }
+
+ txq->drb_wr_idx = drb_wr_idx_backup;
+ atomic_set(&txq->tx_processing, 0);
+
+ return -ENOMEM;
+}
+
+static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head))
+ return false;
+ }
+
+ return true;
+}
+
+/* Currently, only the default TX queue is used */
+static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_tx_queue *txq;
+
+ txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE];
+ if (!txq->que_started)
+ return NULL;
+
+ return txq;
+}
+
+static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq)
+{
+ return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
+ txq->drb_wr_idx, DPMAIF_WRITE);
+}
+
+static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb)
+{
+ /* Normal DRB (frags data + skb linear data) + msg DRB */
+ return skb_shinfo(skb)->nr_frags + 2;
+}
+
+static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq)
+{
+ unsigned int drb_remain_cnt, i;
+ unsigned int send_drb_cnt;
+ int drb_cnt = 0;
+ int ret = 0;
+
+ drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
+
+ for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) {
+ struct sk_buff *skb;
+
+ skb = skb_peek(&txq->tx_skb_head);
+ if (!skb)
+ break;
+
+ send_drb_cnt = t7xx_skb_drb_cnt(skb);
+ if (drb_remain_cnt < send_drb_cnt) {
+ drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
+ continue;
+ }
+
+ drb_remain_cnt -= send_drb_cnt;
+
+ ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb);
+ if (ret < 0) {
+ dev_err(txq->dpmaif_ctrl->dev,
+ "Failed to add skb to device's ring: %d\n", ret);
+ break;
+ }
+
+ drb_cnt += send_drb_cnt;
+ skb_unlink(skb, &txq->tx_skb_head);
+ }
+
+ if (drb_cnt > 0)
+ return drb_cnt;
+
+ return ret;
+}
+
+static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ bool wait_disable_sleep = true;
+
+ do {
+ struct dpmaif_tx_queue *txq;
+ int drb_send_cnt;
+
+ txq = t7xx_select_tx_queue(dpmaif_ctrl);
+ if (!txq)
+ return;
+
+ drb_send_cnt = t7xx_txq_burst_send_skb(txq);
+ if (drb_send_cnt <= 0) {
+ usleep_range(10, 20);
+ cond_resched();
+ continue;
+ }
+
+ /* Wait for the PCIe resource to unlock */
+ if (wait_disable_sleep) {
+ if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
+ return;
+
+ wait_disable_sleep = false;
+ }
+
+ t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
+ drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);
+
+ cond_resched();
+ } while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() &&
+ (dpmaif_ctrl->state == DPMAIF_STATE_PWRON));
+}
+
+static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = arg;
+ int ret;
+
+ while (!kthread_should_stop()) {
+ if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) ||
+ dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
+ if (wait_event_interruptible(dpmaif_ctrl->tx_wq,
+ (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) &&
+ dpmaif_ctrl->state == DPMAIF_STATE_PWRON) ||
+ kthread_should_stop()))
+ continue;
+
+ if (kthread_should_stop())
+ break;
+ }
+
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+ t7xx_do_tx_hw_push(dpmaif_ctrl);
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+ }
+
+ return 0;
+}
+
+int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ init_waitqueue_head(&dpmaif_ctrl->tx_wq);
+ dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread,
+ dpmaif_ctrl, "dpmaif_tx_hw_push");
+ return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread);
+}
+
+void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ if (dpmaif_ctrl->tx_thread)
+ kthread_stop(dpmaif_ctrl->tx_thread);
+}
+
+/**
+ * t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue.
+ * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl.
+ * @txq_number: Queue number to xmit on.
+ * @skb: Pointer to the skb to transmit.
+ *
+ * Add the skb to the queue of skbs to be transmitted.
+ * Wake up the thread that pushes the skbs from the queue to the HW.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -EBUSY - Tx budget exhausted.
+ * In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full
+ * state to prevent this error condition.
+ */
+int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
+ struct sk_buff *skb)
+{
+ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number];
+ struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
+ struct t7xx_skb_cb *skb_cb;
+
+ if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) {
+ cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
+ return -EBUSY;
+ }
+
+ skb_cb = T7XX_SKB_CB(skb);
+ skb_cb->txq_number = txq_number;
+ skb_queue_tail(&txq->tx_skb_head, skb);
+ wake_up(&dpmaif_ctrl->tx_wq);
+
+ return 0;
+}
+
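+/* Illustrative sketch only (not part of the driver): a caller is expected to
+ * treat a non-zero return as "try again later", as the driver's own netdev
+ * path (t7xx_ccmni_send_packet() in t7xx_netdev.c) does by returning
+ * NETDEV_TX_BUSY. The fragment below is hypothetical, assumes dpmaif_ctrl and
+ * skb exist in the caller's scope, and is kept under #if 0.
+ */
+#if 0
+	if (t7xx_dpmaif_tx_send_skb(dpmaif_ctrl, DPMAIF_TX_DEFAULT_QUEUE, skb))
+		return NETDEV_TX_BUSY;	/* The stack requeues the skb, do not free it */
+#endif
+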
+void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ if (que_mask & BIT(i))
+ queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
+ }
+}
+
+static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq)
+{
+ size_t brb_skb_size, brb_pd_size;
+
+ brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb);
+ brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb);
+
+ txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN;
+
+ /* For HW && AP SW */
+ txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
+ &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!txq->drb_base)
+ return -ENOMEM;
+
+ /* For AP SW to record the skb information */
+ txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL);
+ if (!txq->drb_skb_base) {
+ dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
+ txq->drb_base, txq->drb_bus_addr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq)
+{
+ struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base;
+ unsigned int i;
+
+ if (!drb_skb_base)
+ return;
+
+ for (i = 0; i < txq->drb_size_cnt; i++) {
+ drb_skb = drb_skb_base + i;
+ if (!drb_skb->skb)
+ continue;
+
+ if (!drb_skb->is_msg)
+ dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr,
+ drb_skb->data_len, DMA_TO_DEVICE);
+
+ if (drb_skb->is_last) {
+ dev_kfree_skb(drb_skb->skb);
+ drb_skb->skb = NULL;
+ }
+ }
+}
+
+static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq)
+{
+ if (txq->drb_base)
+ dma_free_coherent(txq->dpmaif_ctrl->dev,
+ txq->drb_size_cnt * sizeof(struct dpmaif_drb),
+ txq->drb_base, txq->drb_bus_addr);
+
+ t7xx_dpmaif_tx_free_drb_skb(txq);
+}
+
+/**
+ * t7xx_dpmaif_txq_init() - Initialize TX queue.
+ * @txq: Pointer to struct dpmaif_tx_queue.
+ *
+ * Initialize the TX queue data structure and allocate memory for it to use.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code from a failed sub-initialization.
+ */
+int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq)
+{
+ int ret;
+
+ skb_queue_head_init(&txq->tx_skb_head);
+ init_waitqueue_head(&txq->req_wq);
+ atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN);
+
+ ret = t7xx_dpmaif_tx_drb_buf_init(txq);
+ if (ret) {
+ dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret);
+ return ret;
+ }
+
+ txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker",
+ WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI),
+ txq->index);
+ if (!txq->worker)
+ return -ENOMEM;
+
+ INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done);
+ spin_lock_init(&txq->tx_lock);
+
+ return 0;
+}
+
+void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq)
+{
+ if (txq->worker)
+ destroy_workqueue(txq->worker);
+
+ skb_queue_purge(&txq->tx_skb_head);
+ t7xx_dpmaif_tx_drb_buf_rel(txq);
+}
+
+void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ struct dpmaif_tx_queue *txq;
+ int count = 0;
+
+ txq = &dpmaif_ctrl->txq[i];
+ txq->que_started = false;
+ /* Make sure TXQ is disabled */
+ smp_mb();
+
+ /* Wait for active Tx to be done */
+ while (atomic_read(&txq->tx_processing)) {
+ if (++count >= DPMAIF_MAX_CHECK_COUNT) {
+ dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n");
+ break;
+ }
+ }
+ }
+}
+
+static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq)
+{
+ txq->que_started = false;
+
+ cancel_work_sync(&txq->dpmaif_tx_work);
+ flush_work(&txq->dpmaif_tx_work);
+ t7xx_dpmaif_tx_free_drb_skb(txq);
+
+ txq->drb_rd_idx = 0;
+ txq->drb_wr_idx = 0;
+ txq->drb_release_rd_idx = 0;
+}
+
+void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++)
+ t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h
new file mode 100644
index 0000000000..ca9b9ea2da
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_HIF_DPMA_TX_H__
+#define __T7XX_HIF_DPMA_TX_H__
+
+#include <linux/bits.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "t7xx_hif_dpmaif.h"
+
+#define DPMAIF_TX_DEFAULT_QUEUE 0
+
+struct dpmaif_drb {
+ __le32 header;
+ union {
+ struct {
+ __le32 data_addr_l;
+ __le32 data_addr_h;
+ } pd;
+ struct {
+ __le32 msg_hdr;
+ __le32 reserved1;
+ } msg;
+ };
+ __le32 reserved2;
+};
+
+/* Header fields */
+#define DRB_HDR_DATA_LEN GENMASK(31, 16)
+#define DRB_HDR_RESERVED GENMASK(15, 3)
+#define DRB_HDR_CONT BIT(2)
+#define DRB_HDR_DTYP GENMASK(1, 0)
+
+#define DRB_MSG_DW2_RES GENMASK(31, 30)
+#define DRB_MSG_L4_CHK BIT(29)
+#define DRB_MSG_IP_CHK BIT(28)
+#define DRB_MSG_RESERVED BIT(27)
+#define DRB_MSG_NETWORK_TYPE GENMASK(26, 24)
+#define DRB_MSG_CHANNEL_ID GENMASK(23, 16)
+#define DRB_MSG_COUNT_L GENMASK(15, 0)
+
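+/* Illustrative sketch only (not part of the driver): composing a message DRB
+ * with the masks above, mirroring t7xx_setup_msg_drb() in
+ * t7xx_hif_dpmaif_tx.c. The literal 1 is the MSG dtype (DES_DTYP_MSG in that
+ * file); the helper here is hypothetical and kept under #if 0.
+ */
+#if 0
+static void example_fill_msg_drb(struct dpmaif_drb *drb, unsigned int pkt_len,
+				 unsigned int channel_id)
+{
+	drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, 1) |
+				  FIELD_PREP(DRB_HDR_CONT, 1) |
+				  FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len));
+	drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) |
+				       FIELD_PREP(DRB_MSG_L4_CHK, 1));
+}
+#endif
+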
+struct dpmaif_drb_skb {
+ struct sk_buff *skb;
+ dma_addr_t bus_addr;
+ unsigned int data_len;
+ u16 index:13;
+ u16 is_msg:1;
+ u16 is_frag:1;
+ u16 is_last:1;
+};
+
+int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
+ struct sk_buff *skb);
+void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl);
+int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl);
+void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq);
+void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask);
+int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq);
+void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl);
+void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl);
+
+#endif /* __T7XX_HIF_DPMA_TX_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.c b/drivers/net/wwan/t7xx/t7xx_mhccif.c
new file mode 100644
index 0000000000..3ee18d46f8
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/dev_printk.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+
+#include "t7xx_mhccif.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_reg.h"
+
+#define D2H_INT_SR_ACK (D2H_INT_SUSPEND_ACK | \
+ D2H_INT_RESUME_ACK | \
+ D2H_INT_SUSPEND_ACK_AP | \
+ D2H_INT_RESUME_ACK_AP)
+
+static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask)
+{
+ void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base;
+
+ /* Clear level 2 interrupt */
+ iowrite32(mask, mhccif_pbase + REG_EP2RC_SW_INT_ACK);
+ /* Ensure write is complete */
+ t7xx_mhccif_read_sw_int_sts(t7xx_dev);
+ /* Clear level 1 interrupt */
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, MHCCIF_INT);
+}
+
+static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data)
+{
+ struct t7xx_pci_dev *t7xx_dev = data;
+ u32 int_status, val;
+
+ val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1);
+ iowrite32(val, IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+
+ int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev);
+ if (int_status & D2H_SW_INT_MASK) {
+ int ret = t7xx_pci_mhccif_isr(t7xx_dev);
+
+ if (ret)
+ dev_err(&t7xx_dev->pdev->dev, "PCI MHCCIF ISR failure: %d", ret);
+ }
+
+ t7xx_mhccif_clear_interrupts(t7xx_dev, int_status);
+
+ if (int_status & D2H_INT_DS_LOCK_ACK)
+ complete_all(&t7xx_dev->sleep_lock_acquire);
+
+ if (int_status & D2H_INT_SR_ACK)
+ complete(&t7xx_dev->pm_sr_ack);
+
+ iowrite32(T7XX_L1_BIT(1), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+
+ int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev);
+ if (!int_status) {
+ val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1);
+ iowrite32(val, IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ }
+
+ t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
+ return IRQ_HANDLED;
+}
+
+u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev)
+{
+ return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_STS);
+}
+
+void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val)
+{
+ iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_SET);
+}
+
+void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val)
+{
+ iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_CLR);
+}
+
+u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev)
+{
+ return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK);
+}
+
+static irqreturn_t t7xx_mhccif_isr_handler(int irq, void *data)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_dev->base_addr.mhccif_rc_base = t7xx_dev->base_addr.pcie_ext_reg_base +
+ MHCCIF_RC_DEV_BASE -
+ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
+
+ t7xx_dev->intr_handler[MHCCIF_INT] = t7xx_mhccif_isr_handler;
+ t7xx_dev->intr_thread[MHCCIF_INT] = t7xx_mhccif_isr_thread;
+ t7xx_dev->callback_param[MHCCIF_INT] = t7xx_dev;
+}
+
+void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel)
+{
+ void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base;
+
+ iowrite32(BIT(channel), mhccif_pbase + REG_RC2EP_SW_BSY);
+ iowrite32(channel, mhccif_pbase + REG_RC2EP_SW_TCHNUM);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.h b/drivers/net/wwan/t7xx/t7xx_mhccif.h
new file mode 100644
index 0000000000..20c50dce9f
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#ifndef __T7XX_MHCCIF_H__
+#define __T7XX_MHCCIF_H__
+
+#include <linux/types.h>
+
+#include "t7xx_pci.h"
+#include "t7xx_reg.h"
+
+#define D2H_SW_INT_MASK (D2H_INT_EXCEPTION_INIT | \
+ D2H_INT_EXCEPTION_INIT_DONE | \
+ D2H_INT_EXCEPTION_CLEARQ_DONE | \
+ D2H_INT_EXCEPTION_ALLQ_RESET | \
+ D2H_INT_PORT_ENUM | \
+ D2H_INT_ASYNC_AP_HK | \
+ D2H_INT_ASYNC_MD_HK)
+
+void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val);
+void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val);
+u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev);
+u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel);
+
+#endif /*__T7XX_MHCCIF_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
new file mode 100644
index 0000000000..24e7d49146
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/kthread.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_cldma.h"
+#include "t7xx_hif_cldma.h"
+#include "t7xx_mhccif.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_netdev.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_reg.h"
+#include "t7xx_state_monitor.h"
+
+#define RT_ID_MD_PORT_ENUM 0
+#define RT_ID_AP_PORT_ENUM 1
+/* Modem feature query identification code - "ICCC" */
+#define MD_FEATURE_QUERY_ID 0x49434343
+
+#define FEATURE_VER GENMASK(7, 4)
+#define FEATURE_MSK GENMASK(3, 0)
+
+#define RGU_RESET_DELAY_MS 10
+#define PORT_RESET_DELAY_MS 2000
+#define EX_HS_TIMEOUT_MS 5000
+#define EX_HS_POLL_DELAY_MS 10
+
+enum mtk_feature_support_type {
+ MTK_FEATURE_DOES_NOT_EXIST,
+ MTK_FEATURE_NOT_SUPPORTED,
+ MTK_FEATURE_MUST_BE_SUPPORTED,
+};
+
+static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
+{
+ return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
+}
+
+/**
+ * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
+ * @t7xx_dev: MTK device.
+ *
+ * Check the interrupt status and queue commands accordingly.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -EINVAL - Failure to get FSM control.
+ */
+int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_modem *md = t7xx_dev->md;
+ struct t7xx_fsm_ctl *ctl;
+ unsigned int int_sta;
+ int ret = 0;
+ u32 mask;
+
+ ctl = md->fsm_ctl;
+ if (!ctl) {
+ dev_err_ratelimited(&t7xx_dev->pdev->dev,
+ "MHCCIF interrupt received before initializing MD monitor\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&md->exp_lock);
+ int_sta = t7xx_get_interrupt_status(t7xx_dev);
+ md->exp_id |= int_sta;
+ if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
+ if (ctl->md_state == MD_STATE_INVALID ||
+ ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
+ ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
+ ctl->md_state == MD_STATE_READY) {
+ md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
+ ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
+ }
+ } else if (md->exp_id & D2H_INT_PORT_ENUM) {
+ md->exp_id &= ~D2H_INT_PORT_ENUM;
+
+ if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
+ ctl->curr_state == FSM_STATE_STOPPED)
+ ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
+ } else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
+ mask = t7xx_mhccif_mask_get(t7xx_dev);
+ if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ queue_work(md->handshake_wq, &md->handshake_work);
+ }
+ }
+ spin_unlock_bh(&md->exp_lock);
+
+ return ret;
+}
+
+static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
+ void __iomem *reset_pcie_reg;
+ u32 val;
+
+ reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
+ pbase_addr->pcie_dev_reg_trsl_addr;
+ val = ioread32(reset_pcie_reg);
+ iowrite32(val, reset_pcie_reg);
+}
+
+void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
+{
+ /* Clear L2 */
+ t7xx_clr_device_irq_via_pcie(t7xx_dev);
+ /* Clear L1 */
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
+}
+
+static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct device *dev = &t7xx_dev->pdev->dev;
+ acpi_status acpi_ret;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle) {
+ dev_err(dev, "ACPI handle not found\n");
+ return -EFAULT;
+ }
+
+ if (!acpi_has_method(handle, fn_name)) {
+ dev_err(dev, "%s method not found\n", fn_name);
+ return -EFAULT;
+ }
+
+ acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
+ if (ACPI_FAILURE(acpi_ret)) {
+ dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret));
+ return -EFAULT;
+ }
+
+ kfree(buffer.pointer);
+
+#endif
+ return 0;
+}
+
+int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
+{
+ return t7xx_acpi_reset(t7xx_dev, "_RST");
+}
+
+static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
+{
+ u32 val;
+
+ val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ if (val & MISC_RESET_TYPE_PLDR)
+ t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ else if (val & MISC_RESET_TYPE_FLDR)
+ t7xx_acpi_fldr_func(t7xx_dev);
+}
+
+static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
+{
+ struct t7xx_pci_dev *t7xx_dev = data;
+
+ msleep(RGU_RESET_DELAY_MS);
+ t7xx_reset_device_via_pmic(t7xx_dev);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
+{
+ struct t7xx_pci_dev *t7xx_dev = data;
+ struct t7xx_modem *modem;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ if (!t7xx_dev->rgu_pci_irq_en)
+ return IRQ_HANDLED;
+
+ modem = t7xx_dev->md;
+ modem->rgu_irq_asserted = true;
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+ return IRQ_WAKE_THREAD;
+}
+
+static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
+{
+ /* Registers RGU callback ISR with PCIe driver */
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
+
+ t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
+ t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
+ t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+}
+
+/**
+ * t7xx_cldma_exception() - CLDMA exception handler.
+ * @md_ctrl: modem control struct.
+ * @stage: exception stage.
+ *
+ * Part of the modem exception recovery.
+ * Stages are one after the other as described below:
+ * HIF_EX_INIT: Disable and clear TXQ.
+ * HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX.
+ * HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart.
+ */
+
+/* Modem Exception Handshake Flow
+ *
+ * Modem HW Exception interrupt received
+ * (MD_IRQ_CCIF_EX)
+ * |
+ * +---------v--------+
+ * | HIF_EX_INIT | : Disable and clear TXQ
+ * +------------------+
+ * |
+ * +---------v--------+
+ * | HIF_EX_INIT_DONE | : Wait for the init to be done
+ * +------------------+
+ * |
+ * +---------v--------+
+ * |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ
+ * +------------------+ : Flush TX/RX workqueues
+ * |
+ * +---------v--------+
+ * |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA
+ * +------------------+
+ */
+static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
+{
+ switch (stage) {
+ case HIF_EX_INIT:
+ t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
+ t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
+ break;
+
+ case HIF_EX_CLEARQ_DONE:
+ /* We do not want to get CLDMA IRQ when MD is
+ * resetting CLDMA after it got clearq_ack.
+ */
+ t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
+ t7xx_cldma_stop(md_ctrl);
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);
+
+ t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
+ break;
+
+ case HIF_EX_ALLQ_RESET:
+ t7xx_cldma_hw_init(&md_ctrl->hw_info);
+ t7xx_cldma_start(md_ctrl);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
+{
+ struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;
+
+ if (stage == HIF_EX_CLEARQ_DONE) {
+ /* Give DHL time to flush data */
+ msleep(PORT_RESET_DELAY_MS);
+ t7xx_port_proxy_reset(md->port_prox);
+ }
+
+ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
+ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);
+
+ if (stage == HIF_EX_INIT)
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
+ else if (stage == HIF_EX_CLEARQ_DONE)
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
+}
+
+static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
+{
+ unsigned int waited_time_ms = 0;
+
+ do {
+ if (md->exp_id & event_id)
+ return 0;
+
+ waited_time_ms += EX_HS_POLL_DELAY_MS;
+ msleep(EX_HS_POLL_DELAY_MS);
+ } while (waited_time_ms < EX_HS_TIMEOUT_MS);
+
+ return -EFAULT;
+}
+
+static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ /* Register the MHCCIF ISR for MD exception, port enum and
+ * async handshake notifications.
+ */
+ t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
+ t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);
+
+ /* Register RGU IRQ handler for sAP exception notification */
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_register_rgu_isr(t7xx_dev);
+}
+
+struct feature_query {
+ __le32 head_pattern;
+ u8 feature_set[FEATURE_COUNT];
+ __le32 tail_pattern;
+};
+
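+/* Illustrative sketch only (not part of the driver): each feature_set byte
+ * packs a version nibble (FEATURE_VER) and a support nibble (FEATURE_MSK),
+ * per the masks above; t7xx_md_alloc() below fills the support nibble for the
+ * port-enumeration features. The fragment is hypothetical, under #if 0.
+ */
+#if 0
+	u8 ft_byte = FIELD_PREP(FEATURE_VER, 0) |
+		     FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
+#endif
+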
+static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
+{
+ struct feature_query *ft_query;
+ struct sk_buff *skb;
+
+ skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
+ if (!skb)
+ return;
+
+ ft_query = skb_put(skb, sizeof(*ft_query));
+ ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
+ memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
+ ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
+
+ /* Send HS1 message to device */
+ t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
+}
+
+static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
+ void *data)
+{
+ struct feature_query *md_feature = data;
+ struct mtk_runtime_feature *rt_feature;
+ unsigned int i, rt_data_len = 0;
+ struct sk_buff *skb;
+
+ /* Parse MD runtime data query */
+ if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
+ le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
+ dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
+ le32_to_cpu(md_feature->head_pattern),
+ le32_to_cpu(md_feature->tail_pattern));
+ return -EINVAL;
+ }
+
+ for (i = 0; i < FEATURE_COUNT; i++) {
+ if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
+ MTK_FEATURE_MUST_BE_SUPPORTED)
+ rt_data_len += sizeof(*rt_feature);
+ }
+
+ skb = t7xx_ctrl_alloc_skb(rt_data_len);
+ if (!skb)
+ return -ENOMEM;
+
+ rt_feature = skb_put(skb, rt_data_len);
+ memset(rt_feature, 0, rt_data_len);
+
+ /* Fill runtime feature */
+ for (i = 0; i < FEATURE_COUNT; i++) {
+ u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);
+
+ if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
+ continue;
+
+ rt_feature->feature_id = i;
+ if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
+ rt_feature->support_info = md_feature->feature_set[i];
+
+ rt_feature++;
+ }
+
+ /* Send HS3 message to device */
+ t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
+ return 0;
+}
+
+static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
+ struct device *dev, void *data, int data_length)
+{
+ enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
+ struct mtk_runtime_feature *rt_feature;
+ int i, offset;
+
+ offset = sizeof(struct feature_query);
+ for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
+ rt_feature = data + offset;
+ offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);
+
+ ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
+ if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
+ continue;
+
+ ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
+ if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
+ return -EINVAL;
+
+ if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
+ t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
+ }
+
+ return 0;
+}
+
+static int t7xx_core_reset(struct t7xx_modem *md)
+{
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ md->core_md.ready = false;
+
+ if (!ctl) {
+ dev_err(dev, "FSM is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (md->core_md.handshake_ongoing) {
+ int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
+
+ if (ret)
+ return ret;
+ }
+
+ md->core_md.handshake_ongoing = false;
+ return 0;
+}
+
+static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
+ struct t7xx_fsm_ctl *ctl,
+ enum t7xx_fsm_event_state event_id,
+ enum t7xx_fsm_event_state err_detect)
+{
+ struct t7xx_fsm_event *event = NULL, *event_next;
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ unsigned long flags;
+ int ret;
+
+ t7xx_prepare_host_rt_data_query(core_info);
+
+ while (!kthread_should_stop()) {
+ bool event_received = false;
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
+ if (event->event_id == err_detect) {
+ list_del(&event->entry);
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+ dev_err(dev, "Core handshake error event received\n");
+ goto err_free_event;
+ } else if (event->event_id == event_id) {
+ list_del(&event->entry);
+ event_received = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+
+ if (event_received)
+ break;
+
+ wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ goto err_free_event;
+ }
+
+ if (!event || ctl->exp_flg)
+ goto err_free_event;
+
+ ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
+ if (ret) {
+ dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
+ goto err_free_event;
+ }
+
+ if (ctl->exp_flg)
+ goto err_free_event;
+
+ ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
+ if (ret) {
+ dev_err(dev, "Device failure parsing runtime data: %d", ret);
+ goto err_free_event;
+ }
+
+ core_info->ready = true;
+ core_info->handshake_ongoing = false;
+ wake_up(&ctl->async_hk_wq);
+err_free_event:
+ kfree(event);
+}
+
+static void t7xx_md_hk_wq(struct work_struct *work)
+{
+ struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+	/* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
+ t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
+ md->core_md.handshake_ongoing = true;
+ t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
+}
+
+static void t7xx_ap_hk_wq(struct work_struct *work)
+{
+ struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ /* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
+ t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
+ t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
+ md->core_ap.handshake_ongoing = true;
+ t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
+}
+
+void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
+{
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+ unsigned int int_sta;
+ unsigned long flags;
+
+ switch (evt_id) {
+ case FSM_PRE_START:
+ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
+ D2H_INT_ASYNC_AP_HK);
+ break;
+
+ case FSM_START:
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);
+
+ spin_lock_irqsave(&md->exp_lock, flags);
+ int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
+ md->exp_id |= int_sta;
+ if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
+ ctl->exp_flg = true;
+ md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
+ } else if (ctl->exp_flg) {
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
+ } else {
+ void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
+
+ if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
+ queue_work(md->handshake_wq, &md->handshake_work);
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ }
+
+ if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
+ queue_work(md->handshake_wq, &md->ap_handshake_work);
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
+ iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
+ }
+ }
+ spin_unlock_irqrestore(&md->exp_lock, flags);
+
+ t7xx_mhccif_mask_clr(md->t7xx_dev,
+ D2H_INT_EXCEPTION_INIT |
+ D2H_INT_EXCEPTION_INIT_DONE |
+ D2H_INT_EXCEPTION_CLEARQ_DONE |
+ D2H_INT_EXCEPTION_ALLQ_RESET);
+ break;
+
+ case FSM_READY:
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void t7xx_md_exception_handshake(struct t7xx_modem *md)
+{
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ int ret;
+
+ t7xx_md_exception(md, HIF_EX_INIT);
+ ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
+ if (ret)
+ dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);
+
+ t7xx_md_exception(md, HIF_EX_INIT_DONE);
+ ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
+ if (ret)
+ dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);
+
+ t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
+ ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
+ if (ret)
+ dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);
+
+ t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
+}
+
+static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ struct t7xx_modem *md;
+
+ md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
+ if (!md)
+ return NULL;
+
+ md->t7xx_dev = t7xx_dev;
+ t7xx_dev->md = md;
+ spin_lock_init(&md->exp_lock);
+ md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
+ 0, "md_hk_wq");
+ if (!md->handshake_wq)
+ return NULL;
+
+ INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
+ md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
+ md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
+ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
+
+ INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
+ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
+ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
+ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
+
+ return md;
+}
+
+int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_modem *md = t7xx_dev->md;
+
+ md->md_init_finish = false;
+ md->exp_id = 0;
+ t7xx_fsm_reset(md);
+ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_port_proxy_reset(md->port_prox);
+ md->md_init_finish = true;
+ return t7xx_core_reset(md);
+}
+
+/**
+ * t7xx_md_init() - Initialize modem.
+ * @t7xx_dev: MTK device.
+ *
+ * Allocate and initialize MD control block, and initialize data path.
+ * Register MHCCIF ISR and RGU ISR, and start the state machine.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ENOMEM - Allocation failure.
+ */
+int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_modem *md;
+ int ret;
+
+ md = t7xx_md_alloc(t7xx_dev);
+ if (!md)
+ return -ENOMEM;
+
+ ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
+ if (ret)
+ goto err_destroy_hswq;
+
+ ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
+ if (ret)
+ goto err_destroy_hswq;
+
+ ret = t7xx_fsm_init(md);
+ if (ret)
+ goto err_destroy_hswq;
+
+ ret = t7xx_ccmni_init(t7xx_dev);
+ if (ret)
+ goto err_uninit_fsm;
+
+ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
+ if (ret)
+ goto err_uninit_ccmni;
+
+ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
+ if (ret)
+ goto err_uninit_md_cldma;
+
+ ret = t7xx_port_proxy_init(md);
+ if (ret)
+ goto err_uninit_ap_cldma;
+
+ ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
+ if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
+ goto err_uninit_proxy;
+
+ t7xx_md_sys_sw_init(t7xx_dev);
+ md->md_init_finish = true;
+ return 0;
+
+err_uninit_proxy:
+ t7xx_port_proxy_uninit(md->port_prox);
+
+err_uninit_ap_cldma:
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
+
+err_uninit_md_cldma:
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
+
+err_uninit_ccmni:
+ t7xx_ccmni_exit(t7xx_dev);
+
+err_uninit_fsm:
+ t7xx_fsm_uninit(md);
+
+err_destroy_hswq:
+ destroy_workqueue(md->handshake_wq);
+ dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
+ return ret;
+}
+
+void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_modem *md = t7xx_dev->md;
+
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+
+ if (!md->md_init_finish)
+ return;
+
+ t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ t7xx_port_proxy_uninit(md->port_prox);
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_ccmni_exit(t7xx_dev);
+ t7xx_fsm_uninit(md);
+ destroy_workqueue(md->handshake_wq);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
new file mode 100644
index 0000000000..abe633cf7a
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_MODEM_OPS_H__
+#define __T7XX_MODEM_OPS_H__
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_pci.h"
+
+#define FEATURE_COUNT 64
+
+/**
+ * enum hif_ex_stage - HIF exception handshake stages with the HW.
+ * @HIF_EX_INIT: Disable and clear TXQ.
+ * @HIF_EX_INIT_DONE: Polling for initialization to be done.
+ * @HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX.
+ * @HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart.
+ */
+enum hif_ex_stage {
+ HIF_EX_INIT,
+ HIF_EX_INIT_DONE,
+ HIF_EX_CLEARQ_DONE,
+ HIF_EX_ALLQ_RESET,
+};
+
+struct mtk_runtime_feature {
+ u8 feature_id;
+ u8 support_info;
+ u8 reserved[2];
+ __le32 data_len;
+ __le32 data[];
+};
+
+enum md_event_id {
+ FSM_PRE_START,
+ FSM_START,
+ FSM_READY,
+};
+
+struct t7xx_sys_info {
+ bool ready;
+ bool handshake_ongoing;
+ u8 feature_set[FEATURE_COUNT];
+ struct t7xx_port *ctl_port;
+};
+
+struct t7xx_modem {
+ struct cldma_ctrl *md_ctrl[CLDMA_NUM];
+ struct t7xx_pci_dev *t7xx_dev;
+ struct t7xx_sys_info core_md;
+ struct t7xx_sys_info core_ap;
+ bool md_init_finish;
+ bool rgu_irq_asserted;
+ struct workqueue_struct *handshake_wq;
+ struct work_struct handshake_work;
+ struct work_struct ap_handshake_work;
+ struct t7xx_fsm_ctl *fsm_ctl;
+ struct port_proxy *port_prox;
+ unsigned int exp_id;
+ spinlock_t exp_lock; /* Protects exception events */
+};
+
+void t7xx_md_exception_handshake(struct t7xx_modem *md);
+void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id);
+int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev);
+
+#endif /* __T7XX_MODEM_OPS_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
new file mode 100644
index 0000000000..3ef4a8a4f8
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdev_features.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/wwan.h>
+#include <net/ipv6.h>
+#include <net/pkt_sched.h>
+
+#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_hif_dpmaif_tx.h"
+#include "t7xx_netdev.h"
+#include "t7xx_pci.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define IP_MUX_SESSION_DEFAULT 0
+#define SBD_PACKET_TYPE_MASK GENMASK(7, 4)
+
+static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct dpmaif_ctrl *ctrl;
+ int i, ret;
+
+ ctrl = ctlb->hif_ctrl;
+
+ if (ctlb->is_napi_en)
+ return;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ /* The usage count has to be bumped every time before calling
+ * napi_schedule. It will be decreased in the poll routine,
+ * right after napi_complete_done is called.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n",
+ ret);
+ return;
+ }
+ napi_enable(ctlb->napi[i]);
+ napi_schedule(ctlb->napi[i]);
+ }
+ ctlb->is_napi_en = true;
+}
+
+static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ if (!ctlb->is_napi_en)
+ return;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ napi_synchronize(ctlb->napi[i]);
+ napi_disable(ctlb->napi[i]);
+ }
+
+ ctlb->is_napi_en = false;
+}
+
+static int t7xx_ccmni_open(struct net_device *dev)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+ if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt))
+ t7xx_ccmni_enable_napi(ccmni_ctl);
+
+ atomic_inc(&ccmni->usage);
+ return 0;
+}
+
+static int t7xx_ccmni_close(struct net_device *dev)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;
+
+ atomic_dec(&ccmni->usage);
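+	/* The last close drops the final NAPI reference and disables NAPI. */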
+ if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt))
+ t7xx_ccmni_disable_napi(ccmni_ctl);
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ return 0;
+}
+
+static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
+ unsigned int txq_number)
+{
+ struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
+ struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);
+
+ skb_cb->netif_idx = ccmni->index;
+
+ if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
+ return NETDEV_TX_BUSY;
+
+ return 0;
+}
+
+static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ int skb_len = skb->len;
+
+	/* Drop the packet if it exceeds the current MTU or lacks headroom for the CCCI header */
+ if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
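+	/* NETDEV_TX_BUSY asks the core to requeue the skb and retry the transmit later. */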
+ if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
+ return NETDEV_TX_BUSY;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb_len;
+
+ return NETDEV_TX_OK;
+}
+
+static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
+{
+ struct t7xx_ccmni *ccmni = netdev_priv(dev);
+
+ dev->stats.tx_errors++;
+
+ if (atomic_read(&ccmni->usage) > 0)
+ netif_tx_wake_all_queues(dev);
+}
+
+static const struct net_device_ops ccmni_netdev_ops = {
+ .ndo_open = t7xx_ccmni_open,
+ .ndo_stop = t7xx_ccmni_close,
+ .ndo_start_xmit = t7xx_ccmni_start_xmit,
+ .ndo_tx_timeout = t7xx_ccmni_tx_timeout,
+};
+
+static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct t7xx_ccmni *ccmni;
+ int i;
+
+ for (i = 0; i < ctlb->nic_dev_num; i++) {
+ ccmni = ctlb->ccmni_inst[i];
+ if (!ccmni)
+ continue;
+
+ if (atomic_read(&ccmni->usage) > 0) {
+ netif_tx_start_all_queues(ccmni->dev);
+ netif_carrier_on(ccmni->dev);
+ }
+ }
+
+ if (atomic_read(&ctlb->napi_usr_refcnt))
+ t7xx_ccmni_enable_napi(ctlb);
+}
+
+static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct t7xx_ccmni *ccmni;
+ int i;
+
+ for (i = 0; i < ctlb->nic_dev_num; i++) {
+ ccmni = ctlb->ccmni_inst[i];
+ if (!ccmni)
+ continue;
+
+ if (atomic_read(&ccmni->usage) > 0)
+ netif_tx_disable(ccmni->dev);
+ }
+}
+
+static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct t7xx_ccmni *ccmni;
+ int i;
+
+ if (atomic_read(&ctlb->napi_usr_refcnt))
+ t7xx_ccmni_disable_napi(ctlb);
+
+ for (i = 0; i < ctlb->nic_dev_num; i++) {
+ ccmni = ctlb->ccmni_inst[i];
+ if (!ccmni)
+ continue;
+
+ if (atomic_read(&ccmni->usage) > 0)
+ netif_carrier_off(ccmni->dev);
+ }
+}
+
+static void t7xx_ccmni_wwan_setup(struct net_device *dev)
+{
+ dev->needed_headroom += sizeof(struct ccci_header);
+
+ dev->mtu = ETH_DATA_LEN;
+ dev->max_mtu = CCMNI_MTU_MAX;
+ BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);
+
+ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;
+
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+ dev->features = NETIF_F_VLAN_CHALLENGED;
+
+ dev->features |= NETIF_F_SG;
+ dev->hw_features |= NETIF_F_SG;
+
+ dev->features |= NETIF_F_HW_CSUM;
+ dev->hw_features |= NETIF_F_HW_CSUM;
+
+ dev->features |= NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_RXCSUM;
+
+ dev->features |= NETIF_F_GRO;
+ dev->hw_features |= NETIF_F_GRO;
+
+ dev->needs_free_netdev = true;
+
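+	/* Pure IP interface: no L2 header and no ARP. */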
+ dev->type = ARPHRD_NONE;
+
+ dev->netdev_ops = &ccmni_netdev_ops;
+}
+
+static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+	/* One set of HW queues is shared by multiple net devices,
+	 * so register NAPI on a dummy device.
+	 */
+ init_dummy_netdev(&ctlb->dummy_dev);
+ atomic_set(&ctlb->napi_usr_refcnt, 0);
+ ctlb->is_napi_en = false;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
+ netif_napi_add_weight(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
+ NIC_NAPI_POLL_BUDGET);
+ }
+}
+
+static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ netif_napi_del(ctlb->napi[i]);
+ ctlb->napi[i] = NULL;
+ }
+}
+
+static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
+ struct netlink_ext_ack *extack)
+{
+ struct t7xx_ccmni_ctrl *ctlb = ctxt;
+ struct t7xx_ccmni *ccmni;
+ int ret;
+
+ if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
+ return -EINVAL;
+
+ ccmni = wwan_netdev_drvpriv(dev);
+ ccmni->index = if_id;
+ ccmni->ctlb = ctlb;
+ ccmni->dev = dev;
+ atomic_set(&ccmni->usage, 0);
+ ctlb->ccmni_inst[if_id] = ccmni;
+
+ ret = register_netdevice(dev);
+ if (ret)
+ return ret;
+
+ netif_device_attach(dev);
+ return 0;
+}
+
+static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ctlb = ctxt;
+ u8 if_id = ccmni->index;
+
+ if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
+ return;
+
+ if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
+ return;
+
+ unregister_netdevice(dev);
+}
+
+static const struct wwan_ops ccmni_wwan_ops = {
+ .priv_size = sizeof(struct t7xx_ccmni),
+ .setup = t7xx_ccmni_wwan_setup,
+ .newlink = t7xx_ccmni_wwan_newlink,
+ .dellink = t7xx_ccmni_wwan_dellink,
+};
+
+static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct device *dev = ctlb->hif_ctrl->dev;
+ int ret;
+
+ if (ctlb->wwan_is_registered)
+ return 0;
+
+ /* WWAN core will create a netdev for the default IP MUX channel */
+ ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
+ if (ret < 0) {
+ dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
+ return ret;
+ }
+
+ ctlb->wwan_is_registered = true;
+ return 0;
+}
+
+static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
+{
+ struct t7xx_ccmni_ctrl *ctlb = para;
+ struct device *dev;
+ int ret = 0;
+
+ dev = ctlb->hif_ctrl->dev;
+ ctlb->md_sta = state;
+
+ switch (state) {
+ case MD_STATE_READY:
+ ret = t7xx_ccmni_register_wwan(ctlb);
+ if (!ret)
+ t7xx_ccmni_start(ctlb);
+ break;
+
+ case MD_STATE_EXCEPTION:
+ case MD_STATE_STOPPED:
+ t7xx_ccmni_pre_stop(ctlb);
+
+ ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
+ if (ret < 0)
+ dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
+
+ t7xx_ccmni_post_stop(ctlb);
+ break;
+
+ case MD_STATE_WAITING_FOR_HS1:
+ case MD_STATE_WAITING_TO_STOP:
+ ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
+ if (ret < 0)
+ dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
+ struct t7xx_fsm_notifier *md_status_notifier;
+
+ md_status_notifier = &ctlb->md_status_notify;
+ INIT_LIST_HEAD(&md_status_notifier->entry);
+ md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
+ md_status_notifier->data = ctlb;
+
+ t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
+}
+
+static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
+ struct napi_struct *napi)
+{
+ struct t7xx_skb_cb *skb_cb;
+ struct net_device *net_dev;
+ struct t7xx_ccmni *ccmni;
+ int pkt_type, skb_len;
+ u8 netif_id;
+
+ skb_cb = T7XX_SKB_CB(skb);
+ netif_id = skb_cb->netif_idx;
+ ccmni = ccmni_ctlb->ccmni_inst[netif_id];
+ if (!ccmni) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ net_dev = ccmni->dev;
+ pkt_type = skb_cb->rx_pkt_type;
+ skb->dev = net_dev;
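+	/* Frames arrive as raw IP, so derive the protocol from the RX packet type
+	 * saved in the skb control block.
+	 */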
+ if (pkt_type == PKT_TYPE_IP6)
+ skb->protocol = htons(ETH_P_IPV6);
+ else
+ skb->protocol = htons(ETH_P_IP);
+
+ skb_len = skb->len;
+ napi_gro_receive(napi, skb);
+ net_dev->stats.rx_packets++;
+ net_dev->stats.rx_bytes += skb_len;
+}
+
+static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
+{
+ struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+ struct netdev_queue *net_queue;
+
+ if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
+ net_queue = netdev_get_tx_queue(ccmni->dev, qno);
+ if (netif_tx_queue_stopped(net_queue))
+ netif_tx_wake_queue(net_queue);
+ }
+}
+
+static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
+{
+ struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+ struct netdev_queue *net_queue;
+
+ if (atomic_read(&ccmni->usage) > 0) {
+ netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
+ net_queue = netdev_get_tx_queue(ccmni->dev, qno);
+ netif_tx_stop_queue(net_queue);
+ }
+}
+
+static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
+ enum dpmaif_txq_state state, int qno)
+{
+ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
+
+ if (ctlb->md_sta != MD_STATE_READY)
+ return;
+
+ if (!ctlb->ccmni_inst[0]) {
+ dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
+ return;
+ }
+
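+	/* Wake the TX queue on a DPMAIF TX IRQ notification and stop it when the
+	 * DPMAIF TX queue reports full.
+	 */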
+ if (state == DMPAIF_TXQ_STATE_IRQ)
+ t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
+ else if (state == DMPAIF_TXQ_STATE_FULL)
+ t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
+}
+
+int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ struct t7xx_ccmni_ctrl *ctlb;
+
+ ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
+ if (!ctlb)
+ return -ENOMEM;
+
+ t7xx_dev->ccmni_ctlb = ctlb;
+ ctlb->t7xx_dev = t7xx_dev;
+ ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
+ ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
+ ctlb->nic_dev_num = NIC_DEV_DEFAULT;
+
+ ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
+ if (!ctlb->hif_ctrl)
+ return -ENOMEM;
+
+ t7xx_init_netdev_napi(ctlb);
+ init_md_status_notifier(t7xx_dev);
+ return 0;
+}
+
+void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
+
+ t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);
+
+ if (ctlb->wwan_is_registered) {
+ wwan_unregister_ops(&t7xx_dev->pdev->dev);
+ ctlb->wwan_is_registered = false;
+ }
+
+ t7xx_uninit_netdev_napi(ctlb);
+ t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.h b/drivers/net/wwan/t7xx/t7xx_netdev.h
new file mode 100644
index 0000000000..f5ed6f99a1
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#ifndef __T7XX_NETDEV_H__
+#define __T7XX_NETDEV_H__
+
+#include <linux/bits.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#include "t7xx_hif_dpmaif.h"
+#include "t7xx_pci.h"
+#include "t7xx_state_monitor.h"
+
+#define RXQ_NUM DPMAIF_RXQ_NUM
+#define NIC_DEV_MAX 21
+#define NIC_DEV_DEFAULT 2
+
+#define CCMNI_NETDEV_WDT_TO (1 * HZ)
+#define CCMNI_MTU_MAX 3000
+#define NIC_NAPI_POLL_BUDGET 128
+
+struct t7xx_ccmni {
+ u8 index;
+ atomic_t usage;
+ struct net_device *dev;
+ struct t7xx_ccmni_ctrl *ctlb;
+};
+
+struct t7xx_ccmni_ctrl {
+ struct t7xx_pci_dev *t7xx_dev;
+ struct dpmaif_ctrl *hif_ctrl;
+ struct t7xx_ccmni *ccmni_inst[NIC_DEV_MAX];
+ struct dpmaif_callbacks callbacks;
+ unsigned int nic_dev_num;
+ unsigned int md_sta;
+ struct t7xx_fsm_notifier md_status_notify;
+ bool wwan_is_registered;
+ struct net_device dummy_dev;
+ struct napi_struct *napi[RXQ_NUM];
+ atomic_t napi_usr_refcnt;
+ bool is_napi_en;
+};
+
+int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev);
+
+#endif /* __T7XX_NETDEV_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
new file mode 100644
index 0000000000..91256e005b
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
+#include <linux/spinlock.h>
+
+#include "t7xx_mhccif.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_reg.h"
+#include "t7xx_state_monitor.h"
+
+#define T7XX_PCI_IREG_BASE 0
+#define T7XX_PCI_EREG_BASE 2
+
+#define T7XX_INIT_TIMEOUT 20
+#define PM_SLEEP_DIS_TIMEOUT_MS 20
+#define PM_ACK_TIMEOUT_MS 1500
+#define PM_AUTOSUSPEND_MS 20000
+#define PM_RESOURCE_POLL_TIMEOUT_US 10000
+#define PM_RESOURCE_POLL_STEP_US 100
+
+enum t7xx_pm_state {
+ MTK_PM_EXCEPTION,
+ MTK_PM_INIT, /* Device initialized, but handshake not completed */
+ MTK_PM_SUSPENDED,
+ MTK_PM_RESUMED,
+};
+
+static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
+{
+ void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
+ u32 value;
+
+ value = ioread32(ctrl_reg);
+
+ if (enable)
+ value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
+ else
+ value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;
+
+ iowrite32(value, ctrl_reg);
+}
+
+static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
+{
+ int ret, val;
+
+ ret = read_poll_timeout(ioread32, val,
+ (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
+ PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
+ IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
+ if (ret == -ETIMEDOUT)
+ dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");
+
+ return ret;
+}
+
+static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct pci_dev *pdev = t7xx_dev->pdev;
+
+ INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
+ mutex_init(&t7xx_dev->md_pm_entity_mtx);
+ spin_lock_init(&t7xx_dev->md_pm_lock);
+ init_completion(&t7xx_dev->sleep_lock_acquire);
+ init_completion(&t7xx_dev->pm_sr_ack);
+ init_completion(&t7xx_dev->init_done);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
+
+ device_init_wakeup(&pdev->dev, true);
+ dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
+ DPM_FLAG_NO_DIRECT_COMPLETE);
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ return t7xx_wait_pm_config(t7xx_dev);
+}
+
+void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
+{
+ /* Enable the PCIe resource lock only after MD deep sleep is done */
+ t7xx_mhccif_mask_clr(t7xx_dev,
+ D2H_INT_DS_LOCK_ACK |
+ D2H_INT_SUSPEND_ACK |
+ D2H_INT_RESUME_ACK |
+ D2H_INT_SUSPEND_ACK_AP |
+ D2H_INT_RESUME_ACK_AP);
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+
+ pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
+ pm_runtime_allow(&t7xx_dev->pdev->dev);
+ pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+ complete_all(&t7xx_dev->init_done);
+}
+
+static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
+{
+	/* The device is kept in the FSM re-init flow,
+	 * so just roll back the PM settings to their init values.
+	 */
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
+
+ pm_runtime_get_noresume(&t7xx_dev->pdev->dev);
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ return t7xx_wait_pm_config(t7xx_dev);
+}
+
+void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
+{
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ t7xx_wait_pm_config(t7xx_dev);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
+}
+
+int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
+{
+ struct md_pm_entity *entity;
+
+ mutex_lock(&t7xx_dev->md_pm_entity_mtx);
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->id == pm_entity->id) {
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+ return -EEXIST;
+ }
+ }
+
+ list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+ return 0;
+}
+
+int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
+{
+ struct md_pm_entity *entity, *tmp_entity;
+
+ mutex_lock(&t7xx_dev->md_pm_entity_mtx);
+ list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->id == pm_entity->id) {
+ list_del(&pm_entity->entity);
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+ return 0;
+ }
+ }
+
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+
+ return -ENXIO;
+}
+
+int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ int ret;
+
+ ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
+ msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
+ if (!ret)
+ dev_err_ratelimited(dev, "Resource wait complete timed out\n");
+
+ return ret;
+}
+
+/**
+ * t7xx_pci_disable_sleep() - Disable deep sleep capability.
+ * @t7xx_dev: MTK device.
+ *
+ * Lock the deep sleep capability. Note that the device can still go into deep sleep
+ * while it is in the D0 state from the host's point of view.
+ *
+ * If the device is already in deep sleep, wake it up and disable the deep sleep capability.
+ */
+void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
+ t7xx_dev->sleep_disable_count++;
+ if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
+ goto unlock_and_complete;
+
+ if (t7xx_dev->sleep_disable_count == 1) {
+ u32 status;
+
+ reinit_completion(&t7xx_dev->sleep_lock_acquire);
+ t7xx_dev_set_sleep_capability(t7xx_dev, false);
+
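+		/* If the PCIe resources are already held, complete immediately; otherwise
+		 * request the deep sleep lock and let the device ACK (presumably delivered
+		 * via MHCCIF) complete sleep_lock_acquire.
+		 */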
+ status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
+ if (status & T7XX_PCIE_RESOURCE_STS_MSK)
+ goto unlock_and_complete;
+
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
+ }
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+ return;
+
+unlock_and_complete:
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+ complete_all(&t7xx_dev->sleep_lock_acquire);
+}
+
+/**
+ * t7xx_pci_enable_sleep() - Enable deep sleep capability.
+ * @t7xx_dev: MTK device.
+ *
+ * After the deep sleep capability is enabled, the device may enter the deep sleep state.
+ */
+void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
+ t7xx_dev->sleep_disable_count--;
+ if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
+ goto unlock;
+
+ if (t7xx_dev->sleep_disable_count == 0)
+ t7xx_dev_set_sleep_capability(t7xx_dev, true);
+
+unlock:
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+}
+
+static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
+{
+ unsigned long wait_ret;
+
+ reinit_completion(&t7xx_dev->pm_sr_ack);
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
+ wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
+ msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
+ if (!wait_ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
+{
+ enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct md_pm_entity *entity;
+ int ret;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
+ dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
+ return -EFAULT;
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ ret = t7xx_wait_pm_config(t7xx_dev);
+ if (ret) {
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ return ret;
+ }
+
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+ t7xx_dev->rgu_pci_irq_en = false;
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (!entity->suspend)
+ continue;
+
+ ret = entity->suspend(t7xx_dev, entity->entity_param);
+ if (ret) {
+ entity_id = entity->id;
+ dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
+ goto abort_suspend;
+ }
+ }
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
+ if (ret) {
+ dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
+ goto abort_suspend;
+ }
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
+ if (ret) {
+ t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
+ dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
+ goto abort_suspend;
+ }
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->suspend_late)
+ entity->suspend_late(t7xx_dev, entity->entity_param);
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ return 0;
+
+abort_suspend:
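+	/* Unwind: resume every entity that was suspended successfully, stopping at the
+	 * one that failed.
+	 */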
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity_id == entity->id)
+ break;
+
+ if (entity->resume)
+ entity->resume(t7xx_dev, entity->entity_param);
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+ return ret;
+}
+
+static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
+
+ /* Disable interrupt first and let the IPs enable them */
+ iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);
+
+	/* The device disables PCIe interrupts during resume;
+	 * the following call re-enables them.
+	 */
+ t7xx_pcie_mac_interrupts_en(t7xx_dev);
+ t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
+}
+
+static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
+{
+ int ret;
+
+ ret = pcim_enable_device(t7xx_dev->pdev);
+ if (ret)
+ return ret;
+
+ t7xx_pcie_mac_atr_init(t7xx_dev);
+ t7xx_pcie_interrupt_reinit(t7xx_dev);
+
+ if (is_d3) {
+ t7xx_mhccif_init(t7xx_dev);
+ return t7xx_pci_pm_reinit(t7xx_dev);
+ }
+
+ return 0;
+}
+
+static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
+{
+ struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
+ struct device *dev = &t7xx_dev->pdev->dev;
+ int ret = -EINVAL;
+
+ switch (event) {
+ case FSM_CMD_STOP:
+ ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ break;
+
+ case FSM_CMD_START:
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+ ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret)
+ dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);
+
+ return ret;
+}
+
+static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ struct md_pm_entity *entity;
+ u32 prev_state;
+ int ret = 0;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ return 0;
+ }
+
+ t7xx_pcie_mac_interrupts_en(t7xx_dev);
+ prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);
+
+ if (state_check) {
+ /* For D3/L3 resume, the device could boot so quickly that the
+ * initial value of the dummy register might be overwritten.
+ * Identify new boots if the ATR source address register is not initialized.
+ */
+ u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
+ ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
+ if (prev_state == PM_RESUME_REG_STATE_L3 ||
+ (prev_state == PM_RESUME_REG_STATE_INIT &&
+ atr_reg_val == ATR_SRC_ADDR_INVALID)) {
+ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
+ if (ret)
+ return ret;
+
+ ret = t7xx_pcie_reinit(t7xx_dev, true);
+ if (ret)
+ return ret;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
+ }
+
+ if (prev_state == PM_RESUME_REG_STATE_EXP ||
+ prev_state == PM_RESUME_REG_STATE_L2_EXP) {
+ if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
+ ret = t7xx_pcie_reinit(t7xx_dev, false);
+ if (ret)
+ return ret;
+ }
+
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+
+ t7xx_mhccif_mask_clr(t7xx_dev,
+ D2H_INT_EXCEPTION_INIT |
+ D2H_INT_EXCEPTION_INIT_DONE |
+ D2H_INT_EXCEPTION_CLEARQ_DONE |
+ D2H_INT_EXCEPTION_ALLQ_RESET |
+ D2H_INT_PORT_ENUM);
+
+ return ret;
+ }
+
+ if (prev_state == PM_RESUME_REG_STATE_L2) {
+ ret = t7xx_pcie_reinit(t7xx_dev, false);
+ if (ret)
+ return ret;
+
+ } else if (prev_state != PM_RESUME_REG_STATE_L1 &&
+ prev_state != PM_RESUME_REG_STATE_INIT) {
+ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
+ if (ret)
+ return ret;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
+ return 0;
+ }
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ t7xx_wait_pm_config(t7xx_dev);
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->resume_early)
+ entity->resume_early(t7xx_dev, entity->entity_param);
+ }
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
+ if (ret)
+ dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
+ if (ret)
+ dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->resume) {
+ ret = entity->resume(t7xx_dev, entity->entity_param);
+ if (ret)
+ dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
+ entity->id, ret);
+ }
+ }
+
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+
+ return ret;
+}
+
+static int t7xx_pci_pm_resume_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct t7xx_pci_dev *t7xx_dev;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ t7xx_pcie_mac_interrupts_dis(t7xx_dev);
+
+ return 0;
+}
+
+static void t7xx_pci_shutdown(struct pci_dev *pdev)
+{
+ __t7xx_pci_pm_suspend(pdev);
+}
+
+static int t7xx_pci_pm_prepare(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct t7xx_pci_dev *t7xx_dev;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
+ dev_warn(dev, "Not ready for system sleep.\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int t7xx_pci_pm_suspend(struct device *dev)
+{
+ return __t7xx_pci_pm_suspend(to_pci_dev(dev));
+}
+
+static int t7xx_pci_pm_resume(struct device *dev)
+{
+ return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
+}
+
+static int t7xx_pci_pm_thaw(struct device *dev)
+{
+ return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
+}
+
+static int t7xx_pci_pm_runtime_suspend(struct device *dev)
+{
+ return __t7xx_pci_pm_suspend(to_pci_dev(dev));
+}
+
+static int t7xx_pci_pm_runtime_resume(struct device *dev)
+{
+ return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
+}
+
+static const struct dev_pm_ops t7xx_pci_pm_ops = {
+ .prepare = t7xx_pci_pm_prepare,
+ .suspend = t7xx_pci_pm_suspend,
+ .resume = t7xx_pci_pm_resume,
+ .resume_noirq = t7xx_pci_pm_resume_noirq,
+ .freeze = t7xx_pci_pm_suspend,
+ .thaw = t7xx_pci_pm_thaw,
+ .poweroff = t7xx_pci_pm_suspend,
+ .restore = t7xx_pci_pm_resume,
+ .restore_noirq = t7xx_pci_pm_resume_noirq,
+ .runtime_suspend = t7xx_pci_pm_runtime_suspend,
+ .runtime_resume = t7xx_pci_pm_runtime_resume
+};
+
+static int t7xx_request_irq(struct pci_dev *pdev)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ int ret = 0, i;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+
+ for (i = 0; i < EXT_INT_NUM; i++) {
+ const char *irq_descr;
+ int irq_vec;
+
+ if (!t7xx_dev->intr_handler[i])
+ continue;
+
+ irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
+ dev_driver_string(&pdev->dev), i);
+ if (!irq_descr) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ irq_vec = pci_irq_vector(pdev, i);
+ ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
+ t7xx_dev->intr_thread[i], 0, irq_descr,
+ t7xx_dev->callback_param[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
+ break;
+ }
+ }
+
+ if (ret) {
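+		/* Unwind: free only the IRQs that were successfully requested before the failure. */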
+ while (i--) {
+ if (!t7xx_dev->intr_handler[i])
+ continue;
+
+ free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
+ }
+ }
+
+ return ret;
+}
+
+static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct pci_dev *pdev = t7xx_dev->pdev;
+ int ret;
+
+	/* Only 6 interrupts are used, but the HW design requires a power-of-2 IRQ allocation */
+ ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
+ return ret;
+ }
+
+ ret = t7xx_request_irq(pdev);
+ if (ret) {
+ pci_free_irq_vectors(pdev);
+ return ret;
+ }
+
+ t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
+ return 0;
+}
+
+static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ int ret, i;
+
+ if (!t7xx_dev->pdev->msix_cap)
+ return -EINVAL;
+
+ ret = t7xx_setup_msix(t7xx_dev);
+ if (ret)
+ return ret;
+
+ /* IPs enable interrupts when ready */
+ for (i = 0; i < EXT_INT_NUM; i++)
+ t7xx_pcie_mac_set_int(t7xx_dev, i);
+
+ return 0;
+}
+
+static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
+ INFRACFG_AO_DEV_CHIP -
+ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
+}
+
+static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ int ret;
+
+ t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
+ if (!t7xx_dev)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, t7xx_dev);
+ t7xx_dev->pdev = pdev;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
+ pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
+ return -ENOMEM;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
+ t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];
+
+ ret = t7xx_pci_pm_init(t7xx_dev);
+ if (ret)
+ return ret;
+
+ t7xx_pcie_mac_atr_init(t7xx_dev);
+ t7xx_pci_infracfg_ao_calc(t7xx_dev);
+ t7xx_mhccif_init(t7xx_dev);
+
+ ret = t7xx_md_init(t7xx_dev);
+ if (ret)
+ return ret;
+
+ t7xx_pcie_mac_interrupts_dis(t7xx_dev);
+
+ ret = t7xx_interrupt_init(t7xx_dev);
+ if (ret) {
+ t7xx_md_exit(t7xx_dev);
+ return ret;
+ }
+
+ t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
+ t7xx_pcie_mac_interrupts_en(t7xx_dev);
+
+ return 0;
+}
+
+static void t7xx_pci_remove(struct pci_dev *pdev)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ int i;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ t7xx_md_exit(t7xx_dev);
+
+ for (i = 0; i < EXT_INT_NUM; i++) {
+ if (!t7xx_dev->intr_handler[i])
+ continue;
+
+ free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
+ }
+
+ pci_free_irq_vectors(t7xx_dev->pdev);
+}
+
+static const struct pci_device_id t7xx_pci_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, t7xx_pci_table);
+
+static struct pci_driver t7xx_pci_driver = {
+ .name = "mtk_t7xx",
+ .id_table = t7xx_pci_table,
+ .probe = t7xx_pci_probe,
+ .remove = t7xx_pci_remove,
+ .driver.pm = &t7xx_pci_pm_ops,
+ .shutdown = t7xx_pci_shutdown,
+};
+
+module_pci_driver(t7xx_pci_driver);
+
+MODULE_AUTHOR("MediaTek Inc");
+MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
new file mode 100644
index 0000000000..f08f1ab744
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ */
+
+#ifndef __T7XX_PCI_H__
+#define __T7XX_PCI_H__
+
+#include <linux/completion.h>
+#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "t7xx_reg.h"
+
+/* struct t7xx_addr_base - holds base addresses
+ * @pcie_mac_ireg_base: PCIe MAC register base
+ * @pcie_ext_reg_base: used to calculate base addresses for CLDMA, DPMA and MHCCIF registers
+ * @pcie_dev_reg_trsl_addr: used to calculate the register base address
+ * @infracfg_ao_base: base address used in CLDMA reset operations
+ * @mhccif_rc_base: host view of MHCCIF rc base addr
+ */
+struct t7xx_addr_base {
+ void __iomem *pcie_mac_ireg_base;
+ void __iomem *pcie_ext_reg_base;
+ u32 pcie_dev_reg_trsl_addr;
+ void __iomem *infracfg_ao_base;
+ void __iomem *mhccif_rc_base;
+};
+
+typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
+
+/* struct t7xx_pci_dev - MTK device context structure
+ * @intr_handler: array of handler function for request_threaded_irq
+ * @intr_thread: array of thread_fn for request_threaded_irq
+ * @callback_param: array of cookie passed back to interrupt functions
+ * @pdev: PCI device
+ * @base_addr: memory base addresses of HW components
+ * @md: modem interface
+ * @ccmni_ctlb: context structure used to control the network data path
+ * @rgu_pci_irq_en: RGU callback ISR registered and active
+ * @md_pm_entities: list of pm entities
+ * @md_pm_entity_mtx: protects md_pm_entities list
+ * @pm_sr_ack:		ack from the device after it has gone to sleep or woken up
+ * @md_pm_state: state for resume/suspend
+ * @md_pm_lock: protects PCIe sleep lock
+ * @sleep_disable_count: PCIe L1.2 lock counter
+ * @sleep_lock_acquire: indicates that sleep has been disabled
+ */
+struct t7xx_pci_dev {
+ t7xx_intr_callback intr_handler[EXT_INT_NUM];
+ t7xx_intr_callback intr_thread[EXT_INT_NUM];
+ void *callback_param[EXT_INT_NUM];
+ struct pci_dev *pdev;
+ struct t7xx_addr_base base_addr;
+ struct t7xx_modem *md;
+ struct t7xx_ccmni_ctrl *ccmni_ctlb;
+ bool rgu_pci_irq_en;
+ struct completion init_done;
+
+ /* Low Power Items */
+ struct list_head md_pm_entities;
+ struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */
+ struct completion pm_sr_ack;
+ atomic_t md_pm_state;
+ spinlock_t md_pm_lock; /* Protects PCI resource lock */
+ unsigned int sleep_disable_count;
+ struct completion sleep_lock_acquire;
+#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
+};
+
+enum t7xx_pm_id {
+ PM_ENTITY_ID_CTRL1,
+ PM_ENTITY_ID_CTRL2,
+ PM_ENTITY_ID_DATA,
+ PM_ENTITY_ID_INVALID
+};
+
+/* struct md_pm_entity - device power management entity
+ * @entity: list of PM Entities
+ * @suspend: callback invoked before sending D3 request to device
+ * @suspend_late: callback invoked after getting D3 ACK from device
+ * @resume_early: callback invoked before sending the resume request to device
+ * @resume: callback invoked after getting resume ACK from device
+ * @id: unique PM entity identifier
+ * @entity_param: parameter passed to the registered callbacks
+ *
+ * This structure is used to indicate PM operations required by internal
+ * HW modules such as CLDMA and DPMA.
+ */
+struct md_pm_entity {
+ struct list_head entity;
+ int (*suspend)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ void (*suspend_late)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ void (*resume_early)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ int (*resume)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ enum t7xx_pm_id id;
+ void *entity_param;
+};
+
+void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
+int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
+void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
+
+#endif /* __T7XX_PCI_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
new file mode 100644
index 0000000000..76da4c15e3
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_reg.h"
+
+#define T7XX_PCIE_REG_BAR 2
+#define T7XX_PCIE_REG_PORT ATR_SRC_PCI_WIN0
+#define T7XX_PCIE_REG_TABLE_NUM 0
+#define T7XX_PCIE_REG_TRSL_PORT ATR_DST_AXIM_0
+
+#define T7XX_PCIE_DEV_DMA_PORT_START ATR_SRC_AXIS_0
+#define T7XX_PCIE_DEV_DMA_PORT_END ATR_SRC_AXIS_2
+#define T7XX_PCIE_DEV_DMA_TABLE_NUM 0
+#define T7XX_PCIE_DEV_DMA_TRSL_ADDR 0
+#define T7XX_PCIE_DEV_DMA_SRC_ADDR 0
+#define T7XX_PCIE_DEV_DMA_TRANSPARENT 1
+#define T7XX_PCIE_DEV_DMA_SIZE 0
+
+enum t7xx_atr_src_port {
+ ATR_SRC_PCI_WIN0,
+ ATR_SRC_PCI_WIN1,
+ ATR_SRC_AXIS_0,
+ ATR_SRC_AXIS_1,
+ ATR_SRC_AXIS_2,
+ ATR_SRC_AXIS_3,
+};
+
+enum t7xx_atr_dst_port {
+ ATR_DST_PCI_TRX,
+ ATR_DST_PCI_CONFIG,
+ ATR_DST_AXIM_0 = 4,
+ ATR_DST_AXIM_1,
+ ATR_DST_AXIM_2,
+ ATR_DST_AXIM_3,
+};
+
+struct t7xx_atr_config {
+ u64 src_addr;
+ u64 trsl_addr;
+ u64 size;
+ u32 port;
+ u32 table;
+ enum t7xx_atr_dst_port trsl_id;
+ u32 transparent;
+};
+
+static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_port port)
+{
+ void __iomem *reg;
+ int i, offset;
+
+ for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) {
+ offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i;
+ reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
+ iowrite64(0, reg);
+ }
+}
+
+static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_config *cfg)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ void __iomem *pbase = IREG_BASE(t7xx_dev);
+ int atr_size, pos, offset;
+ void __iomem *reg;
+ u64 value;
+
+ if (cfg->transparent) {
+ /* No address conversion is performed */
+ atr_size = ATR_TRANSPARENT_SIZE;
+ } else {
+ if (cfg->src_addr & (cfg->size - 1)) {
+ dev_err(dev, "Source address is not aligned to size\n");
+ return -EINVAL;
+ }
+
+ if (cfg->trsl_addr & (cfg->size - 1)) {
+ dev_err(dev, "Translation address %llx is not aligned to size %llx\n",
+ cfg->trsl_addr, cfg->size - 1);
+ return -EINVAL;
+ }
+
+ pos = __ffs64(cfg->size);
+
+ /* HW calculates the address translation space as 2^(atr_size + 1) */
+ atr_size = pos - 1;
+ }
+
+ offset = ATR_PORT_OFFSET * cfg->port + ATR_TABLE_OFFSET * cfg->table;
+
+ reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset;
+ value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT;
+ iowrite64(value, reg);
+
+ reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset;
+ iowrite32(cfg->trsl_id, reg);
+
+ reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
+ value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0);
+ iowrite64(value, reg);
+
+ /* Ensure ATR is set */
+ ioread64(reg);
+ return 0;
+}
+
+/**
+ * t7xx_pcie_mac_atr_init() - Initialize address translation.
+ * @t7xx_dev: MTK device.
+ *
+ * Set up the ATR for ports & device.
+ */
+void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_atr_config cfg;
+ u32 i;
+
+ /* Disable for all ports */
+ for (i = ATR_SRC_PCI_WIN0; i <= ATR_SRC_AXIS_3; i++)
+ t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), i);
+
+ memset(&cfg, 0, sizeof(cfg));
+ /* Config ATR for RC to access device's register */
+ cfg.src_addr = pci_resource_start(t7xx_dev->pdev, T7XX_PCIE_REG_BAR);
+ cfg.size = T7XX_PCIE_REG_SIZE_CHIP;
+ cfg.trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP;
+ cfg.port = T7XX_PCIE_REG_PORT;
+ cfg.table = T7XX_PCIE_REG_TABLE_NUM;
+ cfg.trsl_id = T7XX_PCIE_REG_TRSL_PORT;
+ t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port);
+ t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg);
+
+ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP;
+
+ /* Config ATR for EP to access RC's memory */
+ for (i = T7XX_PCIE_DEV_DMA_PORT_START; i <= T7XX_PCIE_DEV_DMA_PORT_END; i++) {
+ cfg.src_addr = T7XX_PCIE_DEV_DMA_SRC_ADDR;
+ cfg.size = T7XX_PCIE_DEV_DMA_SIZE;
+ cfg.trsl_addr = T7XX_PCIE_DEV_DMA_TRSL_ADDR;
+ cfg.port = i;
+ cfg.table = T7XX_PCIE_DEV_DMA_TABLE_NUM;
+ cfg.trsl_id = ATR_DST_PCI_TRX;
+ cfg.transparent = T7XX_PCIE_DEV_DMA_TRANSPARENT;
+ t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port);
+ t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg);
+ }
+}
+
+/**
+ * t7xx_pcie_mac_enable_disable_int() - Enable/disable interrupts.
+ * @t7xx_dev: MTK device.
+ * @enable: Enable/disable.
+ *
+ * Enable or disable device interrupts.
+ */
+static void t7xx_pcie_mac_enable_disable_int(struct t7xx_pci_dev *t7xx_dev, bool enable)
+{
+ u32 value;
+
+ value = ioread32(IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL);
+
+ if (enable)
+ value &= ~ISTAT_HST_CTRL_DIS;
+ else
+ value |= ISTAT_HST_CTRL_DIS;
+
+ iowrite32(value, IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL);
+}
+
+void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_pcie_mac_enable_disable_int(t7xx_dev, true);
+}
+
+void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_pcie_mac_enable_disable_int(t7xx_dev, false);
+}
+
+/**
+ * t7xx_pcie_mac_clear_set_int() - Clear/set interrupt by type.
+ * @t7xx_dev: MTK device.
+ * @int_type: Interrupt type.
+ * @clear: Clear/set.
+ *
+ * Clear or set device interrupt by type.
+ */
+static void t7xx_pcie_mac_clear_set_int(struct t7xx_pci_dev *t7xx_dev,
+ enum t7xx_int int_type, bool clear)
+{
+ void __iomem *reg;
+ u32 val;
+
+ if (clear)
+ reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0;
+ else
+ reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_SET_GRP0_0;
+
+ val = BIT(EXT_INT_START + int_type);
+ iowrite32(val, reg);
+}
+
+void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
+{
+ t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, true);
+}
+
+void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
+{
+ t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, false);
+}
+
+/**
+ * t7xx_pcie_mac_clear_int_status() - Clear interrupt status by type.
+ * @t7xx_dev: MTK device.
+ * @int_type: Interrupt type.
+ *
+ * Clear the device interrupt status for the given type.
+ */
+void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
+{
+ void __iomem *reg = IREG_BASE(t7xx_dev) + MSIX_ISTAT_HST_GRP0_0;
+ u32 val = BIT(EXT_INT_START + int_type);
+
+ iowrite32(val, reg);
+}
+
+/**
+ * t7xx_pcie_set_mac_msix_cfg() - Write MSIX control configuration.
+ * @t7xx_dev: MTK device.
+ * @irq_count: Number of MSIX IRQ vectors.
+ *
+ * Write IRQ count to device.
+ */
+void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count)
+{
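+	/* irq_count is expected to be a power of two (EXT_INT_NUM), so this encodes
+	 * 2 * log2(irq_count) + 1, presumably the format the MAC expects for its
+	 * MSI-X vector configuration.
+	 */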
+ u32 val = ffs(irq_count) * 2 - 1;
+
+ iowrite32(val, IREG_BASE(t7xx_dev) + T7XX_PCIE_CFG_MSIX);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.h b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h
new file mode 100644
index 0000000000..50336fa7e8
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#ifndef __T7XX_PCIE_MAC_H__
+#define __T7XX_PCIE_MAC_H__
+
+#include "t7xx_pci.h"
+#include "t7xx_reg.h"
+
+#define IREG_BASE(t7xx_dev) ((t7xx_dev)->base_addr.pcie_mac_ireg_base)
+
+void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type);
+void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type);
+void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type);
+void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count);
+
+#endif /* __T7XX_PCIE_MAC_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h
new file mode 100644
index 0000000000..4ae8a00a85
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ */
+
+#ifndef __T7XX_PORT_H__
+#define __T7XX_PORT_H__
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/wwan.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_pci.h"
+
+#define PORT_CH_ID_MASK GENMASK(7, 0)
+
+/* Channel ID and Message ID definitions.
+ * The channel number consists of peer_id (15:12) and channel_id (11:0).
+ * peer_id:
+ * 0:reserved, 1: to AP, 2: to MD
+ */
+enum port_ch {
+ /* to AP */
+ PORT_CH_AP_CONTROL_RX = 0x1000,
+ PORT_CH_AP_CONTROL_TX = 0x1001,
+
+ /* to MD */
+ PORT_CH_CONTROL_RX = 0x2000,
+ PORT_CH_CONTROL_TX = 0x2001,
+ PORT_CH_UART1_RX = 0x2006, /* META */
+ PORT_CH_UART1_TX = 0x2008,
+ PORT_CH_UART2_RX = 0x200a, /* AT */
+ PORT_CH_UART2_TX = 0x200c,
+ PORT_CH_MD_LOG_RX = 0x202a, /* MD logging */
+ PORT_CH_MD_LOG_TX = 0x202b,
+ PORT_CH_LB_IT_RX = 0x203e, /* Loop back test */
+ PORT_CH_LB_IT_TX = 0x203f,
+ PORT_CH_STATUS_RX = 0x2043, /* Status events */
+ PORT_CH_MIPC_RX = 0x20ce, /* MIPC */
+ PORT_CH_MIPC_TX = 0x20cf,
+ PORT_CH_MBIM_RX = 0x20d0,
+ PORT_CH_MBIM_TX = 0x20d1,
+ PORT_CH_DSS0_RX = 0x20d2,
+ PORT_CH_DSS0_TX = 0x20d3,
+ PORT_CH_DSS1_RX = 0x20d4,
+ PORT_CH_DSS1_TX = 0x20d5,
+ PORT_CH_DSS2_RX = 0x20d6,
+ PORT_CH_DSS2_TX = 0x20d7,
+ PORT_CH_DSS3_RX = 0x20d8,
+ PORT_CH_DSS3_TX = 0x20d9,
+ PORT_CH_DSS4_RX = 0x20da,
+ PORT_CH_DSS4_TX = 0x20db,
+ PORT_CH_DSS5_RX = 0x20dc,
+ PORT_CH_DSS5_TX = 0x20dd,
+ PORT_CH_DSS6_RX = 0x20de,
+ PORT_CH_DSS6_TX = 0x20df,
+ PORT_CH_DSS7_RX = 0x20e0,
+ PORT_CH_DSS7_TX = 0x20e1,
+};
+
+struct t7xx_port;
+struct port_ops {
+ int (*init)(struct t7xx_port *port);
+ int (*recv_skb)(struct t7xx_port *port, struct sk_buff *skb);
+ void (*md_state_notify)(struct t7xx_port *port, unsigned int md_state);
+ void (*uninit)(struct t7xx_port *port);
+ int (*enable_chl)(struct t7xx_port *port);
+ int (*disable_chl)(struct t7xx_port *port);
+};
+
+struct t7xx_port_conf {
+ enum port_ch tx_ch;
+ enum port_ch rx_ch;
+ unsigned char txq_index;
+ unsigned char rxq_index;
+ unsigned char txq_exp_index;
+ unsigned char rxq_exp_index;
+ enum cldma_id path_id;
+ struct port_ops *ops;
+ char *name;
+ enum wwan_port_type port_type;
+};
+
+struct t7xx_port {
+ /* Members not initialized in definition */
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct device *dev;
+ u16 seq_nums[2]; /* TX/RX sequence numbers */
+ atomic_t usage_cnt;
+ struct list_head entry;
+ struct list_head queue_entry;
+ /* TX and RX flows are asymmetric since ports are multiplexed on
+ * queues.
+ *
+	 * TX: data blocks are sent directly to a queue. Ports do not
+	 * maintain a TX list; they only provide a wait_queue_head for
+	 * blocking writes.
+	 *
+	 * RX: each port uses an RX list to hold packets, allowing the
+	 * modem to dispatch RX packets as quickly as possible.
+ */
+ struct sk_buff_head rx_skb_list;
+ spinlock_t port_update_lock; /* Protects port configuration */
+ wait_queue_head_t rx_wq;
+ int rx_length_th;
+ bool chan_enable;
+ struct task_struct *thread;
+ union {
+ struct {
+ struct wwan_port *wwan_port;
+ } wwan;
+ struct {
+ struct rchan *relaych;
+ } log;
+ };
+};
+
+struct sk_buff *t7xx_port_alloc_skb(int payload);
+struct sk_buff *t7xx_ctrl_alloc_skb(int payload);
+int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
+int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
+ unsigned int ex_msg);
+int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
+ unsigned int ex_msg);
+
+#endif /* __T7XX_PORT_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
new file mode 100644
index 0000000000..ae632ef966
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define PORT_MSG_VERSION GENMASK(31, 16)
+#define PORT_MSG_PRT_CNT GENMASK(15, 0)
+
+struct port_msg {
+ __le32 head_pattern;
+ __le32 info;
+ __le32 tail_pattern;
+ __le32 data[];
+};
+
+static int port_ctl_send_msg_to_md(struct t7xx_port *port, unsigned int msg, unsigned int ex_msg)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = t7xx_ctrl_alloc_skb(0);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = t7xx_port_send_ctl_skb(port, skb, msg, ex_msg);
+ if (ret)
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *ctl,
+ struct sk_buff *skb)
+{
+ struct ctrl_msg_header *ctrl_msg_h = (struct ctrl_msg_header *)skb->data;
+ struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
+ enum md_state md_state;
+ int ret = -EINVAL;
+
+ md_state = t7xx_fsm_get_md_state(ctl);
+ if (md_state != MD_STATE_EXCEPTION) {
+ dev_err(dev, "Receive invalid MD_EX %x when MD state is %d\n",
+ ctrl_msg_h->ex_msg, md_state);
+ return -EINVAL;
+ }
+
+ switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) {
+ case CTL_ID_MD_EX:
+ if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ID) {
+ dev_err(dev, "Receive invalid MD_EX %x\n", ctrl_msg_h->ex_msg);
+ break;
+ }
+
+ ret = port_ctl_send_msg_to_md(port, CTL_ID_MD_EX, MD_EX_CHK_ID);
+ if (ret) {
+ dev_err(dev, "Failed to send exception message to modem\n");
+ break;
+ }
+
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX, NULL, 0);
+ if (ret)
+ dev_err(dev, "Failed to append Modem Exception event");
+
+ break;
+
+ case CTL_ID_MD_EX_ACK:
+ if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ACK_ID) {
+ dev_err(dev, "Receive invalid MD_EX_ACK %x\n", ctrl_msg_h->ex_msg);
+ break;
+ }
+
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_REC_OK, NULL, 0);
+ if (ret)
+ dev_err(dev, "Failed to append Modem Exception Received event");
+
+ break;
+
+ case CTL_ID_MD_EX_PASS:
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_PASS, NULL, 0);
+ if (ret)
+ dev_err(dev, "Failed to append Modem Exception Passed event");
+
+ break;
+
+ case CTL_ID_DRV_VER_ERROR:
+ dev_err(dev, "AP/MD driver version mismatch\n");
+ }
+
+ return ret;
+}
+
+/**
+ * t7xx_port_enum_msg_handler() - Parse the port enumeration message to create/remove nodes.
+ * @md: Modem context.
+ * @msg: Message.
+ *
+ * Used to control the creation and removal of device nodes.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -EFAULT - Message check failure.
+ */
+int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg)
+{
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ unsigned int version, port_count, i;
+ struct port_msg *port_msg = msg;
+
+ version = FIELD_GET(PORT_MSG_VERSION, le32_to_cpu(port_msg->info));
+ if (version != PORT_ENUM_VER ||
+ le32_to_cpu(port_msg->head_pattern) != PORT_ENUM_HEAD_PATTERN ||
+ le32_to_cpu(port_msg->tail_pattern) != PORT_ENUM_TAIL_PATTERN) {
+ dev_err(dev, "Invalid port control message %x:%x:%x\n",
+ version, le32_to_cpu(port_msg->head_pattern),
+ le32_to_cpu(port_msg->tail_pattern));
+ return -EFAULT;
+ }
+
+ port_count = FIELD_GET(PORT_MSG_PRT_CNT, le32_to_cpu(port_msg->info));
+ for (i = 0; i < port_count; i++) {
+ u32 port_info = le32_to_cpu(port_msg->data[i]);
+ unsigned int ch_id;
+ bool en_flag;
+
+ ch_id = FIELD_GET(PORT_INFO_CH_ID, port_info);
+ en_flag = port_info & PORT_INFO_ENFLG;
+ if (t7xx_port_proxy_chl_enable_disable(md->port_prox, ch_id, en_flag))
+ dev_dbg(dev, "Port:%x not found\n", ch_id);
+ }
+
+ return 0;
+}
+
+static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
+ struct ctrl_msg_header *ctrl_msg_h;
+ int ret = 0;
+
+ ctrl_msg_h = (struct ctrl_msg_header *)skb->data;
+ switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) {
+ case CTL_ID_HS2_MSG:
+ skb_pull(skb, sizeof(*ctrl_msg_h));
+
+ if (port_conf->rx_ch == PORT_CH_CONTROL_RX ||
+ port_conf->rx_ch == PORT_CH_AP_CONTROL_RX) {
+ int event = port_conf->rx_ch == PORT_CH_CONTROL_RX ?
+ FSM_EVENT_MD_HS2 : FSM_EVENT_AP_HS2;
+
+ ret = t7xx_fsm_append_event(ctl, event, skb->data,
+ le32_to_cpu(ctrl_msg_h->data_length));
+ if (ret)
+ dev_err(port->dev, "Failed to append Handshake 2 event");
+ }
+
+ dev_kfree_skb_any(skb);
+ break;
+
+ case CTL_ID_MD_EX:
+ case CTL_ID_MD_EX_ACK:
+ case CTL_ID_MD_EX_PASS:
+ case CTL_ID_DRV_VER_ERROR:
+ ret = fsm_ee_message_handler(port, ctl, skb);
+ dev_kfree_skb_any(skb);
+ break;
+
+ case CTL_ID_PORT_ENUM:
+ skb_pull(skb, sizeof(*ctrl_msg_h));
+ ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data);
+ if (!ret)
+ ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, 0);
+ else
+ ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM,
+ PORT_ENUM_VER_MISMATCH);
+
+ break;
+
+ default:
+ ret = -EINVAL;
+ dev_err(port->dev, "Unknown control message ID to FSM %x\n",
+ le32_to_cpu(ctrl_msg_h->ctrl_msg_id));
+ break;
+ }
+
+ if (ret)
+ dev_err(port->dev, "%s control message handle error: %d\n", port_conf->name, ret);
+
+ return ret;
+}
+
+static int port_ctl_rx_thread(void *arg)
+{
+ while (!kthread_should_stop()) {
+ struct t7xx_port *port = arg;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->rx_wq.lock, flags);
+ if (skb_queue_empty(&port->rx_skb_list) &&
+ wait_event_interruptible_locked_irq(port->rx_wq,
+ !skb_queue_empty(&port->rx_skb_list) ||
+ kthread_should_stop())) {
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+ continue;
+ }
+ if (kthread_should_stop()) {
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+ break;
+ }
+ skb = __skb_dequeue(&port->rx_skb_list);
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+
+ control_msg_handler(port, skb);
+ }
+
+ return 0;
+}
+
+static int port_ctl_init(struct t7xx_port *port)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ port->thread = kthread_run(port_ctl_rx_thread, port, "%s", port_conf->name);
+ if (IS_ERR(port->thread)) {
+ dev_err(port->dev, "Failed to start port control thread\n");
+ return PTR_ERR(port->thread);
+ }
+
+ port->rx_length_th = CTRL_QUEUE_MAXLEN;
+ return 0;
+}
+
+static void port_ctl_uninit(struct t7xx_port *port)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ if (port->thread)
+ kthread_stop(port->thread);
+
+ spin_lock_irqsave(&port->rx_wq.lock, flags);
+ port->rx_length_th = 0;
+ while ((skb = __skb_dequeue(&port->rx_skb_list)) != NULL)
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+}
+
+struct port_ops ctl_port_ops = {
+ .init = port_ctl_init,
+ .recv_skb = t7xx_port_enqueue_skb,
+ .uninit = port_ctl_uninit,
+};
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
new file mode 100644
index 0000000000..274846d39f
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/wwan.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define Q_IDX_CTRL 0
+#define Q_IDX_MBIM 2
+#define Q_IDX_AT_CMD 5
+
+#define INVALID_SEQ_NUM GENMASK(15, 0)
+
+#define for_each_proxy_port(i, p, proxy) \
+ for (i = 0, (p) = &(proxy)->ports[i]; \
+ i < (proxy)->port_count; \
+ i++, (p) = &(proxy)->ports[i])
+
+static const struct t7xx_port_conf t7xx_port_conf[] = {
+ {
+ .tx_ch = PORT_CH_UART2_TX,
+ .rx_ch = PORT_CH_UART2_RX,
+ .txq_index = Q_IDX_AT_CMD,
+ .rxq_index = Q_IDX_AT_CMD,
+ .txq_exp_index = 0xff,
+ .rxq_exp_index = 0xff,
+ .path_id = CLDMA_ID_MD,
+ .ops = &wwan_sub_port_ops,
+ .name = "AT",
+ .port_type = WWAN_PORT_AT,
+ }, {
+ .tx_ch = PORT_CH_MBIM_TX,
+ .rx_ch = PORT_CH_MBIM_RX,
+ .txq_index = Q_IDX_MBIM,
+ .rxq_index = Q_IDX_MBIM,
+ .path_id = CLDMA_ID_MD,
+ .ops = &wwan_sub_port_ops,
+ .name = "MBIM",
+ .port_type = WWAN_PORT_MBIM,
+ }, {
+#ifdef CONFIG_WWAN_DEBUGFS
+ .tx_ch = PORT_CH_MD_LOG_TX,
+ .rx_ch = PORT_CH_MD_LOG_RX,
+ .txq_index = 7,
+ .rxq_index = 7,
+ .txq_exp_index = 7,
+ .rxq_exp_index = 7,
+ .path_id = CLDMA_ID_MD,
+ .ops = &t7xx_trace_port_ops,
+ .name = "mdlog",
+ }, {
+#endif
+ .tx_ch = PORT_CH_CONTROL_TX,
+ .rx_ch = PORT_CH_CONTROL_RX,
+ .txq_index = Q_IDX_CTRL,
+ .rxq_index = Q_IDX_CTRL,
+ .path_id = CLDMA_ID_MD,
+ .ops = &ctl_port_ops,
+ .name = "t7xx_ctrl",
+ }, {
+ .tx_ch = PORT_CH_AP_CONTROL_TX,
+ .rx_ch = PORT_CH_AP_CONTROL_RX,
+ .txq_index = Q_IDX_CTRL,
+ .rxq_index = Q_IDX_CTRL,
+ .path_id = CLDMA_ID_AP,
+ .ops = &ctl_port_ops,
+ .name = "t7xx_ap_ctrl",
+ },
+};
+
+static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
+{
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ port_conf = port->port_conf;
+ if (port_conf->rx_ch == ch || port_conf->tx_ch == ch)
+ return port;
+ }
+
+ return NULL;
+}
+
+static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h)
+{
+ u32 status = le32_to_cpu(ccci_h->status);
+ u16 seq_num, next_seq_num;
+ bool assert_bit;
+
+ seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status);
+ next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD);
+ assert_bit = status & CCCI_H_AST_BIT;
+ if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM)
+ return next_seq_num;
+
+ if (seq_num != port->seq_nums[MTK_RX])
+ dev_warn_ratelimited(port->dev,
+ "seq num out-of-order %u != %u (header %X, len %X)\n",
+ seq_num, port->seq_nums[MTK_RX],
+ le32_to_cpu(ccci_h->packet_header),
+ le32_to_cpu(ccci_h->packet_len));
+
+ return next_seq_num;
+}
+
+void t7xx_port_proxy_reset(struct port_proxy *port_prox)
+{
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
+ port->seq_nums[MTK_TX] = 0;
+ }
+}
+
+static int t7xx_port_get_queue_no(struct t7xx_port *port)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
+
+ return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ?
+ port_conf->txq_exp_index : port_conf->txq_index;
+}
+
+static void t7xx_port_struct_init(struct t7xx_port *port)
+{
+ INIT_LIST_HEAD(&port->entry);
+ INIT_LIST_HEAD(&port->queue_entry);
+ skb_queue_head_init(&port->rx_skb_list);
+ init_waitqueue_head(&port->rx_wq);
+ port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
+ port->seq_nums[MTK_TX] = 0;
+ atomic_set(&port->usage_cnt, 0);
+}
+
+struct sk_buff *t7xx_port_alloc_skb(int payload)
+{
+ struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL);
+
+ if (skb)
+ skb_reserve(skb, sizeof(struct ccci_header));
+
+ return skb;
+}
+
+struct sk_buff *t7xx_ctrl_alloc_skb(int payload)
+{
+ struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header));
+
+ if (skb)
+ skb_reserve(skb, sizeof(struct ctrl_msg_header));
+
+ return skb;
+}
+
+/**
+ * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list.
+ * @port: port context.
+ * @skb: received skb.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ENOBUFS - Not enough buffer space. Caller will try again later, skb is not consumed.
+ */
+int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->rx_wq.lock, flags);
+ if (port->rx_skb_list.qlen >= port->rx_length_th) {
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+
+ return -ENOBUFS;
+ }
+ __skb_queue_tail(&port->rx_skb_list, skb);
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+
+ wake_up_all(&port->rx_wq);
+ return 0;
+}
+
+static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ enum cldma_id path_id = port->port_conf->path_id;
+ struct cldma_ctrl *md_ctrl;
+ int ret, tx_qno;
+
+ md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
+ tx_qno = t7xx_port_get_queue_no(port);
+ ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb);
+ if (ret)
+ dev_err(port->dev, "Failed to send skb: %d\n", ret);
+
+ return ret;
+}
+
+static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb,
+ unsigned int pkt_header, unsigned int ex_msg)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ struct ccci_header *ccci_h;
+ u32 status;
+ int ret;
+
+ ccci_h = skb_push(skb, sizeof(*ccci_h));
+ status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) |
+ FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT;
+ ccci_h->status = cpu_to_le32(status);
+ ccci_h->packet_header = cpu_to_le32(pkt_header);
+ ccci_h->packet_len = cpu_to_le32(skb->len);
+ ccci_h->ex_msg = cpu_to_le32(ex_msg);
+
+ ret = t7xx_port_send_raw_skb(port, skb);
+ if (ret)
+ return ret;
+
+ port->seq_nums[MTK_TX]++;
+ return 0;
+}
+
+int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
+ unsigned int ex_msg)
+{
+ struct ctrl_msg_header *ctrl_msg_h;
+ unsigned int msg_len = skb->len;
+ u32 pkt_header = 0;
+
+ ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h));
+ ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg);
+ ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg);
+ ctrl_msg_h->data_length = cpu_to_le32(msg_len);
+
+ if (!msg_len)
+ pkt_header = CCCI_HEADER_NO_DATA;
+
+ return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
+}
+
+int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
+ unsigned int ex_msg)
+{
+ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
+ unsigned int fsm_state;
+
+ fsm_state = t7xx_fsm_get_ctl_state(ctl);
+ if (fsm_state != FSM_STATE_PRE_START) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ enum md_state md_state = t7xx_fsm_get_md_state(ctl);
+
+ switch (md_state) {
+ case MD_STATE_EXCEPTION:
+ if (port_conf->tx_ch != PORT_CH_MD_LOG_TX)
+ return -EBUSY;
+ break;
+
+ case MD_STATE_WAITING_FOR_HS1:
+ case MD_STATE_WAITING_FOR_HS2:
+ case MD_STATE_STOPPED:
+ case MD_STATE_WAITING_TO_STOP:
+ case MD_STATE_INVALID:
+ return -ENODEV;
+
+ default:
+ break;
+ }
+ }
+
+ return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
+}
+
+static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
+{
+	struct t7xx_port *port;
+	int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++)
+ INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]);
+
+ for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) {
+ for (i = 0; i < ARRAY_SIZE(port_prox->queue_ports[j]); i++)
+ INIT_LIST_HEAD(&port_prox->queue_ports[j][i]);
+ }
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ enum cldma_id path_id = port_conf->path_id;
+ u8 ch_id;
+
+ ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch);
+ list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]);
+ list_add_tail(&port->queue_entry,
+ &port_prox->queue_ports[path_id][port_conf->rxq_index]);
+ }
+}
+
+static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
+ struct cldma_queue *queue, u16 channel)
+{
+ struct port_proxy *port_prox = t7xx_dev->md->port_prox;
+ struct list_head *port_list;
+ struct t7xx_port *port;
+ u8 ch_id;
+
+ ch_id = FIELD_GET(PORT_CH_ID_MASK, channel);
+ port_list = &port_prox->rx_ch_ports[ch_id];
+ list_for_each_entry(port, port_list, entry) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (queue->md_ctrl->hif_id == port_conf->path_id &&
+ channel == port_conf->rx_ch)
+ return port;
+ }
+
+ return NULL;
+}
+
+/**
+ * t7xx_port_proxy_recv_skb() - Dispatch received skb.
+ * @queue: CLDMA queue.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ * * 0 - Packet consumed.
+ * * -ERROR - Failed to process skb.
+ */
+static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+{
+ struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
+ struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
+ struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl;
+ struct device *dev = queue->md_ctrl->dev;
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_port *port;
+ u16 seq_num, channel;
+ int ret;
+
+ channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
+ if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {
+ dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel);
+ goto drop_skb;
+ }
+
+ port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel);
+ if (!port) {
+ dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel);
+ goto drop_skb;
+ }
+
+ seq_num = t7xx_port_next_rx_seq_num(port, ccci_h);
+ port_conf = port->port_conf;
+ skb_pull(skb, sizeof(*ccci_h));
+
+ ret = port_conf->ops->recv_skb(port, skb);
+ /* Error indicates to try again later */
+ if (ret) {
+ skb_push(skb, sizeof(*ccci_h));
+ return ret;
+ }
+
+ port->seq_nums[MTK_RX] = seq_num;
+ return 0;
+
+drop_skb:
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+/**
+ * t7xx_port_proxy_md_status_notify() - Notify all ports of state.
+ * @port_prox: The port_proxy pointer.
+ * @state: State.
+ *
+ * Called by t7xx_fsm. Dispatches the modem state to all ports that want
+ * to know about MD state transitions.
+ */
+void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state)
+{
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (port_conf->ops->md_state_notify)
+ port_conf->ops->md_state_notify(port, state);
+ }
+}
+
+static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
+{
+ struct port_proxy *port_prox = md->port_prox;
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ t7xx_port_struct_init(port);
+
+ if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
+ md->core_md.ctl_port = port;
+
+ if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX)
+ md->core_ap.ctl_port = port;
+
+ port->t7xx_dev = md->t7xx_dev;
+ port->dev = &md->t7xx_dev->pdev->dev;
+ spin_lock_init(&port->port_update_lock);
+ port->chan_enable = false;
+
+ if (port_conf->ops->init)
+ port_conf->ops->init(port);
+ }
+
+ t7xx_proxy_setup_ch_mapping(port_prox);
+}
+
+static int t7xx_proxy_alloc(struct t7xx_modem *md)
+{
+ unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct port_proxy *port_prox;
+ int i;
+
+ port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
+ GFP_KERNEL);
+ if (!port_prox)
+ return -ENOMEM;
+
+ md->port_prox = port_prox;
+ port_prox->dev = dev;
+
+ for (i = 0; i < port_count; i++)
+ port_prox->ports[i].port_conf = &t7xx_port_conf[i];
+
+ port_prox->port_count = port_count;
+ t7xx_proxy_init_all_ports(md);
+ return 0;
+}
+
+/**
+ * t7xx_port_proxy_init() - Initialize ports.
+ * @md: Modem.
+ *
+ * Create all port instances.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code from a failed sub-initialization.
+ */
+int t7xx_port_proxy_init(struct t7xx_modem *md)
+{
+ int ret;
+
+ ret = t7xx_proxy_alloc(md);
+ if (ret)
+ return ret;
+
+ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
+ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
+ return 0;
+}
+
+void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
+{
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (port_conf->ops->uninit)
+ port_conf->ops->uninit(port);
+ }
+}
+
+int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
+ bool en_flag)
+{
+ struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id);
+ const struct t7xx_port_conf *port_conf;
+
+ if (!port)
+ return -EINVAL;
+
+ port_conf = port->port_conf;
+
+ if (en_flag) {
+ if (port_conf->ops->enable_chl)
+ port_conf->ops->enable_chl(port);
+ } else {
+ if (port_conf->ops->disable_chl)
+ port_conf->ops->disable_chl(port);
+ }
+
+ return 0;
+}
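
Editorial illustration (not part of the commit): a minimal sketch of how a caller could use the TX helpers defined above, assuming a hypothetical example_send_ctl_msg() with a made-up message ID and payload. t7xx_ctrl_alloc_skb() reserves headroom for both the CCCI and control headers, which t7xx_port_send_ctl_skb() then fills in.

static int example_send_ctl_msg(struct t7xx_port *port, const void *payload,
				size_t len, unsigned int msg_id)
{
	struct sk_buff *skb;
	int ret;

	/* Headroom for struct ccci_header + struct ctrl_msg_header is reserved here */
	skb = t7xx_ctrl_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, payload, len);

	/* The skb is consumed on success; free it ourselves on error */
	ret = t7xx_port_send_ctl_skb(port, skb, msg_id, 0);
	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}
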
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
new file mode 100644
index 0000000000..81d059fbc0
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_PORT_PROXY_H__
+#define __T7XX_PORT_PROXY_H__
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_port.h"
+
+#define MTK_QUEUES 16
+#define RX_QUEUE_MAXLEN 32
+#define CTRL_QUEUE_MAXLEN 16
+
+struct port_proxy {
+ int port_count;
+ struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1];
+ struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES];
+ struct device *dev;
+ struct t7xx_port ports[];
+};
+
+struct ccci_header {
+ __le32 packet_header;
+ __le32 packet_len;
+ __le32 status;
+ __le32 ex_msg;
+};
+
+/* Coupled with HW - indicates whether data follows the CCCI header */
+#define CCCI_HEADER_NO_DATA 0xffffffff
+
+#define CCCI_H_AST_BIT BIT(31)
+#define CCCI_H_SEQ_FLD GENMASK(30, 16)
+#define CCCI_H_CHN_FLD GENMASK(15, 0)
+
+struct ctrl_msg_header {
+ __le32 ctrl_msg_id;
+ __le32 ex_msg;
+ __le32 data_length;
+};
+
+/* Control identification numbers for AP<->MD messages */
+#define CTL_ID_HS1_MSG 0x0
+#define CTL_ID_HS2_MSG 0x1
+#define CTL_ID_HS3_MSG 0x2
+#define CTL_ID_MD_EX 0x4
+#define CTL_ID_DRV_VER_ERROR 0x5
+#define CTL_ID_MD_EX_ACK 0x6
+#define CTL_ID_MD_EX_PASS 0x8
+#define CTL_ID_PORT_ENUM 0x9
+
+/* Modem exception check identification code - "EXCP" */
+#define MD_EX_CHK_ID 0x45584350
+/* Modem exception check acknowledge identification code - "EREC" */
+#define MD_EX_CHK_ACK_ID 0x45524543
+
+#define PORT_INFO_RSRVD GENMASK(31, 16)
+#define PORT_INFO_ENFLG BIT(15)
+#define PORT_INFO_CH_ID GENMASK(14, 0)
+
+#define PORT_ENUM_VER 0
+#define PORT_ENUM_HEAD_PATTERN 0x5a5a5a5a
+#define PORT_ENUM_TAIL_PATTERN 0xa5a5a5a5
+#define PORT_ENUM_VER_MISMATCH 0x00657272
+
+/* Port operations mapping */
+extern struct port_ops wwan_sub_port_ops;
+extern struct port_ops ctl_port_ops;
+
+#ifdef CONFIG_WWAN_DEBUGFS
+extern struct port_ops t7xx_trace_port_ops;
+#endif
+
+void t7xx_port_proxy_reset(struct port_proxy *port_prox);
+void t7xx_port_proxy_uninit(struct port_proxy *port_prox);
+int t7xx_port_proxy_init(struct t7xx_modem *md);
+void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state);
+int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg);
+int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
+ bool en_flag);
+
+#endif /* __T7XX_PORT_PROXY_H__ */
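
Editorial illustration (not part of the commit): a short sketch of how the CCCI status word declared above is packed and unpacked with the bitfield helpers the driver already relies on. The example_* helper names are hypothetical; the packing mirrors t7xx_port_send_ccci_skb() and the unpacking mirrors t7xx_port_proxy_recv_skb().

#include <linux/bitfield.h>

/* Channel, sequence number and assert bit packed into one little-endian word */
static __le32 example_pack_ccci_status(u16 tx_ch, u16 seq_num)
{
	u32 status = FIELD_PREP(CCCI_H_CHN_FLD, tx_ch) |
		     FIELD_PREP(CCCI_H_SEQ_FLD, seq_num) |
		     CCCI_H_AST_BIT;

	return cpu_to_le32(status);
}

/* Extract the channel from a received header on the RX side */
static u16 example_get_ccci_channel(const struct ccci_header *ccci_h)
{
	return FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
}
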
diff --git a/drivers/net/wwan/t7xx/t7xx_port_trace.c b/drivers/net/wwan/t7xx/t7xx_port_trace.c
new file mode 100644
index 0000000000..6a3f363858
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Intel Corporation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include <linux/skbuff.h>
+#include <linux/wwan.h>
+
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define T7XX_TRC_SUB_BUFF_SIZE 131072
+#define T7XX_TRC_N_SUB_BUFF 32
+
+static struct dentry *t7xx_trace_create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ *is_global = 1;
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+static int t7xx_trace_remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+static int t7xx_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf, size_t prev_padding)
+{
+ if (relay_buf_full(buf)) {
+		pr_err_ratelimited("Relay buffer full, dropping traces\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = t7xx_trace_subbuf_start_handler,
+ .create_buf_file = t7xx_trace_create_buf_file_handler,
+ .remove_buf_file = t7xx_trace_remove_buf_file_handler,
+};
+
+static void t7xx_trace_port_uninit(struct t7xx_port *port)
+{
+ struct dentry *debugfs_dir = port->t7xx_dev->debugfs_dir;
+ struct rchan *relaych = port->log.relaych;
+
+ if (!relaych)
+ return;
+
+ relay_close(relaych);
+ debugfs_remove_recursive(debugfs_dir);
+}
+
+static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ struct rchan *relaych = port->log.relaych;
+
+ if (!relaych)
+ return -EINVAL;
+
+ relay_write(relaych, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static void t7xx_port_trace_md_state_notify(struct t7xx_port *port, unsigned int state)
+{
+ struct rchan *relaych = port->log.relaych;
+ struct dentry *debugfs_wwan_dir;
+ struct dentry *debugfs_dir;
+
+ if (state != MD_STATE_READY || relaych)
+ return;
+
+ debugfs_wwan_dir = wwan_get_debugfs_dir(port->dev);
+ if (IS_ERR(debugfs_wwan_dir))
+ return;
+
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, debugfs_wwan_dir);
+ if (IS_ERR_OR_NULL(debugfs_dir)) {
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ dev_err(port->dev, "Unable to create debugfs for trace");
+ return;
+ }
+
+ relaych = relay_open("relay_ch", debugfs_dir, T7XX_TRC_SUB_BUFF_SIZE,
+ T7XX_TRC_N_SUB_BUFF, &relay_callbacks, NULL);
+ if (!relaych)
+ goto err_rm_debugfs_dir;
+
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ port->log.relaych = relaych;
+ port->t7xx_dev->debugfs_dir = debugfs_dir;
+ return;
+
+err_rm_debugfs_dir:
+ debugfs_remove_recursive(debugfs_dir);
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ dev_err(port->dev, "Unable to create trace port %s", port->port_conf->name);
+}
+
+struct port_ops t7xx_trace_port_ops = {
+ .recv_skb = t7xx_trace_port_recv_skb,
+ .uninit = t7xx_trace_port_uninit,
+ .md_state_notify = t7xx_port_trace_md_state_notify,
+};
diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
new file mode 100644
index 0000000000..17389c8f66
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/wwan.h>
+
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+static int t7xx_port_ctrl_start(struct wwan_port *port)
+{
+ struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
+
+ if (atomic_read(&port_mtk->usage_cnt))
+ return -EBUSY;
+
+ atomic_inc(&port_mtk->usage_cnt);
+ return 0;
+}
+
+static void t7xx_port_ctrl_stop(struct wwan_port *port)
+{
+ struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
+
+ atomic_dec(&port_mtk->usage_cnt);
+}
+
+static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct t7xx_port *port_private = wwan_port_get_drvdata(port);
+ const struct t7xx_port_conf *port_conf;
+ struct sk_buff *cur = skb, *cloned;
+ struct t7xx_fsm_ctl *ctl;
+ enum md_state md_state;
+ int cnt = 0, ret;
+
+ if (!port_private->chan_enable)
+ return -EINVAL;
+
+ port_conf = port_private->port_conf;
+ ctl = port_private->t7xx_dev->md->fsm_ctl;
+ md_state = t7xx_fsm_get_md_state(ctl);
+ if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) {
+ dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n",
+ port_conf->name, md_state);
+ return -ENODEV;
+ }
+
+ while (cur) {
+		cloned = skb_clone(cur, GFP_KERNEL);
+		if (!cloned)
+			return cnt ? cnt : -ENOMEM;
+		cloned->len = skb_headlen(cur);
+ ret = t7xx_port_send_skb(port_private, cloned, 0, 0);
+ if (ret) {
+ dev_kfree_skb(cloned);
+ dev_err(port_private->dev, "Write error on %s port, %d\n",
+ port_conf->name, ret);
+ return cnt ? cnt + ret : ret;
+ }
+ cnt += cur->len;
+ if (cur == skb)
+ cur = skb_shinfo(skb)->frag_list;
+ else
+ cur = cur->next;
+ }
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static const struct wwan_port_ops wwan_ops = {
+ .start = t7xx_port_ctrl_start,
+ .stop = t7xx_port_ctrl_stop,
+ .tx = t7xx_port_ctrl_tx,
+};
+
+static int t7xx_port_wwan_init(struct t7xx_port *port)
+{
+ port->rx_length_th = RX_QUEUE_MAXLEN;
+ return 0;
+}
+
+static void t7xx_port_wwan_uninit(struct t7xx_port *port)
+{
+ if (!port->wwan.wwan_port)
+ return;
+
+ port->rx_length_th = 0;
+ wwan_remove_port(port->wwan.wwan_port);
+ port->wwan.wwan_port = NULL;
+}
+
+static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ if (!atomic_read(&port->usage_cnt) || !port->chan_enable) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ dev_kfree_skb_any(skb);
+ dev_err_ratelimited(port->dev, "Port %s is not opened, drop packets\n",
+ port_conf->name);
+		/* Dropping the skb; the caller must not access it. */
+ return 0;
+ }
+
+ wwan_port_rx(port->wwan.wwan_port, skb);
+ return 0;
+}
+
+static int t7xx_port_wwan_enable_chl(struct t7xx_port *port)
+{
+ spin_lock(&port->port_update_lock);
+ port->chan_enable = true;
+ spin_unlock(&port->port_update_lock);
+
+ return 0;
+}
+
+static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
+{
+ spin_lock(&port->port_update_lock);
+ port->chan_enable = false;
+ spin_unlock(&port->port_update_lock);
+
+ return 0;
+}
+
+static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ unsigned int header_len = sizeof(struct ccci_header);
+ struct wwan_port_caps caps;
+
+ if (state != MD_STATE_READY)
+ return;
+
+ if (!port->wwan.wwan_port) {
+ caps.frag_len = CLDMA_MTU - header_len;
+ caps.headroom_len = header_len;
+ port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
+ &wwan_ops, &caps, port);
+ if (IS_ERR(port->wwan.wwan_port))
+ dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
+ }
+}
+
+struct port_ops wwan_sub_port_ops = {
+ .init = t7xx_port_wwan_init,
+ .recv_skb = t7xx_port_wwan_recv_skb,
+ .uninit = t7xx_port_wwan_uninit,
+ .enable_chl = t7xx_port_wwan_enable_chl,
+ .disable_chl = t7xx_port_wwan_disable_chl,
+ .md_state_notify = t7xx_port_wwan_md_state_notify,
+};
diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h
new file mode 100644
index 0000000000..c41d7d094c
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_REG_H__
+#define __T7XX_REG_H__
+
+#include <linux/bits.h>
+
+/* Device base address offset */
+#define MHCCIF_RC_DEV_BASE 0x10024000
+
+#define REG_RC2EP_SW_BSY 0x04
+#define REG_RC2EP_SW_INT_START 0x08
+
+#define REG_RC2EP_SW_TCHNUM 0x0c
+#define H2D_CH_EXCEPTION_ACK 1
+#define H2D_CH_EXCEPTION_CLEARQ_ACK 2
+#define H2D_CH_DS_LOCK 3
+/* Channels 4-8 are reserved */
+#define H2D_CH_SUSPEND_REQ 9
+#define H2D_CH_RESUME_REQ 10
+#define H2D_CH_SUSPEND_REQ_AP 11
+#define H2D_CH_RESUME_REQ_AP 12
+#define H2D_CH_DEVICE_RESET 13
+#define H2D_CH_DRM_DISABLE_AP 14
+
+#define REG_EP2RC_SW_INT_STS 0x10
+#define REG_EP2RC_SW_INT_ACK 0x14
+#define REG_EP2RC_SW_INT_EAP_MASK 0x20
+#define REG_EP2RC_SW_INT_EAP_MASK_SET 0x30
+#define REG_EP2RC_SW_INT_EAP_MASK_CLR 0x40
+
+#define D2H_INT_DS_LOCK_ACK BIT(0)
+#define D2H_INT_EXCEPTION_INIT BIT(1)
+#define D2H_INT_EXCEPTION_INIT_DONE BIT(2)
+#define D2H_INT_EXCEPTION_CLEARQ_DONE BIT(3)
+#define D2H_INT_EXCEPTION_ALLQ_RESET BIT(4)
+#define D2H_INT_PORT_ENUM BIT(5)
+/* Bits 6-10 are reserved */
+#define D2H_INT_SUSPEND_ACK BIT(11)
+#define D2H_INT_RESUME_ACK BIT(12)
+#define D2H_INT_SUSPEND_ACK_AP BIT(13)
+#define D2H_INT_RESUME_ACK_AP BIT(14)
+#define D2H_INT_ASYNC_AP_HK BIT(15)
+#define D2H_INT_ASYNC_MD_HK BIT(16)
+
+/* Register base */
+#define INFRACFG_AO_DEV_CHIP 0x10001000
+
+/* ATR setting */
+#define T7XX_PCIE_REG_TRSL_ADDR_CHIP 0x10000000
+#define T7XX_PCIE_REG_SIZE_CHIP 0x00400000
+
+/* Reset Generic Unit (RGU) */
+#define TOPRGU_CH_PCIE_IRQ_STA 0x1000790c
+
+#define ATR_PORT_OFFSET 0x100
+#define ATR_TABLE_OFFSET 0x20
+#define ATR_TABLE_NUM_PER_ATR 8
+#define ATR_TRANSPARENT_SIZE 0x3f
+
+/* PCIE_MAC_IREG Register Definition */
+
+#define ISTAT_HST_CTRL 0x01ac
+#define ISTAT_HST_CTRL_DIS BIT(0)
+
+#define T7XX_PCIE_MISC_CTRL 0x0348
+#define T7XX_PCIE_MISC_MAC_SLEEP_DIS BIT(7)
+
+#define T7XX_PCIE_CFG_MSIX 0x03ec
+#define ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR 0x0600
+#define ATR_PCIE_WIN0_T0_TRSL_ADDR 0x0608
+#define ATR_PCIE_WIN0_T0_TRSL_PARAM 0x0610
+#define ATR_PCIE_WIN0_ADDR_ALGMT GENMASK_ULL(63, 12)
+
+#define ATR_SRC_ADDR_INVALID 0x007f
+
+#define T7XX_PCIE_PM_RESUME_STATE 0x0d0c
+
+enum t7xx_pm_resume_state {
+ PM_RESUME_REG_STATE_L3,
+ PM_RESUME_REG_STATE_L1,
+ PM_RESUME_REG_STATE_INIT,
+ PM_RESUME_REG_STATE_EXP,
+ PM_RESUME_REG_STATE_L2,
+ PM_RESUME_REG_STATE_L2_EXP,
+};
+
+#define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c
+#define MISC_STAGE_MASK GENMASK(2, 0)
+#define MISC_RESET_TYPE_PLDR BIT(26)
+#define MISC_RESET_TYPE_FLDR BIT(27)
+#define LINUX_STAGE 4
+
+#define T7XX_PCIE_RESOURCE_STATUS 0x0d28
+#define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0)
+
+#define DISABLE_ASPM_LOWPWR 0x0e50
+#define ENABLE_ASPM_LOWPWR 0x0e54
+#define T7XX_L1_BIT(i) BIT((i) * 4 + 1)
+#define T7XX_L1_1_BIT(i) BIT((i) * 4 + 2)
+#define T7XX_L1_2_BIT(i) BIT((i) * 4 + 3)
+
+#define MSIX_ISTAT_HST_GRP0_0 0x0f00
+#define IMASK_HOST_MSIX_SET_GRP0_0 0x3000
+#define IMASK_HOST_MSIX_CLR_GRP0_0 0x3080
+#define EXT_INT_START 24
+#define EXT_INT_NUM 8
+#define MSIX_MSK_SET_ALL GENMASK(31, 24)
+
+enum t7xx_int {
+ DPMAIF_INT,
+ CLDMA0_INT,
+ CLDMA1_INT,
+ CLDMA2_INT,
+ MHCCIF_INT,
+ DPMAIF2_INT,
+ SAP_RGU_INT,
+ CLDMA3_INT,
+};
+
+/* DPMAIF definitions */
+
+#define DPMAIF_PD_BASE 0x1022d000
+#define BASE_DPMAIF_UL DPMAIF_PD_BASE
+#define BASE_DPMAIF_DL (DPMAIF_PD_BASE + 0x100)
+#define BASE_DPMAIF_AP_MISC (DPMAIF_PD_BASE + 0x400)
+#define BASE_DPMAIF_MMW_HPC (DPMAIF_PD_BASE + 0x600)
+#define BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX (DPMAIF_PD_BASE + 0x900)
+#define BASE_DPMAIF_PD_SRAM_DL (DPMAIF_PD_BASE + 0xc00)
+#define BASE_DPMAIF_PD_SRAM_UL (DPMAIF_PD_BASE + 0xd00)
+
+#define DPMAIF_AO_BASE 0x10014000
+#define BASE_DPMAIF_AO_UL DPMAIF_AO_BASE
+#define BASE_DPMAIF_AO_DL (DPMAIF_AO_BASE + 0x400)
+
+#define DPMAIF_UL_ADD_DESC (BASE_DPMAIF_UL + 0x00)
+#define DPMAIF_UL_CHK_BUSY (BASE_DPMAIF_UL + 0x88)
+#define DPMAIF_UL_RESERVE_AO_RW (BASE_DPMAIF_UL + 0xac)
+#define DPMAIF_UL_ADD_DESC_CH0 (BASE_DPMAIF_UL + 0xb0)
+
+#define DPMAIF_DL_BAT_INIT (BASE_DPMAIF_DL + 0x00)
+#define DPMAIF_DL_BAT_ADD (BASE_DPMAIF_DL + 0x04)
+#define DPMAIF_DL_BAT_INIT_CON0 (BASE_DPMAIF_DL + 0x08)
+#define DPMAIF_DL_BAT_INIT_CON1 (BASE_DPMAIF_DL + 0x0c)
+#define DPMAIF_DL_BAT_INIT_CON2 (BASE_DPMAIF_DL + 0x10)
+#define DPMAIF_DL_BAT_INIT_CON3 (BASE_DPMAIF_DL + 0x50)
+#define DPMAIF_DL_CHK_BUSY (BASE_DPMAIF_DL + 0xb4)
+
+#define DPMAIF_AP_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x00)
+#define DPMAIF_AP_APDL_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x50)
+#define DPMAIF_AP_IP_BUSY (BASE_DPMAIF_AP_MISC + 0x60)
+#define DPMAIF_AP_CG_EN (BASE_DPMAIF_AP_MISC + 0x68)
+#define DPMAIF_AP_OVERWRITE_CFG (BASE_DPMAIF_AP_MISC + 0x90)
+#define DPMAIF_AP_MEM_CLR (BASE_DPMAIF_AP_MISC + 0x94)
+#define DPMAIF_AP_ALL_L2TISAR0_MASK GENMASK(31, 0)
+#define DPMAIF_AP_APDL_ALL_L2TISAR0_MASK GENMASK(31, 0)
+#define DPMAIF_AP_IP_BUSY_MASK GENMASK(31, 0)
+
+#define DPMAIF_AO_UL_INIT_SET (BASE_DPMAIF_AO_UL + 0x0)
+#define DPMAIF_AO_UL_CHNL_ARB0 (BASE_DPMAIF_AO_UL + 0x1c)
+#define DPMAIF_AO_UL_AP_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x80)
+#define DPMAIF_AO_UL_AP_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x84)
+#define DPMAIF_AO_UL_AP_L2TIMSR0 (BASE_DPMAIF_AO_UL + 0x88)
+#define DPMAIF_AO_UL_AP_L1TIMR0 (BASE_DPMAIF_AO_UL + 0x8c)
+#define DPMAIF_AO_UL_APDL_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x90)
+#define DPMAIF_AO_UL_APDL_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x94)
+#define DPMAIF_AO_UL_APDL_L2TIMSR0 (BASE_DPMAIF_AO_UL + 0x98)
+#define DPMAIF_AO_AP_DLUL_IP_BUSY_MASK (BASE_DPMAIF_AO_UL + 0x9c)
+
+#define DPMAIF_AO_UL_CHNL0_CON0 (BASE_DPMAIF_PD_SRAM_UL + 0x10)
+#define DPMAIF_AO_UL_CHNL0_CON1 (BASE_DPMAIF_PD_SRAM_UL + 0x14)
+#define DPMAIF_AO_UL_CHNL0_CON2 (BASE_DPMAIF_PD_SRAM_UL + 0x18)
+#define DPMAIF_AO_UL_CH0_STA (BASE_DPMAIF_PD_SRAM_UL + 0x70)
+
+#define DPMAIF_AO_DL_INIT_SET (BASE_DPMAIF_AO_DL + 0x00)
+#define DPMAIF_AO_DL_IRQ_MASK (BASE_DPMAIF_AO_DL + 0x0c)
+#define DPMAIF_AO_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_AO_DL + 0x28)
+#define DPMAIF_AO_DL_DLQPIT_TRIG_THRES (BASE_DPMAIF_AO_DL + 0x34)
+
+#define DPMAIF_AO_DL_PKTINFO_CON0 (BASE_DPMAIF_PD_SRAM_DL + 0x00)
+#define DPMAIF_AO_DL_PKTINFO_CON1 (BASE_DPMAIF_PD_SRAM_DL + 0x04)
+#define DPMAIF_AO_DL_PKTINFO_CON2 (BASE_DPMAIF_PD_SRAM_DL + 0x08)
+#define DPMAIF_AO_DL_RDY_CHK_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x0c)
+#define DPMAIF_AO_DL_RDY_CHK_FRG_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x10)
+
+#define DPMAIF_AO_DL_DLQ_AGG_CFG (BASE_DPMAIF_PD_SRAM_DL + 0x20)
+#define DPMAIF_AO_DL_DLQPIT_TIMEOUT0 (BASE_DPMAIF_PD_SRAM_DL + 0x24)
+#define DPMAIF_AO_DL_DLQPIT_TIMEOUT1 (BASE_DPMAIF_PD_SRAM_DL + 0x28)
+#define DPMAIF_AO_DL_HPC_CNTL (BASE_DPMAIF_PD_SRAM_DL + 0x38)
+#define DPMAIF_AO_DL_PIT_SEQ_END (BASE_DPMAIF_PD_SRAM_DL + 0x40)
+
+#define DPMAIF_AO_DL_BAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xd8)
+#define DPMAIF_AO_DL_BAT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xdc)
+#define DPMAIF_AO_DL_PIT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xec)
+#define DPMAIF_AO_DL_PIT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x60)
+#define DPMAIF_AO_DL_FRGBAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x78)
+#define DPMAIF_AO_DL_DLQ_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xa4)
+
+#define DPMAIF_HPC_INTR_MASK (BASE_DPMAIF_MMW_HPC + 0x0f4)
+#define DPMA_HPC_ALL_INT_MASK GENMASK(15, 0)
+
+#define DPMAIF_HPC_DLQ_PATH_MODE 3
+#define DPMAIF_HPC_ADD_MODE_DF 0
+#define DPMAIF_HPC_TOTAL_NUM 8
+#define DPMAIF_HPC_MAX_TOTAL_NUM 8
+
+#define DPMAIF_DL_DLQPIT_INIT (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x00)
+#define DPMAIF_DL_DLQPIT_ADD (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x10)
+#define DPMAIF_DL_DLQPIT_INIT_CON0 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x14)
+#define DPMAIF_DL_DLQPIT_INIT_CON1 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x18)
+#define DPMAIF_DL_DLQPIT_INIT_CON2 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x1c)
+#define DPMAIF_DL_DLQPIT_INIT_CON3 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x20)
+#define DPMAIF_DL_DLQPIT_INIT_CON4 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x24)
+#define DPMAIF_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x28)
+#define DPMAIF_DL_DLQPIT_INIT_CON6 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x2c)
+
+#define DPMAIF_ULQSAR_n(q) (DPMAIF_AO_UL_CHNL0_CON0 + 0x10 * (q))
+#define DPMAIF_UL_DRBSIZE_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON1 + 0x10 * (q))
+#define DPMAIF_UL_DRB_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON2 + 0x10 * (q))
+#define DPMAIF_ULQ_STA0_n(q) (DPMAIF_AO_UL_CH0_STA + 0x04 * (q))
+#define DPMAIF_ULQ_ADD_DESC_CH_n(q) (DPMAIF_UL_ADD_DESC_CH0 + 0x04 * (q))
+
+#define DPMAIF_UL_DRB_RIDX_MSK GENMASK(31, 16)
+
+#define DPMAIF_AP_RGU_ASSERT 0x10001150
+#define DPMAIF_AP_RGU_DEASSERT 0x10001154
+#define DPMAIF_AP_RST_BIT BIT(2)
+
+#define DPMAIF_AP_AO_RGU_ASSERT 0x10001140
+#define DPMAIF_AP_AO_RGU_DEASSERT 0x10001144
+#define DPMAIF_AP_AO_RST_BIT BIT(6)
+
+/* DPMAIF init/restore */
+#define DPMAIF_UL_ADD_NOT_READY BIT(31)
+#define DPMAIF_UL_ADD_UPDATE BIT(31)
+#define DPMAIF_UL_ADD_COUNT_MASK GENMASK(15, 0)
+#define DPMAIF_UL_ALL_QUE_ARB_EN GENMASK(11, 8)
+
+#define DPMAIF_DL_ADD_UPDATE BIT(31)
+#define DPMAIF_DL_ADD_NOT_READY BIT(31)
+#define DPMAIF_DL_FRG_ADD_UPDATE BIT(16)
+#define DPMAIF_DL_ADD_COUNT_MASK GENMASK(15, 0)
+
+#define DPMAIF_DL_BAT_INIT_ALLSET BIT(0)
+#define DPMAIF_DL_BAT_FRG_INIT BIT(16)
+#define DPMAIF_DL_BAT_INIT_EN BIT(31)
+#define DPMAIF_DL_BAT_INIT_NOT_READY BIT(31)
+#define DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT 0
+
+#define DPMAIF_DL_PIT_INIT_ALLSET BIT(0)
+#define DPMAIF_DL_PIT_INIT_EN BIT(31)
+#define DPMAIF_DL_PIT_INIT_NOT_READY BIT(31)
+
+#define DPMAIF_BAT_REMAIN_SZ_BASE 16
+#define DPMAIF_BAT_BUFFER_SZ_BASE 128
+#define DPMAIF_FRG_BUFFER_SZ_BASE 128
+
+#define DLQ_PIT_IDX_SIZE 0x20
+
+#define DPMAIF_PIT_SIZE_MSK GENMASK(17, 0)
+
+#define DPMAIF_PIT_REM_CNT_MSK GENMASK(17, 0)
+
+#define DPMAIF_BAT_EN_MSK BIT(16)
+#define DPMAIF_FRG_EN_MSK BIT(28)
+#define DPMAIF_BAT_SIZE_MSK GENMASK(15, 0)
+
+#define DPMAIF_BAT_BID_MAXCNT_MSK GENMASK(31, 16)
+#define DPMAIF_BAT_REMAIN_MINSZ_MSK GENMASK(15, 8)
+#define DPMAIF_PIT_CHK_NUM_MSK GENMASK(31, 24)
+#define DPMAIF_BAT_BUF_SZ_MSK GENMASK(16, 8)
+#define DPMAIF_FRG_BUF_SZ_MSK GENMASK(16, 8)
+#define DPMAIF_BAT_RSV_LEN_MSK GENMASK(7, 0)
+#define DPMAIF_PKT_ALIGN_MSK GENMASK(23, 22)
+
+#define DPMAIF_BAT_CHECK_THRES_MSK GENMASK(21, 16)
+#define DPMAIF_FRG_CHECK_THRES_MSK GENMASK(7, 0)
+
+#define DPMAIF_PKT_ALIGN_EN BIT(23)
+
+#define DPMAIF_DRB_SIZE_MSK GENMASK(15, 0)
+
+#define DPMAIF_DL_RD_WR_IDX_MSK GENMASK(17, 0)
+
+/* DPMAIF_UL_CHK_BUSY */
+#define DPMAIF_UL_IDLE_STS BIT(11)
+/* DPMAIF_DL_CHK_BUSY */
+#define DPMAIF_DL_IDLE_STS BIT(23)
+/* DPMAIF_AO_DL_RDY_CHK_THRES */
+#define DPMAIF_DL_PKT_CHECKSUM_EN BIT(31)
+#define DPMAIF_PORT_MODE_PCIE BIT(30)
+#define DPMAIF_DL_BURST_PIT_EN BIT(13)
+/* DPMAIF_DL_BAT_INIT_CON1 */
+#define DPMAIF_DL_BAT_CACHE_PRI BIT(22)
+/* DPMAIF_AP_MEM_CLR */
+#define DPMAIF_MEM_CLR BIT(0)
+/* DPMAIF_AP_OVERWRITE_CFG */
+#define DPMAIF_SRAM_SYNC BIT(0)
+/* DPMAIF_AO_UL_INIT_SET */
+#define DPMAIF_UL_INIT_DONE BIT(0)
+/* DPMAIF_AO_DL_INIT_SET */
+#define DPMAIF_DL_INIT_DONE BIT(0)
+/* DPMAIF_AO_DL_PIT_SEQ_END */
+#define DPMAIF_DL_PIT_SEQ_MSK GENMASK(7, 0)
+/* DPMAIF_UL_RESERVE_AO_RW */
+#define DPMAIF_PCIE_MODE_SET_VALUE 0x55
+/* DPMAIF_AP_CG_EN */
+#define DPMAIF_CG_EN 0x7f
+
+#define DPMAIF_UDL_IP_BUSY BIT(0)
+#define DPMAIF_DL_INT_DLQ0_QDONE BIT(8)
+#define DPMAIF_DL_INT_DLQ1_QDONE BIT(9)
+#define DPMAIF_DL_INT_DLQ0_PITCNT_LEN BIT(10)
+#define DPMAIF_DL_INT_DLQ1_PITCNT_LEN BIT(11)
+#define DPMAIF_DL_INT_Q2TOQ1 BIT(24)
+#define DPMAIF_DL_INT_Q2APTOP BIT(25)
+
+#define DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS GENMASK(15, 0)
+#define DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK GENMASK(31, 16)
+
+/* DPMAIF DLQ HW configure */
+#define DPMAIF_AGG_MAX_LEN_DF 65535
+#define DPMAIF_AGG_TBL_ENT_NUM_DF 50
+#define DPMAIF_HASH_PRIME_DF 13
+#define DPMAIF_MID_TIMEOUT_THRES_DF 100
+#define DPMAIF_DLQ_TIMEOUT_THRES_DF 100
+#define DPMAIF_DLQ_PRS_THRES_DF 10
+#define DPMAIF_DLQ_HASH_BIT_CHOOSE_DF 0
+
+#define DPMAIF_DLQPIT_EN_MSK BIT(20)
+#define DPMAIF_DLQPIT_CHAN_OFS 16
+#define DPMAIF_ADD_DLQ_PIT_CHAN_OFS 20
+
+#endif /* __T7XX_REG_H__ */
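
Editorial illustration (not part of the commit): a hedged sketch of how the device-status register defined above is typically consumed, mirroring the poll in fsm_routine_start(). The function name and the mapped register base pointer are hypothetical.

#include <linux/bitfield.h>
#include <linux/io.h>

/* True once the device reports it has reached the Linux boot stage */
static bool example_device_in_linux_stage(void __iomem *ireg_base)
{
	u32 dev_status = ioread32(ireg_base + T7XX_PCIE_MISC_DEV_STATUS);

	return FIELD_GET(MISC_STAGE_MASK, dev_status) == LINUX_STAGE;
}
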
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
new file mode 100644
index 0000000000..80edb8e75a
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -0,0 +1,559 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_mhccif.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_reg.h"
+#include "t7xx_state_monitor.h"
+
+#define FSM_DRM_DISABLE_DELAY_MS 200
+#define FSM_EVENT_POLL_INTERVAL_MS 20
+#define FSM_MD_EX_REC_OK_TIMEOUT_MS 10000
+#define FSM_MD_EX_PASS_TIMEOUT_MS 45000
+#define FSM_CMD_TIMEOUT_MS 2000
+
+void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
+{
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->notifier_lock, flags);
+ list_add_tail(&notifier->entry, &ctl->notifier_list);
+ spin_unlock_irqrestore(&ctl->notifier_lock, flags);
+}
+
+void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
+{
+ struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->notifier_lock, flags);
+ list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
+ if (notifier_cur == notifier)
+ list_del(&notifier->entry);
+ }
+ spin_unlock_irqrestore(&ctl->notifier_lock, flags);
+}
+
+static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
+{
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+ struct t7xx_fsm_notifier *notifier;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->notifier_lock, flags);
+ list_for_each_entry(notifier, &ctl->notifier_list, entry) {
+ spin_unlock_irqrestore(&ctl->notifier_lock, flags);
+ if (notifier->notifier_fn)
+ notifier->notifier_fn(state, notifier->data);
+
+ spin_lock_irqsave(&ctl->notifier_lock, flags);
+ }
+ spin_unlock_irqrestore(&ctl->notifier_lock, flags);
+}
+
+void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
+{
+ ctl->md_state = state;
+
+	/* Notify the ports first, otherwise sending a message on HS2 may fail */
+ t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
+ fsm_state_notify(ctl->md, state);
+}
+
+static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
+{
+ if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+ *cmd->ret = result;
+ complete_all(cmd->done);
+ }
+
+ kfree(cmd);
+}
+
+static void fsm_del_kf_event(struct t7xx_fsm_event *event)
+{
+ list_del(&event->entry);
+ kfree(event);
+}
+
+static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
+{
+ struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
+ struct t7xx_fsm_event *event, *evt_next;
+ struct t7xx_fsm_command *cmd, *cmd_next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->command_lock, flags);
+ list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
+ dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
+ list_del(&cmd->entry);
+ fsm_finish_command(ctl, cmd, -EINVAL);
+ }
+ spin_unlock_irqrestore(&ctl->command_lock, flags);
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
+ dev_warn(dev, "Unhandled event %d\n", event->event_id);
+ fsm_del_kf_event(event);
+ }
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+}
+
+static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
+ enum t7xx_fsm_event_state event_ignore, int retries)
+{
+ struct t7xx_fsm_event *event;
+ bool event_received = false;
+ unsigned long flags;
+ int cnt = 0;
+
+ while (cnt++ < retries && !event_received) {
+ bool sleep_required = true;
+
+ if (kthread_should_stop())
+ return;
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
+ if (event) {
+ event_received = event->event_id == event_expected;
+ if (event_received || event->event_id == event_ignore) {
+ fsm_del_kf_event(event);
+ sleep_required = false;
+ }
+ }
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+
+ if (sleep_required)
+ msleep(FSM_EVENT_POLL_INTERVAL_MS);
+ }
+}
+
+static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
+ enum t7xx_ex_reason reason)
+{
+ struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
+
+ if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
+ if (cmd)
+ fsm_finish_command(ctl, cmd, -EINVAL);
+
+ return;
+ }
+
+ ctl->curr_state = FSM_STATE_EXCEPTION;
+
+ switch (reason) {
+ case EXCEPTION_HS_TIMEOUT:
+ dev_err(dev, "Boot Handshake failure\n");
+ break;
+
+ case EXCEPTION_EVENT:
+ dev_err(dev, "Exception event\n");
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
+ t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
+ t7xx_md_exception_handshake(ctl->md);
+
+ fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
+ FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
+ fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
+ FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
+ break;
+
+ default:
+ dev_err(dev, "Exception %d\n", reason);
+ break;
+ }
+
+ if (cmd)
+ fsm_finish_command(ctl, cmd, 0);
+}
+
+static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
+{
+ ctl->curr_state = FSM_STATE_STOPPED;
+
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
+ return t7xx_md_reset(ctl->md->t7xx_dev);
+}
+
+static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
+{
+ if (ctl->curr_state == FSM_STATE_STOPPED) {
+ fsm_finish_command(ctl, cmd, -EINVAL);
+ return;
+ }
+
+ fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
+}
+
+static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ struct cldma_ctrl *md_ctrl;
+ int err;
+
+ if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
+ fsm_finish_command(ctl, cmd, -EINVAL);
+ return;
+ }
+
+ md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
+ t7xx_dev = ctl->md->t7xx_dev;
+
+ ctl->curr_state = FSM_STATE_STOPPING;
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
+ t7xx_cldma_stop(md_ctrl);
+
+ if (!ctl->md->rgu_irq_asserted) {
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
+ /* Wait for the DRM disable to take effect */
+ msleep(FSM_DRM_DISABLE_DELAY_MS);
+
+ err = t7xx_acpi_fldr_func(t7xx_dev);
+ if (err)
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+ }
+
+ fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
+}
+
+static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
+{
+ if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
+ return;
+
+ ctl->md_state = MD_STATE_READY;
+
+ fsm_state_notify(ctl->md, MD_STATE_READY);
+ t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
+}
+
+static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
+{
+ struct t7xx_modem *md = ctl->md;
+
+ ctl->curr_state = FSM_STATE_READY;
+ t7xx_fsm_broadcast_ready_state(ctl);
+ t7xx_md_event_notify(md, FSM_READY);
+}
+
+static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
+{
+ struct t7xx_modem *md = ctl->md;
+ struct device *dev;
+
+ ctl->curr_state = FSM_STATE_STARTING;
+
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
+ t7xx_md_event_notify(md, FSM_START);
+
+ wait_event_interruptible_timeout(ctl->async_hk_wq,
+ (md->core_md.ready && md->core_ap.ready) ||
+ ctl->exp_flg, HZ * 60);
+ dev = &md->t7xx_dev->pdev->dev;
+
+ if (ctl->exp_flg)
+ dev_err(dev, "MD exception is captured during handshake\n");
+
+ if (!md->core_md.ready) {
+ dev_err(dev, "MD handshake timeout\n");
+ if (md->core_md.handshake_ongoing)
+ t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
+
+ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
+ return -ETIMEDOUT;
+ } else if (!md->core_ap.ready) {
+ dev_err(dev, "AP handshake timeout\n");
+ if (md->core_ap.handshake_ongoing)
+ t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);
+
+ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
+ return -ETIMEDOUT;
+ }
+
+ t7xx_pci_pm_init_late(md->t7xx_dev);
+ fsm_routine_ready(ctl);
+ return 0;
+}
+
+static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
+{
+ struct t7xx_modem *md = ctl->md;
+ u32 dev_status;
+ int ret;
+
+ if (!md)
+ return;
+
+ if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
+ ctl->curr_state != FSM_STATE_STOPPED) {
+ fsm_finish_command(ctl, cmd, -EINVAL);
+ return;
+ }
+
+ ctl->curr_state = FSM_STATE_PRE_START;
+ t7xx_md_event_notify(md, FSM_PRE_START);
+
+ ret = read_poll_timeout(ioread32, dev_status,
+ (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
+ false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ if (ret) {
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+
+ fsm_finish_command(ctl, cmd, -ETIMEDOUT);
+ dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
+ return;
+ }
+
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
+ fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
+}
+
+static int fsm_main_thread(void *data)
+{
+ struct t7xx_fsm_ctl *ctl = data;
+ struct t7xx_fsm_command *cmd;
+ unsigned long flags;
+
+ while (!kthread_should_stop()) {
+ if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
+ kthread_should_stop()))
+ continue;
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&ctl->command_lock, flags);
+ cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
+ list_del(&cmd->entry);
+ spin_unlock_irqrestore(&ctl->command_lock, flags);
+
+ switch (cmd->cmd_id) {
+ case FSM_CMD_START:
+ fsm_routine_start(ctl, cmd);
+ break;
+
+ case FSM_CMD_EXCEPTION:
+ fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
+ break;
+
+ case FSM_CMD_PRE_STOP:
+ fsm_routine_stopping(ctl, cmd);
+ break;
+
+ case FSM_CMD_STOP:
+ fsm_routine_stopped(ctl, cmd);
+ break;
+
+ default:
+ fsm_finish_command(ctl, cmd, -EINVAL);
+ fsm_flush_event_cmd_qs(ctl);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct t7xx_fsm_command *cmd;
+ unsigned long flags;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cmd->entry);
+ cmd->cmd_id = cmd_id;
+ cmd->flag = flag;
+ if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+ cmd->done = &done;
+ cmd->ret = &ret;
+ }
+
+ spin_lock_irqsave(&ctl->command_lock, flags);
+ list_add_tail(&cmd->entry, &ctl->command_queue);
+ spin_unlock_irqrestore(&ctl->command_lock, flags);
+
+ wake_up(&ctl->command_wq);
+
+ if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+ unsigned long wait_ret;
+
+ wait_ret = wait_for_completion_timeout(&done,
+ msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
+ if (!wait_ret)
+ return -ETIMEDOUT;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
+ unsigned char *data, unsigned int length)
+{
+ struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
+ struct t7xx_fsm_event *event;
+ unsigned long flags;
+
+ if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
+ dev_err(dev, "Invalid event %d\n", event_id);
+ return -EINVAL;
+ }
+
+ event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&event->entry);
+ event->event_id = event_id;
+ event->length = length;
+
+ if (data && length)
+ memcpy(event->data, data, length);
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ list_add_tail(&event->entry, &ctl->event_queue);
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+
+ wake_up_all(&ctl->event_wq);
+ return 0;
+}
+
+void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
+{
+ struct t7xx_fsm_event *event, *evt_next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
+ if (event->event_id == event_id)
+ fsm_del_kf_event(event);
+ }
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+}
+
+enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
+{
+ if (ctl)
+ return ctl->md_state;
+
+ return MD_STATE_INVALID;
+}
+
+unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
+{
+ if (ctl)
+ return ctl->curr_state;
+
+ return FSM_STATE_STOPPED;
+}
+
+int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
+{
+ unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;
+
+ if (type == MD_IRQ_PORT_ENUM) {
+ return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
+ } else if (type == MD_IRQ_CCIF_EX) {
+ ctl->exp_flg = true;
+ wake_up(&ctl->async_hk_wq);
+ cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
+ return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
+ }
+
+ return -EINVAL;
+}
+
+void t7xx_fsm_reset(struct t7xx_modem *md)
+{
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ fsm_flush_event_cmd_qs(ctl);
+ ctl->curr_state = FSM_STATE_STOPPED;
+ ctl->exp_flg = false;
+}
+
+int t7xx_fsm_init(struct t7xx_modem *md)
+{
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct t7xx_fsm_ctl *ctl;
+
+ ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ md->fsm_ctl = ctl;
+ ctl->md = md;
+ ctl->curr_state = FSM_STATE_INIT;
+ INIT_LIST_HEAD(&ctl->command_queue);
+ INIT_LIST_HEAD(&ctl->event_queue);
+ init_waitqueue_head(&ctl->async_hk_wq);
+ init_waitqueue_head(&ctl->event_wq);
+ INIT_LIST_HEAD(&ctl->notifier_list);
+ init_waitqueue_head(&ctl->command_wq);
+ spin_lock_init(&ctl->event_lock);
+ spin_lock_init(&ctl->command_lock);
+ ctl->exp_flg = false;
+ spin_lock_init(&ctl->notifier_lock);
+
+ ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
+ return PTR_ERR_OR_ZERO(ctl->fsm_thread);
+}
+
+void t7xx_fsm_uninit(struct t7xx_modem *md)
+{
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ if (!ctl)
+ return;
+
+ if (ctl->fsm_thread)
+ kthread_stop(ctl->fsm_thread);
+
+ fsm_flush_event_cmd_qs(ctl);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
new file mode 100644
index 0000000000..b6e76f3903
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ *
+ * Contributors:
+ * Eliot Lee <eliot.lee@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_MONITOR_H__
+#define __T7XX_MONITOR_H__
+
+#include <linux/bits.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "t7xx_modem_ops.h"
+
+enum t7xx_fsm_state {
+ FSM_STATE_INIT,
+ FSM_STATE_PRE_START,
+ FSM_STATE_STARTING,
+ FSM_STATE_READY,
+ FSM_STATE_EXCEPTION,
+ FSM_STATE_STOPPING,
+ FSM_STATE_STOPPED,
+};
+
+enum t7xx_fsm_event_state {
+ FSM_EVENT_INVALID,
+ FSM_EVENT_MD_HS2,
+ FSM_EVENT_AP_HS2,
+ FSM_EVENT_MD_EX,
+ FSM_EVENT_MD_EX_REC_OK,
+ FSM_EVENT_MD_EX_PASS,
+ FSM_EVENT_MD_HS2_EXIT,
+ FSM_EVENT_AP_HS2_EXIT,
+ FSM_EVENT_MAX
+};
+
+enum t7xx_fsm_cmd_state {
+ FSM_CMD_INVALID,
+ FSM_CMD_START,
+ FSM_CMD_EXCEPTION,
+ FSM_CMD_PRE_STOP,
+ FSM_CMD_STOP,
+};
+
+enum t7xx_ex_reason {
+ EXCEPTION_HS_TIMEOUT,
+ EXCEPTION_EVENT,
+};
+
+enum t7xx_md_irq_type {
+ MD_IRQ_WDT,
+ MD_IRQ_CCIF_EX,
+ MD_IRQ_PORT_ENUM,
+};
+
+enum md_state {
+ MD_STATE_INVALID,
+ MD_STATE_WAITING_FOR_HS1,
+ MD_STATE_WAITING_FOR_HS2,
+ MD_STATE_READY,
+ MD_STATE_EXCEPTION,
+ MD_STATE_WAITING_TO_STOP,
+ MD_STATE_STOPPED,
+};
+
+#define FSM_CMD_FLAG_WAIT_FOR_COMPLETION BIT(0)
+#define FSM_CMD_FLAG_FLIGHT_MODE BIT(1)
+#define FSM_CMD_FLAG_IN_INTERRUPT BIT(2)
+#define FSM_CMD_EX_REASON GENMASK(23, 16)
+
+struct t7xx_fsm_ctl {
+ struct t7xx_modem *md;
+ enum md_state md_state;
+ unsigned int curr_state;
+ struct list_head command_queue;
+ struct list_head event_queue;
+ wait_queue_head_t command_wq;
+ wait_queue_head_t event_wq;
+ wait_queue_head_t async_hk_wq;
+ spinlock_t event_lock; /* Protects event queue */
+ spinlock_t command_lock; /* Protects command queue */
+ struct task_struct *fsm_thread;
+ bool exp_flg;
+ spinlock_t notifier_lock; /* Protects notifier list */
+ struct list_head notifier_list;
+};
+
+struct t7xx_fsm_event {
+ struct list_head entry;
+ enum t7xx_fsm_event_state event_id;
+ unsigned int length;
+ unsigned char data[];
+};
+
+struct t7xx_fsm_command {
+ struct list_head entry;
+ enum t7xx_fsm_cmd_state cmd_id;
+ unsigned int flag;
+ struct completion *done;
+ int *ret;
+};
+
+struct t7xx_fsm_notifier {
+ struct list_head entry;
+ int (*notifier_fn)(enum md_state state, void *data);
+ void *data;
+};
+
+int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id,
+ unsigned int flag);
+int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
+ unsigned char *data, unsigned int length);
+void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id);
+void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state);
+void t7xx_fsm_reset(struct t7xx_modem *md);
+int t7xx_fsm_init(struct t7xx_modem *md);
+void t7xx_fsm_uninit(struct t7xx_modem *md);
+int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type);
+enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl);
+unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl);
+void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier);
+void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier);
+
+#endif /* __T7XX_MONITOR_H__ */
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
new file mode 100644
index 0000000000..284ab1f563
--- /dev/null
+++ b/drivers/net/wwan/wwan_core.c
@@ -0,0 +1,1250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/termios.h>
+#include <linux/wwan.h>
+#include <net/rtnetlink.h>
+#include <uapi/linux/wwan.h>
+
+/* Maximum number of minors in use */
+#define WWAN_MAX_MINORS (1 << MINORBITS)
+
+static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
+static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
+static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
+static struct class *wwan_class;
+static int wwan_major;
+static struct dentry *wwan_debugfs_dir;
+
+#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
+#define to_wwan_port(d) container_of(d, struct wwan_port, dev)
+
+/* WWAN port flags */
+#define WWAN_PORT_TX_OFF 0
+
+/**
+ * struct wwan_device - The structure that defines a WWAN device
+ *
+ * @id: WWAN device unique ID.
+ * @dev: Underlying device.
+ * @port_id: Current available port ID to pick.
+ * @ops: wwan device ops
+ * @ops_ctxt: context to pass to ops
+ * @debugfs_dir: WWAN device debugfs dir
+ */
+struct wwan_device {
+ unsigned int id;
+ struct device dev;
+ atomic_t port_id;
+ const struct wwan_ops *ops;
+ void *ops_ctxt;
+#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
+};
+
+/**
+ * struct wwan_port - The structure that defines a WWAN port
+ * @type: Port type
+ * @start_count: Port start counter
+ * @flags: Store port state and capabilities
+ * @ops: Pointer to WWAN port operations
+ * @ops_lock: Protect port ops
+ * @dev: Underlying device
+ * @rxq: Buffer inbound queue
+ * @waitqueue: The waitqueue for port fops (read/write/poll)
+ * @data_lock: Port specific data access serialization
+ * @headroom_len: SKB reserved headroom size
+ * @frag_len: Length to fragment packet
+ * @at_data: AT port specific data
+ */
+struct wwan_port {
+ enum wwan_port_type type;
+ unsigned int start_count;
+ unsigned long flags;
+ const struct wwan_port_ops *ops;
+ struct mutex ops_lock; /* Serialize ops + protect against removal */
+ struct device dev;
+ struct sk_buff_head rxq;
+ wait_queue_head_t waitqueue;
+ struct mutex data_lock; /* Port specific data access serialization */
+ size_t headroom_len;
+ size_t frag_len;
+ union {
+ struct {
+ struct ktermios termios;
+ int mdmbits;
+ } at_data;
+ };
+};
+
+static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct wwan_device *wwan = to_wwan_dev(dev);
+
+ return sprintf(buf, "%d\n", wwan->id);
+}
+static DEVICE_ATTR_RO(index);
+
+static struct attribute *wwan_dev_attrs[] = {
+ &dev_attr_index.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wwan_dev);
+
+static void wwan_dev_destroy(struct device *dev)
+{
+ struct wwan_device *wwandev = to_wwan_dev(dev);
+
+ ida_free(&wwan_dev_ids, wwandev->id);
+ kfree(wwandev);
+}
+
+static const struct device_type wwan_dev_type = {
+ .name = "wwan_dev",
+ .release = wwan_dev_destroy,
+ .groups = wwan_dev_groups,
+};
+
+static int wwan_dev_parent_match(struct device *dev, const void *parent)
+{
+ return (dev->type == &wwan_dev_type &&
+ (dev->parent == parent || dev == parent));
+}
+
+static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
+static int wwan_dev_name_match(struct device *dev, const void *name)
+{
+ return dev->type == &wwan_dev_type &&
+ strcmp(dev_name(dev), name) == 0;
+}
+
+static struct wwan_device *wwan_dev_get_by_name(const char *name)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
+#ifdef CONFIG_WWAN_DEBUGFS
+struct dentry *wwan_get_debugfs_dir(struct device *parent)
+{
+ struct wwan_device *wwandev;
+
+ wwandev = wwan_dev_get_by_parent(parent);
+ if (IS_ERR(wwandev))
+ return ERR_CAST(wwandev);
+
+ return wwandev->debugfs_dir;
+}
+EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
+
+static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
+{
+ struct wwan_device *wwandev;
+
+ if (dev->type != &wwan_dev_type)
+ return 0;
+
+ wwandev = to_wwan_dev(dev);
+
+ return wwandev->debugfs_dir == dir;
+}
+
+static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
+void wwan_put_debugfs_dir(struct dentry *dir)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);
+
+ if (WARN_ON(IS_ERR(wwandev)))
+ return;
+
+ /* wwan_dev_get_by_debugfs() also got a reference */
+ put_device(&wwandev->dev);
+ put_device(&wwandev->dev);
+}
+EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
+#endif
+
+/* This function allocates and registers a new WWAN device OR if a WWAN device
+ * already exists for the given parent, it gets a reference and returns it.
+ * This function is not exported (for now), it is called indirectly via
+ * wwan_create_port().
+ */
+static struct wwan_device *wwan_create_dev(struct device *parent)
+{
+ struct wwan_device *wwandev;
+ int err, id;
+
+ /* The 'find-alloc-register' operation must be protected against
+ * concurrent execution; a WWAN device may be shared between
+ * multiple callers or concurrently unregistered from wwan_remove_dev().
+ */
+ mutex_lock(&wwan_register_lock);
+
+ /* If wwandev already exists, return it */
+ wwandev = wwan_dev_get_by_parent(parent);
+ if (!IS_ERR(wwandev))
+ goto done_unlock;
+
+ id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
+ if (id < 0) {
+ wwandev = ERR_PTR(id);
+ goto done_unlock;
+ }
+
+ wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
+ if (!wwandev) {
+ wwandev = ERR_PTR(-ENOMEM);
+ ida_free(&wwan_dev_ids, id);
+ goto done_unlock;
+ }
+
+ wwandev->dev.parent = parent;
+ wwandev->dev.class = wwan_class;
+ wwandev->dev.type = &wwan_dev_type;
+ wwandev->id = id;
+ dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
+
+ err = device_register(&wwandev->dev);
+ if (err) {
+ put_device(&wwandev->dev);
+ wwandev = ERR_PTR(err);
+ goto done_unlock;
+ }
+
+#ifdef CONFIG_WWAN_DEBUGFS
+ wwandev->debugfs_dir =
+ debugfs_create_dir(kobject_name(&wwandev->dev.kobj),
+ wwan_debugfs_dir);
+#endif
+
+done_unlock:
+ mutex_unlock(&wwan_register_lock);
+
+ return wwandev;
+}
+
+static int is_wwan_child(struct device *dev, void *data)
+{
+ return dev->class == wwan_class;
+}
+
+static void wwan_remove_dev(struct wwan_device *wwandev)
+{
+ int ret;
+
+ /* Prevent concurrent picking from wwan_create_dev */
+ mutex_lock(&wwan_register_lock);
+
+ /* WWAN device is created and registered (get+add) along with its first
+ * child port, and subsequent port registrations only grab a reference
+ * (get). The WWAN device must then be unregistered (del+put) along with
+ * its last port, and the reference simply dropped (put) otherwise. In the
+ * same fashion, we must not unregister it when the ops are still there.
+ */
+ if (wwandev->ops)
+ ret = 1;
+ else
+ ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
+
+ if (!ret) {
+#ifdef CONFIG_WWAN_DEBUGFS
+ debugfs_remove_recursive(wwandev->debugfs_dir);
+#endif
+ device_unregister(&wwandev->dev);
+ } else {
+ put_device(&wwandev->dev);
+ }
+
+ mutex_unlock(&wwan_register_lock);
+}
+
+/* ------- WWAN port management ------- */
+
+static const struct {
+ const char * const name; /* Port type name */
+ const char * const devsuf; /* Port device name suffix */
+} wwan_port_types[WWAN_PORT_MAX + 1] = {
+ [WWAN_PORT_AT] = {
+ .name = "AT",
+ .devsuf = "at",
+ },
+ [WWAN_PORT_MBIM] = {
+ .name = "MBIM",
+ .devsuf = "mbim",
+ },
+ [WWAN_PORT_QMI] = {
+ .name = "QMI",
+ .devsuf = "qmi",
+ },
+ [WWAN_PORT_QCDM] = {
+ .name = "QCDM",
+ .devsuf = "qcdm",
+ },
+ [WWAN_PORT_FIREHOSE] = {
+ .name = "FIREHOSE",
+ .devsuf = "firehose",
+ },
+ [WWAN_PORT_XMMRPC] = {
+ .name = "XMMRPC",
+ .devsuf = "xmmrpc",
+ },
+};
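+
+/* The resulting character devices are named wwan<dev-id><suffix><index>
+ * (e.g. wwan0at0 for the first AT port of device wwan0), following the
+ * "wwan%u%s%%d" name format built in wwan_create_port() below.
+ */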
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wwan_port *port = to_wwan_port(dev);
+
+ return sprintf(buf, "%s\n", wwan_port_types[port->type].name);
+}
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *wwan_port_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wwan_port);
+
+static void wwan_port_destroy(struct device *dev)
+{
+ struct wwan_port *port = to_wwan_port(dev);
+
+ ida_free(&minors, MINOR(port->dev.devt));
+ mutex_destroy(&port->data_lock);
+ mutex_destroy(&port->ops_lock);
+ kfree(port);
+}
+
+static const struct device_type wwan_port_dev_type = {
+ .name = "wwan_port",
+ .release = wwan_port_destroy,
+ .groups = wwan_port_groups,
+};
+
+static int wwan_port_minor_match(struct device *dev, const void *minor)
+{
+ return (dev->type == &wwan_port_dev_type &&
+ MINOR(dev->devt) == *(unsigned int *)minor);
+}
+
+static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_port(dev);
+}
+
+/* Allocate and set a unique name based on the passed format
+ *
+ * Name allocation approach is highly inspired by the __dev_alloc_name()
+ * function.
+ *
+ * To avoid name collisions, the caller must prevent the new port device
+ * registration as well as concurrent invocation of this function.
+ */
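+/* For instance (illustrative names), with the format "wwan0at%d" and existing
+ * ports wwan0at0 and wwan0at2, bits 0 and 2 get set in the id bitmap below,
+ * so the first free id is 1 and the new port becomes wwan0at1.
+ */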
+static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
+{
+ struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
+ const unsigned int max_ports = PAGE_SIZE * 8;
+ struct class_dev_iter iter;
+ unsigned long *idmap;
+ struct device *dev;
+ char buf[0x20];
+ int id;
+
+ idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (!idmap)
+ return -ENOMEM;
+
+ /* Collect ids of same name format ports */
+ class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
+ while ((dev = class_dev_iter_next(&iter))) {
+ if (dev->parent != &wwandev->dev)
+ continue;
+ if (sscanf(dev_name(dev), fmt, &id) != 1)
+ continue;
+ if (id < 0 || id >= max_ports)
+ continue;
+ set_bit(id, idmap);
+ }
+ class_dev_iter_exit(&iter);
+
+ /* Allocate unique id */
+ id = find_first_zero_bit(idmap, max_ports);
+ free_page((unsigned long)idmap);
+
+ snprintf(buf, sizeof(buf), fmt, id); /* Name generation */
+
+ dev = device_find_child_by_name(&wwandev->dev, buf);
+ if (dev) {
+ put_device(dev);
+ return -ENFILE;
+ }
+
+ return dev_set_name(&port->dev, buf);
+}
+
+struct wwan_port *wwan_create_port(struct device *parent,
+ enum wwan_port_type type,
+ const struct wwan_port_ops *ops,
+ struct wwan_port_caps *caps,
+ void *drvdata)
+{
+ struct wwan_device *wwandev;
+ struct wwan_port *port;
+ char namefmt[0x20];
+ int minor, err;
+
+ if (type > WWAN_PORT_MAX || !ops)
+ return ERR_PTR(-EINVAL);
+
+ /* A port is always a child of a WWAN device; retrieve (allocate or
+ * pick) the WWAN device based on the provided parent device.
+ */
+ wwandev = wwan_create_dev(parent);
+ if (IS_ERR(wwandev))
+ return ERR_CAST(wwandev);
+
+ /* A port is exposed as character device, get a minor */
+ minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
+ if (minor < 0) {
+ err = minor;
+ goto error_wwandev_remove;
+ }
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ ida_free(&minors, minor);
+ goto error_wwandev_remove;
+ }
+
+ port->type = type;
+ port->ops = ops;
+ port->frag_len = caps ? caps->frag_len : SIZE_MAX;
+ port->headroom_len = caps ? caps->headroom_len : 0;
+ mutex_init(&port->ops_lock);
+ skb_queue_head_init(&port->rxq);
+ init_waitqueue_head(&port->waitqueue);
+ mutex_init(&port->data_lock);
+
+ port->dev.parent = &wwandev->dev;
+ port->dev.class = wwan_class;
+ port->dev.type = &wwan_port_dev_type;
+ port->dev.devt = MKDEV(wwan_major, minor);
+ dev_set_drvdata(&port->dev, drvdata);
+
+ /* allocate unique name based on wwan device id, port type and number */
+ snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
+ wwan_port_types[port->type].devsuf);
+
+ /* Serialize ports registration */
+ mutex_lock(&wwan_register_lock);
+
+ __wwan_port_dev_assign_name(port, namefmt);
+ err = device_register(&port->dev);
+
+ mutex_unlock(&wwan_register_lock);
+
+ if (err)
+ goto error_put_device;
+
+ dev_info(&wwandev->dev, "port %s attached\n", dev_name(&port->dev));
+ return port;
+
+error_put_device:
+ put_device(&port->dev);
+error_wwandev_remove:
+ wwan_remove_dev(wwandev);
+
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(wwan_create_port);
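+
+/* A minimal usage sketch (not part of the upstream file; foo_*, foo->dev and
+ * foo are illustrative names for a hypothetical driver exposing an AT port):
+ *
+ *     static const struct wwan_port_ops foo_port_ops = {
+ *             .start = foo_start,
+ *             .stop  = foo_stop,
+ *             .tx    = foo_tx,
+ *     };
+ *
+ *     port = wwan_create_port(foo->dev, WWAN_PORT_AT, &foo_port_ops,
+ *                             NULL, foo);
+ *     if (IS_ERR(port))
+ *             return PTR_ERR(port);
+ *
+ * Modem data is then pushed to readers with wwan_port_rx(port, skb) and the
+ * port is torn down with wwan_remove_port(port), as wwan_hwsim.c does later
+ * in this patch.
+ */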
+
+void wwan_remove_port(struct wwan_port *port)
+{
+ struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
+
+ mutex_lock(&port->ops_lock);
+ if (port->start_count)
+ port->ops->stop(port);
+ port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
+ mutex_unlock(&port->ops_lock);
+
+ wake_up_interruptible(&port->waitqueue);
+
+ skb_queue_purge(&port->rxq);
+ dev_set_drvdata(&port->dev, NULL);
+
+ dev_info(&wwandev->dev, "port %s disconnected\n", dev_name(&port->dev));
+ device_unregister(&port->dev);
+
+ /* Release related wwan device */
+ wwan_remove_dev(wwandev);
+}
+EXPORT_SYMBOL_GPL(wwan_remove_port);
+
+void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
+{
+ skb_queue_tail(&port->rxq, skb);
+ wake_up_interruptible(&port->waitqueue);
+}
+EXPORT_SYMBOL_GPL(wwan_port_rx);
+
+void wwan_port_txon(struct wwan_port *port)
+{
+ clear_bit(WWAN_PORT_TX_OFF, &port->flags);
+ wake_up_interruptible(&port->waitqueue);
+}
+EXPORT_SYMBOL_GPL(wwan_port_txon);
+
+void wwan_port_txoff(struct wwan_port *port)
+{
+ set_bit(WWAN_PORT_TX_OFF, &port->flags);
+}
+EXPORT_SYMBOL_GPL(wwan_port_txoff);
+
+void *wwan_port_get_drvdata(struct wwan_port *port)
+{
+ return dev_get_drvdata(&port->dev);
+}
+EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);
+
+static int wwan_port_op_start(struct wwan_port *port)
+{
+ int ret = 0;
+
+ mutex_lock(&port->ops_lock);
+ if (!port->ops) { /* Port got unplugged */
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ /* If port is already started, don't start again */
+ if (!port->start_count)
+ ret = port->ops->start(port);
+
+ if (!ret)
+ port->start_count++;
+
+out_unlock:
+ mutex_unlock(&port->ops_lock);
+
+ return ret;
+}
+
+static void wwan_port_op_stop(struct wwan_port *port)
+{
+ mutex_lock(&port->ops_lock);
+ port->start_count--;
+ if (!port->start_count) {
+ if (port->ops)
+ port->ops->stop(port);
+ skb_queue_purge(&port->rxq);
+ }
+ mutex_unlock(&port->ops_lock);
+}
+
+static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
+ bool nonblock)
+{
+ int ret;
+
+ mutex_lock(&port->ops_lock);
+ if (!port->ops) { /* Port got unplugged */
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (nonblock || !port->ops->tx_blocking)
+ ret = port->ops->tx(port, skb);
+ else
+ ret = port->ops->tx_blocking(port, skb);
+
+out_unlock:
+ mutex_unlock(&port->ops_lock);
+
+ return ret;
+}
+
+static bool is_read_blocked(struct wwan_port *port)
+{
+ return skb_queue_empty(&port->rxq) && port->ops;
+}
+
+static bool is_write_blocked(struct wwan_port *port)
+{
+ return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
+}
+
+static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
+{
+ if (!is_read_blocked(port))
+ return 0;
+
+ if (nonblock)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
+static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
+{
+ if (!is_write_blocked(port))
+ return 0;
+
+ if (nonblock)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
+static int wwan_port_fops_open(struct inode *inode, struct file *file)
+{
+ struct wwan_port *port;
+ int err = 0;
+
+ port = wwan_port_get_by_minor(iminor(inode));
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ file->private_data = port;
+ stream_open(inode, file);
+
+ err = wwan_port_op_start(port);
+ if (err)
+ put_device(&port->dev);
+
+ return err;
+}
+
+static int wwan_port_fops_release(struct inode *inode, struct file *filp)
+{
+ struct wwan_port *port = filp->private_data;
+
+ wwan_port_op_stop(port);
+ put_device(&port->dev);
+
+ return 0;
+}
+
+static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_port *port = filp->private_data;
+ struct sk_buff *skb;
+ size_t copied;
+ int ret;
+
+ ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ skb = skb_dequeue(&port->rxq);
+ if (!skb)
+ return -EIO;
+
+ copied = min_t(size_t, count, skb->len);
+ if (copy_to_user(buf, skb->data, copied)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+ skb_pull(skb, copied);
+
+ /* skb is not fully consumed, keep it in the queue */
+ if (skb->len)
+ skb_queue_head(&port->rxq, skb);
+ else
+ consume_skb(skb);
+
+ return copied;
+}
+
+static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct sk_buff *skb, *head = NULL, *tail = NULL;
+ struct wwan_port *port = filp->private_data;
+ size_t frag_len, remain = count;
+ int ret;
+
+ ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ do {
+ frag_len = min(remain, port->frag_len);
+ skb = alloc_skb(frag_len + port->headroom_len, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto freeskb;
+ }
+ skb_reserve(skb, port->headroom_len);
+
+ if (!head) {
+ head = skb;
+ } else if (!tail) {
+ skb_shinfo(head)->frag_list = skb;
+ tail = skb;
+ } else {
+ tail->next = skb;
+ tail = skb;
+ }
+
+ if (copy_from_user(skb_put(skb, frag_len), buf + count - remain, frag_len)) {
+ ret = -EFAULT;
+ goto freeskb;
+ }
+
+ if (skb != head) {
+ head->data_len += skb->len;
+ head->len += skb->len;
+ head->truesize += skb->truesize;
+ }
+ } while (remain -= frag_len);
+
+ ret = wwan_port_op_tx(port, head, !!(filp->f_flags & O_NONBLOCK));
+ if (!ret)
+ return count;
+
+freeskb:
+ kfree_skb(head);
+ return ret;
+}
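+
+/* Illustration (assumed values, not from the original source): with
+ * caps->frag_len = 1500 and headroom_len = 16, a 4000-byte write() is split
+ * above into a 1500-byte head skb plus 1500- and 1000-byte skbs chained on
+ * the head's frag_list; head->len still reports the full 4000 bytes handed
+ * to the port's tx op.
+ */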
+
+static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
+{
+ struct wwan_port *port = filp->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(filp, &port->waitqueue, wait);
+
+ mutex_lock(&port->ops_lock);
+ if (port->ops && port->ops->tx_poll)
+ mask |= port->ops->tx_poll(port, filp, wait);
+ else if (!is_write_blocked(port))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ if (!is_read_blocked(port))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (!port->ops)
+ mask |= EPOLLHUP | EPOLLERR;
+ mutex_unlock(&port->ops_lock);
+
+ return mask;
+}
+
+/* Implements minimalistic stub terminal IOCTL support */
+static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ mutex_lock(&port->data_lock);
+
+ switch (cmd) {
+ case TCFLSH:
+ break;
+
+ case TCGETS:
+ if (copy_to_user((void __user *)arg, &port->at_data.termios,
+ sizeof(struct termios)))
+ ret = -EFAULT;
+ break;
+
+ case TCSETS:
+ case TCSETSW:
+ case TCSETSF:
+ if (copy_from_user(&port->at_data.termios, (void __user *)arg,
+ sizeof(struct termios)))
+ ret = -EFAULT;
+ break;
+
+#ifdef TCGETS2
+ case TCGETS2:
+ if (copy_to_user((void __user *)arg, &port->at_data.termios,
+ sizeof(struct termios2)))
+ ret = -EFAULT;
+ break;
+
+ case TCSETS2:
+ case TCSETSW2:
+ case TCSETSF2:
+ if (copy_from_user(&port->at_data.termios, (void __user *)arg,
+ sizeof(struct termios2)))
+ ret = -EFAULT;
+ break;
+#endif
+
+ case TIOCMGET:
+ ret = put_user(port->at_data.mdmbits, (int __user *)arg);
+ break;
+
+ case TIOCMSET:
+ case TIOCMBIC:
+ case TIOCMBIS: {
+ int mdmbits;
+
+ if (copy_from_user(&mdmbits, (int __user *)arg, sizeof(int))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (cmd == TIOCMBIC)
+ port->at_data.mdmbits &= ~mdmbits;
+ else if (cmd == TIOCMBIS)
+ port->at_data.mdmbits |= mdmbits;
+ else
+ port->at_data.mdmbits = mdmbits;
+ break;
+ }
+
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+
+ mutex_unlock(&port->data_lock);
+
+ return ret;
+}
+
+static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct wwan_port *port = filp->private_data;
+ int res;
+
+ if (port->type == WWAN_PORT_AT) { /* AT port specific IOCTLs */
+ res = wwan_port_fops_at_ioctl(port, cmd, arg);
+ if (res != -ENOIOCTLCMD)
+ return res;
+ }
+
+ switch (cmd) {
+ case TIOCINQ: { /* aka SIOCINQ aka FIONREAD */
+ unsigned long flags;
+ struct sk_buff *skb;
+ int amount = 0;
+
+ spin_lock_irqsave(&port->rxq.lock, flags);
+ skb_queue_walk(&port->rxq, skb)
+ amount += skb->len;
+ spin_unlock_irqrestore(&port->rxq.lock, flags);
+
+ return put_user(amount, (int __user *)arg);
+ }
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct file_operations wwan_port_fops = {
+ .owner = THIS_MODULE,
+ .open = wwan_port_fops_open,
+ .release = wwan_port_fops_release,
+ .read = wwan_port_fops_read,
+ .write = wwan_port_fops_write,
+ .poll = wwan_port_fops_poll,
+ .unlocked_ioctl = wwan_port_fops_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_ptr_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ if (!data)
+ return -EINVAL;
+
+ if (!tb[IFLA_PARENT_DEV_NAME])
+ return -EINVAL;
+
+ if (!data[IFLA_WWAN_LINK_ID])
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct device_type wwan_type = { .name = "wwan" };
+
+static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
+ const char *ifname,
+ unsigned char name_assign_type,
+ unsigned int num_tx_queues,
+ unsigned int num_rx_queues)
+{
+ const char *devname = nla_data(tb[IFLA_PARENT_DEV_NAME]);
+ struct wwan_device *wwandev = wwan_dev_get_by_name(devname);
+ struct net_device *dev;
+ unsigned int priv_size;
+
+ if (IS_ERR(wwandev))
+ return ERR_CAST(wwandev);
+
+ /* only supported if ops were registered (not just ports) */
+ if (!wwandev->ops) {
+ dev = ERR_PTR(-EOPNOTSUPP);
+ goto out;
+ }
+
+ priv_size = sizeof(struct wwan_netdev_priv) + wwandev->ops->priv_size;
+ dev = alloc_netdev_mqs(priv_size, ifname, name_assign_type,
+ wwandev->ops->setup, num_tx_queues, num_rx_queues);
+
+ if (dev) {
+ SET_NETDEV_DEV(dev, &wwandev->dev);
+ SET_NETDEV_DEVTYPE(dev, &wwan_type);
+ }
+
+out:
+ /* release the reference */
+ put_device(&wwandev->dev);
+ return dev;
+}
+
+static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
+ u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
+ struct wwan_netdev_priv *priv = netdev_priv(dev);
+ int ret;
+
+ if (IS_ERR(wwandev))
+ return PTR_ERR(wwandev);
+
+ /* shouldn't have a netdev (left) with us as parent so WARN */
+ if (WARN_ON(!wwandev->ops)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ priv->link_id = link_id;
+ if (wwandev->ops->newlink)
+ ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev,
+ link_id, extack);
+ else
+ ret = register_netdevice(dev);
+
+out:
+ /* release the reference */
+ put_device(&wwandev->dev);
+ return ret;
+}
+
+static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
+
+ if (IS_ERR(wwandev))
+ return;
+
+ /* shouldn't have a netdev (left) with us as parent so WARN */
+ if (WARN_ON(!wwandev->ops))
+ goto out;
+
+ if (wwandev->ops->dellink)
+ wwandev->ops->dellink(wwandev->ops_ctxt, dev, head);
+ else
+ unregister_netdevice_queue(dev, head);
+
+out:
+ /* release the reference */
+ put_device(&wwandev->dev);
+}
+
+static size_t wwan_rtnl_get_size(const struct net_device *dev)
+{
+ return
+ nla_total_size(4) + /* IFLA_WWAN_LINK_ID */
+ 0;
+}
+
+static int wwan_rtnl_fill_info(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct wwan_netdev_priv *priv = netdev_priv(dev);
+
+ if (nla_put_u32(skb, IFLA_WWAN_LINK_ID, priv->link_id))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
+ [IFLA_WWAN_LINK_ID] = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
+ .kind = "wwan",
+ .maxtype = __IFLA_WWAN_MAX,
+ .alloc = wwan_rtnl_alloc,
+ .validate = wwan_rtnl_validate,
+ .newlink = wwan_rtnl_newlink,
+ .dellink = wwan_rtnl_dellink,
+ .get_size = wwan_rtnl_get_size,
+ .fill_info = wwan_rtnl_fill_info,
+ .policy = wwan_rtnl_policy,
+};
+
+static void wwan_create_default_link(struct wwan_device *wwandev,
+ u32 def_link_id)
+{
+ struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
+ struct nlattr *data[IFLA_WWAN_MAX + 1];
+ struct net_device *dev;
+ struct nlmsghdr *nlh;
+ struct sk_buff *msg;
+
+ /* Forge attributes required to create a WWAN netdev. We first
+ * build a netlink message and then parse it. This looks
+ * odd, but such an approach is less error-prone.
+ */
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (WARN_ON(!msg))
+ return;
+ nlh = nlmsg_put(msg, 0, 0, RTM_NEWLINK, 0, 0);
+ if (WARN_ON(!nlh))
+ goto free_attrs;
+
+ if (nla_put_string(msg, IFLA_PARENT_DEV_NAME, dev_name(&wwandev->dev)))
+ goto free_attrs;
+ tb[IFLA_LINKINFO] = nla_nest_start(msg, IFLA_LINKINFO);
+ if (!tb[IFLA_LINKINFO])
+ goto free_attrs;
+ linkinfo[IFLA_INFO_DATA] = nla_nest_start(msg, IFLA_INFO_DATA);
+ if (!linkinfo[IFLA_INFO_DATA])
+ goto free_attrs;
+ if (nla_put_u32(msg, IFLA_WWAN_LINK_ID, def_link_id))
+ goto free_attrs;
+ nla_nest_end(msg, linkinfo[IFLA_INFO_DATA]);
+ nla_nest_end(msg, tb[IFLA_LINKINFO]);
+
+ nlmsg_end(msg, nlh);
+
+ /* The next three parsing calls cannot fail */
+ nlmsg_parse_deprecated(nlh, 0, tb, IFLA_MAX, NULL, NULL);
+ nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO],
+ NULL, NULL);
+ nla_parse_nested_deprecated(data, IFLA_WWAN_MAX,
+ linkinfo[IFLA_INFO_DATA], NULL, NULL);
+
+ rtnl_lock();
+
+ dev = rtnl_create_link(&init_net, "wwan%d", NET_NAME_ENUM,
+ &wwan_rtnl_link_ops, tb, NULL);
+ if (WARN_ON(IS_ERR(dev)))
+ goto unlock;
+
+ if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) {
+ free_netdev(dev);
+ goto unlock;
+ }
+
+ rtnl_configure_link(dev, NULL, 0, NULL); /* Link initialized, notify new link */
+
+unlock:
+ rtnl_unlock();
+
+free_attrs:
+ nlmsg_free(msg);
+}
+
+/**
+ * wwan_register_ops - register WWAN device ops
+ * @parent: Device to use as parent and shared by all WWAN ports and
+ * created netdevs
+ * @ops: operations to register
+ * @ctxt: context to pass to operations
+ * @def_link_id: id of the default link that will be automatically created by
+ * the WWAN core for the WWAN device. The default link will not be created
+ * if the passed value is WWAN_NO_DEFAULT_LINK.
+ *
+ * Returns: 0 on success, a negative error code on failure
+ */
+int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
+ void *ctxt, u32 def_link_id)
+{
+ struct wwan_device *wwandev;
+
+ if (WARN_ON(!parent || !ops || !ops->setup))
+ return -EINVAL;
+
+ wwandev = wwan_create_dev(parent);
+ if (IS_ERR(wwandev))
+ return PTR_ERR(wwandev);
+
+ if (WARN_ON(wwandev->ops)) {
+ wwan_remove_dev(wwandev);
+ return -EBUSY;
+ }
+
+ wwandev->ops = ops;
+ wwandev->ops_ctxt = ctxt;
+
+ /* NB: we do not abort ops registration in case of default link
+ * creation failure. Link ops is the management interface, while the
+ * default link creation is a service option. And we should not prevent
+ * a user from manually creating a link later if the service option failed
+ * now.
+ */
+ if (def_link_id != WWAN_NO_DEFAULT_LINK)
+ wwan_create_default_link(wwandev, def_link_id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wwan_register_ops);
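+
+/* A minimal registration sketch (hypothetical driver; foo_* names are
+ * illustrative only):
+ *
+ *     static const struct wwan_ops foo_wwan_ops = {
+ *             .priv_size = sizeof(struct foo_netdev_priv),
+ *             .setup     = foo_netdev_setup,
+ *     };
+ *
+ *     err = wwan_register_ops(parent_dev, &foo_wwan_ops, foo_ctxt, 0);
+ *
+ * This attaches a wwanN device to parent_dev and, since the last argument is
+ * not WWAN_NO_DEFAULT_LINK, also creates a default network link with link id
+ * 0. See wwan_hwsim_dev_new() later in this patch for an in-tree caller.
+ */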
+
+/* Enqueue child netdev deletion */
+static int wwan_child_dellink(struct device *dev, void *data)
+{
+ struct list_head *kill_list = data;
+
+ if (dev->type == &wwan_type)
+ wwan_rtnl_dellink(to_net_dev(dev), kill_list);
+
+ return 0;
+}
+
+/**
+ * wwan_unregister_ops - remove WWAN device ops
+ * @parent: Device to use as parent and shared by all WWAN ports and
+ * created netdevs
+ */
+void wwan_unregister_ops(struct device *parent)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
+ LIST_HEAD(kill_list);
+
+ if (WARN_ON(IS_ERR(wwandev)))
+ return;
+ if (WARN_ON(!wwandev->ops)) {
+ put_device(&wwandev->dev);
+ return;
+ }
+
+ /* put the reference obtained by wwan_dev_get_by_parent();
+ * we should still have one (that the owner is giving back
+ * now) due to the ops being assigned.
+ */
+ put_device(&wwandev->dev);
+
+ rtnl_lock(); /* Prevent concurrent netdev(s) creation/destruction */
+
+ /* Remove all child netdev(s), using batch removing */
+ device_for_each_child(&wwandev->dev, &kill_list,
+ wwan_child_dellink);
+ unregister_netdevice_many(&kill_list);
+
+ wwandev->ops = NULL; /* Finally remove ops */
+
+ rtnl_unlock();
+
+ wwandev->ops_ctxt = NULL;
+ wwan_remove_dev(wwandev);
+}
+EXPORT_SYMBOL_GPL(wwan_unregister_ops);
+
+static int __init wwan_init(void)
+{
+ int err;
+
+ err = rtnl_link_register(&wwan_rtnl_link_ops);
+ if (err)
+ return err;
+
+ wwan_class = class_create("wwan");
+ if (IS_ERR(wwan_class)) {
+ err = PTR_ERR(wwan_class);
+ goto unregister;
+ }
+
+ /* chrdev used for wwan ports */
+ wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
+ &wwan_port_fops);
+ if (wwan_major < 0) {
+ err = wwan_major;
+ goto destroy;
+ }
+
+#ifdef CONFIG_WWAN_DEBUGFS
+ wwan_debugfs_dir = debugfs_create_dir("wwan", NULL);
+#endif
+
+ return 0;
+
+destroy:
+ class_destroy(wwan_class);
+unregister:
+ rtnl_link_unregister(&wwan_rtnl_link_ops);
+ return err;
+}
+
+static void __exit wwan_exit(void)
+{
+ debugfs_remove_recursive(wwan_debugfs_dir);
+ __unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
+ rtnl_link_unregister(&wwan_rtnl_link_ops);
+ class_destroy(wwan_class);
+}
+
+module_init(wwan_init);
+module_exit(wwan_exit);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("WWAN core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
new file mode 100644
index 0000000000..ff3dd24ddb
--- /dev/null
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -0,0 +1,559 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * WWAN device simulator for WWAN framework testing.
+ *
+ * Copyright (c) 2021, Sergey Ryazanov <ryazanov.s.a@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wwan.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+
+#include <net/arp.h>
+
+static int wwan_hwsim_devsnum = 2;
+module_param_named(devices, wwan_hwsim_devsnum, int, 0444);
+MODULE_PARM_DESC(devices, "Number of simulated devices");
+
+static struct class *wwan_hwsim_class;
+
+static struct dentry *wwan_hwsim_debugfs_topdir;
+static struct dentry *wwan_hwsim_debugfs_devcreate;
+
+static DEFINE_SPINLOCK(wwan_hwsim_devs_lock);
+static LIST_HEAD(wwan_hwsim_devs);
+static unsigned int wwan_hwsim_dev_idx;
+static struct workqueue_struct *wwan_wq;
+
+struct wwan_hwsim_dev {
+ struct list_head list;
+ unsigned int id;
+ struct device dev;
+ struct work_struct del_work;
+ struct dentry *debugfs_topdir;
+ struct dentry *debugfs_portcreate;
+ spinlock_t ports_lock; /* Serialize ports creation/deletion */
+ unsigned int port_idx;
+ struct list_head ports;
+};
+
+struct wwan_hwsim_port {
+ struct list_head list;
+ unsigned int id;
+ struct wwan_hwsim_dev *dev;
+ struct wwan_port *wwan;
+ struct work_struct del_work;
+ struct dentry *debugfs_topdir;
+ enum { /* AT command parser state */
+ AT_PARSER_WAIT_A,
+ AT_PARSER_WAIT_T,
+ AT_PARSER_WAIT_TERM,
+ AT_PARSER_SKIP_LINE,
+ } pstate;
+};
+
+static const struct file_operations wwan_hwsim_debugfs_portdestroy_fops;
+static const struct file_operations wwan_hwsim_debugfs_portcreate_fops;
+static const struct file_operations wwan_hwsim_debugfs_devdestroy_fops;
+static void wwan_hwsim_port_del_work(struct work_struct *work);
+static void wwan_hwsim_dev_del_work(struct work_struct *work);
+
+static netdev_tx_t wwan_hwsim_netdev_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ consume_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops wwan_hwsim_netdev_ops = {
+ .ndo_start_xmit = wwan_hwsim_netdev_xmit,
+};
+
+static void wwan_hwsim_netdev_setup(struct net_device *ndev)
+{
+ ndev->netdev_ops = &wwan_hwsim_netdev_ops;
+ ndev->needs_free_netdev = true;
+
+ ndev->mtu = ETH_DATA_LEN;
+ ndev->min_mtu = ETH_MIN_MTU;
+ ndev->max_mtu = ETH_MAX_MTU;
+
+ ndev->type = ARPHRD_NONE;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+}
+
+static const struct wwan_ops wwan_hwsim_wwan_rtnl_ops = {
+ .priv_size = 0, /* No private data */
+ .setup = wwan_hwsim_netdev_setup,
+};
+
+static int wwan_hwsim_port_start(struct wwan_port *wport)
+{
+ struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
+
+ port->pstate = AT_PARSER_WAIT_A;
+
+ return 0;
+}
+
+static void wwan_hwsim_port_stop(struct wwan_port *wport)
+{
+}
+
+/* Implements a minimalistic AT command parser that echoes input back and
+ * replies with 'OK' to each input command. See AT command protocol details
+ * in the ITU-T V.250 recommendation.
+ *
+ * Be aware that this processor is not fully V.250 compliant.
+ */
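+/* Example exchange (derived from the parser below): writing "AT\r" to the
+ * port is answered with "AT\r\r\nOK\r\n", i.e. the command echo followed by
+ * the "OK" final result code.
+ */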
+static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
+{
+ struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
+ struct sk_buff *out;
+ int i, n, s;
+
+ /* Estimate the maximum possible number of commands by counting the
+ * number of termination chars (S3 param, CR by default), then allocate
+ * an output buffer large enough to fit the echo and result codes of
+ * all commands.
+ */
+ for (i = 0, n = 0; i < in->len; ++i)
+ if (in->data[i] == '\r')
+ n++;
+ n = in->len + n * (2 + 2 + 2); /* Output buffer size */
+ out = alloc_skb(n, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ for (i = 0, s = 0; i < in->len; ++i) {
+ char c = in->data[i];
+
+ if (port->pstate == AT_PARSER_WAIT_A) {
+ if (c == 'A' || c == 'a')
+ port->pstate = AT_PARSER_WAIT_T;
+ else if (c != '\n') /* Ignore formatting char */
+ port->pstate = AT_PARSER_SKIP_LINE;
+ } else if (port->pstate == AT_PARSER_WAIT_T) {
+ if (c == 'T' || c == 't')
+ port->pstate = AT_PARSER_WAIT_TERM;
+ else
+ port->pstate = AT_PARSER_SKIP_LINE;
+ } else if (port->pstate == AT_PARSER_WAIT_TERM) {
+ if (c != '\r')
+ continue;
+ /* Consume the trailing formatting char as well */
+ if ((i + 1) < in->len && in->data[i + 1] == '\n')
+ i++;
+ n = i - s + 1;
+ skb_put_data(out, &in->data[s], n);/* Echo */
+ skb_put_data(out, "\r\nOK\r\n", 6);
+ s = i + 1;
+ port->pstate = AT_PARSER_WAIT_A;
+ } else if (port->pstate == AT_PARSER_SKIP_LINE) {
+ if (c != '\r')
+ continue;
+ port->pstate = AT_PARSER_WAIT_A;
+ }
+ }
+
+ if (i > s) {
+ /* Echo the processed portion of a not yet completed command */
+ n = i - s;
+ skb_put_data(out, &in->data[s], n);
+ }
+
+ consume_skb(in);
+
+ wwan_port_rx(wport, out);
+
+ return 0;
+}
+
+static const struct wwan_port_ops wwan_hwsim_port_ops = {
+ .start = wwan_hwsim_port_start,
+ .stop = wwan_hwsim_port_stop,
+ .tx = wwan_hwsim_port_tx,
+};
+
+static struct wwan_hwsim_port *wwan_hwsim_port_new(struct wwan_hwsim_dev *dev)
+{
+ struct wwan_hwsim_port *port;
+ char name[0x10];
+ int err;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ port->dev = dev;
+
+ spin_lock(&dev->ports_lock);
+ port->id = dev->port_idx++;
+ spin_unlock(&dev->ports_lock);
+
+ port->wwan = wwan_create_port(&dev->dev, WWAN_PORT_AT,
+ &wwan_hwsim_port_ops,
+ NULL, port);
+ if (IS_ERR(port->wwan)) {
+ err = PTR_ERR(port->wwan);
+ goto err_free_port;
+ }
+
+ INIT_WORK(&port->del_work, wwan_hwsim_port_del_work);
+
+ snprintf(name, sizeof(name), "port%u", port->id);
+ port->debugfs_topdir = debugfs_create_dir(name, dev->debugfs_topdir);
+ debugfs_create_file("destroy", 0200, port->debugfs_topdir, port,
+ &wwan_hwsim_debugfs_portdestroy_fops);
+
+ return port;
+
+err_free_port:
+ kfree(port);
+
+ return ERR_PTR(err);
+}
+
+static void wwan_hwsim_port_del(struct wwan_hwsim_port *port)
+{
+ debugfs_remove(port->debugfs_topdir);
+
+ /* Make sure that there is no pending deletion work */
+ if (current_work() != &port->del_work)
+ cancel_work_sync(&port->del_work);
+
+ wwan_remove_port(port->wwan);
+ kfree(port);
+}
+
+static void wwan_hwsim_port_del_work(struct work_struct *work)
+{
+ struct wwan_hwsim_port *port =
+ container_of(work, typeof(*port), del_work);
+ struct wwan_hwsim_dev *dev = port->dev;
+
+ spin_lock(&dev->ports_lock);
+ if (list_empty(&port->list)) {
+ /* Someone else is deleting the port at the moment */
+ spin_unlock(&dev->ports_lock);
+ return;
+ }
+ list_del_init(&port->list);
+ spin_unlock(&dev->ports_lock);
+
+ wwan_hwsim_port_del(port);
+}
+
+static void wwan_hwsim_dev_release(struct device *sysdev)
+{
+ struct wwan_hwsim_dev *dev = container_of(sysdev, typeof(*dev), dev);
+
+ kfree(dev);
+}
+
+static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
+{
+ struct wwan_hwsim_dev *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ dev->id = wwan_hwsim_dev_idx++;
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ dev->dev.release = wwan_hwsim_dev_release;
+ dev->dev.class = wwan_hwsim_class;
+ dev_set_name(&dev->dev, "hwsim%u", dev->id);
+
+ spin_lock_init(&dev->ports_lock);
+ INIT_LIST_HEAD(&dev->ports);
+
+ err = device_register(&dev->dev);
+ if (err)
+ goto err_free_dev;
+
+ INIT_WORK(&dev->del_work, wwan_hwsim_dev_del_work);
+
+ err = wwan_register_ops(&dev->dev, &wwan_hwsim_wwan_rtnl_ops, dev, 1);
+ if (err)
+ goto err_unreg_dev;
+
+ dev->debugfs_topdir = debugfs_create_dir(dev_name(&dev->dev),
+ wwan_hwsim_debugfs_topdir);
+ debugfs_create_file("destroy", 0200, dev->debugfs_topdir, dev,
+ &wwan_hwsim_debugfs_devdestroy_fops);
+ dev->debugfs_portcreate =
+ debugfs_create_file("portcreate", 0200,
+ dev->debugfs_topdir, dev,
+ &wwan_hwsim_debugfs_portcreate_fops);
+
+ return dev;
+
+err_unreg_dev:
+ device_unregister(&dev->dev);
+ /* Memory will be freed in the device release callback */
+
+ return ERR_PTR(err);
+
+err_free_dev:
+ put_device(&dev->dev);
+
+ return ERR_PTR(err);
+}
+
+static void wwan_hwsim_dev_del(struct wwan_hwsim_dev *dev)
+{
+ debugfs_remove(dev->debugfs_portcreate); /* Avoid new ports */
+
+ spin_lock(&dev->ports_lock);
+ while (!list_empty(&dev->ports)) {
+ struct wwan_hwsim_port *port;
+
+ port = list_first_entry(&dev->ports, struct wwan_hwsim_port,
+ list);
+ list_del_init(&port->list);
+ spin_unlock(&dev->ports_lock);
+ wwan_hwsim_port_del(port);
+ spin_lock(&dev->ports_lock);
+ }
+ spin_unlock(&dev->ports_lock);
+
+ debugfs_remove(dev->debugfs_topdir);
+
+ /* This will remove all child netdev(s) */
+ wwan_unregister_ops(&dev->dev);
+
+ /* Make sure that there is no pending deletion work */
+ if (current_work() != &dev->del_work)
+ cancel_work_sync(&dev->del_work);
+
+ device_unregister(&dev->dev);
+ /* Memory will be freed in the device release callback */
+}
+
+static void wwan_hwsim_dev_del_work(struct work_struct *work)
+{
+ struct wwan_hwsim_dev *dev = container_of(work, typeof(*dev), del_work);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ if (list_empty(&dev->list)) {
+ /* Someone else is deleting the device at the moment */
+ spin_unlock(&wwan_hwsim_devs_lock);
+ return;
+ }
+ list_del_init(&dev->list);
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ wwan_hwsim_dev_del(dev);
+}
+
+static ssize_t wwan_hwsim_debugfs_portdestroy_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_port *port = file->private_data;
+
+ /* We cannot delete the port here since that would deadlock:
+ * debugfs_remove() waits for this callback to finish. So, defer the
+ * deletion to a workqueue.
+ */
+ queue_work(wwan_wq, &port->del_work);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_portdestroy_fops = {
+ .write = wwan_hwsim_debugfs_portdestroy_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static ssize_t wwan_hwsim_debugfs_portcreate_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_dev *dev = file->private_data;
+ struct wwan_hwsim_port *port;
+
+ port = wwan_hwsim_port_new(dev);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ spin_lock(&dev->ports_lock);
+ list_add_tail(&port->list, &dev->ports);
+ spin_unlock(&dev->ports_lock);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_portcreate_fops = {
+ .write = wwan_hwsim_debugfs_portcreate_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static ssize_t wwan_hwsim_debugfs_devdestroy_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_dev *dev = file->private_data;
+
+ /* We cannot delete the device here since that would deadlock:
+ * debugfs_remove() waits for this callback to finish. So, defer the
+ * deletion to a workqueue.
+ */
+ queue_work(wwan_wq, &dev->del_work);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_devdestroy_fops = {
+ .write = wwan_hwsim_debugfs_devdestroy_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static ssize_t wwan_hwsim_debugfs_devcreate_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_dev *dev;
+
+ dev = wwan_hwsim_dev_new();
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ list_add_tail(&dev->list, &wwan_hwsim_devs);
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_devcreate_fops = {
+ .write = wwan_hwsim_debugfs_devcreate_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
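+
+/* Run-time control (derived from the debugfs entries set up in
+ * wwan_hwsim_init(), wwan_hwsim_dev_new() and wwan_hwsim_port_new()): writing
+ * anything to <debugfs>/wwan_hwsim/devcreate creates a new simulated device,
+ * each device gets a <debugfs>/wwan_hwsim/hwsimN/ directory with "portcreate"
+ * and "destroy" files, and each port gets a portN/ subdirectory with its own
+ * "destroy" file.
+ */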
+
+static int __init wwan_hwsim_init_devs(void)
+{
+ struct wwan_hwsim_dev *dev;
+ int i, j;
+
+ for (i = 0; i < wwan_hwsim_devsnum; ++i) {
+ dev = wwan_hwsim_dev_new();
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ list_add_tail(&dev->list, &wwan_hwsim_devs);
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ /* Create a couple of ports for each device to shorten the
+ * simulator bring-up time.
+ */
+ for (j = 0; j < 2; ++j) {
+ struct wwan_hwsim_port *port;
+
+ port = wwan_hwsim_port_new(dev);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ spin_lock(&dev->ports_lock);
+ list_add_tail(&port->list, &dev->ports);
+ spin_unlock(&dev->ports_lock);
+ }
+ }
+
+ return 0;
+}
+
+static void wwan_hwsim_free_devs(void)
+{
+ struct wwan_hwsim_dev *dev;
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ while (!list_empty(&wwan_hwsim_devs)) {
+ dev = list_first_entry(&wwan_hwsim_devs, struct wwan_hwsim_dev,
+ list);
+ list_del_init(&dev->list);
+ spin_unlock(&wwan_hwsim_devs_lock);
+ wwan_hwsim_dev_del(dev);
+ spin_lock(&wwan_hwsim_devs_lock);
+ }
+ spin_unlock(&wwan_hwsim_devs_lock);
+}
+
+static int __init wwan_hwsim_init(void)
+{
+ int err;
+
+ if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
+ return -EINVAL;
+
+ wwan_wq = alloc_workqueue("wwan_wq", 0, 0);
+ if (!wwan_wq)
+ return -ENOMEM;
+
+ wwan_hwsim_class = class_create("wwan_hwsim");
+ if (IS_ERR(wwan_hwsim_class)) {
+ err = PTR_ERR(wwan_hwsim_class);
+ goto err_wq_destroy;
+ }
+
+ wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
+ wwan_hwsim_debugfs_devcreate =
+ debugfs_create_file("devcreate", 0200,
+ wwan_hwsim_debugfs_topdir, NULL,
+ &wwan_hwsim_debugfs_devcreate_fops);
+
+ err = wwan_hwsim_init_devs();
+ if (err)
+ goto err_clean_devs;
+
+ return 0;
+
+err_clean_devs:
+ debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
+ wwan_hwsim_free_devs();
+ flush_workqueue(wwan_wq); /* Wait for deletion work to complete */
+ debugfs_remove(wwan_hwsim_debugfs_topdir);
+ class_destroy(wwan_hwsim_class);
+err_wq_destroy:
+ destroy_workqueue(wwan_wq);
+
+ return err;
+}
+
+static void __exit wwan_hwsim_exit(void)
+{
+ debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
+ wwan_hwsim_free_devs();
+ flush_workqueue(wwan_wq); /* Wait for deletion work to complete */
+ debugfs_remove(wwan_hwsim_debugfs_topdir);
+ class_destroy(wwan_hwsim_class);
+ destroy_workqueue(wwan_wq);
+}
+
+module_init(wwan_hwsim_init);
+module_exit(wwan_hwsim_exit);
+
+MODULE_AUTHOR("Sergey Ryazanov");
+MODULE_DESCRIPTION("Device simulator for WWAN framework");
+MODULE_LICENSE("GPL");