author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree       b2d64bc10158fdd5497876388cd68142ca374ed3  /drivers/net/ipa
parent     Initial commit.
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--  drivers/net/ipa/Kconfig  23
-rw-r--r--  drivers/net/ipa/Makefile  25
-rw-r--r--  drivers/net/ipa/data/ipa_data-v3.1.c  538
-rw-r--r--  drivers/net/ipa/data/ipa_data-v3.5.1.c  423
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.11.c  406
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.2.c  385
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.5.c  462
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.7.c  405
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.9.c  456
-rw-r--r--  drivers/net/ipa/data/ipa_data-v5.0.c  481
-rw-r--r--  drivers/net/ipa/gsi.c  2431
-rw-r--r--  drivers/net/ipa/gsi.h  281
-rw-r--r--  drivers/net/ipa/gsi_private.h  130
-rw-r--r--  drivers/net/ipa/gsi_reg.c  161
-rw-r--r--  drivers/net/ipa/gsi_reg.h  383
-rw-r--r--  drivers/net/ipa/gsi_trans.c  790
-rw-r--r--  drivers/net/ipa/gsi_trans.h  230
-rw-r--r--  drivers/net/ipa/ipa.h  173
-rw-r--r--  drivers/net/ipa/ipa_cmd.c  647
-rw-r--r--  drivers/net/ipa/ipa_cmd.h  175
-rw-r--r--  drivers/net/ipa/ipa_data.h  254
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c  2196
-rw-r--r--  drivers/net/ipa/ipa_endpoint.h  207
-rw-r--r--  drivers/net/ipa/ipa_gsi.c  55
-rw-r--r--  drivers/net/ipa/ipa_gsi.h  71
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c  296
-rw-r--r--  drivers/net/ipa/ipa_interrupt.h  100
-rw-r--r--  drivers/net/ipa/ipa_main.c  1017
-rw-r--r--  drivers/net/ipa/ipa_mem.c  684
-rw-r--r--  drivers/net/ipa/ipa_mem.h  106
-rw-r--r--  drivers/net/ipa/ipa_modem.c  478
-rw-r--r--  drivers/net/ipa/ipa_modem.h  24
-rw-r--r--  drivers/net/ipa/ipa_power.c  434
-rw-r--r--  drivers/net/ipa/ipa_power.h  92
-rw-r--r--  drivers/net/ipa/ipa_qmi.c  537
-rw-r--r--  drivers/net/ipa/ipa_qmi.h  64
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.c  723
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.h  261
-rw-r--r--  drivers/net/ipa/ipa_reg.c  167
-rw-r--r--  drivers/net/ipa/ipa_reg.h  646
-rw-r--r--  drivers/net/ipa/ipa_resource.c  178
-rw-r--r--  drivers/net/ipa/ipa_resource.h  23
-rw-r--r--  drivers/net/ipa/ipa_smp2p.c  349
-rw-r--r--  drivers/net/ipa/ipa_smp2p.h  47
-rw-r--r--  drivers/net/ipa/ipa_sysfs.c  171
-rw-r--r--  drivers/net/ipa/ipa_sysfs.h  16
-rw-r--r--  drivers/net/ipa/ipa_table.c  774
-rw-r--r--  drivers/net/ipa/ipa_table.h  79
-rw-r--r--  drivers/net/ipa/ipa_uc.c  261
-rw-r--r--  drivers/net/ipa/ipa_uc.h  54
-rw-r--r--  drivers/net/ipa/ipa_version.h  73
-rw-r--r--  drivers/net/ipa/reg.h  134
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v3.1.c  291
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v3.5.1.c  303
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v4.0.c  308
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v4.11.c  313
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v4.5.c  311
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v4.9.c  312
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v5.0.c  317
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v3.1.c  448
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v3.5.1.c  458
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.11.c  514
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.2.c  458
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.5.c  535
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.7.c  506
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.9.c  511
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v5.0.c  564
67 files changed, 25725 insertions, 0 deletions
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
new file mode 100644
index 0000000000..6782c2cbf5
--- /dev/null
+++ b/drivers/net/ipa/Kconfig
@@ -0,0 +1,23 @@
+config QCOM_IPA
+ tristate "Qualcomm IPA support"
+ depends on NET && QCOM_SMEM
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on INTERCONNECT
+ depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
+ select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_SCM
+ select QCOM_QMI_HELPERS
+ help
+ Choose Y or M here to include support for the Qualcomm
+ IP Accelerator (IPA), a hardware block present in some
+ Qualcomm SoCs. The IPA is a programmable protocol processor
+ that is capable of generic hardware handling of IP packets,
+ including routing, filtering, and NAT. Currently the IPA
+ driver supports only basic transport of network traffic
+ between the AP and modem.
+
+ Note that if selected, the selection type must match that
+ of QCOM_Q6V5_COMMON (Y or M).
+
+ If unsure, say N.
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
new file mode 100644
index 0000000000..7293d5cc2b
--- /dev/null
+++ b/drivers/net/ipa/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Qualcomm IPA driver.
+
+IPA_REG_VERSIONS := 3.1 3.5.1 4.2 4.5 4.7 4.9 4.11 5.0
+
+# Some IPA versions can reuse another set of GSI register definitions.
+GSI_REG_VERSIONS := 3.1 3.5.1 4.0 4.5 4.9 4.11 5.0
+
+IPA_DATA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.7 4.9 4.11 5.0
+
+obj-$(CONFIG_QCOM_IPA) += ipa.o
+
+ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
+ ipa_table.o ipa_interrupt.o gsi.o gsi_reg.o \
+ gsi_trans.o ipa_gsi.o ipa_smp2p.o ipa_uc.o \
+ ipa_endpoint.o ipa_cmd.o ipa_modem.o \
+ ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
+ ipa_sysfs.o
+
+ipa-y += $(IPA_REG_VERSIONS:%=reg/ipa_reg-v%.o)
+
+ipa-y += $(GSI_REG_VERSIONS:%=reg/gsi_reg-v%.o)
+
+ipa-y += $(IPA_DATA_VERSIONS:%=data/ipa_data-v%.o)
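For reference, each substitution reference above just maps a version list onto object names: $(IPA_REG_VERSIONS:%=reg/ipa_reg-v%.o) expands to reg/ipa_reg-v3.1.o reg/ipa_reg-v3.5.1.o reg/ipa_reg-v4.2.o reg/ipa_reg-v4.5.o reg/ipa_reg-v4.7.o reg/ipa_reg-v4.9.o reg/ipa_reg-v4.11.o reg/ipa_reg-v5.0.o, and the GSI and data lists expand the same way. Note that GSI_REG_VERSIONS includes 4.0 but omits 4.2 and 4.7: as the comment above explains, some IPA versions reuse another version's GSI register definitions, so fewer gsi_reg objects are built than ipa_reg objects.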
diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
new file mode 100644
index 0000000000..3380fb3483
--- /dev/null
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+
+#include <linux/log2.h>
+
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_HDR_SECTORS,
+ IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v3.1 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL = 0,
+ IPA_RSRC_GROUP_SRC_DL,
+ IPA_RSRC_GROUP_SRC_DIAG,
+ IPA_RSRC_GROUP_SRC_DMA,
+ IPA_RSRC_GROUP_SRC_UNUSED,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL = 0,
+ IPA_RSRC_GROUP_DST_DL,
+ IPA_RSRC_GROUP_DST_DIAG_DPL,
+ IPA_RSRC_GROUP_DST_DMA,
+ IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL,
+ IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v3.1 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 8,
+ },
+ [IPA_QSB_MASTER_PCIE] = {
+ .max_writes = 2,
+ .max_reads = 8,
+ },
+};
+
+/* Endpoint data for an SoC having IPA v3.1 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 6,
+ .endpoint_id = 22,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 18,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 15,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .buffer_size = 8192,
+ .pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 3,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 8,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_DL,
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_LAN_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 4,
+ .endpoint_id = 9,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 5,
+ .endpoint_id = 18,
+ .toward_ipa = false,
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 3, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 3, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 1, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 1, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 2, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HDR_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 14, .max = 14,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 16, .max = 16,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 19, .max = 19,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 26, .max = 26,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 5, .max = 5, /* 3 downstream */
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 5, .max = 5, /* 7 downstream */
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 19, .max = 19,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 26, .max = 26,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 3, .max = 3, /* 2 downstream */
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 3, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+ .min = 1, .max = 1, /* 0 downstream */
+ },
+ /* IPA_RSRC_GROUP_DST_DMA uses 2 downstream */
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+ .min = 3, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
+ .min = 3, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DMA] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+ .min = 1, .max = 1,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v3.1 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x0688,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x07d0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x09d0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x0bd8,
+ .size = 0x1424,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_END_MARKER,
+ .offset = 0x2000,
+ .size = 0,
+ .canary_count = 1,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v3.1 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146bd000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00002000,
+};
+
+/* Interconnect bandwidths are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 640000, /* 640 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ {
+ .name = "imem",
+ .peak_bandwidth = 640000, /* 640 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next interconnect */
+ {
+ .name = "config",
+ .peak_bandwidth = 80000, /* 80 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v3.1 */
+static const struct ipa_power_data ipa_power_data = {
+ .core_clock_rate = 16 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v3.1 */
+const struct ipa_data ipa_data_v3_1 = {
+ .version = IPA_VERSION_3_1,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY),
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
+};
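The ipa_data_v3_1 structure above is the complete per-SoC description the driver consumes; one such blob is selected at probe time through the device-tree compatible string. A minimal sketch of that selection follows; the compatible value and table name are assumptions made for illustration only (the actual match table is in ipa_main.c, added elsewhere in this same commit):

	/* Illustrative sketch, not the ipa_main.c hunk.  struct of_device_id
	 * comes from <linux/mod_devicetable.h>; the "qcom,msm8998-ipa"
	 * compatible for IPA v3.1 hardware is an assumption.
	 */
	static const struct of_device_id ipa_match[] = {
		{
			.compatible	= "qcom,msm8998-ipa",
			.data		= &ipa_data_v3_1,
		},
		{ },
	};
	MODULE_DEVICE_TABLE(of, ipa_match);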
diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
new file mode 100644
index 0000000000..4287114b24
--- /dev/null
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+
+#include <linux/log2.h>
+
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v3.5.1 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_LWA_DL = 0,
+ IPA_RSRC_GROUP_SRC_UL_DL,
+ IPA_RSRC_GROUP_SRC_MHI_DMA,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_LWA_DL = 0,
+ IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ IPA_RSRC_GROUP_DST_UNUSED_2,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 8,
+ },
+ [IPA_QSB_MASTER_PCIE] = {
+ .max_writes = 4,
+ .max_reads = 12,
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 4,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .buffer_size = 8192,
+ .pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 3,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .seq_rep_type = IPA_SEQ_REP_DMA_PARSER,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 6,
+ .endpoint_id = 10,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_LAN_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 3,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 4,
+ .endpoint_id = 6,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 12,
+ .toward_ipa = false,
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
+ .min = 1, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 1, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 1, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
+ .min = 10, .max = 10,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 10, .max = 10,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
+ .min = 12, .max = 12,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 14, .max = 14,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_MHI_DMA] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
+ .min = 14, .max = 14,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 20, .max = 20,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 14, .max = 14,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_LWA_DL] = {
+ .min = 4, .max = 4,
+ },
+		.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 4, .max = 4,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
+ .min = 3, .max = 3,
+ }
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_LWA_DL] = {
+ .min = 2, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 1, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
+ .min = 1, .max = 2,
+ }
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v3.5.1 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x0688,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x07d0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x09d0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x0bd8,
+ .size = 0x1024,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
+ .offset = 0x1c00,
+ .size = 0x0400,
+ .canary_count = 1,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146bd000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00002000,
+};
+
+/* Interconnect bandwidths are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 350000, /* 350 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 40000, /* 40 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v3.5.1 */
+static const struct ipa_power_data ipa_power_data = {
+ .core_clock_rate = 75 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v3.5.1 */
+const struct ipa_data ipa_data_v3_5_1 = {
+ .version = IPA_VERSION_3_5_1,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
+ BIT(BCR_TX_NOT_USING_BRESP) |
+ BIT(BCR_SUSPEND_L2_IRQ) |
+ BIT(BCR_HOLB_DROP_L2_IRQ) |
+ BIT(BCR_DUAL_TX),
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
+};
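The local memory map above reads as a packed layout: a region's canary words (canary_count 32-bit values) are assumed to sit immediately before its offset, so the two canaries of V4_FILTER_HASHED occupy 0x0280..0x0288, directly after UC_INFO ends at 0x0280. A small standalone sketch (plain userspace C, not driver code) that checks this packing for a few of the rows above:

	/* Sanity-check sketch; region values copied from the IPA v3.5.1 table.
	 * Assumption: canaries are 32-bit words placed just before a region.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct region { const char *name; uint32_t offset, size, canaries; };

	static const struct region map[] = {
		{ "UC_SHARED",		0x0000, 0x0080, 0 },
		{ "UC_INFO",		0x0080, 0x0200, 0 },
		{ "V4_FILTER_HASHED",	0x0288, 0x0078, 2 },
		{ "V4_FILTER",		0x0308, 0x0078, 2 },
		{ "UC_EVENT_RING",	0x1c00, 0x0400, 1 },	/* later rows omitted */
	};

	int main(void)
	{
		uint32_t end = 0;

		for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
			uint32_t start = map[i].offset - 4 * map[i].canaries;

			if (start < end)
				printf("overlap before %s\n", map[i].name);
			end = map[i].offset + map[i].size;
		}
		printf("map ends at 0x%04x\n", end);	/* 0x2000, matching smem_size */
		return 0;
	}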
diff --git a/drivers/net/ipa/data/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
new file mode 100644
index 0000000000..1b4b52501e
--- /dev/null
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v4.11 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL_DL = 0,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_UNUSED_2,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
+ IPA_RSRC_GROUP_DST_UNUSED_1,
+ IPA_RSRC_GROUP_DST_DRB_IP,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v4.11 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 12,
+ .max_reads = 13,
+ .max_reads_beats = 120,
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v4.11 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 7,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 14,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .buffer_size = 8192,
+ .pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 2,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .buffer_size = 32768,
+ .aggr_time_limit = 500,
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 7,
+ .endpoint_id = 14,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v4.11 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 6, .max = 6,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 18, .max = 18,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 2, .max = 2,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 15, .max = 15,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v4.11 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 3, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
+ .min = 25, .max = 25,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 2, .max = 2,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v4.11 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v4.11 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x0688,
+ .size = 0x0240,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_HEADER,
+ .offset = 0x08c8,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x0ad0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x0cd0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_NAT_TABLE,
+ .offset = 0x0ee0,
+ .size = 0x0d00,
+ .canary_count = 4,
+ },
+ {
+ .id = IPA_MEM_PDN_CONFIG,
+ .offset = 0x1be8,
+ .size = 0x0050,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
+ .offset = 0x1c40,
+ .size = 0x0030,
+ .canary_count = 4,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
+ .offset = 0x1c70,
+ .size = 0x0048,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_TETHERING,
+ .offset = 0x1cb8,
+ .size = 0x0238,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_DROP,
+ .offset = 0x1ef0,
+ .size = 0x0020,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x1f18,
+ .size = 0x100c,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_END_MARKER,
+ .offset = 0x3000,
+ .size = 0x0000,
+ .canary_count = 1,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v4.11 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146a8000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00009000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 150000, /* 150 MBps */
+ },
+ /* Average rate is unused for the next interconnect */
+ {
+ .name = "config",
+ .peak_bandwidth = 74000, /* 74 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v4.11 */
+static const struct ipa_power_data ipa_power_data = {
+ .core_clock_rate = 60 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v4.11 */
+const struct ipa_data ipa_data_v4_11 = {
+ .version = IPA_VERSION_4_11,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
+};
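As the comment above says, interconnect rates are given in 1000 byte/second (kBps) units, which is also the unit the kernel interconnect framework uses, so 600000 means a 600 MBps peak and 150000 a 150 MBps average for the "memory" path. A hedged sketch of how such values reach that framework (this is not the ipa_power.c hunk; the helper name is invented for illustration):

	/* Illustrative only: icc_set_bw() takes average then peak bandwidth
	 * in kBps, the same units used in the ipa_interconnect_data tables.
	 */
	#include <linux/device.h>
	#include <linux/interconnect.h>

	static int ipa_memory_path_enable(struct device *dev)
	{
		struct icc_path *path;

		path = devm_of_icc_get(dev, "memory");	/* name from the table above */
		if (IS_ERR(path))
			return PTR_ERR(path);

		/* 150000 kBps average, 600000 kBps peak, as in the v4.11 data */
		return icc_set_bw(path, 150000, 600000);
	}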
diff --git a/drivers/net/ipa/data/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
new file mode 100644
index 0000000000..199ed0ed86
--- /dev/null
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2019-2021 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v4.2 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL_DL = 0,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v4.2 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 12,
+ /* no outstanding read byte (beat) limit */
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v4.2 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 1,
+ .endpoint_id = 6,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 6,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .buffer_size = 8192,
+ .pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 0,
+ .endpoint_id = 1,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_1_PASS_SKIP_LAST_UC,
+ .seq_rep_type = IPA_SEQ_REP_DMA_PARSER,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 3,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 6,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_COMMAND_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 1,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ },
+ [IPA_ENDPOINT_MODEM_LAN_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 3,
+ .endpoint_id = 11,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 4,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 10,
+ .toward_ipa = false,
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v4.2 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 3, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 3, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 10, .max = 10,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 1, .max = 1,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 5, .max = 5,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v4.2 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 3, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 1, .max = 63,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v4.2 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v4.2 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
+ .size = 0,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x0290,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x0310,
+ .size = 0,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x0318,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x0398,
+ .size = 0,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x03a0,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x0420,
+ .size = 0,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x0428,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x04a8,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x05f0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x07f0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_PDN_CONFIG,
+ .offset = 0x09f8,
+ .size = 0x0050,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
+ .offset = 0x0a50,
+ .size = 0x0060,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_STATS_TETHERING,
+ .offset = 0x0ab0,
+ .size = 0x0140,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x0bf0,
+ .size = 0x140c,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_END_MARKER,
+ .offset = 0x2000,
+ .size = 0,
+ .canary_count = 1,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v4.2 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146a8000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00002000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 465000, /* 465 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 68570, /* 68.570 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 30000, /* 30 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v4.2 */
+static const struct ipa_power_data ipa_power_data = {
+ .core_clock_rate = 100 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v4.2 */
+const struct ipa_data ipa_data_v4_2 = {
+ .version = IPA_VERSION_4_2,
+ /* backward_compat value is 0 */
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
+};
diff --git a/drivers/net/ipa/data/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
new file mode 100644
index 0000000000..19b549f299
--- /dev/null
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v4.5 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UNUSED_0 = 0,
+ IPA_RSRC_GROUP_SRC_UL_DL,
+ IPA_RSRC_GROUP_SRC_UNUSED_2,
+ IPA_RSRC_GROUP_SRC_UNUSED_3,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UNUSED_0 = 0,
+ IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ IPA_RSRC_GROUP_DST_UNUSED_2,
+ IPA_RSRC_GROUP_DST_UNUSED_3,
+ IPA_RSRC_GROUP_DST_UC,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v4.5 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 0, /* no limit (hardware max) */
+ .max_reads_beats = 120,
+ },
+ [IPA_QSB_MASTER_PCIE] = {
+ .max_writes = 8,
+ .max_reads = 12,
+ /* no outstanding read byte (beat) limit */
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v4.5 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 9,
+ .endpoint_id = 7,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 10,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .buffer_size = 8192,
+ .pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 1,
+ .endpoint_id = 14,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 7,
+ .endpoint_id = 21,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v4.5 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 1, .max = 11,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 1, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 14, .max = 14,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 3, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 18, .max = 18,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UNUSED_0] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UNUSED_2] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UNUSED_3] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 24, .max = 24,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v4.5 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 16, .max = 16,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
+ .min = 2, .max = 2,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_3] = {
+ .min = 2, .max = 2,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 2, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
+ .min = 1, .max = 2,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UNUSED_3] = {
+ .min = 1, .max = 2,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UC] = {
+ .min = 0, .max = 2,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v4.5 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v4.5 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x0688,
+ .size = 0x0240,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_HEADER,
+ .offset = 0x08c8,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x0ad0,
+ .size = 0x0b20,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x15f0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_NAT_TABLE,
+ .offset = 0x1800,
+ .size = 0x0d00,
+ .canary_count = 4,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
+ .offset = 0x2510,
+ .size = 0x0030,
+ .canary_count = 4,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
+ .offset = 0x2540,
+ .size = 0x0048,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_TETHERING,
+ .offset = 0x2588,
+ .size = 0x0238,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_FILTER_ROUTE,
+ .offset = 0x27c0,
+ .size = 0x0800,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_DROP,
+ .offset = 0x2fc0,
+ .size = 0x0020,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x2fe8,
+ .size = 0x0800,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
+ .offset = 0x3800,
+ .size = 0x1000,
+ .canary_count = 1,
+ },
+ {
+ .id = IPA_MEM_PDN_CONFIG,
+ .offset = 0x4800,
+ .size = 0x0050,
+ .canary_count = 0,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v4.5 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x14688000,
+ .imem_size = 0x00003000,
+ .smem_id = 497,
+ .smem_size = 0x00009000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 150000, /* 150 MBps */
+ },
+ /* Average rate is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 450000, /* 450 MBps */
+ .average_bandwidth = 75000, /* 75 MBps (unused?) */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 171400, /* 171.4 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v4.5 */
+static const struct ipa_power_data ipa_power_data = {
+ .core_clock_rate = 150 * 1000 * 1000, /* Hz (150? 60?) */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v4.5 */
+const struct ipa_data ipa_data_v4_5 = {
+ .version = IPA_VERSION_4_5,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
+};
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
new file mode 100644
index 0000000000..b83390c486
--- /dev/null
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.7 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v4.7 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL_DL = 0,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
+ IPA_RSRC_GROUP_DST_UNUSED_1,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v4.7 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 0, /* no limit (hardware max) */
+ .max_reads_beats = 120,
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v4.7 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 7,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 14,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .buffer_size = 8192,
+ .pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 2,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 7,
+ .endpoint_id = 14,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v4.7 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 18, .max = 18,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 2, .max = 2,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 15, .max = 15,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v4.7 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 7, .max = 7,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 2, .max = 2,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v4.7 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v4.7 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x0688,
+ .size = 0x0240,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_HEADER,
+ .offset = 0x08c8,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x0ad0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x0cd0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_NAT_TABLE,
+ .offset = 0x0ee0,
+ .size = 0x0d00,
+ .canary_count = 4,
+ },
+ {
+ .id = IPA_MEM_PDN_CONFIG,
+ .offset = 0x1be8,
+ .size = 0x0050,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
+ .offset = 0x1c40,
+ .size = 0x0030,
+ .canary_count = 4,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
+ .offset = 0x1c70,
+ .size = 0x0048,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_TETHERING,
+ .offset = 0x1cb8,
+ .size = 0x0238,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_DROP,
+ .offset = 0x1ef0,
+ .size = 0x0020,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x1f18,
+ .size = 0x100c,
+ .canary_count = 2,
+ },
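+	/* Zero-sized marker; its offset indicates the end of IPA-local memory */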
+ {
+ .id = IPA_MEM_END_MARKER,
+ .offset = 0x3000,
+ .size = 0x0000,
+ .canary_count = 1,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v4.7 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146a8000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00009000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 150000, /* 150 MBps */
+ },
+ /* Average rate is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 450000, /* 450 MBps */
+ .average_bandwidth = 75000, /* 75 MBps (unused?) */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 171400, /* 171.4 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v4.7 */
+static const struct ipa_power_data ipa_power_data = {
+ /* XXX Downstream code says 150 MHz (DT SVS2), 60 MHz (code) */
+ .core_clock_rate = 100 * 1000 * 1000, /* Hz (150? 60?) */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v4.7 */
+const struct ipa_data ipa_data_v4_7 = {
+ .version = IPA_VERSION_4_7,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
+};
diff --git a/drivers/net/ipa/data/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
new file mode 100644
index 0000000000..d30fc1fe6c
--- /dev/null
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v4.9 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL_DL = 0,
+ IPA_RSRC_GROUP_SRC_DMA,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
+ IPA_RSRC_GROUP_DST_DMA,
+ IPA_RSRC_GROUP_DST_UC,
+ IPA_RSRC_GROUP_DST_DRB_IP,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v4.9 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 0, /* no limit (hardware max) */
+ .max_reads_beats = 120,
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v4.9 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 6,
+ .endpoint_id = 7,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 11,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .buffer_size = 8192,
+ .pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 2,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 12,
+ .endpoint_id = 20,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 7,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v4.9 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 1, .max = 12,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 1, .max = 12,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 20, .max = 20,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 2, .max = 2,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 3, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 38, .max = 38,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 4, .max = 4,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 0, .max = 4,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 4,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 4,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 30, .max = 30,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 8, .max = 8,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v4.9 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 9, .max = 9,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DMA] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UC] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
+ .min = 39, .max = 39,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 2, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DMA] = {
+ .min = 1, .max = 2,
+ },
+ .limits[IPA_RSRC_GROUP_DST_UC] = {
+ .min = 0, .max = 2,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v4.9 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v4.9 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x0688,
+ .size = 0x0240,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_HEADER,
+ .offset = 0x08c8,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x0ad0,
+ .size = 0x0b20,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x15f0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_NAT_TABLE,
+ .offset = 0x1800,
+ .size = 0x0d00,
+ .canary_count = 4,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
+ .offset = 0x2510,
+ .size = 0x0030,
+ .canary_count = 4,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
+ .offset = 0x2540,
+ .size = 0x0048,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_TETHERING,
+ .offset = 0x2588,
+ .size = 0x0238,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_FILTER_ROUTE,
+ .offset = 0x27c0,
+ .size = 0x0800,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_DROP,
+ .offset = 0x2fc0,
+ .size = 0x0020,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x2fe8,
+ .size = 0x0800,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
+ .offset = 0x3800,
+ .size = 0x1000,
+ .canary_count = 1,
+ },
+ {
+ .id = IPA_MEM_PDN_CONFIG,
+ .offset = 0x4800,
+ .size = 0x0050,
+ .canary_count = 0,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v4.9 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146bd000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00009000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 150000, /* 150 MBps */
+ },
+ /* Average rate is unused for the next interconnect */
+ {
+ .name = "config",
+ .peak_bandwidth = 74000, /* 74 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v4.9 */
+static const struct ipa_power_data ipa_power_data = {
+ .core_clock_rate = 60 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v4.9. */
+const struct ipa_data ipa_data_v4_9 = {
+ .version = IPA_VERSION_4_9,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
+};
diff --git a/drivers/net/ipa/data/ipa_data-v5.0.c b/drivers/net/ipa/data/ipa_data-v5.0.c
new file mode 100644
index 0000000000..4d8171dae4
--- /dev/null
+++ b/drivers/net/ipa/data/ipa_data-v5.0.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v5.0 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+ IPA_RESOURCE_TYPE_DST_ULSO_SEGMENTS,
+};
+
+/* Resource groups used for an SoC having IPA v5.0 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL = 0,
+ IPA_RSRC_GROUP_SRC_DL,
+ IPA_RSRC_GROUP_SRC_UNUSED_2,
+ IPA_RSRC_GROUP_SRC_UNUSED_3,
+ IPA_RSRC_GROUP_SRC_URLLC,
+ IPA_RSRC_GROUP_SRC_U_RX_QC,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL = 0,
+ IPA_RSRC_GROUP_DST_DL,
+ IPA_RSRC_GROUP_DST_DMA,
+ IPA_RSRC_GROUP_DST_QDSS,
+ IPA_RSRC_GROUP_DST_CV2X,
+ IPA_RSRC_GROUP_DST_UC,
+ IPA_RSRC_GROUP_DST_DRB_IP,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v5.0 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 0,
+ .max_reads = 0, /* no limit (hardware max) */
+ .max_reads_beats = 0,
+ },
+ [IPA_QSB_MASTER_PCIE] = {
+ .max_writes = 0,
+ .max_reads = 0, /* no limit (hardware max) */
+ .max_reads_beats = 0,
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v5.0 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 12,
+ .endpoint_id = 14,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 13,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .buffer_size = 8192,
+ .pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 11,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 25,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 1,
+ .endpoint_id = 23,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_DL,
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 12,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 7,
+ .endpoint_id = 21,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 15,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v5.0 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 3, .max = 9,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 4, .max = 10,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_URLLC] = {
+ .min = 1, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_U_RX_QC] = {
+ .min = 0, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 9, .max = 9,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 12, .max = 12,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_URLLC] = {
+ .min = 10, .max = 10,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 9, .max = 9,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 24, .max = 24,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_URLLC] = {
+ .min = 20, .max = 20,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_URLLC] = {
+ .min = 1, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_U_RX_QC] = {
+ .min = 0, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 22, .max = 22,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 16, .max = 16,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_URLLC] = {
+ .min = 16, .max = 16,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v5.0 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 6, .max = 6,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
+ .min = 39, .max = 39,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 0, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 0, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_ULSO_SEGMENTS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 0, .max = 63,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v5.0 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v5.0 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
+ .offset = 0x0000,
+ .size = 0x1000,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x1000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x1080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x1288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x1308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x1388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x1408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x1488,
+ .size = 0x0098,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x1528,
+ .size = 0x0098,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x15c8,
+ .size = 0x0098,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x1668,
+ .size = 0x0098,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x1708,
+ .size = 0x0240,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_HEADER,
+ .offset = 0x1948,
+ .size = 0x01e0,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x1b40,
+ .size = 0x0b20,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x2660,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
+ .offset = 0x2868,
+ .size = 0x0060,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
+ .offset = 0x28c8,
+ .size = 0x0048,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_AP_V4_FILTER,
+ .offset = 0x2918,
+ .size = 0x0118,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_V6_FILTER,
+ .offset = 0x2aa0,
+ .size = 0x0228,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_FILTER_ROUTE,
+ .offset = 0x2cd0,
+ .size = 0x0ba0,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_STATS_DROP,
+ .offset = 0x3870,
+ .size = 0x0020,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x3898,
+ .size = 0x0d48,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_NAT_TABLE,
+ .offset = 0x45e0,
+ .size = 0x0900,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_PDN_CONFIG,
+ .offset = 0x4ee8,
+ .size = 0x0100,
+ .canary_count = 2,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v5.0 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x14688000,
+ .imem_size = 0x00003000,
+ .smem_id = 497,
+ .smem_size = 0x00009000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 1900000, /* 1.9 GBps */
+ .average_bandwidth = 600000, /* 600 MBps */
+ },
+ /* Average rate is unused for the next interconnect */
+ {
+ .name = "config",
+ .peak_bandwidth = 76800, /* 76.8 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v5.0 */
+static const struct ipa_power_data ipa_power_data = {
+ .core_clock_rate = 120 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v5.0. */
+const struct ipa_data ipa_data_v5_0 = {
+ .version = IPA_VERSION_5_0,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 11,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
+};
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
new file mode 100644
index 0000000000..9a0b1fe4a9
--- /dev/null
+++ b/drivers/net/ipa/gsi.c
@@ -0,0 +1,2431 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2023 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "gsi.h"
+#include "reg.h"
+#include "gsi_reg.h"
+#include "gsi_private.h"
+#include "gsi_trans.h"
+#include "ipa_gsi.h"
+#include "ipa_data.h"
+#include "ipa_version.h"
+
+/**
+ * DOC: The IPA Generic Software Interface
+ *
+ * The generic software interface (GSI) is an integral component of the IPA,
+ * providing a well-defined communication layer between the AP subsystem
+ * and the IPA core. The modem uses the GSI layer as well.
+ *
+ * -------- ---------
+ * | | | |
+ * | AP +<---. .----+ Modem |
+ * | +--. | | .->+ |
+ * | | | | | | | |
+ * -------- | | | | ---------
+ * v | v |
+ * --+-+---+-+--
+ * | GSI |
+ * |-----------|
+ * | |
+ * | IPA |
+ * | |
+ * -------------
+ *
+ * In the above diagram, the AP and Modem represent "execution environments"
+ * (EEs), which are independent operating environments that use the IPA for
+ * data transfer.
+ *
+ * Each EE uses a set of unidirectional GSI "channels," which allow transfer
+ * of data to or from the IPA. A channel is implemented as a ring buffer,
+ * with a DRAM-resident array of "transfer elements" (TREs) available to
+ * describe transfers to or from other EEs through the IPA. A transfer
+ * element can also contain an immediate command, requesting the IPA perform
+ * actions other than data transfer.
+ *
+ * Each TRE refers to a block of data--also located in DRAM. After writing
+ * one or more TREs to a channel, the writer (either the IPA or an EE) writes
+ * a doorbell register to inform the receiving side how many elements have
+ * been written.
+ *
+ * Each channel has a GSI "event ring" associated with it. An event ring
+ * is implemented very much like a channel ring, but is always directed from
+ * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
+ * events by adding an entry to the event ring associated with the channel.
+ * The GSI then writes its doorbell for the event ring, causing the target
+ * EE to be interrupted. Each entry in an event ring contains a pointer
+ * to the channel TRE whose completion the event represents.
+ *
+ * Each TRE in a channel ring has a set of flags. One flag indicates whether
+ * the completion of the transfer operation generates an entry (and possibly
+ * an interrupt) in the channel's event ring. Other flags allow transfer
+ * elements to be chained together, forming a single logical transaction.
+ * TRE flags are used to control whether and when interrupts are generated
+ * to signal completion of channel transfers.
+ *
+ * Elements in channel and event rings are completed (or consumed) strictly
+ * in order. Completion of one entry implies the completion of all preceding
+ * entries. A single completion interrupt can therefore communicate the
+ * completion of many transfers.
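+ *
+ * As a concrete example of this flow: to send a packet, the AP fills in a
+ * channel TRE describing the packet buffer, rings the channel doorbell, and
+ * is later notified by an entry in the channel's event ring (and possibly
+ * an interrupt) that the transfer has completed.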
+ *
+ * Note that all GSI registers are little-endian, which is the assumed
+ * endianness of I/O space accesses. The accessor functions perform byte
+ * swapping if needed (i.e., for a big endian CPU).
+ */
+
+/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
+#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
+
+#define GSI_CMD_TIMEOUT 50 /* milliseconds */
+
+#define GSI_CHANNEL_STOP_RETRIES 10
+#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
+#define GSI_CHANNEL_MODEM_FLOW_RETRIES 5 /* disable flow control only */
+
+#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
+#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
+
+#define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */
+
+/* An entry in an event ring */
+struct gsi_event {
+ __le64 xfer_ptr;
+ __le16 len;
+ u8 reserved1;
+ u8 code;
+ __le16 reserved2;
+ u8 type;
+ u8 chid;
+};
+
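+/* The fields above total 8 + 2 + 1 + 1 + 2 + 1 + 1 = 16 bytes; this matches
+ * GSI_RING_ELEMENT_SIZE, which gsi_validate_build() verifies at build time.
+ */
+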
+/** gsi_channel_scratch_gpi - GPI protocol scratch register
+ * @max_outstanding_tre:
+ *	Defines the maximum number of TREs allowed in a single transaction
+ *	on a channel, expressed in bytes (TRE count times the TRE size).
+ *	This determines the amount of prefetch performed by the hardware.
+ *	We configure this to equal the size of the TLV FIFO for the channel.
+ * @outstanding_threshold:
+ * Defines the threshold (in bytes) determining when the sequencer
+ * should update the channel doorbell. We configure this to equal
+ * the size of two TREs.
+ */
+struct gsi_channel_scratch_gpi {
+ u64 reserved1;
+ u16 reserved2;
+ u16 max_outstanding_tre;
+ u16 reserved3;
+ u16 outstanding_threshold;
+};
+
+/** gsi_channel_scratch - channel scratch configuration area
+ *
+ * The exact interpretation of this scratch area is protocol-specific.
+ * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
+ */
+union gsi_channel_scratch {
+ struct gsi_channel_scratch_gpi gpi;
+ struct {
+ u32 word1;
+ u32 word2;
+ u32 word3;
+ u32 word4;
+ } data;
+};
+
+/* Check things that can be validated at build time. */
+static void gsi_validate_build(void)
+{
+ /* This is used as a divisor */
+ BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);
+
+	/* Code assumes the sizes of channel and event ring elements are
+ * the same (and fixed). Make sure the size of an event ring
+ * element is what's expected.
+ */
+ BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);
+
+ /* Hardware requires a 2^n ring size. We ensure the number of
+ * elements in an event ring is a power of 2 elsewhere; this
+	 * ensures the elements themselves meet the requirement.
+ */
+ BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
+}
+
+/* Return the channel id associated with a given channel */
+static u32 gsi_channel_id(struct gsi_channel *channel)
+{
+ return channel - &channel->gsi->channel[0];
+}
+
+/* An initialized channel has a non-null GSI pointer */
+static bool gsi_channel_initialized(struct gsi_channel *channel)
+{
+ return !!channel->gsi;
+}
+
+/* Encode the channel protocol for the CH_C_CNTXT_0 register */
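+/* From IPA v4.5 up to (but not including) v5.0 the protocol value may not
+ * fit entirely within the CHTYPE_PROTOCOL field; its low-order bits go
+ * there and any remaining high-order bits go in CHTYPE_PROTOCOL_MSB.
+ */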
+static u32 ch_c_cntxt_0_type_encode(enum ipa_version version,
+ const struct reg *reg,
+ enum gsi_channel_type type)
+{
+ u32 val;
+
+ val = reg_encode(reg, CHTYPE_PROTOCOL, type);
+ if (version < IPA_VERSION_4_5 || version >= IPA_VERSION_5_0)
+ return val;
+
+ type >>= hweight32(reg_fmask(reg, CHTYPE_PROTOCOL));
+
+ return val | reg_encode(reg, CHTYPE_PROTOCOL_MSB, type);
+}
+
+/* Update the GSI IRQ type register with the cached value */
+static void gsi_irq_type_update(struct gsi *gsi, u32 val)
+{
+ const struct reg *reg = gsi_reg(gsi, CNTXT_TYPE_IRQ_MSK);
+
+ gsi->type_enabled_bitmap = val;
+ iowrite32(val, gsi->virt + reg_offset(reg));
+}
+
+static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | type_id);
+}
+
+static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~type_id);
+}
+
+/* Event ring commands are performed one at a time. Their completion
+ * is signaled by the event ring control GSI interrupt type, which is
+ * only enabled when we issue an event ring command. Only the event
+ * ring being operated on has this interrupt enabled.
+ */
+static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val = BIT(evt_ring_id);
+ const struct reg *reg;
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
+ iowrite32(~0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+ iowrite32(val, gsi->virt + reg_offset(reg));
+ gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+}
+
+/* Disable event ring control interrupts */
+static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
+{
+ const struct reg *reg;
+
+ gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+}
+
+/* Channel commands are performed one at a time. Their completion is
+ * signaled by the channel control GSI interrupt type, which is only
+ * enabled when we issue a channel command. Only the channel being
+ * operated on has this interrupt enabled.
+ */
+static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
+{
+ u32 val = BIT(channel_id);
+ const struct reg *reg;
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
+ iowrite32(~0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+ iowrite32(val, gsi->virt + reg_offset(reg));
+
+ gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+}
+
+/* Disable channel control interrupts */
+static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
+{
+ const struct reg *reg;
+
+ gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+}
+
+static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
+{
+ bool enable_ieob = !gsi->ieob_enabled_bitmap;
+ const struct reg *reg;
+ u32 val;
+
+ gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
+ val = gsi->ieob_enabled_bitmap;
+ iowrite32(val, gsi->virt + reg_offset(reg));
+
+ /* Enable the interrupt type if this is the first channel enabled */
+ if (enable_ieob)
+ gsi_irq_type_enable(gsi, GSI_IEOB);
+}
+
+static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
+{
+ const struct reg *reg;
+ u32 val;
+
+ gsi->ieob_enabled_bitmap &= ~event_mask;
+
+ /* Disable the interrupt type if this was the last enabled channel */
+ if (!gsi->ieob_enabled_bitmap)
+ gsi_irq_type_disable(gsi, GSI_IEOB);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
+ val = gsi->ieob_enabled_bitmap;
+ iowrite32(val, gsi->virt + reg_offset(reg));
+}
+
+static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
+{
+ gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
+}
+
+/* Enable all GSI interrupt types */
+static void gsi_irq_enable(struct gsi *gsi)
+{
+ const struct reg *reg;
+ u32 val;
+
+ /* Global interrupts include hardware error reports. Enable
+ * that so we can at least report the error should it occur.
+ */
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));
+
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GLOB_EE);
+
+ /* General GSI interrupts are reported to all EEs; if they occur
+ * they are unrecoverable (without reset). A breakpoint interrupt
+ * also exists, but we don't support that. We want to be notified
+ * of errors so we can report them, even if they can't be handled.
+ */
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+ val = BUS_ERROR;
+ val |= CMD_FIFO_OVRFLOW;
+ val |= MCS_STACK_OVRFLOW;
+ iowrite32(val, gsi->virt + reg_offset(reg));
+
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GENERAL);
+}
+
+/* Disable all GSI interrupt types */
+static void gsi_irq_disable(struct gsi *gsi)
+{
+ const struct reg *reg;
+
+ gsi_irq_type_update(gsi, 0);
+
+ /* Clear the type-specific interrupt masks set by gsi_irq_enable() */
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+}
+
+/* Return the virtual address associated with a ring index */
+void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
+}
+
+/* Return the 32-bit DMA address associated with a ring index */
+static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
+{
+ return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
+}
+
+/* Return the ring index of a 32-bit ring offset */
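+/* For example, with a ring base address of 0x1000 and 16-byte ring elements,
+ * a 32-bit offset of 0x1040 corresponds to ring index 4.
+ */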
+static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
+{
+ return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
+}
+
+/* Issue a GSI command by writing a value to a register, then wait for
+ * completion to be signaled. Returns true if the command completes
+ * or false if it times out.
+ */
+static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
+{
+ unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+ struct completion *completion = &gsi->completion;
+
+ reinit_completion(completion);
+
+ iowrite32(val, gsi->virt + reg);
+
+ return !!wait_for_completion_timeout(completion, timeout);
+}
+
+/* Return the hardware's notion of the current state of an event ring */
+static enum gsi_evt_ring_state
+gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
+{
+ const struct reg *reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
+ u32 val;
+
+ val = ioread32(gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ return reg_decode(reg, EV_CHSTATE, val);
+}
+
+/* Issue an event ring command and wait for it to complete */
+static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
+{
+ struct device *dev = gsi->dev;
+ const struct reg *reg;
+ bool timeout;
+ u32 val;
+
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
+
+ reg = gsi_reg(gsi, EV_CH_CMD);
+ val = reg_encode(reg, EV_CHID, evt_ring_id);
+ val |= reg_encode(reg, EV_OPCODE, opcode);
+
+ timeout = !gsi_command(gsi, reg_offset(reg), val);
+
+ gsi_irq_ev_ctrl_disable(gsi);
+
+ if (!timeout)
+ return;
+
+ dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
+ opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
+}
+
+/* Allocate an event ring in NOT_ALLOCATED state */
+static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ enum gsi_evt_ring_state state;
+
+ /* Get initial event ring state */
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+ dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
+ evt_ring_id, state);
+ return -EINVAL;
+ }
+
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
+
+ /* If successful the event ring state will have changed */
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_ALLOCATED)
+ return 0;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+ evt_ring_id, state);
+
+ return -EIO;
+}
+
+/* Reset a GSI event ring in ALLOCATED or ERROR state. */
+static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ enum gsi_evt_ring_state state;
+
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state != GSI_EVT_RING_STATE_ALLOCATED &&
+ state != GSI_EVT_RING_STATE_ERROR) {
+ dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
+ evt_ring_id, state);
+ return;
+ }
+
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+
+ /* If successful the event ring state will have changed */
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+ evt_ring_id, state);
+}
+
+/* Issue a hardware de-allocation request for an allocated event ring */
+static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ enum gsi_evt_ring_state state;
+
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state != GSI_EVT_RING_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
+ evt_ring_id, state);
+ return;
+ }
+
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+
+ /* If successful the event ring state will have changed */
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+ evt_ring_id, state);
+}
+
+/* Fetch the current state of a channel from hardware */
+static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
+{
+ const struct reg *reg = gsi_reg(channel->gsi, CH_C_CNTXT_0);
+ u32 channel_id = gsi_channel_id(channel);
+ struct gsi *gsi = channel->gsi;
+ void __iomem *virt = gsi->virt;
+ u32 val;
+
+ val = ioread32(virt + reg_n_offset(reg, channel_id));
+
+ return reg_decode(reg, CHSTATE, val);
+}
+
+/* Issue a channel command and wait for it to complete */
+static void
+gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
+{
+ u32 channel_id = gsi_channel_id(channel);
+ struct gsi *gsi = channel->gsi;
+ struct device *dev = gsi->dev;
+ const struct reg *reg;
+ bool timeout;
+ u32 val;
+
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ch_ctrl_enable(gsi, channel_id);
+
+ reg = gsi_reg(gsi, CH_CMD);
+ val = reg_encode(reg, CH_CHID, channel_id);
+ val |= reg_encode(reg, CH_OPCODE, opcode);
+
+ timeout = !gsi_command(gsi, reg_offset(reg), val);
+
+ gsi_irq_ch_ctrl_disable(gsi);
+
+ if (!timeout)
+ return;
+
+ dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
+ opcode, channel_id, gsi_channel_state(channel));
+}
+
+/* Allocate GSI channel in NOT_ALLOCATED state */
+static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct device *dev = gsi->dev;
+ enum gsi_channel_state state;
+
+ /* Get initial channel state */
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
+ dev_err(dev, "channel %u bad state %u before alloc\n",
+ channel_id, state);
+ return -EINVAL;
+ }
+
+ gsi_channel_command(channel, GSI_CH_ALLOCATE);
+
+ /* If successful the channel state will have changed */
+ state = gsi_channel_state(channel);
+ if (state == GSI_CHANNEL_STATE_ALLOCATED)
+ return 0;
+
+ dev_err(dev, "channel %u bad state %u after alloc\n",
+ channel_id, state);
+
+ return -EIO;
+}
+
+/* Start an ALLOCATED channel */
+static int gsi_channel_start_command(struct gsi_channel *channel)
+{
+ struct device *dev = channel->gsi->dev;
+ enum gsi_channel_state state;
+
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_ALLOCATED &&
+ state != GSI_CHANNEL_STATE_STOPPED) {
+ dev_err(dev, "channel %u bad state %u before start\n",
+ gsi_channel_id(channel), state);
+ return -EINVAL;
+ }
+
+ gsi_channel_command(channel, GSI_CH_START);
+
+ /* If successful the channel state will have changed */
+ state = gsi_channel_state(channel);
+ if (state == GSI_CHANNEL_STATE_STARTED)
+ return 0;
+
+ dev_err(dev, "channel %u bad state %u after start\n",
+ gsi_channel_id(channel), state);
+
+ return -EIO;
+}
+
+/* Stop a GSI channel in STARTED state */
+static int gsi_channel_stop_command(struct gsi_channel *channel)
+{
+ struct device *dev = channel->gsi->dev;
+ enum gsi_channel_state state;
+
+ state = gsi_channel_state(channel);
+
+ /* Channel could have entered STOPPED state since last call
+ * if it timed out. If so, we're done.
+ */
+ if (state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
+
+ if (state != GSI_CHANNEL_STATE_STARTED &&
+ state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
+ dev_err(dev, "channel %u bad state %u before stop\n",
+ gsi_channel_id(channel), state);
+ return -EINVAL;
+ }
+
+ gsi_channel_command(channel, GSI_CH_STOP);
+
+ /* If successful the channel state will have changed */
+ state = gsi_channel_state(channel);
+ if (state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
+
+ /* We may have to try again if stop is in progress */
+ if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
+ return -EAGAIN;
+
+ dev_err(dev, "channel %u bad state %u after stop\n",
+ gsi_channel_id(channel), state);
+
+ return -EIO;
+}
+
+/* Reset a GSI channel in ALLOCATED or ERROR state. */
+static void gsi_channel_reset_command(struct gsi_channel *channel)
+{
+ struct device *dev = channel->gsi->dev;
+ enum gsi_channel_state state;
+
+ /* A short delay is required before a RESET command */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_STOPPED &&
+ state != GSI_CHANNEL_STATE_ERROR) {
+ /* No need to reset a channel already in ALLOCATED state */
+ if (state != GSI_CHANNEL_STATE_ALLOCATED)
+ dev_err(dev, "channel %u bad state %u before reset\n",
+ gsi_channel_id(channel), state);
+ return;
+ }
+
+ gsi_channel_command(channel, GSI_CH_RESET);
+
+ /* If successful the channel state will have changed */
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_ALLOCATED)
+ dev_err(dev, "channel %u bad state %u after reset\n",
+ gsi_channel_id(channel), state);
+}
+
+/* Deallocate an ALLOCATED GSI channel */
+static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct device *dev = gsi->dev;
+ enum gsi_channel_state state;
+
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_ALLOCATED) {
+ dev_err(dev, "channel %u bad state %u before dealloc\n",
+ channel_id, state);
+ return;
+ }
+
+ gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+
+ /* If successful the channel state will have changed */
+ state = gsi_channel_state(channel);
+
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ dev_err(dev, "channel %u bad state %u after dealloc\n",
+ channel_id, state);
+}
+
+/* Ring an event ring doorbell, reporting the last entry processed by the AP.
+ * The index argument (modulo the ring count) is the first unfilled entry, so
+ * we supply one less than that with the doorbell. Update the event ring
+ * index field with the value provided.
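+ * For example, with a 256-element ring and an index of 0, (index - 1)
+ * wraps to 255, so the doorbell is written with the address of the ring's
+ * final element.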
+ */
+static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
+{
+ const struct reg *reg = gsi_reg(gsi, EV_CH_E_DOORBELL_0);
+ struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
+ u32 val;
+
+ ring->index = index; /* Next unused entry */
+
+ /* Note: index *must* be used modulo the ring count here */
+ val = gsi_ring_addr(ring, (index - 1) % ring->count);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
+}
+
+/* Program an event ring for use */
+static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ struct gsi_ring *ring = &evt_ring->ring;
+ const struct reg *reg;
+ u32 val;
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
+ /* We program all event rings as GPI type/protocol */
+ val = reg_encode(reg, EV_CHTYPE, GSI_CHANNEL_TYPE_GPI);
+ /* EV_EE field is 0 (GSI_EE_AP) */
+ val |= reg_bit(reg, EV_INTYPE);
+ val |= reg_encode(reg, EV_ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_1);
+ val = reg_encode(reg, R_LENGTH, ring->count * GSI_RING_ELEMENT_SIZE);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ /* The context 2 and 3 registers store the low-order and
+ * high-order 32 bits of the address of the event ring,
+ * respectively.
+ */
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_2);
+ val = lower_32_bits(ring->addr);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_3);
+ val = upper_32_bits(ring->addr);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ /* Enable interrupt moderation by setting the moderation delay */
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_8);
+ val = reg_encode(reg, EV_MODT, GSI_EVT_RING_INT_MODT);
+ val |= reg_encode(reg, EV_MODC, 1); /* comes from channel */
+ /* EV_MOD_CNT is 0 (no counter-based interrupt coalescing) */
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ /* No MSI write data, and MSI high and low address is 0 */
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_9);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_10);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_11);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ /* We don't need to get event read pointer updates */
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_12);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_13);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ /* Finally, tell the hardware our "last processed" event (arbitrary) */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
+}
+
+/* Find the transaction whose completion indicates a channel is quiesced */
+static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u32 pending_id = trans_info->pending_id;
+ struct gsi_trans *trans;
+ u16 trans_id;
+
+ if (channel->toward_ipa && pending_id != trans_info->free_id) {
+ /* There is a small chance a TX transaction got allocated
+ * just before we disabled transmits, so check for that.
+ * The last allocated, committed, or pending transaction
+ * precedes the first free transaction.
+ */
+ trans_id = trans_info->free_id - 1;
+ } else if (trans_info->polled_id != pending_id) {
+ /* Otherwise (TX or RX) we want to wait for anything that
+ * has completed, or has been polled but not released yet.
+ *
+ * The last completed or polled transaction precedes the
+ * first pending transaction.
+ */
+ trans_id = pending_id - 1;
+ } else {
+ return NULL;
+ }
+
+ /* Caller will wait for this, so take a reference */
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ refcount_inc(&trans->refcount);
+
+ return trans;
+}
+
+/* Wait for transaction activity on a channel to complete */
+static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
+{
+ struct gsi_trans *trans;
+
+ /* Get the last transaction, and wait for it to complete */
+ trans = gsi_channel_trans_last(channel);
+ if (trans) {
+ wait_for_completion(&trans->completion);
+ gsi_trans_free(trans);
+ }
+}
+
+/* Program a channel for use; there is no gsi_channel_deprogram() */
+static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
+{
+ size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
+ u32 channel_id = gsi_channel_id(channel);
+ union gsi_channel_scratch scr = { };
+ struct gsi_channel_scratch_gpi *gpi;
+ struct gsi *gsi = channel->gsi;
+ const struct reg *reg;
+ u32 wrr_weight = 0;
+ u32 offset;
+ u32 val;
+
+ reg = gsi_reg(gsi, CH_C_CNTXT_0);
+
+ /* We program all channels as GPI type/protocol */
+ val = ch_c_cntxt_0_type_encode(gsi->version, reg, GSI_CHANNEL_TYPE_GPI);
+ if (channel->toward_ipa)
+ val |= reg_bit(reg, CHTYPE_DIR);
+ if (gsi->version < IPA_VERSION_5_0)
+ val |= reg_encode(reg, ERINDEX, channel->evt_ring_id);
+ val |= reg_encode(reg, ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_CNTXT_1);
+ val = reg_encode(reg, CH_R_LENGTH, size);
+ if (gsi->version >= IPA_VERSION_5_0)
+ val |= reg_encode(reg, CH_ERINDEX, channel->evt_ring_id);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ /* The context 2 and 3 registers store the low-order and
+ * high-order 32 bits of the address of the channel ring,
+ * respectively.
+ */
+ reg = gsi_reg(gsi, CH_C_CNTXT_2);
+ val = lower_32_bits(channel->tre_ring.addr);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_CNTXT_3);
+ val = upper_32_bits(channel->tre_ring.addr);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_QOS);
+
+ /* Command channel gets low weighted round-robin priority */
+ if (channel->command)
+ wrr_weight = reg_field_max(reg, WRR_WEIGHT);
+ val = reg_encode(reg, WRR_WEIGHT, wrr_weight);
+
+ /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
+
+ /* No need to use the doorbell engine starting at IPA v4.0 */
+ if (gsi->version < IPA_VERSION_4_0 && doorbell)
+ val |= reg_bit(reg, USE_DB_ENG);
+
+ /* v4.0 introduces an escape buffer for prefetch. We use it
+ * on all but the AP command channel.
+ */
+ if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
+ /* If not otherwise set, prefetch buffers are used */
+ if (gsi->version < IPA_VERSION_4_5)
+ val |= reg_bit(reg, USE_ESCAPE_BUF_ONLY);
+ else
+ val |= reg_encode(reg, PREFETCH_MODE, ESCAPE_BUF_ONLY);
+ }
+ /* All channels set DB_IN_BYTES */
+ if (gsi->version >= IPA_VERSION_4_9)
+ val |= reg_bit(reg, DB_IN_BYTES);
+
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ /* Now update the scratch registers for GPI protocol */
+ gpi = &scr.gpi;
+ gpi->max_outstanding_tre = channel->trans_tre_max *
+ GSI_RING_ELEMENT_SIZE;
+ gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
+
+ reg = gsi_reg(gsi, CH_C_SCRATCH_0);
+ val = scr.data.word1;
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_SCRATCH_1);
+ val = scr.data.word2;
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_SCRATCH_2);
+ val = scr.data.word3;
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ /* We must preserve the upper 16 bits of the last scratch register.
+ * The next sequence assumes those bits remain unchanged between the
+ * read and the write.
+ */
+ reg = gsi_reg(gsi, CH_C_SCRATCH_3);
+ offset = reg_n_offset(reg, channel_id);
+ val = ioread32(gsi->virt + offset);
+ val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
+ iowrite32(val, gsi->virt + offset);
+
+ /* All done! */
+}
+
+static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
+{
+ struct gsi *gsi = channel->gsi;
+ int ret;
+
+ /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
+ if (resume && gsi->version < IPA_VERSION_4_0)
+ return 0;
+
+ mutex_lock(&gsi->mutex);
+
+ ret = gsi_channel_start_command(channel);
+
+ mutex_unlock(&gsi->mutex);
+
+ return ret;
+}
+
+/* Start an allocated GSI channel */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ /* Enable NAPI and the completion interrupt */
+ napi_enable(&channel->napi);
+ gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
+
+ ret = __gsi_channel_start(channel, false);
+ if (ret) {
+ gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
+ napi_disable(&channel->napi);
+ }
+
+ return ret;
+}
+
+static int gsi_channel_stop_retry(struct gsi_channel *channel)
+{
+ u32 retries = GSI_CHANNEL_STOP_RETRIES;
+ int ret;
+
+ do {
+ ret = gsi_channel_stop_command(channel);
+ if (ret != -EAGAIN)
+ break;
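+ /* The stop hasn't completed yet; pause briefly before retrying */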
+ usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
+ } while (retries--);
+
+ return ret;
+}
+
+static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
+{
+ struct gsi *gsi = channel->gsi;
+ int ret;
+
+ /* Wait for any underway transactions to complete before stopping. */
+ gsi_channel_trans_quiesce(channel);
+
+ /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
+ if (suspend && gsi->version < IPA_VERSION_4_0)
+ return 0;
+
+ mutex_lock(&gsi->mutex);
+
+ ret = gsi_channel_stop_retry(channel);
+
+ mutex_unlock(&gsi->mutex);
+
+ return ret;
+}
+
+/* Stop a started channel */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ ret = __gsi_channel_stop(channel, false);
+ if (ret)
+ return ret;
+
+ /* Disable the completion interrupt and NAPI if successful */
+ gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
+ napi_disable(&channel->napi);
+
+ return 0;
+}
+
+/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ mutex_lock(&gsi->mutex);
+
+ gsi_channel_reset_command(channel);
+ /* Due to a hardware quirk we may need to reset RX channels twice. */
+ if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
+ gsi_channel_reset_command(channel);
+
+ /* Hardware assumes this is 0 following reset */
+ channel->tre_ring.index = 0;
+ gsi_channel_program(channel, doorbell);
+ gsi_channel_trans_cancel_pending(channel);
+
+ mutex_unlock(&gsi->mutex);
+}
+
+/* Stop a started channel for suspend */
+int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ ret = __gsi_channel_stop(channel, true);
+ if (ret)
+ return ret;
+
+ /* Ensure NAPI polling has finished. */
+ napi_synchronize(&channel->napi);
+
+ return 0;
+}
+
+/* Resume a suspended channel (starting if stopped) */
+int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ return __gsi_channel_start(channel, true);
+}
+
+/* Prevent all GSI interrupts while suspended */
+void gsi_suspend(struct gsi *gsi)
+{
+ disable_irq(gsi->irq);
+}
+
+/* Allow all GSI interrupts again when resuming */
+void gsi_resume(struct gsi *gsi)
+{
+ enable_irq(gsi->irq);
+}
+
+void gsi_trans_tx_committed(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+
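+ /* Update the channel's running totals, and snapshot them in the transaction */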
+ channel->trans_count++;
+ channel->byte_count += trans->len;
+
+ trans->trans_count = channel->trans_count;
+ trans->byte_count = channel->byte_count;
+}
+
+void gsi_trans_tx_queued(struct gsi_trans *trans)
+{
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
+ u32 trans_count;
+ u32 byte_count;
+
+ channel = &gsi->channel[channel_id];
+
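+ /* Report only what has been committed since the previous report */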
+ byte_count = channel->byte_count - channel->queued_byte_count;
+ trans_count = channel->trans_count - channel->queued_trans_count;
+ channel->queued_byte_count = channel->byte_count;
+ channel->queued_trans_count = channel->trans_count;
+
+ ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
+}
+
+/**
+ * gsi_trans_tx_completed() - Report completed TX transactions
+ * @trans: TX channel transaction that has completed
+ *
+ * Report that a transaction on a TX channel has completed. At the time a
+ * transaction is committed, we record *in the transaction* its channel's
+ * committed transaction and byte counts. Transactions are completed in
+ * order, and the difference between the channel's byte/transaction count
+ * when the transaction was committed and when it completes tells us
+ * exactly how much data has been transferred while the transaction was
+ * pending.
+ *
+ * We report this information to the network stack, which uses it to manage
+ * the rate at which data is sent to hardware.
+ */
+static void gsi_trans_tx_completed(struct gsi_trans *trans)
+{
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
+ u32 trans_count;
+ u32 byte_count;
+
+ channel = &gsi->channel[channel_id];
+ trans_count = trans->trans_count - channel->compl_trans_count;
+ byte_count = trans->byte_count - channel->compl_byte_count;
+
+ channel->compl_trans_count += trans_count;
+ channel->compl_byte_count += byte_count;
+
+ ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
+}
+
+/* Channel control interrupt handler */
+static void gsi_isr_chan_ctrl(struct gsi *gsi)
+{
+ const struct reg *reg;
+ u32 channel_mask;
+
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ);
+ channel_mask = ioread32(gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
+ iowrite32(channel_mask, gsi->virt + reg_offset(reg));
+
+ while (channel_mask) {
+ u32 channel_id = __ffs(channel_mask);
+
+ channel_mask ^= BIT(channel_id);
+
+ complete(&gsi->completion);
+ }
+}
+
+/* Event ring control interrupt handler */
+static void gsi_isr_evt_ctrl(struct gsi *gsi)
+{
+ const struct reg *reg;
+ u32 event_mask;
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ);
+ event_mask = ioread32(gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
+ iowrite32(event_mask, gsi->virt + reg_offset(reg));
+
+ while (event_mask) {
+ u32 evt_ring_id = __ffs(event_mask);
+
+ event_mask ^= BIT(evt_ring_id);
+
+ complete(&gsi->completion);
+ }
+}
+
+/* Global channel error interrupt handler */
+static void
+gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
+{
+ if (code == GSI_OUT_OF_RESOURCES) {
+ dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
+ complete(&gsi->completion);
+ return;
+ }
+
+ /* Report, but otherwise ignore all other error codes */
+ dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
+ channel_id, err_ee, code);
+}
+
+/* Global event error interrupt handler */
+static void
+gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
+{
+ if (code == GSI_OUT_OF_RESOURCES) {
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ u32 channel_id = gsi_channel_id(evt_ring->channel);
+
+ complete(&gsi->completion);
+ dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
+ channel_id);
+ return;
+ }
+
+ /* Report, but otherwise ignore all other error codes */
+ dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
+ evt_ring_id, err_ee, code);
+}
+
+/* Global error interrupt handler */
+static void gsi_isr_glob_err(struct gsi *gsi)
+{
+ const struct reg *log_reg;
+ const struct reg *clr_reg;
+ enum gsi_err_type type;
+ enum gsi_err_code code;
+ u32 offset;
+ u32 which;
+ u32 val;
+ u32 ee;
+
+ /* Get the logged error, then reinitialize the log */
+ log_reg = gsi_reg(gsi, ERROR_LOG);
+ offset = reg_offset(log_reg);
+ val = ioread32(gsi->virt + offset);
+ iowrite32(0, gsi->virt + offset);
+
+ clr_reg = gsi_reg(gsi, ERROR_LOG_CLR);
+ iowrite32(~0, gsi->virt + reg_offset(clr_reg));
+
+ /* Parse the error value */
+ ee = reg_decode(log_reg, ERR_EE, val);
+ type = reg_decode(log_reg, ERR_TYPE, val);
+ which = reg_decode(log_reg, ERR_VIRT_IDX, val);
+ code = reg_decode(log_reg, ERR_CODE, val);
+
+ if (type == GSI_ERR_TYPE_CHAN)
+ gsi_isr_glob_chan_err(gsi, ee, which, code);
+ else if (type == GSI_ERR_TYPE_EVT)
+ gsi_isr_glob_evt_err(gsi, ee, which, code);
+ else /* type GSI_ERR_TYPE_GLOB should be fatal */
+ dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
+}
+
+/* Generic EE interrupt handler */
+static void gsi_isr_gp_int1(struct gsi *gsi)
+{
+ const struct reg *reg;
+ u32 result;
+ u32 val;
+
+ /* This interrupt is used to handle completions of GENERIC GSI
+ * commands. We use these to allocate and halt channels on the
+ * modem's behalf due to a hardware quirk on IPA v4.2. The modem
+ * "owns" channels even when the AP allocates them, and have no
+ * way of knowing whether a modem channel's state has been changed.
+ *
+ * We also use GENERIC commands to enable/disable channel flow
+ * control for IPA v4.2+.
+ *
+ * It is recommended that we halt the modem channels we allocated
+ * when shutting down, but it's possible the channel isn't running
+ * at the time we issue the HALT command. We'll get an error in
+ * that case, but it's harmless (the channel is already halted).
+ * Similarly, we could get an error back when updating flow control
+ * on a channel because it's not in the proper state.
+ *
+ * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
+ * error if we receive it.
+ */
+ reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
+ val = ioread32(gsi->virt + reg_offset(reg));
+ result = reg_decode(reg, GENERIC_EE_RESULT, val);
+
+ switch (result) {
+ case GENERIC_EE_SUCCESS:
+ case GENERIC_EE_INCORRECT_CHANNEL_STATE:
+ gsi->result = 0;
+ break;
+
+ case GENERIC_EE_RETRY:
+ gsi->result = -EAGAIN;
+ break;
+
+ default:
+ dev_err(gsi->dev, "global INT1 generic result %u\n", result);
+ gsi->result = -EIO;
+ break;
+ }
+
+ complete(&gsi->completion);
+}
+
+/* Global EE interrupt handler */
+static void gsi_isr_glob_ee(struct gsi *gsi)
+{
+ const struct reg *reg;
+ u32 val;
+
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_STTS);
+ val = ioread32(gsi->virt + reg_offset(reg));
+
+ if (val & ERROR_INT)
+ gsi_isr_glob_err(gsi);
+
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_CLR);
+ iowrite32(val, gsi->virt + reg_offset(reg));
+
+ val &= ~ERROR_INT;
+
+ if (val & GP_INT1) {
+ val ^= GP_INT1;
+ gsi_isr_gp_int1(gsi);
+ }
+
+ if (val)
+ dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
+}
+
+/* I/O completion interrupt event */
+static void gsi_isr_ieob(struct gsi *gsi)
+{
+ const struct reg *reg;
+ u32 event_mask;
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ);
+ event_mask = ioread32(gsi->virt + reg_offset(reg));
+
+ gsi_irq_ieob_disable(gsi, event_mask);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_CLR);
+ iowrite32(event_mask, gsi->virt + reg_offset(reg));
+
+ while (event_mask) {
+ u32 evt_ring_id = __ffs(event_mask);
+
+ event_mask ^= BIT(evt_ring_id);
+
+ napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
+ }
+}
+
+/* General event interrupts represent serious problems, so report them */
+static void gsi_isr_general(struct gsi *gsi)
+{
+ struct device *dev = gsi->dev;
+ const struct reg *reg;
+ u32 val;
+
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_STTS);
+ val = ioread32(gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_CLR);
+ iowrite32(val, gsi->virt + reg_offset(reg));
+
+ dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
+}
+
+/**
+ * gsi_isr() - Top level GSI interrupt service routine
+ * @irq: Interrupt number (ignored)
+ * @dev_id: GSI pointer supplied to request_irq()
+ *
+ * This is the main handler function registered for the GSI IRQ. Each type
+ * of interrupt has a separate handler function that is called from here.
+ */
+static irqreturn_t gsi_isr(int irq, void *dev_id)
+{
+ struct gsi *gsi = dev_id;
+ const struct reg *reg;
+ u32 intr_mask;
+ u32 cnt = 0;
+ u32 offset;
+
+ reg = gsi_reg(gsi, CNTXT_TYPE_IRQ);
+ offset = reg_offset(reg);
+
+ /* enum gsi_irq_type_id defines GSI interrupt types */
+ while ((intr_mask = ioread32(gsi->virt + offset))) {
+ /* intr_mask contains bitmask of pending GSI interrupts */
+ do {
+ u32 gsi_intr = BIT(__ffs(intr_mask));
+
+ intr_mask ^= gsi_intr;
+
+ /* Note: the IRQ condition for each type is cleared
+ * when the type-specific register is updated.
+ */
+ switch (gsi_intr) {
+ case GSI_CH_CTRL:
+ gsi_isr_chan_ctrl(gsi);
+ break;
+ case GSI_EV_CTRL:
+ gsi_isr_evt_ctrl(gsi);
+ break;
+ case GSI_GLOB_EE:
+ gsi_isr_glob_ee(gsi);
+ break;
+ case GSI_IEOB:
+ gsi_isr_ieob(gsi);
+ break;
+ case GSI_GENERAL:
+ gsi_isr_general(gsi);
+ break;
+ default:
+ dev_err(gsi->dev,
+ "unrecognized interrupt type 0x%08x\n",
+ gsi_intr);
+ break;
+ }
+ } while (intr_mask);
+
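+ /* Guard against an interrupt storm; give up if types keep reasserting */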
+ if (++cnt > GSI_ISR_MAX_ITER) {
+ dev_err(gsi->dev, "interrupt flood\n");
+ break;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
+static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
+{
+ int ret;
+
+ ret = platform_get_irq_byname(pdev, "gsi");
+ if (ret <= 0)
+ return ret ? : -EINVAL;
+
+ gsi->irq = ret;
+
+ return 0;
+}
+
+/* Return the transaction associated with a transfer completion event */
+static struct gsi_trans *
+gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
+{
+ u32 channel_id = event->chid;
+ struct gsi_channel *channel;
+ struct gsi_trans *trans;
+ u32 tre_offset;
+ u32 tre_index;
+
+ channel = &gsi->channel[channel_id];
+ if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
+ return NULL;
+
+ /* Event xfer_ptr records the TRE it's associated with */
+ tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
+ tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
+
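+ /* The channel's map array translates a TRE index to its transaction */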
+ trans = gsi_channel_trans_mapped(channel, tre_index);
+
+ if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
+ return NULL;
+
+ return trans;
+}
+
+/**
+ * gsi_evt_ring_update() - Update transaction state from hardware
+ * @gsi: GSI pointer
+ * @evt_ring_id: Event ring ID
+ * @index: Event index in ring reported by hardware
+ *
+ * Events for RX channels contain the actual number of bytes received into
+ * the buffer. Every event has a transaction associated with it, and here
+ * we update transactions to record their actual received lengths.
+ *
+ * When an event for a TX channel arrives we use information in the
+ * transaction to report the number of requests and bytes that have
+ * been transferred.
+ *
+ * This function is called whenever we learn that the GSI hardware has filled
+ * new events since the last time we checked. The ring's index field
+ * identifies the first entry in need of processing. The @index provided
+ * is the first *unfilled* event in the ring (following the last filled one).
+ *
+ * Events are sequential within the event ring, and transactions are
+ * sequential within the transaction array.
+ *
+ * Note that @index always refers to an element *within* the event ring.
+ */
+static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ struct gsi_ring *ring = &evt_ring->ring;
+ struct gsi_event *event_done;
+ struct gsi_event *event;
+ u32 event_avail;
+ u32 old_index;
+
+ /* Starting with the oldest un-processed event, determine which
+ * transaction (and which channel) is associated with the event.
+ * For RX channels, update each completed transaction with the
+ * number of bytes that were actually received. For TX channels
+ * associated with a network device, report to the network stack
+ * the number of transfers and bytes this completion represents.
+ */
+ old_index = ring->index;
+ event = gsi_ring_virt(ring, old_index);
+
+ /* Compute the number of events to process before we wrap,
+ * and determine when we'll be done processing events.
+ */
+ event_avail = ring->count - old_index % ring->count;
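+ /* event_avail is the number of entries from old_index to the end of the ring */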
+ event_done = gsi_ring_virt(ring, index);
+ do {
+ struct gsi_trans *trans;
+
+ trans = gsi_event_trans(gsi, event);
+ if (!trans)
+ return;
+
+ if (trans->direction == DMA_FROM_DEVICE)
+ trans->len = __le16_to_cpu(event->len);
+ else
+ gsi_trans_tx_completed(trans);
+
+ gsi_trans_move_complete(trans);
+
+ /* Move on to the next event and transaction */
+ if (--event_avail)
+ event++;
+ else
+ event = gsi_ring_virt(ring, 0);
+ } while (event != event_done);
+
+ /* Tell the hardware we've handled these events */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
+}
+
+/* Initialize a ring, including allocating DMA memory for its entries */
+static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
+{
+ u32 size = count * GSI_RING_ELEMENT_SIZE;
+ struct device *dev = gsi->dev;
+ dma_addr_t addr;
+
+ /* Hardware requires a 2^n ring size, with alignment equal to size.
+ * The DMA address returned by dma_alloc_coherent() is aligned to at
+ * least the size of the allocation, which satisfies the requirement.
+ */
+ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+ if (!ring->virt)
+ return -ENOMEM;
+
+ ring->addr = addr;
+ ring->count = count;
+ ring->index = 0;
+
+ return 0;
+}
+
+/* Free a previously-allocated ring */
+static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
+{
+ size_t size = ring->count * GSI_RING_ELEMENT_SIZE;
+
+ dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
+}
+
+/* Allocate an available event ring id */
+static int gsi_evt_ring_id_alloc(struct gsi *gsi)
+{
+ u32 evt_ring_id;
+
+ if (gsi->event_bitmap == ~0U) {
+ dev_err(gsi->dev, "event rings exhausted\n");
+ return -ENOSPC;
+ }
+
+ evt_ring_id = ffz(gsi->event_bitmap);
+ gsi->event_bitmap |= BIT(evt_ring_id);
+
+ return (int)evt_ring_id;
+}
+
+/* Free a previously-allocated event ring id */
+static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
+{
+ gsi->event_bitmap &= ~BIT(evt_ring_id);
+}
+
+/* Ring a channel doorbell, reporting the first un-filled entry */
+void gsi_channel_doorbell(struct gsi_channel *channel)
+{
+ struct gsi_ring *tre_ring = &channel->tre_ring;
+ u32 channel_id = gsi_channel_id(channel);
+ struct gsi *gsi = channel->gsi;
+ const struct reg *reg;
+ u32 val;
+
+ reg = gsi_reg(gsi, CH_C_DOORBELL_0);
+ /* Note: index *must* be used modulo the ring count here */
+ val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+}
+
+/* Consult hardware, move newly completed transactions to completed state */
+void gsi_channel_update(struct gsi_channel *channel)
+{
+ u32 evt_ring_id = channel->evt_ring_id;
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+ struct gsi_trans *trans;
+ struct gsi_ring *ring;
+ const struct reg *reg;
+ u32 offset;
+ u32 index;
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ ring = &evt_ring->ring;
+
+ /* See if there's anything new to process; if not, we're done. Note
+ * that index always refers to an entry *within* the event ring.
+ */
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_4);
+ offset = reg_n_offset(reg, evt_ring_id);
+ index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
+ if (index == ring->index % ring->count)
+ return;
+
+ /* Get the transaction for the latest completed event. */
+ trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
+ if (!trans)
+ return;
+
+ /* For RX channels, update each completed transaction with the number
+ * of bytes that were actually received. For TX channels, report
+ * the number of transactions and bytes this completion represents
+ * up the network stack.
+ */
+ gsi_evt_ring_update(gsi, evt_ring_id, index);
+}
+
+/**
+ * gsi_channel_poll_one() - Return a single completed transaction on a channel
+ * @channel: Channel to be polled
+ *
+ * Return: Transaction pointer, or null if none are available
+ *
+ * This function returns the first of a channel's completed transactions.
+ * If no transactions are in completed state, the hardware is consulted to
+ * determine whether any new transactions have completed. If so, they're
+ * moved to completed state and the first such transaction is returned.
+ * If there are no more completed transactions, a null pointer is returned.
+ */
+static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
+{
+ struct gsi_trans *trans;
+
+ /* Get the first completed transaction */
+ trans = gsi_channel_trans_complete(channel);
+ if (trans)
+ gsi_trans_move_polled(trans);
+
+ return trans;
+}
+
+/**
+ * gsi_channel_poll() - NAPI poll function for a channel
+ * @napi: NAPI structure for the channel
+ * @budget: Budget supplied by NAPI core
+ *
+ * Return: Number of items polled (<= budget)
+ *
+ * Single transactions completed by hardware are polled until either
+ * the budget is exhausted, or there are no more. Each transaction
+ * polled is passed to gsi_trans_complete(), to perform remaining
+ * completion processing and retire/free the transaction.
+ */
+static int gsi_channel_poll(struct napi_struct *napi, int budget)
+{
+ struct gsi_channel *channel;
+ int count;
+
+ channel = container_of(napi, struct gsi_channel, napi);
+ for (count = 0; count < budget; count++) {
+ struct gsi_trans *trans;
+
+ trans = gsi_channel_poll_one(channel);
+ if (!trans)
+ break;
+ gsi_trans_complete(trans);
+ }
+
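+ /* If the budget wasn't exhausted, polling is done; re-enable the IEOB interrupt */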
+ if (count < budget && napi_complete(napi))
+ gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
+
+ return count;
+}
+
+/* The event bitmap represents which event ids are available for allocation.
+ * Set bits are not available, clear bits can be used. This function
+ * initializes the map so all events supported by the hardware are available,
+ * then precludes any reserved events from being allocated.
+ */
+static u32 gsi_event_bitmap_init(u32 evt_ring_max)
+{
+ u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
+
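+ /* The MHI event ring ID range is reserved, so mark those in use as well */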
+ event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
+
+ return event_bitmap;
+}
+
+/* Setup function for a single channel */
+static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+ int ret;
+
+ if (!gsi_channel_initialized(channel))
+ return 0;
+
+ ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
+ if (ret)
+ return ret;
+
+ gsi_evt_ring_program(gsi, evt_ring_id);
+
+ ret = gsi_channel_alloc_command(gsi, channel_id);
+ if (ret)
+ goto err_evt_ring_de_alloc;
+
+ gsi_channel_program(channel, true);
+
+ if (channel->toward_ipa)
+ netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll);
+ else
+ netif_napi_add(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll);
+
+ return 0;
+
+err_evt_ring_de_alloc:
+ /* We've done nothing with the event ring yet so don't reset */
+ gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_setup_one() */
+static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+
+ if (!gsi_channel_initialized(channel))
+ return;
+
+ netif_napi_del(&channel->napi);
+
+ gsi_channel_de_alloc_command(gsi, channel_id);
+ gsi_evt_ring_reset_command(gsi, evt_ring_id);
+ gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
+}
+
+/* We use generic commands only to operate on modem channels. We don't have
+ * the ability to determine channel state for a modem channel, so we simply
+ * issue the command and wait for it to complete.
+ */
+static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
+ enum gsi_generic_cmd_opcode opcode,
+ u8 params)
+{
+ const struct reg *reg;
+ bool timeout;
+ u32 offset;
+ u32 val;
+
+ /* The error global interrupt type is always enabled (until we tear
+ * down), so we will keep it enabled.
+ *
+ * A generic EE command completes with a GSI global interrupt of
+ * type GP_INT1. We only perform one generic command at a time
+ * (to allocate, halt, or enable/disable flow control on a modem
+ * channel), and only from this function. So we enable the GP_INT1
+ * IRQ type here, and disable it again after the command completes.
+ */
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ val = ERROR_INT | GP_INT1;
+ iowrite32(val, gsi->virt + reg_offset(reg));
+
+ /* First zero the result code field */
+ reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
+ offset = reg_offset(reg);
+ val = ioread32(gsi->virt + offset);
+
+ val &= ~reg_fmask(reg, GENERIC_EE_RESULT);
+ iowrite32(val, gsi->virt + offset);
+
+ /* Now issue the command */
+ reg = gsi_reg(gsi, GENERIC_CMD);
+ val = reg_encode(reg, GENERIC_OPCODE, opcode);
+ val |= reg_encode(reg, GENERIC_CHID, channel_id);
+ val |= reg_encode(reg, GENERIC_EE, GSI_EE_MODEM);
+ if (gsi->version >= IPA_VERSION_4_11)
+ val |= reg_encode(reg, GENERIC_PARAMS, params);
+
+ timeout = !gsi_command(gsi, reg_offset(reg), val);
+
+ /* Disable the GP_INT1 IRQ type again */
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));
+
+ if (!timeout)
+ return gsi->result;
+
+ dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
+ opcode, channel_id);
+
+ return -ETIMEDOUT;
+}
+
+static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
+{
+ return gsi_generic_command(gsi, channel_id,
+ GSI_GENERIC_ALLOCATE_CHANNEL, 0);
+}
+
+static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
+{
+ u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
+ int ret;
+
+ do
+ ret = gsi_generic_command(gsi, channel_id,
+ GSI_GENERIC_HALT_CHANNEL, 0);
+ while (ret == -EAGAIN && retries--);
+
+ if (ret)
+ dev_err(gsi->dev, "error %d halting modem channel %u\n",
+ ret, channel_id);
+}
+
+/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
+void
+gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
+{
+ u32 retries = 0;
+ u32 command;
+ int ret;
+
+ command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
+ : GSI_GENERIC_DISABLE_FLOW_CONTROL;
+ /* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
+ * is underway. In this case we need to retry the command.
+ */
+ if (!enable && gsi->version >= IPA_VERSION_4_11)
+ retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;
+
+ do
+ ret = gsi_generic_command(gsi, channel_id, command, 0);
+ while (ret == -EAGAIN && retries--);
+
+ if (ret)
+ dev_err(gsi->dev,
+ "error %d %sabling mode channel %u flow control\n",
+ ret, enable ? "en" : "dis", channel_id);
+}
+
+/* Setup function for channels */
+static int gsi_channel_setup(struct gsi *gsi)
+{
+ u32 channel_id = 0;
+ u32 mask;
+ int ret;
+
+ gsi_irq_enable(gsi);
+
+ mutex_lock(&gsi->mutex);
+
+ do {
+ ret = gsi_channel_setup_one(gsi, channel_id);
+ if (ret)
+ goto err_unwind;
+ } while (++channel_id < gsi->channel_count);
+
+ /* Make sure no channels were defined that hardware does not support */
+ while (channel_id < GSI_CHANNEL_COUNT_MAX) {
+ struct gsi_channel *channel = &gsi->channel[channel_id++];
+
+ if (!gsi_channel_initialized(channel))
+ continue;
+
+ ret = -EINVAL;
+ dev_err(gsi->dev, "channel %u not supported by hardware\n",
+ channel_id - 1);
+ channel_id = gsi->channel_count;
+ goto err_unwind;
+ }
+
+ /* Allocate modem channels if necessary */
+ mask = gsi->modem_channel_bitmap;
+ while (mask) {
+ u32 modem_channel_id = __ffs(mask);
+
+ ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
+ if (ret)
+ goto err_unwind_modem;
+
+ /* Clear bit from mask only after success (for unwind) */
+ mask ^= BIT(modem_channel_id);
+ }
+
+ mutex_unlock(&gsi->mutex);
+
+ return 0;
+
+err_unwind_modem:
+ /* Compute which modem channels need to be deallocated */
+ mask ^= gsi->modem_channel_bitmap;
+ while (mask) {
+ channel_id = __fls(mask);
+
+ mask ^= BIT(channel_id);
+
+ gsi_modem_channel_halt(gsi, channel_id);
+ }
+
+err_unwind:
+ while (channel_id--)
+ gsi_channel_teardown_one(gsi, channel_id);
+
+ mutex_unlock(&gsi->mutex);
+
+ gsi_irq_disable(gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_setup() */
+static void gsi_channel_teardown(struct gsi *gsi)
+{
+ u32 mask = gsi->modem_channel_bitmap;
+ u32 channel_id;
+
+ mutex_lock(&gsi->mutex);
+
+ while (mask) {
+ channel_id = __fls(mask);
+
+ mask ^= BIT(channel_id);
+
+ gsi_modem_channel_halt(gsi, channel_id);
+ }
+
+ channel_id = gsi->channel_count - 1;
+ do
+ gsi_channel_teardown_one(gsi, channel_id);
+ while (channel_id--);
+
+ mutex_unlock(&gsi->mutex);
+
+ gsi_irq_disable(gsi);
+}
+
+/* Turn off all GSI interrupts initially */
+static int gsi_irq_setup(struct gsi *gsi)
+{
+ const struct reg *reg;
+ int ret;
+
+ /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
+ reg = gsi_reg(gsi, CNTXT_INTSET);
+ iowrite32(reg_bit(reg, INTYPE), gsi->virt + reg_offset(reg));
+
+ /* Disable all interrupt types */
+ gsi_irq_type_update(gsi, 0);
+
+ /* Clear all type-specific interrupt masks */
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
+ if (gsi->version > IPA_VERSION_3_1) {
+ reg = gsi_reg(gsi, INTER_EE_SRC_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, INTER_EE_SRC_EV_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+ }
+
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
+ if (ret)
+ dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
+
+ return ret;
+}
+
+static void gsi_irq_teardown(struct gsi *gsi)
+{
+ free_irq(gsi->irq, gsi);
+}
+
+/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
+static int gsi_ring_setup(struct gsi *gsi)
+{
+ struct device *dev = gsi->dev;
+ const struct reg *reg;
+ u32 count;
+ u32 val;
+
+ if (gsi->version < IPA_VERSION_3_5_1) {
+ /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
+ gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
+ gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
+
+ return 0;
+ }
+
+ reg = gsi_reg(gsi, HW_PARAM_2);
+ val = ioread32(gsi->virt + reg_offset(reg));
+
+ count = reg_decode(reg, NUM_CH_PER_EE, val);
+ if (!count) {
+ dev_err(dev, "GSI reports zero channels supported\n");
+ return -EINVAL;
+ }
+ if (count > GSI_CHANNEL_COUNT_MAX) {
+ dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
+ GSI_CHANNEL_COUNT_MAX, count);
+ count = GSI_CHANNEL_COUNT_MAX;
+ }
+ gsi->channel_count = count;
+
+ if (gsi->version < IPA_VERSION_5_0) {
+ count = reg_decode(reg, NUM_EV_PER_EE, val);
+ } else {
+ /* IPA v5.0+ reports the event ring count in HW_PARAM_4 */
+ reg = gsi_reg(gsi, HW_PARAM_4);
+ val = ioread32(gsi->virt + reg_offset(reg));
+ count = reg_decode(reg, EV_PER_EE, val);
+ }
+ if (!count) {
+ dev_err(dev, "GSI reports zero event rings supported\n");
+ return -EINVAL;
+ }
+ if (count > GSI_EVT_RING_COUNT_MAX) {
+ dev_warn(dev,
+ "limiting to %u event rings; hardware supports %u\n",
+ GSI_EVT_RING_COUNT_MAX, count);
+ count = GSI_EVT_RING_COUNT_MAX;
+ }
+ gsi->evt_ring_count = count;
+
+ return 0;
+}
+
+/* Setup function for GSI. GSI firmware must be loaded and initialized */
+int gsi_setup(struct gsi *gsi)
+{
+ const struct reg *reg;
+ u32 val;
+ int ret;
+
+ /* Here is where we first touch the GSI hardware */
+ reg = gsi_reg(gsi, GSI_STATUS);
+ val = ioread32(gsi->virt + reg_offset(reg));
+ if (!(val & reg_bit(reg, ENABLED))) {
+ dev_err(gsi->dev, "GSI has not been enabled\n");
+ return -EIO;
+ }
+
+ ret = gsi_irq_setup(gsi);
+ if (ret)
+ return ret;
+
+ ret = gsi_ring_setup(gsi); /* No matching teardown required */
+ if (ret)
+ goto err_irq_teardown;
+
+ /* Initialize the error log */
+ reg = gsi_reg(gsi, ERROR_LOG);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ ret = gsi_channel_setup(gsi);
+ if (ret)
+ goto err_irq_teardown;
+
+ return 0;
+
+err_irq_teardown:
+ gsi_irq_teardown(gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_setup() */
+void gsi_teardown(struct gsi *gsi)
+{
+ gsi_channel_teardown(gsi);
+ gsi_irq_teardown(gsi);
+}
+
+/* Initialize a channel's event ring */
+static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
+{
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+ int ret;
+
+ ret = gsi_evt_ring_id_alloc(gsi);
+ if (ret < 0)
+ return ret;
+ channel->evt_ring_id = ret;
+
+ evt_ring = &gsi->evt_ring[channel->evt_ring_id];
+ evt_ring->channel = channel;
+
+ ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
+ if (!ret)
+ return 0; /* Success! */
+
+ dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
+ ret, gsi_channel_id(channel));
+
+ gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_evt_ring_init() */
+static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
+{
+ u32 evt_ring_id = channel->evt_ring_id;
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ gsi_ring_free(gsi, &evt_ring->ring);
+ gsi_evt_ring_id_free(gsi, evt_ring_id);
+}
+
+static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ const struct gsi_channel_data *channel_data;
+ u32 channel_id = data->channel_id;
+ struct device *dev = gsi->dev;
+
+ /* Make sure channel ids are in the range driver supports */
+ if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
+ dev_err(dev, "bad channel id %u; must be less than %u\n",
+ channel_id, GSI_CHANNEL_COUNT_MAX);
+ return false;
+ }
+
+ if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
+ dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
+ return false;
+ }
+
+ if (command && !data->toward_ipa) {
+ dev_err(dev, "command channel %u is not TX\n", channel_id);
+ return false;
+ }
+
+ channel_data = &data->channel;
+
+ if (!channel_data->tlv_count ||
+ channel_data->tlv_count > GSI_TLV_MAX) {
+ dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
+ channel_id, channel_data->tlv_count, GSI_TLV_MAX);
+ return false;
+ }
+
+ if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
+ dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
+ channel_id, IPA_COMMAND_TRANS_TRE_MAX,
+ channel_data->tlv_count);
+ return false;
+ }
+
+ /* We have to allow at least one maximally-sized transaction to
+ * be outstanding (which would use tlv_count TREs). Given how
+ * gsi_channel_tre_max() is computed, tre_count has to be almost
+ * twice the TLV FIFO size to satisfy this requirement.
+ */
+ if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
+ dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
+ channel_id, channel_data->tlv_count,
+ channel_data->tre_count);
+ return false;
+ }
+
+ if (!is_power_of_2(channel_data->tre_count)) {
+ dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
+ channel_id, channel_data->tre_count);
+ return false;
+ }
+
+ if (!is_power_of_2(channel_data->event_count)) {
+ dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
+ channel_id, channel_data->event_count);
+ return false;
+ }
+
+ return true;
+}
+
+/* Init function for a single channel */
+static int gsi_channel_init_one(struct gsi *gsi,
+ const struct ipa_gsi_endpoint_data *data,
+ bool command)
+{
+ struct gsi_channel *channel;
+ u32 tre_count;
+ int ret;
+
+ if (!gsi_channel_data_valid(gsi, command, data))
+ return -EINVAL;
+
+ /* Worst case we need an event for every outstanding TRE */
+ if (data->channel.tre_count > data->channel.event_count) {
+ tre_count = data->channel.event_count;
+ dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
+ data->channel_id, tre_count);
+ } else {
+ tre_count = data->channel.tre_count;
+ }
+
+ channel = &gsi->channel[data->channel_id];
+ memset(channel, 0, sizeof(*channel));
+
+ channel->gsi = gsi;
+ channel->toward_ipa = data->toward_ipa;
+ channel->command = command;
+ channel->trans_tre_max = data->channel.tlv_count;
+ channel->tre_count = tre_count;
+ channel->event_count = data->channel.event_count;
+
+ ret = gsi_channel_evt_ring_init(channel);
+ if (ret)
+ goto err_clear_gsi;
+
+ ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
+ if (ret) {
+ dev_err(gsi->dev, "error %d allocating channel %u ring\n",
+ ret, data->channel_id);
+ goto err_channel_evt_ring_exit;
+ }
+
+ ret = gsi_channel_trans_init(gsi, data->channel_id);
+ if (ret)
+ goto err_ring_free;
+
+ if (command) {
+ u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
+
+ ret = ipa_cmd_pool_init(channel, tre_max);
+ }
+ if (!ret)
+ return 0; /* Success! */
+
+ gsi_channel_trans_exit(channel);
+err_ring_free:
+ gsi_ring_free(gsi, &channel->tre_ring);
+err_channel_evt_ring_exit:
+ gsi_channel_evt_ring_exit(channel);
+err_clear_gsi:
+ channel->gsi = NULL; /* Mark it not (fully) initialized */
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_init_one() */
+static void gsi_channel_exit_one(struct gsi_channel *channel)
+{
+ if (!gsi_channel_initialized(channel))
+ return;
+
+ if (channel->command)
+ ipa_cmd_pool_exit(channel);
+ gsi_channel_trans_exit(channel);
+ gsi_ring_free(channel->gsi, &channel->tre_ring);
+ gsi_channel_evt_ring_exit(channel);
+}
+
+/* Init function for channels */
+static int gsi_channel_init(struct gsi *gsi, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ bool modem_alloc;
+ int ret = 0;
+ u32 i;
+
+ /* IPA v4.2 requires the AP to allocate channels for the modem */
+ modem_alloc = gsi->version == IPA_VERSION_4_2;
+
+ gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
+ gsi->ieob_enabled_bitmap = 0;
+
+ /* The endpoint data array is indexed by endpoint name */
+ for (i = 0; i < count; i++) {
+ bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
+
+ if (ipa_gsi_endpoint_data_empty(&data[i]))
+ continue; /* Skip over empty slots */
+
+ /* Mark modem channels to be allocated (hardware workaround) */
+ if (data[i].ee_id == GSI_EE_MODEM) {
+ if (modem_alloc)
+ gsi->modem_channel_bitmap |=
+ BIT(data[i].channel_id);
+ continue;
+ }
+
+ ret = gsi_channel_init_one(gsi, &data[i], command);
+ if (ret)
+ goto err_unwind;
+ }
+
+ return ret;
+
+err_unwind:
+ while (i--) {
+ if (ipa_gsi_endpoint_data_empty(&data[i]))
+ continue;
+ if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
+ gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
+ continue;
+ }
+ gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
+ }
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_init() */
+static void gsi_channel_exit(struct gsi *gsi)
+{
+ u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
+
+ do
+ gsi_channel_exit_one(&gsi->channel[channel_id]);
+ while (channel_id--);
+ gsi->modem_channel_bitmap = 0;
+}
+
+/* Init function for GSI. GSI hardware does not need to be "ready" */
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ enum ipa_version version, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ int ret;
+
+ gsi_validate_build();
+
+ gsi->dev = &pdev->dev;
+ gsi->version = version;
+
+ /* GSI uses NAPI on all channels. Create a dummy network device
+ * for the channel NAPI contexts to be associated with.
+ */
+ init_dummy_netdev(&gsi->dummy_dev);
+ init_completion(&gsi->completion);
+
+ ret = gsi_reg_init(gsi, pdev);
+ if (ret)
+ return ret;
+
+ ret = gsi_irq_init(gsi, pdev); /* No matching exit required */
+ if (ret)
+ goto err_reg_exit;
+
+ ret = gsi_channel_init(gsi, count, data);
+ if (ret)
+ goto err_reg_exit;
+
+ mutex_init(&gsi->mutex);
+
+ return 0;
+
+err_reg_exit:
+ gsi_reg_exit(gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_init() */
+void gsi_exit(struct gsi *gsi)
+{
+ mutex_destroy(&gsi->mutex);
+ gsi_channel_exit(gsi);
+ gsi_reg_exit(gsi);
+}
+
+/* The maximum number of outstanding TREs on a channel. This limits
+ * a channel's maximum number of transactions outstanding (worst case
+ * is one TRE per transaction).
+ *
+ * The absolute limit is the number of TREs in the channel's TRE ring,
+ * and in theory we should be able to use all of them. But in practice,
+ * doing that led to the hardware reporting exhaustion of event ring
+ * slots for writing completion information. So the hardware limit
+ * would be (tre_count - 1).
+ *
+ * We reduce it a bit further though. Transaction resource pools are
+ * sized to be a little larger than this maximum, to allow resource
+ * allocations to always be contiguous. The number of entries in a
+ * TRE ring buffer is a power of 2, and the extra resources in a pool
+ * tend to nearly double the memory allocated for it. Reducing the
+ * maximum number of outstanding TREs allows the number of entries in
+ * a pool to avoid crossing that power-of-2 boundary, and this can
+ * substantially reduce pool memory requirements. The number we
+ * reduce it by matches the number added in gsi_trans_pool_init().
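+ *
+ * For example, with a (hypothetical) tre_count of 256 and trans_tre_max
+ * of 8, the limit would be 256 - (8 - 1) = 249 outstanding TREs.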
+ */
+u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ /* Hardware limit is channel->tre_count - 1 */
+ return channel->tre_count - (channel->trans_tre_max - 1);
+}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
new file mode 100644
index 0000000000..42063b227c
--- /dev/null
+++ b/drivers/net/ipa/gsi.h
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2023 Linaro Ltd.
+ */
+#ifndef _GSI_H_
+#define _GSI_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "ipa_version.h"
+
+/* Maximum number of channels and event rings supported by the driver */
+#define GSI_CHANNEL_COUNT_MAX 28
+#define GSI_EVT_RING_COUNT_MAX 28
+
+/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
+#define GSI_TLV_MAX 64
+
+struct device;
+struct scatterlist;
+struct platform_device;
+
+struct gsi;
+struct gsi_trans;
+struct gsi_channel_data;
+struct ipa_gsi_endpoint_data;
+
+struct gsi_ring {
+ void *virt; /* ring array base address */
+ dma_addr_t addr; /* primarily low 32 bits used */
+ u32 count; /* number of elements in ring */
+
+ /* The ring index value indicates the next "open" entry in the ring.
+ *
+ * A channel ring consists of TRE entries filled by the AP and passed
+ * to the hardware for processing. For a channel ring, the ring index
+ * identifies the next unused entry to be filled by the AP. In this
+ * case the initial value is assumed by hardware to be 0.
+ *
+ * An event ring consists of event structures filled by the hardware
+ * and passed to the AP. For event rings, the ring index identifies
+ * the next ring entry that is not known to have been filled by the
+ * hardware. The initial value used is arbitrary (so we use 0).
+ */
+ u32 index;
+};
+
+/* Transactions use several resources that can be allocated dynamically
+ * but taken from a fixed-size pool. The number of elements required for
+ * the pool is limited by the total number of TREs that can be outstanding.
+ *
+ * If sufficient TREs are available to reserve for a transaction,
+ * allocation from these pools is guaranteed to succeed. Furthermore,
+ * these resources are implicitly freed whenever the TREs in the
+ * transaction they're associated with are released.
+ *
+ * The result of a pool allocation of multiple elements is always
+ * contiguous.
+ */
+struct gsi_trans_pool {
+ void *base; /* base address of element pool */
+ u32 count; /* # elements in the pool */
+ u32 free; /* next free element in pool (modulo) */
+ u32 size; /* size (bytes) of an element */
+ u32 max_alloc; /* max allocation request */
+ dma_addr_t addr; /* DMA address if DMA pool (or 0) */
+};
+
+struct gsi_trans_info {
+ atomic_t tre_avail; /* TREs available for allocation */
+
+ u16 free_id; /* first free trans in array */
+ u16 allocated_id; /* first allocated transaction */
+ u16 committed_id; /* first committed transaction */
+ u16 pending_id; /* first pending transaction */
+ u16 completed_id; /* first completed transaction */
+ u16 polled_id; /* first polled transaction */
+ struct gsi_trans *trans; /* transaction array */
+ struct gsi_trans **map; /* TRE -> transaction map */
+
+ struct gsi_trans_pool sg_pool; /* scatterlist pool */
+ struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
+};
+
+/* Hardware values signifying the state of a channel */
+enum gsi_channel_state {
+ GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0,
+ GSI_CHANNEL_STATE_ALLOCATED = 0x1,
+ GSI_CHANNEL_STATE_STARTED = 0x2,
+ GSI_CHANNEL_STATE_STOPPED = 0x3,
+ GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
+ GSI_CHANNEL_STATE_FLOW_CONTROLLED = 0x5, /* IPA v4.2-v4.9 */
+ GSI_CHANNEL_STATE_ERROR = 0xf,
+};
+
+/* We only care about channels between IPA and AP */
+struct gsi_channel {
+ struct gsi *gsi;
+ bool toward_ipa;
+ bool command; /* AP command TX channel or not */
+
+ u8 trans_tre_max; /* max TREs in a transaction */
+ u16 tre_count;
+ u16 event_count;
+
+ struct gsi_ring tre_ring;
+ u32 evt_ring_id;
+
+ /* The following counts are used only for TX endpoints */
+ u64 byte_count; /* total # bytes transferred */
+ u64 trans_count; /* total # transactions */
+ u64 queued_byte_count; /* last reported queued byte count */
+ u64 queued_trans_count; /* ...and queued trans count */
+ u64 compl_byte_count; /* last reported completed byte count */
+ u64 compl_trans_count; /* ...and completed trans count */
+
+ struct gsi_trans_info trans_info;
+
+ struct napi_struct napi;
+};
+
+/* Hardware values signifying the state of an event ring */
+enum gsi_evt_ring_state {
+ GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
+ GSI_EVT_RING_STATE_ALLOCATED = 0x1,
+ GSI_EVT_RING_STATE_ERROR = 0xf,
+};
+
+struct gsi_evt_ring {
+ struct gsi_channel *channel;
+ struct gsi_ring ring;
+};
+
+struct gsi {
+ struct device *dev; /* Same as IPA device */
+ enum ipa_version version;
+ void __iomem *virt; /* I/O mapped registers */
+ const struct regs *regs;
+
+ u32 irq;
+ u32 channel_count;
+ u32 evt_ring_count;
+ u32 event_bitmap; /* allocated event rings */
+ u32 modem_channel_bitmap; /* modem channels to allocate */
+ u32 type_enabled_bitmap; /* GSI IRQ types enabled */
+ u32 ieob_enabled_bitmap; /* IEOB IRQ enabled (event rings) */
+ int result; /* Negative errno (generic commands) */
+ struct completion completion; /* Signals GSI command completion */
+ struct mutex mutex; /* protects commands, programming */
+ struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
+ struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
+ struct net_device dummy_dev; /* needed for NAPI */
+};
+
+/**
+ * gsi_setup() - Set up the GSI subsystem
+ * @gsi: Address of GSI structure embedded in an IPA structure
+ *
+ * Return: 0 if successful, or a negative error code
+ *
+ * Performs initialization that must wait until the GSI hardware is
+ * ready (including firmware loaded).
+ */
+int gsi_setup(struct gsi *gsi);
+
+/**
+ * gsi_teardown() - Tear down GSI subsystem
+ * @gsi: GSI address previously passed to a successful gsi_setup() call
+ */
+void gsi_teardown(struct gsi *gsi);
+
+/**
+ * gsi_channel_tre_max() - Channel maximum number of in-flight TREs
+ * @gsi: GSI pointer
+ * @channel_id: Channel whose limit is to be returned
+ *
+ * Return: The maximum number of TREs outstanding on the channel
+ */
+u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_start() - Start an allocated GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to start
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_stop() - Stop a started GSI channel
+ * @gsi: GSI pointer returned by gsi_setup()
+ * @channel_id: Channel to stop
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_modem_channel_flow_control() - Set channel flow control state (IPA v4.2+)
+ * @gsi: GSI pointer returned by gsi_setup()
+ * @channel_id: Modem TX channel to control
+ * @enable: Whether to enable flow control (i.e., prevent flow)
+ */
+void gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id,
+ bool enable);
+
+/**
+ * gsi_channel_reset() - Reset an allocated GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to be reset
+ * @doorbell: Whether to (possibly) enable the doorbell engine
+ *
+ * Reset a channel and reconfigure it. The @doorbell flag indicates
+ * that the doorbell engine should be enabled if needed.
+ *
+ * GSI hardware relinquishes ownership of all pending receive buffer
+ * transactions and they will complete with their cancelled flag set.
+ */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);
+
+/**
+ * gsi_suspend() - Prepare the GSI subsystem for suspend
+ * @gsi: GSI pointer
+ */
+void gsi_suspend(struct gsi *gsi);
+
+/**
+ * gsi_resume() - Resume the GSI subsystem following suspend
+ * @gsi: GSI pointer
+ */
+void gsi_resume(struct gsi *gsi);
+
+/**
+ * gsi_channel_suspend() - Suspend a GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to suspend
+ *
+ * For IPA v4.0+, suspend is implemented by stopping the channel.
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int gsi_channel_suspend(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_resume() - Resume a suspended GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to resume
+ *
+ * For IPA v4.0+, the stopped channel is started again.
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int gsi_channel_resume(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_init() - Initialize the GSI subsystem
+ * @gsi: Address of GSI structure embedded in an IPA structure
+ * @pdev: IPA platform device
+ * @version: IPA hardware version (implies GSI version)
+ * @count: Number of entries in the configuration data array
+ * @data: Endpoint and channel configuration data
+ *
+ * Return: 0 if successful, or a negative error code
+ *
+ * Early stage initialization of the GSI subsystem, performing tasks
+ * that can be done before the GSI hardware is ready to use.
+ */
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ enum ipa_version version, u32 count,
+ const struct ipa_gsi_endpoint_data *data);
+
+/**
+ * gsi_exit() - Exit the GSI subsystem
+ * @gsi: GSI address previously passed to a successful gsi_init() call
+ */
+void gsi_exit(struct gsi *gsi);
+
+#endif /* _GSI_H_ */
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
new file mode 100644
index 0000000000..c65f7c5cdc
--- /dev/null
+++ b/drivers/net/ipa/gsi_private.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+#ifndef _GSI_PRIVATE_H_
+#define _GSI_PRIVATE_H_
+
+/* === Only "gsi.c" and "gsi_trans.c" should include this file === */
+
+#include <linux/types.h>
+
+struct gsi_trans;
+struct gsi_ring;
+struct gsi_channel;
+
+#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
+
+/**
+ * gsi_trans_move_complete() - Mark a GSI transaction completed
+ * @trans: Transaction whose state is to be updated
+ */
+void gsi_trans_move_complete(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_move_polled() - Mark a transaction polled
+ * @trans: Transaction whose state is to be updated
+ */
+void gsi_trans_move_polled(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_complete() - Complete a GSI transaction
+ * @trans: Transaction to complete
+ *
+ * Marks a transaction complete (including freeing it).
+ */
+void gsi_trans_complete(struct gsi_trans *trans);
+
+/**
+ * gsi_channel_trans_mapped() - Return a transaction mapped to a TRE index
+ * @channel: Channel associated with the transaction
+ * @index: Index of the TRE having a transaction
+ *
+ * Return: The GSI transaction pointer associated with the TRE index
+ */
+struct gsi_trans *gsi_channel_trans_mapped(struct gsi_channel *channel,
+ u32 index);
+
+/**
+ * gsi_channel_trans_complete() - Return a channel's next completed transaction
+ * @channel: Channel whose next transaction is to be returned
+ *
+ * Return: The next completed transaction, or NULL if nothing new
+ */
+struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_trans_cancel_pending() - Cancel pending transactions
+ * @channel: Channel whose pending transactions should be cancelled
+ *
+ * Cancel all pending transactions on a channel. These are transactions
+ * that have been committed but not yet completed. This is required when
+ * the channel gets reset. At that time all pending transactions will be
+ * marked as cancelled.
+ *
+ * NOTE: Transactions already complete at the time of this call are
+ * unaffected.
+ */
+void gsi_channel_trans_cancel_pending(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_trans_init() - Initialize a channel's GSI transaction info
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ *
+ * Return: 0 if successful, or -ENOMEM on allocation failure
+ *
+ * Creates and sets up information for managing transactions on a channel
+ */
+int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_trans_exit() - Inverse of gsi_channel_trans_init()
+ * @channel: Channel whose transaction information is to be cleaned up
+ */
+void gsi_channel_trans_exit(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_doorbell() - Ring a channel's doorbell
+ * @channel: Channel whose doorbell should be rung
+ *
+ * Rings a channel's doorbell to inform the GSI hardware that new
+ * transactions (TREs, really) are available for it to process.
+ */
+void gsi_channel_doorbell(struct gsi_channel *channel);
+
+/* gsi_channel_update() - Update knowledge of channel hardware state
+ * @channel: Channel to be updated
+ *
+ * Consult the hardware and update the state of any newly-completed
+ * transactions on the channel.
+ */
+void gsi_channel_update(struct gsi_channel *channel);
+
+/**
+ * gsi_ring_virt() - Return virtual address for a ring entry
+ * @ring: Ring whose address is to be translated
+ * @index: Index (slot number) of entry
+ */
+void *gsi_ring_virt(struct gsi_ring *ring, u32 index);
+
+/**
+ * gsi_trans_tx_committed() - Record bytes committed for transmit
+ * @trans: TX endpoint transaction being committed
+ *
+ * Report that a TX transaction has been committed. It updates some
+ * statistics used to manage transmit rates.
+ */
+void gsi_trans_tx_committed(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_tx_queued() - Report a queued TX channel transaction
+ * @trans: Transaction being passed to hardware
+ *
+ * Report to the network stack that a TX transaction is being supplied
+ * to the hardware.
+ */
+void gsi_trans_tx_queued(struct gsi_trans *trans);
+
+#endif /* _GSI_PRIVATE_H_ */
diff --git a/drivers/net/ipa/gsi_reg.c b/drivers/net/ipa/gsi_reg.c
new file mode 100644
index 0000000000..c5458e28b1
--- /dev/null
+++ b/drivers/net/ipa/gsi_reg.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include "gsi.h"
+#include "reg.h"
+#include "gsi_reg.h"
+
+/* Is this register ID valid for the current GSI version? */
+static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
+{
+ switch (reg_id) {
+ case INTER_EE_SRC_CH_IRQ_MSK:
+ case INTER_EE_SRC_EV_CH_IRQ_MSK:
+ return gsi->version >= IPA_VERSION_3_5;
+
+ case HW_PARAM_2:
+ return gsi->version >= IPA_VERSION_3_5_1;
+
+ case HW_PARAM_4:
+ return gsi->version >= IPA_VERSION_5_0;
+
+ case CH_C_CNTXT_0:
+ case CH_C_CNTXT_1:
+ case CH_C_CNTXT_2:
+ case CH_C_CNTXT_3:
+ case CH_C_QOS:
+ case CH_C_SCRATCH_0:
+ case CH_C_SCRATCH_1:
+ case CH_C_SCRATCH_2:
+ case CH_C_SCRATCH_3:
+ case EV_CH_E_CNTXT_0:
+ case EV_CH_E_CNTXT_1:
+ case EV_CH_E_CNTXT_2:
+ case EV_CH_E_CNTXT_3:
+ case EV_CH_E_CNTXT_4:
+ case EV_CH_E_CNTXT_8:
+ case EV_CH_E_CNTXT_9:
+ case EV_CH_E_CNTXT_10:
+ case EV_CH_E_CNTXT_11:
+ case EV_CH_E_CNTXT_12:
+ case EV_CH_E_CNTXT_13:
+ case EV_CH_E_SCRATCH_0:
+ case EV_CH_E_SCRATCH_1:
+ case CH_C_DOORBELL_0:
+ case EV_CH_E_DOORBELL_0:
+ case GSI_STATUS:
+ case CH_CMD:
+ case EV_CH_CMD:
+ case GENERIC_CMD:
+ case CNTXT_TYPE_IRQ:
+ case CNTXT_TYPE_IRQ_MSK:
+ case CNTXT_SRC_CH_IRQ:
+ case CNTXT_SRC_CH_IRQ_MSK:
+ case CNTXT_SRC_CH_IRQ_CLR:
+ case CNTXT_SRC_EV_CH_IRQ:
+ case CNTXT_SRC_EV_CH_IRQ_MSK:
+ case CNTXT_SRC_EV_CH_IRQ_CLR:
+ case CNTXT_SRC_IEOB_IRQ:
+ case CNTXT_SRC_IEOB_IRQ_MSK:
+ case CNTXT_SRC_IEOB_IRQ_CLR:
+ case CNTXT_GLOB_IRQ_STTS:
+ case CNTXT_GLOB_IRQ_EN:
+ case CNTXT_GLOB_IRQ_CLR:
+ case CNTXT_GSI_IRQ_STTS:
+ case CNTXT_GSI_IRQ_EN:
+ case CNTXT_GSI_IRQ_CLR:
+ case CNTXT_INTSET:
+ case ERROR_LOG:
+ case ERROR_LOG_CLR:
+ case CNTXT_SCRATCH_0:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id)
+{
+ if (WARN(!gsi_reg_id_valid(gsi, reg_id), "invalid reg %u\n", reg_id))
+ return NULL;
+
+ return reg(gsi->regs, reg_id);
+}
+
+static const struct regs *gsi_regs(struct gsi *gsi)
+{
+ switch (gsi->version) {
+ case IPA_VERSION_3_1:
+ return &gsi_regs_v3_1;
+
+ case IPA_VERSION_3_5_1:
+ return &gsi_regs_v3_5_1;
+
+ case IPA_VERSION_4_2:
+ return &gsi_regs_v4_0;
+
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_7:
+ return &gsi_regs_v4_5;
+
+ case IPA_VERSION_4_9:
+ return &gsi_regs_v4_9;
+
+ case IPA_VERSION_4_11:
+ return &gsi_regs_v4_11;
+
+ case IPA_VERSION_5_0:
+ return &gsi_regs_v5_0;
+
+ default:
+ return NULL;
+ }
+}
+
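+/* For reference, the "gsi" memory range looked up below is expected to be
+ * named in the IPA node's reg-names list in Device Tree.  A minimal,
+ * hypothetical fragment (the address, size, and cell layout are made up):
+ *
+ *	reg = <0x01e04000 0x23000>;
+ *	reg-names = "gsi";
+ */
+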
+/* Sets gsi->virt and I/O maps the "gsi" memory range for registers */
+int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ resource_size_t size;
+
+ /* Get GSI memory range and map it */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
+ if (!res) {
+ dev_err(dev, "DT error getting \"gsi\" memory property\n");
+ return -ENODEV;
+ }
+
+ size = resource_size(res);
+ if (res->start > U32_MAX || size > U32_MAX - res->start) {
+ dev_err(dev, "DT memory resource \"gsi\" out of range\n");
+ return -EINVAL;
+ }
+
+ gsi->regs = gsi_regs(gsi);
+ if (!gsi->regs) {
+ dev_err(dev, "unsupported IPA version %u (?)\n", gsi->version);
+ return -EINVAL;
+ }
+
+ gsi->virt = ioremap(res->start, size);
+ if (!gsi->virt) {
+ dev_err(dev, "unable to remap \"gsi\" memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Inverse of gsi_reg_init() */
+void gsi_reg_exit(struct gsi *gsi)
+{
+ iounmap(gsi->virt);
+ gsi->virt = NULL;
+ gsi->regs = NULL;
+}
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
new file mode 100644
index 0000000000..cf046567f3
--- /dev/null
+++ b/drivers/net/ipa/gsi_reg.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2023 Linaro Ltd.
+ */
+#ifndef _GSI_REG_H_
+#define _GSI_REG_H_
+
+/* === Only "gsi.c" and "gsi_reg.c" should include this file === */
+
+#include <linux/bits.h>
+
+struct platform_device;
+
+struct gsi;
+
+/**
+ * DOC: GSI Registers
+ *
+ * GSI registers are located within the "gsi" address space defined by Device
+ * Tree. The offset of each register within that space is specified by
+ * symbols defined below. The GSI address space is mapped to virtual memory
+ * space in gsi_init(). All GSI registers are 32 bits wide.
+ *
+ * Each register type is duplicated for a number of instances of something.
+ * For example, each GSI channel has its own set of registers defining its
+ * configuration. The offset to a channel's set of registers is computed
+ * based on a "base" offset plus an additional "stride" amount computed
+ * from the channel's ID. For such registers, the offset is computed by a
+ * function-like macro that takes a parameter used in the computation.
+ *
+ * The offset of a register dependent on execution environment is computed
+ * by a macro that is supplied a parameter "ee". The "ee" value is a member
+ * of the gsi_ee_id enumerated type.
+ *
+ * The offset of a channel register is computed by a macro that is supplied a
+ * parameter "ch". The "ch" value is a channel id whose maximum value is 30
+ * (though the actual limit is hardware-dependent).
+ *
+ * The offset of an event register is computed by a macro that is supplied a
+ * parameter "ev". The "ev" value is an event id whose maximum value is 15
+ * (though the actual limit is hardware-dependent).
+ */
+
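+/* Illustrative sketch (not part of this file): for a register replicated
+ * per channel, the per-version register tables declared near the bottom
+ * of this header effectively encode a computation like
+ *
+ *	offset = base + stride * ch;	// e.g. 0x0001c000 + 0x80 * ch
+ *
+ * where the base and stride values shown are hypothetical.  Per-event
+ * ("ev") and per-EE ("ee") registers follow the same pattern with their
+ * own base and stride.
+ */
+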
+/* enum gsi_reg_id - GSI register IDs */
+enum gsi_reg_id {
+ INTER_EE_SRC_CH_IRQ_MSK, /* IPA v3.5+ */
+ INTER_EE_SRC_EV_CH_IRQ_MSK, /* IPA v3.5+ */
+ CH_C_CNTXT_0,
+ CH_C_CNTXT_1,
+ CH_C_CNTXT_2,
+ CH_C_CNTXT_3,
+ CH_C_QOS,
+ CH_C_SCRATCH_0,
+ CH_C_SCRATCH_1,
+ CH_C_SCRATCH_2,
+ CH_C_SCRATCH_3,
+ EV_CH_E_CNTXT_0,
+ EV_CH_E_CNTXT_1,
+ EV_CH_E_CNTXT_2,
+ EV_CH_E_CNTXT_3,
+ EV_CH_E_CNTXT_4,
+ EV_CH_E_CNTXT_8,
+ EV_CH_E_CNTXT_9,
+ EV_CH_E_CNTXT_10,
+ EV_CH_E_CNTXT_11,
+ EV_CH_E_CNTXT_12,
+ EV_CH_E_CNTXT_13,
+ EV_CH_E_SCRATCH_0,
+ EV_CH_E_SCRATCH_1,
+ CH_C_DOORBELL_0,
+ EV_CH_E_DOORBELL_0,
+ GSI_STATUS,
+ CH_CMD,
+ EV_CH_CMD,
+ GENERIC_CMD,
+ HW_PARAM_2, /* IPA v3.5.1+ */
+ HW_PARAM_4, /* IPA v5.0+ */
+ CNTXT_TYPE_IRQ,
+ CNTXT_TYPE_IRQ_MSK,
+ CNTXT_SRC_CH_IRQ,
+ CNTXT_SRC_CH_IRQ_MSK,
+ CNTXT_SRC_CH_IRQ_CLR,
+ CNTXT_SRC_EV_CH_IRQ,
+ CNTXT_SRC_EV_CH_IRQ_MSK,
+ CNTXT_SRC_EV_CH_IRQ_CLR,
+ CNTXT_SRC_IEOB_IRQ,
+ CNTXT_SRC_IEOB_IRQ_MSK,
+ CNTXT_SRC_IEOB_IRQ_CLR,
+ CNTXT_GLOB_IRQ_STTS,
+ CNTXT_GLOB_IRQ_EN,
+ CNTXT_GLOB_IRQ_CLR,
+ CNTXT_GSI_IRQ_STTS,
+ CNTXT_GSI_IRQ_EN,
+ CNTXT_GSI_IRQ_CLR,
+ CNTXT_INTSET,
+ ERROR_LOG,
+ ERROR_LOG_CLR,
+ CNTXT_SCRATCH_0,
+ GSI_REG_ID_COUNT, /* Last; not an ID */
+};
+
+/* CH_C_CNTXT_0 register */
+enum gsi_reg_ch_c_cntxt_0_field_id {
+ CHTYPE_PROTOCOL,
+ CHTYPE_DIR,
+ CH_EE,
+ CHID,
+ CHTYPE_PROTOCOL_MSB, /* IPA v4.5-4.11 */
+ ERINDEX, /* Not IPA v5.0+ */
+ CHSTATE,
+ ELEMENT_SIZE,
+};
+
+/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */
+enum gsi_channel_type {
+ GSI_CHANNEL_TYPE_MHI = 0x0,
+ GSI_CHANNEL_TYPE_XHCI = 0x1,
+ GSI_CHANNEL_TYPE_GPI = 0x2,
+ GSI_CHANNEL_TYPE_XDCI = 0x3,
+ GSI_CHANNEL_TYPE_WDI2 = 0x4,
+ GSI_CHANNEL_TYPE_GCI = 0x5,
+ GSI_CHANNEL_TYPE_WDI3 = 0x6,
+ GSI_CHANNEL_TYPE_MHIP = 0x7,
+ GSI_CHANNEL_TYPE_AQC = 0x8,
+ GSI_CHANNEL_TYPE_11AD = 0x9,
+};
+
+/* CH_C_CNTXT_1 register */
+enum gsi_reg_ch_c_cntxt_1_field_id {
+ CH_R_LENGTH,
+ CH_ERINDEX, /* IPA v5.0+ */
+};
+
+/* CH_C_QOS register */
+enum gsi_reg_ch_c_qos_field_id {
+ WRR_WEIGHT,
+ MAX_PREFETCH,
+ USE_DB_ENG,
+ USE_ESCAPE_BUF_ONLY, /* IPA v4.0-4.2 */
+ PREFETCH_MODE, /* IPA v4.5+ */
+ EMPTY_LVL_THRSHOLD, /* IPA v4.5+ */
+ DB_IN_BYTES, /* IPA v4.9+ */
+ LOW_LATENCY_EN, /* IPA v5.0+ */
+};
+
+/** enum gsi_prefetch_mode - PREFETCH_MODE field in CH_C_QOS */
+enum gsi_prefetch_mode {
+ USE_PREFETCH_BUFS = 0,
+ ESCAPE_BUF_ONLY = 1,
+ SMART_PREFETCH = 2,
+ FREE_PREFETCH = 3,
+};
+
+/* EV_CH_E_CNTXT_0 register */
+enum gsi_reg_ch_c_ev_ch_e_cntxt_0_field_id {
+ EV_CHTYPE, /* enum gsi_channel_type */
+ EV_EE, /* enum gsi_ee_id; always GSI_EE_AP for us */
+ EV_EVCHID,
+ EV_INTYPE,
+ EV_CHSTATE,
+ EV_ELEMENT_SIZE,
+};
+
+/* EV_CH_E_CNTXT_1 register */
+enum gsi_reg_ev_ch_c_cntxt_1_field_id {
+ R_LENGTH,
+};
+
+/* EV_CH_E_CNTXT_8 register */
+enum gsi_reg_ch_c_ev_ch_e_cntxt_8_field_id {
+ EV_MODT,
+ EV_MODC,
+ EV_MOD_CNT,
+};
+
+/* GSI_STATUS register */
+enum gsi_reg_gsi_status_field_id {
+ ENABLED,
+};
+
+/* CH_CMD register */
+enum gsi_reg_gsi_ch_cmd_field_id {
+ CH_CHID,
+ CH_OPCODE,
+};
+
+/** enum gsi_ch_cmd_opcode - CH_OPCODE field values in CH_CMD */
+enum gsi_ch_cmd_opcode {
+ GSI_CH_ALLOCATE = 0x0,
+ GSI_CH_START = 0x1,
+ GSI_CH_STOP = 0x2,
+ GSI_CH_RESET = 0x9,
+ GSI_CH_DE_ALLOC = 0xa,
+ GSI_CH_DB_STOP = 0xb,
+};
+
+/* EV_CH_CMD register */
+enum gsi_ev_ch_cmd_field_id {
+ EV_CHID,
+ EV_OPCODE,
+};
+
+/** enum gsi_evt_cmd_opcode - EV_OPCODE field values in EV_CH_CMD */
+enum gsi_evt_cmd_opcode {
+ GSI_EVT_ALLOCATE = 0x0,
+ GSI_EVT_RESET = 0x9,
+ GSI_EVT_DE_ALLOC = 0xa,
+};
+
+/* GENERIC_CMD register */
+enum gsi_generic_cmd_field_id {
+ GENERIC_OPCODE,
+ GENERIC_CHID,
+ GENERIC_EE,
+ GENERIC_PARAMS, /* IPA v4.11+ */
+};
+
+/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */
+enum gsi_generic_cmd_opcode {
+ GSI_GENERIC_HALT_CHANNEL = 0x1,
+ GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
+ GSI_GENERIC_ENABLE_FLOW_CONTROL = 0x3, /* IPA v4.2+ */
+ GSI_GENERIC_DISABLE_FLOW_CONTROL = 0x4, /* IPA v4.2+ */
+ GSI_GENERIC_QUERY_FLOW_CONTROL = 0x5, /* IPA v4.11+ */
+};
+
+/* HW_PARAM_2 register */ /* IPA v3.5.1+ */
+enum gsi_hw_param_2_field_id {
+ IRAM_SIZE,
+ NUM_CH_PER_EE,
+ NUM_EV_PER_EE, /* Not IPA v5.0+ */
+ GSI_CH_PEND_TRANSLATE,
+ GSI_CH_FULL_LOGIC,
+ GSI_USE_SDMA, /* IPA v4.0+ */
+ GSI_SDMA_N_INT, /* IPA v4.0+ */
+ GSI_SDMA_MAX_BURST, /* IPA v4.0+ */
+ GSI_SDMA_N_IOVEC, /* IPA v4.0+ */
+ GSI_USE_RD_WR_ENG, /* IPA v4.2+ */
+ GSI_USE_INTER_EE, /* IPA v4.2+ */
+};
+
+/** enum gsi_iram_size - IRAM_SIZE field values in HW_PARAM_2 */
+enum gsi_iram_size {
+ IRAM_SIZE_ONE_KB = 0x0,
+ IRAM_SIZE_TWO_KB = 0x1,
+ /* The next two values are available for IPA v4.0 and above */
+ IRAM_SIZE_TWO_N_HALF_KB = 0x2,
+ IRAM_SIZE_THREE_KB = 0x3,
+ /* The next two values are available for IPA v4.5 and above */
+ IRAM_SIZE_THREE_N_HALF_KB = 0x4,
+ IRAM_SIZE_FOUR_KB = 0x5,
+};
+
+/* HW_PARAM_4 register */ /* IPA v5.0+ */
+enum gsi_hw_param_4_field_id {
+ EV_PER_EE,
+ IRAM_PROTOCOL_COUNT,
+};
+
+/**
+ * enum gsi_irq_type_id: GSI IRQ types
+ * @GSI_CH_CTRL: Channel allocation, deallocation, etc.
+ * @GSI_EV_CTRL: Event ring allocation, deallocation, etc.
+ * @GSI_GLOB_EE: Global/general event
+ * @GSI_IEOB: Transfer (TRE) completion
+ * @GSI_INTER_EE_CH_CTRL: Remote-issued stop/reset (unused)
+ * @GSI_INTER_EE_EV_CTRL: Remote-issued event reset (unused)
+ * @GSI_GENERAL: General hardware event (bus error, etc.)
+ */
+enum gsi_irq_type_id {
+ GSI_CH_CTRL = BIT(0),
+ GSI_EV_CTRL = BIT(1),
+ GSI_GLOB_EE = BIT(2),
+ GSI_IEOB = BIT(3),
+ GSI_INTER_EE_CH_CTRL = BIT(4),
+ GSI_INTER_EE_EV_CTRL = BIT(5),
+ GSI_GENERAL = BIT(6),
+ /* IRQ types 7-31 (and their bit values) are reserved */
+};
+
+/** enum gsi_global_irq_id: Global GSI interrupt events */
+enum gsi_global_irq_id {
+ ERROR_INT = BIT(0),
+ GP_INT1 = BIT(1),
+ GP_INT2 = BIT(2),
+ GP_INT3 = BIT(3),
+ /* Global IRQ types 4-31 (and their bit values) are reserved */
+};
+
+/** enum gsi_general_irq_id: GSI general IRQ conditions */
+enum gsi_general_irq_id {
+ BREAK_POINT = BIT(0),
+ BUS_ERROR = BIT(1),
+ CMD_FIFO_OVRFLOW = BIT(2),
+ MCS_STACK_OVRFLOW = BIT(3),
+ /* General IRQ types 4-31 (and their bit values) are reserved */
+};
+
+/* CNTXT_INTSET register */
+enum gsi_cntxt_intset_field_id {
+ INTYPE,
+};
+
+/* ERROR_LOG register */
+enum gsi_error_log_field_id {
+ ERR_ARG3,
+ ERR_ARG2,
+ ERR_ARG1,
+ ERR_CODE,
+ ERR_VIRT_IDX,
+ ERR_TYPE,
+ ERR_EE,
+};
+
+/** enum gsi_err_code - ERR_CODE field values in EE_ERR_LOG */
+enum gsi_err_code {
+ GSI_INVALID_TRE = 0x1,
+ GSI_OUT_OF_BUFFERS = 0x2,
+ GSI_OUT_OF_RESOURCES = 0x3,
+ GSI_UNSUPPORTED_INTER_EE_OP = 0x4,
+ GSI_EVT_RING_EMPTY = 0x5,
+ GSI_NON_ALLOCATED_EVT_ACCESS = 0x6,
+ /* 7 is not assigned */
+ GSI_HWO_1 = 0x8,
+};
+
+/** enum gsi_err_type - ERR_TYPE field values in EE_ERR_LOG */
+enum gsi_err_type {
+ GSI_ERR_TYPE_GLOB = 0x1,
+ GSI_ERR_TYPE_CHAN = 0x2,
+ GSI_ERR_TYPE_EVT = 0x3,
+};
+
+/* CNTXT_SCRATCH_0 register */
+enum gsi_cntxt_scratch_0_field_id {
+ INTER_EE_RESULT,
+ GENERIC_EE_RESULT,
+};
+
+/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */
+enum gsi_generic_ee_result {
+ GENERIC_EE_SUCCESS = 0x1,
+ GENERIC_EE_INCORRECT_CHANNEL_STATE = 0x2,
+ GENERIC_EE_INCORRECT_DIRECTION = 0x3,
+ GENERIC_EE_INCORRECT_CHANNEL_TYPE = 0x4,
+ GENERIC_EE_INCORRECT_CHANNEL = 0x5,
+ GENERIC_EE_RETRY = 0x6,
+ GENERIC_EE_NO_RESOURCES = 0x7,
+};
+
+extern const struct regs gsi_regs_v3_1;
+extern const struct regs gsi_regs_v3_5_1;
+extern const struct regs gsi_regs_v4_0;
+extern const struct regs gsi_regs_v4_5;
+extern const struct regs gsi_regs_v4_9;
+extern const struct regs gsi_regs_v4_11;
+extern const struct regs gsi_regs_v5_0;
+
+/**
+ * gsi_reg() - Return the structure describing a GSI register
+ * @gsi: GSI pointer
+ * @reg_id: GSI register ID
+ */
+const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id);
+
+/**
+ * gsi_reg_init() - Perform GSI register initialization
+ * @gsi: GSI pointer
+ * @pdev: GSI (IPA) platform device
+ *
+ * Initialize GSI registers, including looking up and I/O mapping
+ * the "gsi" memory space.
+ */
+int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev);
+
+/**
+ * gsi_reg_exit() - Inverse of gsi_reg_init()
+ * @gsi: GSI pointer
+ */
+void gsi_reg_exit(struct gsi *gsi);
+
+#endif /* _GSI_REG_H_ */
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
new file mode 100644
index 0000000000..ee6fb00b71
--- /dev/null
+++ b/drivers/net/ipa/gsi_trans.c
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2022 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_private.h"
+#include "gsi_trans.h"
+#include "ipa_gsi.h"
+#include "ipa_data.h"
+#include "ipa_cmd.h"
+
+/**
+ * DOC: GSI Transactions
+ *
+ * A GSI transaction abstracts the behavior of a GSI channel by representing
+ * everything about a related group of IPA operations in a single structure.
+ * (A "operation" in this sense is either a data transfer or an IPA immediate
+ * command.) Most details of interaction with the GSI hardware are managed
+ * by the GSI transaction core, allowing users to simply describe operations
+ * to be performed. When a transaction has completed a callback function
+ * (dependent on the type of endpoint associated with the channel) allows
+ * cleanup of resources associated with the transaction.
+ *
+ * To perform an operation (or set of them), a user of the GSI transaction
+ * interface allocates a transaction, indicating the number of TREs required
+ * (one per operation). If sufficient TREs are available, they are reserved
+ * for use in the transaction and the allocation succeeds. This way
+ * exhaustion of the available TREs in a channel ring is detected as early
+ * as possible. Any other resources that might be needed to complete a
+ * transaction are also allocated when the transaction is allocated.
+ *
+ * Operations performed as part of a transaction are represented in an array
+ * of Linux scatterlist structures, allocated with the transaction. These
+ * scatterlist structures are initialized by "adding" operations to the
+ * transaction. If a buffer in an operation must be mapped for DMA, this is
+ * done at the time it is added to the transaction. It is possible for a
+ * mapping error to occur when an operation is added. In this case the
+ * transaction should simply be freed; this correctly releases resources
+ * associated with the transaction.
+ *
+ * Once all operations have been successfully added to a transaction, the
+ * transaction is committed. Committing transfers ownership of the entire
+ * transaction to the GSI transaction core. The GSI transaction code
+ * formats the content of the scatterlist array into the channel ring
+ * buffer and informs the hardware that new TREs are available to process.
+ *
+ * The last TRE in each transaction is marked to interrupt the AP when the
+ * GSI hardware has completed it. Because transfers described by TREs are
+ * performed strictly in order, signaling the completion of just the last
+ * TRE in the transaction is sufficient to indicate the full transaction
+ * is complete.
+ *
+ * When a transaction is complete, ipa_gsi_trans_complete() is called by the
+ * GSI code into the IPA layer, allowing it to perform any final cleanup
+ * required before the transaction is freed.
+ */
+
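+/* Illustrative caller-side flow for the description above, using only
+ * interfaces declared in gsi_trans.h (error handling is abbreviated and
+ * the channel/page values are hypothetical):
+ *
+ *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_FROM_DEVICE);
+ *	if (!trans)
+ *		return -EBUSY;			// no TREs available
+ *	if (gsi_trans_page_add(trans, page, size, offset))
+ *		gsi_trans_free(trans);		// mapping failed; just free it
+ *	else
+ *		gsi_trans_commit(trans, true);	// hand it off to hardware
+ */
+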
+/* Hardware values representing a transfer element type */
+enum gsi_tre_type {
+ GSI_RE_XFER = 0x2,
+ GSI_RE_IMMD_CMD = 0x3,
+};
+
+/* An entry in a channel ring */
+struct gsi_tre {
+ __le64 addr; /* DMA address */
+ __le16 len_opcode; /* length in bytes or enum IPA_CMD_* */
+ __le16 reserved;
+ __le32 flags; /* TRE_FLAGS_* */
+};
+
+/* gsi_tre->flags mask values (in CPU byte order) */
+#define TRE_FLAGS_CHAIN_FMASK GENMASK(0, 0)
+#define TRE_FLAGS_IEOT_FMASK GENMASK(9, 9)
+#define TRE_FLAGS_BEI_FMASK GENMASK(10, 10)
+#define TRE_FLAGS_TYPE_FMASK GENMASK(23, 16)
+
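+/* Worked example derived from the masks above: the last TRE of a TX data
+ * transfer with its block event interrupt suppressed carries type
+ * GSI_RE_XFER (0x2) in bits 16-23 plus the IEOT and BEI bits, i.e.
+ * flags = 0x00020000 | 0x00000200 | 0x00000400 = 0x00020600 (CPU byte
+ * order, before the cpu_to_le32() conversion in gsi_tre_flags()).
+ */
+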
+int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
+ u32 max_alloc)
+{
+ size_t alloc_size;
+ void *virt;
+
+ if (!size)
+ return -EINVAL;
+ if (count < max_alloc)
+ return -EINVAL;
+ if (!max_alloc)
+ return -EINVAL;
+
+ /* By allocating a few extra entries in our pool (one less
+ * than the maximum number that will be requested in a
+ * single allocation), we can always satisfy requests without
+ * ever worrying about straddling the end of the pool array.
+ * If there aren't enough entries starting at the free index,
+ * we just allocate free entries from the beginning of the pool.
+ */
+ alloc_size = size_mul(count + max_alloc - 1, size);
+ alloc_size = kmalloc_size_roundup(alloc_size);
+ virt = kzalloc(alloc_size, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ pool->base = virt;
+ /* If the allocator gave us any extra memory, use it */
+ pool->count = alloc_size / size;
+ pool->free = 0;
+ pool->max_alloc = max_alloc;
+ pool->size = size;
+ pool->addr = 0; /* Only used for DMA pools */
+
+ return 0;
+}
+
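+/* Sizing example for the pool above (numbers are hypothetical): with
+ * count = 64, max_alloc = 8 and size = 24 bytes, the request is
+ * (64 + 8 - 1) * 24 = 1704 bytes; kmalloc_size_roundup() may bump that
+ * up to the next slab bucket (e.g. 2048 bytes), in which case pool->count
+ * ends up as 2048 / 24 = 85 usable entries.
+ */
+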
+void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
+{
+ kfree(pool->base);
+ memset(pool, 0, sizeof(*pool));
+}
+
+/* Home-grown DMA pool. This way we can preallocate the pool, and guarantee
+ * allocations will succeed. The immediate commands in a transaction can
+ * require up to max_alloc elements from the pool. But we only allow
+ * allocation of a single element from a DMA pool at a time.
+ */
+int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ size_t size, u32 count, u32 max_alloc)
+{
+ size_t total_size;
+ dma_addr_t addr;
+ void *virt;
+
+ if (!size)
+ return -EINVAL;
+ if (count < max_alloc)
+ return -EINVAL;
+ if (!max_alloc)
+ return -EINVAL;
+
+ /* Don't let allocations cross a power-of-two boundary */
+ size = __roundup_pow_of_two(size);
+ total_size = (count + max_alloc - 1) * size;
+
+ /* The allocator will give us a power-of-2 number of pages
+ * sufficient to satisfy our request. Round up our requested
+ * size to avoid any unused space in the allocation. This way
+ * gsi_trans_pool_exit_dma() can assume the total allocated
+ * size is exactly (count * size).
+ */
+ total_size = PAGE_SIZE << get_order(total_size);
+
+ virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ pool->base = virt;
+ pool->count = total_size / size;
+ pool->free = 0;
+ pool->size = size;
+ pool->max_alloc = max_alloc;
+ pool->addr = addr;
+
+ return 0;
+}
+
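+/* Sizing example for the DMA pool above (hypothetical numbers, assuming
+ * 4 KiB pages): a 20-byte element is first rounded up to 32 bytes so no
+ * element straddles a power-of-two boundary.  With count = 256 and
+ * max_alloc = 8 the raw total is (256 + 8 - 1) * 32 = 8416 bytes, which
+ * get_order() rounds up to 4 pages (16384 bytes), leaving
+ * 16384 / 32 = 512 usable entries.
+ */
+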
+void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
+{
+ size_t total_size = pool->count * pool->size;
+
+ dma_free_coherent(dev, total_size, pool->base, pool->addr);
+ memset(pool, 0, sizeof(*pool));
+}
+
+/* Return the byte offset of the next free entry in the pool */
+static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
+{
+ u32 offset;
+
+ WARN_ON(!count);
+ WARN_ON(count > pool->max_alloc);
+
+ /* Allocate from beginning if wrap would occur */
+ if (count > pool->count - pool->free)
+ pool->free = 0;
+
+ offset = pool->free * pool->size;
+ pool->free += count;
+ memset(pool->base + offset, 0, count * pool->size);
+
+ return offset;
+}
+
+/* Allocate a contiguous block of zeroed entries from a pool */
+void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
+{
+ return pool->base + gsi_trans_pool_alloc_common(pool, count);
+}
+
+/* Allocate a single zeroed entry from a DMA pool */
+void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
+{
+ u32 offset = gsi_trans_pool_alloc_common(pool, 1);
+
+ *addr = pool->addr + offset;
+
+ return pool->base + offset;
+}
+
+/* Map a TRE ring entry index to the transaction it is associated with */
+static void gsi_trans_map(struct gsi_trans *trans, u32 index)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+
+ /* The completion event will indicate the last TRE used */
+ index += trans->used_count - 1;
+
+ /* Note: index *must* be used modulo the ring count here */
+ channel->trans_info.map[index % channel->tre_ring.count] = trans;
+}
+
+/* Return the transaction mapped to a given ring entry */
+struct gsi_trans *
+gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ return channel->trans_info.map[index % channel->tre_ring.count];
+}
+
+/* Return the oldest completed transaction for a channel (or null) */
+struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u16 trans_id = trans_info->completed_id;
+
+ if (trans_id == trans_info->pending_id) {
+ gsi_channel_update(channel);
+ if (trans_id == trans_info->pending_id)
+ return NULL;
+ }
+
+ return &trans_info->trans[trans_id %= channel->tre_count];
+}
+
+/* Move a transaction from allocated to committed state */
+static void gsi_trans_move_committed(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ /* This allocated transaction is now committed */
+ trans_info->allocated_id++;
+}
+
+/* Move committed transactions to pending state */
+static void gsi_trans_move_pending(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u16 trans_index = trans - &trans_info->trans[0];
+ u16 delta;
+
+ /* These committed transactions are now pending */
+ delta = trans_index - trans_info->committed_id + 1;
+ trans_info->committed_id += delta % channel->tre_count;
+}
+
+/* Move pending transactions to completed state */
+void gsi_trans_move_complete(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u16 trans_index = trans - trans_info->trans;
+ u16 delta;
+
+ /* These pending transactions are now completed */
+ delta = trans_index - trans_info->pending_id + 1;
+ delta %= channel->tre_count;
+ trans_info->pending_id += delta;
+}
+
+/* Move a transaction from completed to polled state */
+void gsi_trans_move_polled(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ /* This completed transaction is now polled */
+ trans_info->completed_id++;
+}
+
+/* Reserve some number of TREs on a channel. Returns true if successful */
+static bool
+gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
+{
+ int avail = atomic_read(&trans_info->tre_avail);
+ int new;
+
+ do {
+ new = avail - (int)tre_count;
+ if (unlikely(new < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));
+
+ return true;
+}
+
+/* Release previously-reserved TRE entries to a channel */
+static void
+gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
+{
+ atomic_add(tre_count, &trans_info->tre_avail);
+}
+
+/* Return true if no transactions are allocated, false otherwise */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
+{
+ u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
+ struct gsi_trans_info *trans_info;
+
+ trans_info = &gsi->channel[channel_id].trans_info;
+
+ return atomic_read(&trans_info->tre_avail) == tre_max;
+}
+
+/* Allocate a GSI transaction on a channel */
+struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
+ u32 tre_count,
+ enum dma_data_direction direction)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_trans_info *trans_info;
+ struct gsi_trans *trans;
+ u16 trans_index;
+
+ if (WARN_ON(tre_count > channel->trans_tre_max))
+ return NULL;
+
+ trans_info = &channel->trans_info;
+
+ /* If we can't reserve the TREs for the transaction, we're done */
+ if (!gsi_trans_tre_reserve(trans_info, tre_count))
+ return NULL;
+
+ trans_index = trans_info->free_id % channel->tre_count;
+ trans = &trans_info->trans[trans_index];
+ memset(trans, 0, sizeof(*trans));
+
+ /* Initialize non-zero fields in the transaction */
+ trans->gsi = gsi;
+ trans->channel_id = channel_id;
+ trans->rsvd_count = tre_count;
+ init_completion(&trans->completion);
+
+ /* Allocate the scatterlist */
+ trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
+ sg_init_marker(trans->sgl, tre_count);
+
+ trans->direction = direction;
+ refcount_set(&trans->refcount, 1);
+
+ /* This free transaction is now allocated */
+ trans_info->free_id++;
+
+ return trans;
+}
+
+/* Free a previously-allocated transaction */
+void gsi_trans_free(struct gsi_trans *trans)
+{
+ struct gsi_trans_info *trans_info;
+
+ if (!refcount_dec_and_test(&trans->refcount))
+ return;
+
+ /* Unused transactions are allocated but never committed, pending,
+ * completed, or polled.
+ */
+ trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
+ if (!trans->used_count) {
+ trans_info->allocated_id++;
+ trans_info->committed_id++;
+ trans_info->pending_id++;
+ trans_info->completed_id++;
+ } else {
+ ipa_gsi_trans_release(trans);
+ }
+
+ /* This transaction is now free */
+ trans_info->polled_id++;
+
+ /* Releasing the reserved TREs implicitly frees the sgl[] and
+ * (if present) info[] arrays, plus the transaction itself.
+ */
+ gsi_trans_tre_release(trans_info, trans->rsvd_count);
+}
+
+/* Add an immediate command to a transaction */
+void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
+ dma_addr_t addr, enum ipa_cmd_opcode opcode)
+{
+ u32 which = trans->used_count++;
+ struct scatterlist *sg;
+
+ WARN_ON(which >= trans->rsvd_count);
+
+ /* Commands are quite different from data transfer requests.
+ * Their payloads come from a pool whose memory is allocated
+ * using dma_alloc_coherent(). We therefore do *not* map them
+ * for DMA (unlike what we do for pages and skbs).
+ *
+ * When a transaction completes, the SGL is normally unmapped.
+ * A command transaction has direction DMA_NONE, which tells
+ * gsi_trans_complete() to skip the unmapping step.
+ *
+ * The only things we use directly in a command scatter/gather
+ * entry are the DMA address and length. We still need the SG
+ * table flags to be maintained though, so assign a NULL page
+ * pointer for that purpose.
+ */
+ sg = &trans->sgl[which];
+ sg_assign_page(sg, NULL);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = size;
+
+ trans->cmd_opcode[which] = opcode;
+}
+
+/* Add a page transfer to a transaction. It will fill the only TRE. */
+int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
+ u32 offset)
+{
+ struct scatterlist *sg = &trans->sgl[0];
+ int ret;
+
+ if (WARN_ON(trans->rsvd_count != 1))
+ return -EINVAL;
+ if (WARN_ON(trans->used_count))
+ return -EINVAL;
+
+ sg_set_page(sg, page, size, offset);
+ ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
+ if (!ret)
+ return -ENOMEM;
+
+ trans->used_count++; /* Transaction now owns the (DMA mapped) page */
+
+ return 0;
+}
+
+/* Add an SKB transfer to a transaction. No other TREs will be used. */
+int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
+{
+ struct scatterlist *sg = &trans->sgl[0];
+ u32 used_count;
+ int ret;
+
+ if (WARN_ON(trans->rsvd_count != 1))
+ return -EINVAL;
+ if (WARN_ON(trans->used_count))
+ return -EINVAL;
+
+ /* skb->len will not be 0 (checked early) */
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (ret < 0)
+ return ret;
+ used_count = ret;
+
+ ret = dma_map_sg(trans->gsi->dev, sg, used_count, trans->direction);
+ if (!ret)
+ return -ENOMEM;
+
+ /* Transaction now owns the (DMA mapped) skb */
+ trans->used_count += used_count;
+
+ return 0;
+}
+
+/* Compute the length/opcode value to use for a TRE */
+static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
+{
+ return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
+ : cpu_to_le16((u16)opcode);
+}
+
+/* Compute the flags value to use for a given TRE */
+static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
+{
+ enum gsi_tre_type tre_type;
+ u32 tre_flags;
+
+ tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
+ tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);
+
+ /* Last TRE contains interrupt flags */
+ if (last_tre) {
+ /* All transactions end in a transfer completion interrupt */
+ tre_flags |= TRE_FLAGS_IEOT_FMASK;
+ /* Don't interrupt when outbound commands are acknowledged */
+ if (bei)
+ tre_flags |= TRE_FLAGS_BEI_FMASK;
+ } else { /* All others indicate there's more to come */
+ tre_flags |= TRE_FLAGS_CHAIN_FMASK;
+ }
+
+ return cpu_to_le32(tre_flags);
+}
+
+static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
+ u32 len, bool last_tre, bool bei,
+ enum ipa_cmd_opcode opcode)
+{
+ struct gsi_tre tre;
+
+ tre.addr = cpu_to_le64(addr);
+ tre.len_opcode = gsi_tre_len_opcode(opcode, len);
+ tre.reserved = 0;
+ tre.flags = gsi_tre_flags(last_tre, bei, opcode);
+
+ /* ARM64 can write 16 bytes as a unit with a single instruction.
+ * Doing the assignment this way is an attempt to make that happen.
+ */
+ *dest_tre = tre;
+}
+
+/**
+ * __gsi_trans_commit() - Common GSI transaction commit code
+ * @trans: Transaction to commit
+ * @ring_db: Whether to tell the hardware about these queued transfers
+ *
+ * Formats channel ring TRE entries based on the content of the scatterlist.
+ * Maps a transaction pointer to the last ring entry used for the transaction,
+ * so it can be recovered when it completes. Moves the transaction to
+ * committed state (and on to pending state if the doorbell is rung).
+ * Finally, updates the channel ring pointer and optionally rings the
+ * doorbell.
+ */
+static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_ring *tre_ring = &channel->tre_ring;
+ enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
+ bool bei = channel->toward_ipa;
+ struct gsi_tre *dest_tre;
+ struct scatterlist *sg;
+ u32 byte_count = 0;
+ u8 *cmd_opcode;
+ u32 avail;
+ u32 i;
+
+ WARN_ON(!trans->used_count);
+
+ /* Consume the entries. If we cross the end of the ring while
+ * filling them we'll switch to the beginning to finish.
+ * If this isn't a command channel there is no opcode array,
+ * and every request is a simple data transfer whose opcode
+ * is IPA_CMD_NONE.
+ */
+ cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
+ avail = tre_ring->count - tre_ring->index % tre_ring->count;
+ dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
+ for_each_sg(trans->sgl, sg, trans->used_count, i) {
+ bool last_tre = i == trans->used_count - 1;
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 len = sg_dma_len(sg);
+
+ byte_count += len;
+ if (!avail--)
+ dest_tre = gsi_ring_virt(tre_ring, 0);
+ if (cmd_opcode)
+ opcode = *cmd_opcode++;
+
+ gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
+ dest_tre++;
+ }
+ /* Associate the TRE with the transaction */
+ gsi_trans_map(trans, tre_ring->index);
+
+ tre_ring->index += trans->used_count;
+
+ trans->len = byte_count;
+ if (channel->toward_ipa)
+ gsi_trans_tx_committed(trans);
+
+ gsi_trans_move_committed(trans);
+
+ /* Ring doorbell if requested, or if all TREs are allocated */
+ if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
+ /* Report what we're handing off to hardware for TX channels */
+ if (channel->toward_ipa)
+ gsi_trans_tx_queued(trans);
+ gsi_trans_move_pending(trans);
+ gsi_channel_doorbell(channel);
+ }
+}
+
+/* Commit a GSI transaction */
+void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
+{
+ if (trans->used_count)
+ __gsi_trans_commit(trans, ring_db);
+ else
+ gsi_trans_free(trans);
+}
+
+/* Commit a GSI transaction and wait for it to complete */
+void gsi_trans_commit_wait(struct gsi_trans *trans)
+{
+ if (!trans->used_count)
+ goto out_trans_free;
+
+ refcount_inc(&trans->refcount);
+
+ __gsi_trans_commit(trans, true);
+
+ wait_for_completion(&trans->completion);
+
+out_trans_free:
+ gsi_trans_free(trans);
+}
+
+/* Process the completion of a transaction; called while polling */
+void gsi_trans_complete(struct gsi_trans *trans)
+{
+ /* If the entire SGL was mapped when added, unmap it now */
+ if (trans->direction != DMA_NONE)
+ dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used_count,
+ trans->direction);
+
+ ipa_gsi_trans_complete(trans);
+
+ complete(&trans->completion);
+
+ gsi_trans_free(trans);
+}
+
+/* Cancel a channel's pending transactions */
+void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u16 trans_id = trans_info->pending_id;
+
+ /* channel->gsi->mutex is held by caller */
+
+ /* If there are no pending transactions, we're done */
+ if (trans_id == trans_info->committed_id)
+ return;
+
+ /* Mark all pending transactions cancelled */
+ do {
+ struct gsi_trans *trans;
+
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ trans->cancelled = true;
+ } while (++trans_id != trans_info->committed_id);
+
+ /* All pending transactions are now completed */
+ trans_info->pending_id = trans_info->committed_id;
+
+ /* Schedule NAPI polling to complete the cancelled transactions */
+ napi_schedule(&channel->napi);
+}
+
+/* Issue a command to read a single byte from a channel */
+int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_ring *tre_ring = &channel->tre_ring;
+ struct gsi_trans_info *trans_info;
+ struct gsi_tre *dest_tre;
+
+ trans_info = &channel->trans_info;
+
+ /* First reserve the TRE, if possible */
+ if (!gsi_trans_tre_reserve(trans_info, 1))
+ return -EBUSY;
+
+ /* Now fill the reserved TRE and tell the hardware */
+
+ dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
+ gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);
+
+ tre_ring->index++;
+ gsi_channel_doorbell(channel);
+
+ return 0;
+}
+
+/* Mark a gsi_trans_read_byte() request done */
+void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ gsi_trans_tre_release(&channel->trans_info, 1);
+}
+
+/* Initialize a channel's GSI transaction info */
+int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 tre_count = channel->tre_count;
+ struct gsi_trans_info *trans_info;
+ u32 tre_max;
+ int ret;
+
+ /* Ensure the size of a channel element is what's expected */
+ BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);
+
+ trans_info = &channel->trans_info;
+
+ /* The tre_avail field is what ultimately limits the number of
+ * outstanding transactions and their resources. A transaction
+ * allocation succeeds only if the TREs available are sufficient
+ * for what the transaction might need.
+ */
+ tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
+ atomic_set(&trans_info->tre_avail, tre_max);
+
+ /* We can't use more TREs than the number available in the ring.
+ * This limits the number of transactions that can be outstanding.
+ * Worst case is one TRE per transaction (but we actually limit
+ * it to something a little less than that). By allocating a
+ * power-of-two number of transactions we can use an index
+ * modulo that number to determine the next one that's free.
+ * Transactions are allocated one at a time.
+ */
+ trans_info->trans = kcalloc(tre_count, sizeof(*trans_info->trans),
+ GFP_KERNEL);
+ if (!trans_info->trans)
+ return -ENOMEM;
+ trans_info->free_id = 0; /* all modulo channel->tre_count */
+ trans_info->allocated_id = 0;
+ trans_info->committed_id = 0;
+ trans_info->pending_id = 0;
+ trans_info->completed_id = 0;
+ trans_info->polled_id = 0;
+
+ /* A completion event contains a pointer to the TRE that caused
+ * the event (which will be the last one used by the transaction).
+ * Each entry in this map records the transaction associated
+ * with a corresponding completed TRE.
+ */
+ trans_info->map = kcalloc(tre_count, sizeof(*trans_info->map),
+ GFP_KERNEL);
+ if (!trans_info->map) {
+ ret = -ENOMEM;
+ goto err_trans_free;
+ }
+
+ /* A transaction uses a scatterlist array to represent the data
+ * transfers implemented by the transaction. Each scatterlist
+ * element is used to fill a single TRE when the transaction is
+ * committed. So we need as many scatterlist elements as the
+ * maximum number of TREs that can be outstanding.
+ */
+ ret = gsi_trans_pool_init(&trans_info->sg_pool,
+ sizeof(struct scatterlist),
+ tre_max, channel->trans_tre_max);
+ if (ret)
+ goto err_map_free;
+
+ return 0;
+
+err_map_free:
+ kfree(trans_info->map);
+err_trans_free:
+ kfree(trans_info->trans);
+
+ dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
+ ret, channel_id);
+
+ return ret;
+}
+
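+/* A sketch of the bookkeeping set up above: each ID counter advances
+ * monotonically (interpreted modulo channel->tre_count), and at all
+ * times
+ *
+ *	polled_id <= completed_id <= pending_id <=
+ *		committed_id <= allocated_id <= free_id
+ *
+ * so the transactions in any one state form the half-open range between
+ * two adjacent counters, and each "move" helper simply bumps one of them.
+ */
+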
+/* Inverse of gsi_channel_trans_init() */
+void gsi_channel_trans_exit(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ gsi_trans_pool_exit(&trans_info->sg_pool);
+ kfree(trans_info->trans);
+ kfree(trans_info->map);
+}
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
new file mode 100644
index 0000000000..30c1c2dc77
--- /dev/null
+++ b/drivers/net/ipa/gsi_trans.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2022 Linaro Ltd.
+ */
+#ifndef _GSI_TRANS_H_
+#define _GSI_TRANS_H_
+
+#include <linux/types.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+
+#include "ipa_cmd.h"
+
+struct page;
+struct scatterlist;
+struct device;
+struct sk_buff;
+
+struct gsi;
+struct gsi_trans;
+struct gsi_trans_pool;
+
+/* Maximum number of TREs in an IPA immediate command transaction */
+#define IPA_COMMAND_TRANS_TRE_MAX 8
+
+/**
+ * struct gsi_trans - a GSI transaction
+ *
+ * Most fields in this structure are for internal use by the transaction core code:
+ * @gsi: GSI pointer
+ * @channel_id: Channel number transaction is associated with
+ * @cancelled: If set by the core code, transaction was cancelled
+ * @rsvd_count: Number of TREs reserved for this transaction
+ * @used_count: Number of TREs *used* (could be less than rsvd_count)
+ * @len: Number of bytes sent or received by the transaction
+ * @data: Preserved but not touched by the core transaction code
+ * @cmd_opcode: Array of command opcodes (command channel only)
+ * @sgl: An array of scatter/gather entries managed by core code
+ * @direction: DMA transfer direction (DMA_NONE for commands)
+ * @refcount: Reference count used for destruction
+ * @completion: Completed when the transaction completes
+ * @byte_count: TX channel byte count recorded when transaction committed
+ * @trans_count: Channel transaction count when committed (for BQL accounting)
+ *
+ * The @len field is set when the transaction is committed. For RX
+ * transactions it is updated later to reflect the actual number of bytes
+ * received.
+ */
+struct gsi_trans {
+ struct gsi *gsi;
+ u8 channel_id;
+
+ bool cancelled; /* true if transaction was cancelled */
+
+ u8 rsvd_count; /* # TREs requested */
+ u8 used_count; /* # entries used in sgl[] */
+ u32 len; /* total # bytes across sgl[] */
+
+ union {
+ void *data;
+ u8 cmd_opcode[IPA_COMMAND_TRANS_TRE_MAX];
+ };
+ struct scatterlist *sgl;
+ enum dma_data_direction direction;
+
+ refcount_t refcount;
+ struct completion completion;
+
+ u64 byte_count; /* channel byte_count when committed */
+ u64 trans_count; /* channel trans_count when committed */
+};
+
+/**
+ * gsi_trans_pool_init() - Initialize a pool of structures for transactions
+ * @pool: GSI transaction pool pointer
+ * @size: Size of elements in the pool
+ * @count: Minimum number of elements in the pool
+ * @max_alloc: Maximum number of elements allocated at a time from pool
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
+ u32 max_alloc);
+
+/**
+ * gsi_trans_pool_alloc() - Allocate one or more elements from a pool
+ * @pool: Pool pointer
+ * @count: Number of elements to allocate from the pool
+ *
+ * Return: Virtual address of element(s) allocated from the pool
+ */
+void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count);
+
+/**
+ * gsi_trans_pool_exit() - Inverse of gsi_trans_pool_init()
+ * @pool: Pool pointer
+ */
+void gsi_trans_pool_exit(struct gsi_trans_pool *pool);
+
+/**
+ * gsi_trans_pool_init_dma() - Initialize a pool of DMA-able structures
+ * @dev: Device used for DMA
+ * @pool: Pool pointer
+ * @size: Size of elements in the pool
+ * @count: Minimum number of elements in the pool
+ * @max_alloc: Maximum number of elements allocated at a time from pool
+ *
+ * Return: 0 if successful, or a negative error code
+ *
+ * Structures in this pool reside in DMA-coherent memory.
+ */
+int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ size_t size, u32 count, u32 max_alloc);
+
+/**
+ * gsi_trans_pool_alloc_dma() - Allocate an element from a DMA pool
+ * @pool: DMA pool pointer
+ * @addr: DMA address "handle" associated with the allocation
+ *
+ * Return: Virtual address of element allocated from the pool
+ *
+ * Only one element at a time may be allocated from a DMA pool.
+ */
+void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
+
+/**
+ * gsi_trans_pool_exit_dma() - Inverse of gsi_trans_pool_init_dma()
+ * @dev: Device used for DMA
+ * @pool: Pool pointer
+ */
+void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
+
+/**
+ * gsi_channel_trans_idle() - Return whether no transactions are allocated
+ * @gsi: GSI pointer
+ * @channel_id: Channel the transaction is associated with
+ *
+ * Return: True if no transactions are allocated, false otherwise
+ */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel the transaction is associated with
+ * @tre_count: Number of elements in the transaction
+ * @direction: DMA direction for entire SGL (or DMA_NONE)
+ *
+ * Return: A GSI transaction structure, or a null pointer if all
+ * available transactions are in use
+ */
+struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
+ u32 tre_count,
+ enum dma_data_direction direction);
+
+/**
+ * gsi_trans_free() - Free a previously-allocated GSI transaction
+ * @trans: Transaction to be freed
+ */
+void gsi_trans_free(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_cmd_add() - Add an immediate command to a transaction
+ * @trans: Transaction
+ * @buf: Buffer pointer for command payload
+ * @size: Number of bytes in buffer
+ * @addr: DMA address for payload
+ * @opcode: IPA immediate command opcode
+ */
+void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
+ dma_addr_t addr, enum ipa_cmd_opcode opcode);
+
+/**
+ * gsi_trans_page_add() - Add a page transfer to a transaction
+ * @trans: Transaction
+ * @page: Page pointer
+ * @size: Number of bytes (starting at offset) to transfer
+ * @offset: Offset within page for start of transfer
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
+ u32 offset);
+
+/**
+ * gsi_trans_skb_add() - Add a socket transfer to a transaction
+ * @trans: Transaction
+ * @skb: Socket buffer for transfer (outbound)
+ *
+ * Return: 0, or -EMSGSIZE if socket data won't fit in transaction.
+ */
+int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb);
+
+/**
+ * gsi_trans_commit() - Commit a GSI transaction
+ * @trans: Transaction to commit
+ * @ring_db: Whether to tell the hardware about these queued transfers
+ */
+void gsi_trans_commit(struct gsi_trans *trans, bool ring_db);
+
+/**
+ * gsi_trans_commit_wait() - Commit a GSI transaction and wait for it
+ * to complete
+ * @trans: Transaction to commit
+ */
+void gsi_trans_commit_wait(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_read_byte() - Issue a single byte read TRE on a channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel on which to read a byte
+ * @addr: DMA address into which to transfer the one byte
+ *
+ * This is not a transaction operation at all. It's defined here because
+ * it needs to be done in coordination with other transaction activity.
+ */
+int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr);
+
+/**
+ * gsi_trans_read_byte_done() - Clean up after a single byte read TRE
+ * @gsi: GSI pointer
+ * @channel_id: Channel on which byte was read
+ *
+ * This function needs to be called to signal that the work related
+ * to reading a byte initiated by gsi_trans_read_byte() is complete.
+ */
+void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id);
+
+#endif /* _GSI_TRANS_H_ */
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
new file mode 100644
index 0000000000..f3355e040a
--- /dev/null
+++ b/drivers/net/ipa/ipa.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/pm_wakeup.h>
+
+#include "ipa_version.h"
+#include "gsi.h"
+#include "ipa_mem.h"
+#include "ipa_qmi.h"
+#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
+
+struct clk;
+struct icc_path;
+struct net_device;
+struct platform_device;
+
+struct ipa_power;
+struct ipa_smp2p;
+struct ipa_interrupt;
+
+/**
+ * struct ipa - IPA information
+ * @gsi: Embedded GSI structure
+ * @version: IPA hardware version
+ * @pdev: Platform device
+ * @completion: Used to signal pipeline clear transfer complete
+ * @nb: Notifier block used for remoteproc SSR
+ * @notifier: Remoteproc SSR notifier
+ * @smp2p: SMP2P information
+ * @power: IPA power information
+ * @table_addr: DMA address of filter/route table content
+ * @table_virt: Virtual address of filter/route table content
+ * @route_count: Total number of entries in a routing table
+ * @modem_route_count: Number of modem entries in a routing table
+ * @filter_count: Maximum number of entries in a filter table
+ * @interrupt: IPA Interrupt information
+ * @uc_powered: true if power is active by proxy for microcontroller
+ * @uc_loaded: true after microcontroller has reported it's ready
+ * @reg_virt: Virtual address used for IPA register access
+ * @regs: IPA register definitions
+ * @mem_addr: DMA address of IPA-local memory space
+ * @mem_virt: Virtual address of IPA-local memory space
+ * @mem_offset: Offset from @mem_virt used for access to IPA memory
+ * @mem_size: Total size (bytes) of memory at @mem_virt
+ * @mem_count: Number of entries in the mem array
+ * @mem: Array of IPA-local memory region descriptors
+ * @imem_iova: I/O virtual address of IPA region in IMEM
+ * @imem_size: Size of IMEM region
+ * @smem_iova: I/O virtual address of IPA region in SMEM
+ * @smem_size: Size of SMEM region
+ * @zero_addr: DMA address of preallocated zero-filled memory
+ * @zero_virt: Virtual address of preallocated zero-filled memory
+ * @zero_size: Size (bytes) of preallocated zero-filled memory
+ * @endpoint_count: Number of defined bits in most bitmaps below
+ * @available_count: Number of defined bits in the available bitmap
+ * @defined: Bitmap of endpoints defined in config data
+ * @available: Bitmap of endpoints supported by hardware
+ * @filtered: Bitmap of endpoints that support filtering
+ * @set_up: Bitmap of endpoints that are set up for use
+ * @enabled: Bitmap of currently enabled endpoints
+ * @modem_tx_count: Number of defined modem TX endpoints
+ * @endpoint: Array of endpoint information
+ * @channel_map: Mapping of GSI channel to IPA endpoint
+ * @name_map: Mapping of IPA endpoint name to IPA endpoint
+ * @setup_complete: Flag indicating whether setup stage has completed
+ * @modem_state: State of modem (stopped, running)
+ * @modem_netdev: Network device structure used for modem
+ * @qmi: QMI information
+ */
+struct ipa {
+ struct gsi gsi;
+ enum ipa_version version;
+ struct platform_device *pdev;
+ struct completion completion;
+ struct notifier_block nb;
+ void *notifier;
+ struct ipa_smp2p *smp2p;
+ struct ipa_power *power;
+
+ dma_addr_t table_addr;
+ __le64 *table_virt;
+ u32 route_count;
+ u32 modem_route_count;
+ u32 filter_count;
+
+ struct ipa_interrupt *interrupt;
+ bool uc_powered;
+ bool uc_loaded;
+
+ void __iomem *reg_virt;
+ const struct regs *regs;
+
+ dma_addr_t mem_addr;
+ void *mem_virt;
+ u32 mem_offset;
+ u32 mem_size;
+ u32 mem_count;
+ const struct ipa_mem *mem;
+
+ unsigned long imem_iova;
+ size_t imem_size;
+
+ unsigned long smem_iova;
+ size_t smem_size;
+
+ dma_addr_t zero_addr;
+ void *zero_virt;
+ size_t zero_size;
+
+ /* Bitmaps indicating endpoint state */
+ u32 endpoint_count;
+ u32 available_count;
+ unsigned long *defined; /* Defined in configuration data */
+ unsigned long *available; /* Supported by hardware */
+ u64 filtered; /* Support filtering (AP and modem) */
+ unsigned long *set_up;
+ unsigned long *enabled;
+
+ u32 modem_tx_count;
+ struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
+ struct ipa_endpoint *channel_map[GSI_CHANNEL_COUNT_MAX];
+ struct ipa_endpoint *name_map[IPA_ENDPOINT_COUNT];
+
+ bool setup_complete;
+
+ atomic_t modem_state; /* enum ipa_modem_state */
+ struct net_device *modem_netdev;
+ struct ipa_qmi qmi;
+};
+
+/**
+ * ipa_setup() - Perform IPA setup
+ * @ipa: IPA pointer
+ *
+ * IPA initialization is broken into stages: init; config; and setup.
+ * (These have inverses exit, deconfig, and teardown.)
+ *
+ * Activities performed at the init stage can be done without requiring
+ * any access to IPA hardware. Activities performed at the config stage
+ * require IPA power, because they involve access to IPA registers.
+ * The setup stage is performed only after the GSI hardware is ready
+ * (more on this below). The setup stage allows the AP to perform
+ * more complex initialization by issuing "immediate commands" using
+ * a special interface to the IPA.
+ *
+ * This function, @ipa_setup(), starts the setup stage.
+ *
+ * In order for the GSI hardware to be functional it needs firmware to be
+ * loaded (in addition to some other low-level initialization). This early
+ * GSI initialization can be done either by Trust Zone on the AP or by the
+ * modem.
+ *
+ * If it's done by Trust Zone, the AP loads the GSI firmware and supplies
+ * it to Trust Zone to verify and install. When this completes, if
+ * verification was successful, the GSI layer is ready and ipa_setup()
+ * implements the setup phase of initialization.
+ *
+ * If the modem performs early GSI initialization, the AP needs to know
+ * when this has occurred. An SMP2P interrupt is used for this purpose,
+ * and receipt of that interrupt triggers the call to ipa_setup().
+ */
+int ipa_setup(struct ipa *ipa);
+
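+/* A sketch of the staged bring-up described above (the actual sequencing
+ * is driven from ipa_main.c and is not spelled out in this header):
+ *
+ *	init  ->  config  ->  (GSI firmware loaded and verified)  ->  setup
+ *
+ * with teardown, deconfig and exit undoing those stages in reverse order.
+ */
+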
+#endif /* _IPA_H_ */
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
new file mode 100644
index 0000000000..f1419fbd77
--- /dev/null
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -0,0 +1,647 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2023 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+
+/**
+ * DOC: IPA Immediate Commands
+ *
+ * The AP command TX endpoint is used to issue immediate commands to the IPA.
+ * An immediate command is generally used to request the IPA do something
+ * other than data transfer to another endpoint.
+ *
+ * Immediate commands are represented by GSI transactions just like other
+ * transfer requests, and use a single GSI TRE. Each immediate command
+ * has a well-defined format, having a payload of a known length. This
+ * allows the transfer element's length field to be used to hold an
+ * immediate command's opcode. The payload for a command resides in AP
+ * memory and is described by a single scatterlist entry in its transaction.
+ * Commands do not require a transaction completion callback, and are
+ * always issued using gsi_trans_commit_wait().
+ */
+
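+/* Illustrative flow for issuing one immediate command, using interfaces
+ * declared in gsi_trans.h (error handling and payload construction are
+ * elided; "channel_id", "pool", "size" and "opcode" are placeholders):
+ *
+ *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_NONE);
+ *	payload = gsi_trans_pool_alloc_dma(pool, &addr);
+ *	// ... fill in *payload for the command ...
+ *	gsi_trans_cmd_add(trans, payload, size, addr, opcode);
+ *	gsi_trans_commit_wait(trans);
+ */
+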
+/* Some commands can wait until indicated pipeline stages are clear */
+enum pipeline_clear_options {
+ pipeline_clear_hps = 0x0,
+ pipeline_clear_src_grp = 0x1,
+ pipeline_clear_full = 0x2,
+};
+
+/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
+
+struct ipa_cmd_hw_ip_fltrt_init {
+ __le64 hash_rules_addr;
+ __le64 flags;
+ __le64 nhash_rules_addr;
+};
+
+/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
+#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK GENMASK_ULL(11, 0)
+#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK GENMASK_ULL(27, 12)
+#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK GENMASK_ULL(39, 28)
+#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK GENMASK_ULL(55, 40)
+
+/* IPA_CMD_HDR_INIT_LOCAL */
+
+struct ipa_cmd_hw_hdr_init_local {
+ __le64 hdr_table_addr;
+ __le32 flags;
+ __le32 reserved;
+};
+
+/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
+#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK GENMASK(11, 0)
+#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK GENMASK(27, 12)
+
+/* IPA_CMD_REGISTER_WRITE */
+
+/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
+#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
+#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
+
+struct ipa_cmd_register_write {
+ __le16 flags; /* Unused/reserved prior to IPA v4.0 */
+ __le16 offset;
+ __le32 value;
+ __le32 value_mask;
+ __le32 clear_options; /* Unused/reserved for IPA v4.0+ */
+};
+
+/* Field masks for ipa_cmd_register_write structure fields */
+/* The next field is present for IPA v4.0+ */
+#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK GENMASK(14, 11)
+/* The next field is not present for IPA v4.0+ */
+#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK GENMASK(15, 15)
+
+/* The next field and its values are not present for IPA v4.0+ */
+#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK GENMASK(1, 0)
+
+/* IPA_CMD_IP_PACKET_INIT */
+
+struct ipa_cmd_ip_packet_init {
+ u8 dest_endpoint; /* Full 8 bits used for IPA v5.0+ */
+ u8 reserved[7];
+};
+
+/* Field mask for ipa_cmd_ip_packet_init dest_endpoint field (unused v5.0+) */
+#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
+
+/* IPA_CMD_DMA_SHARED_MEM */
+
+/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
+
+#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
+#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
+
+struct ipa_cmd_hw_dma_mem_mem {
+ __le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
+ __le16 size;
+ __le16 local_addr;
+ __le16 flags;
+ __le64 system_addr;
+};
+
+/* Flag allowing atomic clear of target region after reading data (v4.0+) */
+#define DMA_SHARED_MEM_CLEAR_AFTER_READ GENMASK(15, 15)
+
+/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
+#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK GENMASK(0, 0)
+/* The next two fields are not present for IPA v4.0+ */
+#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK GENMASK(1, 1)
+#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK GENMASK(3, 2)
+
+/* IPA_CMD_IP_PACKET_TAG_STATUS */
+
+struct ipa_cmd_ip_packet_tag_status {
+ __le64 tag;
+};
+
+#define IP_PACKET_TAG_STATUS_TAG_FMASK GENMASK_ULL(63, 16)
+
+/* Immediate command payload */
+union ipa_cmd_payload {
+ struct ipa_cmd_hw_ip_fltrt_init table_init;
+ struct ipa_cmd_hw_hdr_init_local hdr_init_local;
+ struct ipa_cmd_register_write register_write;
+ struct ipa_cmd_ip_packet_init ip_packet_init;
+ struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
+ struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
+};
+
+static void ipa_cmd_validate_build(void)
+{
+ /* The size of a filter table needs to fit into fields in the
+ * ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
+ * might not be used, non-hashed and hashed tables have the same
+ * maximum size. IPv4 and IPv6 filter tables have the same number
+ * of entries.
+ */
+ /* Hashed and non-hashed fields are assumed to be the same size */
+ BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
+ field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
+ BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
+ field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
+
+ /* Prior to IPA v5.0, we supported no more than 32 endpoints,
+ * and this was reflected in some 5-bit fields that held
+ * endpoint numbers. Starting with IPA v5.0, the widths of
+ * these fields were extended to 8 bits, meaning up to 256
+ * endpoints. If the driver claims to support more than
+ * that it's an error.
+ */
+ BUILD_BUG_ON(IPA_ENDPOINT_MAX - 1 > U8_MAX);
+}
+
+/* Validate a memory region holding a table */
+bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route)
+{
+ u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
+ const char *table = route ? "route" : "filter";
+ struct device *dev = &ipa->pdev->dev;
+ u32 size;
+
+ size = route ? ipa->route_count : ipa->filter_count + 1;
+ size *= sizeof(__le64);
+
+ /* Size must fit in the immediate command field that holds it */
+ if (size > size_max) {
+ dev_err(dev, "%s table region size too large\n", table);
+ dev_err(dev, " (0x%04x > 0x%04x)\n", size, size_max);
+
+ return false;
+ }
+
+ /* Offset must fit in the immediate command field that holds it */
+ if (mem->offset > offset_max ||
+ ipa->mem_offset > offset_max - mem->offset) {
+ dev_err(dev, "%s table region offset too large\n", table);
+ dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
+ ipa->mem_offset, mem->offset, offset_max);
+
+ return false;
+ }
+
+ return true;
+}
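+
+/* For example (hypothetical count): a routing table with 15 entries
+ * occupies 15 * sizeof(__le64) = 120 bytes, well within the 12-bit
+ * NHASH_SIZE field limit of 0xfff checked above.
+ */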
+
+/* Validate the memory region that holds headers */
+static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const struct ipa_mem *mem;
+ u32 offset_max;
+ u32 size_max;
+ u32 offset;
+ u32 size;
+
+ /* In ipa_cmd_hdr_init_local_add() we record the offset and size of
+ * the header table memory area in an immediate command. Make sure
+ * the offset and size fit in the fields that need to hold them, and
+ * that the entire range is within the overall IPA memory range.
+ */
+ offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
+ size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+
+ /* The header memory area contains both the modem and AP header
+ * regions. The modem portion defines the address of the region.
+ */
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
+ offset = mem->offset;
+ size = mem->size;
+
+ /* Make sure the offset fits in the IPA command */
+ if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
+ dev_err(dev, "header table region offset too large\n");
+ dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
+ ipa->mem_offset, offset, offset_max);
+
+ return false;
+ }
+
+ /* Add the size of the AP portion (if defined) to the combined size */
+ mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
+ if (mem)
+ size += mem->size;
+
+ /* Make sure the combined size fits in the IPA command */
+ if (size > size_max) {
+ dev_err(dev, "header table region size too large\n");
+ dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max);
+
+ return false;
+ }
+
+ return true;
+}
+
+/* Indicate whether an offset can be used with a register_write command */
+static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
+ const char *name, u32 offset)
+{
+ struct ipa_cmd_register_write *payload;
+ struct device *dev = &ipa->pdev->dev;
+ u32 offset_max;
+ u32 bit_count;
+
+ /* The maximum offset in a register_write immediate command depends
+ * on the version of IPA. A 16 bit offset is always supported,
+ * but starting with IPA v4.0 some additional high-order bits are
+ * allowed.
+ */
+ bit_count = BITS_PER_BYTE * sizeof(payload->offset);
+ if (ipa->version >= IPA_VERSION_4_0)
+ bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
+ BUILD_BUG_ON(bit_count > 32);
+ offset_max = ~0U >> (32 - bit_count);
+
+ /* Make sure the offset can be represented by the field(s)
+ * that holds it. Also make sure the offset is not outside
+ * the overall IPA memory range.
+ */
+ if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
+ dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n",
+ name, ipa->mem_offset, offset, offset_max);
+ return false;
+ }
+
+ return true;
+}
+
+/* Check whether offsets passed to register_write are valid */
+static bool ipa_cmd_register_write_valid(struct ipa *ipa)
+{
+ const struct reg *reg;
+ const char *name;
+ u32 offset;
+
+ /* If hashed tables are supported, ensure the hash flush register
+ * offset will fit in a register write IPA immediate command.
+ */
+ if (ipa_table_hash_support(ipa)) {
+ if (ipa->version < IPA_VERSION_5_0)
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ else
+ reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
+
+ offset = reg_offset(reg);
+ name = "filter/route hash flush";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
+ }
+
+ /* Each endpoint can have a status endpoint associated with it,
+ * and this is recorded in an endpoint register. If the modem
+ * crashes, we reset the status endpoint for all modem endpoints
+ * using a register write IPA immediate command. Make sure the
+ * worst case (highest endpoint number) offset of that endpoint
+ * fits in the register write command field(s) that must hold it.
+ */
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
+ name = "maximal endpoint status";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
+
+ return true;
+}
+
+int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct device *dev = channel->gsi->dev;
+
+ /* Command payloads are allocated one at a time, but a single
+ * transaction can require up to the maximum supported by the
+ * channel; treat them as if they were allocated all at once.
+ */
+ return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
+ sizeof(union ipa_cmd_payload),
+ tre_max, channel->trans_tre_max);
+}
+
+void ipa_cmd_pool_exit(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct device *dev = channel->gsi->dev;
+
+ gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
+}
+
+static union ipa_cmd_payload *
+ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
+{
+ struct gsi_trans_info *trans_info;
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;
+
+ return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
+}
+
+/* If hash_size is 0, hash_offset and hash_addr ignored. */
+void ipa_cmd_table_init_add(struct gsi_trans *trans,
+ enum ipa_cmd_opcode opcode, u16 size, u32 offset,
+ dma_addr_t addr, u16 hash_size, u32 hash_offset,
+ dma_addr_t hash_addr)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ struct ipa_cmd_hw_ip_fltrt_init *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+ u64 val;
+
+ /* Record the non-hash table offset and size */
+ offset += ipa->mem_offset;
+ val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
+
+ /* The hash table offset and address are zero if its size is 0 */
+ if (hash_size) {
+ /* Record the hash table offset and size */
+ hash_offset += ipa->mem_offset;
+ val |= u64_encode_bits(hash_offset,
+ IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
+ val |= u64_encode_bits(hash_size,
+ IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
+ }
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->table_init;
+
+ /* Fill in all offsets and sizes and the non-hash table address */
+ if (hash_size)
+ payload->hash_rules_addr = cpu_to_le64(hash_addr);
+ payload->flags = cpu_to_le64(val);
+ payload->nhash_rules_addr = cpu_to_le64(addr);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ opcode);
+}
+
+/* Initialize header space in IPA-local memory */
+void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
+ struct ipa_cmd_hw_hdr_init_local *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+ u32 flags;
+
+ offset += ipa->mem_offset;
+
+ /* With this command we tell the IPA where in its local memory the
+ * header tables reside. The content of the buffer provided is
+ * also written via DMA into that space. The IPA hardware owns
+ * the table, but the AP must initialize it.
+ */
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->hdr_init_local;
+
+ payload->hdr_table_addr = cpu_to_le64(addr);
+ flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+ flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
+ payload->flags = cpu_to_le32(flags);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ opcode);
+}
+
+void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
+ u32 mask, bool clear_full)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ struct ipa_cmd_register_write *payload;
+ union ipa_cmd_payload *cmd_payload;
+ u32 opcode = IPA_CMD_REGISTER_WRITE;
+ dma_addr_t payload_addr;
+ u32 clear_option;
+ u32 options;
+ u16 flags;
+
+ /* pipeline_clear_src_grp is not used */
+ clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;
+
+ /* IPA v4.0+ represents the pipeline clear options in the opcode. It
+ * also supports a larger offset by encoding additional high-order
+ * bits in the payload flags field.
+ */
+ if (ipa->version >= IPA_VERSION_4_0) {
+ u16 offset_high;
+ u32 val;
+
+ /* Opcode encodes pipeline clear options */
+ /* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
+ val = u16_encode_bits(clear_option,
+ REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
+ opcode |= val;
+
+ /* Extract the high 4 bits from the offset */
+ offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
+ offset &= (1 << 16) - 1;
+
+ /* Extract the top 4 bits and encode it into the flags field */
+ flags = u16_encode_bits(offset_high,
+ REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
+ options = 0; /* reserved */
+
+ } else {
+ flags = 0; /* SKIP_CLEAR flag is always 0 */
+ options = u16_encode_bits(clear_option,
+ REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
+ }
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->register_write;
+
+ payload->flags = cpu_to_le16(flags);
+ payload->offset = cpu_to_le16((u16)offset);
+ payload->value = cpu_to_le32(value);
+ payload->value_mask = cpu_to_le32(mask);
+ payload->clear_options = cpu_to_le32(options);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ opcode);
+}
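+
+/* For example (hypothetical offset): on IPA v4.0+ a register offset of
+ * 0x12345 is split by the code above into offset_high = 0x1 (bits 19:16,
+ * encoded in the flags field) and 0x2345 in the 16-bit payload offset.
+ */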
+
+/* Skip IP packet processing on the next data transfer on a TX channel */
+static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
+ struct ipa_cmd_ip_packet_init *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->ip_packet_init;
+
+ if (ipa->version < IPA_VERSION_5_0) {
+ payload->dest_endpoint =
+ u8_encode_bits(endpoint_id,
+ IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
+ } else {
+ payload->dest_endpoint = endpoint_id;
+ }
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ opcode);
+}
+
+/* Use a DMA command to read or write a block of IPA-resident memory */
+void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr, bool toward_ipa)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
+ struct ipa_cmd_hw_dma_mem_mem *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+ u16 flags;
+
+ /* size and offset must fit in 16 bit fields */
+ WARN_ON(!size);
+ WARN_ON(size > U16_MAX);
+ WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);
+
+ offset += ipa->mem_offset;
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->dma_shared_mem;
+
+ /* payload->clear_after_read was reserved prior to IPA v4.0. It's
+ * never needed for current code, so it's 0 regardless of version.
+ */
+ payload->size = cpu_to_le16(size);
+ payload->local_addr = cpu_to_le16(offset);
+ /* payload->flags:
+ * direction: 0 = write to IPA, 1 read from IPA
+ * Starting at v4.0 these are reserved; either way, all zero:
+ * pipeline clear: 0 = wait for pipeline clear (don't skip)
+ * clear_options: 0 = pipeline_clear_hps
+ * Instead, for v4.0+ these are encoded in the opcode. But again
+ * since both values are 0 we won't bother OR'ing them in.
+ */
+ flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
+ payload->flags = cpu_to_le16(flags);
+ payload->system_addr = cpu_to_le64(addr);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ opcode);
+}
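+
+/* Illustrative sketch (hypothetical offset, size, and DMA-mapped buffer
+ * at "addr"): reading a block of IPA-local memory back into AP memory
+ * uses the same command with toward_ipa false:
+ *
+ *	ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, false);
+ *
+ * Region-initializing helpers in ipa_mem.c use the toward_ipa = true
+ * form of this call to write (or zero) IPA-resident memory.
+ */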
+
+static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
+ struct ipa_cmd_ip_packet_tag_status *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->ip_packet_tag_status;
+
+ payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ opcode);
+}
+
+/* Issue a small command TX data transfer */
+static void ipa_cmd_transfer_add(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
+ union ipa_cmd_payload *payload;
+ dma_addr_t payload_addr;
+
+ /* Just transfer a zero-filled payload structure */
+ payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ opcode);
+}
+
+/* Add immediate commands to a transaction to clear the hardware pipeline */
+void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ struct ipa_endpoint *endpoint;
+
+ /* This will complete when the transfer is received */
+ reinit_completion(&ipa->completion);
+
+ /* Issue a no-op register write command (mask 0 means no write) */
+ ipa_cmd_register_write_add(trans, 0, 0, 0, true);
+
+ /* Send a data packet through the IPA pipeline. The packet_init
+ * command says to send the next packet directly to the exception
+ * endpoint without any other IPA processing. The tag_status
+ * command requests that status be generated on completion of
+ * that transfer, and that it will be tagged with a value.
+ * Finally, the transfer command sends a small packet of data
+ * (instead of a command) using the command endpoint.
+ */
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
+ ipa_cmd_ip_tag_status_add(trans);
+ ipa_cmd_transfer_add(trans);
+}
+
+/* Returns the number of commands required to clear the pipeline */
+u32 ipa_cmd_pipeline_clear_count(void)
+{
+ return 4;
+}
+
+void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
+{
+ wait_for_completion(&ipa->completion);
+}
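+
+/* Illustrative sketch of the complete pipeline clear sequence (error
+ * handling omitted); ipa_endpoint_modem_exception_reset_all() follows
+ * this pattern, adding its own register write commands first:
+ *
+ *	trans = ipa_cmd_trans_alloc(ipa, ipa_cmd_pipeline_clear_count());
+ *	ipa_cmd_pipeline_clear_add(trans);
+ *	gsi_trans_commit_wait(trans);
+ *	ipa_cmd_pipeline_clear_wait(ipa);
+ */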
+
+/* Allocate a transaction for the command TX endpoint */
+struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
+{
+ struct ipa_endpoint *endpoint;
+
+ if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
+ return NULL;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+
+ return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
+ tre_count, DMA_NONE);
+}
+
+/* Init function for immediate commands; there is no ipa_cmd_exit() */
+int ipa_cmd_init(struct ipa *ipa)
+{
+ ipa_cmd_validate_build();
+
+ if (!ipa_cmd_header_init_local_valid(ipa))
+ return -EINVAL;
+
+ if (!ipa_cmd_register_write_valid(ipa))
+ return -EINVAL;
+
+ return 0;
+}
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
new file mode 100644
index 0000000000..e2cf1c2b0e
--- /dev/null
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2022 Linaro Ltd.
+ */
+#ifndef _IPA_CMD_H_
+#define _IPA_CMD_H_
+
+#include <linux/types.h>
+#include <linux/dma-direction.h>
+
+struct sk_buff;
+struct scatterlist;
+
+struct ipa;
+struct ipa_mem;
+struct gsi_trans;
+struct gsi_channel;
+
+/**
+ * enum ipa_cmd_opcode: IPA immediate commands
+ *
+ * @IPA_CMD_IP_V4_FILTER_INIT: Initialize IPv4 filter table
+ * @IPA_CMD_IP_V6_FILTER_INIT: Initialize IPv6 filter table
+ * @IPA_CMD_IP_V4_ROUTING_INIT: Initialize IPv4 routing table
+ * @IPA_CMD_IP_V6_ROUTING_INIT: Initialize IPv6 routing table
+ * @IPA_CMD_HDR_INIT_LOCAL: Initialize IPA-local header memory
+ * @IPA_CMD_REGISTER_WRITE: Register write performed by IPA
+ * @IPA_CMD_IP_PACKET_INIT: Set up next packet's destination endpoint
+ * @IPA_CMD_DMA_SHARED_MEM: DMA command performed by IPA
+ * @IPA_CMD_IP_PACKET_TAG_STATUS: Have next packet generate tag status
+ * @IPA_CMD_NONE: Special (invalid) "not a command" value
+ *
+ * All immediate commands are issued using the AP command TX endpoint.
+ */
+enum ipa_cmd_opcode {
+ IPA_CMD_NONE = 0x0,
+ IPA_CMD_IP_V4_FILTER_INIT = 0x3,
+ IPA_CMD_IP_V6_FILTER_INIT = 0x4,
+ IPA_CMD_IP_V4_ROUTING_INIT = 0x7,
+ IPA_CMD_IP_V6_ROUTING_INIT = 0x8,
+ IPA_CMD_HDR_INIT_LOCAL = 0x9,
+ IPA_CMD_REGISTER_WRITE = 0xc,
+ IPA_CMD_IP_PACKET_INIT = 0x10,
+ IPA_CMD_DMA_SHARED_MEM = 0x13,
+ IPA_CMD_IP_PACKET_TAG_STATUS = 0x14,
+};
+
+/**
+ * ipa_cmd_table_init_valid() - Validate a memory region holding a table
+ * @ipa: - IPA pointer
+ * @mem: - IPA memory region descriptor
+ * @route: - Whether the region holds a route or filter table
+ *
+ * Return: true if region is valid, false otherwise
+ */
+bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route);
+
+/**
+ * ipa_cmd_data_valid() - Validate that command-related configuration is valid
+ * @ipa: - IPA pointer
+ *
+ * Return: true if assumptions required for command are valid
+ */
+bool ipa_cmd_data_valid(struct ipa *ipa);
+
+/**
+ * ipa_cmd_pool_init() - initialize command channel pools
+ * @channel: AP->IPA command TX GSI channel pointer
+ * @tre_count: Number of pool elements to allocate
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_count);
+
+/**
+ * ipa_cmd_pool_exit() - Inverse of ipa_cmd_pool_init()
+ * @channel: AP->IPA command TX GSI channel pointer
+ */
+void ipa_cmd_pool_exit(struct gsi_channel *channel);
+
+/**
+ * ipa_cmd_table_init_add() - Add table init command to a transaction
+ * @trans: GSI transaction
+ * @opcode: IPA immediate command opcode
+ * @size: Size of non-hashed routing table memory
+ * @offset: Offset in IPA shared memory of non-hashed routing table memory
+ * @addr: DMA address of non-hashed table data to write
+ * @hash_size: Size of hashed routing table memory
+ * @hash_offset: Offset in IPA shared memory of hashed routing table memory
+ * @hash_addr: DMA address of hashed table data to write
+ *
+ * If hash_size is 0, hash_offset and hash_addr are ignored.
+ */
+void ipa_cmd_table_init_add(struct gsi_trans *trans, enum ipa_cmd_opcode opcode,
+ u16 size, u32 offset, dma_addr_t addr,
+ u16 hash_size, u32 hash_offset,
+ dma_addr_t hash_addr);
+
+/**
+ * ipa_cmd_hdr_init_local_add() - Add a header init command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of header memory in IPA local space
+ * @size: Size of header memory
+ * @addr: DMA address of buffer to be written from
+ *
+ * Defines and fills the location in IPA memory to use for headers.
+ */
+void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr);
+
+/**
+ * ipa_cmd_register_write_add() - Add a register write command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of register to be written
+ * @value: Value to be written
+ * @mask: Mask of bits in register to update with bits from value
+ * @clear_full: Pipeline clear option; true means full pipeline clear
+ */
+void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
+ u32 mask, bool clear_full);
+
+/**
+ * ipa_cmd_dma_shared_mem_add() - Add a DMA memory command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of IPA memory to be read or written
+ * @size: Number of bytes of memory to be transferred
+ * @addr: DMA address of buffer to be read into or written from
+ * @toward_ipa: true means write to IPA memory; false means read
+ */
+void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset,
+ u16 size, dma_addr_t addr, bool toward_ipa);
+
+/**
+ * ipa_cmd_pipeline_clear_add() - Add pipeline clear commands to a transaction
+ * @trans: GSI transaction
+ */
+void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans);
+
+/**
+ * ipa_cmd_pipeline_clear_count() - # commands required to clear pipeline
+ *
+ * Return: The number of elements to allocate in a transaction
+ * to hold commands to clear the pipeline
+ */
+u32 ipa_cmd_pipeline_clear_count(void);
+
+/**
+ * ipa_cmd_pipeline_clear_wait() - Wait for pipeline clear to complete
+ * @ipa: - IPA pointer
+ */
+void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
+
+/**
+ * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
+ * @ipa: IPA pointer
+ * @tre_count: Number of elements in the transaction
+ *
+ * Return: A GSI transaction structure, or a null pointer if all
+ * available transactions are in use
+ */
+struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count);
+
+/**
+ * ipa_cmd_init() - Initialize IPA immediate commands
+ * @ipa: - IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ *
+ * There is no need for a matching ipa_cmd_exit() function.
+ */
+int ipa_cmd_init(struct ipa *ipa);
+
+#endif /* _IPA_CMD_H_ */
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
new file mode 100644
index 0000000000..ce82b00fdc
--- /dev/null
+++ b/drivers/net/ipa/ipa_data.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2023 Linaro Ltd.
+ */
+#ifndef _IPA_DATA_H_
+#define _IPA_DATA_H_
+
+#include <linux/types.h>
+
+#include "ipa_version.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/**
+ * DOC: IPA/GSI Configuration Data
+ *
+ * Boot-time configuration data is used to define the configuration of the
+ * IPA and GSI resources to use for a given platform. This data is supplied
+ * via the Device Tree match table, associated with a particular compatible
+ * string. The data defines information about how resources, endpoints and
+ * channels, memory, power and so on are allocated and used for the
+ * platform.
+ *
+ * Resources are data structures used internally by the IPA hardware. The
+ * configuration data defines the number (or limits of the number) of various
+ * types of these resources.
+ *
+ * Endpoint configuration data defines properties of both IPA endpoints and
+ * GSI channels. A channel is a GSI construct, and represents a single
+ * communication path between the IPA and a particular execution environment
+ * (EE), such as the AP or Modem. Each EE has a set of channels associated
+ * with it, and each channel has an ID unique for that EE. For the most part
+ * the only GSI channels of concern to this driver belong to the AP.
+ *
+ * An endpoint is an IPA construct representing a single channel anywhere
+ * in the system. An IPA endpoint ID maps directly to an (EE, channel_id)
+ * pair. Generally, this driver is concerned with only endpoints associated
+ * with the AP, however this will change when support for routing (etc.) is
+ * added. IPA endpoint and GSI channel configuration data are defined
+ * together, establishing the endpoint_id->(EE, channel_id) mapping.
+ *
+ * Endpoint configuration data consists of three parts: properties that
+ * are common to IPA and GSI (EE ID, channel ID, endpoint ID, and direction);
+ * properties associated with the GSI channel; and properties associated with
+ * the IPA endpoint.
+ */
+
+/* The maximum possible number of source or destination resource groups */
+#define IPA_RESOURCE_GROUP_MAX 8
+
+/** enum ipa_qsb_master_id - array index for IPA QSB configuration data */
+enum ipa_qsb_master_id {
+ IPA_QSB_MASTER_DDR,
+ IPA_QSB_MASTER_PCIE,
+};
+
+/**
+ * struct ipa_qsb_data - Qualcomm System Bus configuration data
+ * @max_writes: Maximum outstanding write requests for this master
+ * @max_reads: Maximum outstanding read requests for this master
+ * @max_reads_beats: Max outstanding read bytes in 8-byte "beats" (if non-zero)
+ */
+struct ipa_qsb_data {
+ u8 max_writes;
+ u8 max_reads;
+ u8 max_reads_beats; /* Not present for IPA v3.5.1 */
+};
+
+/**
+ * struct gsi_channel_data - GSI channel configuration data
+ * @tre_count: number of TREs in the channel ring
+ * @event_count: number of slots in the associated event ring
+ * @tlv_count: number of entries in channel's TLV FIFO
+ *
+ * A GSI channel is a unidirectional means of transferring data to or
+ * from (and through) the IPA. A GSI channel has a ring buffer made
+ * up of "transfer ring elements" (TREs) that specify individual data
+ * transfers or IPA immediate commands. TREs are filled by the AP,
+ * and control is passed to IPA hardware by writing the last written
+ * element into a doorbell register.
+ *
+ * When data transfer commands have completed the GSI generates an
+ * event (a structure of data) and optionally signals the AP with
+ * an interrupt. Event structures are implemented by another ring
+ * buffer, directed toward the AP from the IPA.
+ *
+ * The input to a GSI channel is a FIFO of type/length/value (TLV)
+ * elements, and the size of this FIFO limits the number of TREs
+ * that can be included in a single transaction.
+ */
+struct gsi_channel_data {
+ u16 tre_count; /* must be a power of 2 */
+ u16 event_count; /* must be a power of 2 */
+ u8 tlv_count;
+};
+
+/**
+ * struct ipa_endpoint_data - IPA endpoint configuration data
+ * @filter_support: whether endpoint supports filtering
+ * @config: hardware configuration
+ *
+ * Not all endpoints support the IPA filtering capability. A filter table
+ * defines the filters to apply for those endpoints that support it. The
+ * AP is responsible for initializing this table, and it must include entries
+ * for non-AP endpoints. For this reason we define *all* endpoints used
+ * in the system, and indicate whether they support filtering.
+ *
+ * The remaining endpoint configuration data specifies default hardware
+ * configuration values that apply only to AP endpoints.
+ */
+struct ipa_endpoint_data {
+ bool filter_support;
+ struct ipa_endpoint_config config;
+};
+
+/**
+ * struct ipa_gsi_endpoint_data - GSI channel/IPA endpoint data
+ * @ee_id: GSI execution environment ID
+ * @channel_id: GSI channel ID
+ * @endpoint_id: IPA endpoint ID
+ * @toward_ipa: direction of data transfer
+ * @channel: GSI channel configuration data (see above)
+ * @endpoint: IPA endpoint configuration data (see above)
+ */
+struct ipa_gsi_endpoint_data {
+ u8 ee_id; /* enum gsi_ee_id */
+ u8 channel_id;
+ u8 endpoint_id;
+ bool toward_ipa;
+
+ struct gsi_channel_data channel;
+ struct ipa_endpoint_data endpoint;
+};
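+
+/* Illustrative sketch of one endpoint entry as it might appear in a
+ * platform data file (data/ipa_data-v*.c); the channel and endpoint
+ * numbers and ring sizes below are hypothetical:
+ *
+ *	[IPA_ENDPOINT_AP_COMMAND_TX] = {
+ *		.ee_id		= GSI_EE_AP,
+ *		.channel_id	= 1,
+ *		.endpoint_id	= 6,
+ *		.toward_ipa	= true,
+ *		.channel = {
+ *			.tre_count	= 256,
+ *			.event_count	= 256,
+ *			.tlv_count	= 20,
+ *		},
+ *		.endpoint = {
+ *			.config = {
+ *				.dma_mode	= true,
+ *				.dma_endpoint	= IPA_ENDPOINT_AP_LAN_RX,
+ *			},
+ *		},
+ *	},
+ */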
+
+/**
+ * struct ipa_resource_limits - minimum and maximum resource counts
+ * @min: minimum number of resources of a given type
+ * @max: maximum number of resources of a given type
+ */
+struct ipa_resource_limits {
+ u32 min;
+ u32 max;
+};
+
+/**
+ * struct ipa_resource - resource group source or destination resource usage
+ * @limits: array of resource limits, indexed by group
+ */
+struct ipa_resource {
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_MAX];
+};
+
+/**
+ * struct ipa_resource_data - IPA resource configuration data
+ * @rsrc_group_src_count: number of source resource groups supported
+ * @rsrc_group_dst_count: number of destination resource groups supported
+ * @resource_src_count: number of entries in the resource_src array
+ * @resource_src: source endpoint group resources
+ * @resource_dst_count: number of entries in the resource_dst array
+ * @resource_dst: destination endpoint group resources
+ *
+ * In order to manage quality of service between endpoints, certain resources
+ * required for operation are allocated to groups of endpoints. Generally
+ * this information is invisible to the AP, but the AP is responsible for
+ * programming it at initialization time, so we specify it here.
+ */
+struct ipa_resource_data {
+ u32 rsrc_group_src_count;
+ u32 rsrc_group_dst_count;
+ u32 resource_src_count;
+ const struct ipa_resource *resource_src;
+ u32 resource_dst_count;
+ const struct ipa_resource *resource_dst;
+};
+
+/**
+ * struct ipa_mem_data - description of IPA memory regions
+ * @local_count: number of regions defined in the local[] array
+ * @local: array of IPA-local memory region descriptors
+ * @imem_addr: physical address of IPA region within IMEM
+ * @imem_size: size in bytes of IPA IMEM region
+ * @smem_id: item identifier for IPA region within SMEM memory
+ * @smem_size: size in bytes of the IPA SMEM region
+ */
+struct ipa_mem_data {
+ u32 local_count;
+ const struct ipa_mem *local;
+ u32 imem_addr;
+ u32 imem_size;
+ u32 smem_id;
+ u32 smem_size;
+};
+
+/**
+ * struct ipa_interconnect_data - description of IPA interconnect bandwidths
+ * @name: Interconnect name (matches interconnect-name in DT)
+ * @peak_bandwidth: Peak interconnect bandwidth (in 1000 byte/sec units)
+ * @average_bandwidth: Average interconnect bandwidth (in 1000 byte/sec units)
+ */
+struct ipa_interconnect_data {
+ const char *name;
+ u32 peak_bandwidth;
+ u32 average_bandwidth;
+};
+
+/**
+ * struct ipa_power_data - description of IPA power configuration data
+ * @core_clock_rate: Core clock rate (Hz)
+ * @interconnect_count: Number of entries in the interconnect_data array
+ * @interconnect_data: IPA interconnect configuration data
+ */
+struct ipa_power_data {
+ u32 core_clock_rate;
+ u32 interconnect_count; /* # entries in interconnect_data[] */
+ const struct ipa_interconnect_data *interconnect_data;
+};
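+
+/* Illustrative sketch (hypothetical rates): interconnect bandwidths are
+ * expressed in units of 1000 bytes/second, so a 600 MB/s peak becomes
+ * 600000, and core_clock_rate is in Hz:
+ *
+ *	static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ *		{
+ *			.name			= "memory",
+ *			.peak_bandwidth		= 600000,
+ *			.average_bandwidth	= 80000,
+ *		},
+ *	};
+ *
+ *	static const struct ipa_power_data ipa_power_data = {
+ *		.core_clock_rate	= 75 * 1000 * 1000,
+ *		.interconnect_count	= ARRAY_SIZE(ipa_interconnect_data),
+ *		.interconnect_data	= ipa_interconnect_data,
+ *	};
+ */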
+
+/**
+ * struct ipa_data - combined IPA/GSI configuration data
+ * @version: IPA hardware version
+ * @backward_compat: BCR register value (prior to IPA v4.5 only)
+ * @qsb_count: number of entries in the qsb_data array
+ * @qsb_data: Qualcomm System Bus configuration data
+ * @modem_route_count: number of modem entries in a routing table
+ * @endpoint_count: number of entries in the endpoint_data array
+ * @endpoint_data: IPA endpoint/GSI channel data
+ * @resource_data: IPA resource configuration data
+ * @mem_data: IPA memory region data
+ * @power_data: IPA power data
+ */
+struct ipa_data {
+ enum ipa_version version;
+ u32 backward_compat;
+ u32 qsb_count; /* number of entries in qsb_data[] */
+ const struct ipa_qsb_data *qsb_data;
+ u32 modem_route_count;
+ u32 endpoint_count; /* number of entries in endpoint_data[] */
+ const struct ipa_gsi_endpoint_data *endpoint_data;
+ const struct ipa_resource_data *resource_data;
+ const struct ipa_mem_data *mem_data;
+ const struct ipa_power_data *power_data;
+};
+
+extern const struct ipa_data ipa_data_v3_1;
+extern const struct ipa_data ipa_data_v3_5_1;
+extern const struct ipa_data ipa_data_v4_2;
+extern const struct ipa_data ipa_data_v4_5;
+extern const struct ipa_data ipa_data_v4_7;
+extern const struct ipa_data ipa_data_v4_9;
+extern const struct ipa_data ipa_data_v4_11;
+extern const struct ipa_data ipa_data_v5_0;
+
+#endif /* _IPA_DATA_H_ */
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
new file mode 100644
index 0000000000..afa1d56d90
--- /dev/null
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -0,0 +1,2196 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2023 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+#include <linux/if_rmnet.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+#include "ipa_modem.h"
+#include "ipa_table.h"
+#include "ipa_gsi.h"
+#include "ipa_power.h"
+
+/* Hardware is told about receive buffers once a "batch" has been queued */
+#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
+
+/* The amount of RX buffer space consumed by standard skb overhead */
+#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
+
+/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
+#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
+
+#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
+
+/** enum ipa_status_opcode - IPA status opcode field hardware values */
+enum ipa_status_opcode { /* *Not* a bitmask */
+ IPA_STATUS_OPCODE_PACKET = 1,
+ IPA_STATUS_OPCODE_NEW_RULE_PACKET = 2,
+ IPA_STATUS_OPCODE_DROPPED_PACKET = 4,
+ IPA_STATUS_OPCODE_SUSPENDED_PACKET = 8,
+ IPA_STATUS_OPCODE_LOG = 16,
+ IPA_STATUS_OPCODE_DCMP = 32,
+ IPA_STATUS_OPCODE_PACKET_2ND_PASS = 64,
+};
+
+/** enum ipa_status_exception - IPA status exception field hardware values */
+enum ipa_status_exception { /* *Not* a bitmask */
+ /* 0 means no exception */
+ IPA_STATUS_EXCEPTION_DEAGGR = 1,
+ IPA_STATUS_EXCEPTION_IPTYPE = 4,
+ IPA_STATUS_EXCEPTION_PACKET_LENGTH = 8,
+ IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 16,
+ IPA_STATUS_EXCEPTION_SW_FILTER = 32,
+ IPA_STATUS_EXCEPTION_NAT = 64, /* IPv4 */
+ IPA_STATUS_EXCEPTION_IPV6_CONN_TRACK = 64, /* IPv6 */
+ IPA_STATUS_EXCEPTION_UC = 128,
+ IPA_STATUS_EXCEPTION_INVALID_ENDPOINT = 129,
+ IPA_STATUS_EXCEPTION_HEADER_INSERT = 136,
+ IPA_STATUS_EXCEPTION_CHEKCSUM = 229,
+};
+
+/** enum ipa_status_mask - IPA status mask field bitmask hardware values */
+enum ipa_status_mask {
+ IPA_STATUS_MASK_FRAG_PROCESS = BIT(0),
+ IPA_STATUS_MASK_FILT_PROCESS = BIT(1),
+ IPA_STATUS_MASK_NAT_PROCESS = BIT(2),
+ IPA_STATUS_MASK_ROUTE_PROCESS = BIT(3),
+ IPA_STATUS_MASK_TAG_VALID = BIT(4),
+ IPA_STATUS_MASK_FRAGMENT = BIT(5),
+ IPA_STATUS_MASK_FIRST_FRAGMENT = BIT(6),
+ IPA_STATUS_MASK_V4 = BIT(7),
+ IPA_STATUS_MASK_CKSUM_PROCESS = BIT(8),
+ IPA_STATUS_MASK_AGGR_PROCESS = BIT(9),
+ IPA_STATUS_MASK_DEST_EOT = BIT(10),
+ IPA_STATUS_MASK_DEAGGR_PROCESS = BIT(11),
+ IPA_STATUS_MASK_DEAGG_FIRST = BIT(12),
+ IPA_STATUS_MASK_SRC_EOT = BIT(13),
+ IPA_STATUS_MASK_PREV_EOT = BIT(14),
+ IPA_STATUS_MASK_BYTE_LIMIT = BIT(15),
+};
+
+/* Special IPA filter/router rule field value indicating "rule miss" */
+#define IPA_STATUS_RULE_MISS 0x3ff /* 10-bit filter/router rule fields */
+
+/** The IPA status nat_type field uses enum ipa_nat_type hardware values */
+
+/* enum ipa_status_field_id - IPA packet status structure field identifiers */
+enum ipa_status_field_id {
+ STATUS_OPCODE, /* enum ipa_status_opcode */
+ STATUS_EXCEPTION, /* enum ipa_status_exception */
+ STATUS_MASK, /* enum ipa_status_mask (bitmask) */
+ STATUS_LENGTH,
+ STATUS_SRC_ENDPOINT,
+ STATUS_DST_ENDPOINT,
+ STATUS_METADATA,
+ STATUS_FILTER_LOCAL, /* Boolean */
+ STATUS_FILTER_HASH, /* Boolean */
+ STATUS_FILTER_GLOBAL, /* Boolean */
+ STATUS_FILTER_RETAIN, /* Boolean */
+ STATUS_FILTER_RULE_INDEX,
+ STATUS_ROUTER_LOCAL, /* Boolean */
+ STATUS_ROUTER_HASH, /* Boolean */
+ STATUS_UCP, /* Boolean */
+ STATUS_ROUTER_TABLE,
+ STATUS_ROUTER_RULE_INDEX,
+ STATUS_NAT_HIT, /* Boolean */
+ STATUS_NAT_INDEX,
+ STATUS_NAT_TYPE, /* enum ipa_nat_type */
+ STATUS_TAG_LOW32, /* Low-order 32 bits of 48-bit tag */
+ STATUS_TAG_HIGH16, /* High-order 16 bits of 48-bit tag */
+ STATUS_SEQUENCE,
+ STATUS_TIME_OF_DAY,
+ STATUS_HEADER_LOCAL, /* Boolean */
+ STATUS_HEADER_OFFSET,
+ STATUS_FRAG_HIT, /* Boolean */
+ STATUS_FRAG_RULE_INDEX,
+};
+
+/* Size in bytes of an IPA packet status structure */
+#define IPA_STATUS_SIZE sizeof(__le32[8])
+
+/* IPA status structure decoder; looks up field values for a structure */
+static u32 ipa_status_extract(struct ipa *ipa, const void *data,
+ enum ipa_status_field_id field)
+{
+ enum ipa_version version = ipa->version;
+ const __le32 *word = data;
+
+ switch (field) {
+ case STATUS_OPCODE:
+ return le32_get_bits(word[0], GENMASK(7, 0));
+ case STATUS_EXCEPTION:
+ return le32_get_bits(word[0], GENMASK(15, 8));
+ case STATUS_MASK:
+ return le32_get_bits(word[0], GENMASK(31, 16));
+ case STATUS_LENGTH:
+ return le32_get_bits(word[1], GENMASK(15, 0));
+ case STATUS_SRC_ENDPOINT:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[1], GENMASK(20, 16));
+ return le32_get_bits(word[1], GENMASK(23, 16));
+ /* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
+ /* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
+ case STATUS_DST_ENDPOINT:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[1], GENMASK(28, 24));
+ return le32_get_bits(word[7], GENMASK(23, 16));
+ /* Status word 1, bits 29-31 are reserved */
+ case STATUS_METADATA:
+ return le32_to_cpu(word[2]);
+ case STATUS_FILTER_LOCAL:
+ return le32_get_bits(word[3], GENMASK(0, 0));
+ case STATUS_FILTER_HASH:
+ return le32_get_bits(word[3], GENMASK(1, 1));
+ case STATUS_FILTER_GLOBAL:
+ return le32_get_bits(word[3], GENMASK(2, 2));
+ case STATUS_FILTER_RETAIN:
+ return le32_get_bits(word[3], GENMASK(3, 3));
+ case STATUS_FILTER_RULE_INDEX:
+ return le32_get_bits(word[3], GENMASK(13, 4));
+ /* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
+ case STATUS_ROUTER_LOCAL:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(14, 14));
+ return le32_get_bits(word[1], GENMASK(27, 27));
+ case STATUS_ROUTER_HASH:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(15, 15));
+ return le32_get_bits(word[1], GENMASK(28, 28));
+ case STATUS_UCP:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(16, 16));
+ return le32_get_bits(word[7], GENMASK(31, 31));
+ case STATUS_ROUTER_TABLE:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(21, 17));
+ return le32_get_bits(word[3], GENMASK(21, 14));
+ case STATUS_ROUTER_RULE_INDEX:
+ return le32_get_bits(word[3], GENMASK(31, 22));
+ case STATUS_NAT_HIT:
+ return le32_get_bits(word[4], GENMASK(0, 0));
+ case STATUS_NAT_INDEX:
+ return le32_get_bits(word[4], GENMASK(13, 1));
+ case STATUS_NAT_TYPE:
+ return le32_get_bits(word[4], GENMASK(15, 14));
+ case STATUS_TAG_LOW32:
+ return le32_get_bits(word[4], GENMASK(31, 16)) |
+ (le32_get_bits(word[5], GENMASK(15, 0)) << 16);
+ case STATUS_TAG_HIGH16:
+ return le32_get_bits(word[5], GENMASK(31, 16));
+ case STATUS_SEQUENCE:
+ return le32_get_bits(word[6], GENMASK(7, 0));
+ case STATUS_TIME_OF_DAY:
+ return le32_get_bits(word[6], GENMASK(31, 8));
+ case STATUS_HEADER_LOCAL:
+ return le32_get_bits(word[7], GENMASK(0, 0));
+ case STATUS_HEADER_OFFSET:
+ return le32_get_bits(word[7], GENMASK(10, 1));
+ case STATUS_FRAG_HIT:
+ return le32_get_bits(word[7], GENMASK(11, 11));
+ case STATUS_FRAG_RULE_INDEX:
+ return le32_get_bits(word[7], GENMASK(15, 12));
+ /* Status word 7, bits 16-30 are reserved */
+ /* Status word 7, bit 31 is reserved (not IPA v5.0+) */
+ default:
+ WARN(true, "%s: bad field_id %u\n", __func__, field);
+ return 0;
+ }
+}
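+
+/* Illustrative sketch: given a pointer "data" to one IPA_STATUS_SIZE
+ * status structure taken from a receive buffer, an RX handler decodes
+ * individual fields like this:
+ *
+ *	u32 opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
+ *	u32 length = ipa_status_extract(ipa, data, STATUS_LENGTH);
+ *	u32 src_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
+ */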
+
+/* Compute the aggregation size value to use for a given buffer size */
+static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
+{
+ /* A hard aggregation limit will not be crossed; aggregation closes
+ * if saving incoming data would cross the hard byte limit boundary.
+ *
+ * With a soft limit, aggregation closes *after* the size boundary
+ * has been crossed. In that case the limit must leave enough space
+ * after that limit to receive a full MTU of data plus overhead.
+ */
+ if (!aggr_hard_limit)
+ rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+
+ /* The byte limit is encoded as a number of kilobytes */
+
+ return rx_buffer_size / SZ_1K;
+}
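+
+/* For example (hypothetical buffer size): an 8192-byte receive buffer
+ * with a hard aggregation limit yields 8 (KB). With a soft limit, space
+ * for one more MTU plus overhead is subtracted first, so the resulting
+ * kilobyte value is correspondingly smaller.
+ */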
+
+static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *all_data,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ const struct ipa_gsi_endpoint_data *other_data;
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_endpoint_name other_name;
+
+ if (ipa_gsi_endpoint_data_empty(data))
+ return true;
+
+ if (!data->toward_ipa) {
+ const struct ipa_endpoint_rx *rx_config;
+ const struct reg *reg;
+ u32 buffer_size;
+ u32 aggr_size;
+ u32 limit;
+
+ if (data->endpoint.filter_support) {
+ dev_err(dev, "filtering not supported for "
+ "RX endpoint %u\n",
+ data->endpoint_id);
+ return false;
+ }
+
+ /* Nothing more to check for non-AP RX */
+ if (data->ee_id != GSI_EE_AP)
+ return true;
+
+ rx_config = &data->endpoint.config.rx;
+
+ /* The buffer size must hold an MTU plus overhead */
+ buffer_size = rx_config->buffer_size;
+ limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+ if (buffer_size < limit) {
+ dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
+ data->endpoint_id, buffer_size, limit);
+ return false;
+ }
+
+ if (!data->endpoint.config.aggregation) {
+ bool result = true;
+
+ /* No aggregation; check for bogus aggregation data */
+ if (rx_config->aggr_time_limit) {
+ dev_err(dev,
+ "time limit with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ if (rx_config->aggr_hard_limit) {
+ dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ if (rx_config->aggr_close_eof) {
+ dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ return result; /* Nothing more to check */
+ }
+
+ /* For an endpoint supporting receive aggregation, the byte
+ * limit defines the point at which aggregation closes. This
+ * check ensures the receive buffer size doesn't result in a
+ * limit that exceeds what's representable in the aggregation
+ * byte limit field.
+ */
+ aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
+ rx_config->aggr_hard_limit);
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
+
+ limit = reg_field_max(reg, BYTE_LIMIT);
+ if (aggr_size > limit) {
+ dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
+ data->endpoint_id, aggr_size, limit);
+
+ return false;
+ }
+
+ return true; /* Nothing more to check for RX */
+ }
+
+ /* Starting with IPA v4.5 sequencer replication is obsolete */
+ if (ipa->version >= IPA_VERSION_4_5) {
+ if (data->endpoint.config.tx.seq_rep_type) {
+ dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n",
+ data->endpoint_id);
+ return false;
+ }
+ }
+
+ if (data->endpoint.config.status_enable) {
+ other_name = data->endpoint.config.tx.status_endpoint;
+ if (other_name >= count) {
+ dev_err(dev, "status endpoint name %u out of range "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ /* Status endpoint must be defined... */
+ other_data = &all_data[other_name];
+ if (ipa_gsi_endpoint_data_empty(other_data)) {
+ dev_err(dev, "DMA endpoint name %u undefined "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ /* ...and has to be an RX endpoint... */
+ if (other_data->toward_ipa) {
+ dev_err(dev,
+ "status endpoint for endpoint %u not RX\n",
+ data->endpoint_id);
+ return false;
+ }
+
+ /* ...and if it's to be an AP endpoint... */
+ if (other_data->ee_id == GSI_EE_AP) {
+ /* ...make sure it has status enabled. */
+ if (!other_data->endpoint.config.status_enable) {
+ dev_err(dev,
+ "status not enabled for endpoint %u\n",
+ other_data->endpoint_id);
+ return false;
+ }
+ }
+ }
+
+ if (data->endpoint.config.dma_mode) {
+ other_name = data->endpoint.config.dma_endpoint;
+ if (other_name >= count) {
+ dev_err(dev, "DMA endpoint name %u out of range "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ other_data = &all_data[other_name];
+ if (ipa_gsi_endpoint_data_empty(other_data)) {
+ dev_err(dev, "DMA endpoint name %u undefined "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Validate endpoint configuration data. Return max defined endpoint ID */
+static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ const struct ipa_gsi_endpoint_data *dp = data;
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_endpoint_name name;
+ u32 max;
+
+ if (count > IPA_ENDPOINT_COUNT) {
+ dev_err(dev, "too many endpoints specified (%u > %u)\n",
+ count, IPA_ENDPOINT_COUNT);
+ return 0;
+ }
+
+ /* Make sure needed endpoints have defined data */
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
+ dev_err(dev, "command TX endpoint not defined\n");
+ return 0;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
+ dev_err(dev, "LAN RX endpoint not defined\n");
+ return 0;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
+ dev_err(dev, "AP->modem TX endpoint not defined\n");
+ return 0;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
+ dev_err(dev, "AP<-modem RX endpoint not defined\n");
+ return 0;
+ }
+
+ max = 0;
+ for (name = 0; name < count; name++, dp++) {
+ if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
+ return 0;
+ max = max_t(u32, max, dp->endpoint_id);
+ }
+
+ return max;
+}
+
+/* Allocate a transaction to use on a non-command endpoint */
+static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
+ u32 tre_count)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 channel_id = endpoint->channel_id;
+ enum dma_data_direction direction;
+
+ direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
+}
+
+/* suspend_delay represents suspend for RX, delay for TX endpoints.
+ * Note that suspend is not supported starting with IPA v4.0, and
+ * delay mode should not be used starting with IPA v4.2.
+ */
+static bool
+ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
+{
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 field_id;
+ u32 offset;
+ bool state;
+ u32 mask;
+ u32 val;
+
+ if (endpoint->toward_ipa)
+ WARN_ON(ipa->version >= IPA_VERSION_4_2);
+ else
+ WARN_ON(ipa->version >= IPA_VERSION_4_0);
+
+ reg = ipa_reg(ipa, ENDP_INIT_CTRL);
+ offset = reg_n_offset(reg, endpoint->endpoint_id);
+ val = ioread32(ipa->reg_virt + offset);
+
+ field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
+ mask = reg_bit(reg, field_id);
+
+ state = !!(val & mask);
+
+ /* Don't bother if it's already in the requested state */
+ if (suspend_delay != state) {
+ val ^= mask;
+ iowrite32(val, ipa->reg_virt + offset);
+ }
+
+ return state;
+}
+
+/* We don't care what the previous state was for delay mode */
+static void
+ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
+{
+ /* Delay mode should not be used for IPA v4.2+ */
+ WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
+ WARN_ON(!endpoint->toward_ipa);
+
+ (void)ipa_endpoint_init_ctrl(endpoint, enable);
+}
+
+static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ u32 unit = endpoint_id / 32;
+ const struct reg *reg;
+ u32 val;
+
+ WARN_ON(!test_bit(endpoint_id, ipa->available));
+
+ reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
+
+ return !!(val & BIT(endpoint_id % 32));
+}
+
+static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 mask = BIT(endpoint_id % 32);
+ struct ipa *ipa = endpoint->ipa;
+ u32 unit = endpoint_id / 32;
+ const struct reg *reg;
+
+ WARN_ON(!test_bit(endpoint_id, ipa->available));
+
+ reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
+ iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
+}
+
+/**
+ * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
+ * @endpoint: Endpoint on which to emulate a suspend
+ *
+ * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
+ * with an open aggregation frame. This is to work around a hardware
+ * issue in IPA version 3.5.1 where the suspend interrupt will not be
+ * generated when it should be.
+ */
+static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
+{
+ struct ipa *ipa = endpoint->ipa;
+
+ if (!endpoint->config.aggregation)
+ return;
+
+ /* Nothing to do if the endpoint doesn't have aggregation open */
+ if (!ipa_endpoint_aggr_active(endpoint))
+ return;
+
+ /* Force close aggregation */
+ ipa_endpoint_force_close(endpoint);
+
+ ipa_interrupt_simulate_suspend(ipa->interrupt);
+}
+
+/* Returns previous suspend state (true means suspend was enabled) */
+static bool
+ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
+{
+ bool suspended;
+
+ if (endpoint->ipa->version >= IPA_VERSION_4_0)
+ return enable; /* For IPA v4.0+, no change made */
+
+ WARN_ON(endpoint->toward_ipa);
+
+ suspended = ipa_endpoint_init_ctrl(endpoint, enable);
+
+ /* A client suspended with an open aggregation frame will not
+ * generate a SUSPEND IPA interrupt. If enabling suspend, have
+ * ipa_endpoint_suspend_aggr() handle this.
+ */
+ if (enable && !suspended)
+ ipa_endpoint_suspend_aggr(endpoint);
+
+ return suspended;
+}
+
+/* Put all modem RX endpoints into suspend mode, and stop transmission
+ * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
+ * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
+ * control instead.
+ */
+void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
+{
+ u32 endpoint_id = 0;
+
+ while (endpoint_id < ipa->endpoint_count) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
+
+ if (endpoint->ee_id != GSI_EE_MODEM)
+ continue;
+
+ if (!endpoint->toward_ipa)
+ (void)ipa_endpoint_program_suspend(endpoint, enable);
+ else if (ipa->version < IPA_VERSION_4_2)
+ ipa_endpoint_program_delay(endpoint, enable);
+ else
+ gsi_modem_channel_flow_control(&ipa->gsi,
+ endpoint->channel_id,
+ enable);
+ }
+}
+
+/* Reset all modem endpoints to use the default exception endpoint */
+int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
+{
+ struct gsi_trans *trans;
+ u32 endpoint_id;
+ u32 count;
+
+ /* We need one command per modem TX endpoint, plus the commands
+ * that clear the pipeline.
+ */
+ count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
+ trans = ipa_cmd_trans_alloc(ipa, count);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction to reset modem exception endpoints\n");
+ return -EBUSY;
+ }
+
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
+ struct ipa_endpoint *endpoint;
+ const struct reg *reg;
+ u32 offset;
+
+ /* We only reset modem TX endpoints */
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
+ continue;
+
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = reg_n_offset(reg, endpoint_id);
+
+ /* Value written is 0, and all bits are updated. That
+ * means status is disabled on the endpoint, and as a
+ * result all other fields in the register are ignored.
+ */
+ ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
+ }
+
+ ipa_cmd_pipeline_clear_add(trans);
+
+ gsi_trans_commit_wait(trans);
+
+ ipa_cmd_pipeline_clear_wait(ipa);
+
+ return 0;
+}
+
+static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ enum ipa_cs_offload_en enabled;
+ const struct reg *reg;
+ u32 val = 0;
+
+ reg = ipa_reg(ipa, ENDP_INIT_CFG);
+ /* FRAG_OFFLOAD_EN is 0 */
+ if (endpoint->config.checksum) {
+ enum ipa_version version = ipa->version;
+
+ if (endpoint->toward_ipa) {
+ u32 off;
+
+ /* Checksum header offset is in 4-byte units */
+ off = sizeof(struct rmnet_map_header) / sizeof(u32);
+ val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
+
+ enabled = version < IPA_VERSION_4_5
+ ? IPA_CS_OFFLOAD_UL
+ : IPA_CS_OFFLOAD_INLINE;
+ } else {
+ enabled = version < IPA_VERSION_4_5
+ ? IPA_CS_OFFLOAD_DL
+ : IPA_CS_OFFLOAD_INLINE;
+ }
+ } else {
+ enabled = IPA_CS_OFFLOAD_NONE;
+ }
+ val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
+ /* CS_GEN_QMB_MASTER_SEL is 0 */
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
+}
+
+static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 val;
+
+ if (!endpoint->toward_ipa)
+ return;
+
+ reg = ipa_reg(ipa, ENDP_INIT_NAT);
+ val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
+}
+
+static u32
+ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
+{
+ u32 header_size = sizeof(struct rmnet_map_header);
+
+ /* Without checksum offload, we just have the MAP header */
+ if (!endpoint->config.checksum)
+ return header_size;
+
+ if (version < IPA_VERSION_4_5) {
+ /* Checksum header inserted for AP TX endpoints only */
+ if (endpoint->toward_ipa)
+ header_size += sizeof(struct rmnet_map_ul_csum_header);
+ } else {
+ /* Checksum header is used in both directions */
+ header_size += sizeof(struct rmnet_map_v5_csum_header);
+ }
+
+ return header_size;
+}
+
+/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
+static u32 ipa_header_size_encode(enum ipa_version version,
+ const struct reg *reg, u32 header_size)
+{
+ u32 field_max = reg_field_max(reg, HDR_LEN);
+ u32 val;
+
+ /* We know field_max can be used as a mask (2^n - 1) */
+ val = reg_encode(reg, HDR_LEN, header_size & field_max);
+ if (version < IPA_VERSION_4_5) {
+ WARN_ON(header_size > field_max);
+ return val;
+ }
+
+ /* IPA v4.5 adds a few more most-significant bits */
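+ /* For example, with a 6-bit HDR_LEN field (max 63), a 70 byte header
+ * would be encoded as HDR_LEN 6 and HDR_LEN_MSB 1 (70 = (1 << 6) + 6).
+ */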
+ header_size >>= hweight32(field_max);
+ WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
+ val |= reg_encode(reg, HDR_LEN_MSB, header_size);
+
+ return val;
+}
+
+/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
+static u32 ipa_metadata_offset_encode(enum ipa_version version,
+ const struct reg *reg, u32 offset)
+{
+ u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
+ u32 val;
+
+ /* We know field_max can be used as a mask (2^n - 1) */
+ val = reg_encode(reg, HDR_OFST_METADATA, offset);
+ if (version < IPA_VERSION_4_5) {
+ WARN_ON(offset > field_max);
+ return val;
+ }
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ offset >>= hweight32(field_max);
+ WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
+ val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
+
+ return val;
+}
+
+/**
+ * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
+ * @endpoint: Endpoint pointer
+ *
+ * We program QMAP endpoints so each packet received is preceded by a QMAP
+ * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
+ * packet size field, and we have the IPA hardware populate both for each
+ * received packet. The header is configured (in the HDR_EXT register)
+ * to use big endian format.
+ *
+ * The packet size is written into the QMAP header's pkt_len field. That
+ * location is defined here using the HDR_OFST_PKT_SIZE field.
+ *
+ * The mux_id comes from a 4-byte metadata value supplied with each packet
+ * by the modem. It is *not* a QMAP header, but it does contain the mux_id
+ * value that we want, in its low-order byte. A bitmask defined in the
+ * endpoint's METADATA_MASK register defines which byte within the modem
+ * metadata contains the mux_id. And the OFST_METADATA field programmed
+ * here indicates where the extracted byte should be placed within the QMAP
+ * header.
+ */
+static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 val = 0;
+
+ reg = ipa_reg(ipa, ENDP_INIT_HDR);
+ if (endpoint->config.qmap) {
+ enum ipa_version version = ipa->version;
+ size_t header_size;
+
+ header_size = ipa_qmap_header_size(version, endpoint);
+ val = ipa_header_size_encode(version, reg, header_size);
+
+ /* Define how to fill fields in a received QMAP header */
+ if (!endpoint->toward_ipa) {
+ u32 off; /* Field offset within header */
+
+ /* Where IPA will write the metadata value */
+ off = offsetof(struct rmnet_map_header, mux_id);
+ val |= ipa_metadata_offset_encode(version, reg, off);
+
+ /* Where IPA will write the length */
+ off = offsetof(struct rmnet_map_header, pkt_len);
+ /* Upper bits are stored in HDR_EXT with IPA v4.5 */
+ if (version >= IPA_VERSION_4_5)
+ off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);
+
+ val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
+ val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
+ }
+ /* For QMAP TX, metadata offset is 0 (modem assumes this) */
+ val |= reg_bit(reg, HDR_OFST_METADATA_VALID);
+
+ /* HDR_ADDITIONAL_CONST_LEN is 0 (RX only) */
+ /* HDR_A5_MUX is 0 */
+ /* HDR_LEN_INC_DEAGG_HDR is 0 */
+ /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
+ }
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
+}
+
+static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
+{
+ u32 pad_align = endpoint->config.rx.pad_align;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 val = 0;
+
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
+ if (endpoint->config.qmap) {
+ /* We have a header, so we must specify its endianness */
+ val |= reg_bit(reg, HDR_ENDIANNESS); /* big endian */
+
+ /* A QMAP header contains a 6 bit pad field at offset 0.
+ * The RMNet driver assumes this field is meaningful in
+ * packets it receives, and assumes the header's payload
+ * length includes that padding. The RMNet driver does
+ * *not* pad packets it sends, however, so the pad field
+ * (although 0) should be ignored.
+ */
+ if (!endpoint->toward_ipa) {
+ val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
+ /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
+ val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ }
+ }
+
+ /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
+ if (!endpoint->toward_ipa)
+ val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
+
+ /* IPA v4.5 adds some most-significant bits to a few fields,
+ * two of which are defined in the HDR (not HDR_EXT) register.
+ */
+ if (ipa->version >= IPA_VERSION_4_5) {
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
+ if (endpoint->config.qmap && !endpoint->toward_ipa) {
+ u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ u32 off; /* Field offset within header */
+
+ off = offsetof(struct rmnet_map_header, pkt_len);
+ /* Low bits are in the ENDP_INIT_HDR register */
+ off >>= hweight32(mask);
+ val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
+ /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
+ }
+ }
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
+}
+
+static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 val = 0;
+ u32 offset;
+
+ if (endpoint->toward_ipa)
+ return; /* Register not valid for TX endpoints */
+
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
+ offset = reg_n_offset(reg, endpoint_id);
+
+ /* Note that HDR_ENDIANNESS indicates big endian header fields */
+ if (endpoint->config.qmap)
+ val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
+{
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 offset;
+ u32 val;
+
+ if (!endpoint->toward_ipa)
+ return; /* Register not valid for RX endpoints */
+
+ reg = ipa_reg(ipa, ENDP_INIT_MODE);
+ if (endpoint->config.dma_mode) {
+ enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
+ u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
+
+ val = reg_encode(reg, ENDP_MODE, IPA_DMA);
+ val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
+ } else {
+ val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
+ }
+ /* All other bits unspecified (and 0) */
+
+ offset = reg_n_offset(reg, endpoint->endpoint_id);
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/* For IPA v4.5+, times are expressed using Qtime. A time is represented
+ * at one of several available granularities, which are configured in
+ * ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
+ * generators are set up with different "tick" periods. A Qtime value
+ * encodes a tick count along with an indication of a pulse generator
+ * (which has a fixed tick period). Two pulse generators are always
+ * available to the AP; a third is available starting with IPA v5.0.
+ * This function determines which pulse generator most accurately
+ * represents the time period provided, and returns the tick count to
+ * use to represent that time.
+ */
+static u32
+ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
+{
+ u32 which = 0;
+ u32 ticks;
+
+ /* Pulse generator 0 has 100 microsecond granularity */
+ ticks = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (ticks <= max)
+ goto out;
+
+ /* Pulse generator 1 has millisecond granularity */
+ which = 1;
+ ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
+ if (ticks <= max)
+ goto out;
+
+ if (ipa->version >= IPA_VERSION_5_0) {
+ /* Pulse generator 2 has 10 millisecond granularity */
+ which = 2;
+ ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
+ }
+ WARN_ON(ticks > max);
+out:
+ *select = which;
+
+ return ticks;
+}
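+
+/* As an illustration: with an 8-bit limit field (max 255), a 100 millisecond
+ * (100000 microsecond) period would need 1000 ticks from pulse generator 0,
+ * which exceeds the maximum; pulse generator 1 is used instead, and the
+ * function returns 100 ticks with *select set to 1.
+ */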
+
+/* Encode the aggregation timer limit (microseconds) based on IPA version */
+static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
+ u32 microseconds)
+{
+ u32 ticks;
+ u32 max;
+
+ if (!microseconds)
+ return 0; /* Nothing to compute if time limit is 0 */
+
+ max = reg_field_max(reg, TIME_LIMIT);
+ if (ipa->version >= IPA_VERSION_4_5) {
+ u32 select;
+
+ ticks = ipa_qtime_val(ipa, microseconds, max, &select);
+
+ return reg_encode(reg, AGGR_GRAN_SEL, select) |
+ reg_encode(reg, TIME_LIMIT, ticks);
+ }
+
+ /* We program aggregation granularity in ipa_hardware_config() */
+ ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
+ WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
+ microseconds, max * IPA_AGGR_GRANULARITY);
+
+ return reg_encode(reg, TIME_LIMIT, ticks);
+}
+
+static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 val = 0;
+
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
+ if (endpoint->config.aggregation) {
+ if (!endpoint->toward_ipa) {
+ const struct ipa_endpoint_rx *rx_config;
+ u32 buffer_size;
+ u32 limit;
+
+ rx_config = &endpoint->config.rx;
+ val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
+ val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
+
+ buffer_size = rx_config->buffer_size;
+ limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
+ rx_config->aggr_hard_limit);
+ val |= reg_encode(reg, BYTE_LIMIT, limit);
+
+ limit = rx_config->aggr_time_limit;
+ val |= aggr_time_limit_encode(ipa, reg, limit);
+
+ /* AGGR_PKT_LIMIT is 0 (unlimited) */
+
+ if (rx_config->aggr_close_eof)
+ val |= reg_bit(reg, SW_EOF_ACTIVE);
+ } else {
+ val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
+ val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
+ /* other fields ignored */
+ }
+ /* AGGR_FORCE_CLOSE is 0 */
+ /* AGGR_GRAN_SEL is 0 for IPA v4.5 */
+ } else {
+ val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
+ /* other fields ignored */
+ }
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
+}
+
+/* The head-of-line blocking timer is defined as a tick count. For
+ * IPA version 4.5 the tick count is based on the Qtimer, which is
+ * derived from the 19.2 MHz SoC XO clock. For older IPA versions
+ * each tick represents 128 cycles of the IPA core clock.
+ *
+ * Return the encoded value representing the timeout period provided
+ * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
+ */
+static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
+ u32 microseconds)
+{
+ u32 width;
+ u32 scale;
+ u64 ticks;
+ u64 rate;
+ u32 high;
+ u32 val;
+
+ if (!microseconds)
+ return 0; /* Nothing to compute if timer period is 0 */
+
+ if (ipa->version >= IPA_VERSION_4_5) {
+ u32 max = reg_field_max(reg, TIMER_LIMIT);
+ u32 select;
+ u32 ticks;
+
+ ticks = ipa_qtime_val(ipa, microseconds, max, &select);
+
+ return reg_encode(reg, TIMER_GRAN_SEL, select) |
+ reg_encode(reg, TIMER_LIMIT, ticks);
+ }
+
+ /* Use 64 bit arithmetic to avoid overflow */
+ rate = ipa_core_clock_rate(ipa);
+ ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
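+ /* At a 100 MHz core clock, for example, a 1000 microsecond period
+ * works out to 100000 clock cycles, or 781 ticks of 128 cycles each.
+ */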
+
+ /* We still need the result to fit into the field */
+ WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
+
+ /* IPA v3.5.1 through v4.1 just record the tick count */
+ if (ipa->version < IPA_VERSION_4_2)
+ return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
+
+ /* For IPA v4.2, the tick count is represented by base and
+ * scale fields within the 32-bit timer register, where:
+ * ticks = base << scale;
+ * The best precision is achieved when the base value is as
+ * large as possible. Find the highest set bit in the tick
+ * count, and extract the number of bits in the base field
+ * such that the high bit is included.
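+ * For instance, if the tick count is 0x1234 (fls() returns 13) and the
+ * base field is 5 bits wide, scale is 8 and the encoded base is 0x12,
+ * which represents 0x1200 ticks, close to the value requested.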
+ */
+ high = fls(ticks); /* 1..32 (or warning above) */
+ width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
+ scale = high > width ? high - width : 0;
+ if (scale) {
+ /* If we're scaling, round up to get a closer result */
+ ticks += 1 << (scale - 1);
+ /* High bit was set, so rounding might have affected it */
+ if (fls(ticks) != high)
+ scale++;
+ }
+
+ val = reg_encode(reg, TIMER_SCALE, scale);
+ val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
+
+ return val;
+}
+
+/* If microseconds is 0, timeout is immediate */
+static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
+ u32 microseconds)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 val;
+
+ /* This should only be changed when HOL_BLOCK_EN is disabled */
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
+ val = hol_block_timer_encode(ipa, reg, microseconds);
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
+}
+
+static void
+ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 offset;
+ u32 val;
+
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
+ offset = reg_n_offset(reg, endpoint_id);
+ val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;
+
+ iowrite32(val, ipa->reg_virt + offset);
+
+ /* When enabling, the register must be written twice for IPA v4.5+ */
+ if (enable && ipa->version >= IPA_VERSION_4_5)
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/* Assumes HOL_BLOCK is in disabled state */
+static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
+ u32 microseconds)
+{
+ ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
+ ipa_endpoint_init_hol_block_en(endpoint, true);
+}
+
+static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
+{
+ ipa_endpoint_init_hol_block_en(endpoint, false);
+}
+
+void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
+{
+ u32 endpoint_id = 0;
+
+ while (endpoint_id < ipa->endpoint_count) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
+
+ if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
+ continue;
+
+ ipa_endpoint_init_hol_block_disable(endpoint);
+ ipa_endpoint_init_hol_block_enable(endpoint, 0);
+ }
+}
+
+static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 val = 0;
+
+ if (!endpoint->toward_ipa)
+ return; /* Register not valid for RX endpoints */
+
+ reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
+ /* DEAGGR_HDR_LEN is 0 */
+ /* PACKET_OFFSET_VALID is 0 */
+ /* PACKET_OFFSET_LOCATION is ignored (not valid) */
+ /* MAX_PACKET_LEN is 0 (not enforced) */
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
+}
+
+static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
+{
+ u32 resource_group = endpoint->config.resource_group;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
+ val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
+}
+
+static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 val;
+
+ if (!endpoint->toward_ipa)
+ return; /* Register not valid for RX endpoints */
+
+ reg = ipa_reg(ipa, ENDP_INIT_SEQ);
+
+ /* Low-order byte configures primary packet processing */
+ val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
+
+ /* Second byte (if supported) configures replicated packet processing */
+ if (ipa->version < IPA_VERSION_4_5)
+ val |= reg_encode(reg, SEQ_REP_TYPE,
+ endpoint->config.tx.seq_rep_type);
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
+}
+
+/**
+ * ipa_endpoint_skb_tx() - Transmit a socket buffer
+ * @endpoint: Endpoint pointer
+ * @skb: Socket buffer to send
+ *
+ * Returns: 0 if successful, or a negative error code
+ */
+int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
+{
+ struct gsi_trans *trans;
+ u32 nr_frags;
+ int ret;
+
+ /* Make sure source endpoint's TLV FIFO has enough entries to
+ * hold the linear portion of the skb and all its fragments.
+ * If not, see if we can linearize it before giving up.
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (nr_frags > endpoint->skb_frag_max) {
+ if (skb_linearize(skb))
+ return -E2BIG;
+ nr_frags = 0;
+ }
+
+ trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
+ if (!trans)
+ return -EBUSY;
+
+ ret = gsi_trans_skb_add(trans, skb);
+ if (ret)
+ goto err_trans_free;
+ trans->data = skb; /* transaction owns skb now */
+
+ gsi_trans_commit(trans, !netdev_xmit_more());
+
+ return 0;
+
+err_trans_free:
+ gsi_trans_free(trans);
+
+ return -ENOMEM;
+}
+
+static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 val = 0;
+
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ if (endpoint->config.status_enable) {
+ val |= reg_bit(reg, STATUS_EN);
+ if (endpoint->toward_ipa) {
+ enum ipa_endpoint_name name;
+ u32 status_endpoint_id;
+
+ name = endpoint->config.tx.status_endpoint;
+ status_endpoint_id = ipa->name_map[name]->endpoint_id;
+
+ val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
+ }
+ /* STATUS_LOCATION is 0, meaning IPA packet status
+ * precedes the packet (not present for IPA v4.5+)
+ */
+ /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
+ }
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
+}
+
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ struct page *page;
+ u32 buffer_size;
+ u32 offset;
+ u32 len;
+ int ret;
+
+ buffer_size = endpoint->config.rx.buffer_size;
+ page = dev_alloc_pages(get_order(buffer_size));
+ if (!page)
+ return -ENOMEM;
+
+ /* Offset the buffer to make space for skb headroom */
+ offset = NET_SKB_PAD;
+ len = buffer_size - offset;
+
+ ret = gsi_trans_page_add(trans, page, len, offset);
+ if (ret)
+ put_page(page);
+ else
+ trans->data = page; /* transaction owns page now */
+
+ return ret;
+}
+
+/**
+ * ipa_endpoint_replenish() - Replenish endpoint receive buffers
+ * @endpoint: Endpoint to be replenished
+ *
+ * The IPA hardware can hold a fixed number of receive buffers for an RX
+ * endpoint, based on the number of entries in the underlying channel ring
+ * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
+ * more receive buffers can be supplied to the hardware. Replenishing for
+ * an endpoint can be disabled, in which case buffers are not queued to
+ * the hardware.
+ */
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
+{
+ struct gsi_trans *trans;
+
+ if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
+ return;
+
+ /* Skip it if it's already active */
+ if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
+ return;
+
+ while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
+ bool doorbell;
+
+ if (ipa_endpoint_replenish_one(endpoint, trans))
+ goto try_again_later;
+
+ /* Ring the doorbell if we've got a full batch */
+ doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
+ gsi_trans_commit(trans, doorbell);
+ }
+
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+
+ return;
+
+try_again_later:
+ gsi_trans_free(trans);
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+
+ /* Whenever a receive buffer transaction completes we'll try to
+ * replenish again. It's unlikely, but if we fail to supply even
+ * one buffer, nothing will trigger another replenish attempt.
+ * If the hardware has no receive buffers queued, schedule work to
+ * try replenishing again.
+ */
+ if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
+ schedule_delayed_work(&endpoint->replenish_work,
+ msecs_to_jiffies(1));
+}
+
+static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
+{
+ set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+
+ /* Start replenishing if hardware currently has no buffers */
+ if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
+ ipa_endpoint_replenish(endpoint);
+}
+
+static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
+{
+ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+}
+
+static void ipa_endpoint_replenish_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
+
+ ipa_endpoint_replenish(endpoint);
+}
+
+static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
+ void *data, u32 len, u32 extra)
+{
+ struct sk_buff *skb;
+
+ if (!endpoint->netdev)
+ return;
+
+ skb = __dev_alloc_skb(len, GFP_ATOMIC);
+ if (skb) {
+ /* Copy the data into the socket buffer and receive it */
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+ skb->truesize += extra;
+ }
+
+ ipa_modem_skb_rx(endpoint->netdev, skb);
+}
+
+static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
+ struct page *page, u32 len)
+{
+ u32 buffer_size = endpoint->config.rx.buffer_size;
+ struct sk_buff *skb;
+
+ /* Nothing to do if there's no netdev */
+ if (!endpoint->netdev)
+ return false;
+
+ WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
+
+ skb = build_skb(page_address(page), buffer_size);
+ if (skb) {
+ /* Reserve the headroom and account for the data */
+ skb_reserve(skb, NET_SKB_PAD);
+ skb_put(skb, len);
+ }
+
+ /* Receive the buffer (or record drop if unable to build it) */
+ ipa_modem_skb_rx(endpoint->netdev, skb);
+
+ return skb != NULL;
+}
+
+/* The format of an IPA packet status structure is the same for several
+ * status types (opcodes). Other types aren't currently supported.
+ */
+static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
+{
+ switch (opcode) {
+ case IPA_STATUS_OPCODE_PACKET:
+ case IPA_STATUS_OPCODE_DROPPED_PACKET:
+ case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
+ case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool
+ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
+{
+ struct ipa *ipa = endpoint->ipa;
+ enum ipa_status_opcode opcode;
+ u32 endpoint_id;
+
+ opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
+ if (!ipa_status_format_packet(opcode))
+ return true;
+
+ endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
+ if (endpoint_id != endpoint->endpoint_id)
+ return true;
+
+ return false; /* Don't skip this packet, process it */
+}
+
+static bool
+ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
+{
+ struct ipa_endpoint *command_endpoint;
+ enum ipa_status_mask status_mask;
+ struct ipa *ipa = endpoint->ipa;
+ u32 endpoint_id;
+
+ status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
+ if (!status_mask)
+ return false; /* No valid tag */
+
+ /* The status contains a valid tag. We know the packet was sent to
+ * this endpoint (already verified by ipa_endpoint_status_skip()).
+ * If the packet came from the AP->command TX endpoint we know
+ * this packet was sent as part of the pipeline clear process.
+ */
+ endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ if (endpoint_id == command_endpoint->endpoint_id) {
+ complete(&ipa->completion);
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "unexpected tagged packet from endpoint %u\n",
+ endpoint_id);
+ }
+
+ return true;
+}
+
+/* Return whether the status indicates the packet should be dropped */
+static bool
+ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
+{
+ enum ipa_status_exception exception;
+ struct ipa *ipa = endpoint->ipa;
+ u32 rule;
+
+ /* If the status indicates a tagged transfer, we'll drop the packet */
+ if (ipa_endpoint_status_tag_valid(endpoint, data))
+ return true;
+
+ /* Deaggregation exceptions we drop; all other types we consume */
+ exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
+ if (exception)
+ return exception == IPA_STATUS_EXCEPTION_DEAGGR;
+
+ /* Drop the packet if it fails to match a routing rule; otherwise no */
+ rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
+
+ return rule == IPA_STATUS_RULE_MISS;
+}
+
+static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
+ struct page *page, u32 total_len)
+{
+ u32 buffer_size = endpoint->config.rx.buffer_size;
+ void *data = page_address(page) + NET_SKB_PAD;
+ u32 unused = buffer_size - total_len;
+ struct ipa *ipa = endpoint->ipa;
+ u32 resid = total_len;
+
+ while (resid) {
+ u32 length;
+ u32 align;
+ u32 len;
+
+ if (resid < IPA_STATUS_SIZE) {
+ dev_err(&endpoint->ipa->pdev->dev,
+ "short message (%u bytes < %zu byte status)\n",
+ resid, IPA_STATUS_SIZE);
+ break;
+ }
+
+ /* Skip over status packets that lack packet data */
+ length = ipa_status_extract(ipa, data, STATUS_LENGTH);
+ if (!length || ipa_endpoint_status_skip(endpoint, data)) {
+ data += IPA_STATUS_SIZE;
+ resid -= IPA_STATUS_SIZE;
+ continue;
+ }
+
+ /* Compute the amount of buffer space consumed by the packet,
+ * including the status. If the hardware is configured to
+ * pad packet data to an aligned boundary, account for that.
+ * And if checksum offload is enabled a trailer containing
+ * computed checksum information will be appended.
+ */
+ align = endpoint->config.rx.pad_align ? : 1;
+ len = IPA_STATUS_SIZE + ALIGN(length, align);
+ if (endpoint->config.checksum)
+ len += sizeof(struct rmnet_map_dl_csum_trailer);
+
+ if (!ipa_endpoint_status_drop(endpoint, data)) {
+ void *data2;
+ u32 extra;
+
+ /* Client receives only packet data (no status) */
+ data2 = data + IPA_STATUS_SIZE;
+
+ /* Have the true size reflect the extra unused space in
+ * the original receive buffer. Distribute the "cost"
+ * proportionately across all aggregated packets in the
+ * buffer.
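+ * For example, if 1192 bytes of an 8192 byte buffer went
+ * unused (total_len 7000) and this packet consumes 1500
+ * bytes of it, extra is DIV_ROUND_CLOSEST(1192 * 1500, 7000),
+ * or 255 bytes.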
+ */
+ extra = DIV_ROUND_CLOSEST(unused * len, total_len);
+ ipa_endpoint_skb_copy(endpoint, data2, length, extra);
+ }
+
+ /* Consume status and the full packet it describes */
+ data += len;
+ resid -= len;
+ }
+}
+
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ struct page *page;
+
+ if (endpoint->toward_ipa)
+ return;
+
+ if (trans->cancelled)
+ goto done;
+
+ /* Parse or build a socket buffer using the actual received length */
+ page = trans->data;
+ if (endpoint->config.status_enable)
+ ipa_endpoint_status_parse(endpoint, page, trans->len);
+ else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
+ trans->data = NULL; /* Pages have been consumed */
+done:
+ ipa_endpoint_replenish(endpoint);
+}
+
+void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ if (endpoint->toward_ipa) {
+ struct ipa *ipa = endpoint->ipa;
+
+ /* Nothing to do for command transactions */
+ if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
+ struct sk_buff *skb = trans->data;
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ struct page *page = trans->data;
+
+ if (page)
+ put_page(page);
+ }
+}
+
+void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
+{
+ const struct reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, ROUTE);
+ /* ROUTE_DIS is 0 */
+ val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
+ val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
+ /* ROUTE_DEF_HDR_OFST is 0 */
+ val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
+ val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
+
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+}
+
+void ipa_endpoint_default_route_clear(struct ipa *ipa)
+{
+ ipa_endpoint_default_route_set(ipa, 0);
+}
+
+/**
+ * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
+ * @endpoint: Endpoint to be reset
+ *
+ * If aggregation is active on an RX endpoint when a reset is performed
+ * on its underlying GSI channel, a special sequence of actions must be
+ * taken to ensure the IPA pipeline is properly cleared.
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
+ bool suspended = false;
+ dma_addr_t addr;
+ u32 retries;
+ u32 len = 1;
+ void *virt;
+ int ret;
+
+ virt = kzalloc(len, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ ret = -ENOMEM;
+ goto out_kfree;
+ }
+
+ /* Force close aggregation before issuing the reset */
+ ipa_endpoint_force_close(endpoint);
+
+ /* Reset and reconfigure the channel with the doorbell engine
+ * disabled. Then poll until we know aggregation is no longer
+ * active. We'll re-enable the doorbell (if appropriate) when
+ * we reset again below.
+ */
+ gsi_channel_reset(gsi, endpoint->channel_id, false);
+
+ /* Make sure the channel isn't suspended */
+ suspended = ipa_endpoint_program_suspend(endpoint, false);
+
+ /* Start channel and do a 1 byte read */
+ ret = gsi_channel_start(gsi, endpoint->channel_id);
+ if (ret)
+ goto out_suspend_again;
+
+ ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
+ if (ret)
+ goto err_endpoint_stop;
+
+ /* Wait for aggregation to be closed on the channel */
+ retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
+ do {
+ if (!ipa_endpoint_aggr_active(endpoint))
+ break;
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ } while (retries--);
+
+ /* Check one last time */
+ if (ipa_endpoint_aggr_active(endpoint))
+ dev_err(dev, "endpoint %u still active during reset\n",
+ endpoint->endpoint_id);
+
+ gsi_trans_read_byte_done(gsi, endpoint->channel_id);
+
+ ret = gsi_channel_stop(gsi, endpoint->channel_id);
+ if (ret)
+ goto out_suspend_again;
+
+ /* Finally, reset and reconfigure the channel again (re-enabling
+ * the doorbell engine if appropriate). Sleep for 1 millisecond to
+ * complete the channel reset sequence. Finish by suspending the
+ * channel again (if necessary).
+ */
+ gsi_channel_reset(gsi, endpoint->channel_id, true);
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ goto out_suspend_again;
+
+err_endpoint_stop:
+ (void)gsi_channel_stop(gsi, endpoint->channel_id);
+out_suspend_again:
+ if (suspended)
+ (void)ipa_endpoint_program_suspend(endpoint, true);
+ dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
+out_kfree:
+ kfree(virt);
+
+ return ret;
+}
+
+static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
+{
+ u32 channel_id = endpoint->channel_id;
+ struct ipa *ipa = endpoint->ipa;
+ bool special;
+ int ret = 0;
+
+ /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
+ * is active, we need to handle things specially to recover.
+ * All other cases just need to reset the underlying GSI channel.
+ */
+ special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
+ endpoint->config.aggregation;
+ if (special && ipa_endpoint_aggr_active(endpoint))
+ ret = ipa_endpoint_reset_rx_aggr(endpoint);
+ else
+ gsi_channel_reset(&ipa->gsi, channel_id, true);
+
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d resetting channel %u for endpoint %u\n",
+ ret, endpoint->channel_id, endpoint->endpoint_id);
+}
+
+static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
+{
+ if (endpoint->toward_ipa) {
+ /* Newer versions of IPA use GSI channel flow control
+ * instead of endpoint DELAY mode to prevent sending data.
+ * Flow control is disabled for newly-allocated channels,
+ * and we can assume flow control is not (ever) enabled
+ * for AP TX channels.
+ */
+ if (endpoint->ipa->version < IPA_VERSION_4_2)
+ ipa_endpoint_program_delay(endpoint, false);
+ } else {
+ /* Ensure suspend mode is off on all AP RX endpoints */
+ (void)ipa_endpoint_program_suspend(endpoint, false);
+ }
+ ipa_endpoint_init_cfg(endpoint);
+ ipa_endpoint_init_nat(endpoint);
+ ipa_endpoint_init_hdr(endpoint);
+ ipa_endpoint_init_hdr_ext(endpoint);
+ ipa_endpoint_init_hdr_metadata_mask(endpoint);
+ ipa_endpoint_init_mode(endpoint);
+ ipa_endpoint_init_aggr(endpoint);
+ if (!endpoint->toward_ipa) {
+ if (endpoint->config.rx.holb_drop)
+ ipa_endpoint_init_hol_block_enable(endpoint, 0);
+ else
+ ipa_endpoint_init_hol_block_disable(endpoint);
+ }
+ ipa_endpoint_init_deaggr(endpoint);
+ ipa_endpoint_init_rsrc_grp(endpoint);
+ ipa_endpoint_init_seq(endpoint);
+ ipa_endpoint_status(endpoint);
+}
+
+int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
+ int ret;
+
+ ret = gsi_channel_start(gsi, endpoint->channel_id);
+ if (ret) {
+ dev_err(&ipa->pdev->dev,
+ "error %d starting %cX channel %u for endpoint %u\n",
+ ret, endpoint->toward_ipa ? 'T' : 'R',
+ endpoint->channel_id, endpoint_id);
+ return ret;
+ }
+
+ if (!endpoint->toward_ipa) {
+ ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
+ ipa_endpoint_replenish_enable(endpoint);
+ }
+
+ __set_bit(endpoint_id, ipa->enabled);
+
+ return 0;
+}
+
+void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
+ int ret;
+
+ if (!test_bit(endpoint_id, ipa->enabled))
+ return;
+
+ __clear_bit(endpoint_id, endpoint->ipa->enabled);
+
+ if (!endpoint->toward_ipa) {
+ ipa_endpoint_replenish_disable(endpoint);
+ ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
+ }
+
+ /* Note that if stop fails, the channel's state is not well-defined */
+ ret = gsi_channel_stop(gsi, endpoint->channel_id);
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d attempting to stop endpoint %u\n", ret,
+ endpoint_id);
+}
+
+void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ int ret;
+
+ if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
+ return;
+
+ if (!endpoint->toward_ipa) {
+ ipa_endpoint_replenish_disable(endpoint);
+ (void)ipa_endpoint_program_suspend(endpoint, true);
+ }
+
+ ret = gsi_channel_suspend(gsi, endpoint->channel_id);
+ if (ret)
+ dev_err(dev, "error %d suspending channel %u\n", ret,
+ endpoint->channel_id);
+}
+
+void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ int ret;
+
+ if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
+ return;
+
+ if (!endpoint->toward_ipa)
+ (void)ipa_endpoint_program_suspend(endpoint, false);
+
+ ret = gsi_channel_resume(gsi, endpoint->channel_id);
+ if (ret)
+ dev_err(dev, "error %d resuming channel %u\n", ret,
+ endpoint->channel_id);
+ else if (!endpoint->toward_ipa)
+ ipa_endpoint_replenish_enable(endpoint);
+}
+
+void ipa_endpoint_suspend(struct ipa *ipa)
+{
+ if (!ipa->setup_complete)
+ return;
+
+ if (ipa->modem_netdev)
+ ipa_modem_suspend(ipa->modem_netdev);
+
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+}
+
+void ipa_endpoint_resume(struct ipa *ipa)
+{
+ if (!ipa->setup_complete)
+ return;
+
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+
+ if (ipa->modem_netdev)
+ ipa_modem_resume(ipa->modem_netdev);
+}
+
+static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 channel_id = endpoint->channel_id;
+
+ /* Only AP endpoints get set up */
+ if (endpoint->ee_id != GSI_EE_AP)
+ return;
+
+ endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
+ if (!endpoint->toward_ipa) {
+ /* RX transactions require a single TRE, so the maximum
+ * backlog is the same as the maximum outstanding TREs.
+ */
+ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+ INIT_DELAYED_WORK(&endpoint->replenish_work,
+ ipa_endpoint_replenish_work);
+ }
+
+ ipa_endpoint_program(endpoint);
+
+ __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
+}
+
+static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
+{
+ __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
+
+ if (!endpoint->toward_ipa)
+ cancel_delayed_work_sync(&endpoint->replenish_work);
+
+ ipa_endpoint_reset(endpoint);
+}
+
+void ipa_endpoint_setup(struct ipa *ipa)
+{
+ u32 endpoint_id;
+
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
+ ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
+}
+
+void ipa_endpoint_teardown(struct ipa *ipa)
+{
+ u32 endpoint_id;
+
+ for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
+ ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
+}
+
+void ipa_endpoint_deconfig(struct ipa *ipa)
+{
+ ipa->available_count = 0;
+ bitmap_free(ipa->available);
+ ipa->available = NULL;
+}
+
+int ipa_endpoint_config(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const struct reg *reg;
+ u32 endpoint_id;
+ u32 hw_limit;
+ u32 tx_count;
+ u32 rx_count;
+ u32 rx_base;
+ u32 limit;
+ u32 val;
+
+ /* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
+ * Furthermore, the endpoints were not grouped such that TX
+ * endpoint numbers started with 0 and RX endpoints had numbers
+ * higher than all TX endpoints, so we can't do the simple
+ * direction check used for newer hardware below.
+ *
+ * For hardware that doesn't support the FLAVOR_0 register,
+ * just set the available mask to support any endpoint, and
+ * assume the configuration is valid.
+ */
+ if (ipa->version < IPA_VERSION_3_5) {
+ ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
+ if (!ipa->available)
+ return -ENOMEM;
+ ipa->available_count = IPA_ENDPOINT_MAX;
+
+ bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
+
+ return 0;
+ }
+
+ /* Find out about the endpoints supplied by the hardware, and ensure
+ * the highest one doesn't exceed the number supported by software.
+ */
+ reg = ipa_reg(ipa, FLAVOR_0);
+ val = ioread32(ipa->reg_virt + reg_offset(reg));
+
+ /* Our RX is an IPA producer; our TX is an IPA consumer. */
+ tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
+ rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
+ rx_base = reg_decode(reg, PROD_LOWEST, val);
+
+ limit = rx_base + rx_count;
+ if (limit > IPA_ENDPOINT_MAX) {
+ dev_err(dev, "too many endpoints, %u > %u\n",
+ limit, IPA_ENDPOINT_MAX);
+ return -EINVAL;
+ }
+
+ /* Until IPA v5.0, the max endpoint ID was 32 */
+ hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
+ if (limit > hw_limit) {
+ dev_err(dev, "unexpected endpoint count, %u > %u\n",
+ limit, hw_limit);
+ return -EINVAL;
+ }
+
+ /* Allocate and initialize the available endpoint bitmap */
+ ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
+ if (!ipa->available)
+ return -ENOMEM;
+ ipa->available_count = limit;
+
+ /* Mark all supported RX and TX endpoints as available */
+ bitmap_set(ipa->available, 0, tx_count);
+ bitmap_set(ipa->available, rx_base, rx_count);
+
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
+ struct ipa_endpoint *endpoint;
+
+ if (endpoint_id >= limit) {
+ dev_err(dev, "invalid endpoint id, %u > %u\n",
+ endpoint_id, limit - 1);
+ goto err_free_bitmap;
+ }
+
+ if (!test_bit(endpoint_id, ipa->available)) {
+ dev_err(dev, "unavailable endpoint id %u\n",
+ endpoint_id);
+ goto err_free_bitmap;
+ }
+
+ /* Make sure it's pointing in the right direction */
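+ /* If the hardware reported 10 TX endpoints and rx_base 10, for
+ * example, endpoint IDs 0-9 would have to be TX and IDs 10 and
+ * above would have to be RX.
+ */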
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (endpoint->toward_ipa) {
+ if (endpoint_id < tx_count)
+ continue;
+ } else if (endpoint_id >= rx_base) {
+ continue;
+ }
+
+ dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
+ goto err_free_bitmap;
+ }
+
+ return 0;
+
+err_free_bitmap:
+ ipa_endpoint_deconfig(ipa);
+
+ return -EINVAL;
+}
+
+static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ struct ipa_endpoint *endpoint;
+
+ endpoint = &ipa->endpoint[data->endpoint_id];
+
+ if (data->ee_id == GSI_EE_AP)
+ ipa->channel_map[data->channel_id] = endpoint;
+ ipa->name_map[name] = endpoint;
+
+ endpoint->ipa = ipa;
+ endpoint->ee_id = data->ee_id;
+ endpoint->channel_id = data->channel_id;
+ endpoint->endpoint_id = data->endpoint_id;
+ endpoint->toward_ipa = data->toward_ipa;
+ endpoint->config = data->endpoint.config;
+
+ __set_bit(endpoint->endpoint_id, ipa->defined);
+}
+
+static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
+{
+ __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
+
+ memset(endpoint, 0, sizeof(*endpoint));
+}
+
+void ipa_endpoint_exit(struct ipa *ipa)
+{
+ u32 endpoint_id;
+
+ ipa->filtered = 0;
+
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
+ ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
+
+ bitmap_free(ipa->enabled);
+ ipa->enabled = NULL;
+ bitmap_free(ipa->set_up);
+ ipa->set_up = NULL;
+ bitmap_free(ipa->defined);
+ ipa->defined = NULL;
+
+ memset(ipa->name_map, 0, sizeof(ipa->name_map));
+ memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
+}
+
+/* Initialize endpoint state, recording which endpoints support filtering.
+ * Returns 0 if successful, or a negative error code.
+ */
+int ipa_endpoint_init(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ enum ipa_endpoint_name name;
+ u32 filtered;
+
+ BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
+
+ /* Number of endpoints is one more than the maximum ID */
+ ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
+ if (!ipa->endpoint_count)
+ return -EINVAL;
+
+ /* Initialize endpoint state bitmaps */
+ ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
+ if (!ipa->defined)
+ return -ENOMEM;
+
+ ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
+ if (!ipa->set_up)
+ goto err_free_defined;
+
+ ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
+ if (!ipa->enabled)
+ goto err_free_set_up;
+
+ filtered = 0;
+ for (name = 0; name < count; name++, data++) {
+ if (ipa_gsi_endpoint_data_empty(data))
+ continue; /* Skip over empty slots */
+
+ ipa_endpoint_init_one(ipa, name, data);
+
+ if (data->endpoint.filter_support)
+ filtered |= BIT(data->endpoint_id);
+ if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
+ ipa->modem_tx_count++;
+ }
+
+ /* Make sure the set of filtered endpoints is valid */
+ if (!ipa_filtered_valid(ipa, filtered)) {
+ ipa_endpoint_exit(ipa);
+
+ return -EINVAL;
+ }
+
+ ipa->filtered = filtered;
+
+ return 0;
+
+err_free_set_up:
+ bitmap_free(ipa->set_up);
+ ipa->set_up = NULL;
+err_free_defined:
+ bitmap_free(ipa->defined);
+ ipa->defined = NULL;
+
+ return -ENOMEM;
+}
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
new file mode 100644
index 0000000000..3ad2e80204
--- /dev/null
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2023 Linaro Ltd.
+ */
+#ifndef _IPA_ENDPOINT_H_
+#define _IPA_ENDPOINT_H_
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/if_ether.h>
+
+#include "gsi.h"
+#include "ipa_reg.h"
+
+struct net_device;
+struct sk_buff;
+
+struct ipa;
+struct ipa_gsi_endpoint_data;
+
+/* Non-zero granularity of counter used to implement aggregation timeout */
+#define IPA_AGGR_GRANULARITY 500 /* microseconds */
+
+#define IPA_MTU ETH_DATA_LEN
+
+enum ipa_endpoint_name {
+ IPA_ENDPOINT_AP_COMMAND_TX,
+ IPA_ENDPOINT_AP_LAN_RX,
+ IPA_ENDPOINT_AP_MODEM_TX,
+ IPA_ENDPOINT_AP_MODEM_RX,
+ IPA_ENDPOINT_MODEM_COMMAND_TX,
+ IPA_ENDPOINT_MODEM_LAN_TX,
+ IPA_ENDPOINT_MODEM_LAN_RX,
+ IPA_ENDPOINT_MODEM_AP_TX,
+ IPA_ENDPOINT_MODEM_AP_RX,
+ IPA_ENDPOINT_MODEM_DL_NLO_TX,
+ IPA_ENDPOINT_COUNT, /* Number of names (not an index) */
+};
+
+#define IPA_ENDPOINT_MAX 36 /* Max supported by driver */
+
+/**
+ * struct ipa_endpoint_tx - Endpoint configuration for TX endpoints
+ * @seq_type: primary packet processing sequencer type
+ * @seq_rep_type: sequencer type for replication processing
+ * @status_endpoint: endpoint to which status elements are sent
+ *
+ * The @status_endpoint is only valid if the endpoint's @status_enable
+ * flag is set.
+ */
+struct ipa_endpoint_tx {
+ enum ipa_seq_type seq_type;
+ enum ipa_seq_rep_type seq_rep_type;
+ enum ipa_endpoint_name status_endpoint;
+};
+
+/**
+ * struct ipa_endpoint_rx - Endpoint configuration for RX endpoints
+ * @buffer_size: requested receive buffer size (bytes)
+ * @pad_align: power-of-2 boundary to which packet payload is aligned
+ * @aggr_time_limit: time before aggregation closes (microseconds)
+ * @aggr_hard_limit: whether aggregation closes before or after the boundary
+ * @aggr_close_eof: whether aggregation closes on end-of-frame
+ * @holb_drop: whether to drop packets to avoid head-of-line blocking
+ *
+ * The actual size of the receive buffer is rounded up if necessary
+ * to be a power-of-2 number of pages.
+ *
+ * With each packet it transfers, the IPA hardware can perform certain
+ * transformations of its packet data. One of these is adding pad bytes
+ * to the end of the packet data so the result ends on a power-of-2 boundary.
+ *
+ * It is also able to aggregate multiple packets into a single receive buffer.
+ * Aggregation is "open" while a buffer is being filled, and "closes" when
+ * certain criteria are met.
+ *
+ * A time limit can be specified to close aggregation. Aggregation will be
+ * closed if this period passes after data is first written into a receive
+ * buffer. If not specified, no time limit is imposed.
+ *
+ * Insufficient space available in the receive buffer can close aggregation.
+ * The aggregation byte limit defines the point (in units of 1024 bytes) in
+ * the buffer where aggregation closes. With a "soft" aggregation limit,
+ * aggregation closes when a packet written to the buffer *crosses* that
+ * aggregation limit. With a "hard" aggregation limit, aggregation will
+ * close *before* writing a packet that would cross that boundary.
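+ *
+ * For example, with an aggregation byte limit of 8 (8192 bytes), a soft
+ * limit lets a packet that starts before offset 8192 be written in full
+ * before aggregation closes, while a hard limit closes aggregation before
+ * writing any packet that would extend past that offset.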
+ */
+struct ipa_endpoint_rx {
+ u32 buffer_size;
+ u32 pad_align;
+ u32 aggr_time_limit;
+ bool aggr_hard_limit;
+ bool aggr_close_eof;
+ bool holb_drop;
+};
+
+/**
+ * struct ipa_endpoint_config - IPA endpoint hardware configuration
+ * @resource_group: resource group to assign endpoint to
+ * @checksum: whether checksum offload is enabled
+ * @qmap: whether endpoint uses QMAP protocol
+ * @aggregation: whether endpoint supports aggregation
+ * @status_enable: whether endpoint uses status elements
+ * @dma_mode: whether endpoint operates in DMA mode
+ * @dma_endpoint: peer endpoint, if operating in DMA mode
+ * @tx: TX-specific endpoint information (see above)
+ * @rx: RX-specific endpoint information (see above)
+ */
+struct ipa_endpoint_config {
+ u32 resource_group;
+ bool checksum;
+ bool qmap;
+ bool aggregation;
+ bool status_enable;
+ bool dma_mode;
+ enum ipa_endpoint_name dma_endpoint;
+ union {
+ struct ipa_endpoint_tx tx;
+ struct ipa_endpoint_rx rx;
+ };
+};
+
+/**
+ * enum ipa_replenish_flag: RX buffer replenish flags
+ *
+ * @IPA_REPLENISH_ENABLED: Whether receive buffer replenishing is enabled
+ * @IPA_REPLENISH_ACTIVE: Whether replenishing is underway
+ * @IPA_REPLENISH_COUNT: Number of defined replenish flags
+ */
+enum ipa_replenish_flag {
+ IPA_REPLENISH_ENABLED,
+ IPA_REPLENISH_ACTIVE,
+ IPA_REPLENISH_COUNT, /* Number of flags (must be last) */
+};
+
+/**
+ * struct ipa_endpoint - IPA endpoint information
+ * @ipa: IPA pointer
+ * @ee_id: Execution environment the endpoint is associated with
+ * @channel_id: GSI channel used by the endpoint
+ * @endpoint_id: IPA endpoint number
+ * @toward_ipa: Endpoint direction (true = TX, false = RX)
+ * @config: Default endpoint configuration
+ * @skb_frag_max: Maximum allowed number of TX SKB fragments
+ * @evt_ring_id: GSI event ring used by the endpoint
+ * @netdev: Network device pointer, if endpoint uses one
+ * @replenish_flags: Replenishing state flags
+ * @replenish_count: Total number of replenish transactions committed
+ * @replenish_work: Work item used for repeated replenish failures
+ */
+struct ipa_endpoint {
+ struct ipa *ipa;
+ enum gsi_ee_id ee_id;
+ u32 channel_id;
+ u32 endpoint_id;
+ bool toward_ipa;
+ struct ipa_endpoint_config config;
+
+ u32 skb_frag_max; /* Used for netdev TX only */
+ u32 evt_ring_id;
+
+ /* Net device this endpoint is associated with, if any */
+ struct net_device *netdev;
+
+ /* Receive buffer replenishing for RX endpoints */
+ DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
+ u64 replenish_count;
+ struct delayed_work replenish_work; /* global wq */
+};
+
+void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa);
+
+void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable);
+
+int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
+
+int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);
+
+int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
+void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint);
+void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_suspend(struct ipa *ipa);
+void ipa_endpoint_resume(struct ipa *ipa);
+
+void ipa_endpoint_setup(struct ipa *ipa);
+void ipa_endpoint_teardown(struct ipa *ipa);
+
+int ipa_endpoint_config(struct ipa *ipa);
+void ipa_endpoint_deconfig(struct ipa *ipa);
+
+void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id);
+void ipa_endpoint_default_route_clear(struct ipa *ipa);
+
+int ipa_endpoint_init(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data);
+void ipa_endpoint_exit(struct ipa *ipa);
+
+void ipa_endpoint_trans_complete(struct ipa_endpoint *ipa,
+ struct gsi_trans *trans);
+void ipa_endpoint_trans_release(struct ipa_endpoint *ipa,
+ struct gsi_trans *trans);
+
+#endif /* _IPA_ENDPOINT_H_ */
diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c
new file mode 100644
index 0000000000..d323adb033
--- /dev/null
+++ b/drivers/net/ipa/ipa_gsi.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+
+#include "ipa_gsi.h"
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_data.h"
+
+void ipa_gsi_trans_complete(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+
+ ipa_endpoint_trans_complete(ipa->channel_map[trans->channel_id], trans);
+}
+
+void ipa_gsi_trans_release(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+
+ ipa_endpoint_trans_release(ipa->channel_map[trans->channel_id], trans);
+}
+
+void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count)
+{
+ struct ipa *ipa = container_of(gsi, struct ipa, gsi);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->channel_map[channel_id];
+ if (endpoint->netdev)
+ netdev_sent_queue(endpoint->netdev, byte_count);
+}
+
+void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count)
+{
+ struct ipa *ipa = container_of(gsi, struct ipa, gsi);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->channel_map[channel_id];
+ if (endpoint->netdev)
+ netdev_completed_queue(endpoint->netdev, count, byte_count);
+}
+
+/* Indicate whether an endpoint config data entry is "empty" */
+bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data)
+{
+ return data->ee_id == GSI_EE_AP && !data->channel.tlv_count;
+}
diff --git a/drivers/net/ipa/ipa_gsi.h b/drivers/net/ipa/ipa_gsi.h
new file mode 100644
index 0000000000..c02cb6f3a2
--- /dev/null
+++ b/drivers/net/ipa/ipa_gsi.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_GSI_TRANS_H_
+#define _IPA_GSI_TRANS_H_
+
+#include <linux/types.h>
+
+struct gsi;
+struct gsi_trans;
+struct ipa_gsi_endpoint_data;
+
+/**
+ * ipa_gsi_trans_complete() - GSI transaction completion callback
+ * @trans: Transaction that has completed
+ *
+ * This is called from the GSI layer to notify the IPA layer that a
+ * transaction has completed.
+ */
+void ipa_gsi_trans_complete(struct gsi_trans *trans);
+
+/**
+ * ipa_gsi_trans_release() - GSI transaction release callback
+ * @trans: Transaction whose resources should be freed
+ *
+ * This is called from the GSI layer to notify the IPA layer that a
+ * transaction is about to be freed, so any resources associated
+ * with it should be released.
+ */
+void ipa_gsi_trans_release(struct gsi_trans *trans);
+
+/**
+ * ipa_gsi_channel_tx_queued() - GSI queued to hardware notification
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ * @count: Number of transactions queued
+ * @byte_count: Number of bytes to transfer represented by transactions
+ *
+ * This is called from the GSI layer to notify the IPA layer that some
+ * number of transactions have been queued to hardware for execution.
+ */
+void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count);
+
+/**
+ * ipa_gsi_channel_tx_completed() - GSI transaction completion callback
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ * @count: Number of transactions completed since last report
+ * @byte_count: Number of bytes transferred represented by transactions
+ *
+ * This is called from the GSI layer to notify the IPA layer that the hardware
+ * has reported the completion of some number of transactions.
+ */
+void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count);
+
+/* ipa_gsi_endpoint_data_empty() - Empty endpoint config data test
+ * @data: endpoint configuration data
+ *
+ * Determines whether an endpoint configuration data entry is empty,
+ * meaning it contains no valid configuration information and should
+ * be ignored.
+ *
+ * Return: true if empty; false otherwise
+ */
+bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data);
+
+#endif /* _IPA_GSI_TRANS_H_ */
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
new file mode 100644
index 0000000000..4bc05948f7
--- /dev/null
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+
+/* DOC: IPA Interrupts
+ *
+ * The IPA has an interrupt line distinct from the interrupt used by the GSI
+ * code. Whereas GSI interrupts are generally related to channel events (like
+ * transfer completions), IPA interrupts signal other events involving the
+ * IPA. Some of the IPA interrupts come from a microcontroller
+ * embedded in the IPA. Each IPA interrupt type can be both masked and
+ * acknowledged independently of the others.
+ *
+ * Two of the IPA interrupts are initiated by the microcontroller. A third
+ * can be generated to signal the need for a wakeup/resume when an IPA
+ * endpoint has been suspended. There are other IPA events, but at this
+ * time only these three are supported.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+
+#include "ipa.h"
+#include "ipa_reg.h"
+#include "ipa_endpoint.h"
+#include "ipa_power.h"
+#include "ipa_uc.h"
+#include "ipa_interrupt.h"
+
+/**
+ * struct ipa_interrupt - IPA interrupt information
+ * @ipa: IPA pointer
+ * @irq: Linux IRQ number used for IPA interrupts
+ * @enabled: Mask indicating which interrupts are enabled
+ */
+struct ipa_interrupt {
+ struct ipa *ipa;
+ u32 irq;
+ u32 enabled;
+};
+
+/* Process a particular interrupt type that has been received */
+static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
+{
+ struct ipa *ipa = interrupt->ipa;
+ const struct reg *reg;
+ u32 mask = BIT(irq_id);
+ u32 offset;
+
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = reg_offset(reg);
+
+ switch (irq_id) {
+ case IPA_IRQ_UC_0:
+ case IPA_IRQ_UC_1:
+ /* For microcontroller interrupts, clear the interrupt right
+ * away, "to avoid clearing unhandled interrupts."
+ */
+ iowrite32(mask, ipa->reg_virt + offset);
+ ipa_uc_interrupt_handler(ipa, irq_id);
+ break;
+
+ case IPA_IRQ_TX_SUSPEND:
+ /* Clearing the SUSPEND_TX interrupt also clears the
+ * register that tells us which suspended endpoint(s)
+ * caused the interrupt, so defer clearing until after
+ * the handler has been called.
+ */
+ ipa_power_suspend_handler(ipa, irq_id);
+ fallthrough;
+
+ default: /* Silently ignore (and clear) any other condition */
+ iowrite32(mask, ipa->reg_virt + offset);
+ break;
+ }
+}
+
+/* IPA IRQ handler is threaded */
+static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
+{
+ struct ipa_interrupt *interrupt = dev_id;
+ struct ipa *ipa = interrupt->ipa;
+ u32 enabled = interrupt->enabled;
+ const struct reg *reg;
+ struct device *dev;
+ u32 pending;
+ u32 offset;
+ u32 mask;
+ int ret;
+
+ dev = &ipa->pdev->dev;
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0))
+ goto out_power_put;
+
+ /* The status register indicates which conditions are present,
+ * including conditions whose interrupt is not enabled. Handle
+ * only the enabled ones.
+ */
+ reg = ipa_reg(ipa, IPA_IRQ_STTS);
+ offset = reg_offset(reg);
+ pending = ioread32(ipa->reg_virt + offset);
+ while ((mask = pending & enabled)) {
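+ /* Service each pending enabled condition, lowest-numbered bit first */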
+ do {
+ u32 irq_id = __ffs(mask);
+
+ mask ^= BIT(irq_id);
+
+ ipa_interrupt_process(interrupt, irq_id);
+ } while (mask);
+ pending = ioread32(ipa->reg_virt + offset);
+ }
+
+ /* If any disabled interrupts are pending, clear them */
+ if (pending) {
+ dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
+ pending);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ iowrite32(pending, ipa->reg_virt + reg_offset(reg));
+ }
+out_power_put:
+ pm_runtime_mark_last_busy(dev);
+ (void)pm_runtime_put_autosuspend(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void ipa_interrupt_enabled_update(struct ipa *ipa)
+{
+ const struct reg *reg = ipa_reg(ipa, IPA_IRQ_EN);
+
+ iowrite32(ipa->interrupt->enabled, ipa->reg_virt + reg_offset(reg));
+}
+
+/* Enable an IPA interrupt type */
+void ipa_interrupt_enable(struct ipa *ipa, enum ipa_irq_id ipa_irq)
+{
+ /* Update the IPA interrupt mask to enable it */
+ ipa->interrupt->enabled |= BIT(ipa_irq);
+ ipa_interrupt_enabled_update(ipa);
+}
+
+/* Disable an IPA interrupt type */
+void ipa_interrupt_disable(struct ipa *ipa, enum ipa_irq_id ipa_irq)
+{
+ /* Update the IPA interrupt mask to disable it */
+ ipa->interrupt->enabled &= ~BIT(ipa_irq);
+ ipa_interrupt_enabled_update(ipa);
+}
+
+void ipa_interrupt_irq_disable(struct ipa *ipa)
+{
+ disable_irq(ipa->interrupt->irq);
+}
+
+void ipa_interrupt_irq_enable(struct ipa *ipa)
+{
+ enable_irq(ipa->interrupt->irq);
+}
+
+/* Common function used to enable/disable TX_SUSPEND for an endpoint */
+static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
+ u32 endpoint_id, bool enable)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 mask = BIT(endpoint_id % 32);
+ u32 unit = endpoint_id / 32;
+ const struct reg *reg;
+ u32 offset;
+ u32 val;
+
+ WARN_ON(!test_bit(endpoint_id, ipa->available));
+
+ /* IPA version 3.0 does not support TX_SUSPEND interrupt control */
+ if (ipa->version == IPA_VERSION_3_0)
+ return;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
+ offset = reg_n_offset(reg, unit);
+ val = ioread32(ipa->reg_virt + offset);
+
+ if (enable)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/* Enable TX_SUSPEND for an endpoint */
+void
+ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt, u32 endpoint_id)
+{
+ ipa_interrupt_suspend_control(interrupt, endpoint_id, true);
+}
+
+/* Disable TX_SUSPEND for an endpoint */
+void
+ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
+{
+ ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
+}
+
+/* Clear the suspend interrupt for all endpoints that signaled it */
+void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 unit_count;
+ u32 unit;
+
+ unit_count = roundup(ipa->endpoint_count, 32);
+ for (unit = 0; unit < unit_count; unit++) {
+ const struct reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
+
+ /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
+ if (ipa->version == IPA_VERSION_3_0)
+ continue;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
+ }
+}
+
+/* Simulate arrival of an IPA TX_SUSPEND interrupt */
+void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
+{
+ ipa_interrupt_process(interrupt, IPA_IRQ_TX_SUSPEND);
+}
+
+/* Configure the IPA interrupt framework */
+struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct ipa_interrupt *interrupt;
+ const struct reg *reg;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(ipa->pdev, "ipa");
+ if (ret <= 0) {
+ dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
+ ret);
+ return ERR_PTR(ret ? : -EINVAL);
+ }
+ irq = ret;
+
+ interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
+ if (!interrupt)
+ return ERR_PTR(-ENOMEM);
+ interrupt->ipa = ipa;
+ interrupt->irq = irq;
+
+ /* Start with all IPA interrupts disabled */
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
+
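+ /* No hard IRQ handler is supplied; all processing occurs in the thread */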
+ ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
+ "ipa", interrupt);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
+ goto err_kfree;
+ }
+
+ ret = dev_pm_set_wake_irq(dev, irq);
+ if (ret) {
+ dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n", ret);
+ goto err_free_irq;
+ }
+
+ return interrupt;
+
+err_free_irq:
+ free_irq(interrupt->irq, interrupt);
+err_kfree:
+ kfree(interrupt);
+
+ return ERR_PTR(ret);
+}
+
+/* Inverse of ipa_interrupt_config() */
+void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
+{
+ struct device *dev = &interrupt->ipa->pdev->dev;
+
+ dev_pm_clear_wake_irq(dev);
+ free_irq(interrupt->irq, interrupt);
+ kfree(interrupt);
+}
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
new file mode 100644
index 0000000000..12e3e798cc
--- /dev/null
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+#ifndef _IPA_INTERRUPT_H_
+#define _IPA_INTERRUPT_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+struct ipa;
+struct ipa_interrupt;
+enum ipa_irq_id;
+
+/**
+ * ipa_interrupt_suspend_enable - Enable TX_SUSPEND for an endpoint
+ * @interrupt: IPA interrupt structure
+ * @endpoint_id: Endpoint whose interrupt should be enabled
+ *
+ * Note: The "TX" in the name is from the perspective of the IPA hardware.
+ * A TX_SUSPEND interrupt arrives on an AP RX endpoint when packet data can't
+ * be delivered to the endpoint because it is suspended (or its underlying
+ * channel is stopped).
+ */
+void ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt,
+ u32 endpoint_id);
+
+/**
+ * ipa_interrupt_suspend_disable - Disable TX_SUSPEND for an endpoint
+ * @interrupt: IPA interrupt structure
+ * @endpoint_id: Endpoint whose interrupt should be disabled
+ */
+void ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt,
+ u32 endpoint_id);
+
+/**
+ * ipa_interrupt_suspend_clear_all - clear all suspend interrupts
+ * @interrupt: IPA interrupt structure
+ *
+ * Clear the TX_SUSPEND interrupt for all endpoints that signaled it.
+ */
+void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
+
+/**
+ * ipa_interrupt_simulate_suspend() - Simulate TX_SUSPEND IPA interrupt
+ * @interrupt: IPA interrupt structure
+ *
+ * This calls the TX_SUSPEND interrupt handler, as if such an interrupt
+ * had been signaled. This is needed to work around a hardware quirk
+ * that occurs if aggregation is active on an endpoint when its underlying
+ * channel is suspended.
+ */
+void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
+
+/**
+ * ipa_interrupt_enable() - Enable an IPA interrupt type
+ * @ipa: IPA pointer
+ * @ipa_irq: IPA interrupt ID
+ */
+void ipa_interrupt_enable(struct ipa *ipa, enum ipa_irq_id ipa_irq);
+
+/**
+ * ipa_interrupt_disable() - Disable an IPA interrupt type
+ * @ipa: IPA pointer
+ * @ipa_irq: IPA interrupt ID
+ */
+void ipa_interrupt_disable(struct ipa *ipa, enum ipa_irq_id ipa_irq);
+
+/**
+ * ipa_interrupt_irq_enable() - Enable IPA interrupts
+ * @ipa: IPA pointer
+ *
+ * This enables the IPA interrupt line
+ */
+void ipa_interrupt_irq_enable(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_irq_disable() - Disable IPA interrupts
+ * @ipa: IPA pointer
+ *
+ * This disables the IPA interrupt line
+ */
+void ipa_interrupt_irq_disable(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_config() - Configure the IPA interrupt framework
+ * @ipa: IPA pointer
+ *
+ * Return: Pointer to the IPA interrupt structure, or a pointer-coded error
+ */
+struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_deconfig() - Inverse of ipa_interrupt_config()
+ * @interrupt: IPA interrupt structure
+ */
+void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt);
+
+#endif /* _IPA_INTERRUPT_H_ */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
new file mode 100644
index 0000000000..da853353a5
--- /dev/null
+++ b/drivers/net/ipa/ipa_main.c
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2023 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+#include "ipa.h"
+#include "ipa_power.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_resource.h"
+#include "ipa_cmd.h"
+#include "ipa_reg.h"
+#include "ipa_mem.h"
+#include "ipa_table.h"
+#include "ipa_smp2p.h"
+#include "ipa_modem.h"
+#include "ipa_uc.h"
+#include "ipa_interrupt.h"
+#include "gsi_trans.h"
+#include "ipa_sysfs.h"
+
+/**
+ * DOC: The IP Accelerator
+ *
+ * This driver supports the Qualcomm IP Accelerator (IPA), which is a
+ * networking component found in many Qualcomm SoCs. The IPA is connected
+ * to the application processor (AP), but is also connected (and partially
+ * controlled by) other "execution environments" (EEs), such as a modem.
+ *
+ * The IPA is the conduit between the AP and the modem that carries network
+ * traffic. This driver presents a network interface representing the
+ * connection of the modem to external (e.g. LTE) networks.
+ *
+ * The IPA provides protocol checksum calculation, offloading this work
+ * from the AP. The IPA offers additional functionality, including routing,
+ * filtering, and NAT support, but that more advanced functionality is not
+ * currently supported. Despite that, some resources--including routing
+ * tables and filter tables--are defined in this driver because they must
+ * be initialized even when the advanced hardware features are not used.
+ *
+ * There are two distinct layers that implement the IPA hardware, and this
+ * is reflected in the organization of the driver. The generic software
+ * interface (GSI) is an integral component of the IPA, providing a
+ * well-defined communication layer between the AP subsystem and the IPA
+ * core. The GSI implements a set of "channels" used for communication
+ * between the AP and the IPA.
+ *
+ * The IPA layer uses GSI channels to implement its "endpoints". And while
+ * a GSI channel carries data between the AP and the IPA, a pair of IPA
+ * endpoints is used to carry traffic between two EEs. Specifically, the main
+ * modem network interface is implemented by two pairs of endpoints: a TX
+ * endpoint on the AP coupled with an RX endpoint on the modem; and another
+ * RX endpoint on the AP receiving data from a TX endpoint on the modem.
+ */
+
+/* The name of the GSI firmware file relative to /lib/firmware */
+#define IPA_FW_PATH_DEFAULT "ipa_fws.mdt"
+#define IPA_PAS_ID 15
+
+/* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */
+#define DPL_TIMESTAMP_SHIFT 14 /* ~1.172 kHz, ~853 usec per tick */
+#define TAG_TIMESTAMP_SHIFT 14
+#define NAT_TIMESTAMP_SHIFT 24 /* ~1.144 Hz, ~874 msec per tick */
+
+/* Divider for 19.2 MHz crystal oscillator clock to get common timer clock */
+#define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */
+
+/**
+ * enum ipa_firmware_loader: How GSI firmware gets loaded
+ *
+ * @IPA_LOADER_DEFER: System not ready; try again later
+ * @IPA_LOADER_SELF: AP loads GSI firmware
+ * @IPA_LOADER_MODEM: Modem loads GSI firmware, signals when done
+ * @IPA_LOADER_SKIP: Neither AP nor modem need to load GSI firmware
+ * @IPA_LOADER_INVALID: GSI firmware loader specification is invalid
+ */
+enum ipa_firmware_loader {
+ IPA_LOADER_DEFER,
+ IPA_LOADER_SELF,
+ IPA_LOADER_MODEM,
+ IPA_LOADER_SKIP,
+ IPA_LOADER_INVALID,
+};
+
+/**
+ * ipa_setup() - Set up IPA hardware
+ * @ipa: IPA pointer
+ *
+ * Perform initialization that requires issuing immediate commands on
+ * the command TX endpoint. If the modem is doing GSI firmware load
+ * and initialization, this function will be called when an SMP2P
+ * interrupt has been signaled by the modem. Otherwise it will be
+ * called from ipa_probe() after GSI firmware has been successfully
+ * loaded, authenticated, and started by Trust Zone.
+ */
+int ipa_setup(struct ipa *ipa)
+{
+ struct ipa_endpoint *exception_endpoint;
+ struct ipa_endpoint *command_endpoint;
+ struct device *dev = &ipa->pdev->dev;
+ int ret;
+
+ ret = gsi_setup(&ipa->gsi);
+ if (ret)
+ return ret;
+
+ ret = ipa_power_setup(ipa);
+ if (ret)
+ goto err_gsi_teardown;
+
+ ipa_endpoint_setup(ipa);
+
+ /* We need to use the AP command TX endpoint to perform other
+ * initialization, so we enable it first.
+ */
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ ret = ipa_endpoint_enable_one(command_endpoint);
+ if (ret)
+ goto err_endpoint_teardown;
+
+ ret = ipa_mem_setup(ipa); /* No matching teardown required */
+ if (ret)
+ goto err_command_disable;
+
+ ret = ipa_table_setup(ipa); /* No matching teardown required */
+ if (ret)
+ goto err_command_disable;
+
+ /* Enable the exception handling endpoint, and tell the hardware
+ * to use it by default.
+ */
+ exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ret = ipa_endpoint_enable_one(exception_endpoint);
+ if (ret)
+ goto err_command_disable;
+
+ ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
+
+ /* We're all set. Now prepare for communication with the modem */
+ ret = ipa_qmi_setup(ipa);
+ if (ret)
+ goto err_default_route_clear;
+
+ ipa->setup_complete = true;
+
+ dev_info(dev, "IPA driver setup completed successfully\n");
+
+ return 0;
+
+err_default_route_clear:
+ ipa_endpoint_default_route_clear(ipa);
+ ipa_endpoint_disable_one(exception_endpoint);
+err_command_disable:
+ ipa_endpoint_disable_one(command_endpoint);
+err_endpoint_teardown:
+ ipa_endpoint_teardown(ipa);
+ ipa_power_teardown(ipa);
+err_gsi_teardown:
+ gsi_teardown(&ipa->gsi);
+
+ return ret;
+}
+
+/**
+ * ipa_teardown() - Inverse of ipa_setup()
+ * @ipa: IPA pointer
+ */
+static void ipa_teardown(struct ipa *ipa)
+{
+ struct ipa_endpoint *exception_endpoint;
+ struct ipa_endpoint *command_endpoint;
+
+ /* We're going to tear everything down, as if setup never completed */
+ ipa->setup_complete = false;
+
+ ipa_qmi_teardown(ipa);
+ ipa_endpoint_default_route_clear(ipa);
+ exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ipa_endpoint_disable_one(exception_endpoint);
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ ipa_endpoint_disable_one(command_endpoint);
+ ipa_endpoint_teardown(ipa);
+ ipa_power_teardown(ipa);
+ gsi_teardown(&ipa->gsi);
+}
+
+static void
+ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
+{
+ const struct reg *reg;
+ u32 val;
+
+ /* IPA v4.5+ has no backward compatibility register */
+ if (ipa->version >= IPA_VERSION_4_5)
+ return;
+
+ reg = ipa_reg(ipa, IPA_BCR);
+ val = data->backward_compat;
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+}
+
+static void ipa_hardware_config_tx(struct ipa *ipa)
+{
+ enum ipa_version version = ipa->version;
+ const struct reg *reg;
+ u32 offset;
+ u32 val;
+
+ if (version <= IPA_VERSION_4_0 || version >= IPA_VERSION_4_5)
+ return;
+
+ /* Disable PA mask to allow HOLB drop */
+ reg = ipa_reg(ipa, IPA_TX_CFG);
+ offset = reg_offset(reg);
+
+ val = ioread32(ipa->reg_virt + offset);
+
+ val &= ~reg_bit(reg, PA_MASK_EN);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_hardware_config_clkon(struct ipa *ipa)
+{
+ enum ipa_version version = ipa->version;
+ const struct reg *reg;
+ u32 val;
+
+ if (version >= IPA_VERSION_4_5)
+ return;
+
+ if (version < IPA_VERSION_4_0 && version != IPA_VERSION_3_1)
+ return;
+
+ /* Implement some hardware workarounds */
+ reg = ipa_reg(ipa, CLKON_CFG);
+ if (version == IPA_VERSION_3_1) {
+ /* Disable MISC clock gating */
+ val = reg_bit(reg, CLKON_MISC);
+ } else { /* IPA v4.0+ */
+ /* Enable open global clocks in the CLKON configuration */
+ val = reg_bit(reg, CLKON_GLOBAL);
+ val |= reg_bit(reg, GLOBAL_2X_CLK);
+ }
+
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+}
+
+/* Configure bus access behavior for IPA components */
+static void ipa_hardware_config_comp(struct ipa *ipa)
+{
+ const struct reg *reg;
+ u32 offset;
+ u32 val;
+
+ /* Nothing to configure prior to IPA v4.0 */
+ if (ipa->version < IPA_VERSION_4_0)
+ return;
+
+ reg = ipa_reg(ipa, COMP_CFG);
+ offset = reg_offset(reg);
+
+ val = ioread32(ipa->reg_virt + offset);
+
+ if (ipa->version == IPA_VERSION_4_0) {
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
+ } else if (ipa->version < IPA_VERSION_4_5) {
+ val |= reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
+ } else {
+ /* For IPA v4.5+ FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
+ }
+
+ val |= reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
+ val |= reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/* Configure DDR and (possibly) PCIe max read/write QSB values */
+static void
+ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
+{
+ const struct ipa_qsb_data *data0;
+ const struct ipa_qsb_data *data1;
+ const struct reg *reg;
+ u32 val;
+
+ /* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
+ data0 = &data->qsb_data[IPA_QSB_MASTER_DDR];
+ if (data->qsb_count > 1)
+ data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];
+
+ /* Max outstanding write accesses for QSB masters */
+ reg = ipa_reg(ipa, QSB_MAX_WRITES);
+
+ val = reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
+ if (data->qsb_count > 1)
+ val |= reg_encode(reg, GEN_QMB_1_MAX_WRITES, data1->max_writes);
+
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+
+ /* Max outstanding read accesses for QSB masters */
+ reg = ipa_reg(ipa, QSB_MAX_READS);
+
+ val = reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
+ if (ipa->version >= IPA_VERSION_4_0)
+ val |= reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
+ data0->max_reads_beats);
+ if (data->qsb_count > 1) {
+ val |= reg_encode(reg, GEN_QMB_1_MAX_READS, data1->max_reads);
+ if (ipa->version >= IPA_VERSION_4_0)
+ val |= reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
+ data1->max_reads_beats);
+ }
+
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+}
+
+/* The internal inactivity timer clock is used for the aggregation timer */
+#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
+
+/* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY
+ * field to represent the given number of microseconds. The value is one
+ * less than the number of timer ticks in the requested period. 0 is not
+ * a valid granularity value (so for example @usec must be at least 16 for
+ * a TIMER_FREQUENCY of 32000).
+ */
+static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
+{
+ return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
+}
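+
+/* As a worked example, assuming IPA_AGGR_GRANULARITY is 500 microseconds:
+ * 500 * 32000 / 1000000 = 16 timer ticks, so the value written to the
+ * AGGR_GRANULARITY field would be 16 - 1 = 15.
+ */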
+
+/* IPA uses unified Qtime starting at IPA v4.5, implementing various
+ * timestamps and timers independent of the IPA core clock rate. The
+ * Qtimer is based on a 56-bit timestamp incremented at each tick of
+ * a 19.2 MHz SoC crystal oscillator (XO clock).
+ *
+ * For IPA timestamps (tag, NAT, data path logging) a lower resolution
+ * timestamp is achieved by shifting the Qtimer timestamp value right
+ * some number of bits to produce the low-order bits of the coarser
+ * granularity timestamp.
+ *
+ * For timers, a common timer clock is derived from the XO clock using
+ * a divider (we use 192, to produce a 100kHz timer clock). From
+ * this common clock, three "pulse generators" are used to produce
+ * timer ticks at a configurable frequency. IPA timers (such as
+ * those used for aggregation or head-of-line block handling) now
+ * define their period based on one of these pulse generators.
+ */
+static void ipa_qtime_config(struct ipa *ipa)
+{
+ const struct reg *reg;
+ u32 offset;
+ u32 val;
+
+ /* Timer clock divider must be disabled when we change the rate */
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
+
+ reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
+ /* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
+ val = reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
+ val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
+ /* Configure tag and NAT Qtime timestamp resolution as well */
+ val |= reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
+ val |= reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
+
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+
+ /* Set granularity of pulse generators used for other timers */
+ reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
+ val = reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
+ val |= reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
+ if (ipa->version >= IPA_VERSION_5_0) {
+ val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_10_MS);
+ val |= reg_encode(reg, PULSE_GRAN_3, IPA_GRAN_10_MS);
+ } else {
+ val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+ }
+
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+
+ /* Actual divider is 1 more than value supplied here */
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ offset = reg_offset(reg);
+
+ val = reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
+
+ iowrite32(val, ipa->reg_virt + offset);
+
+ /* Divider value is set; re-enable the common timer clock divider */
+ val |= reg_bit(reg, DIV_ENABLE);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
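+
+/* A rough check of the numbers used above: the 19.2 MHz XO clock divided
+ * by IPA_XO_CLOCK_DIVIDER (192) yields the 100 kHz common timer clock,
+ * and shifting the 19.2 MHz timestamp right by TAG_TIMESTAMP_SHIFT (14)
+ * bits gives about 19200000 / 16384 = ~1172 Hz, or roughly 853
+ * microseconds per tick, consistent with the *_TIMESTAMP_SHIFT comments.
+ */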
+
+/* Before IPA v4.5 timing is controlled by a counter register */
+static void ipa_hardware_config_counter(struct ipa *ipa)
+{
+ u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ const struct reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, COUNTER_CFG);
+ /* If defined, EOT_COAL_GRANULARITY is 0 */
+ val = reg_encode(reg, AGGR_GRANULARITY, granularity);
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+}
+
+static void ipa_hardware_config_timing(struct ipa *ipa)
+{
+ if (ipa->version < IPA_VERSION_4_5)
+ ipa_hardware_config_counter(ipa);
+ else
+ ipa_qtime_config(ipa);
+}
+
+static void ipa_hardware_config_hashing(struct ipa *ipa)
+{
+ const struct reg *reg;
+
+ /* Other than IPA v4.2, all versions enable "hashing". Starting
+ * with IPA v5.0, the filter and router tables are implemented
+ * differently, but the default configuration enables this feature
+ * (now referred to as "caching"), so there's nothing to do here.
+ */
+ if (ipa->version != IPA_VERSION_4_2)
+ return;
+
+ /* IPA v4.2 does not support hashed tables, so disable them */
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_EN);
+
+ /* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
+ * IPV4_FILTER_HASH are all zero.
+ */
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
+}
+
+static void ipa_idle_indication_cfg(struct ipa *ipa,
+ u32 enter_idle_debounce_thresh,
+ bool const_non_idle_enable)
+{
+ const struct reg *reg;
+ u32 val;
+
+ if (ipa->version < IPA_VERSION_3_5_1)
+ return;
+
+ reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
+ val = reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
+ enter_idle_debounce_thresh);
+ if (const_non_idle_enable)
+ val |= reg_bit(reg, CONST_NON_IDLE_ENABLE);
+
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+}
+
+/**
+ * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
+ * @ipa: IPA pointer
+ *
+ * Configures when the IPA signals it is idle to the global clock
+ * controller, which can respond by scaling down the clock to save
+ * power.
+ */
+static void ipa_hardware_dcd_config(struct ipa *ipa)
+{
+ /* Recommended values for IPA 3.5 and later according to IPA HPG */
+ ipa_idle_indication_cfg(ipa, 256, false);
+}
+
+static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
+{
+ /* Power-on reset values */
+ ipa_idle_indication_cfg(ipa, 0, true);
+}
+
+/**
+ * ipa_hardware_config() - Primitive hardware initialization
+ * @ipa: IPA pointer
+ * @data: IPA configuration data
+ */
+static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
+{
+ ipa_hardware_config_bcr(ipa, data);
+ ipa_hardware_config_tx(ipa);
+ ipa_hardware_config_clkon(ipa);
+ ipa_hardware_config_comp(ipa);
+ ipa_hardware_config_qsb(ipa, data);
+ ipa_hardware_config_timing(ipa);
+ ipa_hardware_config_hashing(ipa);
+ ipa_hardware_dcd_config(ipa);
+}
+
+/**
+ * ipa_hardware_deconfig() - Inverse of ipa_hardware_config()
+ * @ipa: IPA pointer
+ *
+ * This restores the power-on reset values (even if they aren't different)
+ */
+static void ipa_hardware_deconfig(struct ipa *ipa)
+{
+ /* Mostly we just leave things as we set them. */
+ ipa_hardware_dcd_deconfig(ipa);
+}
+
+/**
+ * ipa_config() - Configure IPA hardware
+ * @ipa: IPA pointer
+ * @data: IPA configuration data
+ *
+ * Perform initialization requiring IPA power to be enabled.
+ */
+static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
+{
+ int ret;
+
+ ipa_hardware_config(ipa, data);
+
+ ret = ipa_mem_config(ipa);
+ if (ret)
+ goto err_hardware_deconfig;
+
+ ipa->interrupt = ipa_interrupt_config(ipa);
+ if (IS_ERR(ipa->interrupt)) {
+ ret = PTR_ERR(ipa->interrupt);
+ ipa->interrupt = NULL;
+ goto err_mem_deconfig;
+ }
+
+ ipa_uc_config(ipa);
+
+ ret = ipa_endpoint_config(ipa);
+ if (ret)
+ goto err_uc_deconfig;
+
+ ipa_table_config(ipa); /* No deconfig required */
+
+ /* Assign resource limitation to each group; no deconfig required */
+ ret = ipa_resource_config(ipa, data->resource_data);
+ if (ret)
+ goto err_endpoint_deconfig;
+
+ ret = ipa_modem_config(ipa);
+ if (ret)
+ goto err_endpoint_deconfig;
+
+ return 0;
+
+err_endpoint_deconfig:
+ ipa_endpoint_deconfig(ipa);
+err_uc_deconfig:
+ ipa_uc_deconfig(ipa);
+ ipa_interrupt_deconfig(ipa->interrupt);
+ ipa->interrupt = NULL;
+err_mem_deconfig:
+ ipa_mem_deconfig(ipa);
+err_hardware_deconfig:
+ ipa_hardware_deconfig(ipa);
+
+ return ret;
+}
+
+/**
+ * ipa_deconfig() - Inverse of ipa_config()
+ * @ipa: IPA pointer
+ */
+static void ipa_deconfig(struct ipa *ipa)
+{
+ ipa_modem_deconfig(ipa);
+ ipa_endpoint_deconfig(ipa);
+ ipa_uc_deconfig(ipa);
+ ipa_interrupt_deconfig(ipa->interrupt);
+ ipa->interrupt = NULL;
+ ipa_mem_deconfig(ipa);
+ ipa_hardware_deconfig(ipa);
+}
+
+static int ipa_firmware_load(struct device *dev)
+{
+ const struct firmware *fw;
+ struct device_node *node;
+ struct resource res;
+ phys_addr_t phys;
+ const char *path;
+ ssize_t size;
+ void *virt;
+ int ret;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(dev, "DT error getting \"memory-region\" property\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
+ if (ret) {
+ dev_err(dev, "error %d getting \"memory-region\" resource\n",
+ ret);
+ return ret;
+ }
+
+ /* Use name from DTB if specified; use default for *any* error */
+ ret = of_property_read_string(dev->of_node, "firmware-name", &path);
+ if (ret) {
+ dev_dbg(dev, "error %d getting \"firmware-name\" resource\n",
+ ret);
+ path = IPA_FW_PATH_DEFAULT;
+ }
+
+ ret = request_firmware(&fw, path, dev);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"%s\"\n", ret, path);
+ return ret;
+ }
+
+ phys = res.start;
+ size = (size_t)resource_size(&res);
+ virt = memremap(phys, size, MEMREMAP_WC);
+ if (!virt) {
+ dev_err(dev, "unable to remap firmware memory\n");
+ ret = -ENOMEM;
+ goto out_release_firmware;
+ }
+
+ ret = qcom_mdt_load(dev, fw, path, IPA_PAS_ID, virt, phys, size, NULL);
+ if (ret)
+ dev_err(dev, "error %d loading \"%s\"\n", ret, path);
+ else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID)))
+ dev_err(dev, "error %d authenticating \"%s\"\n", ret, path);
+
+ memunmap(virt);
+out_release_firmware:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static const struct of_device_id ipa_match[] = {
+ {
+ .compatible = "qcom,msm8998-ipa",
+ .data = &ipa_data_v3_1,
+ },
+ {
+ .compatible = "qcom,sdm845-ipa",
+ .data = &ipa_data_v3_5_1,
+ },
+ {
+ .compatible = "qcom,sc7180-ipa",
+ .data = &ipa_data_v4_2,
+ },
+ {
+ .compatible = "qcom,sdx55-ipa",
+ .data = &ipa_data_v4_5,
+ },
+ {
+ .compatible = "qcom,sm6350-ipa",
+ .data = &ipa_data_v4_7,
+ },
+ {
+ .compatible = "qcom,sm8350-ipa",
+ .data = &ipa_data_v4_9,
+ },
+ {
+ .compatible = "qcom,sc7280-ipa",
+ .data = &ipa_data_v4_11,
+ },
+ {
+ .compatible = "qcom,sdx65-ipa",
+ .data = &ipa_data_v5_0,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ipa_match);
+
+/* Check things that can be validated at build time. This just groups
+ * these BUILD_BUG_ON() calls so they don't clutter the rest of the
+ * code.
+ */
+static void ipa_validate_build(void)
+{
+ /* At one time we assumed a 64-bit build, allowing some do_div()
+ * calls to be replaced by simple division or modulo operations.
+ * We currently only perform divide and modulo operations on u32,
+ * u16, or size_t objects, and of those only size_t has any chance
+ * of being a 64-bit value. (It should be guaranteed 32 bits wide
+ * on a 32-bit build, but there is no harm in verifying that.)
+ */
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT) && sizeof(size_t) != 4);
+
+ /* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
+ BUILD_BUG_ON(GSI_EE_AP != 0);
+
+ /* There's no point if we have no channels or event rings */
+ BUILD_BUG_ON(!GSI_CHANNEL_COUNT_MAX);
+ BUILD_BUG_ON(!GSI_EVT_RING_COUNT_MAX);
+
+ /* GSI hardware design limits */
+ BUILD_BUG_ON(GSI_CHANNEL_COUNT_MAX > 32);
+ BUILD_BUG_ON(GSI_EVT_RING_COUNT_MAX > 31);
+
+ /* The number of TREs in a transaction is limited by the channel's
+ * TLV FIFO size. A transaction structure uses 8-bit fields
+ * to represent the number of TREs it has allocated and used.
+ */
+ BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);
+
+ /* This is used as a divisor */
+ BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
+
+ /* Aggregation granularity value can't be 0, and must fit */
+ BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
+}
+
+static enum ipa_firmware_loader ipa_firmware_loader(struct device *dev)
+{
+ bool modem_init;
+ const char *str;
+ int ret;
+
+ /* Look up the old and new properties by name */
+ modem_init = of_property_read_bool(dev->of_node, "modem-init");
+ ret = of_property_read_string(dev->of_node, "qcom,gsi-loader", &str);
+
+ /* If the new property doesn't exist, it's legacy behavior */
+ if (ret == -EINVAL) {
+ if (modem_init)
+ return IPA_LOADER_MODEM;
+ goto out_self;
+ }
+
+ /* Any other error on the new property means it's poorly defined */
+ if (ret)
+ return IPA_LOADER_INVALID;
+
+ /* New property value exists; if old one does too, that's invalid */
+ if (modem_init)
+ return IPA_LOADER_INVALID;
+
+ /* Modem loads GSI firmware for "modem" */
+ if (!strcmp(str, "modem"))
+ return IPA_LOADER_MODEM;
+
+ /* No GSI firmware load is needed for "skip" */
+ if (!strcmp(str, "skip"))
+ return IPA_LOADER_SKIP;
+
+ /* Any value other than "self" is an error */
+ if (strcmp(str, "self"))
+ return IPA_LOADER_INVALID;
+out_self:
+ /* We need Trust Zone to load firmware; make sure it's available */
+ if (qcom_scm_is_available())
+ return IPA_LOADER_SELF;
+
+ return IPA_LOADER_DEFER;
+}
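+
+/* For illustration, the properties examined above might appear in the IPA
+ * Device Tree node roughly as follows (node name is hypothetical):
+ *
+ * ipa@1e40000 {
+ * ...
+ * qcom,gsi-loader = "self";
+ * };
+ *
+ * where "modem" and "skip" are the other accepted values, and the Boolean
+ * "modem-init" property is the legacy way to request modem loading.
+ */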
+
+/**
+ * ipa_probe() - IPA platform driver probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: 0 if successful, or a negative error code (possibly
+ * EPROBE_DEFER)
+ *
+ * This is the main entry point for the IPA driver. Initialization proceeds
+ * in several stages:
+ * - The "init" stage involves activities that can be initialized without
+ * access to the IPA hardware.
+ * - The "config" stage requires IPA power to be active so IPA registers
+ * can be accessed, but does not require the use of IPA immediate commands.
+ * - The "setup" stage uses IPA immediate commands, and so requires the GSI
+ * layer to be initialized.
+ *
+ * A Boolean Device Tree "modem-init" property determines whether GSI
+ * initialization will be performed by the AP (Trust Zone) or the modem.
+ * If the AP does GSI initialization, the setup phase is entered after
+ * this has completed successfully. Otherwise the modem initializes
+ * the GSI layer and signals it has finished by sending an SMP2P interrupt
+ * to the AP; this triggers the start of IPA setup.
+ */
+static int ipa_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ enum ipa_firmware_loader loader;
+ const struct ipa_data *data;
+ struct ipa_power *power;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_validate_build();
+
+ /* Get configuration data early; needed for power initialization */
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "matched hardware not supported\n");
+ return -ENODEV;
+ }
+
+ if (!ipa_version_supported(data->version)) {
+ dev_err(dev, "unsupported IPA version %u\n", data->version);
+ return -EINVAL;
+ }
+
+ if (!data->modem_route_count) {
+ dev_err(dev, "modem_route_count cannot be zero\n");
+ return -EINVAL;
+ }
+
+ loader = ipa_firmware_loader(dev);
+ if (loader == IPA_LOADER_INVALID)
+ return -EINVAL;
+ if (loader == IPA_LOADER_DEFER)
+ return -EPROBE_DEFER;
+
+ /* The clock and interconnects might not be ready when we're
+ * probed, so this call might return -EPROBE_DEFER.
+ */
+ power = ipa_power_init(dev, data->power_data);
+ if (IS_ERR(power))
+ return PTR_ERR(power);
+
+ /* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
+ ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
+ if (!ipa) {
+ ret = -ENOMEM;
+ goto err_power_exit;
+ }
+
+ ipa->pdev = pdev;
+ dev_set_drvdata(dev, ipa);
+ ipa->power = power;
+ ipa->version = data->version;
+ ipa->modem_route_count = data->modem_route_count;
+ init_completion(&ipa->completion);
+
+ ret = ipa_reg_init(ipa);
+ if (ret)
+ goto err_kfree_ipa;
+
+ ret = ipa_mem_init(ipa, data->mem_data);
+ if (ret)
+ goto err_reg_exit;
+
+ ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
+ data->endpoint_data);
+ if (ret)
+ goto err_mem_exit;
+
+ /* Initialize endpoints, recording those that support filtering */
+ ret = ipa_endpoint_init(ipa, data->endpoint_count, data->endpoint_data);
+ if (ret)
+ goto err_gsi_exit;
+
+ ret = ipa_table_init(ipa);
+ if (ret)
+ goto err_endpoint_exit;
+
+ ret = ipa_smp2p_init(ipa, loader == IPA_LOADER_MODEM);
+ if (ret)
+ goto err_table_exit;
+
+ /* Power needs to be active for config and setup */
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0))
+ goto err_power_put;
+
+ ret = ipa_config(ipa, data);
+ if (ret)
+ goto err_power_put;
+
+ dev_info(dev, "IPA driver initialized");
+
+ /* If the modem is loading GSI firmware, it will trigger a call to
+ * ipa_setup() when it has finished. In that case we're done here.
+ */
+ if (loader == IPA_LOADER_MODEM)
+ goto done;
+
+ if (loader == IPA_LOADER_SELF) {
+ /* The AP is loading GSI firmware; do so now */
+ ret = ipa_firmware_load(dev);
+ if (ret)
+ goto err_deconfig;
+ } /* Otherwise loader == IPA_LOADER_SKIP */
+
+ /* GSI firmware is loaded; proceed to setup */
+ ret = ipa_setup(ipa);
+ if (ret)
+ goto err_deconfig;
+done:
+ pm_runtime_mark_last_busy(dev);
+ (void)pm_runtime_put_autosuspend(dev);
+
+ return 0;
+
+err_deconfig:
+ ipa_deconfig(ipa);
+err_power_put:
+ pm_runtime_put_noidle(dev);
+ ipa_smp2p_exit(ipa);
+err_table_exit:
+ ipa_table_exit(ipa);
+err_endpoint_exit:
+ ipa_endpoint_exit(ipa);
+err_gsi_exit:
+ gsi_exit(&ipa->gsi);
+err_mem_exit:
+ ipa_mem_exit(ipa);
+err_reg_exit:
+ ipa_reg_exit(ipa);
+err_kfree_ipa:
+ kfree(ipa);
+err_power_exit:
+ ipa_power_exit(power);
+
+ return ret;
+}
+
+static int ipa_remove(struct platform_device *pdev)
+{
+ struct ipa *ipa = dev_get_drvdata(&pdev->dev);
+ struct ipa_power *power = ipa->power;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ /* Prevent the modem from triggering a call to ipa_setup(). This
+ * also ensures a modem-initiated setup that's underway completes.
+ */
+ ipa_smp2p_irq_disable_setup(ipa);
+
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0))
+ goto out_power_put;
+
+ if (ipa->setup_complete) {
+ ret = ipa_modem_stop(ipa);
+ /* If starting or stopping is in progress, try once more */
+ if (ret == -EBUSY) {
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ ret = ipa_modem_stop(ipa);
+ }
+ if (ret)
+ return ret;
+
+ ipa_teardown(ipa);
+ }
+
+ ipa_deconfig(ipa);
+out_power_put:
+ pm_runtime_put_noidle(dev);
+ ipa_smp2p_exit(ipa);
+ ipa_table_exit(ipa);
+ ipa_endpoint_exit(ipa);
+ gsi_exit(&ipa->gsi);
+ ipa_mem_exit(ipa);
+ ipa_reg_exit(ipa);
+ kfree(ipa);
+ ipa_power_exit(power);
+
+ dev_info(dev, "IPA driver removed");
+
+ return 0;
+}
+
+static void ipa_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = ipa_remove(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret);
+}
+
+static const struct attribute_group *ipa_attribute_groups[] = {
+ &ipa_attribute_group,
+ &ipa_feature_attribute_group,
+ &ipa_endpoint_id_attribute_group,
+ &ipa_modem_attribute_group,
+ NULL,
+};
+
+static struct platform_driver ipa_driver = {
+ .probe = ipa_probe,
+ .remove = ipa_remove,
+ .shutdown = ipa_shutdown,
+ .driver = {
+ .name = "ipa",
+ .pm = &ipa_pm_ops,
+ .of_match_table = ipa_match,
+ .dev_groups = ipa_attribute_groups,
+ },
+};
+
+module_platform_driver(ipa_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm IP Accelerator device driver");
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
new file mode 100644
index 0000000000..db6ada2343
--- /dev/null
+++ b/drivers/net/ipa/ipa_mem.c
@@ -0,0 +1,684 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2023 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/io.h>
+#include <linux/soc/qcom/smem.h>
+
+#include "ipa.h"
+#include "ipa_reg.h"
+#include "ipa_data.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+#include "ipa_table.h"
+#include "gsi_trans.h"
+
+/* "Canary" value placed between memory regions to detect overflow */
+#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)
+
+/* SMEM host id representing the modem. */
+#define QCOM_SMEM_HOST_MODEM 1
+
+const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ u32 i;
+
+ for (i = 0; i < ipa->mem_count; i++) {
+ const struct ipa_mem *mem = &ipa->mem[i];
+
+ if (mem->id == mem_id)
+ return mem;
+ }
+
+ return NULL;
+}
+
+/* Add an immediate command to a transaction that zeroes a memory region */
+static void
+ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
+ dma_addr_t addr = ipa->zero_addr;
+
+ if (!mem->size)
+ return;
+
+ ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
+}
+
+/**
+ * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
+ * @ipa: IPA pointer
+ *
+ * Set up the shared memory regions in IPA local memory. This involves
+ * zero-filling memory regions, and in the case of header memory, telling
+ * the IPA where it's located.
+ *
+ * This function performs the initial setup of this memory. If the modem
+ * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
+ *
+ * The AP informs the modem where its portions of memory are located
+ * in a QMI exchange that occurs at modem startup.
+ *
+ * There is no need for a matching ipa_mem_teardown() function.
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_mem_setup(struct ipa *ipa)
+{
+ dma_addr_t addr = ipa->zero_addr;
+ const struct reg *reg;
+ const struct ipa_mem *mem;
+ struct gsi_trans *trans;
+ u32 offset;
+ u16 size;
+ u32 val;
+
+ /* Get a transaction to define the header memory region and to zero
+ * the processing context and modem memory regions.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 4);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
+ return -EBUSY;
+ }
+
+ /* Initialize IPA-local header memory. The AP header region, if
+ * present, is contiguous with and follows the modem header region,
+ * and they are initialized together.
+ */
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
+ offset = mem->offset;
+ size = mem->size;
+ mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
+ if (mem)
+ size += mem->size;
+
+ ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
+
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
+
+ gsi_trans_commit_wait(trans);
+
+ /* Tell the hardware where the processing context area is located */
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
+ offset = ipa->mem_offset + mem->offset;
+
+ reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
+ val = reg_encode(reg, IPA_BASE_ADDR, offset);
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+
+ return 0;
+}
+
+/* Is the given memory region ID valid for the current IPA version? */
+static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ enum ipa_version version = ipa->version;
+
+ switch (mem_id) {
+ case IPA_MEM_UC_SHARED:
+ case IPA_MEM_UC_INFO:
+ case IPA_MEM_V4_FILTER_HASHED:
+ case IPA_MEM_V4_FILTER:
+ case IPA_MEM_V6_FILTER_HASHED:
+ case IPA_MEM_V6_FILTER:
+ case IPA_MEM_V4_ROUTE_HASHED:
+ case IPA_MEM_V4_ROUTE:
+ case IPA_MEM_V6_ROUTE_HASHED:
+ case IPA_MEM_V6_ROUTE:
+ case IPA_MEM_MODEM_HEADER:
+ case IPA_MEM_AP_HEADER:
+ case IPA_MEM_MODEM_PROC_CTX:
+ case IPA_MEM_AP_PROC_CTX:
+ case IPA_MEM_MODEM:
+ case IPA_MEM_UC_EVENT_RING:
+ case IPA_MEM_PDN_CONFIG:
+ case IPA_MEM_STATS_QUOTA_MODEM:
+ case IPA_MEM_STATS_QUOTA_AP:
+ case IPA_MEM_END_MARKER: /* pseudo region */
+ break;
+
+ case IPA_MEM_STATS_TETHERING:
+ case IPA_MEM_STATS_DROP:
+ if (version < IPA_VERSION_4_0)
+ return false;
+ break;
+
+ case IPA_MEM_STATS_V4_FILTER:
+ case IPA_MEM_STATS_V6_FILTER:
+ case IPA_MEM_STATS_V4_ROUTE:
+ case IPA_MEM_STATS_V6_ROUTE:
+ if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
+ return false;
+ break;
+
+ case IPA_MEM_AP_V4_FILTER:
+ case IPA_MEM_AP_V6_FILTER:
+ if (version != IPA_VERSION_5_0)
+ return false;
+ break;
+
+ case IPA_MEM_NAT_TABLE:
+ case IPA_MEM_STATS_FILTER_ROUTE:
+ if (version < IPA_VERSION_4_5)
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/* Must the given memory region be present in the configuration? */
+static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ switch (mem_id) {
+ case IPA_MEM_UC_SHARED:
+ case IPA_MEM_UC_INFO:
+ case IPA_MEM_V4_FILTER_HASHED:
+ case IPA_MEM_V4_FILTER:
+ case IPA_MEM_V6_FILTER_HASHED:
+ case IPA_MEM_V6_FILTER:
+ case IPA_MEM_V4_ROUTE_HASHED:
+ case IPA_MEM_V4_ROUTE:
+ case IPA_MEM_V6_ROUTE_HASHED:
+ case IPA_MEM_V6_ROUTE:
+ case IPA_MEM_MODEM_HEADER:
+ case IPA_MEM_MODEM_PROC_CTX:
+ case IPA_MEM_AP_PROC_CTX:
+ case IPA_MEM_MODEM:
+ return true;
+
+ case IPA_MEM_PDN_CONFIG:
+ case IPA_MEM_STATS_QUOTA_MODEM:
+ return ipa->version >= IPA_VERSION_4_0;
+
+ case IPA_MEM_STATS_TETHERING:
+ return ipa->version >= IPA_VERSION_4_0 &&
+ ipa->version != IPA_VERSION_5_0;
+
+ default:
+ return false; /* Anything else is optional */
+ }
+}
+
+static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
+{
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id = mem->id;
+ u16 size_multiple;
+
+ /* Make sure the memory region is valid for this version of IPA */
+ if (!ipa_mem_id_valid(ipa, mem_id)) {
+ dev_err(dev, "region id %u not valid\n", mem_id);
+ return false;
+ }
+
+ if (!mem->size && !mem->canary_count) {
+ dev_err(dev, "empty memory region %u\n", mem_id);
+ return false;
+ }
+
+ /* Other than modem memory, sizes must be a multiple of 8 */
+ size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
+ if (mem->size % size_multiple)
+ dev_err(dev, "region %u size not a multiple of %u bytes\n",
+ mem_id, size_multiple);
+ else if (mem->offset % 8)
+ dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
+ else if (mem->offset < mem->canary_count * sizeof(__le32))
+ dev_err(dev, "region %u offset too small for %hu canaries\n",
+ mem_id, mem->canary_count);
+ else if (mem_id == IPA_MEM_END_MARKER && mem->size)
+ dev_err(dev, "non-zero end marker region size\n");
+ else
+ return true;
+
+ return false;
+}
+
+/* Verify each defined memory region is valid. */
+static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
+{
+ DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id;
+ u32 i;
+
+ if (mem_data->local_count > IPA_MEM_COUNT) {
+ dev_err(dev, "too many memory regions (%u > %u)\n",
+ mem_data->local_count, IPA_MEM_COUNT);
+ return false;
+ }
+
+ for (i = 0; i < mem_data->local_count; i++) {
+ const struct ipa_mem *mem = &mem_data->local[i];
+
+ if (__test_and_set_bit(mem->id, regions)) {
+ dev_err(dev, "duplicate memory region %u\n", mem->id);
+ return false;
+ }
+
+ /* Defined regions have non-zero size and/or canary count */
+ if (!ipa_mem_valid_one(ipa, mem))
+ return false;
+ }
+
+ /* Now see if any required regions are not defined */
+ for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
+ if (ipa_mem_id_required(ipa, mem_id))
+ dev_err(dev, "required memory region %u missing\n",
+ mem_id);
+ }
+
+ return true;
+}
+
+/* Do all memory regions fit within the IPA local memory? */
+static bool ipa_mem_size_valid(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 limit = ipa->mem_size;
+ u32 i;
+
+ for (i = 0; i < ipa->mem_count; i++) {
+ const struct ipa_mem *mem = &ipa->mem[i];
+
+ if (mem->offset + mem->size <= limit)
+ continue;
+
+ dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
+ mem->id, limit);
+
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ipa_mem_config() - Configure IPA shared memory
+ * @ipa: IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_mem_config(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const struct ipa_mem *mem;
+ const struct reg *reg;
+ dma_addr_t addr;
+ u32 mem_size;
+ void *virt;
+ u32 val;
+ u32 i;
+
+ /* Check the advertised location and size of the shared memory area */
+ reg = ipa_reg(ipa, SHARED_MEM_SIZE);
+ val = ioread32(ipa->reg_virt + reg_offset(reg));
+
+ /* The fields in the register are in 8 byte units */
+ ipa->mem_offset = 8 * reg_decode(reg, MEM_BADDR, val);
+
+ /* Make sure the end is within the region's mapped space */
+ mem_size = 8 * reg_decode(reg, MEM_SIZE, val);
+
+ /* If the sizes don't match, issue a warning */
+ if (ipa->mem_offset + mem_size < ipa->mem_size) {
+ dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
+ mem_size);
+ ipa->mem_size = mem_size;
+ } else if (ipa->mem_offset + mem_size > ipa->mem_size) {
+ dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
+ mem_size);
+ }
+
+ /* We know our memory size; make sure regions are all in range */
+ if (!ipa_mem_size_valid(ipa))
+ return -EINVAL;
+
+ /* Prealloc DMA memory for zeroing regions */
+ virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+ ipa->zero_addr = addr;
+ ipa->zero_virt = virt;
+ ipa->zero_size = IPA_MEM_MAX;
+
+ /* For each defined region, write "canary" values in the
+ * space prior to the region's base address if indicated.
+ */
+ for (i = 0; i < ipa->mem_count; i++) {
+ u16 canary_count = ipa->mem[i].canary_count;
+ __le32 *canary;
+
+ if (!canary_count)
+ continue;
+
+ /* Write canary values in the space before the region */
+ canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
+ do
+ *--canary = IPA_MEM_CANARY_VAL;
+ while (--canary_count);
+ }
+
+ /* Verify the microcontroller ring alignment (if defined) */
+ mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
+ if (mem && mem->offset % 1024) {
+ dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
+ goto err_dma_free;
+ }
+
+ return 0;
+
+err_dma_free:
+ dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);
+
+ return -EINVAL;
+}
+
+/* Inverse of ipa_mem_config() */
+void ipa_mem_deconfig(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+
+ dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
+ ipa->zero_size = 0;
+ ipa->zero_virt = NULL;
+ ipa->zero_addr = 0;
+}
+
+/**
+ * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
+ * @ipa: IPA pointer
+ *
+ * Zero regions of IPA-local memory used by the modem. These are configured
+ * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
+ * restarts via SSR we need to re-initialize them. A QMI message tells the
+ * modem where to find regions of IPA local memory it needs to know about
+ * (including these).
+ */
+int ipa_mem_zero_modem(struct ipa *ipa)
+{
+ struct gsi_trans *trans;
+
+ /* Get a transaction to zero the modem memory, modem header,
+ * and modem processing context regions.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 3);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction to zero modem memory\n");
+ return -EBUSY;
+ }
+
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+/**
+ * ipa_imem_init() - Initialize IMEM memory used by the IPA
+ * @ipa: IPA pointer
+ * @addr: Physical address of the IPA region in IMEM
+ * @size: Size (bytes) of the IPA region in IMEM
+ *
+ * IMEM is a block of shared memory separate from system DRAM, and
+ * a portion of this memory is available for the IPA to use. The
+ * modem accesses this memory directly, but the IPA accesses it
+ * via the IOMMU, using the AP's credentials.
+ *
+ * If this region exists (size > 0) we map it for read/write access
+ * through the IOMMU using the IPA device.
+ *
+ * Note: @addr and @size are not guaranteed to be page-aligned.
+ */
+static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct iommu_domain *domain;
+ unsigned long iova;
+ phys_addr_t phys;
+ int ret;
+
+ if (!size)
+ return 0; /* IMEM memory not used */
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain) {
+ dev_err(dev, "no IOMMU domain found for IMEM\n");
+ return -EINVAL;
+ }
+
+ /* Align the address down and the size up to page boundaries */
+ phys = addr & PAGE_MASK;
+ size = PAGE_ALIGN(size + addr - phys);
+ iova = phys; /* We just want a direct mapping */
+
+ ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ ipa->imem_iova = iova;
+ ipa->imem_size = size;
+
+ return 0;
+}
+
+static void ipa_imem_exit(struct ipa *ipa)
+{
+ struct iommu_domain *domain;
+ struct device *dev;
+
+ if (!ipa->imem_size)
+ return;
+
+ dev = &ipa->pdev->dev;
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain) {
+ size_t size;
+
+ size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
+ if (size != ipa->imem_size)
+ dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
+ size, ipa->imem_size);
+ } else {
+ dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
+ }
+
+ ipa->imem_size = 0;
+ ipa->imem_iova = 0;
+}
+
+/**
+ * ipa_smem_init() - Initialize SMEM memory used by the IPA
+ * @ipa: IPA pointer
+ * @item: Item ID of SMEM memory
+ * @size: Size (bytes) of SMEM memory region
+ *
+ * SMEM is a managed block of shared DRAM, from which numbered "items"
+ * can be allocated. One item is designated for use by the IPA.
+ *
+ * The modem accesses SMEM memory directly, but the IPA accesses it
+ * via the IOMMU, using the AP's credentials.
+ *
+ * If the size provided is non-zero, we allocate it and map it for
+ * access through the IOMMU.
+ *
+ * Note: @size and the item address are not guaranteed to be page-aligned.
+ */
+static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct iommu_domain *domain;
+ unsigned long iova;
+ phys_addr_t phys;
+ phys_addr_t addr;
+ size_t actual;
+ void *virt;
+ int ret;
+
+ if (!size)
+ return 0; /* SMEM memory not used */
+
+ /* SMEM is memory shared between the AP and another system entity
+ * (in this case, the modem). An allocation from SMEM is persistent
+ * until the AP reboots; there is no way to free an allocated SMEM
+ * region. Allocation only reserves the space; to use it you need
+ * to "get" a pointer to it (this does not imply reference counting).
+ * The item might have already been allocated, in which case we
+ * use it unless the size isn't what we expect.
+ */
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
+ if (ret && ret != -EEXIST) {
+ dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
+ ret, size, item);
+ return ret;
+ }
+
+ /* Now get the address of the SMEM memory region */
+ virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
+ if (IS_ERR(virt)) {
+ ret = PTR_ERR(virt);
+ dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
+ return ret;
+ }
+
+ /* In case the region was already allocated, verify the size */
+ if (ret && actual != size) {
+ dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
+ item, actual, size);
+ return -EINVAL;
+ }
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain) {
+ dev_err(dev, "no IOMMU domain found for SMEM\n");
+ return -EINVAL;
+ }
+
+ /* Align the address down and the size up to a page boundary */
+ addr = qcom_smem_virt_to_phys(virt);
+ phys = addr & PAGE_MASK;
+ size = PAGE_ALIGN(size + addr - phys);
+ iova = phys; /* We just want a direct mapping */
+
+ ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ ipa->smem_iova = iova;
+ ipa->smem_size = size;
+
+ return 0;
+}
+
+static void ipa_smem_exit(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain) {
+ size_t size;
+
+ size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
+ if (size != ipa->smem_size)
+ dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
+ size, ipa->smem_size);
+
+ } else {
+ dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
+ }
+
+ ipa->smem_size = 0;
+ ipa->smem_iova = 0;
+}
+
+/* Perform memory region-related initialization */
+int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct resource *res;
+ int ret;
+
+ /* Make sure the set of defined memory regions is valid */
+ if (!ipa_mem_valid(ipa, mem_data))
+ return -EINVAL;
+
+ ipa->mem_count = mem_data->local_count;
+ ipa->mem = mem_data->local;
+
+ /* Check the route and filter table memory regions */
+ if (!ipa_table_mem_valid(ipa, false))
+ return -EINVAL;
+ if (!ipa_table_mem_valid(ipa, true))
+ return -EINVAL;
+
+ ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(dev, "error %d setting DMA mask\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
+ "ipa-shared");
+ if (!res) {
+ dev_err(dev,
+ "DT error getting \"ipa-shared\" memory property\n");
+ return -ENODEV;
+ }
+
+ ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
+ if (!ipa->mem_virt) {
+ dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
+ return -ENOMEM;
+ }
+
+ ipa->mem_addr = res->start;
+ ipa->mem_size = resource_size(res);
+
+ ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
+ if (ret)
+ goto err_unmap;
+
+ ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
+ if (ret)
+ goto err_imem_exit;
+
+ return 0;
+
+err_imem_exit:
+ ipa_imem_exit(ipa);
+err_unmap:
+ memunmap(ipa->mem_virt);
+
+ return ret;
+}
+
+/* Inverse of ipa_mem_init() */
+void ipa_mem_exit(struct ipa *ipa)
+{
+ ipa_smem_exit(ipa);
+ ipa_imem_exit(ipa);
+ memunmap(ipa->mem_virt);
+}
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
new file mode 100644
index 0000000000..868e9c20e8
--- /dev/null
+++ b/drivers/net/ipa/ipa_mem.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2023 Linaro Ltd.
+ */
+#ifndef _IPA_MEM_H_
+#define _IPA_MEM_H_
+
+struct ipa;
+struct ipa_mem_data;
+
+/**
+ * DOC: IPA Local Memory
+ *
+ * The IPA has a block of shared memory, divided into regions used for
+ * specific purposes.
+ *
+ * The regions within the shared block are bounded by an offset (relative to
+ * the "ipa-shared" memory range) and size found in the IPA_SHARED_MEM_SIZE
+ * register.
+ *
+ * Each region is optionally preceded by one or more 32-bit "canary" values.
+ * These are meant to detect out-of-range writes (if they become corrupted).
+ * A given region (such as a filter or routing table) has the same number
+ * of canaries for all IPA hardware versions; nevertheless, the number used
+ * is defined in the config data, allowing regions to be handled generically.
+ *
+ * The set of memory regions is defined in configuration data. They are
+ * subject to these constraints:
+ * - a zero offset and zero size represent an undefined region
+ * - a region's size does not include space for its "canary" values
+ * - a region's offset is defined to be *past* all "canary" values
+ * - offset must be large enough to account for all canaries
+ * - a region's size may be zero, but may still have canaries
+ * - all offsets must be 8-byte aligned
+ * - most sizes must be a multiple of 8
+ * - modem memory size must be a multiple of 4
+ * - the microcontroller ring offset must be a multiple of 1024
+ */
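+
+/* Purely for illustration (the values here are only examples, not taken
+ * from any particular platform's configuration data), one region entry
+ * uses the struct ipa_mem layout defined below and might look like:
+ *
+ *     {
+ *             .id           = IPA_MEM_UC_INFO,
+ *             .offset       = 0x0080,
+ *             .size         = 0x0200,
+ *             .canary_count = 0,
+ *     }
+ *
+ * The offset is 8-byte aligned and already lies past any canary values
+ * that precede the region.
+ */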
+
+/* The maximum allowed size for any memory region */
+#define IPA_MEM_MAX (2 * PAGE_SIZE)
+
+/* IPA-resident memory region ids */
+enum ipa_mem_id {
+ IPA_MEM_UC_SHARED, /* 0 canaries */
+ IPA_MEM_UC_INFO, /* 0 canaries */
+ IPA_MEM_V4_FILTER_HASHED, /* 2 canaries */
+ IPA_MEM_V4_FILTER, /* 2 canaries */
+ IPA_MEM_V6_FILTER_HASHED, /* 2 canaries */
+ IPA_MEM_V6_FILTER, /* 2 canaries */
+ IPA_MEM_V4_ROUTE_HASHED, /* 2 canaries */
+ IPA_MEM_V4_ROUTE, /* 2 canaries */
+ IPA_MEM_V6_ROUTE_HASHED, /* 2 canaries */
+ IPA_MEM_V6_ROUTE, /* 2 canaries */
+ IPA_MEM_MODEM_HEADER, /* 2 canaries */
+ IPA_MEM_AP_HEADER, /* 0 canaries, optional */
+ IPA_MEM_MODEM_PROC_CTX, /* 2 canaries */
+ IPA_MEM_AP_PROC_CTX, /* 0 canaries */
+ IPA_MEM_MODEM, /* 0/2 canaries */
+ IPA_MEM_UC_EVENT_RING, /* 1 canary, optional */
+ IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_QUOTA_AP, /* 0 canaries, optional (IPA v4.0+) */
+ IPA_MEM_STATS_TETHERING, /* 0 canaries, optional (IPA v4.0+) */
+ IPA_MEM_STATS_DROP, /* 0 canaries, optional (IPA v4.0+) */
+ /* The next 7 filter and route statistics regions are optional */
+ IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_AP_V4_FILTER, /* 2 canaries (IPA v5.0) */
+ IPA_MEM_AP_V6_FILTER, /* 0 canaries (IPA v5.0) */
+ IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5+) */
+ IPA_MEM_NAT_TABLE, /* 4 canaries, optional (IPA v4.5+) */
+ IPA_MEM_END_MARKER, /* 1 canary (not a real region) */
+ IPA_MEM_COUNT, /* Number of regions (not an index) */
+};
+
+/**
+ * struct ipa_mem - IPA local memory region description
+ * @id: memory region identifier
+ * @offset: offset in IPA memory space to base of the region
+ * @size: size in bytes of the region
+ * @canary_count: Number of 32-bit "canary" values that precede region
+ */
+struct ipa_mem {
+ enum ipa_mem_id id;
+ u32 offset;
+ u16 size;
+ u16 canary_count;
+};
+
+const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id);
+
+int ipa_mem_config(struct ipa *ipa);
+void ipa_mem_deconfig(struct ipa *ipa);
+
+int ipa_mem_setup(struct ipa *ipa); /* No ipa_mem_teardown() needed */
+
+int ipa_mem_zero_modem(struct ipa *ipa);
+
+int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data);
+void ipa_mem_exit(struct ipa *ipa);
+
+#endif /* _IPA_MEM_H_ */
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
new file mode 100644
index 0000000000..423422a2a4
--- /dev/null
+++ b/drivers/net/ipa/ipa_modem.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_rmnet.h>
+#include <linux/etherdevice.h>
+#include <net/pkt_sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc/qcom_rproc.h>
+
+#include "ipa.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_mem.h"
+#include "ipa_modem.h"
+#include "ipa_smp2p.h"
+#include "ipa_qmi.h"
+#include "ipa_uc.h"
+#include "ipa_power.h"
+
+#define IPA_NETDEV_NAME "rmnet_ipa%d"
+#define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
+#define IPA_NETDEV_TIMEOUT 10 /* seconds */
+
+enum ipa_modem_state {
+ IPA_MODEM_STATE_STOPPED = 0,
+ IPA_MODEM_STATE_STARTING,
+ IPA_MODEM_STATE_RUNNING,
+ IPA_MODEM_STATE_STOPPING,
+};
+
+/**
+ * struct ipa_priv - IPA network device private data
+ * @ipa: IPA pointer
+ * @work: Work structure used to wake the modem netdev TX queue
+ */
+struct ipa_priv {
+ struct ipa *ipa;
+ struct work_struct work;
+};
+
+/** ipa_open() - Opens the modem network interface */
+static int ipa_open(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+ struct device *dev;
+ int ret;
+
+ dev = &ipa->pdev->dev;
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_power_put;
+
+ ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ if (ret)
+ goto err_power_put;
+
+ ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ if (ret)
+ goto err_disable_tx;
+
+ netif_start_queue(netdev);
+
+ pm_runtime_mark_last_busy(dev);
+ (void)pm_runtime_put_autosuspend(dev);
+
+ return 0;
+
+err_disable_tx:
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+err_power_put:
+ pm_runtime_put_noidle(dev);
+
+ return ret;
+}
+
+/** ipa_stop() - Stops the modem network interface. */
+static int ipa_stop(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+ struct device *dev;
+ int ret;
+
+ dev = &ipa->pdev->dev;
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto out_power_put;
+
+ netif_stop_queue(netdev);
+
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+out_power_put:
+ pm_runtime_mark_last_busy(dev);
+ (void)pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+/** ipa_start_xmit() - Transmits an skb.
+ * @skb: skb to be transmitted
+ * @netdev: network device
+ *
+ * Return codes:
+ * NETDEV_TX_OK: Success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
+ */
+static netdev_tx_t
+ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct net_device_stats *stats = &netdev->stats;
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa_endpoint *endpoint;
+ struct ipa *ipa = priv->ipa;
+ u32 skb_len = skb->len;
+ struct device *dev;
+ int ret;
+
+ if (!skb_len)
+ goto err_drop_skb;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
+ if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
+ goto err_drop_skb;
+
+ /* The hardware must be powered for us to transmit */
+ dev = &ipa->pdev->dev;
+ ret = pm_runtime_get(dev);
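+ /* pm_runtime_get() returns a positive value if the hardware is
+ * already active, 0 if an asynchronous resume was started, or a
+ * negative error code (-EINPROGRESS while a resume is underway).
+ */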
+ if (ret < 1) {
+ /* If a resume won't happen, just drop the packet */
+ if (ret < 0 && ret != -EINPROGRESS) {
+ ipa_power_modem_queue_active(ipa);
+ pm_runtime_put_noidle(dev);
+ goto err_drop_skb;
+ }
+
+ /* No power (yet). Stop the network stack from transmitting
+ * until we're resumed; ipa_modem_resume() arranges for the
+ * TX queue to be started again.
+ */
+ ipa_power_modem_queue_stop(ipa);
+
+ pm_runtime_put_noidle(dev);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ ipa_power_modem_queue_active(ipa);
+
+ ret = ipa_endpoint_skb_tx(endpoint, skb);
+
+ pm_runtime_mark_last_busy(dev);
+ (void)pm_runtime_put_autosuspend(dev);
+
+ if (ret) {
+ if (ret != -E2BIG)
+ return NETDEV_TX_BUSY;
+ goto err_drop_skb;
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += skb_len;
+
+ return NETDEV_TX_OK;
+
+err_drop_skb:
+ dev_kfree_skb_any(skb);
+ stats->tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct net_device_stats *stats = &netdev->stats;
+
+ if (skb) {
+ skb->dev = netdev;
+ skb->protocol = htons(ETH_P_MAP);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+
+ (void)netif_receive_skb(skb);
+ } else {
+ stats->rx_dropped++;
+ }
+}
+
+static const struct net_device_ops ipa_modem_ops = {
+ .ndo_open = ipa_open,
+ .ndo_stop = ipa_stop,
+ .ndo_start_xmit = ipa_start_xmit,
+};
+
+/** ipa_modem_netdev_setup() - netdev setup function for the modem */
+static void ipa_modem_netdev_setup(struct net_device *netdev)
+{
+ netdev->netdev_ops = &ipa_modem_ops;
+
+ netdev->header_ops = NULL;
+ netdev->type = ARPHRD_RAWIP;
+ netdev->hard_header_len = 0;
+ netdev->min_header_len = ETH_HLEN;
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = IPA_MTU;
+ netdev->mtu = netdev->max_mtu;
+ netdev->addr_len = 0;
+ netdev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ netdev->priv_flags |= IFF_TX_SKB_SHARING;
+ eth_broadcast_addr(netdev->broadcast);
+
+ /* The endpoint is configured for QMAP */
+ netdev->needed_headroom = sizeof(struct rmnet_map_header);
+ netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
+ netdev->watchdog_timeo = IPA_NETDEV_TIMEOUT * HZ;
+ netdev->hw_features = NETIF_F_SG;
+}
+
+/** ipa_modem_suspend() - suspend callback
+ * @netdev: Network device
+ *
+ * Suspend the modem's endpoints.
+ */
+void ipa_modem_suspend(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ if (!(netdev->flags & IFF_UP))
+ return;
+
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+}
+
+/**
+ * ipa_modem_wake_queue_work() - enable modem netdev queue
+ * @work: Work structure
+ *
+ * Re-enable transmit on the modem network device. This is called
+ * in (power management) work queue context, scheduled when resuming
+ * the modem. We can't enable the queue directly in ipa_modem_resume()
+ * because transmits restart the instant the queue is awakened; but the
+ * device power state won't be ACTIVE until *after* ipa_modem_resume()
+ * returns.
+ */
+static void ipa_modem_wake_queue_work(struct work_struct *work)
+{
+ struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
+
+ ipa_power_modem_queue_wake(priv->ipa);
+}
+
+/** ipa_modem_resume() - resume callback
+ * @netdev: Network device
+ *
+ * Resume the modem's endpoints.
+ */
+void ipa_modem_resume(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ if (!(netdev->flags & IFF_UP))
+ return;
+
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+
+ /* Arrange for the TX queue to be restarted */
+ (void)queue_pm_work(&priv->work);
+}
+
+int ipa_modem_start(struct ipa *ipa)
+{
+ enum ipa_modem_state state;
+ struct net_device *netdev;
+ struct ipa_priv *priv;
+ int ret;
+
+ /* Only attempt to start the modem if it's stopped */
+ state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_STOPPED,
+ IPA_MODEM_STATE_STARTING);
+
+ /* Silently ignore attempts when running, or when changing state */
+ if (state != IPA_MODEM_STATE_STOPPED)
+ return 0;
+
+ netdev = alloc_netdev(sizeof(struct ipa_priv), IPA_NETDEV_NAME,
+ NET_NAME_UNKNOWN, ipa_modem_netdev_setup);
+ if (!netdev) {
+ ret = -ENOMEM;
+ goto out_set_state;
+ }
+
+ SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
+ priv = netdev_priv(netdev);
+ priv->ipa = ipa;
+ INIT_WORK(&priv->work, ipa_modem_wake_queue_work);
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+ ipa->modem_netdev = netdev;
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ ipa->modem_netdev = NULL;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
+ free_netdev(netdev);
+ }
+
+out_set_state:
+ if (ret)
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
+ else
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
+ smp_mb__after_atomic();
+
+ return ret;
+}
+
+int ipa_modem_stop(struct ipa *ipa)
+{
+ struct net_device *netdev = ipa->modem_netdev;
+ enum ipa_modem_state state;
+
+ /* Only attempt to stop the modem if it's running */
+ state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
+ IPA_MODEM_STATE_STOPPING);
+
+ /* Silently ignore attempts when already stopped */
+ if (state == IPA_MODEM_STATE_STOPPED)
+ return 0;
+
+ /* If we're somewhere between stopped and starting, we're busy */
+ if (state != IPA_MODEM_STATE_RUNNING)
+ return -EBUSY;
+
+ /* Clean up the netdev and endpoints if it was started */
+ if (netdev) {
+ struct ipa_priv *priv = netdev_priv(netdev);
+
+ cancel_work_sync(&priv->work);
+ /* If it was opened, stop it first */
+ if (netdev->flags & IFF_UP)
+ (void)ipa_stop(netdev);
+ unregister_netdev(netdev);
+ ipa->modem_netdev = NULL;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
+ free_netdev(netdev);
+ }
+
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
+ smp_mb__after_atomic();
+
+ return 0;
+}
+
+/* Treat a "clean" modem stop the same as a crash */
+static void ipa_modem_crashed(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ int ret;
+
+ /* Prevent the modem from triggering a call to ipa_setup() */
+ ipa_smp2p_irq_disable_setup(ipa);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "error %d getting power to handle crash\n", ret);
+ goto out_power_put;
+ }
+
+ ipa_endpoint_modem_pause_all(ipa, true);
+
+ ipa_endpoint_modem_hol_block_clear_all(ipa);
+
+ ipa_table_reset(ipa, true);
+
+ ret = ipa_table_hash_flush(ipa);
+ if (ret)
+ dev_err(dev, "error %d flushing hash caches\n", ret);
+
+ ret = ipa_endpoint_modem_exception_reset_all(ipa);
+ if (ret)
+ dev_err(dev, "error %d resetting exception endpoint\n", ret);
+
+ ipa_endpoint_modem_pause_all(ipa, false);
+
+ ret = ipa_modem_stop(ipa);
+ if (ret)
+ dev_err(dev, "error %d stopping modem\n", ret);
+
+ /* Now prepare for the next modem boot */
+ ret = ipa_mem_zero_modem(ipa);
+ if (ret)
+ dev_err(dev, "error %d zeroing modem memory regions\n", ret);
+
+out_power_put:
+ pm_runtime_mark_last_busy(dev);
+ (void)pm_runtime_put_autosuspend(dev);
+}
+
+static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct ipa *ipa = container_of(nb, struct ipa, nb);
+ struct qcom_ssr_notify_data *notify_data = data;
+ struct device *dev = &ipa->pdev->dev;
+
+ switch (action) {
+ case QCOM_SSR_BEFORE_POWERUP:
+ dev_info(dev, "received modem starting event\n");
+ ipa_uc_power(ipa);
+ ipa_smp2p_notify_reset(ipa);
+ break;
+
+ case QCOM_SSR_AFTER_POWERUP:
+ dev_info(dev, "received modem running event\n");
+ break;
+
+ case QCOM_SSR_BEFORE_SHUTDOWN:
+ dev_info(dev, "received modem %s event\n",
+ notify_data->crashed ? "crashed" : "stopping");
+ if (ipa->setup_complete)
+ ipa_modem_crashed(ipa);
+ break;
+
+ case QCOM_SSR_AFTER_SHUTDOWN:
+ dev_info(dev, "received modem offline event\n");
+ break;
+
+ default:
+ dev_err(dev, "received unrecognized event %lu\n", action);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+int ipa_modem_config(struct ipa *ipa)
+{
+ void *notifier;
+
+ ipa->nb.notifier_call = ipa_modem_notify;
+
+ notifier = qcom_register_ssr_notifier("mpss", &ipa->nb);
+ if (IS_ERR(notifier))
+ return PTR_ERR(notifier);
+
+ ipa->notifier = notifier;
+
+ return 0;
+}
+
+void ipa_modem_deconfig(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ int ret;
+
+ ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
+ if (ret)
+ dev_err(dev, "error %d unregistering notifier", ret);
+
+ ipa->notifier = NULL;
+ memset(&ipa->nb, 0, sizeof(ipa->nb));
+}
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
new file mode 100644
index 0000000000..d85718db9a
--- /dev/null
+++ b/drivers/net/ipa/ipa_modem.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+#ifndef _IPA_MODEM_H_
+#define _IPA_MODEM_H_
+
+struct ipa;
+struct net_device;
+struct sk_buff;
+
+int ipa_modem_start(struct ipa *ipa);
+int ipa_modem_stop(struct ipa *ipa);
+
+void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb);
+
+void ipa_modem_suspend(struct net_device *netdev);
+void ipa_modem_resume(struct net_device *netdev);
+
+int ipa_modem_config(struct ipa *ipa);
+void ipa_modem_deconfig(struct ipa *ipa);
+
+#endif /* _IPA_MODEM_H_ */
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
new file mode 100644
index 0000000000..0eaa7a7f33
--- /dev/null
+++ b/drivers/net/ipa/ipa_power.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/bitops.h>
+
+#include "linux/soc/qcom/qcom_aoss.h"
+
+#include "ipa.h"
+#include "ipa_power.h"
+#include "ipa_endpoint.h"
+#include "ipa_modem.h"
+#include "ipa_data.h"
+
+/**
+ * DOC: IPA Power Management
+ *
+ * The IPA hardware is enabled when the IPA core clock and all the
+ * interconnects (buses) it depends on are enabled. Runtime power
+ * management is used to determine whether the core clock and
+ * interconnects are enabled, and if not in use to be suspended
+ * automatically.
+ *
+ * The core clock currently runs at a fixed rate when enabled, and all
+ * interconnects use a fixed average and peak bandwidth.
+ */
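+
+/* For reference, callers elsewhere in the driver keep the hardware powered
+ * only for the duration of an operation, using the usual runtime PM
+ * pattern; a sketch (not a prescription) looks like this:
+ *
+ *     ret = pm_runtime_get_sync(dev);
+ *     if (ret < 0) {
+ *             pm_runtime_put_noidle(dev);
+ *             return ret;
+ *     }
+ *     ... access the IPA hardware ...
+ *     pm_runtime_mark_last_busy(dev);
+ *     (void)pm_runtime_put_autosuspend(dev);
+ */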
+
+#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
+
+/**
+ * enum ipa_power_flag - IPA power flags
+ * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
+ * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
+ * @IPA_POWER_FLAG_STOPPED: Modem TX is disabled by ipa_start_xmit()
+ * @IPA_POWER_FLAG_STARTED: Modem TX was enabled by ipa_runtime_resume()
+ * @IPA_POWER_FLAG_COUNT: Number of defined power flags
+ */
+enum ipa_power_flag {
+ IPA_POWER_FLAG_RESUMED,
+ IPA_POWER_FLAG_SYSTEM,
+ IPA_POWER_FLAG_STOPPED,
+ IPA_POWER_FLAG_STARTED,
+ IPA_POWER_FLAG_COUNT, /* Last; not a flag */
+};
+
+/**
+ * struct ipa_power - IPA power management information
+ * @dev: IPA device pointer
+ * @core: IPA core clock
+ * @qmp: QMP handle for AOSS communication
+ * @spinlock: Protects modem TX queue enable/disable
+ * @flags: Boolean state flags
+ * @interconnect_count: Number of elements in interconnect[]
+ * @interconnect: Interconnect array
+ */
+struct ipa_power {
+ struct device *dev;
+ struct clk *core;
+ struct qmp *qmp;
+ spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
+ DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
+ u32 interconnect_count;
+ struct icc_bulk_data interconnect[];
+};
+
+/* Initialize interconnects required for IPA operation */
+static int ipa_interconnect_init(struct ipa_power *power,
+ const struct ipa_interconnect_data *data)
+{
+ struct icc_bulk_data *interconnect;
+ int ret;
+ u32 i;
+
+ /* Initialize our interconnect data array for bulk operations */
+ interconnect = &power->interconnect[0];
+ for (i = 0; i < power->interconnect_count; i++) {
+ /* interconnect->path is filled in by of_icc_bulk_get() */
+ interconnect->name = data->name;
+ interconnect->avg_bw = data->average_bandwidth;
+ interconnect->peak_bw = data->peak_bandwidth;
+ data++;
+ interconnect++;
+ }
+
+ ret = of_icc_bulk_get(power->dev, power->interconnect_count,
+ power->interconnect);
+ if (ret)
+ return ret;
+
+ /* All interconnects are initially disabled */
+ icc_bulk_disable(power->interconnect_count, power->interconnect);
+
+ /* Set the bandwidth values to be used when enabled */
+ ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
+ if (ret)
+ icc_bulk_put(power->interconnect_count, power->interconnect);
+
+ return ret;
+}
+
+/* Inverse of ipa_interconnect_init() */
+static void ipa_interconnect_exit(struct ipa_power *power)
+{
+ icc_bulk_put(power->interconnect_count, power->interconnect);
+}
+
+/* Enable IPA power, enabling interconnects and the core clock */
+static int ipa_power_enable(struct ipa *ipa)
+{
+ struct ipa_power *power = ipa->power;
+ int ret;
+
+ ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(power->core);
+ if (ret) {
+ dev_err(power->dev, "error %d enabling core clock\n", ret);
+ icc_bulk_disable(power->interconnect_count,
+ power->interconnect);
+ }
+
+ return ret;
+}
+
+/* Inverse of ipa_power_enable() */
+static void ipa_power_disable(struct ipa *ipa)
+{
+ struct ipa_power *power = ipa->power;
+
+ clk_disable_unprepare(power->core);
+
+ icc_bulk_disable(power->interconnect_count, power->interconnect);
+}
+
+static int ipa_runtime_suspend(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ /* Endpoints aren't usable until setup is complete */
+ if (ipa->setup_complete) {
+ __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
+ ipa_endpoint_suspend(ipa);
+ gsi_suspend(&ipa->gsi);
+ }
+
+ ipa_power_disable(ipa);
+
+ return 0;
+}
+
+static int ipa_runtime_resume(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ipa_power_enable(ipa);
+ if (WARN_ON(ret < 0))
+ return ret;
+
+ /* Endpoints aren't usable until setup is complete */
+ if (ipa->setup_complete) {
+ gsi_resume(&ipa->gsi);
+ ipa_endpoint_resume(ipa);
+ }
+
+ return 0;
+}
+
+static int ipa_suspend(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
+
+ /* Increment the disable depth to ensure that the IRQ won't
+ * be re-enabled until the matching _enable call in
+ * ipa_resume(). We do this to ensure that the interrupt
+ * handler won't run whilst PM runtime is disabled.
+ *
+ * Note that disabling the IRQ is NOT the same as disabling
+ * irq wake. If wakeup is enabled for the IPA then the IRQ
+ * will still cause the system to wake up, see irq_set_irq_wake().
+ */
+ ipa_interrupt_irq_disable(ipa);
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int ipa_resume(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+
+ __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
+
+ /* Now that PM runtime is enabled again it's safe
+ * to turn the IRQ back on and process any data
+ * that was received during suspend.
+ */
+ ipa_interrupt_irq_enable(ipa);
+
+ return ret;
+}
+
+/* Return the current IPA core clock rate */
+u32 ipa_core_clock_rate(struct ipa *ipa)
+{
+ return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
+}
+
+void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ /* The hardware will already have been resumed in order to handle
+ * this IPA interrupt, so there is nothing more to do for that.
+ * But if we are in a system suspend, trigger a system resume.
+ */
+ if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
+ if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
+ pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
+
+ /* Acknowledge/clear the suspend interrupt on all endpoints */
+ ipa_interrupt_suspend_clear_all(ipa->interrupt);
+}
+
+/* The next few functions coordinate stopping and starting the modem
+ * network device transmit queue.
+ *
+ * Transmit can be running concurrent with power resume, and there's a
+ * chance the resume completes before the transmit path stops the queue,
+ * leaving the queue in a stopped state. The next two functions are used
+ * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
+ * to conditionally stop the TX queue; and ipa_power_modem_queue_wake()
+ * is used on the resume path to conditionally restart it.
+ *
+ * Two flags and a spinlock are used. If the queue is stopped, the STOPPED
+ * power flag is set. And if the queue is started, the STARTED flag is set.
+ * The queue is only started on resume if the STOPPED flag is set. And the
+ * queue is only stopped in ipa_start_xmit() if the STARTED flag is *not*
+ * set. As a result, the queue remains operational if the two activities
+ * happen concurrently regardless of the order they complete. The spinlock
+ * ensures the flag and TX queue operations are done atomically.
+ *
+ * The first function below stops the modem netdev transmit queue, but only
+ * if the STARTED flag is *not* set. That flag is cleared if it was set.
+ * If the queue is stopped, the STOPPED flag is set. This is called only
+ * from ipa_start_xmit().
+ */
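+
+/* One possible interleaving, for illustration: a transmit attempt finds the
+ * hardware suspended, stops the queue and sets STOPPED; when the resume
+ * work later runs, ipa_power_modem_queue_wake() sees STOPPED set, wakes the
+ * queue and sets STARTED; the next transmit then clears STARTED by calling
+ * ipa_power_modem_queue_active().
+ */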
+void ipa_power_modem_queue_stop(struct ipa *ipa)
+{
+ struct ipa_power *power = ipa->power;
+ unsigned long flags;
+
+ spin_lock_irqsave(&power->spinlock, flags);
+
+ if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
+ netif_stop_queue(ipa->modem_netdev);
+ __set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
+ }
+
+ spin_unlock_irqrestore(&power->spinlock, flags);
+}
+
+/* This function starts the modem netdev transmit queue, but only if the
+ * STOPPED flag is set. That flag is cleared if it was set. If the queue
+ * was restarted, the STARTED flag is set; this allows ipa_start_xmit()
+ * to skip stopping the queue in the event of a race.
+ */
+void ipa_power_modem_queue_wake(struct ipa *ipa)
+{
+ struct ipa_power *power = ipa->power;
+ unsigned long flags;
+
+ spin_lock_irqsave(&power->spinlock, flags);
+
+ if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
+ __set_bit(IPA_POWER_FLAG_STARTED, power->flags);
+ netif_wake_queue(ipa->modem_netdev);
+ }
+
+ spin_unlock_irqrestore(&power->spinlock, flags);
+}
+
+/* This function clears the STARTED flag once the TX queue is operating */
+void ipa_power_modem_queue_active(struct ipa *ipa)
+{
+ clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
+}
+
+static int ipa_power_retention_init(struct ipa_power *power)
+{
+ struct qmp *qmp = qmp_get(power->dev);
+
+ if (IS_ERR(qmp)) {
+ if (PTR_ERR(qmp) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* We assume any other error means it's not defined/needed */
+ qmp = NULL;
+ }
+ power->qmp = qmp;
+
+ return 0;
+}
+
+static void ipa_power_retention_exit(struct ipa_power *power)
+{
+ qmp_put(power->qmp);
+ power->qmp = NULL;
+}
+
+/* Control register retention on power collapse */
+void ipa_power_retention(struct ipa *ipa, bool enable)
+{
+ static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
+ struct ipa_power *power = ipa->power;
+ int ret;
+
+ if (!power->qmp)
+ return; /* Not needed on this platform */
+
+ ret = qmp_send(power->qmp, fmt, enable ? '1' : '0');
+ if (ret)
+ dev_err(power->dev, "error %d sending QMP %sable request\n",
+ ret, enable ? "en" : "dis");
+}
+
+int ipa_power_setup(struct ipa *ipa)
+{
+ int ret;
+
+ ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
+
+ ret = device_init_wakeup(&ipa->pdev->dev, true);
+ if (ret)
+ ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
+
+ return ret;
+}
+
+void ipa_power_teardown(struct ipa *ipa)
+{
+ (void)device_init_wakeup(&ipa->pdev->dev, false);
+ ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
+}
+
+/* Initialize IPA power management */
+struct ipa_power *
+ipa_power_init(struct device *dev, const struct ipa_power_data *data)
+{
+ struct ipa_power *power;
+ struct clk *clk;
+ size_t size;
+ int ret;
+
+ clk = clk_get(dev, "core");
+ if (IS_ERR(clk)) {
+ dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");
+
+ return ERR_CAST(clk);
+ }
+
+ ret = clk_set_rate(clk, data->core_clock_rate);
+ if (ret) {
+ dev_err(dev, "error %d setting core clock rate to %u\n",
+ ret, data->core_clock_rate);
+ goto err_clk_put;
+ }
+
+ size = struct_size(power, interconnect, data->interconnect_count);
+ power = kzalloc(size, GFP_KERNEL);
+ if (!power) {
+ ret = -ENOMEM;
+ goto err_clk_put;
+ }
+ power->dev = dev;
+ power->core = clk;
+ spin_lock_init(&power->spinlock);
+ power->interconnect_count = data->interconnect_count;
+
+ ret = ipa_interconnect_init(power, data->interconnect_data);
+ if (ret)
+ goto err_kfree;
+
+ ret = ipa_power_retention_init(power);
+ if (ret)
+ goto err_interconnect_exit;
+
+ pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ return power;
+
+err_interconnect_exit:
+ ipa_interconnect_exit(power);
+err_kfree:
+ kfree(power);
+err_clk_put:
+ clk_put(clk);
+
+ return ERR_PTR(ret);
+}
+
+/* Inverse of ipa_power_init() */
+void ipa_power_exit(struct ipa_power *power)
+{
+ struct device *dev = power->dev;
+ struct clk *clk = power->core;
+
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ ipa_power_retention_exit(power);
+ ipa_interconnect_exit(power);
+ kfree(power);
+ clk_put(clk);
+}
+
+const struct dev_pm_ops ipa_pm_ops = {
+ .suspend = ipa_suspend,
+ .resume = ipa_resume,
+ .runtime_suspend = ipa_runtime_suspend,
+ .runtime_resume = ipa_runtime_resume,
+};
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
new file mode 100644
index 0000000000..3a4c59ea12
--- /dev/null
+++ b/drivers/net/ipa/ipa_power.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+#ifndef _IPA_POWER_H_
+#define _IPA_POWER_H_
+
+struct device;
+
+struct ipa;
+struct ipa_power_data;
+enum ipa_irq_id;
+
+/* IPA device power management function block */
+extern const struct dev_pm_ops ipa_pm_ops;
+
+/**
+ * ipa_core_clock_rate() - Return the current IPA core clock rate
+ * @ipa: IPA structure
+ *
+ * Return: The current clock rate (in Hz), or 0.
+ */
+u32 ipa_core_clock_rate(struct ipa *ipa);
+
+/**
+ * ipa_power_modem_queue_stop() - Possibly stop the modem netdev TX queue
+ * @ipa: IPA pointer
+ */
+void ipa_power_modem_queue_stop(struct ipa *ipa);
+
+/**
+ * ipa_power_modem_queue_wake() - Possibly wake the modem netdev TX queue
+ * @ipa: IPA pointer
+ */
+void ipa_power_modem_queue_wake(struct ipa *ipa);
+
+/**
+ * ipa_power_modem_queue_active() - Report modem netdev TX queue active
+ * @ipa: IPA pointer
+ */
+void ipa_power_modem_queue_active(struct ipa *ipa);
+
+/**
+ * ipa_power_retention() - Control register retention on power collapse
+ * @ipa: IPA pointer
+ * @enable: Whether retention should be enabled or disabled
+ */
+void ipa_power_retention(struct ipa *ipa, bool enable);
+
+/**
+ * ipa_power_suspend_handler() - Handler for SUSPEND IPA interrupts
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt ID (unused)
+ *
+ * If an RX endpoint is suspended, and the IPA has a packet destined for
+ * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
+ * that it should resume the endpoint.
+ */
+void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id);
+
+/**
+ * ipa_power_setup() - Set up IPA power management
+ * @ipa: IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_power_setup(struct ipa *ipa);
+
+/**
+ * ipa_power_teardown() - Inverse of ipa_power_setup()
+ * @ipa: IPA pointer
+ */
+void ipa_power_teardown(struct ipa *ipa);
+
+/**
+ * ipa_power_init() - Initialize IPA power management
+ * @dev: IPA device
+ * @data: Clock configuration data
+ *
+ * Return: A pointer to an ipa_power structure, or a pointer-coded error
+ */
+struct ipa_power *ipa_power_init(struct device *dev,
+ const struct ipa_power_data *data);
+
+/**
+ * ipa_power_exit() - Inverse of ipa_power_init()
+ * @power: IPA power pointer
+ */
+void ipa_power_exit(struct ipa_power *power);
+
+#endif /* _IPA_POWER_H_ */
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
new file mode 100644
index 0000000000..f70f0a1d1c
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/qrtr.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+#include "ipa_table.h"
+#include "ipa_modem.h"
+#include "ipa_qmi_msg.h"
+
+/**
+ * DOC: AP/Modem QMI Handshake
+ *
+ * The AP and modem perform a "handshake" at initialization time to ensure
+ * both sides know when everything is ready to begin operating. The AP
+ * driver (this code) uses two QMI handles (endpoints) for this; a client
+ * using a service on the modem, and server to service modem requests (and
+ * to supply an indication message from the AP). Once the handshake is
+ * complete, the AP and modem may begin IPA operation. This occurs
+ * only when the AP IPA driver, modem IPA driver, and IPA microcontroller
+ * are ready.
+ *
+ * The QMI service on the modem expects to receive an INIT_DRIVER request from
+ * the AP, which contains parameters used by the modem during initialization.
+ * The AP sends this request as soon as it knows the modem-side service
+ * is available. The modem responds to this request, and if this response
+ * contains a success result, the AP knows the modem IPA driver is ready.
+ *
+ * The modem is responsible for loading firmware on the IPA microcontroller.
+ * This occurs only during the initial modem boot. The modem sends a
+ * separate DRIVER_INIT_COMPLETE request to the AP to report that the
+ * microcontroller is ready. The AP may assume the microcontroller is
+ * ready and remain so (even if the modem reboots) once it has received
+ * and responded to this request.
+ *
+ * There is one final exchange involved in the handshake. It is required
+ * on the initial modem boot, and optional (though in practice it does occur) on
+ * subsequent boots. The modem expects to receive a final INIT_COMPLETE
+ * indication message from the AP when it is about to begin its normal
+ * operation. The AP will only send this message after it has received
+ * and responded to an INDICATION_REGISTER request from the modem.
+ *
+ * So in summary:
+ * - Whenever the AP learns the modem has booted and its IPA QMI service
+ * is available, it sends an INIT_DRIVER request to the modem. The
+ * modem supplies a success response when it is ready to operate.
+ * - On the initial boot, the modem sets up the IPA microcontroller, and
+ * sends a DRIVER_INIT_COMPLETE request to the AP when this is done.
+ * - When the modem is ready to receive an INIT_COMPLETE indication from
+ * the AP, it sends an INDICATION_REGISTER request to the AP.
+ * - On the initial modem boot, everything is ready when:
+ * - AP has received a success response from its INIT_DRIVER request
+ * - AP has responded to a DRIVER_INIT_COMPLETE request
+ * - AP has responded to an INDICATION_REGISTER request from the modem
+ * - AP has sent an INIT_COMPLETE indication to the modem
+ * - On subsequent modem boots, everything is ready when:
+ * - AP has received a success response from its INIT_DRIVER request
+ * - AP has responded to a DRIVER_INIT_COMPLETE request
+ * - The INDICATION_REGISTER request and INIT_COMPLETE indication are
+ * optional for non-initial modem boots, and have no bearing on the
+ * determination of when things are "ready"
+ */
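+
+/* One possible initial-boot message sequence, seen from the AP (the relative
+ * order of the modem's requests is not guaranteed):
+ *
+ *     AP -> modem:    INIT_DRIVER request
+ *     modem -> AP:    INIT_DRIVER response (success)
+ *     modem -> AP:    DRIVER_INIT_COMPLETE request (AP responds)
+ *     modem -> AP:    INDICATION_REGISTER request (AP responds)
+ *     AP -> modem:    INIT_COMPLETE indication
+ */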
+
+#define IPA_HOST_SERVICE_SVC_ID 0x31
+#define IPA_HOST_SVC_VERS 1
+#define IPA_HOST_SERVICE_INS_ID 1
+
+#define IPA_MODEM_SERVICE_SVC_ID 0x31
+#define IPA_MODEM_SERVICE_INS_ID 2
+#define IPA_MODEM_SVC_VERS 1
+
+#define QMI_INIT_DRIVER_TIMEOUT 60000 /* A minute in milliseconds */
+
+/* Send an INIT_COMPLETE indication message to the modem */
+static void ipa_server_init_complete(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ struct qmi_handle *qmi = &ipa_qmi->server_handle;
+ struct sockaddr_qrtr *sq = &ipa_qmi->modem_sq;
+ struct ipa_init_complete_ind ind = { };
+ int ret;
+
+ ind.status.result = QMI_RESULT_SUCCESS_V01;
+ ind.status.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_indication(qmi, sq, IPA_QMI_INIT_COMPLETE,
+ IPA_QMI_INIT_COMPLETE_IND_SZ,
+ ipa_init_complete_ind_ei, &ind);
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d sending init complete indication\n", ret);
+ else
+ ipa_qmi->indication_sent = true;
+}
+
+/* If requested (and not already sent) send the INIT_COMPLETE indication */
+static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
+{
+ if (!ipa_qmi->indication_requested)
+ return;
+
+ if (ipa_qmi->indication_sent)
+ return;
+
+ ipa_server_init_complete(ipa_qmi);
+}
+
+/* Determine whether everything is ready to start normal operation.
+ * We know everything (else) is ready when we know the IPA driver on
+ * the modem is ready, and the microcontroller is ready.
+ *
+ * When the modem boots (or reboots), the handshake sequence starts
+ * with the AP sending the modem an INIT_DRIVER request. Within
+ * that request, the uc_loaded flag will be zero (false) for an
+ * initial boot, non-zero (true) for a subsequent (SSR) boot.
+ */
+static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa;
+ int ret;
+
+ /* We aren't ready until the modem and microcontroller are */
+ if (!ipa_qmi->modem_ready || !ipa_qmi->uc_ready)
+ return;
+
+ /* Send the indication message if it was requested */
+ ipa_qmi_indication(ipa_qmi);
+
+ /* The initial boot requires us to send the indication. */
+ if (ipa_qmi->initial_boot) {
+ if (!ipa_qmi->indication_sent)
+ return;
+
+ /* The initial modem boot completed successfully */
+ ipa_qmi->initial_boot = false;
+ }
+
+ /* We're ready. Start up normal operation */
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+ ret = ipa_modem_start(ipa);
+ if (ret)
+ dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);
+}
+
+/* All QMI clients from the modem node are gone (modem shut down or crashed). */
+static void ipa_server_bye(struct qmi_handle *qmi, unsigned int node)
+{
+ struct ipa_qmi *ipa_qmi;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+
+ /* The modem client and server go away at the same time */
+ memset(&ipa_qmi->modem_sq, 0, sizeof(ipa_qmi->modem_sq));
+
+ /* initial_boot doesn't change when modem reboots */
+ /* uc_ready doesn't change when modem reboots */
+ ipa_qmi->modem_ready = false;
+ ipa_qmi->indication_requested = false;
+ ipa_qmi->indication_sent = false;
+}
+
+static const struct qmi_ops ipa_server_ops = {
+ .bye = ipa_server_bye,
+};
+
+/* Callback function to handle an INDICATION_REGISTER request message from the
+ * modem. This informs the AP that the modem is now ready to receive the
+ * INIT_COMPLETE indication message.
+ */
+static void ipa_server_indication_register(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ipa_indication_register_rsp rsp = { };
+ struct ipa_qmi *ipa_qmi;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+
+ rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+ rsp.rsp.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_response(qmi, sq, txn, IPA_QMI_INDICATION_REGISTER,
+ IPA_QMI_INDICATION_REGISTER_RSP_SZ,
+ ipa_indication_register_rsp_ei, &rsp);
+ if (!ret) {
+ ipa_qmi->indication_requested = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "error %d sending register indication response\n", ret);
+ }
+}
+
+/* Respond to a DRIVER_INIT_COMPLETE request message from the modem. */
+static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ipa_driver_init_complete_rsp rsp = { };
+ struct ipa_qmi *ipa_qmi;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+
+ rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+ rsp.rsp.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_response(qmi, sq, txn, IPA_QMI_DRIVER_INIT_COMPLETE,
+ IPA_QMI_DRIVER_INIT_COMPLETE_RSP_SZ,
+ ipa_driver_init_complete_rsp_ei, &rsp);
+ if (!ret) {
+ ipa_qmi->uc_ready = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "error %d sending init complete response\n", ret);
+ }
+}
+
+/* The server handles two request message types sent by the modem. */
+static const struct qmi_msg_handler ipa_server_msg_handlers[] = {
+ {
+ .type = QMI_REQUEST,
+ .msg_id = IPA_QMI_INDICATION_REGISTER,
+ .ei = ipa_indication_register_req_ei,
+ .decoded_size = IPA_QMI_INDICATION_REGISTER_REQ_SZ,
+ .fn = ipa_server_indication_register,
+ },
+ {
+ .type = QMI_REQUEST,
+ .msg_id = IPA_QMI_DRIVER_INIT_COMPLETE,
+ .ei = ipa_driver_init_complete_req_ei,
+ .decoded_size = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ,
+ .fn = ipa_server_driver_init_complete,
+ },
+ { },
+};
+
+/* Handle an INIT_DRIVER response message from the modem. */
+static void ipa_client_init_driver(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *decoded)
+{
+ txn->result = 0; /* IPA_QMI_INIT_DRIVER request was successful */
+ complete(&txn->completion);
+}
+
+/* The client handles one response message type sent by the modem. */
+static const struct qmi_msg_handler ipa_client_msg_handlers[] = {
+ {
+ .type = QMI_RESPONSE,
+ .msg_id = IPA_QMI_INIT_DRIVER,
+ .ei = ipa_init_modem_driver_rsp_ei,
+ .decoded_size = IPA_QMI_INIT_DRIVER_RSP_SZ,
+ .fn = ipa_client_init_driver,
+ },
+ { },
+};
+
+/* Return a pointer to an init modem driver request structure, which contains
+ * configuration parameters for the modem. The modem may be started multiple
+ * times, but generally these parameters don't change so we can reuse the
+ * request structure once it's initialized. The only exception is the
+ * skip_uc_load field, which will be set only after the microcontroller has
+ * reported it has completed its initialization.
+ */
+static const struct ipa_init_modem_driver_req *
+init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ u32 modem_route_count = ipa->modem_route_count;
+ static struct ipa_init_modem_driver_req req;
+ const struct ipa_mem *mem;
+
+ /* The microcontroller is initialized on the first boot */
+ req.skip_uc_load_valid = 1;
+ req.skip_uc_load = ipa->uc_loaded ? 1 : 0;
+
+ /* We only have to initialize most of it once */
+ if (req.platform_type_valid)
+ return &req;
+
+ req.platform_type_valid = 1;
+ req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;
+
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
+ if (mem->size) {
+ req.hdr_tbl_info_valid = 1;
+ req.hdr_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.hdr_tbl_info.end = req.hdr_tbl_info.start + mem->size - 1;
+ }
+
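+ /* For the route table bounds below, the "end" field carries the
+ * index of the last route table entry made available to the modem,
+ * not a byte offset.
+ */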
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
+ req.v4_route_tbl_info_valid = 1;
+ req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.v4_route_tbl_info.end = modem_route_count - 1;
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
+ req.v6_route_tbl_info_valid = 1;
+ req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.v6_route_tbl_info.end = modem_route_count - 1;
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
+ req.v4_filter_tbl_start_valid = 1;
+ req.v4_filter_tbl_start = ipa->mem_offset + mem->offset;
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER);
+ req.v6_filter_tbl_start_valid = 1;
+ req.v6_filter_tbl_start = ipa->mem_offset + mem->offset;
+
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM);
+ if (mem->size) {
+ req.modem_mem_info_valid = 1;
+ req.modem_mem_info.start = ipa->mem_offset + mem->offset;
+ req.modem_mem_info.size = mem->size;
+ }
+
+ req.ctrl_comm_dest_end_pt_valid = 1;
+ req.ctrl_comm_dest_end_pt =
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->endpoint_id;
+
+ /* skip_uc_load_valid and skip_uc_load are set above */
+
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
+ if (mem->size) {
+ req.hdr_proc_ctx_tbl_info_valid = 1;
+ req.hdr_proc_ctx_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.hdr_proc_ctx_tbl_info.end =
+ req.hdr_proc_ctx_tbl_info.start + mem->size - 1;
+ }
+
+ /* Nothing to report for the compression table (zip_tbl_info) */
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE_HASHED);
+ if (mem->size) {
+ req.v4_hash_route_tbl_info_valid = 1;
+ req.v4_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.v4_hash_route_tbl_info.end = modem_route_count - 1;
+ }
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
+ if (mem->size) {
+ req.v6_hash_route_tbl_info_valid = 1;
+ req.v6_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.v6_hash_route_tbl_info.end = modem_route_count - 1;
+ }
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
+ if (mem->size) {
+ req.v4_hash_filter_tbl_start_valid = 1;
+ req.v4_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
+ }
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER_HASHED);
+ if (mem->size) {
+ req.v6_hash_filter_tbl_start_valid = 1;
+ req.v6_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
+ }
+
+ /* The stats fields are only valid for IPA v4.0+ */
+ if (ipa->version >= IPA_VERSION_4_0) {
+ mem = ipa_mem_find(ipa, IPA_MEM_STATS_QUOTA_MODEM);
+ if (mem->size) {
+ req.hw_stats_quota_base_addr_valid = 1;
+ req.hw_stats_quota_base_addr =
+ ipa->mem_offset + mem->offset;
+ req.hw_stats_quota_size_valid = 1;
+ req.hw_stats_quota_size = ipa->mem_offset + mem->size;
+ }
+
+ /* If the DROP stats region is defined, include it */
+ mem = ipa_mem_find(ipa, IPA_MEM_STATS_DROP);
+ if (mem && mem->size) {
+ req.hw_stats_drop_base_addr_valid = 1;
+ req.hw_stats_drop_base_addr =
+ ipa->mem_offset + mem->offset;
+ req.hw_stats_drop_size_valid = 1;
+ req.hw_stats_drop_size = ipa->mem_offset + mem->size;
+ }
+ }
+
+ return &req;
+}
+
+/* Send an INIT_DRIVER request to the modem, and wait for it to complete. */
+static void ipa_client_init_driver_work(struct work_struct *work)
+{
+ unsigned long timeout = msecs_to_jiffies(QMI_INIT_DRIVER_TIMEOUT);
+ const struct ipa_init_modem_driver_req *req;
+ struct ipa_qmi *ipa_qmi;
+ struct qmi_handle *qmi;
+ struct qmi_txn txn;
+ struct device *dev;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(work, struct ipa_qmi, init_driver_work);
+ qmi = &ipa_qmi->client_handle;
+
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+ dev = &ipa->pdev->dev;
+
+ ret = qmi_txn_init(qmi, &txn, NULL, NULL);
+ if (ret < 0) {
+ dev_err(dev, "error %d preparing init driver request\n", ret);
+ return;
+ }
+
+ /* Send the request, and if successful wait for its response */
+ req = init_modem_driver_req(ipa_qmi);
+ ret = qmi_send_request(qmi, &ipa_qmi->modem_sq, &txn,
+ IPA_QMI_INIT_DRIVER, IPA_QMI_INIT_DRIVER_REQ_SZ,
+ ipa_init_modem_driver_req_ei, req);
+ if (ret)
+ dev_err(dev, "error %d sending init driver request\n", ret);
+ else if ((ret = qmi_txn_wait(&txn, timeout)))
+ dev_err(dev, "error %d awaiting init driver response\n", ret);
+
+ if (!ret) {
+ ipa_qmi->modem_ready = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ /* If any error occurs we need to cancel the transaction */
+ qmi_txn_cancel(&txn);
+ }
+}
+
+/* The modem server is now available. We will send an INIT_DRIVER request
+ * to the modem, but can't wait for it to complete in this callback thread.
+ * Schedule a worker on the global workqueue to do that for us.
+ */
+static int
+ipa_client_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct ipa_qmi *ipa_qmi;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, client_handle);
+
+ ipa_qmi->modem_sq.sq_family = AF_QIPCRTR;
+ ipa_qmi->modem_sq.sq_node = svc->node;
+ ipa_qmi->modem_sq.sq_port = svc->port;
+
+ schedule_work(&ipa_qmi->init_driver_work);
+
+ return 0;
+}
+
+static const struct qmi_ops ipa_client_ops = {
+ .new_server = ipa_client_new_server,
+};
+
+/* Set up for QMI message exchange */
+int ipa_qmi_setup(struct ipa *ipa)
+{
+ struct ipa_qmi *ipa_qmi = &ipa->qmi;
+ int ret;
+
+ ipa_qmi->initial_boot = true;
+
+ /* The server handle is used to handle the DRIVER_INIT_COMPLETE
+ * request on the first modem boot. It also receives the
+ * INDICATION_REGISTER request on the first boot and (optionally)
+ * subsequent boots. The INIT_COMPLETE indication message is
+ * sent over the server handle if requested.
+ */
+ ret = qmi_handle_init(&ipa_qmi->server_handle,
+ IPA_QMI_SERVER_MAX_RCV_SZ, &ipa_server_ops,
+ ipa_server_msg_handlers);
+ if (ret)
+ return ret;
+
+ ret = qmi_add_server(&ipa_qmi->server_handle, IPA_HOST_SERVICE_SVC_ID,
+ IPA_HOST_SVC_VERS, IPA_HOST_SERVICE_INS_ID);
+ if (ret)
+ goto err_server_handle_release;
+
+ /* The client handle is only used for sending an INIT_DRIVER request
+ * to the modem, and receiving its response message.
+ */
+ ret = qmi_handle_init(&ipa_qmi->client_handle,
+ IPA_QMI_CLIENT_MAX_RCV_SZ, &ipa_client_ops,
+ ipa_client_msg_handlers);
+ if (ret)
+ goto err_server_handle_release;
+
+ /* We need this ready before the service lookup is added */
+ INIT_WORK(&ipa_qmi->init_driver_work, ipa_client_init_driver_work);
+
+ ret = qmi_add_lookup(&ipa_qmi->client_handle, IPA_MODEM_SERVICE_SVC_ID,
+ IPA_MODEM_SVC_VERS, IPA_MODEM_SERVICE_INS_ID);
+ if (ret)
+ goto err_client_handle_release;
+
+ return 0;
+
+err_client_handle_release:
+ /* Releasing the handle also removes registered lookups */
+ qmi_handle_release(&ipa_qmi->client_handle);
+ memset(&ipa_qmi->client_handle, 0, sizeof(ipa_qmi->client_handle));
+err_server_handle_release:
+ /* Releasing the handle also removes registered services */
+ qmi_handle_release(&ipa_qmi->server_handle);
+ memset(&ipa_qmi->server_handle, 0, sizeof(ipa_qmi->server_handle));
+
+ return ret;
+}
+
+/* Tear down IPA QMI handles */
+void ipa_qmi_teardown(struct ipa *ipa)
+{
+ cancel_work_sync(&ipa->qmi.init_driver_work);
+
+ qmi_handle_release(&ipa->qmi.client_handle);
+ memset(&ipa->qmi.client_handle, 0, sizeof(ipa->qmi.client_handle));
+
+ qmi_handle_release(&ipa->qmi.server_handle);
+ memset(&ipa->qmi.server_handle, 0, sizeof(ipa->qmi.server_handle));
+}
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
new file mode 100644
index 0000000000..1c236826c1
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+#ifndef _IPA_QMI_H_
+#define _IPA_QMI_H_
+
+#include <linux/types.h>
+#include <linux/soc/qcom/qmi.h>
+
+struct ipa;
+
+/**
+ * struct ipa_qmi - QMI state associated with an IPA
+ * @client_handle: Used to send QMI requests to the modem
+ * @server_handle: Used to handle QMI requests from the modem
+ * @modem_sq: QRTR socket address for the modem QMI server
+ * @init_driver_work: Work structure used for INIT_DRIVER message handling
+ * @initial_boot: True if first boot has not yet completed
+ * @uc_ready: True once DRIVER_INIT_COMPLETE request received
+ * @modem_ready: True when INIT_DRIVER response received
+ * @indication_requested: True when INDICATION_REGISTER request received
+ * @indication_sent: True when INIT_COMPLETE indication sent
+ */
+struct ipa_qmi {
+ struct qmi_handle client_handle;
+ struct qmi_handle server_handle;
+
+ /* Information used for the client handle */
+ struct sockaddr_qrtr modem_sq;
+ struct work_struct init_driver_work;
+
+ /* Flags used in negotiating readiness */
+ bool initial_boot;
+ bool uc_ready;
+ bool modem_ready;
+ bool indication_requested;
+ bool indication_sent;
+};
+
+/**
+ * ipa_qmi_setup() - Set up for QMI message exchange
+ * @ipa: IPA pointer
+ *
+ * This is called at the end of ipa_setup(), to prepare for the exchange
+ * of QMI messages that perform a "handshake" between the AP and modem.
+ * When the modem QMI server announces its presence, an AP request message
+ * supplies operating parameters to the modem, and the modem
+ * acknowledges receipt of those parameters. The modem will not touch the
+ * IPA hardware until this handshake is complete.
+ *
+ * If the modem crashes (or shuts down) a new handshake begins when the
+ * modem's QMI server is started again.
+ */
+int ipa_qmi_setup(struct ipa *ipa);
+
+/**
+ * ipa_qmi_teardown() - Tear down IPA QMI handles
+ * @ipa: IPA pointer
+ */
+void ipa_qmi_teardown(struct ipa *ipa);
+
+#endif /* !_IPA_QMI_H_ */
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
new file mode 100644
index 0000000000..894f995172
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+#include <linux/stddef.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "ipa_qmi_msg.h"
+
+/* QMI message structure definition for struct ipa_indication_register_req */
+const struct qmi_elem_info ipa_indication_register_req_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ master_driver_init_complete_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_indication_register_req,
+ master_driver_init_complete_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ master_driver_init_complete),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_indication_register_req,
+ master_driver_init_complete),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ data_usage_quota_reached_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ data_usage_quota_reached_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ data_usage_quota_reached),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ data_usage_quota_reached),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind_valid),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ endpoint_desc_ind_valid),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_indication_register_req,
+ endpoint_desc_ind_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ endpoint_desc_ind),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_indication_register_req,
+ endpoint_desc_ind),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ bw_change_ind_valid),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_indication_register_req,
+ bw_change_ind_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ bw_change_ind),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_indication_register_req,
+ bw_change_ind),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
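As a brief illustration of the pattern used throughout these tables: each optional field is described by a QMI_OPT_FLAG entry (the *_valid flag) plus a value entry sharing the same TLV type, and the encoder emits that TLV only when the flag is nonzero. A minimal sketch with illustrative values (not part of the patch):

	struct ipa_indication_register_req req = { };

	/* Request only the master-driver-init-complete indication (TLV 0x10);
	 * every other _valid flag stays 0, so those TLVs are simply omitted.
	 */
	req.master_driver_init_complete_valid = 1;
	req.master_driver_init_complete = 1;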
+
+/* QMI message structure definition for struct ipa_indication_register_rsp */
+const struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_indication_register_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_driver_init_complete_req */
+const struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_driver_init_complete_req,
+ status),
+ .tlv_type = 0x01,
+ .offset = offsetof(struct ipa_driver_init_complete_req,
+ status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_driver_init_complete_rsp */
+const struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_driver_init_complete_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_driver_init_complete_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_init_complete_ind */
+const struct qmi_elem_info ipa_init_complete_ind_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_complete_ind,
+ status),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_init_complete_ind,
+ status),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_bounds */
+const struct qmi_elem_info ipa_mem_bounds_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_bounds, start),
+ .offset = offsetof(struct ipa_mem_bounds, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_bounds, end),
+ .offset = offsetof(struct ipa_mem_bounds, end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_array */
+const struct qmi_elem_info ipa_mem_array_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_array, start),
+ .offset = offsetof(struct ipa_mem_array, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_array, count),
+ .offset = offsetof(struct ipa_mem_array, count),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_range */
+const struct qmi_elem_info ipa_mem_range_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_range, start),
+ .offset = offsetof(struct ipa_mem_range, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_range, size),
+ .offset = offsetof(struct ipa_mem_range, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_init_modem_driver_req */
+const struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ platform_type_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ platform_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ platform_type),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ platform_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_tbl_info_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_tbl_info),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info_valid),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info_valid),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start_valid),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start_valid),
+ .tlv_type = 0x15,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start),
+ .tlv_type = 0x15,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ modem_mem_info_valid),
+ .tlv_type = 0x16,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ modem_mem_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ modem_mem_info),
+ .tlv_type = 0x16,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ modem_mem_info),
+ .ei_array = ipa_mem_range_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt_valid),
+ .tlv_type = 0x17,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt),
+ .tlv_type = 0x17,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ skip_uc_load_valid),
+ .tlv_type = 0x18,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ skip_uc_load_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ skip_uc_load),
+ .tlv_type = 0x18,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ skip_uc_load),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info_valid),
+ .tlv_type = 0x19,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info),
+ .tlv_type = 0x19,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ zip_tbl_info_valid),
+ .tlv_type = 0x1a,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ zip_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ zip_tbl_info),
+ .tlv_type = 0x1a,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ zip_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info_valid),
+ .tlv_type = 0x1b,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info),
+ .tlv_type = 0x1b,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info_valid),
+ .tlv_type = 0x1c,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info),
+ .tlv_type = 0x1c,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start_valid),
+ .tlv_type = 0x1d,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start),
+ .tlv_type = 0x1d,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start_valid),
+ .tlv_type = 0x1e,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start),
+ .tlv_type = 0x1e,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr_valid),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size_valid),
+ .tlv_type = 0x20,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size),
+ .tlv_type = 0x20,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr_valid),
+ .tlv_type = 0x21,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr),
+ .tlv_type = 0x21,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size_valid),
+ .tlv_type = 0x22,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size),
+ .tlv_type = 0x22,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
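A hedged sketch of how a table like the one above is typically consumed: the common QMI helpers walk the element-info array to turn the C structure into TLVs and to decode the response. The helper name, handle, and address arguments below are assumptions for illustration only; the driver's real send path lives in ipa_qmi.c.

/* Illustrative only; error handling is minimal */
static int ipa_qmi_example_init_driver_send(struct qmi_handle *qmi,
					    struct sockaddr_qrtr *sq)
{
	struct ipa_init_modem_driver_rsp rsp = { };
	struct ipa_init_modem_driver_req req = { };
	struct qmi_txn txn;
	int ret;

	req.platform_type_valid = 1;
	req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;

	ret = qmi_txn_init(qmi, &txn, ipa_init_modem_driver_rsp_ei, &rsp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(qmi, sq, &txn, IPA_QMI_INIT_DRIVER,
			       IPA_QMI_INIT_DRIVER_REQ_SZ,
			       ipa_init_modem_driver_req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	return qmi_txn_wait(&txn, 5 * HZ);	/* timeout is illustrative */
}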
+
+/* QMI message structure definition for struct ipa_init_modem_driver_rsp */
+const struct qmi_elem_info ipa_init_modem_driver_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ default_end_pt_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ default_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ default_end_pt),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ default_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending_valid),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
new file mode 100644
index 0000000000..b73503552c
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+#ifndef _IPA_QMI_MSG_H_
+#define _IPA_QMI_MSG_H_
+
+/* === Only "ipa_qmi" and "ipa_qmi_msg.c" should include this file === */
+
+#include <linux/types.h>
+#include <linux/soc/qcom/qmi.h>
+
+/* Request/response/indication QMI message ids used for IPA. Receiving
+ * end issues a response for requests; indications require no response.
+ */
+#define IPA_QMI_INDICATION_REGISTER 0x20 /* modem -> AP request */
+#define IPA_QMI_INIT_DRIVER 0x21 /* AP -> modem request */
+#define IPA_QMI_INIT_COMPLETE 0x22 /* AP -> modem indication */
+#define IPA_QMI_DRIVER_INIT_COMPLETE 0x35 /* modem -> AP request */
+
+/* The maximum size required for message types. These sizes include
+ * the message data, along with type (1 byte) and length (2 byte)
+ * information for each field. The qmi_send_*() interfaces require
+ * the message size to be provided.
+ */
+#define IPA_QMI_INDICATION_REGISTER_REQ_SZ 20 /* -> server handle */
+#define IPA_QMI_INDICATION_REGISTER_RSP_SZ 7 /* <- server handle */
+#define IPA_QMI_INIT_DRIVER_REQ_SZ 162 /* client handle -> */
+#define IPA_QMI_INIT_DRIVER_RSP_SZ 25 /* client handle <- */
+#define IPA_QMI_INIT_COMPLETE_IND_SZ 7 /* <- server handle */
+#define IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ 4 /* -> server handle */
+#define IPA_QMI_DRIVER_INIT_COMPLETE_RSP_SZ 7 /* <- server handle */
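As a worked example of that arithmetic (each encoded field costs 1 byte of TLV type plus 2 bytes of length): the DRIVER_INIT_COMPLETE request carries a single 1-byte status field, the responses each carry one struct qmi_response_type_v01 (two 16-bit values), and the INDICATION_REGISTER request carries five optional 1-byte fields:

	IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ = 3 + 1        =  4
	IPA_QMI_DRIVER_INIT_COMPLETE_RSP_SZ = 3 + (2 + 2)  =  7
	IPA_QMI_INDICATION_REGISTER_REQ_SZ  = 5 * (3 + 1)  = 20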
+
+/* Maximum size of messages we expect the AP to receive (max of above) */
+#define IPA_QMI_SERVER_MAX_RCV_SZ 8
+#define IPA_QMI_CLIENT_MAX_RCV_SZ 25
+
+/* Request message for the IPA_QMI_INDICATION_REGISTER request */
+struct ipa_indication_register_req {
+ u8 master_driver_init_complete_valid;
+ u8 master_driver_init_complete;
+ u8 data_usage_quota_reached_valid;
+ u8 data_usage_quota_reached;
+ u8 ipa_mhi_ready_ind_valid;
+ u8 ipa_mhi_ready_ind;
+ u8 endpoint_desc_ind_valid;
+ u8 endpoint_desc_ind;
+ u8 bw_change_ind_valid;
+ u8 bw_change_ind;
+};
+
+/* The response to an IPA_QMI_INDICATION_REGISTER request consists only of
+ * a standard QMI response.
+ */
+struct ipa_indication_register_rsp {
+ struct qmi_response_type_v01 rsp;
+};
+
+/* Request message for the IPA_QMI_DRIVER_INIT_COMPLETE request */
+struct ipa_driver_init_complete_req {
+ u8 status;
+};
+
+/* The response to an IPA_QMI_DRIVER_INIT_COMPLETE request consists only
+ * of a standard QMI response.
+ */
+struct ipa_driver_init_complete_rsp {
+ struct qmi_response_type_v01 rsp;
+};
+
+/* The message for the IPA_QMI_INIT_COMPLETE_IND indication consists
+ * only of a standard QMI response.
+ */
+struct ipa_init_complete_ind {
+ struct qmi_response_type_v01 status;
+};
+
+/* The AP tells the modem its platform type. We assume Android. */
+enum ipa_platform_type {
+ IPA_QMI_PLATFORM_TYPE_INVALID = 0x0, /* Invalid */
+ IPA_QMI_PLATFORM_TYPE_TN = 0x1, /* Data card */
+ IPA_QMI_PLATFORM_TYPE_LE = 0x2, /* Data router */
+ IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 0x3, /* Android MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 0x4, /* Windows MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
+};
+
+/* This defines the start and end offset of a range of memory. The start
+ * value is a byte offset relative to the start of IPA shared memory. The
+ * end value is the last addressable unit *within* the range. Typically
+ * the end value is in units of bytes; however, it can also be a maximum
+ * array index value.
+ */
+struct ipa_mem_bounds {
+ u32 start;
+ u32 end;
+};
+
+/* This defines the location and size of an array. The start value
+ * is an offset relative to the start of IPA shared memory. The
+ * size of the array is implied by the number of entries (the entry
+ * size is assumed to be known).
+ */
+struct ipa_mem_array {
+ u32 start;
+ u32 count;
+};
+
+/* This defines the location and size of a range of memory. The
+ * start is an offset relative to the start of IPA shared memory.
+ * This differs from the ipa_mem_bounds structure in that the size
+ * (in bytes) of the memory region is specified rather than the
+ * offset of its last byte.
+ */
+struct ipa_mem_range {
+ u32 start;
+ u32 size;
+};
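A worked example of the difference between ipa_mem_bounds and ipa_mem_range, using illustrative numbers: a 0x100-byte region that begins 0x800 bytes into IPA shared memory would be described either way as shown below.

	/* Illustrative values only */
	static const struct ipa_mem_bounds example_bounds = {
		.start	= 0x800,
		.end	= 0x8ff,	/* offset of the region's last byte */
	};

	static const struct ipa_mem_range example_range = {
		.start	= 0x800,
		.size	= 0x100,	/* size in bytes */
	};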
+
+/* The message for the IPA_QMI_INIT_DRIVER request contains information
+ * from the AP that affects modem initialization.
+ */
+struct ipa_init_modem_driver_req {
+ u8 platform_type_valid;
+ u32 platform_type; /* enum ipa_platform_type */
+
+ /* Modem header table information. This defines the IPA shared
+ * memory in which the modem may insert header table entries.
+ */
+ u8 hdr_tbl_info_valid;
+ struct ipa_mem_bounds hdr_tbl_info;
+
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of non-hashable IPv4 and
+ * IPv6 routing tables. The start values are byte offsets relative
+ * to the start of IPA shared memory.
+ */
+ u8 v4_route_tbl_info_valid;
+ struct ipa_mem_bounds v4_route_tbl_info;
+ u8 v6_route_tbl_info_valid;
+ struct ipa_mem_bounds v6_route_tbl_info;
+
+ /* Filter table information. These define the location of the
+ * non-hashable IPv4 and IPv6 filter tables. The start values are
+ * byte offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_filter_tbl_start_valid;
+ u32 v4_filter_tbl_start;
+ u8 v6_filter_tbl_start_valid;
+ u32 v6_filter_tbl_start;
+
+ /* Modem memory information. This defines the location and
+ * size of memory available for the modem to use.
+ */
+ u8 modem_mem_info_valid;
+ struct ipa_mem_range modem_mem_info;
+
+ /* This defines the destination endpoint on the AP to which
+ * the modem driver can send control commands. Must be less
+ * than ipa_endpoint_max().
+ */
+ u8 ctrl_comm_dest_end_pt_valid;
+ u32 ctrl_comm_dest_end_pt;
+
+ /* This defines whether the modem should load the microcontroller
+ * or not. It is unnecessary to reload it if the modem is being
+ * restarted.
+ *
+ * NOTE: this field is named "is_ssr_bootup" elsewhere.
+ */
+ u8 skip_uc_load_valid;
+ u8 skip_uc_load;
+
+ /* Processing context memory information. This defines the memory in
+ * which the modem may insert header processing context table entries.
+ */
+ u8 hdr_proc_ctx_tbl_info_valid;
+ struct ipa_mem_bounds hdr_proc_ctx_tbl_info;
+
+ /* Compression command memory information. This defines the memory
+ * in which the modem may insert compression/decompression commands.
+ */
+ u8 zip_tbl_info_valid;
+ struct ipa_mem_bounds zip_tbl_info;
+
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
+ * routing tables (if supported by hardware). The start values are
+ * byte offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_hash_route_tbl_info_valid;
+ struct ipa_mem_bounds v4_hash_route_tbl_info;
+ u8 v6_hash_route_tbl_info_valid;
+ struct ipa_mem_bounds v6_hash_route_tbl_info;
+
+ /* Filter table information. These define the location and size
+ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
+ * The start values are byte offsets relative to the start of IPA
+ * shared memory.
+ */
+ u8 v4_hash_filter_tbl_start_valid;
+ u32 v4_hash_filter_tbl_start;
+ u8 v6_hash_filter_tbl_start_valid;
+ u32 v6_hash_filter_tbl_start;
+
+ /* Statistics information. These define the locations of the
+ * first and last statistics sub-regions. (IPA v4.0 and above)
+ */
+ u8 hw_stats_quota_base_addr_valid;
+ u32 hw_stats_quota_base_addr;
+ u8 hw_stats_quota_size_valid;
+ u32 hw_stats_quota_size;
+ u8 hw_stats_drop_base_addr_valid;
+ u32 hw_stats_drop_base_addr;
+ u8 hw_stats_drop_size_valid;
+ u32 hw_stats_drop_size;
+};
+
+/* The response to an IPA_QMI_INIT_DRIVER request begins with a standard
+ * QMI response, but contains other information as well. Currently we
+ * simply wait for the INIT_DRIVER transaction to complete and
+ * ignore any other data that might be returned.
+ */
+struct ipa_init_modem_driver_rsp {
+ struct qmi_response_type_v01 rsp;
+
+ /* This defines the destination endpoint on the modem to which
+ * the AP driver can send control commands. Must be less than
+ * ipa_endpoint_max().
+ */
+ u8 ctrl_comm_dest_end_pt_valid;
+ u32 ctrl_comm_dest_end_pt;
+
+ /* This defines the default endpoint. The AP driver is not
+ * required to configure the hardware with this value. Must
+ * be less than ipa_endpoint_max().
+ */
+ u8 default_end_pt_valid;
+ u32 default_end_pt;
+
+ /* This defines whether a second handshake is required to complete
+ * initialization.
+ */
+ u8 modem_driver_init_pending_valid;
+ u8 modem_driver_init_pending;
+};
+
+/* Message structure definitions defined in "ipa_qmi_msg.c" */
+extern const struct qmi_elem_info ipa_indication_register_req_ei[];
+extern const struct qmi_elem_info ipa_indication_register_rsp_ei[];
+extern const struct qmi_elem_info ipa_driver_init_complete_req_ei[];
+extern const struct qmi_elem_info ipa_driver_init_complete_rsp_ei[];
+extern const struct qmi_elem_info ipa_init_complete_ind_ei[];
+extern const struct qmi_elem_info ipa_mem_bounds_ei[];
+extern const struct qmi_elem_info ipa_mem_array_ei[];
+extern const struct qmi_elem_info ipa_mem_range_ei[];
+extern const struct qmi_elem_info ipa_init_modem_driver_req_ei[];
+extern const struct qmi_elem_info ipa_init_modem_driver_rsp_ei[];
+
+#endif /* !_IPA_QMI_MSG_H_ */
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
new file mode 100644
index 0000000000..818a84f7c4
--- /dev/null
+++ b/drivers/net/ipa/ipa_reg.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2023 Linaro Ltd.
+ */
+
+#include <linux/io.h>
+
+#include "ipa.h"
+#include "ipa_reg.h"
+
+/* Is this register ID valid for the current IPA version? */
+static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ enum ipa_version version = ipa->version;
+
+ switch (reg_id) {
+ case FILT_ROUT_HASH_EN:
+ return version == IPA_VERSION_4_2;
+
+ case FILT_ROUT_HASH_FLUSH:
+ return version < IPA_VERSION_5_0 && version != IPA_VERSION_4_2;
+
+ case FILT_ROUT_CACHE_FLUSH:
+ case ENDP_FILTER_CACHE_CFG:
+ case ENDP_ROUTER_CACHE_CFG:
+ return version >= IPA_VERSION_5_0;
+
+ case IPA_BCR:
+ case COUNTER_CFG:
+ return version < IPA_VERSION_4_5;
+
+ case IPA_TX_CFG:
+ case FLAVOR_0:
+ case IDLE_INDICATION_CFG:
+ return version >= IPA_VERSION_3_5;
+
+ case QTIME_TIMESTAMP_CFG:
+ case TIMERS_XO_CLK_DIV_CFG:
+ case TIMERS_PULSE_GRAN_CFG:
+ return version >= IPA_VERSION_4_5;
+
+ case SRC_RSRC_GRP_45_RSRC_TYPE:
+ case DST_RSRC_GRP_45_RSRC_TYPE:
+ return version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_4_5 ||
+ version == IPA_VERSION_5_0;
+
+ case SRC_RSRC_GRP_67_RSRC_TYPE:
+ case DST_RSRC_GRP_67_RSRC_TYPE:
+ return version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_5_0;
+
+ case ENDP_FILTER_ROUTER_HSH_CFG:
+ return version < IPA_VERSION_5_0 &&
+ version != IPA_VERSION_4_2;
+
+ case IRQ_SUSPEND_EN:
+ case IRQ_SUSPEND_CLR:
+ return version >= IPA_VERSION_3_1;
+
+ case COMP_CFG:
+ case CLKON_CFG:
+ case ROUTE:
+ case SHARED_MEM_SIZE:
+ case QSB_MAX_WRITES:
+ case QSB_MAX_READS:
+ case STATE_AGGR_ACTIVE:
+ case LOCAL_PKT_PROC_CNTXT:
+ case AGGR_FORCE_CLOSE:
+ case SRC_RSRC_GRP_01_RSRC_TYPE:
+ case SRC_RSRC_GRP_23_RSRC_TYPE:
+ case DST_RSRC_GRP_01_RSRC_TYPE:
+ case DST_RSRC_GRP_23_RSRC_TYPE:
+ case ENDP_INIT_CTRL:
+ case ENDP_INIT_CFG:
+ case ENDP_INIT_NAT:
+ case ENDP_INIT_HDR:
+ case ENDP_INIT_HDR_EXT:
+ case ENDP_INIT_HDR_METADATA_MASK:
+ case ENDP_INIT_MODE:
+ case ENDP_INIT_AGGR:
+ case ENDP_INIT_HOL_BLOCK_EN:
+ case ENDP_INIT_HOL_BLOCK_TIMER:
+ case ENDP_INIT_DEAGGR:
+ case ENDP_INIT_RSRC_GRP:
+ case ENDP_INIT_SEQ:
+ case ENDP_STATUS:
+ case IPA_IRQ_STTS:
+ case IPA_IRQ_EN:
+ case IPA_IRQ_CLR:
+ case IPA_IRQ_UC:
+ case IRQ_SUSPEND_INFO:
+ return true; /* These should be defined for all versions */
+
+ default:
+ return false;
+ }
+}
+
+const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ if (WARN(!ipa_reg_id_valid(ipa, reg_id), "invalid reg %u\n", reg_id))
+ return NULL;
+
+ return reg(ipa->regs, reg_id);
+}
+
+static const struct regs *ipa_regs(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_1:
+ return &ipa_regs_v3_1;
+ case IPA_VERSION_3_5_1:
+ return &ipa_regs_v3_5_1;
+ case IPA_VERSION_4_2:
+ return &ipa_regs_v4_2;
+ case IPA_VERSION_4_5:
+ return &ipa_regs_v4_5;
+ case IPA_VERSION_4_7:
+ return &ipa_regs_v4_7;
+ case IPA_VERSION_4_9:
+ return &ipa_regs_v4_9;
+ case IPA_VERSION_4_11:
+ return &ipa_regs_v4_11;
+ case IPA_VERSION_5_0:
+ return &ipa_regs_v5_0;
+ default:
+ return NULL;
+ }
+}
+
+int ipa_reg_init(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const struct regs *regs;
+ struct resource *res;
+
+ regs = ipa_regs(ipa->version);
+ if (!regs)
+ return -EINVAL;
+
+ if (WARN_ON(regs->reg_count > IPA_REG_ID_COUNT))
+ return -EINVAL;
+
+ /* Setup IPA register memory */
+ res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
+ "ipa-reg");
+ if (!res) {
+ dev_err(dev, "DT error getting \"ipa-reg\" memory property\n");
+ return -ENODEV;
+ }
+
+ ipa->reg_virt = ioremap(res->start, resource_size(res));
+ if (!ipa->reg_virt) {
+ dev_err(dev, "unable to remap \"ipa-reg\" memory\n");
+ return -ENOMEM;
+ }
+ ipa->regs = regs;
+
+ return 0;
+}
+
+void ipa_reg_exit(struct ipa *ipa)
+{
+ iounmap(ipa->reg_virt);
+}
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
new file mode 100644
index 0000000000..3ac48dea86
--- /dev/null
+++ b/drivers/net/ipa/ipa_reg.h
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2023 Linaro Ltd.
+ */
+#ifndef _IPA_REG_H_
+#define _IPA_REG_H_
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+
+#include "ipa_version.h"
+#include "reg.h"
+
+struct ipa;
+
+/**
+ * DOC: IPA Registers
+ *
+ * IPA registers are located within the "ipa-reg" address space defined by
+ * Device Tree. Each register has a specified offset within that space,
+ * which is mapped into virtual memory space in ipa_reg_init(). Each
+ * has a unique identifier, taken from the ipa_reg_id enumerated type.
+ * All IPA registers are 32 bits wide.
+ *
+ * Certain "parameterized" register types are duplicated for a number of
+ * instances of something. For example, each IPA endpoint has a set of
+ * registers defining its configuration. The offset to an endpoint's set
+ * of registers is computed from a "base" offset, plus the endpoint's
+ * ID multiplied by a "stride" value for the register. Similarly, some
+ * registers have an offset that depends on execution environment. In
+ * this case, the stride is multiplied by a member of the gsi_ee_id
+ * enumerated type.
+ *
+ * Each version of IPA implements an array of ipa_reg structures indexed
+ * by register ID. Each entry in the array specifies the base offset and
+ * (for parameterized registers) a non-zero stride value. Not all versions
+ * of IPA define all registers. The offset for a register is returned by
+ * reg_offset() when the register's ipa_reg structure is supplied;
+ * zero is returned for an undefined register (this should never happen).
+ *
+ * Some registers encode multiple fields within them. Each field in
+ * such a register has a unique identifier (from an enumerated type).
+ * The position and width of the fields in a register are defined by
+ * an array of field masks, indexed by field ID. Two functions are
+ * used to access register fields; both take an ipa_reg structure as
+ * argument. To encode a value to be represented in a register field,
+ * the value and field ID are passed to reg_encode(). To extract
+ * a value encoded in a register field, the field ID is passed to
+ * reg_decode(). In addition, for single-bit fields, reg_bit()
+ * can be used to either encode the bit value, or to generate a mask
+ * used to extract the bit value.
+ */
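A minimal sketch of the access pattern just described, modeled on usage elsewhere in the driver; the register, field IDs, and values below are arbitrary illustrations, and the helper itself is not part of the patch.

	static void ipa_reg_example_qsb_writes(struct ipa *ipa)
	{
		const struct reg *reg = ipa_reg(ipa, QSB_MAX_WRITES);
		u32 val;

		/* Encode two fields, then write the composed register value */
		val = reg_encode(reg, GEN_QMB_0_MAX_WRITES, 8);
		val |= reg_encode(reg, GEN_QMB_1_MAX_WRITES, 4);
		iowrite32(val, ipa->reg_virt + reg_offset(reg));

		/* Read the register back and extract one field from it */
		val = ioread32(ipa->reg_virt + reg_offset(reg));
		val = reg_decode(reg, GEN_QMB_0_MAX_WRITES, val);
	}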
+
+/* enum ipa_reg_id - IPA register IDs */
+enum ipa_reg_id {
+ COMP_CFG,
+ CLKON_CFG,
+ ROUTE,
+ SHARED_MEM_SIZE,
+ QSB_MAX_WRITES,
+ QSB_MAX_READS,
+ FILT_ROUT_HASH_EN, /* IPA v4.2 */
+ FILT_ROUT_HASH_FLUSH, /* Not IPA v4.2 nor IPA v5.0+ */
+ FILT_ROUT_CACHE_FLUSH, /* IPA v5.0+ */
+ STATE_AGGR_ACTIVE,
+ IPA_BCR, /* Not IPA v4.5+ */
+ LOCAL_PKT_PROC_CNTXT,
+ AGGR_FORCE_CLOSE,
+ COUNTER_CFG, /* Not IPA v4.5+ */
+ IPA_TX_CFG, /* IPA v3.5+ */
+ FLAVOR_0, /* IPA v3.5+ */
+ IDLE_INDICATION_CFG, /* IPA v3.5+ */
+ QTIME_TIMESTAMP_CFG, /* IPA v4.5+ */
+ TIMERS_XO_CLK_DIV_CFG, /* IPA v4.5+ */
+ TIMERS_PULSE_GRAN_CFG, /* IPA v4.5+ */
+ SRC_RSRC_GRP_01_RSRC_TYPE,
+ SRC_RSRC_GRP_23_RSRC_TYPE,
+ SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */
+ SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+; IPA v5.0 */
+ DST_RSRC_GRP_01_RSRC_TYPE,
+ DST_RSRC_GRP_23_RSRC_TYPE,
+ DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */
+ DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+; IPA v5.0 */
+ ENDP_INIT_CTRL, /* Not IPA v4.2+ for TX, not IPA v4.0+ for RX */
+ ENDP_INIT_CFG,
+ ENDP_INIT_NAT, /* TX only */
+ ENDP_INIT_HDR,
+ ENDP_INIT_HDR_EXT,
+ ENDP_INIT_HDR_METADATA_MASK, /* RX only */
+ ENDP_INIT_MODE, /* TX only */
+ ENDP_INIT_AGGR,
+ ENDP_INIT_HOL_BLOCK_EN, /* RX only */
+ ENDP_INIT_HOL_BLOCK_TIMER, /* RX only */
+ ENDP_INIT_DEAGGR, /* TX only */
+ ENDP_INIT_RSRC_GRP,
+ ENDP_INIT_SEQ, /* TX only */
+ ENDP_STATUS,
+ ENDP_FILTER_ROUTER_HSH_CFG, /* Not IPA v4.2 */
+ ENDP_FILTER_CACHE_CFG, /* IPA v5.0+ */
+ ENDP_ROUTER_CACHE_CFG, /* IPA v5.0+ */
+ /* The IRQ registers that follow are only used for GSI_EE_AP */
+ IPA_IRQ_STTS,
+ IPA_IRQ_EN,
+ IPA_IRQ_CLR,
+ IPA_IRQ_UC,
+ IRQ_SUSPEND_INFO,
+ IRQ_SUSPEND_EN, /* IPA v3.1+ */
+ IRQ_SUSPEND_CLR, /* IPA v3.1+ */
+ IPA_REG_ID_COUNT, /* Last; not an ID */
+};
+
+/* COMP_CFG register */
+enum ipa_reg_comp_cfg_field_id {
+ COMP_CFG_ENABLE, /* Not IPA v4.0+ */
+ RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS, /* IPA v4.7+ */
+ GSI_SNOC_BYPASS_DIS,
+ GEN_QMB_0_SNOC_BYPASS_DIS,
+ GEN_QMB_1_SNOC_BYPASS_DIS,
+ IPA_DCMP_FAST_CLK_EN, /* Not IPA v4.5+ */
+ IPA_QMB_SELECT_CONS_EN, /* IPA v4.0+ */
+ IPA_QMB_SELECT_PROD_EN, /* IPA v4.0+ */
+ GSI_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GSI_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GEN_QMB_1_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_1_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS, /* IPA v4.0+ */
+ GSI_SNOC_CNOC_LOOP_PROT_DISABLE, /* IPA v4.0+ */
+ GSI_MULTI_AXI_MASTERS_DIS, /* IPA v4.0+ */
+ IPA_QMB_SELECT_GLOBAL_EN, /* IPA v4.0+ */
+ QMB_RAM_RD_CACHE_DISABLE, /* IPA v4.9+ */
+ GENQMB_AOOOWR, /* IPA v4.9+ */
+ IF_OUT_OF_BUF_STOP_RESET_MASK_EN, /* IPA v4.9+ */
+ GEN_QMB_1_DYNAMIC_ASIZE, /* IPA v4.9+ */
+ GEN_QMB_0_DYNAMIC_ASIZE, /* IPA v4.9+ */
+ ATOMIC_FETCHER_ARB_LOCK_DIS, /* IPA v4.0+ */
+ FULL_FLUSH_WAIT_RS_CLOSURE_EN, /* IPA v4.5+ */
+};
+
+/* CLKON_CFG register */
+enum ipa_reg_clkon_cfg_field_id {
+ CLKON_RX,
+ CLKON_PROC,
+ TX_WRAPPER,
+ CLKON_MISC,
+ RAM_ARB,
+ FTCH_HPS,
+ FTCH_DPS,
+ CLKON_HPS,
+ CLKON_DPS,
+ RX_HPS_CMDQS,
+ HPS_DPS_CMDQS,
+ DPS_TX_CMDQS,
+ RSRC_MNGR,
+ CTX_HANDLER,
+ ACK_MNGR,
+ D_DCPH,
+ H_DCPH,
+ CLKON_DCMP, /* IPA v4.5+ */
+ NTF_TX_CMDQS, /* IPA v3.5+ */
+ CLKON_TX_0, /* IPA v3.5+ */
+ CLKON_TX_1, /* IPA v3.5+ */
+ CLKON_FNR, /* IPA v3.5.1+ */
+ QSB2AXI_CMDQ_L, /* IPA v4.0+ */
+ AGGR_WRAPPER, /* IPA v4.0+ */
+ RAM_SLAVEWAY, /* IPA v4.0+ */
+ CLKON_QMB, /* IPA v4.0+ */
+ WEIGHT_ARB, /* IPA v4.0+ */
+ GSI_IF, /* IPA v4.0+ */
+ CLKON_GLOBAL, /* IPA v4.0+ */
+ GLOBAL_2X_CLK, /* IPA v4.0+ */
+ DPL_FIFO, /* IPA v4.5+ */
+ DRBIP, /* IPA v4.7+ */
+};
+
+/* ROUTE register */
+enum ipa_reg_route_field_id {
+ ROUTE_DIS,
+ ROUTE_DEF_PIPE,
+ ROUTE_DEF_HDR_TABLE,
+ ROUTE_DEF_HDR_OFST,
+ ROUTE_FRAG_DEF_PIPE,
+ ROUTE_DEF_RETAIN_HDR,
+};
+
+/* SHARED_MEM_SIZE register */
+enum ipa_reg_shared_mem_size_field_id {
+ MEM_SIZE,
+ MEM_BADDR,
+};
+
+/* QSB_MAX_WRITES register */
+enum ipa_reg_qsb_max_writes_field_id {
+ GEN_QMB_0_MAX_WRITES,
+ GEN_QMB_1_MAX_WRITES,
+};
+
+/* QSB_MAX_READS register */
+enum ipa_reg_qsb_max_reads_field_id {
+ GEN_QMB_0_MAX_READS,
+ GEN_QMB_1_MAX_READS,
+ GEN_QMB_0_MAX_READS_BEATS, /* IPA v4.0+ */
+ GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
+};
+
+/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
+enum ipa_reg_filt_rout_hash_field_id {
+ IPV6_ROUTER_HASH,
+ IPV6_FILTER_HASH,
+ IPV4_ROUTER_HASH,
+ IPV4_FILTER_HASH,
+};
+
+/* FILT_ROUT_CACHE_FLUSH register */
+enum ipa_reg_filt_rout_cache_field_id {
+ ROUTER_CACHE,
+ FILTER_CACHE,
+};
+
+/* BCR register */
+enum ipa_bcr_compat {
+ BCR_CMDQ_L_LACK_ONE_ENTRY = 0x0, /* Not IPA v4.2+ */
+ BCR_TX_NOT_USING_BRESP = 0x1, /* Not IPA v4.2+ */
+ BCR_TX_SUSPEND_IRQ_ASSERT_ONCE = 0x2, /* Not IPA v4.0+ */
+ BCR_SUSPEND_L2_IRQ = 0x3, /* Not IPA v4.2+ */
+ BCR_HOLB_DROP_L2_IRQ = 0x4, /* Not IPA v4.2+ */
+ BCR_DUAL_TX = 0x5, /* IPA v3.5+ */
+ BCR_ENABLE_FILTER_DATA_CACHE = 0x6, /* IPA v3.5+ */
+ BCR_NOTIF_PRIORITY_OVER_ZLT = 0x7, /* IPA v3.5+ */
+ BCR_FILTER_PREFETCH_EN = 0x8, /* IPA v3.5+ */
+ BCR_ROUTER_PREFETCH_EN = 0x9, /* IPA v3.5+ */
+};
+
+/* LOCAL_PKT_PROC_CNTXT register */
+enum ipa_reg_local_pkt_proc_cntxt_field_id {
+ IPA_BASE_ADDR,
+};
+
+/* COUNTER_CFG register */
+enum ipa_reg_counter_cfg_field_id {
+ EOT_COAL_GRANULARITY, /* Not v3.5+ */
+ AGGR_GRANULARITY,
+};
+
+/* IPA_TX_CFG register */
+enum ipa_reg_ipa_tx_cfg_field_id {
+ TX0_PREFETCH_DISABLE, /* Not v4.0+ */
+ TX1_PREFETCH_DISABLE, /* Not v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE, /* Not v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX0, /* v4.0+ */
+ DMAW_SCND_OUTSD_PRED_THRESHOLD, /* v4.0+ */
+ DMAW_SCND_OUTSD_PRED_EN, /* v4.0+ */
+ DMAW_MAX_BEATS_256_DIS, /* v4.0+ */
+ PA_MASK_EN, /* v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX1, /* v4.0+ */
+ DUAL_TX_ENABLE, /* v4.5+ */
+ SSPND_PA_NO_START_STATE, /* v4.2+, not v4.5 */
+ SSPND_PA_NO_BQ_STATE, /* v4.2 only */
+ HOLB_STICKY_DROP_EN, /* v5.0+ */
+};
+
+/* FLAVOR_0 register */
+enum ipa_reg_flavor_0_field_id {
+ MAX_PIPES,
+ MAX_CONS_PIPES,
+ MAX_PROD_PIPES,
+ PROD_LOWEST,
+};
+
+/* IDLE_INDICATION_CFG register */
+enum ipa_reg_idle_indication_cfg_field_id {
+ ENTER_IDLE_DEBOUNCE_THRESH,
+ CONST_NON_IDLE_ENABLE,
+};
+
+/* QTIME_TIMESTAMP_CFG register */
+enum ipa_reg_qtime_timestamp_cfg_field_id {
+ DPL_TIMESTAMP_LSB,
+ DPL_TIMESTAMP_SEL,
+ TAG_TIMESTAMP_LSB,
+ NAT_TIMESTAMP_LSB,
+};
+
+/* TIMERS_XO_CLK_DIV_CFG register */
+enum ipa_reg_timers_xo_clk_div_cfg_field_id {
+ DIV_VALUE,
+ DIV_ENABLE,
+};
+
+/* TIMERS_PULSE_GRAN_CFG register */
+enum ipa_reg_timers_pulse_gran_cfg_field_id {
+ PULSE_GRAN_0,
+ PULSE_GRAN_1,
+ PULSE_GRAN_2,
+ PULSE_GRAN_3,
+};
+
+/* Values for IPA_GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
+enum ipa_pulse_gran {
+ IPA_GRAN_10_US = 0x0,
+ IPA_GRAN_20_US = 0x1,
+ IPA_GRAN_50_US = 0x2,
+ IPA_GRAN_100_US = 0x3,
+ IPA_GRAN_1_MS = 0x4,
+ IPA_GRAN_10_MS = 0x5,
+ IPA_GRAN_100_MS = 0x6,
+ IPA_GRAN_655350_US = 0x7,
+};
+
+/* {SRC,DST}_RSRC_GRP_{01,23,45,67}_RSRC_TYPE registers */
+enum ipa_reg_rsrc_grp_rsrc_type_field_id {
+ X_MIN_LIM,
+ X_MAX_LIM,
+ Y_MIN_LIM,
+ Y_MAX_LIM,
+};
+
+/* ENDP_INIT_CTRL register */
+enum ipa_reg_endp_init_ctrl_field_id {
+ ENDP_SUSPEND, /* Not v4.0+ */
+ ENDP_DELAY, /* Not v4.2+ */
+};
+
+/* ENDP_INIT_CFG register */
+enum ipa_reg_endp_init_cfg_field_id {
+ FRAG_OFFLOAD_EN,
+ CS_OFFLOAD_EN,
+ CS_METADATA_HDR_OFFSET,
+ CS_GEN_QMB_MASTER_SEL,
+};
+
+/** enum ipa_cs_offload_en - ENDP_INIT_CFG register CS_OFFLOAD_EN field value */
+enum ipa_cs_offload_en {
+ IPA_CS_OFFLOAD_NONE = 0x0,
+ IPA_CS_OFFLOAD_UL /* TX */ = 0x1, /* Not IPA v4.5+ */
+ IPA_CS_OFFLOAD_DL /* RX */ = 0x2, /* Not IPA v4.5+ */
+ IPA_CS_OFFLOAD_INLINE /* TX and RX */ = 0x1, /* IPA v4.5+ */
+};
+
+/* ENDP_INIT_NAT register */
+enum ipa_reg_endp_init_nat_field_id {
+ NAT_EN,
+};
+
+/** enum ipa_nat_type - ENDP_INIT_NAT register NAT_EN field value */
+enum ipa_nat_type {
+ IPA_NAT_TYPE_BYPASS = 0,
+ IPA_NAT_TYPE_SRC = 1,
+ IPA_NAT_TYPE_DST = 2,
+};
+
+/* ENDP_INIT_HDR register */
+enum ipa_reg_endp_init_hdr_field_id {
+ HDR_LEN,
+ HDR_OFST_METADATA_VALID,
+ HDR_OFST_METADATA,
+ HDR_ADDITIONAL_CONST_LEN,
+ HDR_OFST_PKT_SIZE_VALID,
+ HDR_OFST_PKT_SIZE,
+ HDR_A5_MUX, /* Not v4.9+ */
+ HDR_LEN_INC_DEAGG_HDR,
+ HDR_METADATA_REG_VALID, /* Not v4.5+ */
+ HDR_LEN_MSB, /* v4.5+ */
+ HDR_OFST_METADATA_MSB, /* v4.5+ */
+};
+
+/* ENDP_INIT_HDR_EXT register */
+enum ipa_reg_endp_init_hdr_ext_field_id {
+ HDR_ENDIANNESS,
+ HDR_TOTAL_LEN_OR_PAD_VALID,
+ HDR_TOTAL_LEN_OR_PAD,
+ HDR_PAYLOAD_LEN_INC_PADDING,
+ HDR_TOTAL_LEN_OR_PAD_OFFSET,
+ HDR_PAD_TO_ALIGNMENT,
+ HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* v4.5+ */
+ HDR_OFST_PKT_SIZE_MSB, /* v4.5+ */
+ HDR_ADDITIONAL_CONST_LEN_MSB, /* v4.5+ */
+ HDR_BYTES_TO_REMOVE_VALID, /* v5.0+ */
+ HDR_BYTES_TO_REMOVE, /* v5.0+ */
+};
+
+/* ENDP_INIT_MODE register */
+enum ipa_reg_endp_init_mode_field_id {
+ ENDP_MODE,
+ DCPH_ENABLE, /* v4.5+ */
+ DEST_PIPE_INDEX,
+ BYTE_THRESHOLD,
+ PIPE_REPLICATION_EN,
+ PAD_EN,
+ HDR_FTCH_DISABLE, /* v4.5+ */
+ DRBIP_ACL_ENABLE, /* v4.9+ */
+};
+
+/** enum ipa_mode - ENDP_INIT_MODE register MODE field value */
+enum ipa_mode {
+ IPA_BASIC = 0x0,
+ IPA_ENABLE_FRAMING_HDLC = 0x1,
+ IPA_ENABLE_DEFRAMING_HDLC = 0x2,
+ IPA_DMA = 0x3,
+};
+
+/* ENDP_INIT_AGGR register */
+enum ipa_reg_endp_init_aggr_field_id {
+ AGGR_EN,
+ AGGR_TYPE,
+ BYTE_LIMIT,
+ TIME_LIMIT,
+ PKT_LIMIT,
+ SW_EOF_ACTIVE,
+ FORCE_CLOSE,
+ HARD_BYTE_LIMIT_EN,
+ AGGR_GRAN_SEL,
+};
+
+/** enum ipa_aggr_en - ENDP_INIT_AGGR register AGGR_EN field value */
+enum ipa_aggr_en {
+ IPA_BYPASS_AGGR /* TX and RX */ = 0x0,
+ IPA_ENABLE_AGGR /* RX */ = 0x1,
+ IPA_ENABLE_DEAGGR /* TX */ = 0x2,
+};
+
+/** enum ipa_aggr_type - ENDP_INIT_AGGR register AGGR_TYPE field value */
+enum ipa_aggr_type {
+ IPA_MBIM_16 = 0x0,
+ IPA_HDLC = 0x1,
+ IPA_TLP = 0x2,
+ IPA_RNDIS = 0x3,
+ IPA_GENERIC = 0x4,
+ IPA_COALESCE = 0x5,
+ IPA_QCMAP = 0x6,
+};
+
+/* ENDP_INIT_HOL_BLOCK_EN register */
+enum ipa_reg_endp_init_hol_block_en_field_id {
+ HOL_BLOCK_EN,
+};
+
+/* ENDP_INIT_HOL_BLOCK_TIMER register */
+enum ipa_reg_endp_init_hol_block_timer_field_id {
+ TIMER_BASE_VALUE, /* Not v4.5+ */
+ TIMER_SCALE, /* v4.2 only */
+ TIMER_LIMIT, /* v4.5+ */
+ TIMER_GRAN_SEL, /* v4.5+ */
+};
+
+/* ENDP_INIT_DEAGGR register */
+enum ipa_reg_endp_deaggr_field_id {
+ DEAGGR_HDR_LEN,
+ SYSPIPE_ERR_DETECTION,
+ PACKET_OFFSET_VALID,
+ PACKET_OFFSET_LOCATION,
+ IGNORE_MIN_PKT_ERR,
+ MAX_PACKET_LEN,
+};
+
+/* ENDP_INIT_RSRC_GRP register */
+enum ipa_reg_endp_init_rsrc_grp_field_id {
+ ENDP_RSRC_GRP,
+};
+
+/* ENDP_INIT_SEQ register */
+enum ipa_reg_endp_init_seq_field_id {
+ SEQ_TYPE,
+ SEQ_REP_TYPE, /* Not v4.5+ */
+};
+
+/**
+ * enum ipa_seq_type - HPS and DPS sequencer type
+ * @IPA_SEQ_DMA: Perform DMA only
+ * @IPA_SEQ_1_PASS: One pass through the pipeline
+ * @IPA_SEQ_2_PASS_SKIP_LAST_UC: Two passes, skip the microprocessor
+ * @IPA_SEQ_1_PASS_SKIP_LAST_UC: One pass, skip the microprocessor
+ * @IPA_SEQ_2_PASS: Two passes through the pipeline
+ * @IPA_SEQ_3_PASS_SKIP_LAST_UC: Three passes, skip the microprocessor
+ * @IPA_SEQ_DECIPHER: Optional deciphering step (combined)
+ *
+ * The low-order byte of the sequencer type register defines the number of
+ * passes a packet takes through the IPA pipeline. The last pass through can
+ * optionally skip the microprocessor. Deciphering is optional for all types;
+ * if enabled, an additional mask (two bits) is added to the type value.
+ *
+ * Note: not all combinations of ipa_seq_type and ipa_seq_rep_type are
+ * supported (or meaningful).
+ */
+enum ipa_seq_type {
+ IPA_SEQ_DMA = 0x00,
+ IPA_SEQ_1_PASS = 0x02,
+ IPA_SEQ_2_PASS_SKIP_LAST_UC = 0x04,
+ IPA_SEQ_1_PASS_SKIP_LAST_UC = 0x06,
+ IPA_SEQ_2_PASS = 0x0a,
+ IPA_SEQ_3_PASS_SKIP_LAST_UC = 0x0c,
+ /* The next value can be ORed with the above */
+ IPA_SEQ_DECIPHER = 0x11,
+};
+
+/**
+ * enum ipa_seq_rep_type - replicated packet sequencer type
+ * @IPA_SEQ_REP_DMA_PARSER: DMA parser for replicated packets
+ *
+ * This goes in the second byte of the endpoint sequencer type register.
+ *
+ * Note: not all combinations of ipa_seq_type and ipa_seq_rep_type are
+ * supported (or meaningful).
+ */
+enum ipa_seq_rep_type {
+ IPA_SEQ_REP_DMA_PARSER = 0x08,
+};
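A short sketch of how the SEQ_TYPE and SEQ_REP_TYPE fields defined earlier might be combined when a TX endpoint's ENDP_INIT_SEQ register is programmed; the sequencer choices, the endpoint_id parameter, and the helper itself are illustrative only (and SEQ_REP_TYPE exists only before IPA v4.5, per the comment above).

	static void ipa_endpoint_example_init_seq(struct ipa *ipa, u32 endpoint_id)
	{
		const struct reg *reg = ipa_reg(ipa, ENDP_INIT_SEQ);
		u32 val;

		/* Two passes, skipping the microprocessor on the last one;
		 * use the DMA parser for replicated packets.
		 */
		val = reg_encode(reg, SEQ_TYPE, IPA_SEQ_2_PASS_SKIP_LAST_UC);
		val |= reg_encode(reg, SEQ_REP_TYPE, IPA_SEQ_REP_DMA_PARSER);

		iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
	}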
+
+/* ENDP_STATUS register */
+enum ipa_reg_endp_status_field_id {
+ STATUS_EN,
+ STATUS_ENDP,
+ STATUS_LOCATION, /* Not v4.5+ */
+ STATUS_PKT_SUPPRESS, /* v4.0+ */
+};
+
+/* ENDP_FILTER_ROUTER_HSH_CFG register */
+enum ipa_reg_endp_filter_router_hsh_cfg_field_id {
+ FILTER_HASH_MSK_SRC_ID,
+ FILTER_HASH_MSK_SRC_IP,
+ FILTER_HASH_MSK_DST_IP,
+ FILTER_HASH_MSK_SRC_PORT,
+ FILTER_HASH_MSK_DST_PORT,
+ FILTER_HASH_MSK_PROTOCOL,
+ FILTER_HASH_MSK_METADATA,
+ FILTER_HASH_MSK_ALL, /* Bitwise OR of the above 7 fields */
+
+ ROUTER_HASH_MSK_SRC_ID,
+ ROUTER_HASH_MSK_SRC_IP,
+ ROUTER_HASH_MSK_DST_IP,
+ ROUTER_HASH_MSK_SRC_PORT,
+ ROUTER_HASH_MSK_DST_PORT,
+ ROUTER_HASH_MSK_PROTOCOL,
+ ROUTER_HASH_MSK_METADATA,
+ ROUTER_HASH_MSK_ALL, /* Bitwise OR of the above 7 fields */
+};
+
+/* ENDP_FILTER_CACHE_CFG and ENDP_ROUTER_CACHE_CFG registers */
+enum ipa_reg_endp_cache_cfg_field_id {
+ CACHE_MSK_SRC_ID,
+ CACHE_MSK_SRC_IP,
+ CACHE_MSK_DST_IP,
+ CACHE_MSK_SRC_PORT,
+ CACHE_MSK_DST_PORT,
+ CACHE_MSK_PROTOCOL,
+ CACHE_MSK_METADATA,
+};
+
+/* IPA_IRQ_STTS, IPA_IRQ_EN, and IPA_IRQ_CLR registers */
+/**
+ * enum ipa_irq_id - Bit positions representing type of IPA IRQ
+ * @IPA_IRQ_UC_0: Microcontroller event interrupt
+ * @IPA_IRQ_UC_1: Microcontroller response interrupt
+ * @IPA_IRQ_TX_SUSPEND: Data ready interrupt
+ * @IPA_IRQ_COUNT: Number of IRQ ids (must be last)
+ *
+ * IRQ types not described above are not currently used.
+ *
+ * @IPA_IRQ_BAD_SNOC_ACCESS: (Not currently used)
+ * @IPA_IRQ_EOT_COAL: (Not currently used)
+ * @IPA_IRQ_UC_2: (Not currently used)
+ * @IPA_IRQ_UC_3: (Not currently used)
+ * @IPA_IRQ_UC_IN_Q_NOT_EMPTY: (Not currently used)
+ * @IPA_IRQ_UC_RX_CMD_Q_NOT_FULL: (Not currently used)
+ * @IPA_IRQ_PROC_UC_ACK_Q_NOT_EMPTY: (Not currently used)
+ * @IPA_IRQ_RX_ERR: (Not currently used)
+ * @IPA_IRQ_DEAGGR_ERR: (Not currently used)
+ * @IPA_IRQ_TX_ERR: (Not currently used)
+ * @IPA_IRQ_STEP_MODE: (Not currently used)
+ * @IPA_IRQ_PROC_ERR: (Not currently used)
+ * @IPA_IRQ_TX_HOLB_DROP: (Not currently used)
+ * @IPA_IRQ_BAM_GSI_IDLE: (Not currently used)
+ * @IPA_IRQ_PIPE_YELLOW_BELOW: (Not currently used)
+ * @IPA_IRQ_PIPE_RED_BELOW: (Not currently used)
+ * @IPA_IRQ_PIPE_YELLOW_ABOVE: (Not currently used)
+ * @IPA_IRQ_PIPE_RED_ABOVE: (Not currently used)
+ * @IPA_IRQ_UCP: (Not currently used)
+ * @IPA_IRQ_DCMP: (Not currently used)
+ * @IPA_IRQ_GSI_EE: (Not currently used)
+ * @IPA_IRQ_GSI_IPA_IF_TLV_RCVD: (Not currently used)
+ * @IPA_IRQ_GSI_UC: (Not currently used)
+ * @IPA_IRQ_TLV_LEN_MIN_DSM: (Not currently used)
+ * @IPA_IRQ_DRBIP_PKT_EXCEED_MAX_SIZE_EN: (Not currently used)
+ * @IPA_IRQ_DRBIP_DATA_SCTR_CFG_ERROR_EN: (Not currently used)
+ * @IPA_IRQ_DRBIP_IMM_CMD_NO_FLSH_HZRD_EN: (Not currently used)
+ */
+enum ipa_irq_id {
+ IPA_IRQ_BAD_SNOC_ACCESS = 0x0,
+ /* The next bit is not present for IPA v3.5+ */
+ IPA_IRQ_EOT_COAL = 0x1,
+ IPA_IRQ_UC_0 = 0x2,
+ IPA_IRQ_UC_1 = 0x3,
+ IPA_IRQ_UC_2 = 0x4,
+ IPA_IRQ_UC_3 = 0x5,
+ IPA_IRQ_UC_IN_Q_NOT_EMPTY = 0x6,
+ IPA_IRQ_UC_RX_CMD_Q_NOT_FULL = 0x7,
+ IPA_IRQ_PROC_UC_ACK_Q_NOT_EMPTY = 0x8,
+ IPA_IRQ_RX_ERR = 0x9,
+ IPA_IRQ_DEAGGR_ERR = 0xa,
+ IPA_IRQ_TX_ERR = 0xb,
+ IPA_IRQ_STEP_MODE = 0xc,
+ IPA_IRQ_PROC_ERR = 0xd,
+ IPA_IRQ_TX_SUSPEND = 0xe,
+ IPA_IRQ_TX_HOLB_DROP = 0xf,
+ IPA_IRQ_BAM_GSI_IDLE = 0x10,
+ IPA_IRQ_PIPE_YELLOW_BELOW = 0x11,
+ IPA_IRQ_PIPE_RED_BELOW = 0x12,
+ IPA_IRQ_PIPE_YELLOW_ABOVE = 0x13,
+ IPA_IRQ_PIPE_RED_ABOVE = 0x14,
+ IPA_IRQ_UCP = 0x15,
+ /* The next bit is not present for IPA v4.5+ */
+ IPA_IRQ_DCMP = 0x16,
+ IPA_IRQ_GSI_EE = 0x17,
+ IPA_IRQ_GSI_IPA_IF_TLV_RCVD = 0x18,
+ IPA_IRQ_GSI_UC = 0x19,
+ /* The next bit is present for IPA v4.5+ */
+ IPA_IRQ_TLV_LEN_MIN_DSM = 0x1a,
+ /* The next three bits are present for IPA v4.9+ */
+ IPA_IRQ_DRBIP_PKT_EXCEED_MAX_SIZE_EN = 0x1b,
+ IPA_IRQ_DRBIP_DATA_SCTR_CFG_ERROR_EN = 0x1c,
+ IPA_IRQ_DRBIP_IMM_CMD_NO_FLSH_HZRD_EN = 0x1d,
+ IPA_IRQ_COUNT, /* Last; not an id */
+};
+
+/* IPA_IRQ_UC register */
+enum ipa_reg_ipa_irq_uc_field_id {
+ UC_INTR,
+};
+
+extern const struct regs ipa_regs_v3_1;
+extern const struct regs ipa_regs_v3_5_1;
+extern const struct regs ipa_regs_v4_2;
+extern const struct regs ipa_regs_v4_5;
+extern const struct regs ipa_regs_v4_7;
+extern const struct regs ipa_regs_v4_9;
+extern const struct regs ipa_regs_v4_11;
+extern const struct regs ipa_regs_v5_0;
+
+const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
+
+int ipa_reg_init(struct ipa *ipa);
+void ipa_reg_exit(struct ipa *ipa);
+
+#endif /* _IPA_REG_H_ */
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
new file mode 100644
index 0000000000..82c88a744d
--- /dev/null
+++ b/drivers/net/ipa/ipa_resource.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "ipa.h"
+#include "ipa_data.h"
+#include "ipa_reg.h"
+#include "ipa_resource.h"
+
+/**
+ * DOC: IPA Resources
+ *
+ * The IPA manages a set of resources internally for various purposes.
+ * A given IPA version has a fixed number of resource types, and a fixed
+ * total number of resources of each type. "Source" resource types
+ * are separate from "destination" resource types.
+ *
+ * Each version of IPA also has some number of resource groups. Each
+ * endpoint is assigned to a resource group, and all endpoints in the
+ * same group share pools of each type of resource. A subset of the
+ * total resources of each type is assigned for use by each group.
+ */
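A hedged sketch of the shape of the per-SoC limit data these functions consume; the layout follows the limits[] usage in the code below, but the indexes and numbers are illustrative only, and the real tables live in the per-version data files.

	/* Illustrative only: one source resource type, limits for two groups */
	static const struct ipa_resource example_resource_src[] = {
		[0] = {
			.limits[0] = {	/* resource group 0 */
				.min	= 1,
				.max	= 12,
			},
			.limits[1] = {	/* resource group 1 */
				.min	= 1,
				.max	= 12,
			},
		},
	};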
+
+static bool ipa_resource_limits_valid(struct ipa *ipa,
+ const struct ipa_resource_data *data)
+{
+ u32 group_count;
+ u32 i;
+ u32 j;
+
+ /* We program at most 8 source or destination resource group limits */
+ BUILD_BUG_ON(IPA_RESOURCE_GROUP_MAX > 8);
+
+ group_count = data->rsrc_group_src_count;
+ if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX)
+ return false;
+
+ /* Return false if a non-zero resource limit is specified
+ * for a resource group not supported by hardware.
+ */
+ for (i = 0; i < data->resource_src_count; i++) {
+ const struct ipa_resource *resource;
+
+ resource = &data->resource_src[i];
+ for (j = group_count; j < IPA_RESOURCE_GROUP_MAX; j++)
+ if (resource->limits[j].min || resource->limits[j].max)
+ return false;
+ }
+
+ group_count = data->rsrc_group_dst_count;
+ if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX)
+ return false;
+
+ for (i = 0; i < data->resource_dst_count; i++) {
+ const struct ipa_resource *resource;
+
+ resource = &data->resource_dst[i];
+ for (j = group_count; j < IPA_RESOURCE_GROUP_MAX; j++)
+ if (resource->limits[j].min || resource->limits[j].max)
+ return false;
+ }
+
+ return true;
+}
+
+static void
+ipa_resource_config_common(struct ipa *ipa, u32 resource_type,
+ const struct reg *reg,
+ const struct ipa_resource_limits *xlimits,
+ const struct ipa_resource_limits *ylimits)
+{
+ u32 val;
+
+ val = reg_encode(reg, X_MIN_LIM, xlimits->min);
+ val |= reg_encode(reg, X_MAX_LIM, xlimits->max);
+ if (ylimits) {
+ val |= reg_encode(reg, Y_MIN_LIM, ylimits->min);
+ val |= reg_encode(reg, Y_MAX_LIM, ylimits->max);
+ }
+
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, resource_type));
+}
+
+static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
+ const struct ipa_resource_data *data)
+{
+ u32 group_count = data->rsrc_group_src_count;
+ const struct ipa_resource_limits *ylimits;
+ const struct ipa_resource *resource;
+ const struct reg *reg;
+
+ resource = &data->resource_src[resource_type];
+
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_01_RSRC_TYPE);
+ ylimits = group_count == 1 ? NULL : &resource->limits[1];
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[0], ylimits);
+ if (group_count < 3)
+ return;
+
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_23_RSRC_TYPE);
+ ylimits = group_count == 3 ? NULL : &resource->limits[3];
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[2], ylimits);
+ if (group_count < 5)
+ return;
+
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_45_RSRC_TYPE);
+ ylimits = group_count == 5 ? NULL : &resource->limits[5];
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[4], ylimits);
+ if (group_count < 7)
+ return;
+
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_67_RSRC_TYPE);
+ ylimits = group_count == 7 ? NULL : &resource->limits[7];
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[6], ylimits);
+}
+
+static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
+ const struct ipa_resource_data *data)
+{
+ u32 group_count = data->rsrc_group_dst_count;
+ const struct ipa_resource_limits *ylimits;
+ const struct ipa_resource *resource;
+ const struct reg *reg;
+
+ resource = &data->resource_dst[resource_type];
+
+ reg = ipa_reg(ipa, DST_RSRC_GRP_01_RSRC_TYPE);
+ ylimits = group_count == 1 ? NULL : &resource->limits[1];
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[0], ylimits);
+ if (group_count < 3)
+ return;
+
+ reg = ipa_reg(ipa, DST_RSRC_GRP_23_RSRC_TYPE);
+ ylimits = group_count == 3 ? NULL : &resource->limits[3];
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[2], ylimits);
+ if (group_count < 5)
+ return;
+
+ reg = ipa_reg(ipa, DST_RSRC_GRP_45_RSRC_TYPE);
+ ylimits = group_count == 5 ? NULL : &resource->limits[5];
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[4], ylimits);
+ if (group_count < 7)
+ return;
+
+ reg = ipa_reg(ipa, DST_RSRC_GRP_67_RSRC_TYPE);
+ ylimits = group_count == 7 ? NULL : &resource->limits[7];
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[6], ylimits);
+}
+
+/* Configure resources; there is no ipa_resource_deconfig() */
+int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
+{
+ u32 i;
+
+ if (!ipa_resource_limits_valid(ipa, data))
+ return -EINVAL;
+
+ for (i = 0; i < data->resource_src_count; i++)
+ ipa_resource_config_src(ipa, i, data);
+
+ for (i = 0; i < data->resource_dst_count; i++)
+ ipa_resource_config_dst(ipa, i, data);
+
+ return 0;
+}
diff --git a/drivers/net/ipa/ipa_resource.h b/drivers/net/ipa/ipa_resource.h
new file mode 100644
index 0000000000..ef5818bff1
--- /dev/null
+++ b/drivers/net/ipa/ipa_resource.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+#ifndef _IPA_RESOURCE_H_
+#define _IPA_RESOURCE_H_
+
+struct ipa;
+struct ipa_resource_data;
+
+/**
+ * ipa_resource_config() - Configure resources
+ * @ipa: IPA pointer
+ * @data: IPA resource configuration data
+ *
+ * There is no need for a matching ipa_resource_deconfig() function.
+ *
+ * Return: 0 if successful, or a negative error code otherwise
+ */
+int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data);
+
+#endif /* _IPA_RESOURCE_H_ */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
new file mode 100644
index 0000000000..5620dc271f
--- /dev/null
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2022 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "ipa_smp2p.h"
+#include "ipa.h"
+#include "ipa_uc.h"
+
+/**
+ * DOC: IPA SMP2P communication with the modem
+ *
+ * SMP2P is a primitive communication mechanism available between the AP and
+ * the modem. The IPA driver uses this for two purposes: to enable the modem
+ * to state that the GSI hardware is ready to use; and to communicate the
+ * state of IPA power in the event of a crash.
+ *
+ * GSI needs to have early initialization completed before it can be used.
+ * This initialization is done either by Trust Zone or by the modem. In the
+ * latter case, the modem uses an SMP2P interrupt to tell the AP IPA driver
+ * when the GSI is ready to use.
+ *
+ * The modem is also able to inquire about the current state of IPA
+ * power by triggering another SMP2P interrupt to the AP. We communicate
+ * whether power is enabled using two SMP2P state bits--one to indicate
+ * the power state (on or off), and a second to indicate the power state
+ * bit is valid. The modem will poll the valid bit until it is set, and
+ * at that time records whether the AP has IPA power enabled.
+ *
+ * Finally, if the AP kernel panics, we update the SMP2P state bits even if
+ * we never receive an interrupt from the modem requesting this.
+ */
+
+/**
+ * struct ipa_smp2p - IPA SMP2P information
+ * @ipa: IPA pointer
+ * @valid_state: SMEM state indicating enabled state is valid
+ * @enabled_state: SMEM state to indicate power is enabled
+ * @valid_bit: Valid bit in 32-bit SMEM state mask
+ * @enabled_bit: Enabled bit in 32-bit SMEM state mask
+ * @clock_query_irq: IPA interrupt triggered by modem for power query
+ * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready
+ * @power_on: Whether IPA power is on
+ * @notified: Whether modem has been notified of power state
+ * @setup_disabled: Whether setup ready interrupt handler is disabled
+ * @mutex: Mutex protecting ready-interrupt/shutdown interlock
+ * @panic_notifier: Panic notifier structure
+ */
+struct ipa_smp2p {
+ struct ipa *ipa;
+ struct qcom_smem_state *valid_state;
+ struct qcom_smem_state *enabled_state;
+ u32 valid_bit;
+ u32 enabled_bit;
+ u32 clock_query_irq;
+ u32 setup_ready_irq;
+ bool power_on;
+ bool notified;
+ bool setup_disabled;
+ struct mutex mutex;
+ struct notifier_block panic_notifier;
+};
+
+/**
+ * ipa_smp2p_notify() - use SMP2P to tell modem about IPA power state
+ * @smp2p: SMP2P information
+ *
+ * This is called either when the modem has requested it (by triggering
+ * the modem power query IPA interrupt) or whenever the AP is shutting down
+ * (via a panic notifier). It sets the two SMP2P state bits--one saying
+ * whether the IPA power is on, and the other indicating the first bit
+ * is valid.
+ */
+static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
+{
+ struct device *dev;
+ u32 value;
+ u32 mask;
+
+ if (smp2p->notified)
+ return;
+
+ dev = &smp2p->ipa->pdev->dev;
+ smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0;
+
+ /* Signal whether the IPA power is enabled */
+ mask = BIT(smp2p->enabled_bit);
+ value = smp2p->power_on ? mask : 0;
+ qcom_smem_state_update_bits(smp2p->enabled_state, mask, value);
+
+ /* Now indicate that the enabled flag is valid */
+ mask = BIT(smp2p->valid_bit);
+ value = mask;
+ qcom_smem_state_update_bits(smp2p->valid_state, mask, value);
+
+ smp2p->notified = true;
+}
+
+/* Threaded IRQ handler for modem "ipa-clock-query" SMP2P interrupt */
+static irqreturn_t ipa_smp2p_modem_clk_query_isr(int irq, void *dev_id)
+{
+ struct ipa_smp2p *smp2p = dev_id;
+
+ ipa_smp2p_notify(smp2p);
+
+ return IRQ_HANDLED;
+}
+
+static int ipa_smp2p_panic_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ipa_smp2p *smp2p;
+
+ smp2p = container_of(nb, struct ipa_smp2p, panic_notifier);
+
+ ipa_smp2p_notify(smp2p);
+
+ if (smp2p->power_on)
+ ipa_uc_panic_notifier(smp2p->ipa);
+
+ return NOTIFY_DONE;
+}
+
+static int ipa_smp2p_panic_notifier_register(struct ipa_smp2p *smp2p)
+{
+ /* IPA panic handler needs to run before modem shuts down */
+ smp2p->panic_notifier.notifier_call = ipa_smp2p_panic_notifier;
+ smp2p->panic_notifier.priority = INT_MAX; /* Do it early */
+
+ return atomic_notifier_chain_register(&panic_notifier_list,
+ &smp2p->panic_notifier);
+}
+
+static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &smp2p->panic_notifier);
+}
+
+/* Threaded IRQ handler for modem "ipa-setup-ready" SMP2P interrupt */
+static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
+{
+ struct ipa_smp2p *smp2p = dev_id;
+ struct device *dev;
+ int ret;
+
+ /* Ignore any (spurious) interrupts received after the first */
+ if (smp2p->ipa->setup_complete)
+ return IRQ_HANDLED;
+
+ /* Power needs to be active for setup */
+ dev = &smp2p->ipa->pdev->dev;
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "error %d getting power for setup\n", ret);
+ goto out_power_put;
+ }
+
+ /* An error here won't cause driver shutdown, so warn if one occurs */
+ ret = ipa_setup(smp2p->ipa);
+ WARN(ret != 0, "error %d from ipa_setup()\n", ret);
+
+out_power_put:
+ pm_runtime_mark_last_busy(dev);
+ (void)pm_runtime_put_autosuspend(dev);
+
+ return IRQ_HANDLED;
+}
+
+/* Initialize SMP2P interrupts */
+static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
+ irq_handler_t handler)
+{
+ struct device *dev = &smp2p->ipa->pdev->dev;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
+ if (ret <= 0)
+ return ret ? : -EINVAL;
+ irq = ret;
+
+ ret = request_threaded_irq(irq, NULL, handler, 0, name, smp2p);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"%s\" IRQ\n", ret, name);
+ return ret;
+ }
+
+ return irq;
+}
+
+static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq)
+{
+ free_irq(irq, smp2p);
+}
+
+/* Drop the power reference if it was taken in ipa_smp2p_notify() */
+static void ipa_smp2p_power_release(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+
+ if (!ipa->smp2p->power_on)
+ return;
+
+ pm_runtime_mark_last_busy(dev);
+ (void)pm_runtime_put_autosuspend(dev);
+ ipa->smp2p->power_on = false;
+}
+
+/* Initialize the IPA SMP2P subsystem */
+int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
+{
+ struct qcom_smem_state *enabled_state;
+ struct device *dev = &ipa->pdev->dev;
+ struct qcom_smem_state *valid_state;
+ struct ipa_smp2p *smp2p;
+ u32 enabled_bit;
+ u32 valid_bit;
+ int ret;
+
+ valid_state = qcom_smem_state_get(dev, "ipa-clock-enabled-valid",
+ &valid_bit);
+ if (IS_ERR(valid_state))
+ return PTR_ERR(valid_state);
+ if (valid_bit >= 32) /* BITS_PER_U32 */
+ return -EINVAL;
+
+ enabled_state = qcom_smem_state_get(dev, "ipa-clock-enabled",
+ &enabled_bit);
+ if (IS_ERR(enabled_state))
+ return PTR_ERR(enabled_state);
+ if (enabled_bit >= 32) /* BITS_PER_U32 */
+ return -EINVAL;
+
+ smp2p = kzalloc(sizeof(*smp2p), GFP_KERNEL);
+ if (!smp2p)
+ return -ENOMEM;
+
+ smp2p->ipa = ipa;
+
+ /* These fields are needed by the power query interrupt
+ * handler, so initialize them now.
+ */
+ mutex_init(&smp2p->mutex);
+ smp2p->valid_state = valid_state;
+ smp2p->valid_bit = valid_bit;
+ smp2p->enabled_state = enabled_state;
+ smp2p->enabled_bit = enabled_bit;
+
+ /* We have enough information saved to handle notifications */
+ ipa->smp2p = smp2p;
+
+ ret = ipa_smp2p_irq_init(smp2p, "ipa-clock-query",
+ ipa_smp2p_modem_clk_query_isr);
+ if (ret < 0)
+ goto err_null_smp2p;
+ smp2p->clock_query_irq = ret;
+
+ ret = ipa_smp2p_panic_notifier_register(smp2p);
+ if (ret)
+ goto err_irq_exit;
+
+ if (modem_init) {
+ /* Result will be non-zero (negative for error) */
+ ret = ipa_smp2p_irq_init(smp2p, "ipa-setup-ready",
+ ipa_smp2p_modem_setup_ready_isr);
+ if (ret < 0)
+ goto err_notifier_unregister;
+ smp2p->setup_ready_irq = ret;
+ }
+
+ return 0;
+
+err_notifier_unregister:
+ ipa_smp2p_panic_notifier_unregister(smp2p);
+err_irq_exit:
+ ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq);
+err_null_smp2p:
+ ipa->smp2p = NULL;
+ mutex_destroy(&smp2p->mutex);
+ kfree(smp2p);
+
+ return ret;
+}
+
+void ipa_smp2p_exit(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+
+ if (smp2p->setup_ready_irq)
+ ipa_smp2p_irq_exit(smp2p, smp2p->setup_ready_irq);
+ ipa_smp2p_panic_notifier_unregister(smp2p);
+ ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq);
+ /* We won't get notified any more; drop power reference (if any) */
+ ipa_smp2p_power_release(ipa);
+ ipa->smp2p = NULL;
+ mutex_destroy(&smp2p->mutex);
+ kfree(smp2p);
+}
+
+void ipa_smp2p_irq_disable_setup(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+
+ if (!smp2p->setup_ready_irq)
+ return;
+
+ mutex_lock(&smp2p->mutex);
+
+ if (!smp2p->setup_disabled) {
+ disable_irq(smp2p->setup_ready_irq);
+ smp2p->setup_disabled = true;
+ }
+
+ mutex_unlock(&smp2p->mutex);
+}
+
+/* Reset state tracking whether we have notified the modem */
+void ipa_smp2p_notify_reset(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+ u32 mask;
+
+ if (!smp2p->notified)
+ return;
+
+ ipa_smp2p_power_release(ipa);
+
+ /* Reset the power enabled valid flag */
+ mask = BIT(smp2p->valid_bit);
+ qcom_smem_state_update_bits(smp2p->valid_state, mask, 0);
+
+ /* Mark the power disabled for good measure... */
+ mask = BIT(smp2p->enabled_bit);
+ qcom_smem_state_update_bits(smp2p->enabled_state, mask, 0);
+
+ smp2p->notified = false;
+}
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
new file mode 100644
index 0000000000..9b969b03d1
--- /dev/null
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2022 Linaro Ltd.
+ */
+#ifndef _IPA_SMP2P_H_
+#define _IPA_SMP2P_H_
+
+#include <linux/types.h>
+
+struct ipa;
+
+/**
+ * ipa_smp2p_init() - Initialize the IPA SMP2P subsystem
+ * @ipa: IPA pointer
+ * @modem_init: Whether the modem is responsible for GSI initialization
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
+
+/**
+ * ipa_smp2p_exit() - Inverse of ipa_smp2p_init()
+ * @ipa: IPA pointer
+ */
+void ipa_smp2p_exit(struct ipa *ipa);
+
+/**
+ * ipa_smp2p_irq_disable_setup() - Disable the "setup ready" interrupt
+ * @ipa: IPA pointer
+ *
+ * Disable the "ipa-setup-ready" interrupt from the modem.
+ */
+void ipa_smp2p_irq_disable_setup(struct ipa *ipa);
+
+/**
+ * ipa_smp2p_notify_reset() - Reset modem notification state
+ * @ipa: IPA pointer
+ *
+ * If the modem crashes it queries the IPA power state. In cleaning
+ * up after such a crash this is used to reset some state maintained
+ * for managing this notification.
+ */
+void ipa_smp2p_notify_reset(struct ipa *ipa);
+
+#endif /* _IPA_SMP2P_H_ */
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
new file mode 100644
index 0000000000..2ff09ce343
--- /dev/null
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021-2022 Linaro Ltd. */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+#include "ipa.h"
+#include "ipa_version.h"
+#include "ipa_sysfs.h"
+
+static const char *ipa_version_string(struct ipa *ipa)
+{
+ switch (ipa->version) {
+ case IPA_VERSION_3_0:
+ return "3.0";
+ case IPA_VERSION_3_1:
+ return "3.1";
+ case IPA_VERSION_3_5:
+ return "3.5";
+ case IPA_VERSION_3_5_1:
+ return "3.5.1";
+ case IPA_VERSION_4_0:
+ return "4.0";
+ case IPA_VERSION_4_1:
+ return "4.1";
+ case IPA_VERSION_4_2:
+ return "4.2";
+ case IPA_VERSION_4_5:
+ return "4.5";
+ case IPA_VERSION_4_7:
+ return "4.7";
+ case IPA_VERSION_4_9:
+ return "4.9";
+ case IPA_VERSION_4_11:
+ return "4.11";
+ case IPA_VERSION_5_0:
+ return "5.0";
+ default:
+ return "0.0"; /* Won't happen (checked at probe time) */
+ }
+}
+
+static ssize_t
+version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ipa_version_string(ipa));
+}
+
+static DEVICE_ATTR_RO(version);
+
+static struct attribute *ipa_attrs[] = {
+ &dev_attr_version.attr,
+ NULL
+};
+
+const struct attribute_group ipa_attribute_group = {
+ .attrs = ipa_attrs,
+};
+
+static const char *ipa_offload_string(struct ipa *ipa)
+{
+ return ipa->version < IPA_VERSION_4_5 ? "MAPv4" : "MAPv5";
+}
+
+static ssize_t rx_offload_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ipa_offload_string(ipa));
+}
+
+static DEVICE_ATTR_RO(rx_offload);
+
+static ssize_t tx_offload_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ipa_offload_string(ipa));
+}
+
+static DEVICE_ATTR_RO(tx_offload);
+
+static struct attribute *ipa_feature_attrs[] = {
+ &dev_attr_rx_offload.attr,
+ &dev_attr_tx_offload.attr,
+ NULL
+};
+
+const struct attribute_group ipa_feature_attribute_group = {
+ .name = "feature",
+ .attrs = ipa_feature_attrs,
+};
+
+static umode_t ipa_endpoint_id_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct ipa *ipa = dev_get_drvdata(kobj_to_dev(kobj));
+ struct device_attribute *dev_attr;
+ struct dev_ext_attribute *ea;
+ bool visible;
+
+ /* An endpoint id attribute is only visible if it's defined */
+ dev_attr = container_of(attr, struct device_attribute, attr);
+ ea = container_of(dev_attr, struct dev_ext_attribute, attr);
+
+ visible = !!ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
+
+ return visible ? attr->mode : 0;
+}
+
+static ssize_t endpoint_id_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+ struct ipa_endpoint *endpoint;
+ struct dev_ext_attribute *ea;
+
+ ea = container_of(attr, struct dev_ext_attribute, attr);
+ endpoint = ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
+
+ return sysfs_emit(buf, "%u\n", endpoint->endpoint_id);
+}
+
+#define ENDPOINT_ID_ATTR(_n, _endpoint_name) \
+ static struct dev_ext_attribute dev_attr_endpoint_id_ ## _n = { \
+ .attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
+ .var = (void *)(_endpoint_name), \
+ }
+
+ENDPOINT_ID_ATTR(modem_rx, IPA_ENDPOINT_AP_MODEM_RX);
+ENDPOINT_ID_ATTR(modem_tx, IPA_ENDPOINT_AP_MODEM_TX);
+
+static struct attribute *ipa_endpoint_id_attrs[] = {
+ &dev_attr_endpoint_id_modem_rx.attr.attr,
+ &dev_attr_endpoint_id_modem_tx.attr.attr,
+ NULL
+};
+
+const struct attribute_group ipa_endpoint_id_attribute_group = {
+ .name = "endpoint_id",
+ .is_visible = ipa_endpoint_id_is_visible,
+ .attrs = ipa_endpoint_id_attrs,
+};
+
+/* Reuse endpoint ID attributes for the legacy modem endpoint IDs */
+#define MODEM_ATTR(_n, _endpoint_name) \
+ static struct dev_ext_attribute dev_attr_modem_ ## _n = { \
+ .attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
+ .var = (void *)(_endpoint_name), \
+ }
+
+MODEM_ATTR(rx_endpoint_id, IPA_ENDPOINT_AP_MODEM_RX);
+MODEM_ATTR(tx_endpoint_id, IPA_ENDPOINT_AP_MODEM_TX);
+
+static struct attribute *ipa_modem_attrs[] = {
+ &dev_attr_modem_rx_endpoint_id.attr.attr,
+ &dev_attr_modem_tx_endpoint_id.attr.attr,
+ NULL,
+};
+
+const struct attribute_group ipa_modem_attribute_group = {
+ .name = "modem",
+ .attrs = ipa_modem_attrs,
+};
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
new file mode 100644
index 0000000000..58ba22810b
--- /dev/null
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2022 Linaro Ltd.
+ */
+#ifndef _IPA_SYSFS_H_
+#define _IPA_SYSFS_H_
+
+struct attribute_group;
+
+extern const struct attribute_group ipa_attribute_group;
+extern const struct attribute_group ipa_feature_attribute_group;
+extern const struct attribute_group ipa_endpoint_id_attribute_group;
+extern const struct attribute_group ipa_modem_attribute_group;
+
+#endif /* _IPA_SYSFS_H_ */
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
new file mode 100644
index 0000000000..7b637bb8b4
--- /dev/null
+++ b/drivers/net/ipa/ipa_table.c
@@ -0,0 +1,774 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2023 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bits.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/build_bug.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include "ipa.h"
+#include "ipa_version.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_reg.h"
+#include "ipa_mem.h"
+#include "ipa_cmd.h"
+#include "gsi.h"
+#include "gsi_trans.h"
+
+/**
+ * DOC: IPA Filter and Route Tables
+ *
+ * The IPA has tables defined in its local (IPA-resident) memory that define
+ * filter and routing rules. An entry in either of these tables is a little
+ * endian 64-bit "slot" that holds the address of a rule definition. (The
+ * size of these slots is 64 bits regardless of the host DMA address size.)
+ *
+ * Separate tables (both filter and route) are used for IPv4 and IPv6. There
+ * is normally another set of "hashed" filter and route tables, which are
+ * used with a hash of message metadata. Hashed operation is not supported
+ * by all IPA hardware (IPA v4.2 doesn't support hashed tables).
+ *
+ * Rules can be in local memory or in DRAM (system memory). The offset of
+ * an object (such as a route or filter table) in IPA-resident memory must
+ * 128-byte aligned. An object in system memory (such as a route or filter
+ * rule) must be at an 8-byte aligned address. We currently only place
+ * route or filter rules in system memory.
+ *
+ * A rule consists of a contiguous block of 32-bit values terminated with
+ * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits
+ * represents "no filtering" or "no routing," and is the reset value for
+ * filter or route table rules.
+ *
+ * Each filter rule is associated with an AP or modem TX endpoint, though
+ * not all TX endpoints support filtering. The first 64-bit slot in a
+ * filter table is a bitmap indicating which endpoints have entries in
+ * the table. Each set bit in this bitmap indicates the presence of the
+ * address of a filter rule in the memory following the bitmap. Until IPA
+ * v5.0, the low-order bit (bit 0) in this bitmap represents a special
+ * global filter, which applies to all traffic. Otherwise the position of
+ * each set bit represents an endpoint for which a filter rule is defined.
+ *
+ * The global rule is not used in current code, and support for it is
+ * removed starting at IPA v5.0. For IPA v5.0+, the endpoint bitmap
+ * position defines the endpoint ID--i.e. if bit 1 is set in the endpoint
+ * bitmap, endpoint 1 has a filter rule. Older versions of IPA represent
+ * the presence of a filter rule for endpoint X by bit (X + 1) being set.
+ * I.e., bit 1 set indicates the presence of a filter rule for endpoint 0,
+ * and bit 3 set means there is a filter rule present for endpoint 2.
+ *
+ * Each filter table entry has the address of a set of equations that
+ * implement a filter rule. So following the endpoint bitmap there
+ * will be such an address/entry for each endpoint with a set bit in
+ * the bitmap.
+ *
+ * The AP initializes all entries in a filter table to refer to a "zero"
+ * rule. Once initialized, the modem and AP update the entries for
+ * endpoints they "own" directly. Currently the AP does not use the IPA
+ * filtering functionality.
+ *
+ * This diagram shows an example of a filter table with an endpoint
+ * bitmap as defined prior to IPA v5.0.
+ *
+ * IPA Filter Table
+ * ----------------------
+ * endpoint bitmap | 0x0000000000000048 | Bits 3 and 6 set (endpoints 2 and 5)
+ * |--------------------|
+ * 1st endpoint | 0x000123456789abc0 | DMA address for modem endpoint 2 rule
+ * |--------------------|
+ * 2nd endpoint | 0x000123456789abf0 | DMA address for AP endpoint 5 rule
+ * |--------------------|
+ * (unused) | | (Unused space in filter table)
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * (unused) | | (Unused space in filter table)
+ * ----------------------
+ *
+ * The set of available route rules is divided about equally between the AP
+ * and modem. The AP initializes all entries in a route table to refer to
+ * a "zero entry". Once initialized, the modem and AP are responsible for
+ * updating their own entries. All entries in a route table are usable,
+ * though the AP currently does not use the IPA routing functionality.
+ *
+ * IPA Route Table
+ * ----------------------
+ * 1st modem route | 0x0001234500001100 | DMA address for first route rule
+ * |--------------------|
+ * 2nd modem route | 0x0001234500001140 | DMA address for second route rule
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * Last modem route| 0x0001234500002280 | DMA address for Nth route rule
+ * |--------------------|
+ * 1st AP route | 0x0001234500001100 | DMA address for route rule (N+1)
+ * |--------------------|
+ * 2nd AP route | 0x0001234500001140 | DMA address for next route rule
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * Last AP route | 0x0001234500002280 | DMA address for last route rule
+ * ----------------------
+ */
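
To make the endpoint bitmap description above concrete, the following editorial sketch (not part of this patch) derives the value stored in the first filter table slot for the diagram's example, where endpoints 2 and 5 support filtering; the function name is invented for illustration.

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Endpoints 2 and 5 support filtering, matching the diagram above */
    static u64 example_filter_bitmap_slot(bool before_v5_0)
    {
    	u64 filtered = BIT(2) | BIT(5);	/* "soft" bitmap: 0x24 */

    	/* Before IPA v5.0, bit 0 is the (unused) global filter, so
    	 * endpoint X maps to bit X + 1; the slot value becomes 0x48
    	 * (bits 3 and 6).  IPA v5.0+ uses the bitmap unshifted (0x24).
    	 */
    	return before_v5_0 ? filtered << 1 : filtered;
    }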
+
+/* Filter or route rules consist of a set of 32-bit values followed by a
+ * 32-bit all-zero rule list terminator. The "zero rule" is simply an
+ * all-zero rule followed by the list terminator.
+ */
+#define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32))
+
+/* Check things that can be validated at build time. */
+static void ipa_table_validate_build(void)
+{
+ /* Filter and route tables contain DMA addresses that refer
+ * to filter or route rules. But the size of a table entry
+ * is 64 bits regardless of what the size of an AP DMA address
+ * is. A fixed constant defines the size of an entry, and
+ * code in ipa_table_init() uses a pointer to __le64 to
+ * initialize tables.
+ */
+ BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));
+
+ /* A "zero rule" is used to represent no filtering or no routing.
+ * It is a 64-bit block of zeroed memory. Code in ipa_table_init()
+ * assumes that it can be written using a pointer to __le64.
+ */
+ BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));
+}
+
+static const struct ipa_mem *
+ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
+{
+ enum ipa_mem_id mem_id;
+
+ mem_id = filter ? hashed ? ipv6 ? IPA_MEM_V6_FILTER_HASHED
+ : IPA_MEM_V4_FILTER_HASHED
+ : ipv6 ? IPA_MEM_V6_FILTER
+ : IPA_MEM_V4_FILTER
+ : hashed ? ipv6 ? IPA_MEM_V6_ROUTE_HASHED
+ : IPA_MEM_V4_ROUTE_HASHED
+ : ipv6 ? IPA_MEM_V6_ROUTE
+ : IPA_MEM_V4_ROUTE;
+
+ return ipa_mem_find(ipa, mem_id);
+}
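
The nested conditional above is dense; as a reading aid, this editorial sketch (not part of the patch) spells out the same (filter, hashed, ipv6) to memory-region mapping as an index-based lookup, using only the ipa_mem_id values already referenced in the function.

    static const enum ipa_mem_id table_mem_id[] = {
    	/* index is (filter << 2) | (hashed << 1) | ipv6 */
    	[0] = IPA_MEM_V4_ROUTE,		/* route,  non-hashed, IPv4 */
    	[1] = IPA_MEM_V6_ROUTE,		/* route,  non-hashed, IPv6 */
    	[2] = IPA_MEM_V4_ROUTE_HASHED,	/* route,  hashed,     IPv4 */
    	[3] = IPA_MEM_V6_ROUTE_HASHED,	/* route,  hashed,     IPv6 */
    	[4] = IPA_MEM_V4_FILTER,	/* filter, non-hashed, IPv4 */
    	[5] = IPA_MEM_V6_FILTER,	/* filter, non-hashed, IPv6 */
    	[6] = IPA_MEM_V4_FILTER_HASHED,	/* filter, hashed,     IPv4 */
    	[7] = IPA_MEM_V6_FILTER_HASHED,	/* filter, hashed,     IPv6 */
    };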
+
+bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 count;
+
+ if (!filtered) {
+ dev_err(dev, "at least one filtering endpoint is required\n");
+
+ return false;
+ }
+
+ count = hweight64(filtered);
+ if (count > ipa->filter_count) {
+ dev_err(dev, "too many filtering endpoints (%u > %u)\n",
+ count, ipa->filter_count);
+
+ return false;
+ }
+
+ return true;
+}
+
+/* Zero entry count means no table, so just return a 0 address */
+static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
+{
+ u32 skip;
+
+ if (!count)
+ return 0;
+
+ WARN_ON(count > max_t(u32, ipa->filter_count, ipa->route_count));
+
+ /* Skip over the zero rule and possibly the filter mask */
+ skip = filter_mask ? 1 : 2;
+
+ return ipa->table_addr + skip * sizeof(*ipa->table_virt);
+}
+
+static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
+ bool hashed, bool ipv6, u16 first, u16 count)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *mem;
+ dma_addr_t addr;
+ u32 offset;
+ u16 size;
+
+ /* Nothing to do if the memory region doesn't exist or is empty */
+ mem = ipa_table_mem(ipa, filter, hashed, ipv6);
+ if (!mem || !mem->size)
+ return;
+
+ if (filter)
+ first++; /* skip over bitmap */
+
+ offset = mem->offset + first * sizeof(__le64);
+ size = count * sizeof(__le64);
+ addr = ipa_table_addr(ipa, false, count);
+
+ ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
+}
+
+/* Reset entries in a single filter table belonging to either the AP or
+ * modem to refer to the zero entry. The memory region supplied will be
+ * for the IPv4 and IPv6 non-hashed and hashed filter tables.
+ */
+static int
+ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem)
+{
+ u64 ep_mask = ipa->filtered;
+ struct gsi_trans *trans;
+ enum gsi_ee_id ee_id;
+
+ trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for %s filter reset\n",
+ modem ? "modem" : "AP");
+ return -EBUSY;
+ }
+
+ ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
+ while (ep_mask) {
+ u32 endpoint_id = __ffs(ep_mask);
+ struct ipa_endpoint *endpoint;
+
+ ep_mask ^= BIT(endpoint_id);
+
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (endpoint->ee_id != ee_id)
+ continue;
+
+ ipa_table_reset_add(trans, true, hashed, ipv6, endpoint_id, 1);
+ }
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+/* Theoretically, each filter table could have more filter slots to
+ * update than the maximum number of commands in a transaction. So
+ * we do each table separately.
+ */
+static int ipa_filter_reset(struct ipa *ipa, bool modem)
+{
+ int ret;
+
+ ret = ipa_filter_reset_table(ipa, false, false, modem);
+ if (ret)
+ return ret;
+
+ ret = ipa_filter_reset_table(ipa, false, true, modem);
+ if (ret || !ipa_table_hash_support(ipa))
+ return ret;
+
+ ret = ipa_filter_reset_table(ipa, true, false, modem);
+ if (ret)
+ return ret;
+
+ return ipa_filter_reset_table(ipa, true, true, modem);
+}
+
+/* The AP routes and modem routes are each contiguous within the
+ * table. We can update each table with a single command, and we
+ * won't exceed the per-transaction command limit.
+ */
+static int ipa_route_reset(struct ipa *ipa, bool modem)
+{
+ bool hash_support = ipa_table_hash_support(ipa);
+ u32 modem_route_count = ipa->modem_route_count;
+ struct gsi_trans *trans;
+ u16 first;
+ u16 count;
+
+ trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for %s route reset\n",
+ modem ? "modem" : "AP");
+ return -EBUSY;
+ }
+
+ if (modem) {
+ first = 0;
+ count = modem_route_count;
+ } else {
+ first = modem_route_count;
+ count = ipa->route_count - modem_route_count;
+ }
+
+ ipa_table_reset_add(trans, false, false, false, first, count);
+ ipa_table_reset_add(trans, false, false, true, first, count);
+
+ if (hash_support) {
+ ipa_table_reset_add(trans, false, true, false, first, count);
+ ipa_table_reset_add(trans, false, true, true, first, count);
+ }
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+void ipa_table_reset(struct ipa *ipa, bool modem)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const char *ee_name;
+ int ret;
+
+ ee_name = modem ? "modem" : "AP";
+
+ /* Report errors, but reset filter and route tables */
+ ret = ipa_filter_reset(ipa, modem);
+ if (ret)
+ dev_err(dev, "error %d resetting filter table for %s\n",
+ ret, ee_name);
+
+ ret = ipa_route_reset(ipa, modem);
+ if (ret)
+ dev_err(dev, "error %d resetting route table for %s\n",
+ ret, ee_name);
+}
+
+int ipa_table_hash_flush(struct ipa *ipa)
+{
+ struct gsi_trans *trans;
+ const struct reg *reg;
+ u32 val;
+
+ if (!ipa_table_hash_support(ipa))
+ return 0;
+
+ trans = ipa_cmd_trans_alloc(ipa, 1);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
+ return -EBUSY;
+ }
+
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+
+ val = reg_bit(reg, IPV6_ROUTER_HASH);
+ val |= reg_bit(reg, IPV6_FILTER_HASH);
+ val |= reg_bit(reg, IPV4_ROUTER_HASH);
+ val |= reg_bit(reg, IPV4_FILTER_HASH);
+ } else {
+ reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
+
+ /* IPA v5.0+ uses a unified cache (both IPv4 and IPv6) */
+ val = reg_bit(reg, ROUTER_CACHE);
+ val |= reg_bit(reg, FILTER_CACHE);
+ }
+
+ ipa_cmd_register_write_add(trans, reg_offset(reg), val, val, false);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+static void ipa_table_init_add(struct gsi_trans *trans, bool filter, bool ipv6)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *hash_mem;
+ enum ipa_cmd_opcode opcode;
+ const struct ipa_mem *mem;
+ dma_addr_t hash_addr;
+ dma_addr_t addr;
+ u32 hash_offset;
+ u32 zero_offset;
+ u16 hash_count;
+ u32 zero_size;
+ u16 hash_size;
+ u16 count;
+ u16 size;
+
+ opcode = filter ? ipv6 ? IPA_CMD_IP_V6_FILTER_INIT
+ : IPA_CMD_IP_V4_FILTER_INIT
+ : ipv6 ? IPA_CMD_IP_V6_ROUTING_INIT
+ : IPA_CMD_IP_V4_ROUTING_INIT;
+
+ /* The non-hashed region will exist (see ipa_table_mem_valid()) */
+ mem = ipa_table_mem(ipa, filter, false, ipv6);
+ hash_mem = ipa_table_mem(ipa, filter, true, ipv6);
+ hash_offset = hash_mem ? hash_mem->offset : 0;
+
+ /* Compute the number of table entries to initialize */
+ if (filter) {
+ /* The number of filtering endpoints determines number of
+ * entries in the filter table; we also add one more "slot"
+ * to hold the bitmap itself. The size of the hashed filter
+ * table is either the same as the non-hashed one, or zero.
+ */
+ count = 1 + hweight64(ipa->filtered);
+ hash_count = hash_mem && hash_mem->size ? count : 0;
+ } else {
+ /* The size of a route table region determines the number
+ * of entries it has.
+ */
+ count = mem->size / sizeof(__le64);
+ hash_count = hash_mem ? hash_mem->size / sizeof(__le64) : 0;
+ }
+ size = count * sizeof(__le64);
+ hash_size = hash_count * sizeof(__le64);
+
+ addr = ipa_table_addr(ipa, filter, count);
+ hash_addr = ipa_table_addr(ipa, filter, hash_count);
+
+ ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
+ hash_size, hash_offset, hash_addr);
+ if (!filter)
+ return;
+
+ /* Zero the unused space in the filter table */
+ zero_offset = mem->offset + size;
+ zero_size = mem->size - size;
+ ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
+ ipa->zero_addr, true);
+ if (!hash_size)
+ return;
+
+ /* Zero the unused space in the hashed filter table */
+ zero_offset = hash_offset + hash_size;
+ zero_size = hash_mem->size - hash_size;
+ ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
+ ipa->zero_addr, true);
+}
+
+int ipa_table_setup(struct ipa *ipa)
+{
+ struct gsi_trans *trans;
+
+ /* We will need at most 8 TREs:
+ * - IPv4:
+ * - One for route table initialization (non-hashed and hashed)
+ * - One for filter table initialization (non-hashed and hashed)
+ * - One to zero unused entries in the non-hashed filter table
+ * - One to zero unused entries in the hashed filter table
+ * - IPv6:
+ * - One for route table initialization (non-hashed and hashed)
+ * - One for filter table initialization (non-hashed and hashed)
+ * - One to zero unused entries in the non-hashed filter table
+ * - One to zero unused entries in the hashed filter table
+ * All platforms support at least 8 TREs in a transaction.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 8);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
+ return -EBUSY;
+ }
+
+ ipa_table_init_add(trans, false, false);
+ ipa_table_init_add(trans, false, true);
+ ipa_table_init_add(trans, true, false);
+ ipa_table_init_add(trans, true, true);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+/**
+ * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
+ * @endpoint: Endpoint whose filter hash tuple should be zeroed
+ *
+ * Endpoint must be for the AP (not modem) and support filtering. Updates
+ * the filter hash values without changing route ones.
+ */
+static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct reg *reg;
+ u32 offset;
+ u32 val;
+
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+
+ offset = reg_n_offset(reg, endpoint_id);
+ val = ioread32(endpoint->ipa->reg_virt + offset);
+
+ /* Zero all filter-related fields, preserving the rest */
+ val &= ~reg_fmask(reg, FILTER_HASH_MSK_ALL);
+ } else {
+ /* IPA v5.0 separates filter and router cache configuration */
+ reg = ipa_reg(ipa, ENDP_FILTER_CACHE_CFG);
+ offset = reg_n_offset(reg, endpoint_id);
+
+ /* Zero all filter-related fields */
+ val = 0;
+ }
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/* Configure a hashed filter table; there is no ipa_filter_deconfig() */
+static void ipa_filter_config(struct ipa *ipa, bool modem)
+{
+ enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
+ u64 ep_mask = ipa->filtered;
+
+ if (!ipa_table_hash_support(ipa))
+ return;
+
+ while (ep_mask) {
+ u32 endpoint_id = __ffs(ep_mask);
+ struct ipa_endpoint *endpoint;
+
+ ep_mask ^= BIT(endpoint_id);
+
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (endpoint->ee_id == ee_id)
+ ipa_filter_tuple_zero(endpoint);
+ }
+}
+
+static bool ipa_route_id_modem(struct ipa *ipa, u32 route_id)
+{
+ return route_id < ipa->modem_route_count;
+}
+
+/**
+ * ipa_route_tuple_zero() - Zero a hashed route table entry tuple
+ * @ipa: IPA pointer
+ * @route_id: Route table entry whose hash tuple should be zeroed
+ *
+ * Updates the route hash values without changing filter ones.
+ */
+static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
+{
+ const struct reg *reg;
+ u32 offset;
+ u32 val;
+
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = reg_n_offset(reg, route_id);
+
+ val = ioread32(ipa->reg_virt + offset);
+
+ /* Zero all route-related fields, preserving the rest */
+ val &= ~reg_fmask(reg, ROUTER_HASH_MSK_ALL);
+ } else {
+ /* IPA v5.0 separates filter and router cache configuration */
+ reg = ipa_reg(ipa, ENDP_ROUTER_CACHE_CFG);
+ offset = reg_n_offset(reg, route_id);
+
+ /* Zero all route-related fields */
+ val = 0;
+ }
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/* Configure a hashed route table; there is no ipa_route_deconfig() */
+static void ipa_route_config(struct ipa *ipa, bool modem)
+{
+ u32 route_id;
+
+ if (!ipa_table_hash_support(ipa))
+ return;
+
+ for (route_id = 0; route_id < ipa->route_count; route_id++)
+ if (ipa_route_id_modem(ipa, route_id) == modem)
+ ipa_route_tuple_zero(ipa, route_id);
+}
+
+/* Configure filter and route tables; there is no ipa_table_deconfig() */
+void ipa_table_config(struct ipa *ipa)
+{
+ ipa_filter_config(ipa, false);
+ ipa_filter_config(ipa, true);
+ ipa_route_config(ipa, false);
+ ipa_route_config(ipa, true);
+}
+
+/* Verify the sizes of all IPA table filter or routing table memory regions
+ * are valid. If valid, this records the size of the routing table.
+ */
+bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
+{
+ bool hash_support = ipa_table_hash_support(ipa);
+ const struct ipa_mem *mem_hashed;
+ const struct ipa_mem *mem_ipv4;
+ const struct ipa_mem *mem_ipv6;
+ u32 count;
+
+ /* IPv4 and IPv6 non-hashed tables are expected to be defined and
+ * have the same size. Both must have at least two entries (and
+ * would normally have more than that).
+ */
+ mem_ipv4 = ipa_table_mem(ipa, filter, false, false);
+ if (!mem_ipv4)
+ return false;
+
+ mem_ipv6 = ipa_table_mem(ipa, filter, false, true);
+ if (!mem_ipv6)
+ return false;
+
+ if (mem_ipv4->size != mem_ipv6->size)
+ return false;
+
+ /* Compute and record the number of entries for each table type */
+ count = mem_ipv4->size / sizeof(__le64);
+ if (count < 2)
+ return false;
+ if (filter)
+ ipa->filter_count = count - 1; /* Filter map in first entry */
+ else
+ ipa->route_count = count;
+
+ /* Table offset and size must fit in TABLE_INIT command fields */
+ if (!ipa_cmd_table_init_valid(ipa, mem_ipv4, !filter))
+ return false;
+
+ /* Make sure the regions are big enough */
+ if (filter) {
+ /* Filter tables must be able to hold the endpoint bitmap plus
+ * an entry for each endpoint that supports filtering
+ */
+ if (count < 1 + hweight64(ipa->filtered))
+ return false;
+ } else {
+ /* Routing tables must be able to hold all modem entries,
+ * plus at least one entry for the AP.
+ */
+ if (count < ipa->modem_route_count + 1)
+ return false;
+ }
+
+ /* If hashing is supported, hashed tables are expected to be defined,
+ * and have the same size as non-hashed tables. If hashing is not
+ * supported, hashed tables are expected to have zero size (or not
+ * be defined).
+ */
+ mem_hashed = ipa_table_mem(ipa, filter, true, false);
+ if (hash_support) {
+ if (!mem_hashed || mem_hashed->size != mem_ipv4->size)
+ return false;
+ } else {
+ if (mem_hashed && mem_hashed->size)
+ return false;
+ }
+
+ /* Same check for IPv6 tables */
+ mem_hashed = ipa_table_mem(ipa, filter, true, true);
+ if (hash_support) {
+ if (!mem_hashed || mem_hashed->size != mem_ipv6->size)
+ return false;
+ } else {
+ if (mem_hashed && mem_hashed->size)
+ return false;
+ }
+
+ return true;
+}
+
+/* Initialize a coherent DMA allocation containing initialized filter and
+ * route table data. This is used when initializing or resetting the IPA
+ * filter or route table.
+ *
+ * The first entry in a filter table contains a bitmap indicating which
+ * endpoints contain entries in the table. In addition to that first entry,
+ * there is a fixed maximum number of entries that follow. Filter table
+ * entries are 64 bits wide, and (other than the bitmap) contain the DMA
+ * address of a filter rule. A "zero rule" indicates no filtering, and
+ * consists of 64 bits of zeroes. When a filter table is initialized (or
+ * reset) its entries are made to refer to the zero rule.
+ *
+ * Each entry in a route table is the DMA address of a routing rule. For
+ * routing there is also a 64-bit "zero rule" that means no routing, and
+ * when a route table is initialized or reset, its entries are made to refer
+ * to the zero rule. The zero rule is shared for route and filter tables.
+ *
+ * +-------------------+
+ * --> | zero rule |
+ * / |-------------------|
+ * | | filter mask |
+ * |\ |-------------------|
+ * | ---- zero rule address | \
+ * |\ |-------------------| |
+ * | ---- zero rule address | | Max IPA filter count
+ * | |-------------------| > or IPA route count,
+ * | ... | whichever is greater
+ * \ |-------------------| |
+ * ---- zero rule address | /
+ * +-------------------+
+ */
+int ipa_table_init(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ dma_addr_t addr;
+ __le64 le_addr;
+ __le64 *virt;
+ size_t size;
+ u32 count;
+
+ ipa_table_validate_build();
+
+ count = max_t(u32, ipa->filter_count, ipa->route_count);
+
+ /* The IPA hardware requires route and filter table rules to be
+ * aligned on a 128-byte boundary. We put the "zero rule" at the
+ * base of the table area allocated here. The DMA address returned
+ * by dma_alloc_coherent() is guaranteed to be a power-of-2 number
+ * of pages, which satisfies the rule alignment requirement.
+ */
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
+ virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ ipa->table_virt = virt;
+ ipa->table_addr = addr;
+
+ /* First slot is the zero rule */
+ *virt++ = 0;
+
+ /* Next is the filter table bitmap. The "soft" bitmap value might
+ * need to be converted to the hardware representation by shifting
+ * it left one position. Prior to IPA v5.0, bit 0 represents global
+ * filtering, which is possible but not used. IPA v5.0+ eliminated
+ * that option, so there's no shifting required.
+ */
+ if (ipa->version < IPA_VERSION_5_0)
+ *virt++ = cpu_to_le64(ipa->filtered << 1);
+ else
+ *virt++ = cpu_to_le64(ipa->filtered);
+
+ /* All the rest contain the DMA address of the zero rule */
+ le_addr = cpu_to_le64(addr);
+ while (count--)
+ *virt++ = le_addr;
+
+ return 0;
+}
+
+void ipa_table_exit(struct ipa *ipa)
+{
+ u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count);
+ struct device *dev = &ipa->pdev->dev;
+ size_t size;
+
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
+
+ dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
+ ipa->table_addr = 0;
+ ipa->table_virt = NULL;
+}
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
new file mode 100644
index 0000000000..7cc951904b
--- /dev/null
+++ b/drivers/net/ipa/ipa_table.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2022 Linaro Ltd.
+ */
+#ifndef _IPA_TABLE_H_
+#define _IPA_TABLE_H_
+
+#include <linux/types.h>
+
+struct ipa;
+
+/**
+ * ipa_filtered_valid() - Validate a filter table endpoint bitmap
+ * @ipa: IPA pointer
+ * @filtered: Filter table endpoint bitmap to check
+ *
+ * Return: true if the endpoint bitmap is valid, false otherwise
+ */
+bool ipa_filtered_valid(struct ipa *ipa, u64 filtered);
+
+/**
+ * ipa_table_hash_support() - Return true if hashed tables are supported
+ * @ipa: IPA pointer
+ */
+static inline bool ipa_table_hash_support(struct ipa *ipa)
+{
+ return ipa->version != IPA_VERSION_4_2;
+}
+
+/**
+ * ipa_table_reset() - Reset filter and route tables entries to "none"
+ * @ipa: IPA pointer
+ * @modem: Whether to reset modem or AP entries
+ */
+void ipa_table_reset(struct ipa *ipa, bool modem);
+
+/**
+ * ipa_table_hash_flush() - Synchronize hashed filter and route updates
+ * @ipa: IPA pointer
+ */
+int ipa_table_hash_flush(struct ipa *ipa);
+
+/**
+ * ipa_table_setup() - Set up filter and route tables
+ * @ipa: IPA pointer
+ *
+ * There is no need for a matching ipa_table_teardown() function.
+ */
+int ipa_table_setup(struct ipa *ipa);
+
+/**
+ * ipa_table_config() - Configure filter and route tables
+ * @ipa: IPA pointer
+ *
+ * There is no need for a matching ipa_table_deconfig() function.
+ */
+void ipa_table_config(struct ipa *ipa);
+
+/**
+ * ipa_table_init() - Do early initialization of filter and route tables
+ * @ipa: IPA pointer
+ */
+int ipa_table_init(struct ipa *ipa);
+
+/**
+ * ipa_table_exit() - Inverse of ipa_table_init()
+ * @ipa: IPA pointer
+ */
+void ipa_table_exit(struct ipa *ipa);
+
+/**
+ * ipa_table_mem_valid() - Validate sizes of table memory regions
+ * @ipa: IPA pointer
+ * @filter: Whether to check filter or routing tables
+ */
+bool ipa_table_mem_valid(struct ipa *ipa, bool filter);
+
+#endif /* _IPA_TABLE_H_ */
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
new file mode 100644
index 0000000000..7eaa0b4ebe
--- /dev/null
+++ b/drivers/net/ipa/ipa_uc.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2022 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+
+#include "ipa.h"
+#include "ipa_uc.h"
+#include "ipa_power.h"
+
+/**
+ * DOC: The IPA embedded microcontroller
+ *
+ * The IPA incorporates a microcontroller that is able to do some additional
+ * handling/offloading of network activity. The current code makes
+ * essentially no use of the microcontroller, but it still requires some
+ * initialization. It needs to be notified in the event the AP crashes.
+ *
+ * The microcontroller can generate two interrupts to the AP. One interrupt
+ * is used to indicate that a response to a request from the AP is available.
+ * The other is used to notify the AP of the occurrence of an event. In
+ * addition, the AP can interrupt the microcontroller by writing a register.
+ *
+ * A 128 byte block of structured memory within the IPA SRAM is used together
+ * with these interrupts to implement the communication interface between the
+ * AP and the IPA microcontroller. Each side writes data to the shared area
+ * before interrupting its peer, which will read the written data in response
+ * to the interrupt. Some information found in the shared area is currently
+ * unused. All remaining space in the shared area is reserved, and must not
+ * be read or written by the AP.
+ */
+/* Supports hardware interface version 0x2000 */
+
+/* Delay to allow the microcontroller to save state when crashing */
+#define IPA_SEND_DELAY 100 /* microseconds */
+
+/**
+ * struct ipa_uc_mem_area - AP/microcontroller shared memory area
+ * @command: command code (AP->microcontroller)
+ * @reserved0: reserved bytes; avoid reading or writing
+ * @command_param: low 32 bits of command parameter (AP->microcontroller)
+ * @command_param_hi: high 32 bits of command parameter (AP->microcontroller)
+ *
+ * @response: response code (microcontroller->AP)
+ * @reserved1: reserved bytes; avoid reading or writing
+ * @response_param: response parameter (microcontroller->AP)
+ *
+ * @event: event code (microcontroller->AP)
+ * @reserved2: reserved bytes; avoid reading or writing
+ * @event_param: event parameter (microcontroller->AP)
+ *
+ * @first_error_address: address of first error-source on SNOC
+ * @hw_state: state of hardware (including error type information)
+ * @warning_counter: counter of non-fatal hardware errors
+ * @reserved3: reserved bytes; avoid reading or writing
+ * @interface_version: hardware-reported interface version
+ * @reserved4: reserved bytes; avoid reading or writing
+ *
+ * A shared memory area at the base of IPA resident memory is used for
+ * communication with the microcontroller. The region is 128 bytes in
+ * size, but only the first 40 bytes (structured this way) are used.
+ */
+struct ipa_uc_mem_area {
+ u8 command; /* enum ipa_uc_command */
+ u8 reserved0[3];
+ __le32 command_param;
+ __le32 command_param_hi;
+ u8 response; /* enum ipa_uc_response */
+ u8 reserved1[3];
+ __le32 response_param;
+ u8 event; /* enum ipa_uc_event */
+ u8 reserved2[3];
+
+ __le32 event_param;
+ __le32 first_error_address;
+ u8 hw_state;
+ u8 warning_counter;
+ __le16 reserved3;
+ __le16 interface_version;
+ __le16 reserved4;
+};
+
+/** enum ipa_uc_command - commands from the AP to the microcontroller */
+enum ipa_uc_command {
+ IPA_UC_COMMAND_NO_OP = 0x0,
+ IPA_UC_COMMAND_UPDATE_FLAGS = 0x1,
+ IPA_UC_COMMAND_DEBUG_RUN_TEST = 0x2,
+ IPA_UC_COMMAND_DEBUG_GET_INFO = 0x3,
+ IPA_UC_COMMAND_ERR_FATAL = 0x4,
+ IPA_UC_COMMAND_CLK_GATE = 0x5,
+ IPA_UC_COMMAND_CLK_UNGATE = 0x6,
+ IPA_UC_COMMAND_MEMCPY = 0x7,
+ IPA_UC_COMMAND_RESET_PIPE = 0x8,
+ IPA_UC_COMMAND_REG_WRITE = 0x9,
+ IPA_UC_COMMAND_GSI_CH_EMPTY = 0xa,
+};
+
+/** enum ipa_uc_response - microcontroller response codes */
+enum ipa_uc_response {
+ IPA_UC_RESPONSE_NO_OP = 0x0,
+ IPA_UC_RESPONSE_INIT_COMPLETED = 0x1,
+ IPA_UC_RESPONSE_CMD_COMPLETED = 0x2,
+ IPA_UC_RESPONSE_DEBUG_GET_INFO = 0x3,
+};
+
+/** enum ipa_uc_event - common cpu events reported by the microcontroller */
+enum ipa_uc_event {
+ IPA_UC_EVENT_NO_OP = 0x0,
+ IPA_UC_EVENT_ERROR = 0x1,
+ IPA_UC_EVENT_LOG_INFO = 0x2,
+};
+
+static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
+{
+ const struct ipa_mem *mem = ipa_mem_find(ipa, IPA_MEM_UC_SHARED);
+ u32 offset = ipa->mem_offset + mem->offset;
+
+ return ipa->mem_virt + offset;
+}
+
+/* Microcontroller event IPA interrupt handler */
+static void ipa_uc_event_handler(struct ipa *ipa)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ struct device *dev = &ipa->pdev->dev;
+
+ if (shared->event == IPA_UC_EVENT_ERROR)
+ dev_err(dev, "microcontroller error event\n");
+ else if (shared->event != IPA_UC_EVENT_LOG_INFO)
+ dev_err(dev, "unsupported microcontroller event %u\n",
+ shared->event);
+ /* The LOG_INFO event can be safely ignored */
+}
+
+/* Microcontroller response IPA interrupt handler */
+static void ipa_uc_response_hdlr(struct ipa *ipa)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ struct device *dev = &ipa->pdev->dev;
+
+ /* An INIT_COMPLETED response message is sent to the AP by the
+ * microcontroller when it is operational. Other than this, the AP
+ * should only receive responses from the microcontroller when it has
+ * sent it a request message.
+ *
+ * We can drop the power reference taken in ipa_uc_power() once we
+ * know the microcontroller has finished its initialization.
+ */
+ switch (shared->response) {
+ case IPA_UC_RESPONSE_INIT_COMPLETED:
+ if (ipa->uc_powered) {
+ ipa->uc_loaded = true;
+ ipa_power_retention(ipa, true);
+ pm_runtime_mark_last_busy(dev);
+ (void)pm_runtime_put_autosuspend(dev);
+ ipa->uc_powered = false;
+ } else {
+ dev_warn(dev, "unexpected init_completed response\n");
+ }
+ break;
+ default:
+ dev_warn(dev, "unsupported microcontroller response %u\n",
+ shared->response);
+ break;
+ }
+}
+
+void ipa_uc_interrupt_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ /* Silently ignore anything unrecognized */
+ if (irq_id == IPA_IRQ_UC_0)
+ ipa_uc_event_handler(ipa);
+ else if (irq_id == IPA_IRQ_UC_1)
+ ipa_uc_response_hdlr(ipa);
+}
+
+/* Configure the IPA microcontroller subsystem */
+void ipa_uc_config(struct ipa *ipa)
+{
+ ipa->uc_powered = false;
+ ipa->uc_loaded = false;
+ ipa_interrupt_enable(ipa, IPA_IRQ_UC_0);
+ ipa_interrupt_enable(ipa, IPA_IRQ_UC_1);
+}
+
+/* Inverse of ipa_uc_config() */
+void ipa_uc_deconfig(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+
+ ipa_interrupt_disable(ipa, IPA_IRQ_UC_1);
+ ipa_interrupt_disable(ipa, IPA_IRQ_UC_0);
+ if (ipa->uc_loaded)
+ ipa_power_retention(ipa, false);
+
+ if (!ipa->uc_powered)
+ return;
+
+ pm_runtime_mark_last_busy(dev);
+ (void)pm_runtime_put_autosuspend(dev);
+}
+
+/* Take a proxy power reference for the microcontroller */
+void ipa_uc_power(struct ipa *ipa)
+{
+ static bool already;
+ struct device *dev;
+ int ret;
+
+ if (already)
+ return;
+ already = true; /* Only do this on first boot */
+
+ /* This power reference dropped in ipa_uc_response_hdlr() above */
+ dev = &ipa->pdev->dev;
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ dev_err(dev, "error %d getting proxy power\n", ret);
+ } else {
+ ipa->uc_powered = true;
+ }
+}
+
+/* Send a command to the microcontroller */
+static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ const struct reg *reg;
+ u32 val;
+
+ /* Fill in the command data */
+ shared->command = command;
+ shared->command_param = cpu_to_le32(command_param);
+ shared->command_param_hi = 0;
+ shared->response = 0;
+ shared->response_param = 0;
+
+ /* Use an interrupt to tell the microcontroller the command is ready */
+ reg = ipa_reg(ipa, IPA_IRQ_UC);
+ val = reg_bit(reg, UC_INTR);
+
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
+}
+
+/* Tell the microcontroller the AP is shutting down */
+void ipa_uc_panic_notifier(struct ipa *ipa)
+{
+ if (!ipa->uc_loaded)
+ return;
+
+ send_uc_command(ipa, IPA_UC_COMMAND_ERR_FATAL, 0);
+
+ /* give uc enough time to save state */
+ udelay(IPA_SEND_DELAY);
+}
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
new file mode 100644
index 0000000000..85aa0df818
--- /dev/null
+++ b/drivers/net/ipa/ipa_uc.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2022 Linaro Ltd.
+ */
+#ifndef _IPA_UC_H_
+#define _IPA_UC_H_
+
+struct ipa;
+enum ipa_irq_id;
+
+/**
+ * ipa_uc_interrupt_handler() - Handler for microcontroller IPA interrupts
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt ID
+ */
+void ipa_uc_interrupt_handler(struct ipa *ipa, enum ipa_irq_id irq_id);
+
+/**
+ * ipa_uc_config() - Configure the IPA microcontroller subsystem
+ * @ipa: IPA pointer
+ */
+void ipa_uc_config(struct ipa *ipa);
+
+/**
+ * ipa_uc_deconfig() - Inverse of ipa_uc_config()
+ * @ipa: IPA pointer
+ */
+void ipa_uc_deconfig(struct ipa *ipa);
+
+/**
+ * ipa_uc_power() - Take a proxy power reference for the microcontroller
+ * @ipa: IPA pointer
+ *
+ * The first time the modem boots, it loads firmware for and starts the
+ * IPA-resident microcontroller. The microcontroller signals that it
+ * has completed its initialization by sending an INIT_COMPLETED response
+ * message to the AP. The AP must ensure the IPA is powered until
+ * it receives this message, and to do so we take a "proxy" clock
+ * reference on its behalf here. Once we receive the INIT_COMPLETED
+ * message (in ipa_uc_response_hdlr()) we drop this power reference.
+ */
+void ipa_uc_power(struct ipa *ipa);
+
+/**
+ * ipa_uc_panic_notifier()
+ * @ipa: IPA pointer
+ *
+ * Notifier function called when the system crashes, to inform the
+ * microcontroller of the event.
+ */
+void ipa_uc_panic_notifier(struct ipa *ipa);
+
+#endif /* _IPA_UC_H_ */
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
new file mode 100644
index 0000000000..06e75b8ece
--- /dev/null
+++ b/drivers/net/ipa/ipa_version.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2022 Linaro Ltd.
+ */
+#ifndef _IPA_VERSION_H_
+#define _IPA_VERSION_H_
+
+/**
+ * enum ipa_version
+ * @IPA_VERSION_3_0: IPA version 3.0/GSI version 1.0
+ * @IPA_VERSION_3_1: IPA version 3.1/GSI version 1.0
+ * @IPA_VERSION_3_5: IPA version 3.5/GSI version 1.2
+ * @IPA_VERSION_3_5_1: IPA version 3.5.1/GSI version 1.3
+ * @IPA_VERSION_4_0: IPA version 4.0/GSI version 2.0
+ * @IPA_VERSION_4_1: IPA version 4.1/GSI version 2.0
+ * @IPA_VERSION_4_2: IPA version 4.2/GSI version 2.2
+ * @IPA_VERSION_4_5: IPA version 4.5/GSI version 2.5
+ * @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7
+ * @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
+ * @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
+ * @IPA_VERSION_5_0: IPA version 5.0/GSI version 3.0
+ * @IPA_VERSION_5_1: IPA version 5.1/GSI version 3.0
+ * @IPA_VERSION_5_5: IPA version 5.5/GSI version 5.5
+ * @IPA_VERSION_COUNT: Number of defined IPA versions
+ *
+ * Defines the version of IPA (and GSI) hardware present on the platform.
+ * Please update ipa_version_string() whenever a new version is added.
+ */
+enum ipa_version {
+ IPA_VERSION_3_0,
+ IPA_VERSION_3_1,
+ IPA_VERSION_3_5,
+ IPA_VERSION_3_5_1,
+ IPA_VERSION_4_0,
+ IPA_VERSION_4_1,
+ IPA_VERSION_4_2,
+ IPA_VERSION_4_5,
+ IPA_VERSION_4_7,
+ IPA_VERSION_4_9,
+ IPA_VERSION_4_11,
+ IPA_VERSION_5_0,
+ IPA_VERSION_5_1,
+ IPA_VERSION_5_5,
+ IPA_VERSION_COUNT, /* Last; not a version */
+};
+
+static inline bool ipa_version_supported(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_1:
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_2:
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_7:
+ case IPA_VERSION_4_9:
+ case IPA_VERSION_4_11:
+ case IPA_VERSION_5_0:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Execution environment IDs */
+enum gsi_ee_id {
+ GSI_EE_AP = 0x0,
+ GSI_EE_MODEM = 0x1,
+ GSI_EE_UC = 0x2,
+ GSI_EE_TZ = 0x3,
+};
+
+#endif /* _IPA_VERSION_H_ */
diff --git a/drivers/net/ipa/reg.h b/drivers/net/ipa/reg.h
new file mode 100644
index 0000000000..2ee07eebca
--- /dev/null
+++ b/drivers/net/ipa/reg.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2022-2023 Linaro Ltd. */
+
+#ifndef _REG_H_
+#define _REG_H_
+
+#include <linux/types.h>
+#include <linux/log2.h>
+#include <linux/bug.h>
+
+/**
+ * struct reg - A register descriptor
+ * @offset: Register offset relative to base of register memory
+ * @stride: Distance between two instances, if parameterized
+ * @fcount: Number of entries in the @fmask array
+ * @fmask: Array of mask values defining position and width of fields
+ * @name: Upper-case name of the register
+ */
+struct reg {
+ u32 offset;
+ u32 stride;
+ u32 fcount;
+ const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
+ const char *name;
+};
+
+/* Helper macro for defining "simple" (non-parameterized) registers */
+#define REG(__NAME, __reg_id, __offset) \
+ REG_STRIDE(__NAME, __reg_id, __offset, 0)
+
+/* Helper macro for defining parameterized registers, specifying stride */
+#define REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
+ static const struct reg reg_ ## __reg_id = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ }
+
+#define REG_FIELDS(__NAME, __name, __offset) \
+ REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
+
+#define REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
+ static const struct reg reg_ ## __name = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ .fcount = ARRAY_SIZE(reg_ ## __name ## _fmask), \
+ .fmask = reg_ ## __name ## _fmask, \
+ }
+
+/**
+ * struct regs - Description of registers supported by hardware
+ * @reg_count: Number of registers in the @reg[] array
+ * @reg: Array of register descriptors
+ */
+struct regs {
+ u32 reg_count;
+ const struct reg **reg;
+};
+
+static inline const struct reg *reg(const struct regs *regs, u32 reg_id)
+{
+ if (WARN(reg_id >= regs->reg_count,
+ "reg out of range (%u > %u)\n", reg_id, regs->reg_count - 1))
+ return NULL;
+
+ return regs->reg[reg_id];
+}
+
+/* Return the field mask for a field in a register, or 0 on error */
+static inline u32 reg_fmask(const struct reg *reg, u32 field_id)
+{
+ if (!reg || WARN_ON(field_id >= reg->fcount))
+ return 0;
+
+ return reg->fmask[field_id];
+}
+
+/* Return the mask for a single-bit field in a register, or 0 on error */
+static inline u32 reg_bit(const struct reg *reg, u32 field_id)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ if (WARN_ON(!is_power_of_2(fmask)))
+ return 0;
+
+ return fmask;
+}
+
+/* Return the maximum value representable by the given field; always 2^n - 1 */
+static inline u32 reg_field_max(const struct reg *reg, u32 field_id)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ return fmask ? fmask >> __ffs(fmask) : 0;
+}
+
+/* Encode a value into the given field of a register */
+static inline u32 reg_encode(const struct reg *reg, u32 field_id, u32 val)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ if (!fmask)
+ return 0;
+
+ val <<= __ffs(fmask);
+ if (WARN_ON(val & ~fmask))
+ return 0;
+
+ return val;
+}
+
+/* Given a register value, decode (extract) the value in the given field */
+static inline u32 reg_decode(const struct reg *reg, u32 field_id, u32 val)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ return fmask ? (val & fmask) >> __ffs(fmask) : 0;
+}
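+
+/* Editorial sketch of building and parsing a register value with the two
+ * helpers above.  CH_CMD, CH_CHID and CH_OPCODE are real register/field IDs
+ * (see the per-version files below); "regs", "channel_id" and "opcode" are
+ * assumed to be supplied by the caller:
+ *
+ * const struct reg *r = reg(regs, CH_CMD);
+ * u32 val;
+ *
+ * val = reg_encode(r, CH_CHID, channel_id);
+ * val |= reg_encode(r, CH_OPCODE, opcode);
+ *
+ * channel_id = reg_decode(r, CH_CHID, val);
+ */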
+
+/* Returns 0 for NULL reg; warning should have already been issued */
+static inline u32 reg_offset(const struct reg *reg)
+{
+ return reg ? reg->offset : 0;
+}
+
+/* Returns 0 for NULL reg; warning should have already been issued */
+static inline u32 reg_n_offset(const struct reg *reg, u32 n)
+{
+ return reg ? reg->offset + n * reg->stride : 0;
+}
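+
+/* Editorial sketch: for a parameterized register such as CH_C_CNTXT_0 in the
+ * files below (stride 0x80), the offset of the instance for channel "ch" is
+ * offset + ch * stride, as computed by reg_n_offset(); "regs" and "ch" are
+ * assumed to be supplied by the caller:
+ *
+ * const struct reg *r = reg(regs, CH_C_CNTXT_0);
+ * u32 offset = reg_n_offset(r, ch);
+ */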
+
+#endif /* _REG_H_ */
diff --git a/drivers/net/ipa/reg/gsi_reg-v3.1.c b/drivers/net/ipa/reg/gsi_reg-v3.1.c
new file mode 100644
index 0000000000..e036805a78
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v3.1.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ /* Bit 13 reserved */
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
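+
+/* Editorial note: each gsi_regs_vX_Y table defined in these files describes
+ * one GSI hardware generation.  A minimal sketch of how such a table might
+ * be selected from an IPA version (the driver's actual selection logic lives
+ * elsewhere and may differ):
+ *
+ * switch (version) {
+ * case IPA_VERSION_3_1:
+ *  return &gsi_regs_v3_1;
+ * case IPA_VERSION_3_5_1:
+ *  return &gsi_regs_v3_5_1;
+ * ...
+ * }
+ */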
diff --git a/drivers/net/ipa/reg/gsi_reg-v3.5.1.c b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
new file mode 100644
index 0000000000..8c3ab3a528
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ /* Bit 13 reserved */
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ /* Bits 15-31 reserved */
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.0.c b/drivers/net/ipa/reg/gsi_reg-v4.0.c
new file mode 100644
index 0000000000..7cc7a21d07
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.0.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ /* Bit 13 reserved */
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [USE_ESCAPE_BUF_ONLY] = BIT(10),
+ /* Bits 11-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ /* Bits 30-31 reserved */
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_0 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.11.c b/drivers/net/ipa/reg/gsi_reg-v4.11.c
new file mode 100644
index 0000000000..0169651903
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.11.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ [CHTYPE_PROTOCOL_MSB] = BIT(13),
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(19, 0),
+ /* Bits 20-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [PREFETCH_MODE] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
+ [DB_IN_BYTES] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(19, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-23 reserved */
+ [GENERIC_PARAMS] = GENMASK(31, 24),
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ [GSI_USE_RD_WR_ENG] = BIT(30),
+ [GSI_USE_INTER_EE] = BIT(31),
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x00012098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001209c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x000120a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x000120a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x000120b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x000120c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.5.c b/drivers/net/ipa/reg/gsi_reg-v4.5.c
new file mode 100644
index 0000000000..2900e5c3ff
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.5.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ [CHTYPE_PROTOCOL_MSB] = BIT(13),
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [PREFETCH_MODE] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
+ /* Bits 24-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ [GSI_USE_RD_WR_ENG] = BIT(30),
+ [GSI_USE_INTER_EE] = BIT(31),
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x00012098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001209c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x000120a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x000120a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x000120b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x000120c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.9.c b/drivers/net/ipa/reg/gsi_reg-v4.9.c
new file mode 100644
index 0000000000..8b5d95425a
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.9.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ [CHTYPE_PROTOCOL_MSB] = BIT(13),
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(19, 0),
+ /* Bits 20-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [PREFETCH_MODE] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
+ [DB_IN_BYTES] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ [GSI_USE_RD_WR_ENG] = BIT(30),
+ [GSI_USE_INTER_EE] = BIT(31),
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x00012098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001209c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x000120a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x000120a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x000120b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x000120c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c
new file mode 100644
index 0000000000..145eb0bd09
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
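+/* Reading aid for these per-version files (not authoritative; the REG*()
+ * macros are defined in ../reg.h): each reg_<name>_fmask[] array maps a
+ * field ID to its bit mask, built with BIT()/GENMASK(). REG_FIELDS() and
+ * REG_STRIDE_FIELDS() emit a struct reg that is presumed to pick up the
+ * matching fmask array by name, and the reg_array[] at the bottom of the
+ * file is indexed by register ID so the driver can look up the offset,
+ * stride and field masks appropriate to this GSI version at runtime.
+ */
+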
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c01c + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c028 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(6, 0),
+ [CHTYPE_DIR] = BIT(7),
+ [CH_EE] = GENMASK(11, 8),
+ [CHID] = GENMASK(19, 12),
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x00014000 + 0x12000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(23, 0),
+ [ERINDEX] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x00014004 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x00014008 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001400c + 0x12000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [PREFETCH_MODE] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
+ [DB_IN_BYTES] = BIT(24),
+ [LOW_LATENCY_EN] = BIT(25),
+ /* Bits 26-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x00014048 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001404c + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x00014050 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x00014054 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x00014058 + 0x12000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(6, 0),
+ [EV_INTYPE] = BIT(7),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_EE] = GENMASK(19, 16),
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001c000 + 0x12000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(23, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001c004 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001c008 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001c00c + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001c010 + 0x12000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001c020 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001c024 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001c028 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001c02c + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001c030 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001c034 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001c048 + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001c04c + 0x12000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x00024000 + 0x12000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x00024800 + 0x12000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00025000 + 0x12000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x00025008 + 0x12000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00025010 + 0x12000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00025018 + 0x12000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [NUM_CH_PER_EE] = GENMASK(7, 0),
+ [IRAM_SIZE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ [GSI_USE_RD_WR_ENG] = BIT(30),
+ [GSI_USE_INTER_EE] = BIT(31),
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00025040 + 0x12000 * GSI_EE_AP);
+
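+/* HW_PARAM_4 is assumed to be new with GSI v5.0: earlier versions (see
+ * gsi_reg-v4.9.c above) report the per-EE event ring count through the
+ * NUM_EV_PER_EE field of HW_PARAM_2, whereas here it moves to EV_PER_EE.
+ */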
+static const u32 reg_hw_param_4_fmask[] = {
+ [EV_PER_EE] = GENMASK(7, 0),
+ [IRAM_PROTOCOL_COUNT] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+REG_FIELDS(HW_PARAM_4, hw_param_4, 0x00025050 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00025080 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00025088 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00025090 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x00025094 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x00025098 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0002509c + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x000250a0 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x000250a4 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000250a8 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x000250ac + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x000250b0 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00025200 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00025204 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00025208 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0002520c + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00025210 + 0x12000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00025214 + 0x12000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00025220 + 0x12000 * GSI_EE_AP);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_FIELDS(ERROR_LOG, error_log, 0x00025240 + 0x12000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x00025244 + 0x12000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00025400 + 0x12000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [HW_PARAM_4] = &reg_hw_param_4,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v5_0 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
new file mode 100644
index 0000000000..648dbfe1fc
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
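+/* IPA-side register layout for IPA v3.1. These follow the same pattern
+ * as the GSI files above: fmask[] arrays describe decoded fields, plain
+ * REG()/REG_STRIDE() entries have no decoded fields, and registers noted
+ * as "Valid bits defined by ipa->available" are presumed to carry one
+ * bit per endpoint, with the valid set determined at runtime.
+ */
+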
+static const u32 reg_comp_cfg_fmask[] = {
+ [COMP_CFG_ENABLE] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+};
+
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
+
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+
+static const u32 reg_counter_cfg_fmask[] = {
+ [EOT_COAL_GRANULARITY] = GENMASK(3, 0),
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
+
+static const u32 reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
+ 0x0000040c, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
+ 0x0000050c, 0x0020);
+
+static const u32 reg_endp_init_ctrl_fmask[] = {
+ [ENDP_SUSPEND] = BIT(0),
+ [ENDP_DELAY] = BIT(1),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+
+static const u32 reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+/* Entire register is a tick count */
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(31, 0),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
+
+static const u32 reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
+ [SRC_RSRC_GRP_67_RSRC_TYPE] = &reg_src_rsrc_grp_67_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_67_RSRC_TYPE] = &reg_dst_rsrc_grp_67_rsrc_type,
+ [ENDP_INIT_CTRL] = &reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
new file mode 100644
index 0000000000..78b1bf60cd
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 reg_comp_cfg_fmask[] = {
+ [COMP_CFG_ENABLE] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+};
+
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
+
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+
+static const u32 reg_counter_cfg_fmask[] = {
+ /* Bits 0-3 reserved */
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 reg_ipa_tx_cfg_fmask[] = {
+ [TX0_PREFETCH_DISABLE] = BIT(0),
+ [TX1_PREFETCH_DISABLE] = BIT(1),
+ [PREFETCH_ALMOST_EMPTY_SIZE] = GENMASK(4, 2),
+ /* Bits 5-31 reserved */
+};
+
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
+
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 reg_endp_init_ctrl_fmask[] = {
+ [ENDP_SUSPEND] = BIT(0),
+ [ENDP_DELAY] = BIT(1),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+
+static const u32 reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+/* Entire register is a tick count */
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(31, 0),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
+
+static const u32 reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CTRL] = &reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
new file mode 100644
index 0000000000..29e71cce4a
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
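+/* Compared with the v3.x files above, this IPA v4.11 layout appears to
+ * drop IPA_BCR and COUNTER_CFG in favour of the Qtime-based registers
+ * (QTIME_TIMESTAMP_CFG, TIMERS_XO_CLK_DIV_CFG, TIMERS_PULSE_GRAN_CFG),
+ * and ENDP_INIT_HOL_BLOCK_TIMER encodes a limit plus granularity select
+ * rather than a raw 32-bit tick count.
+ */
+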
+static const u32 reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
+ /* Bit 18 reserved */
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(23, 22),
+ /* Bits 24-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+
+static const u32 reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+ /* Bits 9-31 reserved */
+};
+
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ /* Bit 26 reserved */
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
+
+static const u32 reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
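Each *_fmask[] table above maps a symbolic field ID to the bit mask that field occupies within its 32-bit register, with every unused bit range called out as reserved. As a rough sketch of how such a mask can be consumed (not code from this patch; the helper name is made up), a value for the v4.11 ENDP_STATUS register could be packed with the generic bitfield helpers:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Illustrative only: enable status reporting and select the endpoint that
 * receives status packets, using the same masks recorded in
 * reg_endp_status_fmask[] for IPA v4.11.
 */
static u32 example_endp_status_val(u32 status_endpoint_id)
{
	u32 val = BIT(0);				/* STATUS_EN */

	/* STATUS_ENDP occupies bits 5:1 */
	val |= u32_encode_bits(status_endpoint_id, GENMASK(5, 1));

	return val;
}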
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
new file mode 100644
index 0000000000..bb7cf48814
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 reg_comp_cfg_fmask[] = {
+ /* Bit 0 reserved */
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ /* Bits 21-31 reserved */
+};
+
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+
+static const u32 reg_counter_cfg_fmask[] = {
+ /* Bits 0-3 reserved */
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ /* Bit 17 reserved */
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ [SSPND_PA_NO_BQ_STATE] = BIT(19),
+ /* Bits 20-31 reserved */
+};
+
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_SCALE] = GENMASK(12, 8),
+ /* Bits 13-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
+
+static const u32 reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_2 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
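The IPA_IRQ_* and IRQ_SUSPEND_* offsets above are written as a base plus 0x1000 times GSI_EE_AP, i.e. each execution environment gets its own 4 KiB-spaced copy of the interrupt registers (note the v4.2 file uses a 0x3xxx base where the v4.11 file uses 0x4xxx). A minimal sketch of that arithmetic follows, with an illustrative function name and ee_id parameter rather than the driver's real API:

/* Hypothetical helper: per-EE interrupt register offset.  With an EE id
 * of 0 the v4.2 IPA_IRQ_STTS offset works out to 0x3008; an EE id of 1
 * would give 0x4008.
 */
static u32 example_irq_reg_offset(u32 base, u32 ee_id)
{
	return base + 0x1000 * ee_id;
}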
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
new file mode 100644
index 0000000000..1c58f78851
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 reg_comp_cfg_fmask[] = {
+ /* Bit 0 reserved */
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+
+static const u32 reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ /* Bits 18-31 reserved */
+};
+
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+};
+
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
+
+static const u32 reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
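Each per-version table such as ipa_regs_v4_5 pairs the register-pointer array with its ARRAY_SIZE, so a register descriptor can be fetched by indexing with the register ID; entries a given IPA version does not define are left NULL by the designated initializers. A minimal sketch of such a lookup, assuming an illustrative helper name rather than the driver's actual accessor:

/* Illustrative only: fetch the descriptor for one register ID from a
 * per-version table like ipa_regs_v4_5.
 */
static const struct reg *example_reg_lookup(const struct regs *regs, u32 reg_id)
{
	if (reg_id >= regs->reg_count)
		return NULL;

	return regs->reg[reg_id];	/* NULL if this version lacks the register */
}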
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.7.c b/drivers/net/ipa/reg/ipa_reg-v4.7.c
new file mode 100644
index 0000000000..731824fce1
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.7.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+
+static const u32 reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+};
+
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
+
+static const u32 reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_7 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
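REG_STRIDE() and REG_STRIDE_FIELDS() record a stride alongside the base offset: the ENDP_* registers repeat every 0x70 bytes per endpoint, while registers such as AGGR_FORCE_CLOSE and the IRQ_SUSPEND_* group use a 0x4 stride (consecutive 32-bit words, with valid bits defined by ipa->available per the comments above). A rough sketch of the offset computation that stride implies, using made-up names:

/* Illustrative only: offset of one instance of a strided register, e.g.
 * the ENDP_STATUS word for an endpoint (0x840 + 0x70 * endpoint_id).
 */
static u32 example_strided_offset(u32 base, u32 stride, u32 index)
{
	return base + stride * index;
}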
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
new file mode 100644
index 0000000000..01f87b5290
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
+ /* Bit 18 reserved */
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(24, 22),
+ /* Bits 25-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+
+static const u32 reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+};
+
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ /* Bit 26 reserved */
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
+
+static const u32 reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v5.0.c b/drivers/net/ipa/reg/ipa_reg-v5.0.c
new file mode 100644
index 0000000000..95e0edff41
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v5.0.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(7, 0),
+ [MAX_CONS_PIPES] = GENMASK(15, 8),
+ [MAX_PROD_PIPES] = GENMASK(23, 16),
+ [PROD_LOWEST] = GENMASK(31, 24),
+};
+
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000000);
+
+static const u32 reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
+ /* Bit 18 reserved */
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(27, 22),
+ /* Bits 28-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000002c);
+
+static const u32 reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000034);
+
+static const u32 reg_route_fmask[] = {
+ [ROUTE_DEF_PIPE] = GENMASK(7, 0),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(15, 8),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(25, 16),
+ [ROUTE_DEF_HDR_TABLE] = BIT(26),
+ [ROUTE_DEF_RETAIN_HDR] = BIT(27),
+ [ROUTE_DIS] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+REG_FIELDS(ROUTE, route, 0x00000038);
+
+static const u32 reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000040);
+
+static const u32 reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000054);
+
+static const u32 reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000058);
+
+/* Valid bits defined by ipa->available */
+
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x00000100, 0x0004);
+
+static const u32 reg_filt_rout_cache_flush_fmask[] = {
+ [ROUTER_CACHE] = BIT(0),
+ /* Bits 1-3 reserved */
+ [FILTER_CACHE] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_CACHE_FLUSH, filt_rout_cache_flush, 0x00000404);
+
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x00000478);
+
+static const u32 reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bit 19 reserved */
+ [HOLB_STICKY_DROP_EN] = BIT(20),
+ /* Bits 21-31 reserved */
+};
+
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x00000488);
+
+static const u32 reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x000004a8);
+
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x000004ac);
+
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x000004b0);
+
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+ [PULSE_GRAN_3] = GENMASK(11, 9),
+ /* Bits 12-31 reserved */
+};
+
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x000004b4);
+
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
+ 0x0000050c, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000600, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000604, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000608, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
+ 0x0000060c, 0x0020);
+
+/* Valid bits defined by ipa->available */
+
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000006b0, 0x0004);
+
+static const u32 reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00001008, 0x0080);
+
+static const u32 reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000100c, 0x0080);
+
+static const u32 reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ /* Bit 26 reserved */
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00001010, 0x0080);
+
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ [HDR_BYTES_TO_REMOVE_VALID] = BIT(22),
+ /* Bit 23 reserved */
+ [HDR_BYTES_TO_REMOVE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00001014, 0x0080);
+
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00001018, 0x0080);
+
+static const u32 reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(11, 4),
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00001020, 0x0080);
+
+static const u32 reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00001024, 0x0080);
+
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000102c, 0x0080);
+
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = GENMASK(9, 8),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00001030, 0x0080);
+
+static const u32 reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00001034, 0x0080);
+
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00001038, 0x0080);
+
+static const u32 reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000103c, 0x0080);
+
+static const u32 reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(8, 1),
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00001040, 0x0080);
+
+static const u32 reg_endp_filter_cache_cfg_fmask[] = {
+ [CACHE_MSK_SRC_ID] = BIT(0),
+ [CACHE_MSK_SRC_IP] = BIT(1),
+ [CACHE_MSK_DST_IP] = BIT(2),
+ [CACHE_MSK_SRC_PORT] = BIT(3),
+ [CACHE_MSK_DST_PORT] = BIT(4),
+ [CACHE_MSK_PROTOCOL] = BIT(5),
+ [CACHE_MSK_METADATA] = BIT(6),
+ /* Bits 7-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_FILTER_CACHE_CFG, endp_filter_cache_cfg,
+ 0x0000105c, 0x0080);
+
+static const u32 reg_endp_router_cache_cfg_fmask[] = {
+ [CACHE_MSK_SRC_ID] = BIT(0),
+ [CACHE_MSK_SRC_IP] = BIT(1),
+ [CACHE_MSK_DST_IP] = BIT(2),
+ [CACHE_MSK_SRC_PORT] = BIT(3),
+ [CACHE_MSK_DST_PORT] = BIT(4),
+ [CACHE_MSK_PROTOCOL] = BIT(5),
+ [CACHE_MSK_METADATA] = BIT(6),
+ /* Bits 7-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_ROUTER_CACHE_CFG, endp_router_cache_cfg,
+ 0x00001070, 0x0080);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x0000c008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000c00c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x0000c010 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000c01c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x0000c030 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x0000c050 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x0000c070 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_CACHE_FLUSH] = &reg_filt_rout_cache_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
+ [SRC_RSRC_GRP_67_RSRC_TYPE] = &reg_src_rsrc_grp_67_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_67_RSRC_TYPE] = &reg_dst_rsrc_grp_67_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_CACHE_CFG] = &reg_endp_filter_cache_cfg,
+ [ENDP_ROUTER_CACHE_CFG] = &reg_endp_router_cache_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v5_0 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};