author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/ethernet/huawei
parent     Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/huawei')
-rw-r--r--  drivers/net/ethernet/huawei/Kconfig | 20
-rw-r--r--  drivers/net/ethernet/huawei/Makefile | 6
-rw-r--r--  drivers/net/ethernet/huawei/hinic/Kconfig | 14
-rw-r--r--  drivers/net/ethernet/huawei/hinic/Makefile | 8
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_common.c | 71
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_common.h | 29
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_debugfs.c | 324
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_debugfs.h | 113
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_dev.h | 132
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_devlink.c | 584
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_devlink.h | 119
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_ethtool.c | 1861
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 987
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h | 203
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 963
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 185
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 139
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 1197
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 660
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 996
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h | 262
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 419
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 295
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 634
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 123
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c | 1515
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h | 172
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 681
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 181
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 971
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 232
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h | 210
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 904
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h | 111
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h | 430
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 1506
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.c | 1412
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.h | 900
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.c | 631
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.h | 51
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_sriov.c | 1364
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_sriov.h | 107
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c | 934
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.h | 54
44 files changed, 22710 insertions, 0 deletions
diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig
new file mode 100644
index 0000000000..c05fce15eb
--- /dev/null
+++ b/drivers/net/ethernet/huawei/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Huawei driver configuration
+#
+
+config NET_VENDOR_HUAWEI
+ bool "Huawei devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Huawei cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_HUAWEI
+
+source "drivers/net/ethernet/huawei/hinic/Kconfig"
+
+endif # NET_VENDOR_HUAWEI
diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile
new file mode 100644
index 0000000000..2549ad5afe
--- /dev/null
+++ b/drivers/net/ethernet/huawei/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Huawei device drivers.
+#
+
+obj-$(CONFIG_HINIC) += hinic/
diff --git a/drivers/net/ethernet/huawei/hinic/Kconfig b/drivers/net/ethernet/huawei/hinic/Kconfig
new file mode 100644
index 0000000000..b47bd5440c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Huawei driver configuration
+#
+
+config HINIC
+ tristate "Huawei Intelligent PCIE Network Interface Card"
+ depends on (PCI_MSI && (X86 || ARM64))
+ select NET_DEVLINK
+ help
+ This driver supports HiNIC PCIE Ethernet cards.
+ To compile this driver as part of the kernel, choose Y here.
+ If unsure, choose N.
+ The default is to build this driver as a module.
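
For reference, a minimal .config fragment matching the help text above: HINIC built as a module, with NET_DEVLINK pulled in automatically via the select, and PCI_MSI plus the X86/ARM64 dependency satisfied by the rest of the kernel configuration.

    CONFIG_NET_VENDOR_HUAWEI=y
    CONFIG_HINIC=m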
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
new file mode 100644
index 0000000000..2f89119c9b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_HINIC) += hinic.o
+
+hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
+ hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
+ hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
+ hinic_common.o hinic_ethtool.o hinic_devlink.o hinic_hw_mbox.o \
+ hinic_sriov.o hinic_debugfs.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.c b/drivers/net/ethernet/huawei/hinic/hinic_common.c
new file mode 100644
index 0000000000..8e9b4a6c88
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_common.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#include "hinic_common.h"
+
+/**
+ * hinic_cpu_to_be32 - convert data to big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert
+ **/
+void hinic_cpu_to_be32(void *data, int len)
+{
+ u32 *mem = data;
+ int i;
+
+ len = len / sizeof(u32);
+
+ for (i = 0; i < len; i++) {
+ *mem = cpu_to_be32(*mem);
+ mem++;
+ }
+}
+
+/**
+ * hinic_be32_to_cpu - convert data from big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert
+ **/
+void hinic_be32_to_cpu(void *data, int len)
+{
+ u32 *mem = data;
+ int i;
+
+ len = len / sizeof(u32);
+
+ for (i = 0; i < len; i++) {
+ *mem = be32_to_cpu(*mem);
+ mem++;
+ }
+}
+
+/**
+ * hinic_set_sge - set dma area in scatter gather entry
+ * @sge: scatter gather entry
+ * @addr: dma address
+ * @len: length of relevant data in the dma address
+ **/
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len)
+{
+ sge->hi_addr = upper_32_bits(addr);
+ sge->lo_addr = lower_32_bits(addr);
+ sge->len = len;
+}
+
+/**
+ * hinic_sge_to_dma - get dma address from scatter gather entry
+ * @sge: scatter gather entry
+ *
+ * Return dma address of sg entry
+ **/
+dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge)
+{
+ return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr);
+}
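
hinic_set_sge() and hinic_sge_to_dma() above split a dma_addr_t across two 32-bit words and reassemble it. A standalone userspace sketch of the same round trip, with stdint types standing in for the kernel's dma_addr_t/u32 (the struct and helper names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct sge {
	uint32_t hi_addr;
	uint32_t lo_addr;
	uint32_t len;
};

static void set_sge(struct sge *sge, uint64_t addr, uint32_t len)
{
	sge->hi_addr = (uint32_t)(addr >> 32);	/* upper_32_bits(addr) */
	sge->lo_addr = (uint32_t)addr;		/* lower_32_bits(addr) */
	sge->len = len;
}

static uint64_t sge_to_dma(const struct sge *sge)
{
	return ((uint64_t)sge->hi_addr << 32) | sge->lo_addr;
}

int main(void)
{
	struct sge sge;

	set_sge(&sge, 0x12345678abcdef00ULL, 64);
	printf("hi=0x%08x lo=0x%08x round-trip=0x%016llx\n", sge.hi_addr,
	       sge.lo_addr, (unsigned long long)sge_to_dma(&sge));
	return 0;
}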
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/hinic_common.h
new file mode 100644
index 0000000000..a0de9d9644
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_common.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_COMMON_H
+#define HINIC_COMMON_H
+
+#include <linux/types.h>
+
+#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
+#define LOWER_8_BITS(data) ((data) & 0xFF)
+
+struct hinic_sge {
+ u32 hi_addr;
+ u32 lo_addr;
+ u32 len;
+};
+
+void hinic_cpu_to_be32(void *data, int len);
+
+void hinic_be32_to_cpu(void *data, int len);
+
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len);
+
+dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge);
+
+#endif
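
hinic_cpu_to_be32()/hinic_be32_to_cpu() declared above convert a buffer in place one 32-bit word at a time; len is in bytes, and the integer division silently drops any trailing partial word. A userspace sketch using htonl() as a stand-in for cpu_to_be32() (on big-endian hosts both are no-ops); the function name is illustrative:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of hinic_cpu_to_be32(); 'len' is in bytes. */
static void buf_cpu_to_be32(void *data, int len)
{
	uint32_t *mem = data;
	int i;

	for (i = 0; i < len / (int)sizeof(uint32_t); i++)
		mem[i] = htonl(mem[i]);
}

int main(void)
{
	uint32_t words[2] = { 0x11223344, 0xaabbccdd };

	buf_cpu_to_be32(words, sizeof(words));
	printf("0x%08x 0x%08x\n", words[0], words[1]);
	return 0;
}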
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
new file mode 100644
index 0000000000..061952c6c2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hinic_debugfs.h"
+
+static struct dentry *hinic_dbgfs_root;
+
+enum sq_dbg_info {
+ GLB_SQ_ID,
+ SQ_PI,
+ SQ_CI,
+ SQ_FI,
+ SQ_MSIX_ENTRY,
+};
+
+static char *sq_fields[] = {"glb_sq_id", "sq_pi", "sq_ci", "sq_fi", "sq_msix_entry"};
+
+static u64 hinic_dbg_get_sq_info(struct hinic_dev *nic_dev, struct hinic_sq *sq, int idx)
+{
+ struct hinic_wq *wq = sq->wq;
+
+ switch (idx) {
+ case GLB_SQ_ID:
+ return nic_dev->hwdev->func_to_io.global_qpn + sq->qid;
+ case SQ_PI:
+ return atomic_read(&wq->prod_idx) & wq->mask;
+ case SQ_CI:
+ return atomic_read(&wq->cons_idx) & wq->mask;
+ case SQ_FI:
+ return be16_to_cpu(*(__be16 *)(sq->hw_ci_addr)) & wq->mask;
+ case SQ_MSIX_ENTRY:
+ return sq->msix_entry;
+ }
+
+ return 0;
+}
+
+enum rq_dbg_info {
+ GLB_RQ_ID,
+ RQ_HW_PI,
+ RQ_SW_CI,
+ RQ_SW_PI,
+ RQ_MSIX_ENTRY,
+};
+
+static char *rq_fields[] = {"glb_rq_id", "rq_hw_pi", "rq_sw_ci", "rq_sw_pi", "rq_msix_entry"};
+
+static u64 hinic_dbg_get_rq_info(struct hinic_dev *nic_dev, struct hinic_rq *rq, int idx)
+{
+ struct hinic_wq *wq = rq->wq;
+
+ switch (idx) {
+ case GLB_RQ_ID:
+ return nic_dev->hwdev->func_to_io.global_qpn + rq->qid;
+ case RQ_HW_PI:
+ return be16_to_cpu(*(__be16 *)(rq->pi_virt_addr)) & wq->mask;
+ case RQ_SW_CI:
+ return atomic_read(&wq->cons_idx) & wq->mask;
+ case RQ_SW_PI:
+ return atomic_read(&wq->prod_idx) & wq->mask;
+ case RQ_MSIX_ENTRY:
+ return rq->msix_entry;
+ }
+
+ return 0;
+}
+
+enum func_tbl_info {
+ VALID,
+ RX_MODE,
+ MTU,
+ RQ_DEPTH,
+ QUEUE_NUM,
+};
+
+static char *func_table_fields[] = {"valid", "rx_mode", "mtu", "rq_depth", "cfg_q_num"};
+
+static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
+{
+ struct tag_sml_funcfg_tbl *funcfg_table_elem;
+ struct hinic_cmd_lt_rd *read_data;
+ u16 out_size = sizeof(*read_data);
+ int ret = ~0;
+ int err;
+
+ read_data = kzalloc(sizeof(*read_data), GFP_KERNEL);
+ if (!read_data)
+ return ~0;
+
+ read_data->node = TBL_ID_FUNC_CFG_SM_NODE;
+ read_data->inst = TBL_ID_FUNC_CFG_SM_INST;
+ read_data->entry_size = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE;
+ read_data->lt_index = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
+ read_data->len = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE;
+
+ err = hinic_port_msg_cmd(nic_dev->hwdev, HINIC_PORT_CMD_RD_LINE_TBL, read_data,
+ sizeof(*read_data), read_data, &out_size);
+ if (err || out_size != sizeof(*read_data) || read_data->status) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to get func table, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, read_data->status, out_size);
+ kfree(read_data);
+ return ~0;
+ }
+
+ funcfg_table_elem = (struct tag_sml_funcfg_tbl *)read_data->data;
+
+ switch (idx) {
+ case VALID:
+ ret = funcfg_table_elem->dw0.bs.valid;
+ break;
+ case RX_MODE:
+ ret = funcfg_table_elem->dw0.bs.nic_rx_mode;
+ break;
+ case MTU:
+ ret = funcfg_table_elem->dw1.bs.mtu;
+ break;
+ case RQ_DEPTH:
+ ret = funcfg_table_elem->dw13.bs.cfg_rq_depth;
+ break;
+ case QUEUE_NUM:
+ ret = funcfg_table_elem->dw13.bs.cfg_q_num;
+ break;
+ }
+
+ kfree(read_data);
+
+ return ret;
+}
+
+static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct hinic_debug_priv *dbg;
+ char ret_buf[20];
+ int *desc;
+ u64 out;
+ int ret;
+
+ desc = filp->private_data;
+ dbg = container_of(desc, struct hinic_debug_priv, field_id[*desc]);
+
+ switch (dbg->type) {
+ case HINIC_DBG_SQ_INFO:
+ out = hinic_dbg_get_sq_info(dbg->dev, dbg->object, *desc);
+ break;
+
+ case HINIC_DBG_RQ_INFO:
+ out = hinic_dbg_get_rq_info(dbg->dev, dbg->object, *desc);
+ break;
+
+ case HINIC_DBG_FUNC_TABLE:
+ out = hinic_dbg_get_func_table(dbg->dev, *desc);
+ break;
+
+ default:
+ netif_warn(dbg->dev, drv, dbg->dev->netdev, "Invalid hinic debug cmd: %d\n",
+ dbg->type);
+ return -EINVAL;
+ }
+
+ ret = snprintf(ret_buf, sizeof(ret_buf), "0x%llx\n", out);
+
+ return simple_read_from_buffer(buffer, count, ppos, ret_buf, ret);
+}
+
+static const struct file_operations hinic_dbg_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hinic_dbg_cmd_read,
+};
+
+static int create_dbg_files(struct hinic_dev *dev, enum hinic_dbg_type type, void *data,
+ struct dentry *root, struct hinic_debug_priv **dbg, char **field,
+ int nfile)
+{
+ struct hinic_debug_priv *tmp;
+ int i;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->dev = dev;
+ tmp->object = data;
+ tmp->type = type;
+ tmp->root = root;
+
+ for (i = 0; i < nfile; i++) {
+ tmp->field_id[i] = i;
+ debugfs_create_file(field[i], 0400, root, &tmp->field_id[i], &hinic_dbg_cmd_fops);
+ }
+
+ *dbg = tmp;
+
+ return 0;
+}
+
+static void rem_dbg_files(struct hinic_debug_priv *dbg)
+{
+ if (dbg->type != HINIC_DBG_FUNC_TABLE)
+ debugfs_remove_recursive(dbg->root);
+
+ kfree(dbg);
+}
+
+int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id)
+{
+ struct hinic_sq *sq;
+ struct dentry *root;
+ char sub_dir[16];
+
+ sq = dev->txqs[sq_id].sq;
+
+ sprintf(sub_dir, "0x%x", sq_id);
+
+ root = debugfs_create_dir(sub_dir, dev->sq_dbgfs);
+
+ return create_dbg_files(dev, HINIC_DBG_SQ_INFO, sq, root, &sq->dbg, sq_fields,
+ ARRAY_SIZE(sq_fields));
+}
+
+void hinic_sq_debug_rem(struct hinic_sq *sq)
+{
+ if (sq->dbg)
+ rem_dbg_files(sq->dbg);
+}
+
+int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id)
+{
+ struct hinic_rq *rq;
+ struct dentry *root;
+ char sub_dir[16];
+
+ rq = dev->rxqs[rq_id].rq;
+
+ sprintf(sub_dir, "0x%x", rq_id);
+
+ root = debugfs_create_dir(sub_dir, dev->rq_dbgfs);
+
+ return create_dbg_files(dev, HINIC_DBG_RQ_INFO, rq, root, &rq->dbg, rq_fields,
+ ARRAY_SIZE(rq_fields));
+}
+
+void hinic_rq_debug_rem(struct hinic_rq *rq)
+{
+ if (rq->dbg)
+ rem_dbg_files(rq->dbg);
+}
+
+int hinic_func_table_debug_add(struct hinic_dev *dev)
+{
+ if (HINIC_IS_VF(dev->hwdev->hwif))
+ return 0;
+
+ return create_dbg_files(dev, HINIC_DBG_FUNC_TABLE, dev, dev->func_tbl_dbgfs, &dev->dbg,
+ func_table_fields, ARRAY_SIZE(func_table_fields));
+}
+
+void hinic_func_table_debug_rem(struct hinic_dev *dev)
+{
+ if (!HINIC_IS_VF(dev->hwdev->hwif) && dev->dbg)
+ rem_dbg_files(dev->dbg);
+}
+
+void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->sq_dbgfs = debugfs_create_dir("SQs", nic_dev->dbgfs_root);
+}
+
+void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->sq_dbgfs);
+}
+
+void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->rq_dbgfs = debugfs_create_dir("RQs", nic_dev->dbgfs_root);
+}
+
+void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->rq_dbgfs);
+}
+
+void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ nic_dev->func_tbl_dbgfs = debugfs_create_dir("func_table", nic_dev->dbgfs_root);
+}
+
+void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ debugfs_remove_recursive(nic_dev->func_tbl_dbgfs);
+}
+
+void hinic_dbg_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->dbgfs_root = debugfs_create_dir(pci_name(nic_dev->hwdev->hwif->pdev),
+ hinic_dbgfs_root);
+}
+
+void hinic_dbg_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->dbgfs_root);
+ nic_dev->dbgfs_root = NULL;
+}
+
+void hinic_dbg_register_debugfs(const char *debugfs_dir_name)
+{
+ hinic_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+}
+
+void hinic_dbg_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hinic_dbgfs_root);
+ hinic_dbgfs_root = NULL;
+}
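
The read path above stores each file's index in dbg->field_id[i], registers &field_id[i] as the debugfs private data, and later recovers the owning hinic_debug_priv with container_of(desc, ..., field_id[*desc]), which works because field_id[i] == i. A standalone sketch of the same recovery (the kernel expression puts a variable array index inside offsetof(), a GNU extension; the pointer arithmetic below is the portable equivalent, with illustrative names):

#include <stddef.h>
#include <stdio.h>

struct priv {
	const char *name;
	int field_id[4];
};

int main(void)
{
	struct priv p = { .name = "sq0" };
	struct priv *owner;
	int i, *desc;

	for (i = 0; i < 4; i++)
		p.field_id[i] = i;	/* field_id[i] == i, as in create_dbg_files() */

	/* A file handler sees only &p.field_id[2]; step back *desc array
	 * slots to field_id[0], then subtract the member offset. */
	desc = &p.field_id[2];
	owner = (struct priv *)((char *)(desc - *desc) -
				offsetof(struct priv, field_id));
	printf("%s field %d\n", owner->name, *desc);
	return 0;
}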
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
new file mode 100644
index 0000000000..e10f739d83
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_DEBUGFS_H
+#define HINIC_DEBUGFS_H
+
+#include "hinic_dev.h"
+
+#define TBL_ID_FUNC_CFG_SM_NODE 11
+#define TBL_ID_FUNC_CFG_SM_INST 1
+
+#define HINIC_FUNCTION_CONFIGURE_TABLE_SIZE 64
+
+struct hinic_cmd_lt_rd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ unsigned char node;
+ unsigned char inst;
+ unsigned char entry_size;
+ unsigned char rsvd;
+ unsigned int lt_index;
+ unsigned int offset;
+ unsigned int len;
+ unsigned char data[100];
+};
+
+struct tag_sml_funcfg_tbl {
+ union {
+ struct {
+ u32 rsvd0 :8;
+ u32 nic_rx_mode :5;
+ u32 rsvd1 :18;
+ u32 valid :1;
+ } bs;
+
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+ u32 vlan_id :12;
+ u32 vlan_mode :3;
+ u32 fast_recycled_mode :1;
+ u32 mtu :16;
+ } bs;
+
+ u32 value;
+ } dw1;
+
+ u32 dw2;
+ u32 dw3;
+ u32 dw4;
+ u32 dw5;
+ u32 dw6;
+ u32 dw7;
+ u32 dw8;
+ u32 dw9;
+ u32 dw10;
+ u32 dw11;
+ u32 dw12;
+
+ union {
+ struct {
+ u32 rsvd2 :15;
+ u32 cfg_q_num :9;
+ u32 cfg_rq_depth :6;
+ u32 vhd_type :2;
+ } bs;
+
+ u32 value;
+ } dw13;
+
+ u32 dw14;
+ u32 dw15;
+};
+
+int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id);
+
+void hinic_sq_debug_rem(struct hinic_sq *sq);
+
+int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id);
+
+void hinic_rq_debug_rem(struct hinic_rq *rq);
+
+int hinic_func_table_debug_add(struct hinic_dev *dev);
+
+void hinic_func_table_debug_rem(struct hinic_dev *dev);
+
+void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_dbg_init(struct hinic_dev *nic_dev);
+
+void hinic_dbg_uninit(struct hinic_dev *nic_dev);
+
+void hinic_dbg_register_debugfs(const char *debugfs_dir_name);
+
+void hinic_dbg_unregister_debugfs(void);
+
+#endif
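
tag_sml_funcfg_tbl above decodes raw function-table words through union-of-bitfield overlays. The same pattern in isolation, copying the dw13 layout (bitfield ordering is compiler- and endianness-dependent, which the driver can assume for its supported targets):

#include <stdint.h>
#include <stdio.h>

union dw13 {
	struct {
		uint32_t rsvd2		: 15;
		uint32_t cfg_q_num	: 9;
		uint32_t cfg_rq_depth	: 6;
		uint32_t vhd_type	: 2;
	} bs;
	uint32_t value;
};

int main(void)
{
	union dw13 dw = { .bs = { .cfg_q_num = 16, .cfg_rq_depth = 10 } };

	/* The driver reads fields the same way after copying the raw
	 * table bytes over the union. */
	printf("raw=0x%08x q_num=%u rq_depth=%u\n", dw.value,
	       dw.bs.cfg_q_num, dw.bs.cfg_rq_depth);
	return 0;
}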
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
new file mode 100644
index 0000000000..52ea97c818
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_DEV_H
+#define HINIC_DEV_H
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+
+#include "hinic_hw_dev.h"
+#include "hinic_tx.h"
+#include "hinic_rx.h"
+#include "hinic_sriov.h"
+
+#define HINIC_DRV_NAME "hinic"
+
+#define LP_PKT_CNT 64
+
+#define HINIC_MAX_JUMBO_FRAME_SIZE 15872
+#define HINIC_MAX_MTU_SIZE (HINIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN)
+#define HINIC_MIN_MTU_SIZE 256
+
+enum hinic_flags {
+ HINIC_LINK_UP = BIT(0),
+ HINIC_INTF_UP = BIT(1),
+ HINIC_RSS_ENABLE = BIT(2),
+ HINIC_LINK_DOWN = BIT(3),
+ HINIC_LP_TEST = BIT(4),
+};
+
+struct hinic_rx_mode_work {
+ struct work_struct work;
+ u32 rx_mode;
+};
+
+struct hinic_rss_type {
+ u8 tcp_ipv6_ext;
+ u8 ipv6_ext;
+ u8 tcp_ipv6;
+ u8 ipv6;
+ u8 tcp_ipv4;
+ u8 ipv4;
+ u8 udp_ipv6;
+ u8 udp_ipv4;
+};
+
+enum hinic_rss_hash_type {
+ HINIC_RSS_HASH_ENGINE_TYPE_XOR,
+ HINIC_RSS_HASH_ENGINE_TYPE_TOEP,
+ HINIC_RSS_HASH_ENGINE_TYPE_MAX,
+};
+
+struct hinic_intr_coal_info {
+ u8 pending_limt;
+ u8 coalesce_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+enum hinic_dbg_type {
+ HINIC_DBG_SQ_INFO,
+ HINIC_DBG_RQ_INFO,
+ HINIC_DBG_FUNC_TABLE,
+};
+
+struct hinic_debug_priv {
+ struct hinic_dev *dev;
+ void *object;
+ enum hinic_dbg_type type;
+ struct dentry *root;
+ int field_id[64];
+};
+
+struct hinic_dev {
+ struct net_device *netdev;
+ struct hinic_hwdev *hwdev;
+
+ u32 msg_enable;
+ unsigned int tx_weight;
+ unsigned int rx_weight;
+ u16 num_qps;
+ u16 max_qps;
+
+ unsigned int flags;
+
+ struct semaphore mgmt_lock;
+ unsigned long *vlan_bitmap;
+
+ struct hinic_rx_mode_work rx_mode_work;
+ struct workqueue_struct *workq;
+
+ struct hinic_txq *txqs;
+ struct hinic_rxq *rxqs;
+ u16 sq_depth;
+ u16 rq_depth;
+
+ u8 rss_tmpl_idx;
+ u8 rss_hash_engine;
+ u16 num_rss;
+ u16 rss_limit;
+ struct hinic_rss_type rss_type;
+ u8 *rss_hkey_user;
+ s32 *rss_indir_user;
+ struct hinic_intr_coal_info *rx_intr_coalesce;
+ struct hinic_intr_coal_info *tx_intr_coalesce;
+ struct hinic_sriov_info sriov_info;
+ int lb_test_rx_idx;
+ int lb_pkt_len;
+ u8 *lb_test_rx_buf;
+
+ struct dentry *dbgfs_root;
+ struct dentry *sq_dbgfs;
+ struct dentry *rq_dbgfs;
+ struct dentry *func_tbl_dbgfs;
+ struct hinic_debug_priv *dbg;
+ struct devlink *devlink;
+ bool cable_unplugged;
+ bool module_unrecognized;
+};
+
+struct hinic_devlink_priv {
+ struct hinic_hwdev *hwdev;
+ struct devlink_health_reporter *hw_fault_reporter;
+ struct devlink_health_reporter *fw_fault_reporter;
+};
+
+#endif
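
hinic_dev::flags packs the state bits from enum hinic_flags above into one word; a tiny sketch of how such BIT() masks are set, cleared, and tested:

#include <stdio.h>

#define BIT(n)	(1U << (n))

enum flags {
	LINK_UP	   = BIT(0),
	INTF_UP	   = BIT(1),
	RSS_ENABLE = BIT(2),
};

int main(void)
{
	unsigned int flags = 0;

	flags |= LINK_UP | INTF_UP;	/* interface opened, link came up */
	flags &= ~INTF_UP;		/* interface closed again */

	printf("link=%d intf=%d rss=%d\n", !!(flags & LINK_UP),
	       !!(flags & INTF_UP), !!(flags & RSS_ENABLE));
	return 0;
}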
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
new file mode 100644
index 0000000000..1749d26f4b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#include <linux/netlink.h>
+#include <net/devlink.h>
+#include <linux/firmware.h>
+
+#include "hinic_port.h"
+#include "hinic_devlink.h"
+#include "hinic_hw_dev.h"
+
+static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf,
+ u32 image_size, struct host_image_st *host_image)
+{
+ struct fw_image_st *fw_image = NULL;
+ u32 len = 0;
+ u32 i;
+
+ fw_image = (struct fw_image_st *)buf;
+
+ if (fw_image->fw_magic != HINIC_MAGIC_NUM) {
+ dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong fw_magic read from file, fw_magic: 0x%x\n",
+ fw_image->fw_magic);
+ return false;
+ }
+
+ if (fw_image->fw_info.fw_section_cnt > MAX_FW_TYPE_NUM) {
+ dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong fw_type_num read from file, fw_type_num: 0x%x\n",
+ fw_image->fw_info.fw_section_cnt);
+ return false;
+ }
+
+ for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) {
+ len += fw_image->fw_section_info[i].fw_section_len;
+ host_image->image_section_info[i] = fw_image->fw_section_info[i];
+ }
+
+ if (len != fw_image->fw_len ||
+ (fw_image->fw_len + UPDATEFW_IMAGE_HEAD_SIZE) != image_size) {
+ dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong data size read from file\n");
+ return false;
+ }
+
+ host_image->image_info.up_total_len = fw_image->fw_len;
+ host_image->image_info.fw_version = fw_image->fw_version;
+ host_image->section_type_num = fw_image->fw_info.fw_section_cnt;
+ host_image->device_id = fw_image->device_id;
+
+ return true;
+}
+
+static bool check_image_integrity(struct hinic_devlink_priv *priv,
+ struct host_image_st *host_image,
+ u32 update_type)
+{
+ u32 collect_section_type = 0;
+ u32 i, type;
+
+ for (i = 0; i < host_image->section_type_num; i++) {
+ type = host_image->image_section_info[i].fw_section_type;
+ if (collect_section_type & (1U << type)) {
+ dev_err(&priv->hwdev->hwif->pdev->dev, "Duplicate section type: %u\n",
+ type);
+ return false;
+ }
+ collect_section_type |= (1U << type);
+ }
+
+ if (update_type == FW_UPDATE_COLD &&
+ (((collect_section_type & _IMAGE_COLD_SUB_MODULES_MUST_IN) ==
+ _IMAGE_COLD_SUB_MODULES_MUST_IN) ||
+ collect_section_type == _IMAGE_CFG_SUB_MODULES_MUST_IN))
+ return true;
+
+ if (update_type == FW_UPDATE_HOT &&
+ (collect_section_type & _IMAGE_HOT_SUB_MODULES_MUST_IN) ==
+ _IMAGE_HOT_SUB_MODULES_MUST_IN)
+ return true;
+
+ if (update_type == FW_UPDATE_COLD)
+ dev_err(&priv->hwdev->hwif->pdev->dev, "Check file integrity failed, valid: 0x%x or 0x%lx, current: 0x%x\n",
+ _IMAGE_COLD_SUB_MODULES_MUST_IN,
+ _IMAGE_CFG_SUB_MODULES_MUST_IN, collect_section_type);
+ else
+ dev_err(&priv->hwdev->hwif->pdev->dev, "Check file integrity failed, valid:0x%x, current: 0x%x\n",
+ _IMAGE_HOT_SUB_MODULES_MUST_IN, collect_section_type);
+
+ return false;
+}
+
+static int check_image_device_type(struct hinic_devlink_priv *priv,
+ u32 image_device_type)
+{
+ struct hinic_comm_board_info board_info = {0};
+
+ if (hinic_get_board_info(priv->hwdev, &board_info)) {
+ dev_err(&priv->hwdev->hwif->pdev->dev, "Get board info failed\n");
+ return false;
+ }
+
+ if (image_device_type == board_info.info.board_type)
+ return true;
+
+ dev_err(&priv->hwdev->hwif->pdev->dev, "The device type of upgrade file doesn't match the device type of current firmware, please check the upgrade file\n");
+ dev_err(&priv->hwdev->hwif->pdev->dev, "The image device type: 0x%x, firmware device type: 0x%x\n",
+ image_device_type, board_info.info.board_type);
+
+ return false;
+}
+
+static int hinic_flash_fw(struct hinic_devlink_priv *priv, const u8 *data,
+ struct host_image_st *host_image)
+{
+ u32 section_remain_send_len, send_fragment_len, send_pos, up_total_len;
+ struct hinic_cmd_update_fw *fw_update_msg = NULL;
+ u32 section_type, section_crc, section_version;
+ u32 i, len, section_len, section_offset;
+ u16 out_size = sizeof(*fw_update_msg);
+ int total_len_flag = 0;
+ int err;
+
+ fw_update_msg = kzalloc(sizeof(*fw_update_msg), GFP_KERNEL);
+ if (!fw_update_msg)
+ return -ENOMEM;
+
+ up_total_len = host_image->image_info.up_total_len;
+
+ for (i = 0; i < host_image->section_type_num; i++) {
+ len = host_image->image_section_info[i].fw_section_len;
+ if (host_image->image_section_info[i].fw_section_type ==
+ UP_FW_UPDATE_BOOT) {
+ up_total_len = up_total_len - len;
+ break;
+ }
+ }
+
+ for (i = 0; i < host_image->section_type_num; i++) {
+ section_len =
+ host_image->image_section_info[i].fw_section_len;
+ section_offset =
+ host_image->image_section_info[i].fw_section_offset;
+ section_remain_send_len = section_len;
+ section_type =
+ host_image->image_section_info[i].fw_section_type;
+ section_crc = host_image->image_section_info[i].fw_section_crc;
+ section_version =
+ host_image->image_section_info[i].fw_section_version;
+
+ if (section_type == UP_FW_UPDATE_BOOT)
+ continue;
+
+ send_fragment_len = 0;
+ send_pos = 0;
+
+ while (section_remain_send_len > 0) {
+ if (!total_len_flag) {
+ fw_update_msg->total_len = up_total_len;
+ total_len_flag = 1;
+ } else {
+ fw_update_msg->total_len = 0;
+ }
+
+ memset(fw_update_msg->data, 0, MAX_FW_FRAGMENT_LEN);
+
+ fw_update_msg->ctl_info.SF =
+ (section_remain_send_len == section_len) ?
+ true : false;
+ fw_update_msg->section_info.FW_section_CRC = section_crc;
+ fw_update_msg->fw_section_version = section_version;
+ fw_update_msg->ctl_info.flag = UP_TYPE_A;
+
+ if (section_type <= UP_FW_UPDATE_UP_DATA_B) {
+ fw_update_msg->section_info.FW_section_type =
+ (section_type % 2) ?
+ UP_FW_UPDATE_UP_DATA :
+ UP_FW_UPDATE_UP_TEXT;
+
+ fw_update_msg->ctl_info.flag = UP_TYPE_B;
+ if (section_type <= UP_FW_UPDATE_UP_DATA_A)
+ fw_update_msg->ctl_info.flag = UP_TYPE_A;
+ } else {
+ fw_update_msg->section_info.FW_section_type =
+ section_type - 0x2;
+ }
+
+ fw_update_msg->setion_total_len = section_len;
+ fw_update_msg->section_offset = send_pos;
+
+ if (section_remain_send_len <= MAX_FW_FRAGMENT_LEN) {
+ fw_update_msg->ctl_info.SL = true;
+ fw_update_msg->ctl_info.fragment_len =
+ section_remain_send_len;
+ send_fragment_len += section_remain_send_len;
+ } else {
+ fw_update_msg->ctl_info.SL = false;
+ fw_update_msg->ctl_info.fragment_len =
+ MAX_FW_FRAGMENT_LEN;
+ send_fragment_len += MAX_FW_FRAGMENT_LEN;
+ }
+
+ memcpy(fw_update_msg->data,
+ data + UPDATEFW_IMAGE_HEAD_SIZE +
+ section_offset + send_pos,
+ fw_update_msg->ctl_info.fragment_len);
+
+ err = hinic_port_msg_cmd(priv->hwdev,
+ HINIC_PORT_CMD_UPDATE_FW,
+ fw_update_msg,
+ sizeof(*fw_update_msg),
+ fw_update_msg, &out_size);
+ if (err || !out_size || fw_update_msg->status) {
+ dev_err(&priv->hwdev->hwif->pdev->dev, "Failed to update firmware, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, fw_update_msg->status, out_size);
+ err = fw_update_msg->status ?
+ fw_update_msg->status : -EIO;
+ kfree(fw_update_msg);
+ return err;
+ }
+
+ send_pos = send_fragment_len;
+ section_remain_send_len = section_len -
+ send_fragment_len;
+ }
+ }
+
+ kfree(fw_update_msg);
+
+ return 0;
+}
+
+static int hinic_firmware_update(struct hinic_devlink_priv *priv,
+ const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ struct host_image_st host_image;
+ int err;
+
+ memset(&host_image, 0, sizeof(struct host_image_st));
+
+ if (!check_image_valid(priv, fw->data, fw->size, &host_image) ||
+ !check_image_integrity(priv, &host_image, FW_UPDATE_COLD) ||
+ !check_image_device_type(priv, host_image.device_id)) {
+ NL_SET_ERR_MSG_MOD(extack, "Check image failed");
+ return -EINVAL;
+ }
+
+ dev_info(&priv->hwdev->hwif->pdev->dev, "Flash firmware begin\n");
+
+ err = hinic_flash_fw(priv, fw->data, &host_image);
+ if (err) {
+ if (err == HINIC_FW_DISMATCH_ERROR) {
+ dev_err(&priv->hwdev->hwif->pdev->dev, "Firmware image doesn't match this card, please use newer image, err: %d\n",
+ err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image doesn't match this card, please use newer image");
+ } else {
+ dev_err(&priv->hwdev->hwif->pdev->dev, "Send firmware image data failed, err: %d\n",
+ err);
+ NL_SET_ERR_MSG_MOD(extack, "Send firmware image data failed");
+ }
+
+ return err;
+ }
+
+ dev_info(&priv->hwdev->hwif->pdev->dev, "Flash firmware end\n");
+
+ return 0;
+}
+
+static int hinic_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct hinic_devlink_priv *priv = devlink_priv(devlink);
+
+ return hinic_firmware_update(priv, params->fw, extack);
+}
+
+static const struct devlink_ops hinic_devlink_ops = {
+ .flash_update = hinic_devlink_flash_update,
+};
+
+struct devlink *hinic_devlink_alloc(struct device *dev)
+{
+ return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev), dev);
+}
+
+void hinic_devlink_free(struct devlink *devlink)
+{
+ devlink_free(devlink);
+}
+
+void hinic_devlink_register(struct hinic_devlink_priv *priv)
+{
+ struct devlink *devlink = priv_to_devlink(priv);
+
+ devlink_register(devlink);
+}
+
+void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
+{
+ struct devlink *devlink = priv_to_devlink(priv);
+
+ devlink_unregister(devlink);
+}
+
+static int chip_fault_show(struct devlink_fmsg *fmsg,
+ struct hinic_fault_event *event)
+{
+ const char * const level_str[FAULT_LEVEL_MAX + 1] = {
+ "fatal", "reset", "flr", "general", "suggestion", "Unknown"};
+ u8 fault_level;
+ int err;
+
+ fault_level = (event->event.chip.err_level < FAULT_LEVEL_MAX) ?
+ event->event.chip.err_level : FAULT_LEVEL_MAX;
+ if (fault_level == FAULT_LEVEL_SERIOUS_FLR) {
+ err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id",
+ (u32)event->event.chip.func_id);
+ if (err)
+ return err;
+ }
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr",
+ event->event.chip.err_csr_addr);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_value",
+ event->event.chip.err_csr_value);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int fault_report_show(struct devlink_fmsg *fmsg,
+ struct hinic_fault_event *event)
+{
+ const char * const type_str[FAULT_TYPE_MAX + 1] = {
+ "chip", "ucode", "mem rd timeout", "mem wr timeout",
+ "reg rd timeout", "reg wr timeout", "phy fault", "Unknown"};
+ u8 fault_type;
+ int err;
+
+ fault_type = (event->type < FAULT_TYPE_MAX) ? event->type : FAULT_TYPE_MAX;
+
+ err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_binary_pair_put(fmsg, "Fault raw data",
+ event->event.val, sizeof(event->event.val));
+ if (err)
+ return err;
+
+ switch (event->type) {
+ case FAULT_TYPE_CHIP:
+ err = chip_fault_show(fmsg, event);
+ if (err)
+ return err;
+ break;
+ case FAULT_TYPE_UCODE:
+ err = devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc);
+ if (err)
+ return err;
+ break;
+ case FAULT_TYPE_MEM_RD_TIMEOUT:
+ case FAULT_TYPE_MEM_WR_TIMEOUT:
+ err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl",
+ event->event.mem_timeout.err_csr_ctrl);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_data",
+ event->event.mem_timeout.err_csr_data);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab",
+ event->event.mem_timeout.ctrl_tab);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "mem_index",
+ event->event.mem_timeout.mem_index);
+ if (err)
+ return err;
+ break;
+ case FAULT_TYPE_REG_RD_TIMEOUT:
+ case FAULT_TYPE_REG_WR_TIMEOUT:
+ err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr);
+ if (err)
+ return err;
+ break;
+ case FAULT_TYPE_PHY_FAULT:
+ err = devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data);
+ if (err)
+ return err;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ if (priv_ctx)
+ return fault_report_show(fmsg, priv_ctx);
+
+ return 0;
+}
+
+static int mgmt_watchdog_report_show(struct devlink_fmsg *fmsg,
+ struct hinic_mgmt_watchdog_info *watchdog_info)
+{
+ int err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", watchdog_info->curr_time_h);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "time_l", watchdog_info->curr_time_l);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "task_id", watchdog_info->task_id);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "sp", watchdog_info->sp);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", watchdog_info->curr_used);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "peak_used", watchdog_info->peak_used);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", watchdog_info->is_overflow);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "stack_top", watchdog_info->stack_top);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", watchdog_info->stack_bottom);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", watchdog_info->pc);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "lr", watchdog_info->lr);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "cpsr", watchdog_info->cpsr);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info",
+ watchdog_info->reg, sizeof(watchdog_info->reg));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)",
+ watchdog_info->data, sizeof(watchdog_info->data));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ if (priv_ctx)
+ return mgmt_watchdog_report_show(fmsg, priv_ctx);
+
+ return 0;
+}
+
+static const struct devlink_health_reporter_ops hinic_hw_fault_reporter_ops = {
+ .name = "hw",
+ .dump = hinic_hw_reporter_dump,
+};
+
+static const struct devlink_health_reporter_ops hinic_fw_fault_reporter_ops = {
+ .name = "fw",
+ .dump = hinic_fw_reporter_dump,
+};
+
+int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
+{
+ struct devlink *devlink = priv_to_devlink(priv);
+
+ priv->hw_fault_reporter =
+ devlink_health_reporter_create(devlink, &hinic_hw_fault_reporter_ops,
+ 0, priv);
+ if (IS_ERR(priv->hw_fault_reporter)) {
+ dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create hw fault reporter, err: %ld\n",
+ PTR_ERR(priv->hw_fault_reporter));
+ return PTR_ERR(priv->hw_fault_reporter);
+ }
+
+ priv->fw_fault_reporter =
+ devlink_health_reporter_create(devlink, &hinic_fw_fault_reporter_ops,
+ 0, priv);
+ if (IS_ERR(priv->fw_fault_reporter)) {
+ dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create fw fault reporter, err: %ld\n",
+ PTR_ERR(priv->fw_fault_reporter));
+ devlink_health_reporter_destroy(priv->hw_fault_reporter);
+ priv->hw_fault_reporter = NULL;
+ return PTR_ERR(priv->fw_fault_reporter);
+ }
+
+ return 0;
+}
+
+void hinic_health_reporters_destroy(struct hinic_devlink_priv *priv)
+{
+ if (!IS_ERR_OR_NULL(priv->fw_fault_reporter)) {
+ devlink_health_reporter_destroy(priv->fw_fault_reporter);
+ priv->fw_fault_reporter = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(priv->hw_fault_reporter)) {
+ devlink_health_reporter_destroy(priv->hw_fault_reporter);
+ priv->hw_fault_reporter = NULL;
+ }
+}
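
check_image_valid() above accepts an image only if the magic matches, the section count fits, the per-section lengths sum to fw_len, and fw_len plus the 1024-byte header equals the file size. A minimal userspace sketch of the same arithmetic (simplified structs for illustration, not the on-disk layout byte for byte):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAGIC		0x18221100	/* HINIC_MAGIC_NUM */
#define HEAD_SIZE	1024		/* UPDATEFW_IMAGE_HEAD_SIZE */
#define MAX_SECTIONS	30		/* MAX_FW_TYPE_NUM */

struct section { uint32_t len; };

struct image_hdr {
	uint32_t magic;
	uint32_t fw_len;
	uint32_t section_cnt;
	struct section sections[MAX_SECTIONS];
};

static bool image_valid(const struct image_hdr *hdr, uint32_t file_size)
{
	uint32_t i, len = 0;

	if (hdr->magic != MAGIC || hdr->section_cnt > MAX_SECTIONS)
		return false;

	for (i = 0; i < hdr->section_cnt; i++)
		len += hdr->sections[i].len;

	/* Sections must cover fw_len exactly, and header + payload
	 * must cover the whole file. */
	return len == hdr->fw_len && hdr->fw_len + HEAD_SIZE == file_size;
}

int main(void)
{
	struct image_hdr hdr = {
		.magic = MAGIC, .fw_len = 300, .section_cnt = 2,
		.sections = { { .len = 100 }, { .len = 200 } },
	};

	printf("valid: %d\n", image_valid(&hdr, 300 + HEAD_SIZE));
	return 0;
}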
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
new file mode 100644
index 0000000000..46760d607b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef __HINIC_DEVLINK_H__
+#define __HINIC_DEVLINK_H__
+
+#include <net/devlink.h>
+#include "hinic_dev.h"
+
+#define MAX_FW_TYPE_NUM 30
+#define HINIC_MAGIC_NUM 0x18221100
+#define UPDATEFW_IMAGE_HEAD_SIZE 1024
+#define FW_UPDATE_COLD 0
+#define FW_UPDATE_HOT 1
+
+#define UP_TYPE_A 0x0
+#define UP_TYPE_B 0x1
+
+#define MAX_FW_FRAGMENT_LEN 1536
+#define HINIC_FW_DISMATCH_ERROR 10
+
+enum hinic_fw_type {
+ UP_FW_UPDATE_UP_TEXT_A = 0x0,
+ UP_FW_UPDATE_UP_DATA_A,
+ UP_FW_UPDATE_UP_TEXT_B,
+ UP_FW_UPDATE_UP_DATA_B,
+ UP_FW_UPDATE_UP_DICT,
+
+ UP_FW_UPDATE_HLINK_ONE = 0x5,
+ UP_FW_UPDATE_HLINK_TWO,
+ UP_FW_UPDATE_HLINK_THR,
+ UP_FW_UPDATE_PHY,
+ UP_FW_UPDATE_TILE_TEXT,
+
+ UP_FW_UPDATE_TILE_DATA = 0xa,
+ UP_FW_UPDATE_TILE_DICT,
+ UP_FW_UPDATE_PPE_STATE,
+ UP_FW_UPDATE_PPE_BRANCH,
+ UP_FW_UPDATE_PPE_EXTACT,
+
+ UP_FW_UPDATE_CLP_LEGACY = 0xf,
+ UP_FW_UPDATE_PXE_LEGACY,
+ UP_FW_UPDATE_ISCSI_LEGACY,
+ UP_FW_UPDATE_CLP_EFI,
+ UP_FW_UPDATE_PXE_EFI,
+
+ UP_FW_UPDATE_ISCSI_EFI = 0x14,
+ UP_FW_UPDATE_CFG,
+ UP_FW_UPDATE_BOOT,
+ UP_FW_UPDATE_VPD,
+ FILE_TYPE_TOTAL_NUM
+};
+
+#define _IMAGE_UP_ALL_IN ((1 << UP_FW_UPDATE_UP_TEXT_A) | \
+ (1 << UP_FW_UPDATE_UP_DATA_A) | \
+ (1 << UP_FW_UPDATE_UP_TEXT_B) | \
+ (1 << UP_FW_UPDATE_UP_DATA_B) | \
+ (1 << UP_FW_UPDATE_UP_DICT) | \
+ (1 << UP_FW_UPDATE_BOOT) | \
+ (1 << UP_FW_UPDATE_HLINK_ONE) | \
+ (1 << UP_FW_UPDATE_HLINK_TWO) | \
+ (1 << UP_FW_UPDATE_HLINK_THR))
+
+#define _IMAGE_UCODE_ALL_IN ((1 << UP_FW_UPDATE_TILE_TEXT) | \
+ (1 << UP_FW_UPDATE_TILE_DICT) | \
+ (1 << UP_FW_UPDATE_PPE_STATE) | \
+ (1 << UP_FW_UPDATE_PPE_BRANCH) | \
+ (1 << UP_FW_UPDATE_PPE_EXTACT))
+
+#define _IMAGE_COLD_SUB_MODULES_MUST_IN (_IMAGE_UP_ALL_IN | _IMAGE_UCODE_ALL_IN)
+#define _IMAGE_HOT_SUB_MODULES_MUST_IN (_IMAGE_UP_ALL_IN | _IMAGE_UCODE_ALL_IN)
+#define _IMAGE_CFG_SUB_MODULES_MUST_IN BIT(UP_FW_UPDATE_CFG)
+#define UP_FW_UPDATE_UP_TEXT 0x0
+#define UP_FW_UPDATE_UP_DATA 0x1
+#define UP_FW_UPDATE_VPD_B 0x15
+
+struct fw_section_info_st {
+ u32 fw_section_len;
+ u32 fw_section_offset;
+ u32 fw_section_version;
+ u32 fw_section_type;
+ u32 fw_section_crc;
+};
+
+struct fw_image_st {
+ u32 fw_version;
+ u32 fw_len;
+ u32 fw_magic;
+ struct {
+ u32 fw_section_cnt:16;
+ u32 resd:16;
+ } fw_info;
+ struct fw_section_info_st fw_section_info[MAX_FW_TYPE_NUM];
+ u32 device_id;
+ u32 res[101];
+ void *bin_data;
+};
+
+struct host_image_st {
+ struct fw_section_info_st image_section_info[MAX_FW_TYPE_NUM];
+ struct {
+ u32 up_total_len;
+ u32 fw_version;
+ } image_info;
+ u32 section_type_num;
+ u32 device_id;
+};
+
+struct devlink *hinic_devlink_alloc(struct device *dev);
+void hinic_devlink_free(struct devlink *devlink);
+void hinic_devlink_register(struct hinic_devlink_priv *priv);
+void hinic_devlink_unregister(struct hinic_devlink_priv *priv);
+
+int hinic_health_reporters_create(struct hinic_devlink_priv *priv);
+void hinic_health_reporters_destroy(struct hinic_devlink_priv *priv);
+
+#endif /* __HINIC_DEVLINK_H__ */
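
check_image_integrity() in hinic_devlink.c folds each section type into a one-bit-per-type mask, rejects duplicates, then requires the "must-in" mask above for the update type. The same pattern in isolation (illustrative names and mask value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool sections_complete(const uint32_t *types, int n, uint32_t must_in)
{
	uint32_t collected = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (collected & (1U << types[i]))
			return false;		/* duplicate section type */
		collected |= 1U << types[i];
	}

	/* every required sub-module must be present */
	return (collected & must_in) == must_in;
}

int main(void)
{
	uint32_t types[] = { 0, 1, 2, 3, 4 };
	uint32_t must_in = 0x1f;	/* e.g. the UP text/data/dict set, bits 0-4 */

	printf("%d\n", sections_complete(types, 5, must_in));
	return 0;
}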
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
new file mode 100644
index 0000000000..f4b6802869
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -0,0 +1,1861 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/sfp.h>
+
+#include "hinic_hw_qp.h"
+#include "hinic_hw_dev.h"
+#include "hinic_port.h"
+#include "hinic_tx.h"
+#include "hinic_rx.h"
+#include "hinic_dev.h"
+
+#define SET_LINK_STR_MAX_LEN 16
+
+#define GET_SUPPORTED_MODE 0
+#define GET_ADVERTISED_MODE 1
+
+#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= SUPPORTED_##mode)
+#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= ADVERTISED_##mode)
+
+#define COALESCE_PENDING_LIMIT_UNIT 8
+#define COALESCE_TIMER_CFG_UNIT 9
+#define COALESCE_ALL_QUEUE 0xFFFF
+#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT)
+#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT)
+
+struct hw2ethtool_link_mode {
+ enum ethtool_link_mode_bit_indices link_mode_bit;
+ u32 speed;
+ enum hinic_link_mode hw_link_mode;
+};
+
+struct cmd_link_settings {
+ u64 supported;
+ u64 advertising;
+
+ u32 speed;
+ u8 duplex;
+ u8 port;
+ u8 autoneg;
+};
+
+static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
+ SPEED_10, SPEED_100,
+ SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000,
+ SPEED_100000
+};
+
+static struct hw2ethtool_link_mode
+ hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = SPEED_10000,
+ .hw_link_mode = HINIC_10GE_BASE_KR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = SPEED_40000,
+ .hw_link_mode = HINIC_40GE_BASE_KR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = SPEED_40000,
+ .hw_link_mode = HINIC_40GE_BASE_CR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = SPEED_100000,
+ .hw_link_mode = HINIC_100GE_BASE_KR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = SPEED_100000,
+ .hw_link_mode = HINIC_100GE_BASE_CR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_KR_S,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_CR_S,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_KR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_CR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = SPEED_1000,
+ .hw_link_mode = HINIC_GE_BASE_KX,
+ },
+};
+
+#define LP_DEFAULT_TIME 5 /* seconds */
+#define LP_PKT_LEN 1514
+
+#define PORT_DOWN_ERR_IDX 0
+enum diag_test_index {
+ INTERNAL_LP_TEST = 0,
+ EXTERNAL_LP_TEST = 1,
+ DIAG_TEST_MAX = 2,
+};
+
+static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
+ enum hinic_speed speed)
+{
+ switch (speed) {
+ case HINIC_SPEED_10MB_LINK:
+ link_ksettings->base.speed = SPEED_10;
+ break;
+
+ case HINIC_SPEED_100MB_LINK:
+ link_ksettings->base.speed = SPEED_100;
+ break;
+
+ case HINIC_SPEED_1000MB_LINK:
+ link_ksettings->base.speed = SPEED_1000;
+ break;
+
+ case HINIC_SPEED_10GB_LINK:
+ link_ksettings->base.speed = SPEED_10000;
+ break;
+
+ case HINIC_SPEED_25GB_LINK:
+ link_ksettings->base.speed = SPEED_25000;
+ break;
+
+ case HINIC_SPEED_40GB_LINK:
+ link_ksettings->base.speed = SPEED_40000;
+ break;
+
+ case HINIC_SPEED_100GB_LINK:
+ link_ksettings->base.speed = SPEED_100000;
+ break;
+
+ default:
+ link_ksettings->base.speed = SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
+{
+ int i = 0;
+
+ for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
+ if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
+ break;
+ }
+
+ return i;
+}
+
+static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
+ enum hinic_link_mode hw_link_mode,
+ u32 name)
+{
+ enum hinic_link_mode link_mode;
+ int idx = 0;
+
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (hw_link_mode & ((u32)1 << link_mode)) {
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (name == GET_SUPPORTED_MODE)
+ ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
+ (link_settings, idx);
+ else
+ ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
+ (link_settings, idx);
+ }
+ }
+}
+
+static void hinic_link_port_type(struct cmd_link_settings *link_settings,
+ enum hinic_port_type port_type)
+{
+ switch (port_type) {
+ case HINIC_PORT_ELEC:
+ case HINIC_PORT_TP:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
+ link_settings->port = PORT_TP;
+ break;
+
+ case HINIC_PORT_AOC:
+ case HINIC_PORT_FIBRE:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
+ link_settings->port = PORT_FIBRE;
+ break;
+
+ case HINIC_PORT_COPPER:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
+ link_settings->port = PORT_DA;
+ break;
+
+ case HINIC_PORT_BACKPLANE:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
+ link_settings->port = PORT_NONE;
+ break;
+
+ default:
+ link_settings->port = PORT_OTHER;
+ break;
+ }
+}
+
+static int hinic_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_link_mode_cmd link_mode = { 0 };
+ struct hinic_pause_config pause_info = { 0 };
+ struct cmd_link_settings settings = { 0 };
+ enum hinic_port_link_state link_state;
+ struct hinic_port_cap port_cap;
+ int err;
+
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+
+ link_ksettings->base.speed = SPEED_UNKNOWN;
+ link_ksettings->base.autoneg = AUTONEG_DISABLE;
+ link_ksettings->base.duplex = DUPLEX_UNKNOWN;
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err)
+ return err;
+
+ hinic_link_port_type(&settings, port_cap.port_type);
+ link_ksettings->base.port = settings.port;
+
+ err = hinic_port_link_state(nic_dev, &link_state);
+ if (err)
+ return err;
+
+ if (link_state == HINIC_LINK_STATE_UP) {
+ set_link_speed(link_ksettings, port_cap.speed);
+ link_ksettings->base.duplex =
+ (port_cap.duplex == HINIC_DUPLEX_FULL) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
+
+ if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
+ link_ksettings->base.autoneg = AUTONEG_ENABLE;
+
+ err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
+ link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
+ return -EIO;
+
+ hinic_add_ethtool_link_mode(&settings, link_mode.supported,
+ GET_SUPPORTED_MODE);
+ hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
+ GET_ADVERTISED_MODE);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
+ if (err)
+ return err;
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
+ if (pause_info.rx_pause && pause_info.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
+ } else if (pause_info.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
+ } else if (pause_info.rx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
+ }
+ }
+
+ linkmode_copy(link_ksettings->link_modes.supported,
+ (unsigned long *)&settings.supported);
+ linkmode_copy(link_ksettings->link_modes.advertising,
+ (unsigned long *)&settings.advertising);
+
+ return 0;
+}
+
+static int hinic_ethtool_to_hw_speed_level(u32 speed)
+{
+ int i;
+
+ for (i = 0; i < LINK_SPEED_LEVELS; i++) {
+ if (hw_to_ethtool_speed[i] == speed)
+ break;
+ }
+
+ return i;
+}
+
+static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
+ u32 speed)
+{
+ enum hinic_link_mode link_mode;
+ int idx;
+
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (!(supported_link & ((u32)1 << link_mode)))
+ continue;
+
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (hw_to_ethtool_link_mode_table[idx].speed == speed)
+ return true;
+ }
+
+ return false;
+}
+
+static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
+{
+ struct hinic_link_mode_cmd link_mode = { 0 };
+ struct net_device *netdev = nic_dev->netdev;
+ enum nic_speed_level speed_level = 0;
+ int err;
+
+ err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ if (err)
+ return false;
+
+ if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
+ link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
+ return false;
+
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ if (speed_level >= LINK_SPEED_LEVELS ||
+ !hinic_is_support_speed(link_mode.supported, speed)) {
+ netif_err(nic_dev, drv, netdev,
+ "Unsupported speed: %d\n", speed);
+ return false;
+ }
+
+ return true;
+}
+
+static int get_link_settings_type(struct hinic_dev *nic_dev,
+ u8 autoneg, u32 speed, u32 *set_settings)
+{
+ struct hinic_port_cap port_cap = { 0 };
+ int err;
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err)
+ return err;
+
+ /* always set autonegotiation */
+ if (port_cap.autoneg_cap)
+ *set_settings |= HILINK_LINK_SET_AUTONEG;
+
+ if (autoneg == AUTONEG_ENABLE) {
+ if (!port_cap.autoneg_cap) {
+ netif_err(nic_dev, drv, nic_dev->netdev, "Not support autoneg\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (speed != (u32)SPEED_UNKNOWN) {
+ /* set speed only when autoneg is disabled */
+ if (!hinic_is_speed_legal(nic_dev, speed))
+ return -EINVAL;
+ *set_settings |= HILINK_LINK_SET_SPEED;
+ } else {
+ netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
+ u32 set_settings, u8 autoneg,
+ u32 speed)
+{
+ enum nic_speed_level speed_level = 0;
+ int err = 0;
+
+ if (set_settings & HILINK_LINK_SET_AUTONEG) {
+ err = hinic_set_autoneg(nic_dev->hwdev,
+ (autoneg == AUTONEG_ENABLE));
+ if (err)
+ netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
+ (autoneg == AUTONEG_ENABLE) ?
+ "Enable" : "Disable");
+ else
+ netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
+ (autoneg == AUTONEG_ENABLE) ?
+ "Enable" : "Disable");
+ }
+
+ if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ err = hinic_set_speed(nic_dev->hwdev, speed_level);
+ if (err)
+ netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
+ speed);
+ else
+ netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
+ speed);
+ }
+
+ return err;
+}
+
+static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
+ u32 set_settings, u8 autoneg, u32 speed)
+{
+ struct hinic_link_ksettings_info settings = {0};
+ char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
+ const char *autoneg_str;
+ struct net_device *netdev = nic_dev->netdev;
+ enum nic_speed_level speed_level = 0;
+ int err;
+
+ autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ?
+ (autoneg ? "autong enable " : "autong disable ") : "";
+
+ if (set_settings & HILINK_LINK_SET_SPEED) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN,
+ "speed %d ", speed);
+ if (err >= SET_LINK_STR_MAX_LEN) {
+ netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+ }
+
+ settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
+ settings.valid_bitmap = set_settings;
+ settings.autoneg = autoneg;
+ settings.speed = speed_level;
+
+ err = hinic_set_link_settings(nic_dev->hwdev, &settings);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
+ if (err)
+ netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n",
+ autoneg_str, set_link_str);
+ else
+ netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n",
+ autoneg_str, set_link_str);
+
+ return err;
+ }
+
+ return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
+ speed);
+}
+
+static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u32 set_settings = 0;
+ int err;
+
+ err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
+ if (err)
+ return err;
+
+ if (set_settings)
+ err = hinic_set_settings_to_hw(nic_dev, set_settings,
+ autoneg, speed);
+ else
+ netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n");
+
+ return err;
+}
+
+static int hinic_set_link_ksettings(struct net_device *netdev, const struct
+ ethtool_link_ksettings *link_settings)
+{
+ /* only support to set autoneg and speed */
+ return set_link_settings(netdev, link_settings->base.autoneg,
+ link_settings->base.speed);
+}
+
+static void hinic_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ int err;
+
+ strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));
+
+ err = hinic_get_mgmt_version(nic_dev, mgmt_ver);
+ if (err)
+ return;
+
+ snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver);
+}
+
+static void hinic_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
+ ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
+ ring->rx_pending = nic_dev->rq_depth;
+ ring->tx_pending = nic_dev->sq_depth;
+}
+
+static int check_ringparam_valid(struct hinic_dev *nic_dev,
+ struct ethtool_ringparam *ring)
+{
+ if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Unsupported rx_jumbo_pending/rx_mini_pending\n");
+ return -EINVAL;
+ }
+
+ if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH ||
+ ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
+ ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
+ ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Queue depth out of range [%d-%d]\n",
+ HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 new_sq_depth, new_rq_depth;
+ int err;
+
+ err = check_ringparam_valid(nic_dev, ring);
+ if (err)
+ return err;
+
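+	/* ring sizes must be powers of two: round the requested values
+	 * down with ilog2 (e.g. a request of 3000 becomes 2048)
+	 */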
+ new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
+ new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));
+
+ if (new_sq_depth == nic_dev->sq_depth &&
+ new_rq_depth == nic_dev->rq_depth)
+ return 0;
+
+ netif_info(nic_dev, drv, netdev,
+ "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
+ nic_dev->sq_depth, nic_dev->rq_depth,
+ new_sq_depth, new_rq_depth);
+
+ nic_dev->sq_depth = new_sq_depth;
+ nic_dev->rq_depth = new_rq_depth;
+
+ if (netif_running(netdev)) {
+ netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
+ err = hinic_close(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to close netdev\n");
+ return -EFAULT;
+ }
+
+ err = hinic_open(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
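+
+/* Illustrative usage ("eth0" is a placeholder interface name):
+ *
+ *   ethtool -g eth0                  # query ring sizes
+ *   ethtool -G eth0 rx 4096 tx 4096  # resize, restarting if running
+ */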
+
+static int __hinic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal, u16 queue)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_intr_coal_info *rx_intr_coal_info;
+ struct hinic_intr_coal_info *tx_intr_coal_info;
+
+ if (queue == COALESCE_ALL_QUEUE) {
+ /* get tx/rx irq0 as default parameters */
+ rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0];
+ tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0];
+ } else {
+ if (queue >= nic_dev->num_qps) {
+ netif_err(nic_dev, drv, netdev,
+ "Invalid queue_id: %d\n", queue);
+ return -EINVAL;
+ }
+ rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue];
+ tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue];
+ }
+
+ /* coalesce_timer is in unit of 9us */
+ coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg *
+ COALESCE_TIMER_CFG_UNIT;
+ /* coalesced_frames is in unit of 8 */
+ coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt *
+ COALESCE_PENDING_LIMIT_UNIT;
+ coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg *
+ COALESCE_TIMER_CFG_UNIT;
+ coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt *
+ COALESCE_PENDING_LIMIT_UNIT;
+
+ return 0;
+}
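+
+/* Worked example of the conversion above: with the 9 us timer unit and
+ * 8-frame pending unit, a stored coalesce_timer_cfg of 3 is reported as
+ * 27 rx_coalesce_usecs and a pending_limt of 2 as 16
+ * rx_max_coalesced_frames.
+ */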
+
+static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal)
+{
+ if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
+ coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT ||
+ coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
+ coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT)
+ return -ERANGE;
+
+ return 0;
+}
+
+static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id,
+ struct hinic_intr_coal_info *coal,
+ bool set_rx_coal)
+{
+ struct hinic_intr_coal_info *intr_coal = NULL;
+ struct hinic_msix_config interrupt_info = {0};
+ struct net_device *netdev = nic_dev->netdev;
+ u16 msix_idx;
+ int err;
+
+ intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] :
+ &nic_dev->tx_intr_coalesce[q_id];
+
+ intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg;
+ intr_coal->pending_limt = coal->pending_limt;
+
+	/* if the netdev is not running or the QP is not in use, there is
+	 * no need to write the coalesce values to the HW
+	 */
+ if (!(nic_dev->flags & HINIC_INTF_UP) ||
+ q_id >= nic_dev->num_qps)
+ return 0;
+
+ msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry :
+ nic_dev->txqs[q_id].sq->msix_entry;
+ interrupt_info.msix_index = msix_idx;
+ interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
+ interrupt_info.pending_cnt = intr_coal->pending_limt;
+ interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
+
+ err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info);
+ if (err)
+		netif_warn(nic_dev, drv, netdev,
+			   "Failed to set %s queue%d coalesce\n",
+			   set_rx_coal ? "rx" : "tx", q_id);
+
+ return err;
+}
+
+static int __set_hw_coal_param(struct hinic_dev *nic_dev,
+ struct hinic_intr_coal_info *intr_coal,
+ u16 queue, bool set_rx_coal)
+{
+ int err;
+ u16 i;
+
+ if (queue == COALESCE_ALL_QUEUE) {
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ err = set_queue_coalesce(nic_dev, i, intr_coal,
+ set_rx_coal);
+ if (err)
+ return err;
+ }
+ } else {
+ if (queue >= nic_dev->num_qps) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Invalid queue_id: %d\n", queue);
+ return -EINVAL;
+ }
+ err = set_queue_coalesce(nic_dev, queue, intr_coal,
+ set_rx_coal);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __hinic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal, u16 queue)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_intr_coal_info rx_intr_coal = {0};
+ struct hinic_intr_coal_info tx_intr_coal = {0};
+ bool set_rx_coal = false;
+ bool set_tx_coal = false;
+ int err;
+
+ err = is_coalesce_exceed_limit(coal);
+ if (err)
+ return err;
+
+ if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) {
+ rx_intr_coal.coalesce_timer_cfg =
+ (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
+ rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
+ COALESCE_PENDING_LIMIT_UNIT);
+ set_rx_coal = true;
+ }
+
+ if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) {
+ tx_intr_coal.coalesce_timer_cfg =
+ (u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
+ tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames /
+ COALESCE_PENDING_LIMIT_UNIT);
+ set_tx_coal = true;
+ }
+
+ /* setting coalesce timer or pending limit to zero will disable
+ * coalesce
+ */
+ if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg ||
+ !rx_intr_coal.pending_limt))
+ netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n");
+ if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg ||
+ !tx_intr_coal.pending_limt))
+ netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n");
+
+ if (set_rx_coal) {
+ err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true);
+ if (err)
+ return err;
+ }
+ if (set_tx_coal) {
+ err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int hinic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
+}
+
+static int hinic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
+}
+
+static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ return __hinic_get_coalesce(netdev, coal, queue);
+}
+
+static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ return __hinic_set_coalesce(netdev, coal, queue);
+}
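+
+/* Illustrative ethtool usage for the hooks above ("eth0" is a
+ * placeholder interface name):
+ *
+ *   ethtool -C eth0 rx-usecs 27 rx-frames 16        # all queues
+ *   ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 18
+ *
+ * Values are truncated to whole hardware units (9 us ticks, 8-frame
+ * slots) by the divisions in __hinic_set_coalesce().
+ */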
+
+static void hinic_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_pause_config pause_info = {0};
+ struct hinic_nic_cfg *nic_cfg;
+ int err;
+
+ nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
+
+ err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
+ if (!err) {
+ pause->autoneg = pause_info.auto_neg;
+ if (nic_cfg->pause_set || !pause_info.auto_neg) {
+ pause->rx_pause = nic_cfg->rx_pause;
+ pause->tx_pause = nic_cfg->tx_pause;
+ } else {
+ pause->rx_pause = pause_info.rx_pause;
+ pause->tx_pause = pause_info.tx_pause;
+ }
+ }
+}
+
+static int hinic_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_pause_config pause_info = {0};
+ struct hinic_port_cap port_cap = {0};
+ int err;
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err)
+ return -EIO;
+
+ if (pause->autoneg != port_cap.autoneg_state)
+ return -EOPNOTSUPP;
+
+ pause_info.auto_neg = pause->autoneg;
+ pause_info.rx_pause = pause->rx_pause;
+ pause_info.tx_pause = pause->tx_pause;
+
+ mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
+ err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
+ if (err) {
+ mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
+ return err;
+ }
+ nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true;
+ nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg;
+ nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause;
+ nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause;
+ mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
+
+ return 0;
+}
+
+static void hinic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+
+ channels->max_combined = nic_dev->max_qps;
+ channels->combined_count = hinic_hwdev_num_qps(hwdev);
+}
+
+static int hinic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ unsigned int count = channels->combined_count;
+ int err;
+
+ netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
+ hinic_hwdev_num_qps(nic_dev->hwdev), count);
+
+ if (netif_running(netdev)) {
+ netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
+ hinic_close(netdev);
+
+ nic_dev->hwdev->nic_cap.num_qps = count;
+
+ err = hinic_open(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ return -EFAULT;
+ }
+ } else {
+ nic_dev->hwdev->nic_cap.num_qps = count;
+ }
+
+ return 0;
+}
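+
+/* Illustrative usage: "ethtool -L eth0 combined 4" requests four queue
+ * pairs, restarting the netdev if it is running ("eth0" is a
+ * placeholder interface name).
+ */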
+
+static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hinic_rss_type rss_type = { 0 };
+ int err;
+
+ cmd->data = 0;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
+ return 0;
+
+ err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
+ &rss_type);
+ if (err)
+ return err;
+
+ cmd->data = RXH_IP_SRC | RXH_IP_DST;
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (rss_type.tcp_ipv4)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case TCP_V6_FLOW:
+ if (rss_type.tcp_ipv6)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (rss_type.udp_ipv4)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (rss_type.udp_ipv6)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ cmd->data = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
+ struct hinic_rss_type *rss_type)
+{
+ u8 rss_l4_en = 0;
+
+ switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_l4_en = 0;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_l4_en = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ rss_type->tcp_ipv4 = rss_l4_en;
+ break;
+ case TCP_V6_FLOW:
+ rss_type->tcp_ipv6 = rss_l4_en;
+ break;
+ case UDP_V4_FLOW:
+ rss_type->udp_ipv4 = rss_l4_en;
+ break;
+ case UDP_V6_FLOW:
+ rss_type->udp_ipv6 = rss_l4_en;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hinic_rss_type *rss_type = &nic_dev->rss_type;
+ int err;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
+ cmd->data = 0;
+ return -EOPNOTSUPP;
+ }
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
+ RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
+ return -EINVAL;
+
+ err = hinic_get_rss_type(nic_dev,
+ nic_dev->rss_tmpl_idx, rss_type);
+ if (err)
+ return -EFAULT;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ err = set_l4_rss_hash_ops(cmd, rss_type);
+ if (err)
+ return err;
+ break;
+ case IPV4_FLOW:
+ rss_type->ipv4 = 1;
+ break;
+ case IPV6_FLOW:
+ rss_type->ipv6 = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
+ *rss_type);
+ if (err)
+ return -EFAULT;
+
+ return 0;
+}
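+
+/* Illustrative usage ("eth0" is a placeholder interface name):
+ *
+ *   ethtool -N eth0 rx-flow-hash tcp4 sdfn   # hash on IPs + L4 ports
+ *   ethtool -n eth0 rx-flow-hash tcp4        # read the setting back
+ *
+ * The s/d flags map to RXH_IP_SRC/RXH_IP_DST and f/n to RXH_L4_B_0_1/
+ * RXH_L4_B_2_3 above.
+ */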
+
+static int __set_rss_rxfh(struct net_device *netdev,
+ const u32 *indir, const u8 *key)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ if (indir) {
+ if (!nic_dev->rss_indir_user) {
+			nic_dev->rss_indir_user =
+				kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32),
+					GFP_KERNEL);
+ if (!nic_dev->rss_indir_user)
+ return -ENOMEM;
+ }
+
+ memcpy(nic_dev->rss_indir_user, indir,
+ sizeof(u32) * HINIC_RSS_INDIR_SIZE);
+
+ err = hinic_rss_set_indir_tbl(nic_dev,
+ nic_dev->rss_tmpl_idx, indir);
+ if (err)
+ return -EFAULT;
+ }
+
+ if (key) {
+ if (!nic_dev->rss_hkey_user) {
+ nic_dev->rss_hkey_user =
+ kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);
+
+ if (!nic_dev->rss_hkey_user)
+ return -ENOMEM;
+ }
+
+ memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);
+
+ err = hinic_rss_set_template_tbl(nic_dev,
+ nic_dev->rss_tmpl_idx, key);
+ if (err)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = nic_dev->num_qps;
+ break;
+ case ETHTOOL_GRXFH:
+ err = hinic_get_rss_hash_opts(nic_dev, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ err = hinic_set_rss_hash_opts(nic_dev, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int hinic_get_rxfh(struct net_device *netdev,
+ u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u8 hash_engine_type = 0;
+ int err = 0;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
+ return -EOPNOTSUPP;
+
+ if (hfunc) {
+ err = hinic_rss_get_hash_engine(nic_dev,
+ nic_dev->rss_tmpl_idx,
+ &hash_engine_type);
+ if (err)
+ return -EFAULT;
+
+ *hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
+ }
+
+ if (indir) {
+ err = hinic_rss_get_indir_tbl(nic_dev,
+ nic_dev->rss_tmpl_idx, indir);
+ if (err)
+ return -EFAULT;
+ }
+
+ if (key)
+ err = hinic_rss_get_template_tbl(nic_dev,
+ nic_dev->rss_tmpl_idx, key);
+
+ return err;
+}
+
+static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err = 0;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
+ if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
+ return -EOPNOTSUPP;
+
+ nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
+ HINIC_RSS_HASH_ENGINE_TYPE_XOR :
+ HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
+		err = hinic_rss_set_hash_engine(nic_dev,
+						nic_dev->rss_tmpl_idx,
+						nic_dev->rss_hash_engine);
+ if (err)
+ return -EFAULT;
+ }
+
+ err = __set_rss_rxfh(netdev, indir, key);
+
+ return err;
+}
+
+static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return HINIC_RSS_KEY_SIZE;
+}
+
+static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return HINIC_RSS_INDIR_SIZE;
+}
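+
+/* Illustrative usage: "ethtool -x eth0" dumps the RSS key and
+ * indirection table sized by the two helpers above, and
+ * "ethtool -X eth0 hfunc xor" switches the hash engine ("eth0" is a
+ * placeholder interface name).
+ */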
+
+#define HINIC_FUNC_STAT(_stat_item) { \
+ .name = #_stat_item, \
+ .size = sizeof_field(struct hinic_vport_stats, _stat_item), \
+ .offset = offsetof(struct hinic_vport_stats, _stat_item) \
+}
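+
+/* For reference, HINIC_FUNC_STAT(tx_err_vport) expands to
+ * { .name = "tx_err_vport",
+ *   .size = sizeof_field(struct hinic_vport_stats, tx_err_vport),
+ *   .offset = offsetof(struct hinic_vport_stats, tx_err_vport) }
+ */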
+
+static struct hinic_stats hinic_function_stats[] = {
+ HINIC_FUNC_STAT(tx_unicast_pkts_vport),
+ HINIC_FUNC_STAT(tx_unicast_bytes_vport),
+ HINIC_FUNC_STAT(tx_multicast_pkts_vport),
+ HINIC_FUNC_STAT(tx_multicast_bytes_vport),
+ HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
+ HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
+
+ HINIC_FUNC_STAT(rx_unicast_pkts_vport),
+ HINIC_FUNC_STAT(rx_unicast_bytes_vport),
+ HINIC_FUNC_STAT(rx_multicast_pkts_vport),
+ HINIC_FUNC_STAT(rx_multicast_bytes_vport),
+ HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
+ HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
+
+ HINIC_FUNC_STAT(tx_discard_vport),
+ HINIC_FUNC_STAT(rx_discard_vport),
+ HINIC_FUNC_STAT(tx_err_vport),
+ HINIC_FUNC_STAT(rx_err_vport),
+};
+
+static char hinic_test_strings[][ETH_GSTRING_LEN] = {
+ "Internal lb test (on/offline)",
+ "External lb test (external_lb)",
+};
+
+#define HINIC_PORT_STAT(_stat_item) { \
+ .name = #_stat_item, \
+ .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
+ .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
+}
+
+static struct hinic_stats hinic_port_stats[] = {
+ HINIC_PORT_STAT(mac_rx_total_pkt_num),
+ HINIC_PORT_STAT(mac_rx_total_oct_num),
+ HINIC_PORT_STAT(mac_rx_bad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_bad_oct_num),
+ HINIC_PORT_STAT(mac_rx_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_good_oct_num),
+ HINIC_PORT_STAT(mac_rx_uni_pkt_num),
+ HINIC_PORT_STAT(mac_rx_multi_pkt_num),
+ HINIC_PORT_STAT(mac_rx_broad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_total_pkt_num),
+ HINIC_PORT_STAT(mac_tx_total_oct_num),
+ HINIC_PORT_STAT(mac_tx_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_bad_oct_num),
+ HINIC_PORT_STAT(mac_tx_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_good_oct_num),
+ HINIC_PORT_STAT(mac_tx_uni_pkt_num),
+ HINIC_PORT_STAT(mac_tx_multi_pkt_num),
+ HINIC_PORT_STAT(mac_tx_broad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
+ HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
+ HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
+ HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
+ HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pause_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
+ HINIC_PORT_STAT(mac_rx_control_pkt_num),
+ HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
+ HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
+ HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
+ HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
+ HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
+ HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
+ HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pause_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
+ HINIC_PORT_STAT(mac_tx_control_pkt_num),
+ HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
+ HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
+};
+
+#define HINIC_TXQ_STAT(_stat_item) { \
+ .name = "txq%d_"#_stat_item, \
+ .size = sizeof_field(struct hinic_txq_stats, _stat_item), \
+ .offset = offsetof(struct hinic_txq_stats, _stat_item) \
+}
+
+static struct hinic_stats hinic_tx_queue_stats[] = {
+ HINIC_TXQ_STAT(pkts),
+ HINIC_TXQ_STAT(bytes),
+ HINIC_TXQ_STAT(tx_busy),
+ HINIC_TXQ_STAT(tx_wake),
+ HINIC_TXQ_STAT(tx_dropped),
+ HINIC_TXQ_STAT(big_frags_pkts),
+};
+
+#define HINIC_RXQ_STAT(_stat_item) { \
+ .name = "rxq%d_"#_stat_item, \
+ .size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
+ .offset = offsetof(struct hinic_rxq_stats, _stat_item) \
+}
+
+static struct hinic_stats hinic_rx_queue_stats[] = {
+ HINIC_RXQ_STAT(pkts),
+ HINIC_RXQ_STAT(bytes),
+ HINIC_RXQ_STAT(errors),
+ HINIC_RXQ_STAT(csum_errors),
+ HINIC_RXQ_STAT(other_errors),
+};
+
+static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
+{
+ struct hinic_txq_stats txq_stats;
+ struct hinic_rxq_stats rxq_stats;
+ u16 i = 0, j = 0, qid = 0;
+ char *p;
+
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ if (!nic_dev->txqs)
+ break;
+
+ hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) {
+ p = (char *)&txq_stats +
+ hinic_tx_queue_stats[j].offset;
+ data[i] = (hinic_tx_queue_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ }
+
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ if (!nic_dev->rxqs)
+ break;
+
+ hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) {
+ p = (char *)&rxq_stats +
+ hinic_rx_queue_stats[j].offset;
+ data[i] = (hinic_rx_queue_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ }
+}
+
+static void hinic_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_vport_stats vport_stats = {0};
+ struct hinic_phy_port_stats *port_stats;
+ u16 i = 0, j = 0;
+ char *p;
+ int err;
+
+ err = hinic_get_vport_stats(nic_dev, &vport_stats);
+ if (err)
+ netif_err(nic_dev, drv, netdev,
+ "Failed to get vport stats from firmware\n");
+
+ for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) {
+ p = (char *)&vport_stats + hinic_function_stats[j].offset;
+ data[i] = (hinic_function_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
+ if (!port_stats) {
+ memset(&data[i], 0,
+ ARRAY_SIZE(hinic_port_stats) * sizeof(*data));
+ i += ARRAY_SIZE(hinic_port_stats);
+ goto get_drv_stats;
+ }
+
+ err = hinic_get_phy_port_stats(nic_dev, port_stats);
+ if (err)
+ netif_err(nic_dev, drv, netdev,
+ "Failed to get port stats from firmware\n");
+
+ for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) {
+ p = (char *)port_stats + hinic_port_stats[j].offset;
+ data[i] = (hinic_port_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ kfree(port_stats);
+
+get_drv_stats:
+ get_drv_queue_stats(nic_dev, data + i);
+}
+
+static int hinic_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int count, q_num;
+
+ switch (sset) {
+ case ETH_SS_TEST:
+ return ARRAY_SIZE(hinic_test_strings);
+ case ETH_SS_STATS:
+ q_num = nic_dev->num_qps;
+ count = ARRAY_SIZE(hinic_function_stats) +
+ (ARRAY_SIZE(hinic_tx_queue_stats) +
+ ARRAY_SIZE(hinic_rx_queue_stats)) * q_num;
+
+ count += ARRAY_SIZE(hinic_port_stats);
+
+ return count;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void hinic_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ char *p = (char *)data;
+ u16 i, j;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
+ return;
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
+ memcpy(p, hinic_function_stats[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
+ memcpy(p, hinic_port_stats[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < nic_dev->num_qps; i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
+ sprintf(p, hinic_tx_queue_stats[j].name, i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < nic_dev->num_qps; i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
+ sprintf(p, hinic_rx_queue_stats[j].name, i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
+ return;
+ default:
+ return;
+ }
+}
+
+static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
+{
+ u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
+ struct net_device *netdev = nic_dev->netdev;
+ struct sk_buff *skb_tmp = NULL;
+ struct sk_buff *skb = NULL;
+ u32 cnt = test_time * 5;
+ u8 *test_data = NULL;
+ u32 i;
+ u8 j;
+
+ skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
+ if (!skb_tmp)
+ return -ENOMEM;
+
+ test_data = __skb_put(skb_tmp, LP_PKT_LEN);
+
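+	/* build a raw loopback frame: all-0xFF dst MAC, a src MAC that
+	 * differs in its first byte (0xFE), EtherType 0x0800 and an
+	 * incrementing payload byte pattern
+	 */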
+ memset(test_data, 0xFF, 2 * ETH_ALEN);
+ test_data[ETH_ALEN] = 0xFE;
+ test_data[2 * ETH_ALEN] = 0x08;
+ test_data[2 * ETH_ALEN + 1] = 0x0;
+
+ for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
+ test_data[i] = i & 0xFF;
+
+ skb_tmp->queue_mapping = 0;
+ skb_tmp->ip_summed = CHECKSUM_COMPLETE;
+ skb_tmp->dev = netdev;
+
+ for (i = 0; i < cnt; i++) {
+ nic_dev->lb_test_rx_idx = 0;
+ memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);
+
+ for (j = 0; j < LP_PKT_CNT; j++) {
+ skb = pskb_copy(skb_tmp, GFP_ATOMIC);
+ if (!skb) {
+ dev_kfree_skb_any(skb_tmp);
+ netif_err(nic_dev, drv, netdev,
+ "Copy skb failed for loopback test\n");
+ return -ENOMEM;
+ }
+
+ /* mark index for every pkt */
+ skb->data[LP_PKT_LEN - 1] = j;
+
+ if (hinic_lb_xmit_frame(skb, netdev)) {
+ dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb_tmp);
+ netif_err(nic_dev, drv, netdev,
+ "Xmit pkt failed for loopback test\n");
+ return -EBUSY;
+ }
+ }
+
+		/* wait until all packets have been received into the RX buffer */
+ msleep(200);
+
+ for (j = 0; j < LP_PKT_CNT; j++) {
+ if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN,
+ skb_tmp->data, LP_PKT_LEN - 1) ||
+ (*(lb_test_rx_buf + j * LP_PKT_LEN +
+ LP_PKT_LEN - 1) != j)) {
+ dev_kfree_skb_any(skb_tmp);
+ netif_err(nic_dev, drv, netdev,
+ "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
+ j + i * LP_PKT_CNT,
+ LP_PKT_LEN - 1,
+ *(lb_test_rx_buf + j * LP_PKT_LEN +
+ LP_PKT_LEN - 1));
+ return -EIO;
+ }
+ }
+ }
+
+ dev_kfree_skb_any(skb_tmp);
+ return 0;
+}
+
+static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time,
+ enum diag_test_index *test_index)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ u8 *lb_test_rx_buf = NULL;
+ int err = 0;
+
+ if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
+ *test_index = INTERNAL_LP_TEST;
+ if (hinic_set_loopback_mode(nic_dev->hwdev,
+ HINIC_INTERNAL_LP_MODE, true)) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to set port loopback mode before loopback test\n");
+ return -EIO;
+ }
+ } else {
+ *test_index = EXTERNAL_LP_TEST;
+ }
+
+ lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
+ if (!lb_test_rx_buf) {
+ err = -ENOMEM;
+ } else {
+ nic_dev->lb_test_rx_buf = lb_test_rx_buf;
+ nic_dev->lb_pkt_len = LP_PKT_LEN;
+ nic_dev->flags |= HINIC_LP_TEST;
+ err = hinic_run_lp_test(nic_dev, test_time);
+ nic_dev->flags &= ~HINIC_LP_TEST;
+ msleep(100);
+ vfree(lb_test_rx_buf);
+ nic_dev->lb_test_rx_buf = NULL;
+ }
+
+ if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
+ if (hinic_set_loopback_mode(nic_dev->hwdev,
+ HINIC_INTERNAL_LP_MODE, false)) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to cancel port loopback mode after loopback test\n");
+ err = -EIO;
+ }
+ }
+
+ return err;
+}
+
+static void hinic_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ enum hinic_port_link_state link_state;
+ enum diag_test_index test_index = 0;
+ int err = 0;
+
+ memset(data, 0, DIAG_TEST_MAX * sizeof(u64));
+
+	/* the loopback test is not supported while the netdev is closed */
+ if (!(nic_dev->flags & HINIC_INTF_UP)) {
+		netif_err(nic_dev, drv, netdev,
+			  "Loopback test is not supported when the netdev is closed\n");
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[PORT_DOWN_ERR_IDX] = 1;
+ return;
+ }
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
+ &test_index);
+ if (err) {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[test_index] = 1;
+ }
+
+ netif_tx_wake_all_queues(netdev);
+
+ err = hinic_port_link_state(nic_dev, &link_state);
+ if (!err && link_state == HINIC_LINK_STATE_UP)
+ netif_carrier_on(netdev);
+}
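+
+/* Illustrative usage ("eth0" is a placeholder interface name):
+ *
+ *   ethtool -t eth0 offline       # internal loopback test
+ *   ethtool -t eth0 external_lb   # external loopback test
+ *
+ * A non-zero entry in the result array marks the failing case.
+ */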
+
+static int hinic_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err = 0;
+ u8 port;
+
+ port = nic_dev->hwdev->port_id;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ err = hinic_set_led_status(nic_dev->hwdev, port,
+ HINIC_LED_TYPE_LINK,
+ HINIC_LED_MODE_FORCE_2HZ);
+ if (err)
+ netif_err(nic_dev, drv, netdev,
+ "Set LED blinking in 2HZ failed\n");
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ err = hinic_reset_led_status(nic_dev->hwdev, port);
+ if (err)
+ netif_err(nic_dev, drv, netdev,
+ "Reset LED to original status failed\n");
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
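+
+/* Illustrative usage: "ethtool -p eth0 5" blinks the port LED at 2 Hz
+ * for five seconds and then restores its original state ("eth0" is a
+ * placeholder interface name).
+ */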
+
+static int hinic_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u8 sfp_type_ext;
+ u8 sfp_type;
+ int err;
+
+ err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
+ if (err)
+ return err;
+
+ switch (sfp_type) {
+ case SFF8024_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case SFF8024_ID_QSFP_8438:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+ case SFF8024_ID_QSFP_8436_8636:
+ if (sfp_type_ext >= 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ }
+ break;
+ case SFF8024_ID_QSFP28_8636:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ break;
+ default:
+ netif_warn(nic_dev, drv, netdev,
+ "Optical module unknown: 0x%x\n", sfp_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
+ u16 len;
+ int err;
+
+ if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
+ if (err)
+ return err;
+
+ memcpy(data, sfp_data + ee->offset, ee->len);
+
+ return 0;
+}
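+
+/* Illustrative usage: "ethtool -m eth0" reads and decodes the SFP/QSFP
+ * EEPROM contents returned above ("eth0" is a placeholder interface
+ * name).
+ */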
+
+static int
+hinic_get_link_ext_state(struct net_device *netdev,
+ struct ethtool_link_ext_state_info *link_ext_state_info)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ if (netif_carrier_ok(netdev))
+ return -ENODATA;
+
+ if (nic_dev->cable_unplugged)
+ link_ext_state_info->link_ext_state =
+ ETHTOOL_LINK_EXT_STATE_NO_CABLE;
+ else if (nic_dev->module_unrecognized)
+ link_ext_state_info->link_ext_state =
+ ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH;
+
+ return 0;
+}
+
+static const struct ethtool_ops hinic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES,
+
+ .get_link_ksettings = hinic_get_link_ksettings,
+ .set_link_ksettings = hinic_set_link_ksettings,
+ .get_drvinfo = hinic_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ext_state = hinic_get_link_ext_state,
+ .get_ringparam = hinic_get_ringparam,
+ .set_ringparam = hinic_set_ringparam,
+ .get_coalesce = hinic_get_coalesce,
+ .set_coalesce = hinic_set_coalesce,
+ .get_per_queue_coalesce = hinic_get_per_queue_coalesce,
+ .set_per_queue_coalesce = hinic_set_per_queue_coalesce,
+ .get_pauseparam = hinic_get_pauseparam,
+ .set_pauseparam = hinic_set_pauseparam,
+ .get_channels = hinic_get_channels,
+ .set_channels = hinic_set_channels,
+ .get_rxnfc = hinic_get_rxnfc,
+ .set_rxnfc = hinic_set_rxnfc,
+ .get_rxfh_key_size = hinic_get_rxfh_key_size,
+ .get_rxfh_indir_size = hinic_get_rxfh_indir_size,
+ .get_rxfh = hinic_get_rxfh,
+ .set_rxfh = hinic_set_rxfh,
+ .get_sset_count = hinic_get_sset_count,
+ .get_ethtool_stats = hinic_get_ethtool_stats,
+ .get_strings = hinic_get_strings,
+ .self_test = hinic_diag_test,
+ .set_phys_id = hinic_set_phys_id,
+ .get_module_info = hinic_get_module_info,
+ .get_module_eeprom = hinic_get_module_eeprom,
+};
+
+static const struct ethtool_ops hinicvf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES,
+
+ .get_link_ksettings = hinic_get_link_ksettings,
+ .get_drvinfo = hinic_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = hinic_get_ringparam,
+ .set_ringparam = hinic_set_ringparam,
+ .get_coalesce = hinic_get_coalesce,
+ .set_coalesce = hinic_set_coalesce,
+ .get_per_queue_coalesce = hinic_get_per_queue_coalesce,
+ .set_per_queue_coalesce = hinic_set_per_queue_coalesce,
+ .get_channels = hinic_get_channels,
+ .set_channels = hinic_set_channels,
+ .get_rxnfc = hinic_get_rxnfc,
+ .set_rxnfc = hinic_set_rxnfc,
+ .get_rxfh_key_size = hinic_get_rxfh_key_size,
+ .get_rxfh_indir_size = hinic_get_rxfh_indir_size,
+ .get_rxfh = hinic_get_rxfh,
+ .set_rxfh = hinic_set_rxfh,
+ .get_sset_count = hinic_get_sset_count,
+ .get_ethtool_stats = hinic_get_ethtool_stats,
+ .get_strings = hinic_get_strings,
+};
+
+void hinic_set_ethtool_ops(struct net_device *netdev)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ netdev->ethtool_ops = &hinic_ethtool_ops;
+ else
+ netdev->ethtool_ops = &hinicvf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
new file mode 100644
index 0000000000..998717f021
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -0,0 +1,987 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/semaphore.h>
+#include <asm/byteorder.h>
+#include <asm/barrier.h>
+
+#include "hinic_hw_csr.h"
+#include "hinic_hw_if.h"
+#include "hinic_hw_api_cmd.h"
+
+#define API_CHAIN_NUM_CELLS 32
+
+#define API_CMD_CELL_SIZE_SHIFT 6
+#define API_CMD_CELL_SIZE_MIN (BIT(API_CMD_CELL_SIZE_SHIFT))
+
+#define API_CMD_CELL_SIZE(cell_size) \
+ (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? \
+ (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN)
+
+#define API_CMD_CELL_SIZE_VAL(size) \
+ ilog2((size) >> API_CMD_CELL_SIZE_SHIFT)
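+
+/* Examples: API_CMD_CELL_SIZE(100) rounds up to the next power of two,
+ * 128; API_CMD_CELL_SIZE_VAL(128) encodes it as ilog2(128 >> 6) = 1.
+ */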
+
+#define API_CMD_BUF_SIZE 2048
+
+/* Sizes of the members in hinic_api_cmd_cell */
+#define API_CMD_CELL_DESC_SIZE 8
+#define API_CMD_CELL_DATA_ADDR_SIZE 8
+
+#define API_CMD_CELL_ALIGNMENT 8
+
+#define API_CMD_TIMEOUT 1000
+
+#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1))
+
+#define SIZE_8BYTES(size) (ALIGN((size), 8) >> 3)
+#define SIZE_4BYTES(size) (ALIGN((size), 4) >> 2)
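+
+/* MASKED_IDX relies on num_cells being a power of two: with 32 cells,
+ * index 33 wraps to 1.  SIZE_8BYTES(20) is ALIGN(20, 8) >> 3 = 3.
+ */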
+
+#define RD_DMA_ATTR_DEFAULT 0
+#define WR_DMA_ATTR_DEFAULT 0
+
+enum api_cmd_data_format {
+ SGE_DATA = 1, /* cell data is passed by hw address */
+};
+
+enum api_cmd_type {
+ API_CMD_WRITE = 0,
+};
+
+enum api_cmd_bypass {
+ NO_BYPASS = 0,
+ BYPASS = 1,
+};
+
+enum api_cmd_xor_chk_level {
+ XOR_CHK_DIS = 0,
+
+ XOR_CHK_ALL = 3,
+};
+
+static u8 xor_chksum_set(void *data)
+{
+ int idx;
+ u8 *val, checksum = 0;
+
+ val = data;
+
+ for (idx = 0; idx < 7; idx++)
+ checksum ^= val[idx];
+
+ return checksum;
+}
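+
+/* Example: for the 7 bytes 0x01 0x02 0x04 0x08 0x10 0x20 0x40 the
+ * checksum is 0x7F; it is written into the top byte of the 64-bit
+ * descriptor/ctrl word so that XORing all eight bytes yields zero.
+ */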
+
+static void set_prod_idx(struct hinic_api_cmd_chain *chain)
+{
+ enum hinic_api_cmd_chain_type chain_type = chain->chain_type;
+ struct hinic_hwif *hwif = chain->hwif;
+ u32 addr, prod_idx;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type);
+ prod_idx = hinic_hwif_read_reg(hwif, addr);
+
+ prod_idx = HINIC_API_CMD_PI_CLEAR(prod_idx, IDX);
+
+ prod_idx |= HINIC_API_CMD_PI_SET(chain->prod_idx, IDX);
+
+ hinic_hwif_write_reg(hwif, addr, prod_idx);
+}
+
+static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain)
+{
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwif, addr);
+
+ return HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
+}
+
+static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain)
+{
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwif, addr);
+
+ dev_err(&chain->hwif->pdev->dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n",
+ chain->chain_type, HINIC_API_CMD_STATUS_GET(val, CPLD_ERR),
+ HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR),
+ HINIC_API_CMD_STATUS_GET(val, FSM));
+
+ dev_err(&chain->hwif->pdev->dev, "Chain hw current ci: 0x%x\n",
+ HINIC_API_CMD_STATUS_GET(val, CONS_IDX));
+
+ addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwif, addr);
+ dev_err(&chain->hwif->pdev->dev, "Chain hw current pi: 0x%x\n", val);
+}
+
+/**
+ * chain_busy - check if the chain is still processing last requests
+ * @chain: chain to check
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int chain_busy(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u32 prod_idx;
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ chain->cons_idx = get_hw_cons_idx(chain);
+ prod_idx = chain->prod_idx;
+
+		/* check for space for a new command */
+ if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) {
+ dev_err(&pdev->dev, "API CMD chain %d is busy, cons_idx: %d, prod_idx: %d\n",
+ chain->chain_type, chain->cons_idx,
+ chain->prod_idx);
+ dump_api_chain_reg(chain);
+ return -EBUSY;
+ }
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unknown API CMD Chain type\n");
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * get_cell_data_size - get the data size of a specific cell type
+ * @type: chain type
+ *
+ * Return the data (Desc + Address) size in the cell
+ **/
+static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type)
+{
+ u8 cell_data_size = 0;
+
+ switch (type) {
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
+ API_CMD_CELL_DATA_ADDR_SIZE,
+ API_CMD_CELL_ALIGNMENT);
+ break;
+ default:
+ break;
+ }
+
+ return cell_data_size;
+}
+
+/**
+ * prepare_cell_ctrl - prepare the ctrl of the cell for the command
+ * @cell_ctrl: the control of the cell to set the control value into it
+ * @data_size: the size of the data in the cell
+ **/
+static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size)
+{
+ u8 chksum;
+ u64 ctrl;
+
+ ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(data_size), DATA_SZ) |
+ HINIC_API_CMD_CELL_CTRL_SET(RD_DMA_ATTR_DEFAULT, RD_DMA_ATTR) |
+ HINIC_API_CMD_CELL_CTRL_SET(WR_DMA_ATTR_DEFAULT, WR_DMA_ATTR);
+
+ chksum = xor_chksum_set(&ctrl);
+
+ ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM);
+
+ /* The data in the HW should be in Big Endian Format */
+ *cell_ctrl = cpu_to_be64(ctrl);
+}
+
+/**
+ * prepare_api_cmd - prepare API CMD command
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ **/
+static void prepare_api_cmd(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest,
+ void *cmd, u16 cmd_size)
+{
+ struct hinic_api_cmd_cell *cell = chain->curr_node;
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ cell_ctxt = &chain->cell_ctxt[chain->prod_idx];
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ cell->desc = HINIC_API_CMD_DESC_SET(SGE_DATA, API_TYPE) |
+ HINIC_API_CMD_DESC_SET(API_CMD_WRITE, RD_WR) |
+ HINIC_API_CMD_DESC_SET(NO_BYPASS, MGMT_BYPASS);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "unknown Chain type\n");
+ return;
+ }
+
+ cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) |
+ HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);
+
+ cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc),
+ XOR_CHKSUM);
+
+ /* The data in the HW should be in Big Endian Format */
+ cell->desc = cpu_to_be64(cell->desc);
+
+ memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
+}
+
+/**
+ * prepare_cell - prepare cell ctrl and cmd in the current cell
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ **/
+static void prepare_cell(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest,
+ void *cmd, u16 cmd_size)
+{
+ struct hinic_api_cmd_cell *curr_node = chain->curr_node;
+ u16 data_size = get_cell_data_size(chain->chain_type);
+
+ prepare_cell_ctrl(&curr_node->ctrl, data_size);
+ prepare_api_cmd(chain, dest, cmd, cmd_size);
+}
+
+static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain)
+{
+ chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
+}
+
+/**
+ * api_cmd_status_update - update the status in the chain struct
+ * @chain: chain to update
+ **/
+static void api_cmd_status_update(struct hinic_api_cmd_chain *chain)
+{
+ enum hinic_api_cmd_chain_type chain_type;
+ struct hinic_api_cmd_status *wb_status;
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u64 status_header;
+ u32 status;
+
+ wb_status = chain->wb_status;
+ status_header = be64_to_cpu(wb_status->header);
+
+ status = be32_to_cpu(wb_status->status);
+ if (HINIC_API_CMD_STATUS_GET(status, CHKSUM_ERR)) {
+ dev_err(&pdev->dev, "API CMD status: Xor check error\n");
+ return;
+ }
+
+ chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
+ if (chain_type >= HINIC_API_CMD_MAX) {
+ dev_err(&pdev->dev, "unknown API CMD Chain %d\n", chain_type);
+ return;
+ }
+
+ chain->cons_idx = HINIC_API_CMD_STATUS_GET(status, CONS_IDX);
+}
+
+/**
+ * wait_for_status_poll - wait for write to api cmd command to complete
+ * @chain: the chain of the command
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int wait_for_status_poll(struct hinic_api_cmd_chain *chain)
+{
+ int err = -ETIMEDOUT;
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
+ do {
+ api_cmd_status_update(chain);
+
+		/* wait for CI to be updated - a sign of completion */
+ if (chain->cons_idx == chain->prod_idx) {
+ err = 0;
+ break;
+ }
+
+ msleep(20);
+ } while (time_before(jiffies, end));
+
+ return err;
+}
+
+/**
+ * wait_for_api_cmd_completion - wait for command to complete
+ * @chain: chain for the command
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ err = wait_for_status_poll(chain);
+ if (err) {
+ dev_err(&pdev->dev, "API CMD Poll status timeout\n");
+ dump_api_chain_reg(chain);
+ break;
+ }
+ break;
+
+ default:
+ dev_err(&pdev->dev, "unknown API CMD Chain type\n");
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * api_cmd - API CMD command
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int api_cmd(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest, u8 *cmd, u16 cmd_size)
+{
+ struct hinic_api_cmd_cell_ctxt *ctxt;
+ int err;
+
+ down(&chain->sem);
+ if (chain_busy(chain)) {
+ up(&chain->sem);
+ return -EBUSY;
+ }
+
+ prepare_cell(chain, dest, cmd, cmd_size);
+ cmd_chain_prod_idx_inc(chain);
+
+	wmb();	/* increment PI before issuing the command */
+
+ set_prod_idx(chain); /* issue the command */
+
+ ctxt = &chain->cell_ctxt[chain->prod_idx];
+
+ chain->curr_node = ctxt->cell_vaddr;
+
+ err = wait_for_api_cmd_completion(chain);
+
+ up(&chain->sem);
+ return err;
+}
+
+/**
+ * hinic_api_cmd_write - Write API CMD command
+ * @chain: chain for write command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @size: the command size
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest, u8 *cmd, u16 size)
+{
+ /* Verify the chain type */
+ if (chain->chain_type == HINIC_API_CMD_WRITE_TO_MGMT_CPU)
+ return api_cmd(chain, dest, cmd, size);
+
+ return -EINVAL;
+}
+
+/**
+ * api_cmd_hw_restart - restart the chain in the HW
+ * @chain: the API CMD specific chain to restart
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwif;
+ int err = -ETIMEDOUT;
+ unsigned long end;
+ u32 reg_addr, val;
+
+ /* Read Modify Write */
+ reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(hwif, reg_addr);
+
+ val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART);
+ val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART);
+
+ hinic_hwif_write_reg(hwif, reg_addr, val);
+
+ end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
+ do {
+ val = hinic_hwif_read_reg(hwif, reg_addr);
+
+ if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) {
+ err = 0;
+ break;
+ }
+
+ msleep(20);
+ } while (time_before(jiffies, end));
+
+ return err;
+}
+
+/**
+ * api_cmd_ctrl_init - set the control register of a chain
+ * @chain: the API CMD specific chain to set control register for
+ **/
+static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwif;
+ u32 addr, ctrl;
+ u16 cell_size;
+
+ /* Read Modify Write */
+ addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
+
+ cell_size = API_CMD_CELL_SIZE_VAL(chain->cell_size);
+
+ ctrl = hinic_hwif_read_reg(hwif, addr);
+
+ ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
+
+ ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, XOR_ERR) |
+ HINIC_API_CMD_CHAIN_CTRL_SET(XOR_CHK_ALL, XOR_CHK_EN) |
+ HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE);
+
+ hinic_hwif_write_reg(hwif, addr, ctrl);
+}
+
+/**
+ * api_cmd_set_status_addr - set the status address of a chain in the HW
+ * @chain: the API CMD specific chain to set in HW status address for
+ **/
+static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type);
+ val = upper_32_bits(chain->wb_status_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type);
+ val = lower_32_bits(chain->wb_status_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_set_num_cells - set the number of cells of a chain in the HW
+ * @chain: the API CMD specific chain to set in HW the number of cells for
+ **/
+static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type);
+ val = chain->num_cells;
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_head_init - set the head of a chain in the HW
+ * @chain: the API CMD specific chain to set in HW the head for
+ **/
+static void api_cmd_head_init(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type);
+ val = upper_32_bits(chain->head_cell_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type);
+ val = lower_32_bits(chain->head_cell_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_chain_hw_clean - clean the HW
+ * @chain: the API CMD specific chain
+ **/
+static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwif;
+ u32 addr, ctrl;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
+
+ ctrl = hinic_hwif_read_reg(hwif, addr);
+ ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
+
+ hinic_hwif_write_reg(hwif, addr, ctrl);
+}
+
+/**
+ * api_cmd_chain_hw_init - initialize the chain in the HW
+ * @chain: the API CMD specific chain to initialize in HW
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ api_cmd_chain_hw_clean(chain);
+
+ api_cmd_set_status_addr(chain);
+
+ err = api_cmd_hw_restart(chain);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to restart API CMD HW\n");
+ return err;
+ }
+
+ api_cmd_ctrl_init(chain);
+ api_cmd_set_num_cells(chain);
+ api_cmd_head_init(chain);
+ return 0;
+}
+
+/**
+ * free_cmd_buf - free the dma buffer of API CMD command
+ * @chain: the API CMD specific chain of the cmd
+ * @cell_idx: the cell index of the cmd
+ **/
+static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ dma_free_coherent(&pdev->dev, API_CMD_BUF_SIZE,
+ cell_ctxt->api_cmd_vaddr,
+ cell_ctxt->api_cmd_paddr);
+}
+
+/**
+ * alloc_cmd_buf - allocate a dma buffer for API CMD command
+ * @chain: the API CMD specific chain for the cmd
+ * @cell: the cell in the HW for the cmd
+ * @cell_idx: the index of the cell
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell *cell, int cell_idx)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ dma_addr_t cmd_paddr;
+ u8 *cmd_vaddr;
+ int err = 0;
+
+ cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
+ &cmd_paddr, GFP_KERNEL);
+ if (!cmd_vaddr)
+ return -ENOMEM;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ cell_ctxt->api_cmd_vaddr = cmd_vaddr;
+ cell_ctxt->api_cmd_paddr = cmd_paddr;
+
+ /* set the cmd DMA address in the cell */
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ /* The data in the HW should be in Big Endian Format */
+ cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
+ free_cmd_buf(chain, cell_idx);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * api_cmd_create_cell - create API CMD cell for specific chain
+ * @chain: the API CMD specific chain to create its cell
+ * @cell_idx: the index of the cell to create
+ * @pre_node: previous cell
+ * @node_vaddr: the returned virt addr of the cell
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
+ int cell_idx,
+ struct hinic_api_cmd_cell *pre_node,
+ struct hinic_api_cmd_cell **node_vaddr)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_api_cmd_cell *node;
+ dma_addr_t node_paddr;
+ int err;
+
+ node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
+ GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->read.hw_wb_resp_paddr = 0;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+ cell_ctxt->cell_vaddr = node;
+ cell_ctxt->cell_paddr = node_paddr;
+
+ if (!pre_node) {
+ chain->head_cell_paddr = node_paddr;
+ chain->head_node = node;
+ } else {
+ /* The data in the HW should be in Big Endian Format */
+ pre_node->next_cell_paddr = cpu_to_be64(node_paddr);
+ }
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ err = alloc_cmd_buf(chain, node, cell_idx);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate cmd buffer\n");
+ goto err_alloc_cmd_buf;
+ }
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
+ err = -EINVAL;
+ goto err_alloc_cmd_buf;
+ }
+
+ *node_vaddr = node;
+ return 0;
+
+err_alloc_cmd_buf:
+ dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr);
+ return err;
+}
+
+/**
+ * api_cmd_destroy_cell - destroy API CMD cell of specific chain
+ * @chain: the API CMD specific chain to destroy its cell
+ * @cell_idx: the cell to destroy
+ **/
+static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain,
+ int cell_idx)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_api_cmd_cell *node;
+ dma_addr_t node_paddr;
+ size_t node_size;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ node = cell_ctxt->cell_vaddr;
+ node_paddr = cell_ctxt->cell_paddr;
+ node_size = chain->cell_size;
+
+ if (cell_ctxt->api_cmd_vaddr) {
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ free_cmd_buf(chain, cell_idx);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
+ break;
+ }
+
+ dma_free_coherent(&pdev->dev, node_size, node,
+ node_paddr);
+ }
+}
+
+/**
+ * api_cmd_destroy_cells - destroy API CMD cells of specific chain
+ * @chain: the API CMD specific chain to destroy its cells
+ * @num_cells: number of cells to destroy
+ **/
+static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain,
+ int num_cells)
+{
+ int cell_idx;
+
+ for (cell_idx = 0; cell_idx < num_cells; cell_idx++)
+ api_cmd_destroy_cell(chain, cell_idx);
+}
+
+/**
+ * api_cmd_create_cells - create API CMD cells for specific chain
+ * @chain: the API CMD specific chain
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL;
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int err, cell_idx;
+
+ for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) {
+ err = api_cmd_create_cell(chain, cell_idx, pre_node, &node);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to create API CMD cell\n");
+ goto err_create_cell;
+ }
+
+ pre_node = node;
+ }
+
+ /* set the final node to point to the start */
+ node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);
+
+ /* set the current node to be the head */
+ chain->curr_node = chain->head_node;
+ return 0;
+
+err_create_cell:
+ api_cmd_destroy_cells(chain, cell_idx);
+ return err;
+}
+
+/**
+ * api_chain_init - initialize API CMD specific chain
+ * @chain: the API CMD specific chain to initialize
+ * @attr: attributes to set in the chain
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int api_chain_init(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_chain_attr *attr)
+{
+ struct hinic_hwif *hwif = attr->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ chain->hwif = hwif;
+ chain->chain_type = attr->chain_type;
+ chain->num_cells = attr->num_cells;
+ chain->cell_size = attr->cell_size;
+
+ chain->prod_idx = 0;
+ chain->cons_idx = 0;
+
+ sema_init(&chain->sem, 1);
+
+ chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells,
+ sizeof(*chain->cell_ctxt), GFP_KERNEL);
+ if (!chain->cell_ctxt)
+ return -ENOMEM;
+
+ chain->wb_status = dma_alloc_coherent(&pdev->dev,
+ sizeof(*chain->wb_status),
+ &chain->wb_status_paddr,
+ GFP_KERNEL);
+ if (!chain->wb_status) {
+ dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * api_chain_free - free API CMD specific chain
+ * @chain: the API CMD specific chain to free
+ **/
+static void api_chain_free(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ dma_free_coherent(&pdev->dev, sizeof(*chain->wb_status),
+ chain->wb_status, chain->wb_status_paddr);
+}
+
+/**
+ * api_cmd_create_chain - create API CMD specific chain
+ * @attr: attributes to set in the chain
+ *
+ * Return the created chain
+ **/
+static struct hinic_api_cmd_chain *
+ api_cmd_create_chain(struct hinic_api_cmd_chain_attr *attr)
+{
+ struct hinic_hwif *hwif = attr->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_api_cmd_chain *chain;
+ int err;
+
+ if (attr->num_cells & (attr->num_cells - 1)) {
+ dev_err(&pdev->dev, "Invalid number of cells, must be power of 2\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
+ if (!chain)
+ return ERR_PTR(-ENOMEM);
+
+ err = api_chain_init(chain, attr);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize chain\n");
+ return ERR_PTR(err);
+ }
+
+ err = api_cmd_create_cells(chain);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to create cells for API CMD chain\n");
+ goto err_create_cells;
+ }
+
+ err = api_cmd_chain_hw_init(chain);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize chain HW\n");
+ goto err_chain_hw_init;
+ }
+
+ return chain;
+
+err_chain_hw_init:
+ api_cmd_destroy_cells(chain, chain->num_cells);
+
+err_create_cells:
+ api_chain_free(chain);
+ return ERR_PTR(err);
+}
+
+/**
+ * api_cmd_destroy_chain - destroy API CMD specific chain
+ * @chain: the API CMD specific chain to destroy
+ **/
+static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain)
+{
+ api_cmd_chain_hw_clean(chain);
+ api_cmd_destroy_cells(chain, chain->num_cells);
+ api_chain_free(chain);
+}
+
+/**
+ * hinic_api_cmd_init - Initialize all the API CMD chains
+ * @chain: the API CMD chains that are initialized
+ * @hwif: the hardware interface of a pci function device
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain,
+ struct hinic_hwif *hwif)
+{
+ enum hinic_api_cmd_chain_type type, chain_type;
+ struct hinic_api_cmd_chain_attr attr;
+ struct pci_dev *pdev = hwif->pdev;
+ size_t hw_cell_sz;
+ int err;
+
+ hw_cell_sz = sizeof(struct hinic_api_cmd_cell);
+
+ attr.hwif = hwif;
+ attr.num_cells = API_CHAIN_NUM_CELLS;
+ attr.cell_size = API_CMD_CELL_SIZE(hw_cell_sz);
+
+ chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
+ for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
+ attr.chain_type = chain_type;
+
+ if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
+ continue;
+
+ chain[chain_type] = api_cmd_create_chain(&attr);
+ if (IS_ERR(chain[chain_type])) {
+ dev_err(&pdev->dev, "Failed to create chain %d\n",
+ chain_type);
+ err = PTR_ERR(chain[chain_type]);
+ goto err_create_chain;
+ }
+ }
+
+ return 0;
+
+err_create_chain:
+ type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
+ for ( ; type < chain_type; type++) {
+ if (type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
+ continue;
+
+ api_cmd_destroy_chain(chain[type]);
+ }
+
+ return err;
+}
+
+/**
+ * hinic_api_cmd_free - free the API CMD chains
+ * @chain: the API CMD chains that are freed
+ **/
+void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain)
+{
+ enum hinic_api_cmd_chain_type chain_type;
+
+ chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
+ for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
+ if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
+ continue;
+
+ api_cmd_destroy_chain(chain[chain_type]);
+ }
+}
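
Note: the chain lifecycle above reduces to init/write/free on the caller side.
A minimal sketch, assuming an hwif pointer already in scope and a hypothetical
64-byte payload (the three exported functions and the chain-type index come
from this file; the HINIC_NODE_ID_MGMT destination is declared in
hinic_hw_if.h):

	struct hinic_api_cmd_chain *chains[HINIC_API_CMD_MAX];
	u8 msg[64] = { 0 };	/* hypothetical payload */
	int err;

	err = hinic_api_cmd_init(chains, hwif);	/* create + HW-init chains */
	if (err)
		return err;

	err = hinic_api_cmd_write(chains[HINIC_API_CMD_WRITE_TO_MGMT_CPU],
				  HINIC_NODE_ID_MGMT, msg, sizeof(msg));

	hinic_api_cmd_free(chains);	/* HW-clean + destroy all chains */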
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
new file mode 100644
index 0000000000..6d1654b050
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_API_CMD_H
+#define HINIC_HW_API_CMD_H
+
+#include <linux/types.h>
+#include <linux/semaphore.h>
+
+#include "hinic_hw_if.h"
+
+#define HINIC_API_CMD_PI_IDX_SHIFT 0
+
+#define HINIC_API_CMD_PI_IDX_MASK 0xFFFFFF
+
+#define HINIC_API_CMD_PI_SET(val, member) \
+ (((u32)(val) & HINIC_API_CMD_PI_##member##_MASK) << \
+ HINIC_API_CMD_PI_##member##_SHIFT)
+
+#define HINIC_API_CMD_PI_CLEAR(val, member) \
+ ((val) & (~(HINIC_API_CMD_PI_##member##_MASK \
+ << HINIC_API_CMD_PI_##member##_SHIFT)))
+
+#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
+
+#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1
+
+#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \
+ (((u32)(val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \
+ HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)
+
+#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \
+ (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
+ HINIC_API_CMD_CHAIN_REQ_##member##_MASK)
+
+#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
+ ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \
+ << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
+
+#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_SHIFT 1
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
+#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
+#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
+#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
+
+#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_MASK 0x1
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1
+#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1
+#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3
+#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3
+
+#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \
+ (((u32)(val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
+ HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
+
+#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
+ ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \
+ << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
+
+#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_SHIFT 0
+#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_SHIFT 16
+#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_SHIFT 24
+#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
+
+#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_MASK 0x3F
+#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_MASK 0x3F
+#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_MASK 0x3F
+#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFF
+
+#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \
+ ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \
+ HINIC_API_CMD_CELL_CTRL_##member##_SHIFT)
+
+#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0
+#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1
+#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
+#define HINIC_API_CMD_DESC_DEST_SHIFT 32
+#define HINIC_API_CMD_DESC_SIZE_SHIFT 40
+#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
+
+#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1
+#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1
+#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1
+#define HINIC_API_CMD_DESC_DEST_MASK 0x1F
+#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FF
+#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFF
+
+#define HINIC_API_CMD_DESC_SET(val, member) \
+ ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \
+ HINIC_API_CMD_DESC_##member##_SHIFT)
+
+#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
+
+#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFF
+
+#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \
+ (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
+ HINIC_API_CMD_STATUS_HEADER_##member##_MASK)
+
+#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0
+#define HINIC_API_CMD_STATUS_FSM_SHIFT 24
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
+#define HINIC_API_CMD_STATUS_CPLD_ERR_SHIFT 30
+
+#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF
+#define HINIC_API_CMD_STATUS_FSM_MASK 0xFU
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3
+#define HINIC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U
+
+#define HINIC_API_CMD_STATUS_GET(val, member) \
+ (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \
+ HINIC_API_CMD_STATUS_##member##_MASK)
+
+enum hinic_api_cmd_chain_type {
+ HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2,
+
+ HINIC_API_CMD_MAX,
+};
+
+struct hinic_api_cmd_chain_attr {
+ struct hinic_hwif *hwif;
+ enum hinic_api_cmd_chain_type chain_type;
+
+ u32 num_cells;
+ u16 cell_size;
+};
+
+struct hinic_api_cmd_status {
+ u64 header;
+ u32 status;
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 rsvd2;
+ u64 rsvd3;
+};
+
+/* HW struct */
+struct hinic_api_cmd_cell {
+ u64 ctrl;
+
+ /* address is 64 bit in HW struct */
+ u64 next_cell_paddr;
+
+ u64 desc;
+
+ /* HW struct */
+ union {
+ struct {
+ u64 hw_cmd_paddr;
+ } write;
+
+ struct {
+ u64 hw_wb_resp_paddr;
+ u64 hw_cmd_paddr;
+ } read;
+ };
+};
+
+struct hinic_api_cmd_cell_ctxt {
+ dma_addr_t cell_paddr;
+ struct hinic_api_cmd_cell *cell_vaddr;
+
+ dma_addr_t api_cmd_paddr;
+ u8 *api_cmd_vaddr;
+};
+
+struct hinic_api_cmd_chain {
+ struct hinic_hwif *hwif;
+ enum hinic_api_cmd_chain_type chain_type;
+
+ u32 num_cells;
+ u16 cell_size;
+
+ /* HW members in 24 bit format */
+ u32 prod_idx;
+ u32 cons_idx;
+
+ struct semaphore sem;
+
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+
+ dma_addr_t wb_status_paddr;
+ struct hinic_api_cmd_status *wb_status;
+
+ dma_addr_t head_cell_paddr;
+ struct hinic_api_cmd_cell *head_node;
+ struct hinic_api_cmd_cell *curr_node;
+};
+
+int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest, u8 *cmd, u16 size);
+
+int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain,
+ struct hinic_hwif *hwif);
+
+void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain);
+
+#endif
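
Note: every register field in this header follows the same SET/GET/CLEAR
pattern: mask the value, then shift it into position (and the reverse for
GET). A minimal sketch of composing and updating a chain-control word with
the macros above (the field values are hypothetical):

	u32 ctrl = 0;

	/* enable AEQ notification on AEQ 0 */
	ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, AEQE_EN) |
		HINIC_API_CMD_CHAIN_CTRL_SET(0, AEQ_ID);

	/* to change a field, clear it first, then OR in the new value */
	ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN);
	ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN);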
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
new file mode 100644
index 0000000000..d39eec9c62
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -0,0 +1,963 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include <linux/sizes.h>
+#include <linux/atomic.h>
+#include <linux/log2.h>
+#include <linux/io.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <asm/byteorder.h>
+#include <asm/barrier.h>
+
+#include "hinic_common.h"
+#include "hinic_hw_if.h"
+#include "hinic_hw_eqs.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw_wqe.h"
+#include "hinic_hw_wq.h"
+#include "hinic_hw_cmdq.h"
+#include "hinic_hw_io.h"
+#include "hinic_hw_dev.h"
+
+#define CMDQ_CEQE_TYPE_SHIFT 0
+
+#define CMDQ_CEQE_TYPE_MASK 0x7
+
+#define CMDQ_CEQE_GET(val, member) \
+ (((val) >> CMDQ_CEQE_##member##_SHIFT) \
+ & CMDQ_CEQE_##member##_MASK)
+
+#define CMDQ_WQE_ERRCODE_VAL_SHIFT 20
+
+#define CMDQ_WQE_ERRCODE_VAL_MASK 0xF
+
+#define CMDQ_WQE_ERRCODE_GET(val, member) \
+ (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
+ & CMDQ_WQE_ERRCODE_##member##_MASK)
+
+#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
+
+#define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi))
+
+#define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
+
+#define CMDQ_WQE_COMPLETED(ctrl_info) \
+ HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
+
+#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
+
+#define CMDQ_DB_OFF SZ_2K
+
+#define CMDQ_WQEBB_SIZE 64
+#define CMDQ_WQE_SIZE 64
+#define CMDQ_DEPTH SZ_4K
+
+#define CMDQ_WQ_PAGE_SIZE SZ_256K
+
+#define WQE_LCMD_SIZE 64
+#define WQE_SCMD_SIZE 64
+
+#define COMPLETE_LEN 3
+
+#define CMDQ_TIMEOUT 1000
+
+#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
+
+#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
+ struct hinic_cmdqs, cmdq[0])
+
+#define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \
+ struct hinic_func_to_io, \
+ cmdqs)
+
+enum completion_format {
+ COMPLETE_DIRECT = 0,
+ COMPLETE_SGE = 1,
+};
+
+enum data_format {
+ DATA_SGE = 0,
+ DATA_DIRECT = 1,
+};
+
+enum bufdesc_len {
+ BUFDESC_LCMD_LEN = 2, /* 16 bytes = 2 8-byte units */
+ BUFDESC_SCMD_LEN = 3, /* 24 bytes = 3 8-byte units */
+};
+
+enum ctrl_sect_len {
+ CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) in 1 8-byte unit */
+ CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) in 2 8-byte units */
+};
+
+enum cmdq_scmd_type {
+ CMDQ_SET_ARM_CMD = 2,
+};
+
+enum cmdq_cmd_type {
+ CMDQ_CMD_SYNC_DIRECT_RESP = 0,
+ CMDQ_CMD_SYNC_SGE_RESP = 1,
+};
+
+enum completion_request {
+ NO_CEQ = 0,
+ CEQ_SET = 1,
+};
+
+/**
+ * hinic_alloc_cmdq_buf - alloc buffer for sending command
+ * @cmdqs: the cmdqs
+ * @cmdq_buf: the buffer returned in this struct
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
+ struct hinic_cmdq_buf *cmdq_buf)
+{
+ struct hinic_hwif *hwif = cmdqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
+ &cmdq_buf->dma_addr);
+ if (!cmdq_buf->buf) {
+ dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_free_cmdq_buf - free buffer
+ * @cmdqs: the cmdqs
+ * @cmdq_buf: the buffer to free that is in this struct
+ **/
+void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
+ struct hinic_cmdq_buf *cmdq_buf)
+{
+ dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
+}
+
+static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
+{
+ unsigned int wqe_size = 0;
+
+ switch (len) {
+ case BUFDESC_LCMD_LEN:
+ wqe_size = WQE_LCMD_SIZE;
+ break;
+ case BUFDESC_SCMD_LEN:
+ wqe_size = WQE_SCMD_SIZE;
+ break;
+ }
+
+ return wqe_size;
+}
+
+static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
+ struct hinic_cmdq_buf *buf_out)
+{
+ struct hinic_sge_resp *sge_resp = &completion->sge_resp;
+
+ hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
+}
+
+static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
+ enum hinic_cmd_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
+ enum completion_format complete_format,
+ enum data_format data_format,
+ enum bufdesc_len buf_len)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd;
+ enum ctrl_sect_len ctrl_len;
+ struct hinic_ctrl *ctrl;
+ u32 saved_data;
+
+ if (data_format == DATA_SGE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+
+ wqe_lcmd->status.status_info = 0;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_len = CTRL_SECT_LEN;
+ } else {
+ wqe_scmd = &wqe->direct_wqe.wqe_scmd;
+
+ wqe_scmd->status.status_info = 0;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_len = CTRL_DIRECT_SECT_LEN;
+ }
+
+ ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) |
+ HINIC_CMDQ_CTRL_SET(cmd, CMD) |
+ HINIC_CMDQ_CTRL_SET(mod, MOD) |
+ HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE);
+
+ CMDQ_WQE_HEADER(wqe)->header_info =
+ HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
+ HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
+ HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
+ HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
+ HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
+ HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
+ HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED);
+
+ saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
+ saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);
+
+ if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM)
+ CMDQ_WQE_HEADER(wqe)->saved_data |=
+ HINIC_SAVED_DATA_SET(1, ARM);
+ else
+ CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;
+}
+
+static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
+ struct hinic_cmdq_buf *buf_in)
+{
+ hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
+}
+
+static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
+ void *buf_in, u32 in_size)
+{
+ struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;
+
+ wqe_scmd->buf_desc.buf_len = in_size;
+ memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
+}
+
+static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ struct hinic_cmdq_buf *buf_in,
+ struct hinic_cmdq_buf *buf_out, int wrapped,
+ enum hinic_cmd_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+ enum completion_format complete_format;
+
+ switch (cmd_type) {
+ case CMDQ_CMD_SYNC_SGE_RESP:
+ complete_format = COMPLETE_SGE;
+ cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
+ break;
+ case CMDQ_CMD_SYNC_DIRECT_RESP:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
+ prod_idx, complete_format, DATA_SGE,
+ BUFDESC_LCMD_LEN);
+
+ cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
+}
+
+static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ void *buf_in, u16 in_size,
+ struct hinic_cmdq_buf *buf_out, int wrapped,
+ enum hinic_cmd_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
+{
+ struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
+ enum completion_format complete_format;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd;
+
+ wqe_scmd = &direct_wqe->wqe_scmd;
+
+ switch (cmd_type) {
+ case CMDQ_CMD_SYNC_SGE_RESP:
+ complete_format = COMPLETE_SGE;
+ cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
+ break;
+ case CMDQ_CMD_SYNC_DIRECT_RESP:
+ complete_format = COMPLETE_DIRECT;
+ wqe_scmd->completion.direct_resp = 0;
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
+ complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);
+
+ cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
+}
+
+static void cmdq_wqe_fill(void *dst, void *src)
+{
+ memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
+ CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
+
+ wmb(); /* The first 8 bytes should be written last */
+
+ *(u64 *)dst = *(u64 *)src;
+}
+
+static void cmdq_fill_db(u32 *db_info,
+ enum hinic_cmdq_type cmdq_type, u16 prod_idx)
+{
+ *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
+ HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) |
+ HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
+ HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
+}
+
+static void cmdq_set_db(struct hinic_cmdq *cmdq,
+ enum hinic_cmdq_type cmdq_type, u16 prod_idx)
+{
+ u32 db_info;
+
+ cmdq_fill_db(&db_info, cmdq_type, prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ db_info = cpu_to_be32(db_info);
+
+ wmb(); /* write all before the doorbell */
+
+ writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
+}
+
+static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmdq_buf *buf_in,
+ u64 *resp)
+{
+ struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
+ u16 curr_prod_idx, next_prod_idx;
+ int errcode, wrapped, num_wqebbs;
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_hw_wqe *hw_wqe;
+ struct completion done;
+
+ /* Keep doorbell index correct. bh lock - the ceq handler runs in a tasklet. */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* WQE_SIZE = WQEBB_SIZE, so we get the wq element itself, not a shadow copy */
+ hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
+ if (IS_ERR(hw_wqe)) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
+
+ wrapped = cmdq->wrapped;
+
+ num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmdq->errcode[curr_prod_idx] = &errcode;
+
+ init_completion(&done);
+ cmdq->done[curr_prod_idx] = &done;
+
+ cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
+ wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
+ curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);
+
+ /* the CMDQ WQE is not a shadow copy, so it is written directly to the wq */
+ cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
+
+ cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ if (!wait_for_completion_timeout(&done,
+ msecs_to_jiffies(CMDQ_TIMEOUT))) {
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ if (cmdq->errcode[curr_prod_idx] == &errcode)
+ cmdq->errcode[curr_prod_idx] = NULL;
+
+ if (cmdq->done[curr_prod_idx] == &done)
+ cmdq->done[curr_prod_idx] = NULL;
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ hinic_dump_ceq_info(cmdq->hwdev);
+ return -ETIMEDOUT;
+ }
+
+ smp_rmb(); /* read error code after completion */
+
+ if (resp) {
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;
+
+ *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
+ }
+
+ if (errcode != 0)
+ return -EFAULT;
+
+ return 0;
+}
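
Note: the wrap handling above is simple modular arithmetic on the producer
index. With CMDQ_WQEBB_SIZE = WQE_LCMD_SIZE = 64 each command occupies one
WQEBB, so a worked example at the end of a CMDQ_DEPTH = 4096 ring is:

	num_wqebbs    = ALIGN(64, 64) / 64 = 1
	curr_prod_idx = 4095
	next_prod_idx = 4096 >= q_depth -> toggle wrapped, next_prod_idx = 0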
+
+static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
+ u16 in_size)
+{
+ struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
+ u16 curr_prod_idx, next_prod_idx;
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_hw_wqe *hw_wqe;
+ int wrapped, num_wqebbs;
+
+ /* Keep doorbell index correct */
+ spin_lock(&cmdq->cmdq_lock);
+
+ /* WQE_SIZE = WQEBB_SIZE, so we get the wq element itself, not a shadow copy */
+ hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
+ if (IS_ERR(hw_wqe)) {
+ spin_unlock(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
+
+ wrapped = cmdq->wrapped;
+
+ num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
+ in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
+ HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);
+
+ /* the cmdq wqe is not a shadow copy, so it is written directly to the wq */
+ cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
+
+ cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
+
+ spin_unlock(&cmdq->cmdq_lock);
+ return 0;
+}
+
+static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
+{
+ if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hinic_cmdq_direct_resp - send command with direct data as resp
+ * @cmdqs: the cmdqs
+ * @mod: module on the card that will handle the command
+ * @cmd: the command
+ * @buf_in: the buffer for the command
+ * @resp: the response to return
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmdq_buf *buf_in, u64 *resp)
+{
+ struct hinic_hwif *hwif = cmdqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ err = cmdq_params_valid(buf_in);
+ if (err) {
+ dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
+ return err;
+ }
+
+ return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
+ mod, cmd, buf_in, resp);
+}
+
+/**
+ * hinic_set_arm_bit - set the arm bit to re-enable the interrupt
+ * @cmdqs: the cmdqs
+ * @q_type: type of queue to set the arm bit for
+ * @q_id: the queue number
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
+ enum hinic_set_arm_qtype q_type, u32 q_id)
+{
+ struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
+ struct hinic_hwif *hwif = cmdqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_cmdq_arm_bit arm_bit;
+ int err;
+
+ arm_bit.q_type = q_type;
+ arm_bit.q_id = q_id;
+
+ err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe)
+{
+ u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
+ unsigned int bufdesc_len, wqe_size;
+ struct hinic_ctrl *ctrl;
+
+ bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
+ wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
+ if (wqe_size == WQE_LCMD_SIZE) {
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+
+ ctrl = &wqe_lcmd->ctrl;
+ } else {
+ struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd;
+
+ wqe_scmd = &direct_wqe->wqe_scmd;
+ ctrl = &wqe_scmd->ctrl;
+ }
+
+ /* clear HW busy bit */
+ ctrl->ctrl_info = 0;
+
+ wmb(); /* make the cleared ctrl_info visible before the wqe is reused */
+}
+
+/**
+ * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
+ * @cmdq: the cmdq of the arm command
+ * @wqe: the wqe of the arm command
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe)
+{
+ struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd;
+ struct hinic_ctrl *ctrl;
+ u32 ctrl_info;
+
+ wqe_scmd = &direct_wqe->wqe_scmd;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_info = be32_to_cpu(ctrl->ctrl_info);
+
+ /* HW should toggle the HW BUSY BIT */
+ if (!CMDQ_WQE_COMPLETED(ctrl_info))
+ return -EBUSY;
+
+ clear_wqe_complete_bit(cmdq, wqe);
+
+ hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
+ return 0;
+}
+
+static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
+ int errcode)
+{
+ if (cmdq->errcode[prod_idx])
+ *cmdq->errcode[prod_idx] = errcode;
+}
+
+/**
+ * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
+ * @cmdq: the cmdq of the command
+ * @cons_idx: the consumer index to update the error code for
+ * @errcode: the error code
+ **/
+static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
+ int errcode)
+{
+ u16 prod_idx = cons_idx;
+
+ spin_lock(&cmdq->cmdq_lock);
+ cmdq_update_errcode(cmdq, prod_idx, errcode);
+
+ wmb(); /* make the errcode visible before completing the request */
+
+ if (cmdq->done[prod_idx])
+ complete(cmdq->done[prod_idx]);
+ spin_unlock(&cmdq->cmdq_lock);
+}
+
+static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
+ struct hinic_cmdq_wqe *cmdq_wqe)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
+ struct hinic_status *status = &wqe_lcmd->status;
+ struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
+ int errcode;
+
+ if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
+ return -EBUSY;
+
+ dma_rmb();
+
+ errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
+
+ cmdq_sync_cmd_handler(cmdq, ci, errcode);
+
+ clear_wqe_complete_bit(cmdq, cmdq_wqe);
+ hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
+ return 0;
+}
+
+/**
+ * cmdq_ceq_handler - cmdq completion event handler
+ * @handle: private data for the handler (cmdqs)
+ * @ceqe_data: ceq element data
+ **/
+static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
+{
+ enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
+ struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
+ struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
+ struct hinic_cmdq_header *header;
+ struct hinic_hw_wqe *hw_wqe;
+ int err, set_arm = 0;
+ u32 saved_data;
+ u16 ci;
+
+ /* Read at the smallest wqe size first to determine the actual wqe size */
+ while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
+ if (IS_ERR(hw_wqe))
+ break;
+
+ header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
+ saved_data = be32_to_cpu(header->saved_data);
+
+ if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
+ /* this wqe is an arm command, so no extra re-arm is needed */
+ set_arm = 0;
+
+ if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
+ break;
+ } else {
+ set_arm = 1;
+
+ hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
+ if (IS_ERR(hw_wqe))
+ break;
+
+ if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
+ break;
+ }
+ }
+
+ if (set_arm) {
+ struct hinic_hwif *hwif = cmdqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
+ if (err)
+ dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
+ }
+}
+
+/**
+ * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
+ * @cmdq_ctxt: cmdq ctxt to initialize
+ * @cmdq: the cmdq
+ * @cmdq_pages: the memory of the queue
+ **/
+static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
+ struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_pages *cmdq_pages)
+{
+ struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+ u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
+ struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
+ struct hinic_wq *wq = cmdq->wq;
+
+ /* The data in the HW is in Big Endian Format */
+ wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
+
+ pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K);
+
+ ctxt_info->curr_wqe_page_pfn =
+ HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
+ HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
+ HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
+ HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
+ HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
+
+ if (wq->num_q_pages != 1) {
+ /* block PFN - Read Modify Write */
+ cmdq_first_block_paddr = cmdq_pages->page_paddr;
+
+ pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
+ }
+
+ ctxt_info->wq_block_pfn =
+ HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
+ HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
+
+ cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
+ cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(cmdqs->hwif);
+ cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
+}
+
+/**
+ * init_cmdq - initialize cmdq
+ * @cmdq: the cmdq
+ * @wq: the wq attached to the cmdq
+ * @q_type: the cmdq type of the cmdq
+ * @db_area: doorbell area for the cmdq
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
+ enum hinic_cmdq_type q_type, void __iomem *db_area)
+{
+ int err;
+
+ cmdq->wq = wq;
+ cmdq->cmdq_type = q_type;
+ cmdq->wrapped = 1;
+
+ spin_lock_init(&cmdq->cmdq_lock);
+
+ cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
+ if (!cmdq->done)
+ return -ENOMEM;
+
+ cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
+ wq->q_depth));
+ if (!cmdq->errcode) {
+ err = -ENOMEM;
+ goto err_errcode;
+ }
+
+ cmdq->db_base = db_area + CMDQ_DB_OFF;
+ return 0;
+
+err_errcode:
+ vfree(cmdq->done);
+ return err;
+}
+
+/**
+ * free_cmdq - Free cmdq
+ * @cmdq: the cmdq to free
+ **/
+static void free_cmdq(struct hinic_cmdq *cmdq)
+{
+ vfree(cmdq->errcode);
+ vfree(cmdq->done);
+}
+
+/**
+ * init_cmdqs_ctxt - write the cmdq ctxts to HW after initializing all cmdqs
+ * @hwdev: the NIC HW device
+ * @cmdqs: cmdqs to write the ctxts for
+ * @db_area: db_area for all the cmdqs
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
+ struct hinic_cmdqs *cmdqs, void __iomem **db_area)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ enum hinic_cmdq_type type, cmdq_type;
+ struct hinic_cmdq_ctxt *cmdq_ctxts;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdq_ctxts), GFP_KERNEL);
+ if (!cmdq_ctxts)
+ return -ENOMEM;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ cmdqs->cmdq[cmdq_type].hwdev = hwdev;
+ err = init_cmdq(&cmdqs->cmdq[cmdq_type],
+ &cmdqs->saved_wqs[cmdq_type], cmdq_type,
+ db_area[cmdq_type]);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize cmdq\n");
+ goto err_init_cmdq;
+ }
+
+ cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
+ &cmdqs->cmdq[cmdq_type],
+ &cmdqs->cmdq_pages);
+ }
+
+ /* Write the CMDQ ctxts */
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_CMDQ_CTXT_SET,
+ &cmdq_ctxts[cmdq_type],
+ sizeof(cmdq_ctxts[cmdq_type]),
+ NULL, NULL, HINIC_MGMT_MSG_SYNC);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
+ cmdq_type);
+ goto err_write_cmdq_ctxt;
+ }
+ }
+
+ devm_kfree(&pdev->dev, cmdq_ctxts);
+ return 0;
+
+err_write_cmdq_ctxt:
+ cmdq_type = HINIC_MAX_CMDQ_TYPES;
+
+err_init_cmdq:
+ for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
+ free_cmdq(&cmdqs->cmdq[type]);
+
+ devm_kfree(&pdev->dev, cmdq_ctxts);
+ return err;
+}
+
+static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 };
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+
+ hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE;
+ hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_HWCTXT_SET,
+ &hw_ioctxt, sizeof(hw_ioctxt), NULL,
+ NULL, HINIC_MGMT_MSG_SYNC);
+}
+
+/**
+ * hinic_init_cmdqs - init all cmdqs
+ * @cmdqs: cmdqs to init
+ * @hwif: HW interface for accessing cmdqs
+ * @db_area: doorbell areas for all the cmdqs
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
+ void __iomem **db_area)
+{
+ struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_hwdev *hwdev;
+ u16 max_wqe_size;
+ int err;
+
+ cmdqs->hwif = hwif;
+ cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
+ HINIC_CMDQ_BUF_SIZE,
+ HINIC_CMDQ_BUF_SIZE, 0);
+ if (!cmdqs->cmdq_buf_pool)
+ return -ENOMEM;
+
+ cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdqs->saved_wqs), GFP_KERNEL);
+ if (!cmdqs->saved_wqs) {
+ err = -ENOMEM;
+ goto err_saved_wqs;
+ }
+
+ max_wqe_size = WQE_LCMD_SIZE;
+ err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
+ HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
+ CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
+ goto err_cmdq_wqs;
+ }
+
+ hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
+ err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
+ goto err_cmdq_ctxt;
+ }
+
+ hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
+ cmdq_ceq_handler);
+
+ err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH);
+ if (err) {
+ dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n");
+ goto err_set_cmdq_depth;
+ }
+
+ return 0;
+
+err_set_cmdq_depth:
+ hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
+ free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]);
+err_cmdq_ctxt:
+ hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ HINIC_MAX_CMDQ_TYPES);
+
+err_cmdq_wqs:
+ devm_kfree(&pdev->dev, cmdqs->saved_wqs);
+
+err_saved_wqs:
+ dma_pool_destroy(cmdqs->cmdq_buf_pool);
+ return err;
+}
+
+/**
+ * hinic_free_cmdqs - free all cmdqs
+ * @cmdqs: cmdqs to free
+ **/
+void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
+{
+ struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
+ struct hinic_hwif *hwif = cmdqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ enum hinic_cmdq_type cmdq_type;
+
+ hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
+ free_cmdq(&cmdqs->cmdq[cmdq_type]);
+
+ hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ HINIC_MAX_CMDQ_TYPES);
+
+ devm_kfree(&pdev->dev, cmdqs->saved_wqs);
+
+ dma_pool_destroy(cmdqs->cmdq_buf_pool);
+}
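
Note: a caller drives the synchronous command queue through the three
exported entry points above. A minimal sketch, assuming an initialized cmdqs
pointer and a hypothetical request buffer, size and command id (only the
function names, the buffer-size limit and the timeout behaviour come from
this file):

	struct hinic_cmdq_buf cmd_buf;
	u64 resp = 0;
	int err;

	err = hinic_alloc_cmdq_buf(cmdqs, &cmd_buf);	/* from the DMA pool */
	if (err)
		return err;

	/* fill the request; size must not exceed HINIC_CMDQ_MAX_DATA_SIZE */
	memcpy(cmd_buf.buf, req, req_size);	/* hypothetical request */
	cmd_buf.size = req_size;

	/* blocks up to CMDQ_TIMEOUT (1000) ms for the completion event */
	err = hinic_cmdq_direct_resp(cmdqs, HINIC_MOD_L2NIC, cmd, &cmd_buf,
				     &resp);

	hinic_free_cmdq_buf(cmdqs, &cmd_buf);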
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
new file mode 100644
index 0000000000..ff09cf0ed5
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_CMDQ_H
+#define HINIC_CMDQ_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/pci.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_wq.h"
+
+#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
+#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT 56
+#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT 61
+#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT 62
+#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT 63
+
+#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
+#define HINIC_CMDQ_CTXT_EQ_ID_MASK 0x1F
+#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK 0x1
+#define HINIC_CMDQ_CTXT_CEQ_EN_MASK 0x1
+#define HINIC_CMDQ_CTXT_WRAPPED_MASK 0x1
+
+#define HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member) \
+ (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
+ << HINIC_CMDQ_CTXT_##member##_SHIFT)
+
+#define HINIC_CMDQ_CTXT_PAGE_INFO_GET(val, member) \
+ (((u64)(val) >> HINIC_CMDQ_CTXT_##member##_SHIFT) \
+ & HINIC_CMDQ_CTXT_##member##_MASK)
+
+#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \
+ ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
+ << HINIC_CMDQ_CTXT_##member##_SHIFT)))
+
+#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
+#define HINIC_CMDQ_CTXT_CI_SHIFT 52
+
+#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
+#define HINIC_CMDQ_CTXT_CI_MASK 0xFFF
+
+#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
+ (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
+ << HINIC_CMDQ_CTXT_##member##_SHIFT)
+
+#define HINIC_CMDQ_CTXT_BLOCK_INFO_GET(val, member) \
+ (((u64)(val) >> HINIC_CMDQ_CTXT_##member##_SHIFT) \
+ & HINIC_CMDQ_CTXT_##member##_MASK)
+
+#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \
+ ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
+ << HINIC_CMDQ_CTXT_##member##_SHIFT)))
+
+#define HINIC_SAVED_DATA_ARM_SHIFT 31
+
+#define HINIC_SAVED_DATA_ARM_MASK 0x1
+
+#define HINIC_SAVED_DATA_SET(val, member) \
+ (((u32)(val) & HINIC_SAVED_DATA_##member##_MASK) \
+ << HINIC_SAVED_DATA_##member##_SHIFT)
+
+#define HINIC_SAVED_DATA_GET(val, member) \
+ (((val) >> HINIC_SAVED_DATA_##member##_SHIFT) \
+ & HINIC_SAVED_DATA_##member##_MASK)
+
+#define HINIC_SAVED_DATA_CLEAR(val, member) \
+ ((val) & (~(HINIC_SAVED_DATA_##member##_MASK \
+ << HINIC_SAVED_DATA_##member##_SHIFT)))
+
+#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
+#define HINIC_CMDQ_DB_INFO_PATH_SHIFT 23
+#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
+#define HINIC_CMDQ_DB_INFO_DB_TYPE_SHIFT 27
+
+#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFF
+#define HINIC_CMDQ_DB_INFO_PATH_MASK 0x1
+#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7
+#define HINIC_CMDQ_DB_INFO_DB_TYPE_MASK 0x1F
+
+#define HINIC_CMDQ_DB_INFO_SET(val, member) \
+ (((u32)(val) & HINIC_CMDQ_DB_INFO_##member##_MASK) \
+ << HINIC_CMDQ_DB_INFO_##member##_SHIFT)
+
+#define HINIC_CMDQ_BUF_SIZE 2048
+
+#define HINIC_CMDQ_BUF_HW_RSVD 8
+#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE - \
+ HINIC_CMDQ_BUF_HW_RSVD)
+
+enum hinic_cmdq_type {
+ HINIC_CMDQ_SYNC,
+
+ HINIC_MAX_CMDQ_TYPES,
+};
+
+enum hinic_set_arm_qtype {
+ HINIC_SET_ARM_CMDQ,
+};
+
+enum hinic_cmd_ack_type {
+ HINIC_CMD_ACK_TYPE_CMDQ,
+};
+
+struct hinic_cmdq_buf {
+ void *buf;
+ dma_addr_t dma_addr;
+ size_t size;
+};
+
+struct hinic_cmdq_arm_bit {
+ u32 q_type;
+ u32 q_id;
+};
+
+struct hinic_cmdq_ctxt_info {
+ u64 curr_wqe_page_pfn;
+ u64 wq_block_pfn;
+};
+
+struct hinic_cmdq_ctxt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 cmdq_type;
+ u8 ppf_idx;
+
+ u8 rsvd2[4];
+
+ struct hinic_cmdq_ctxt_info ctxt_info;
+};
+
+struct hinic_cmdq {
+ struct hinic_hwdev *hwdev;
+
+ struct hinic_wq *wq;
+
+ enum hinic_cmdq_type cmdq_type;
+ int wrapped;
+
+ /* Lock for keeping the doorbell order */
+ spinlock_t cmdq_lock;
+
+ struct completion **done;
+ int **errcode;
+
+ /* doorbell area */
+ void __iomem *db_base;
+};
+
+struct hinic_cmdqs {
+ struct hinic_hwif *hwif;
+
+ struct dma_pool *cmdq_buf_pool;
+
+ struct hinic_wq *saved_wqs;
+
+ struct hinic_cmdq_pages cmdq_pages;
+
+ struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES];
+};
+
+int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
+ struct hinic_cmdq_buf *cmdq_buf);
+
+void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
+ struct hinic_cmdq_buf *cmdq_buf);
+
+int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmdq_buf *buf_in, u64 *out_param);
+
+int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
+ void __iomem **db_area);
+
+void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs);
+
+#endif
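
Note: the 16-bit producer index is split between the doorbell address and
the doorbell payload: the low 8 bits select the doorbell offset
(CMDQ_DB_PI_OFF in hinic_hw_cmdq.c shifts them left by 3, i.e. 8 bytes per
slot), and the high 8 bits travel in db_info via the macro above. Worked
example for a hypothetical prod_idx of 0x1234:

	low byte  0x34 -> doorbell offset = 0x34 << 3 = 0x1A0
	high byte 0x12 -> HINIC_CMDQ_DB_INFO_SET(0x12, HI_PROD_IDX) = 0x12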
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
new file mode 100644
index 0000000000..d56e7413ac
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_CSR_H
+#define HINIC_HW_CSR_H
+
+/* HW interface registers */
+#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
+#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
+#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8
+#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
+#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
+
+#define HINIC_DMA_ATTR_BASE 0xC80
+#define HINIC_ELECTION_BASE 0x4200
+
+#define HINIC_DMA_ATTR_STRIDE 0x4
+#define HINIC_CSR_DMA_ATTR_ADDR(idx) \
+ (HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE)
+
+#define HINIC_PPF_ELECTION_STRIDE 0x4
+
+#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \
+ (HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE)
+
+/* API CMD registers */
+#define HINIC_CSR_API_CMD_BASE 0xF000
+
+#define HINIC_CSR_API_CMD_STRIDE 0x100
+
+#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+/* MSI-X registers */
+#define HINIC_CSR_MSIX_CTRL_BASE 0x2000
+#define HINIC_CSR_MSIX_CNT_BASE 0x2004
+
+#define HINIC_CSR_MSIX_STRIDE 0x8
+
+#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \
+ (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
+
+#define HINIC_CSR_MSIX_CNT_ADDR(idx) \
+ (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
+
+/* EQ registers */
+#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200
+#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400
+
+#define HINIC_EQ_MTT_OFF_STRIDE 0x40
+
+#define HINIC_CSR_AEQ_MTT_OFF(id) \
+ (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_MTT_OFF(id) \
+ (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
+
+#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8
+
+#define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00
+#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04
+#define HINIC_AEQ_CONS_IDX_ADDR_BASE 0xE08
+#define HINIC_AEQ_PROD_IDX_ADDR_BASE 0xE0C
+
+#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000
+#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004
+#define HINIC_CEQ_CONS_IDX_ADDR_BASE 0x1008
+#define HINIC_CEQ_PROD_IDX_ADDR_BASE 0x100C
+
+#define HINIC_EQ_OFF_STRIDE 0x80
+
+#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \
+ (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \
+ (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \
+ (HINIC_AEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \
+ (HINIC_AEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \
+ (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \
+ (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \
+ (HINIC_CEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \
+ (HINIC_CEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#endif
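
Note: every per-chain/per-queue register address in this header is
base + field offset + index * stride. Worked example for the API CMD
producer-index register of a hypothetical chain 2:

	HINIC_CSR_API_CMD_CHAIN_PI_ADDR(2)
		= 0xF000 + 0x1C + 2 * 0x100
		= 0xF21C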
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
new file mode 100644
index 0000000000..27795288c5
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -0,0 +1,1197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/log2.h>
+#include <linux/err.h>
+#include <linux/netdevice.h>
+#include <net/devlink.h>
+
+#include "hinic_devlink.h"
+#include "hinic_sriov.h"
+#include "hinic_dev.h"
+#include "hinic_hw_if.h"
+#include "hinic_hw_eqs.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw_qp_ctxt.h"
+#include "hinic_hw_qp.h"
+#include "hinic_hw_io.h"
+#include "hinic_hw_dev.h"
+
+#define OUTBOUND_STATE_TIMEOUT 100
+#define DB_STATE_TIMEOUT 100
+
+#define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \
+ (2 * (max_qps) + (num_aeqs) + (num_ceqs))
+
+#define ADDR_IN_4BYTES(addr) ((addr) >> 2)
+
+enum intr_type {
+ INTR_MSIX_TYPE,
+};
+
+/**
+ * parse_capability - convert device capabilities to NIC capabilities
+ * @hwdev: the HW device to set and convert device capabilities for
+ * @dev_cap: device capabilities from FW
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int parse_capability(struct hinic_hwdev *hwdev,
+ struct hinic_dev_cap *dev_cap)
+{
+ struct hinic_cap *nic_cap = &hwdev->nic_cap;
+ int num_aeqs, num_ceqs, num_irqs;
+
+ if (!HINIC_IS_VF(hwdev->hwif) && dev_cap->intr_type != INTR_MSIX_TYPE)
+ return -EFAULT;
+
+ num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
+ num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif);
+ num_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif);
+
+ /* Each QP has its own (SQ + RQ) interrupts */
+ nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2;
+
+ if (nic_cap->num_qps > HINIC_Q_CTXT_MAX)
+ nic_cap->num_qps = HINIC_Q_CTXT_MAX;
+
+ if (!HINIC_IS_VF(hwdev->hwif))
+ nic_cap->max_qps = dev_cap->max_sqs + 1;
+ else
+ nic_cap->max_qps = dev_cap->max_sqs;
+
+ if (nic_cap->num_qps > nic_cap->max_qps)
+ nic_cap->num_qps = nic_cap->max_qps;
+
+ if (!HINIC_IS_VF(hwdev->hwif)) {
+ nic_cap->max_vf = dev_cap->max_vf;
+ nic_cap->max_vf_qps = dev_cap->max_vf_sqs + 1;
+ }
+
+ hwdev->port_id = dev_cap->port_id;
+
+ return 0;
+}
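
Note: the QP budget above is plain IRQ arithmetic: the AEQs and CEQs each
take one vector, and every QP needs two (one for its SQ, one for its RQ).
Worked example with hypothetical counts:

	num_irqs = 32, num_aeqs = 4, num_ceqs = 4
	num_qps  = (32 - (4 + 4)) / 2 = 12

	(then clamped to HINIC_Q_CTXT_MAX and to the FW-reported max_qps)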
+
+/**
+ * get_capability - get device capabilities from FW
+ * @pfhwdev: the PF HW device to get capabilities for
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int get_capability(struct hinic_pfhwdev *pfhwdev)
+{
+ struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_dev_cap dev_cap;
+ u16 out_len;
+ int err;
+
+ out_len = sizeof(dev_cap);
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM,
+ HINIC_CFG_NIC_CAP, &dev_cap, sizeof(dev_cap),
+ &dev_cap, &out_len, HINIC_MGMT_MSG_SYNC);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get capability from FW\n");
+ return err;
+ }
+
+ return parse_capability(hwdev, &dev_cap);
+}
+
+/**
+ * get_dev_cap - get device capabilities
+ * @hwdev: the NIC HW device to get capabilities for
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int get_dev_cap(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ switch (HINIC_FUNC_TYPE(hwif)) {
+ case HINIC_PPF:
+ case HINIC_PF:
+ case HINIC_VF:
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+ err = get_capability(pfhwdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get capability\n");
+ return err;
+ }
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PCI Function type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * init_msix - enable MSI-X and save the entries
+ * @hwdev: the NIC HW device
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int init_msix(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int nr_irqs, num_aeqs, num_ceqs;
+ int i, err;
+
+ num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
+ num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
+ nr_irqs = MAX_IRQS(HINIC_MAX_QPS, num_aeqs, num_ceqs);
+ if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
+ nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
+
+ hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs,
+ sizeof(*hwdev->msix_entries),
+ GFP_KERNEL);
+ if (!hwdev->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_irqs; i++)
+ hwdev->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_exact(pdev, hwdev->msix_entries, nr_irqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable pci msix\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * disable_msix - disable MSI-X
+ * @hwdev: the NIC HW device
+ **/
+static void disable_msix(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ pci_disable_msix(pdev);
+}
+
+/**
+ * hinic_port_msg_cmd - send port msg to mgmt
+ * @hwdev: the NIC HW device
+ * @cmd: the port command
+ * @buf_in: input buffer
+ * @in_size: input size
+ * @buf_out: output buffer
+ * @out_size: returned output size
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd,
+ buf_in, in_size, buf_out, out_size,
+ HINIC_MGMT_MSG_SYNC);
+}
+
+int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_HILINK, cmd,
+ buf_in, in_size, buf_out, out_size,
+ HINIC_MGMT_MSG_SYNC);
+}
+
+/**
+ * init_fw_ctxt - init firmware tables before network mgmt and IO operations
+ * @hwdev: the NIC HW device
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int init_fw_ctxt(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_cmd_fw_ctxt fw_ctxt;
+ u16 out_size = sizeof(fw_ctxt);
+ int err;
+
+ fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
+ &fw_ctxt, sizeof(fw_ctxt),
+ &fw_ctxt, &out_size);
+ if (err || out_size != sizeof(fw_ctxt) || fw_ctxt.status) {
+ dev_err(&pdev->dev, "Failed to init FW ctxt, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, fw_ctxt.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * set_hw_ioctxt - set the shape of the IO queues in FW
+ * @hwdev: the NIC HW device
+ * @sq_depth: sq depth
+ * @rq_depth: rq depth
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int sq_depth,
+ unsigned int rq_depth)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_cmd_hw_ioctxt hw_ioctxt;
+ struct hinic_pfhwdev *pfhwdev;
+
+ hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
+
+ hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
+ hw_ioctxt.cmdq_depth = 0;
+
+ hw_ioctxt.lro_en = 1;
+
+ hw_ioctxt.rq_depth = ilog2(rq_depth);
+
+ hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX;
+
+ hw_ioctxt.sq_depth = ilog2(sq_depth);
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_HWCTXT_SET,
+ &hw_ioctxt, sizeof(hw_ioctxt), NULL,
+ NULL, HINIC_MGMT_MSG_SYNC);
+}
+
+static int wait_for_outbound_state(struct hinic_hwdev *hwdev)
+{
+ enum hinic_outbound_state outbound_state;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(OUTBOUND_STATE_TIMEOUT);
+ do {
+ outbound_state = hinic_outbound_state_get(hwif);
+
+ if (outbound_state == HINIC_OUTBOUND_ENABLE)
+ return 0;
+
+ msleep(20);
+ } while (time_before(jiffies, end));
+
+ dev_err(&pdev->dev, "Wait for OUTBOUND - Timeout\n");
+ return -EFAULT;
+}
+
+static int wait_for_db_state(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ enum hinic_db_state db_state;
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(DB_STATE_TIMEOUT);
+ do {
+ db_state = hinic_db_state_get(hwif);
+
+ if (db_state == HINIC_DB_ENABLE)
+ return 0;
+
+ msleep(20);
+ } while (time_before(jiffies, end));
+
+ dev_err(&pdev->dev, "Wait for DB - Timeout\n");
+ return -EFAULT;
+}
+
+/**
+ * clear_io_resources - set the IO resources as not active in the NIC
+ * @hwdev: the NIC HW device
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int clear_io_resources(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmd_clear_io_res cmd_clear_io_res;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ /* sleep 100ms to wait for firmware stopping I/O */
+ msleep(100);
+
+ cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_IO_RES_CLEAR, &cmd_clear_io_res,
+ sizeof(cmd_clear_io_res), NULL, NULL,
+ HINIC_MGMT_MSG_SYNC);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to clear IO resources\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * set_resources_state - set the state of the resources in the NIC
+ * @hwdev: the NIC HW device
+ * @state: the state to set
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int set_resources_state(struct hinic_hwdev *hwdev,
+ enum hinic_res_state state)
+{
+ struct hinic_cmd_set_res_state res_state;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_pfhwdev *pfhwdev;
+
+ res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ res_state.state = state;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_COMM,
+ HINIC_COMM_CMD_RES_STATE_SET,
+ &res_state, sizeof(res_state), NULL,
+ NULL, HINIC_MGMT_MSG_SYNC);
+}
+
+/**
+ * get_base_qpn - get the first qp number
+ * @hwdev: the NIC HW device
+ * @base_qpn: returned qp number
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
+{
+ struct hinic_cmd_base_qpn cmd_base_qpn;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(cmd_base_qpn);
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN,
+ &cmd_base_qpn, sizeof(cmd_base_qpn),
+ &cmd_base_qpn, &out_size);
+ if (err || out_size != sizeof(cmd_base_qpn) || cmd_base_qpn.status) {
+ dev_err(&pdev->dev, "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, cmd_base_qpn.status, out_size);
+ return -EIO;
+ }
+
+ *base_qpn = cmd_base_qpn.qpn;
+ return 0;
+}
+
+/**
+ * hinic_hwdev_ifup - Preparing the HW for passing IO
+ * @hwdev: the NIC HW device
+ * @sq_depth: the send queue depth
+ * @rq_depth: the receive queue depth
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)
+{
+ struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
+ struct hinic_cap *nic_cap = &hwdev->nic_cap;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ int err, num_aeqs, num_ceqs, num_qps;
+ struct msix_entry *ceq_msix_entries;
+ struct msix_entry *sq_msix_entries;
+ struct msix_entry *rq_msix_entries;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 base_qpn;
+
+ err = get_base_qpn(hwdev, &base_qpn);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get global base qp number\n");
+ return err;
+ }
+
+ num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
+ num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
+
+ ceq_msix_entries = &hwdev->msix_entries[num_aeqs];
+ func_to_io->hwdev = hwdev;
+ func_to_io->sq_depth = sq_depth;
+ func_to_io->rq_depth = rq_depth;
+ func_to_io->global_qpn = base_qpn;
+
+ err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
+ ceq_msix_entries);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init IO channel\n");
+ return err;
+ }
+
+ num_qps = nic_cap->num_qps;
+ sq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs];
+ rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps];
+
+ err = hinic_io_create_qps(func_to_io, base_qpn, num_qps,
+ sq_msix_entries, rq_msix_entries);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to create QPs\n");
+ goto err_create_qps;
+ }
+
+ err = wait_for_db_state(hwdev);
+ if (err) {
+ dev_warn(&pdev->dev, "db - disabled, try again\n");
+ hinic_db_state_set(hwif, HINIC_DB_ENABLE);
+ }
+
+ err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set HW IO ctxt\n");
+ goto err_hw_ioctxt;
+ }
+
+ return 0;
+
+err_hw_ioctxt:
+ hinic_io_destroy_qps(func_to_io, num_qps);
+
+err_create_qps:
+ hinic_io_free(func_to_io);
+ return err;
+}
+
+/**
+ * hinic_hwdev_ifdown - Closing the HW for passing IO
+ * @hwdev: the NIC HW device
+ *
+ **/
+void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev)
+{
+ struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
+ struct hinic_cap *nic_cap = &hwdev->nic_cap;
+
+ clear_io_resources(hwdev);
+
+ hinic_io_destroy_qps(func_to_io, nic_cap->num_qps);
+ hinic_io_free(func_to_io);
+}
+
+/**
+ * hinic_hwdev_cb_register - register callback handler for MGMT events
+ * @hwdev: the NIC HW device
+ * @cmd: the mgmt event
+ * @handle: private data for the handler
+ * @handler: event handler
+ **/
+void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
+ enum hinic_mgmt_msg_cmd cmd, void *handle,
+ void (*handler)(void *handle, void *buf_in,
+ u16 in_size, void *buf_out,
+ u16 *out_size))
+{
+ struct hinic_pfhwdev *pfhwdev;
+ struct hinic_nic_cb *nic_cb;
+ u8 cmd_cb;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
+ nic_cb = &pfhwdev->nic_cb[cmd_cb];
+
+ nic_cb->handler = handler;
+ nic_cb->handle = handle;
+ nic_cb->cb_state = HINIC_CB_ENABLED;
+}
+
+/**
+ * hinic_hwdev_cb_unregister - unregister callback handler for MGMT events
+ * @hwdev: the NIC HW device
+ * @cmd: the mgmt event
+ **/
+void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
+ enum hinic_mgmt_msg_cmd cmd)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_pfhwdev *pfhwdev;
+ struct hinic_nic_cb *nic_cb;
+ u8 cmd_cb;
+
+ if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif))
+ return;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
+ nic_cb = &pfhwdev->nic_cb[cmd_cb];
+
+ nic_cb->cb_state &= ~HINIC_CB_ENABLED;
+
+ while (nic_cb->cb_state & HINIC_CB_RUNNING)
+ schedule();
+
+ nic_cb->handler = NULL;
+}
+
+/**
+ * nic_mgmt_msg_handler - nic mgmt event handler
+ * @handle: private data for the handler
+ * @cmd: message command
+ * @buf_in: input buffer
+ * @in_size: input size
+ * @buf_out: output buffer
+ * @out_size: returned output size
+ **/
+static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_pfhwdev *pfhwdev = handle;
+ enum hinic_cb_state cb_state;
+ struct hinic_nic_cb *nic_cb;
+ struct hinic_hwdev *hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u8 cmd_cb;
+
+ hwdev = &pfhwdev->hwdev;
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ if (cmd < HINIC_MGMT_MSG_CMD_BASE ||
+ cmd >= HINIC_MGMT_MSG_CMD_MAX) {
+ dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd);
+ return;
+ }
+
+ cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
+
+ nic_cb = &pfhwdev->nic_cb[cmd_cb];
+
+ cb_state = cmpxchg(&nic_cb->cb_state,
+ HINIC_CB_ENABLED,
+ HINIC_CB_ENABLED | HINIC_CB_RUNNING);
+
+ if (cb_state == HINIC_CB_ENABLED && nic_cb->handler)
+ nic_cb->handler(nic_cb->handle, buf_in,
+ in_size, buf_out, out_size);
+ else
+ dev_err(&pdev->dev, "Unhandled NIC Event %d\n", cmd);
+
+ nic_cb->cb_state &= ~HINIC_CB_RUNNING;
+}
+
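+/**
+ * hinic_comm_recv_mgmt_self_cmd_reg - register a handler for a self mgmt cmd
+ * @pfhwdev: the HW device for PF
+ * @cmd: the mgmt command to handle
+ * @proc: the handler function
+ **/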
+static void hinic_comm_recv_mgmt_self_cmd_reg(struct hinic_pfhwdev *pfhwdev,
+ u8 cmd,
+ comm_mgmt_self_msg_proc proc)
+{
+ u8 cmd_idx;
+
+ cmd_idx = pfhwdev->proc.cmd_num;
+ if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) {
+ dev_err(&pfhwdev->hwdev.hwif->pdev->dev,
+ "Register recv mgmt process failed, cmd: 0x%x\n", cmd);
+ return;
+ }
+
+ pfhwdev->proc.info[cmd_idx].cmd = cmd;
+ pfhwdev->proc.info[cmd_idx].proc = proc;
+ pfhwdev->proc.cmd_num++;
+}
+
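+/**
+ * hinic_comm_recv_mgmt_self_cmd_unreg - unregister the handler of a self mgmt cmd
+ * @pfhwdev: the HW device for PF
+ * @cmd: the mgmt command to stop handling
+ **/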
+static void hinic_comm_recv_mgmt_self_cmd_unreg(struct hinic_pfhwdev *pfhwdev,
+ u8 cmd)
+{
+ u8 cmd_idx;
+
+ cmd_idx = pfhwdev->proc.cmd_num;
+ if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) {
+ dev_err(&pfhwdev->hwdev.hwif->pdev->dev, "Unregister recv mgmt process failed, cmd: 0x%x\n",
+ cmd);
+ return;
+ }
+
+ for (cmd_idx = 0; cmd_idx < HINIC_COMM_SELF_CMD_MAX; cmd_idx++) {
+ if (cmd == pfhwdev->proc.info[cmd_idx].cmd) {
+ pfhwdev->proc.info[cmd_idx].cmd = 0;
+ pfhwdev->proc.info[cmd_idx].proc = NULL;
+ pfhwdev->proc.cmd_num--;
+ }
+ }
+}
+
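+/**
+ * comm_mgmt_msg_handler - dispatch a COMM mgmt event to its registered handler
+ * @handle: private data for the handler (the HW device for PF)
+ * @cmd: message command
+ * @buf_in: input buffer
+ * @in_size: input size
+ * @buf_out: output buffer
+ * @out_size: returned output size
+ **/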
+static void comm_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_pfhwdev *pfhwdev = handle;
+ u8 cmd_idx;
+
+ for (cmd_idx = 0; cmd_idx < pfhwdev->proc.cmd_num; cmd_idx++) {
+ if (cmd == pfhwdev->proc.info[cmd_idx].cmd) {
+ if (!pfhwdev->proc.info[cmd_idx].proc) {
+ dev_warn(&pfhwdev->hwdev.hwif->pdev->dev,
+ "PF recv mgmt comm msg handle null, cmd: 0x%x\n",
+ cmd);
+ } else {
+ pfhwdev->proc.info[cmd_idx].proc
+ (&pfhwdev->hwdev, buf_in, in_size,
+ buf_out, out_size);
+ }
+
+ return;
+ }
+ }
+
+ dev_warn(&pfhwdev->hwdev.hwif->pdev->dev, "Received unknown mgmt cpu event: 0x%x\n",
+ cmd);
+
+ *out_size = 0;
+}
+
+/* pf fault report event */
+static void pf_fault_event_handler(void *dev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_cmd_fault_event *fault_event = buf_in;
+ struct hinic_hwdev *hwdev = dev;
+
+ if (in_size != sizeof(*fault_event)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Invalid fault event report, length: %d, should be %zu\n",
+ in_size, sizeof(*fault_event));
+ return;
+ }
+
+ if (!hwdev->devlink_dev || IS_ERR_OR_NULL(hwdev->devlink_dev->hw_fault_reporter))
+ return;
+
+ devlink_health_report(hwdev->devlink_dev->hw_fault_reporter,
+ "HW fatal error reported", &fault_event->event);
+}
+
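+/* mgmt firmware watchdog timeout report event */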
+static void mgmt_watchdog_timeout_event_handler(void *dev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_mgmt_watchdog_info *watchdog_info = buf_in;
+ struct hinic_hwdev *hwdev = dev;
+
+ if (in_size != sizeof(*watchdog_info)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Invalid mgmt watchdog report, length: %d, should be %zu\n",
+ in_size, sizeof(*watchdog_info));
+ return;
+ }
+
+ if (!hwdev->devlink_dev || IS_ERR_OR_NULL(hwdev->devlink_dev->fw_fault_reporter))
+ return;
+
+ devlink_health_report(hwdev->devlink_dev->fw_fault_reporter,
+ "FW fatal error reported", watchdog_info);
+}
+
+/**
+ * init_pfhwdev - Initialize the extended components of PF
+ * @pfhwdev: the HW device for PF
+ *
+ * Return 0 - success, negative - failure
+ **/
+static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
+{
+ struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwif);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize PF to MGMT channel\n");
+ return err;
+ }
+
+ err = hinic_func_to_func_init(hwdev);
+ if (err) {
+ dev_err(&hwif->pdev->dev, "Failed to init mailbox\n");
+ hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
+ return err;
+ }
+
+ if (!HINIC_IS_VF(hwif)) {
+ hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_L2NIC, pfhwdev,
+ nic_mgmt_msg_handler);
+ hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ pfhwdev, comm_mgmt_msg_handler);
+ hinic_comm_recv_mgmt_self_cmd_reg(pfhwdev,
+ HINIC_COMM_CMD_FAULT_REPORT,
+ pf_fault_event_handler);
+ hinic_comm_recv_mgmt_self_cmd_reg
+ (pfhwdev, HINIC_COMM_CMD_WATCHDOG_INFO,
+ mgmt_watchdog_timeout_event_handler);
+ } else {
+ hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ nic_mgmt_msg_handler);
+ }
+
+ hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
+ hinic_devlink_register(hwdev->devlink_dev);
+ return 0;
+}
+
+/**
+ * free_pfhwdev - Free the extended components of PF
+ * @pfhwdev: the HW device for PF
+ **/
+static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
+{
+ struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+
+ hinic_devlink_unregister(hwdev->devlink_dev);
+ hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
+
+ if (!HINIC_IS_VF(hwdev->hwif)) {
+ hinic_comm_recv_mgmt_self_cmd_unreg(pfhwdev,
+ HINIC_COMM_CMD_WATCHDOG_INFO);
+ hinic_comm_recv_mgmt_self_cmd_unreg(pfhwdev,
+ HINIC_COMM_CMD_FAULT_REPORT);
+ hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_COMM);
+ hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_L2NIC);
+ } else {
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ }
+
+ hinic_func_to_func_free(hwdev);
+
+ hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
+}
+
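+/**
+ * hinic_l2nic_reset - reset the L2NIC resources in FW
+ * @hwdev: the NIC HW device
+ *
+ * Return 0 - Success, negative - Failure
+ **/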
+static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmd_l2nic_reset l2nic_reset = {0};
+ u16 out_size = sizeof(l2nic_reset);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ l2nic_reset.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ /* 0 represents standard l2nic reset flow */
+ l2nic_reset.reset_flag = 0;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_L2NIC_RESET, &l2nic_reset,
+ sizeof(l2nic_reset), &l2nic_reset,
+ &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || l2nic_reset.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, l2nic_reset.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
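+/**
+ * hinic_get_interrupt_cfg - read the interrupt configuration from the MGMT cpu
+ * @hwdev: the NIC HW device
+ * @interrupt_info: returned interrupt configuration
+ *
+ * Return 0 - Success, negative - Failure
+ **/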
+static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct hinic_msix_config *interrupt_info)
+{
+ u16 out_size = sizeof(*interrupt_info);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ if (!hwdev || !interrupt_info)
+ return -EINVAL;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ interrupt_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP,
+ interrupt_info, sizeof(*interrupt_info),
+ interrupt_info, &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || interrupt_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, interrupt_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
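+/**
+ * hinic_set_interrupt_cfg - write the interrupt configuration to the MGMT cpu
+ * @hwdev: the NIC HW device
+ * @interrupt_info: the interrupt configuration to set
+ *
+ * Return 0 - Success, negative - Failure
+ **/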
+int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct hinic_msix_config *interrupt_info)
+{
+ u16 out_size = sizeof(*interrupt_info);
+ struct hinic_msix_config temp_info;
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ interrupt_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+
+ err = hinic_get_interrupt_cfg(hwdev, &temp_info);
+ if (err)
+ return -EINVAL;
+
+ interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt;
+ interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP,
+ interrupt_info, sizeof(*interrupt_info),
+ interrupt_info, &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || interrupt_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, interrupt_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_init_hwdev - Initialize the NIC HW
+ * @pdev: the NIC pci device
+ * @devlink: the pointer to the hinic devlink
+ *
+ * Return initialized NIC HW device
+ *
+ * Initialize the NIC HW device and return a pointer to it
+ **/
+struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devlink)
+{
+ struct hinic_pfhwdev *pfhwdev;
+ struct hinic_hwdev *hwdev;
+ struct hinic_hwif *hwif;
+ int err, num_aeqs;
+
+ hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL);
+ if (!hwif)
+ return ERR_PTR(-ENOMEM);
+
+ err = hinic_init_hwif(hwif, pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init HW interface\n");
+ return ERR_PTR(err);
+ }
+
+ pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL);
+ if (!pfhwdev) {
+ err = -ENOMEM;
+ goto err_pfhwdev_alloc;
+ }
+
+ hwdev = &pfhwdev->hwdev;
+ hwdev->hwif = hwif;
+ hwdev->devlink_dev = devlink_priv(devlink);
+ hwdev->devlink_dev->hwdev = hwdev;
+
+ err = init_msix(hwdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init msix\n");
+ goto err_init_msix;
+ }
+
+ err = wait_for_outbound_state(hwdev);
+ if (err) {
+ dev_warn(&pdev->dev, "outbound - disabled, try again\n");
+ hinic_outbound_state_set(hwif, HINIC_OUTBOUND_ENABLE);
+ }
+
+ num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
+
+ err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs,
+ HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE,
+ hwdev->msix_entries);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init async event queues\n");
+ goto err_aeqs_init;
+ }
+
+ err = init_pfhwdev(pfhwdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init PF HW device\n");
+ goto err_init_pfhwdev;
+ }
+
+ err = hinic_l2nic_reset(hwdev);
+ if (err)
+ goto err_l2nic_reset;
+
+ err = get_dev_cap(hwdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get device capabilities\n");
+ goto err_dev_cap;
+ }
+
+ mutex_init(&hwdev->func_to_io.nic_cfg.cfg_mutex);
+
+ err = hinic_vf_func_init(hwdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init nic mbox\n");
+ goto err_vf_func_init;
+ }
+
+ err = init_fw_ctxt(hwdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init function table\n");
+ goto err_init_fw_ctxt;
+ }
+
+ err = set_resources_state(hwdev, HINIC_RES_ACTIVE);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set resources state\n");
+ goto err_resources_state;
+ }
+
+ return hwdev;
+
+err_resources_state:
+err_init_fw_ctxt:
+ hinic_vf_func_free(hwdev);
+err_vf_func_init:
+err_l2nic_reset:
+err_dev_cap:
+ free_pfhwdev(pfhwdev);
+
+err_init_pfhwdev:
+ hinic_aeqs_free(&hwdev->aeqs);
+
+err_aeqs_init:
+ disable_msix(hwdev);
+
+err_init_msix:
+err_pfhwdev_alloc:
+ hinic_free_hwif(hwif);
+ if (err > 0)
+ err = -EIO;
+ return ERR_PTR(err);
+}
+
+/**
+ * hinic_free_hwdev - Free the NIC HW device
+ * @hwdev: the NIC HW device
+ **/
+void hinic_free_hwdev(struct hinic_hwdev *hwdev)
+{
+ struct hinic_pfhwdev *pfhwdev = container_of(hwdev,
+ struct hinic_pfhwdev,
+ hwdev);
+
+ set_resources_state(hwdev, HINIC_RES_CLEAN);
+
+ hinic_vf_func_free(hwdev);
+
+ free_pfhwdev(pfhwdev);
+
+ hinic_aeqs_free(&hwdev->aeqs);
+
+ disable_msix(hwdev);
+
+ hinic_free_hwif(hwdev->hwif);
+}
+
+/**
+ * hinic_hwdev_num_qps - return the number of QPs available for use
+ * @hwdev: the NIC HW device
+ *
+ * Return number of QPs available for use
+ **/
+int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cap *nic_cap = &hwdev->nic_cap;
+
+ return nic_cap->num_qps;
+}
+
+/**
+ * hinic_hwdev_get_sq - get SQ
+ * @hwdev: the NIC HW device
+ * @i: the position of the SQ
+ *
+ * Return: the SQ in the i position
+ **/
+struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i)
+{
+ struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
+ struct hinic_qp *qp;
+
+ /* validate the index before computing the qp address */
+ if (i >= hinic_hwdev_num_qps(hwdev))
+ return NULL;
+
+ qp = &func_to_io->qps[i];
+ return &qp->sq;
+}
+
+/**
+ * hinic_hwdev_get_rq - get RQ
+ * @hwdev: the NIC HW device
+ * @i: the position of the RQ
+ *
+ * Return: the RQ in the i position
+ **/
+struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i)
+{
+ struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
+ struct hinic_qp *qp;
+
+ /* validate the index before computing the qp address */
+ if (i >= hinic_hwdev_num_qps(hwdev))
+ return NULL;
+
+ qp = &func_to_io->qps[i];
+ return &qp->rq;
+}
+
+/**
+ * hinic_hwdev_msix_cnt_set - clear message attribute counters for msix entry
+ * @hwdev: the NIC HW device
+ * @msix_index: msix_index
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index)
+{
+ return hinic_msix_attr_cnt_clear(hwdev->hwif, msix_index);
+}
+
+/**
+ * hinic_hwdev_msix_set - set message attribute for msix entry
+ * @hwdev: the NIC HW device
+ * @msix_index: msix_index
+ * @pending_limit: the maximum pending interrupt events (unit 8)
+ * @coalesc_timer: coalesc period for interrupt (unit 8 us)
+ * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
+ * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
+ * @resend_timer: maximum wait for resending msix (unit coalesc period)
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
+ u8 pending_limit, u8 coalesc_timer,
+ u8 lli_timer_cfg, u8 lli_credit_limit,
+ u8 resend_timer)
+{
+ return hinic_msix_attr_set(hwdev->hwif, msix_index,
+ pending_limit, coalesc_timer,
+ lli_timer_cfg, lli_credit_limit,
+ resend_timer);
+}
+
+/**
+ * hinic_hwdev_hw_ci_addr_set - set cons idx addr and attributes in HW for sq
+ * @hwdev: the NIC HW device
+ * @sq: send queue
+ * @pending_limit: the maximum pending update ci events (unit 8)
+ * @coalesc_timer: coalesc period for update ci (unit 8 us)
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
+ u8 pending_limit, u8 coalesc_timer)
+{
+ struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_pfhwdev *pfhwdev;
+ struct hinic_cmd_hw_ci hw_ci;
+
+ hw_ci.dma_attr_off = 0;
+ hw_ci.pending_limit = pending_limit;
+ hw_ci.coalesc_timer = coalesc_timer;
+
+ hw_ci.msix_en = 1;
+ hw_ci.msix_entry_idx = sq->msix_entry;
+
+ hw_ci.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+
+ hw_ci.sq_id = qp->q_id;
+
+ hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr);
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_COMM,
+ HINIC_COMM_CMD_SQ_HI_CI_SET,
+ &hw_ci, sizeof(hw_ci), NULL,
+ NULL, HINIC_MGMT_MSG_SYNC);
+}
+
+/**
+ * hinic_hwdev_set_msix_state - set msix state
+ * @hwdev: the NIC HW device
+ * @msix_index: IRQ corresponding index number
+ * @flag: msix state
+ *
+ **/
+void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
+ enum hinic_msix_state flag)
+{
+ hinic_set_msix_state(hwdev->hwif, msix_index, flag);
+}
+
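+/**
+ * hinic_get_board_info - get the board info from the MGMT cpu
+ * @hwdev: the NIC HW device
+ * @board_info: returned board info
+ *
+ * Return 0 - Success, negative - Failure
+ **/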
+int hinic_get_board_info(struct hinic_hwdev *hwdev,
+ struct hinic_comm_board_info *board_info)
+{
+ u16 out_size = sizeof(*board_info);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ if (!hwdev || !board_info)
+ return -EINVAL;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_GET_BOARD_INFO,
+ board_info, sizeof(*board_info),
+ board_info, &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || board_info->status || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, board_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
new file mode 100644
index 0000000000..6b5797e697
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -0,0 +1,660 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_DEV_H
+#define HINIC_HW_DEV_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <net/devlink.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_eqs.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw_qp.h"
+#include "hinic_hw_io.h"
+#include "hinic_hw_mbox.h"
+
+#define HINIC_MAX_QPS 32
+
+#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \
+ HINIC_MGMT_MSG_CMD_BASE)
+
+#define HINIC_PF_SET_VF_ALREADY 0x4
+#define HINIC_MGMT_STATUS_EXIST 0x6
+#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF
+
+#define HINIC_CMD_VER_FUNC_ID 2
+
+struct hinic_cap {
+ u16 max_qps;
+ u16 num_qps;
+ u8 max_vf;
+ u16 max_vf_qps;
+};
+
+enum hw_ioctxt_set_cmdq_depth {
+ HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
+ HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE,
+};
+
+enum hinic_port_cmd {
+ HINIC_PORT_CMD_VF_REGISTER = 0x0,
+ HINIC_PORT_CMD_VF_UNREGISTER = 0x1,
+
+ HINIC_PORT_CMD_CHANGE_MTU = 0x2,
+
+ HINIC_PORT_CMD_ADD_VLAN = 0x3,
+ HINIC_PORT_CMD_DEL_VLAN = 0x4,
+
+ HINIC_PORT_CMD_SET_ETS = 0x7,
+ HINIC_PORT_CMD_GET_ETS = 0x8,
+
+ HINIC_PORT_CMD_SET_PFC = 0x5,
+
+ HINIC_PORT_CMD_SET_MAC = 0x9,
+ HINIC_PORT_CMD_GET_MAC = 0xA,
+ HINIC_PORT_CMD_DEL_MAC = 0xB,
+
+ HINIC_PORT_CMD_SET_RX_MODE = 0xC,
+
+ HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE = 0xD,
+
+ HINIC_PORT_CMD_GET_PAUSE_INFO = 0x14,
+ HINIC_PORT_CMD_SET_PAUSE_INFO = 0x15,
+
+ HINIC_PORT_CMD_GET_LINK_STATE = 0x18,
+
+ HINIC_PORT_CMD_SET_LRO = 0x19,
+
+ HINIC_PORT_CMD_SET_RX_CSUM = 0x1A,
+
+ HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 0x1B,
+
+ HINIC_PORT_CMD_GET_PORT_STATISTICS = 0x1C,
+
+ HINIC_PORT_CMD_CLEAR_PORT_STATISTICS = 0x1D,
+
+ HINIC_PORT_CMD_GET_VPORT_STAT = 0x1E,
+
+ HINIC_PORT_CMD_CLEAN_VPORT_STAT = 0x1F,
+
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 0x25,
+
+ HINIC_PORT_CMD_SET_PORT_STATE = 0x29,
+
+ HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 0x2B,
+
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL = 0x2C,
+
+ HINIC_PORT_CMD_SET_RSS_HASH_ENGINE = 0x2D,
+
+ HINIC_PORT_CMD_GET_RSS_HASH_ENGINE = 0x2E,
+
+ HINIC_PORT_CMD_GET_RSS_CTX_TBL = 0x2F,
+
+ HINIC_PORT_CMD_SET_RSS_CTX_TBL = 0x30,
+
+ HINIC_PORT_CMD_RSS_TEMP_MGR = 0x31,
+
+ HINIC_PORT_CMD_RD_LINE_TBL = 0x39,
+
+ HINIC_PORT_CMD_RSS_CFG = 0x42,
+
+ HINIC_PORT_CMD_GET_PHY_TYPE = 0x44,
+
+ HINIC_PORT_CMD_FWCTXT_INIT = 0x45,
+
+ HINIC_PORT_CMD_GET_LOOPBACK_MODE = 0x48,
+ HINIC_PORT_CMD_SET_LOOPBACK_MODE = 0x49,
+
+ HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4A,
+ HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE = 0x4B,
+
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK = 0x4E,
+
+ HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58,
+
+ HINIC_PORT_CMD_GET_PORT_TYPE = 0x5B,
+
+ HINIC_PORT_CMD_SET_FUNC_STATE = 0x5D,
+
+ HINIC_PORT_CMD_GET_PORT_ID_BY_FUNC_ID = 0x5E,
+
+ HINIC_PORT_CMD_GET_DMA_CS = 0x64,
+ HINIC_PORT_CMD_SET_DMA_CS = 0x65,
+
+ HINIC_PORT_CMD_GET_GLOBAL_QPN = 0x66,
+
+ HINIC_PORT_CMD_SET_VF_RATE = 0x69,
+
+ HINIC_PORT_CMD_SET_VF_VLAN = 0x6A,
+
+ HINIC_PORT_CMD_CLR_VF_VLAN = 0x6B,
+
+ HINIC_PORT_CMD_SET_TSO = 0x70,
+
+ HINIC_PORT_CMD_UPDATE_FW = 0x72,
+
+ HINIC_PORT_CMD_SET_RQ_IQ_MAP = 0x73,
+
+ HINIC_PORT_CMD_SET_PFC_THD = 0x75,
+
+ HINIC_PORT_CMD_LINK_STATUS_REPORT = 0xA0,
+
+ HINIC_PORT_CMD_SET_LOSSLESS_ETH = 0xA3,
+
+ HINIC_PORT_CMD_UPDATE_MAC = 0xA4,
+
+ HINIC_PORT_CMD_GET_CAP = 0xAA,
+
+ HINIC_PORT_CMD_UP_TC_ADD_FLOW = 0xAF,
+ HINIC_PORT_CMD_UP_TC_DEL_FLOW = 0xB0,
+ HINIC_PORT_CMD_UP_TC_GET_FLOW = 0xB1,
+
+ HINIC_PORT_CMD_UP_TC_FLUSH_TCAM = 0xB2,
+
+ HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK = 0xB3,
+
+ HINIC_PORT_CMD_UP_TC_ENABLE = 0xB4,
+
+ HINIC_PORT_CMD_UP_TC_GET_TCAM_BLOCK = 0xB5,
+
+ HINIC_PORT_CMD_SET_IPSU_MAC = 0xCB,
+ HINIC_PORT_CMD_GET_IPSU_MAC = 0xCC,
+
+ HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4,
+
+ HINIC_PORT_CMD_GET_LINK_MODE = 0xD9,
+
+ HINIC_PORT_CMD_SET_SPEED = 0xDA,
+
+ HINIC_PORT_CMD_SET_AUTONEG = 0xDB,
+
+ HINIC_PORT_CMD_CLEAR_QP_RES = 0xDD,
+
+ HINIC_PORT_CMD_SET_SUPER_CQE = 0xDE,
+
+ HINIC_PORT_CMD_SET_VF_COS = 0xDF,
+ HINIC_PORT_CMD_GET_VF_COS = 0xE1,
+
+ HINIC_PORT_CMD_CABLE_PLUG_EVENT = 0xE5,
+
+ HINIC_PORT_CMD_LINK_ERR_EVENT = 0xE6,
+
+ HINIC_PORT_CMD_SET_COS_UP_MAP = 0xE8,
+
+ HINIC_PORT_CMD_RESET_LINK_CFG = 0xEB,
+
+ HINIC_PORT_CMD_GET_STD_SFP_INFO = 0xF0,
+
+ HINIC_PORT_CMD_FORCE_PKT_DROP = 0xF3,
+
+ HINIC_PORT_CMD_SET_LRO_TIMER = 0xF4,
+
+ HINIC_PORT_CMD_SET_VHD_CFG = 0xF7,
+
+ HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8,
+
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 0xF9,
+
+ HINIC_PORT_CMD_GET_SFP_ABS = 0xFB,
+
+ HINIC_PORT_CMD_Q_FILTER = 0xFC,
+
+ HINIC_PORT_CMD_TCAM_FILTER = 0xFE,
+
+ HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF,
+};
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hinic_hilink_cmd {
+ HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
+};
+
+enum hinic_ucode_cmd {
+ HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT = 0,
+ HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
+ HINIC_UCODE_CMD_ARM_SQ,
+ HINIC_UCODE_CMD_ARM_RQ,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+ HINIC_UCODE_CMD_GET_RSS_INDIR_TABLE,
+ HINIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE,
+ HINIC_UCODE_CMD_SET_IQ_ENABLE,
+ HINIC_UCODE_CMD_SET_RQ_FLUSH = 10
+};
+
+#define NIC_RSS_CMD_TEMP_ALLOC 0x01
+#define NIC_RSS_CMD_TEMP_FREE 0x02
+
+enum hinic_mgmt_msg_cmd {
+ HINIC_MGMT_MSG_CMD_BASE = 0xA0,
+
+ HINIC_MGMT_MSG_CMD_LINK_STATUS = 0xA0,
+
+ HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT = 0xE5,
+ HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT = 0xE6,
+
+ HINIC_MGMT_MSG_CMD_MAX,
+};
+
+enum hinic_cb_state {
+ HINIC_CB_ENABLED = BIT(0),
+ HINIC_CB_RUNNING = BIT(1),
+};
+
+enum hinic_res_state {
+ HINIC_RES_CLEAN = 0,
+ HINIC_RES_ACTIVE = 1,
+};
+
+struct hinic_cmd_fw_ctxt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 rx_buf_sz;
+
+ u32 rsvd1;
+};
+
+struct hinic_cmd_hw_ioctxt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+
+ u16 rsvd1;
+
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+
+ u8 lro_en;
+ u8 rsvd3;
+ u8 ppf_idx;
+ u8 rsvd4;
+
+ u16 rq_depth;
+ u16 rx_buf_sz_idx;
+ u16 sq_depth;
+};
+
+struct hinic_cmd_io_status {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 rsvd1;
+ u8 rsvd2;
+ u32 io_status;
+};
+
+struct hinic_cmd_clear_io_res {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 rsvd1;
+ u8 rsvd2;
+};
+
+struct hinic_cmd_set_res_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 state;
+ u8 rsvd1;
+ u32 rsvd2;
+};
+
+struct hinic_ceq_ctrl_reg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 q_id;
+ u32 ctrl0;
+ u32 ctrl1;
+};
+
+struct hinic_cmd_base_qpn {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 qpn;
+};
+
+struct hinic_cmd_hw_ci {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalesc_timer;
+
+ u8 msix_en;
+ u16 msix_entry_idx;
+
+ u32 sq_id;
+ u32 rsvd1;
+ u64 ci_addr;
+};
+
+struct hinic_cmd_l2nic_reset {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 reset_flag;
+};
+
+struct hinic_msix_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 msix_index;
+ u8 pending_cnt;
+ u8 coalesce_timer_cnt;
+ u8 lli_timer_cnt;
+ u8 lli_credit_cnt;
+ u8 resend_timer_cnt;
+ u8 rsvd1[3];
+};
+
+struct hinic_set_random_id {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 vf_in_pf;
+ u8 rsvd1;
+ u16 func_idx;
+ u32 random_id;
+};
+
+struct hinic_board_info {
+ u32 board_type;
+ u32 port_num;
+ u32 port_speed;
+ u32 pcie_width;
+ u32 host_num;
+ u32 pf_num;
+ u32 vf_total_num;
+ u32 tile_num;
+ u32 qcm_num;
+ u32 core_num;
+ u32 work_mode;
+ u32 service_mode;
+ u32 pcie_mode;
+ u32 cfg_addr;
+ u32 boot_sel;
+ u32 board_id;
+};
+
+struct hinic_comm_board_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_board_info info;
+
+ u32 rsvd1[4];
+};
+
+struct hinic_hwdev {
+ struct hinic_hwif *hwif;
+ struct msix_entry *msix_entries;
+
+ struct hinic_aeqs aeqs;
+ struct hinic_func_to_io func_to_io;
+ struct hinic_mbox_func_to_func *func_to_func;
+
+ struct hinic_cap nic_cap;
+ u8 port_id;
+ struct hinic_devlink_priv *devlink_dev;
+};
+
+struct hinic_nic_cb {
+ void (*handler)(void *handle, void *buf_in,
+ u16 in_size, void *buf_out,
+ u16 *out_size);
+
+ void *handle;
+ unsigned long cb_state;
+};
+
+#define HINIC_COMM_SELF_CMD_MAX 4
+
+typedef void (*comm_mgmt_self_msg_proc)(void *handle, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+struct comm_mgmt_self_msg_sub_info {
+ u8 cmd;
+ comm_mgmt_self_msg_proc proc;
+};
+
+struct comm_mgmt_self_msg_info {
+ u8 cmd_num;
+ struct comm_mgmt_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX];
+};
+
+struct hinic_pfhwdev {
+ struct hinic_hwdev hwdev;
+
+ struct hinic_pf_to_mgmt pf_to_mgmt;
+
+ struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD];
+
+ struct comm_mgmt_self_msg_info proc;
+};
+
+struct hinic_dev_cap {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 rsvd1[5];
+ u8 intr_type;
+ u8 max_cos_id;
+ u8 er_id;
+ u8 port_id;
+ u8 max_vf;
+ u8 rsvd2[62];
+ u16 max_sqs;
+ u16 max_rqs;
+ u16 max_vf_sqs;
+ u16 max_vf_rqs;
+ u8 rsvd3[204];
+};
+
+union hinic_fault_hw_mgmt {
+ u32 val[4];
+ /* valid only type == FAULT_TYPE_CHIP */
+ struct {
+ u8 node_id;
+ u8 err_level;
+ u16 err_type;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+ /* func_id valid only if err_level == FAULT_LEVEL_SERIOUS_FLR */
+ u16 func_id;
+ u16 rsvd2;
+ } chip;
+
+ /* valid only if type == FAULT_TYPE_UCODE */
+ struct {
+ u8 cause_id;
+ u8 core_id;
+ u8 c_id;
+ u8 rsvd3;
+ u32 epc;
+ u32 rsvd4;
+ u32 rsvd5;
+ } ucode;
+
+ /* valid only if type == FAULT_TYPE_MEM_RD_TIMEOUT ||
+ * FAULT_TYPE_MEM_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr_ctrl;
+ u32 err_csr_data;
+ u32 ctrl_tab;
+ u32 mem_index;
+ } mem_timeout;
+
+ /* valid only if type == FAULT_TYPE_REG_RD_TIMEOUT ||
+ * FAULT_TYPE_REG_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr;
+ u32 rsvd6;
+ u32 rsvd7;
+ u32 rsvd8;
+ } reg_timeout;
+
+ struct {
+ /* 0: read; 1: write */
+ u8 op_type;
+ u8 port_id;
+ u8 dev_ad;
+ u8 rsvd9;
+ u32 csr_addr;
+ u32 op_data;
+ u32 rsvd10;
+ } phy_fault;
+};
+
+struct hinic_fault_event {
+ u8 type;
+ u8 fault_level;
+ u8 rsvd0[2];
+ union hinic_fault_hw_mgmt event;
+};
+
+struct hinic_cmd_fault_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_fault_event event;
+};
+
+enum hinic_fault_type {
+ FAULT_TYPE_CHIP,
+ FAULT_TYPE_UCODE,
+ FAULT_TYPE_MEM_RD_TIMEOUT,
+ FAULT_TYPE_MEM_WR_TIMEOUT,
+ FAULT_TYPE_REG_RD_TIMEOUT,
+ FAULT_TYPE_REG_WR_TIMEOUT,
+ FAULT_TYPE_PHY_FAULT,
+ FAULT_TYPE_MAX,
+};
+
+enum hinic_fault_err_level {
+ FAULT_LEVEL_FATAL,
+ FAULT_LEVEL_SERIOUS_RESET,
+ FAULT_LEVEL_SERIOUS_FLR,
+ FAULT_LEVEL_GENERAL,
+ FAULT_LEVEL_SUGGESTION,
+ FAULT_LEVEL_MAX
+};
+
+struct hinic_mgmt_watchdog_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 curr_time_h;
+ u32 curr_time_l;
+ u32 task_id;
+ u32 rsv;
+
+ u32 reg[13];
+ u32 pc;
+ u32 lr;
+ u32 cpsr;
+
+ u32 stack_top;
+ u32 stack_bottom;
+ u32 sp;
+ u32 curr_used;
+ u32 peak_used;
+ u32 is_overflow;
+
+ u32 stack_actlen;
+ u8 data[1024];
+};
+
+void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
+ enum hinic_mgmt_msg_cmd cmd, void *handle,
+ void (*handler)(void *handle, void *buf_in,
+ u16 in_size, void *buf_out,
+ u16 *out_size));
+
+void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
+ enum hinic_mgmt_msg_cmd cmd);
+
+int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+
+int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+
+int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
+
+void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev);
+
+struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devlink);
+
+void hinic_free_hwdev(struct hinic_hwdev *hwdev);
+
+int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
+
+struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
+
+struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i);
+
+int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index);
+
+int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
+ u8 pending_limit, u8 coalesc_timer,
+ u8 lli_timer_cfg, u8 lli_credit_limit,
+ u8 resend_timer);
+
+int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
+ u8 pending_limit, u8 coalesc_timer);
+
+void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
+ enum hinic_msix_state flag);
+
+int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct hinic_msix_config *interrupt_info);
+
+int hinic_get_board_info(struct hinic_hwdev *hwdev,
+ struct hinic_comm_board_info *board_info);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
new file mode 100644
index 0000000000..045c47786a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -0,0 +1,996 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/log2.h>
+#include <asm/byteorder.h>
+#include <asm/barrier.h>
+
+#include "hinic_hw_dev.h"
+#include "hinic_hw_csr.h"
+#include "hinic_hw_if.h"
+#include "hinic_hw_eqs.h"
+
+#define HINIC_EQS_WQ_NAME "hinic_eqs"
+
+#define GET_EQ_NUM_PAGES(eq, pg_size) \
+ (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))
+
+#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size)
+
+#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
+ HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
+ HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
+
+#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
+ HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
+ HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
+
+#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
+ HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
+ HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
+
+#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
+ HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
+ HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
+
+#define GET_EQ_ELEMENT(eq, idx) \
+ ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
+ (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
+
+#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \
+ GET_EQ_ELEMENT(eq, idx))
+
+#define GET_CEQ_ELEM(eq, idx) ((u32 *) \
+ GET_EQ_ELEMENT(eq, idx))
+
+#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx)
+
+#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx)
+
+#define PAGE_IN_4K(page_size) ((page_size) >> 12)
+#define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size)))
+
+#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
+#define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq)))
+
+#define EQ_MAX_PAGES 8
+
+#define CEQE_TYPE_SHIFT 23
+#define CEQE_TYPE_MASK 0x7
+
+#define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \
+ CEQE_TYPE_MASK)
+
+#define CEQE_DATA_MASK 0x3FFFFFF
+#define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK)
+
+#define aeq_to_aeqs(eq) \
+ container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
+
+#define ceq_to_ceqs(eq) \
+ container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
+
+#define work_to_aeq_work(work) \
+ container_of(work, struct hinic_eq_work, work)
+
+#define DMA_ATTR_AEQ_DEFAULT 0
+#define DMA_ATTR_CEQ_DEFAULT 0
+
+/* No coalescence */
+#define THRESH_CEQ_DEFAULT 0
+
+enum eq_int_mode {
+ EQ_INT_MODE_ARMED,
+ EQ_INT_MODE_ALWAYS
+};
+
+enum eq_arm_state {
+ EQ_NOT_ARMED,
+ EQ_ARMED
+};
+
+/**
+ * hinic_aeq_register_hw_cb - register an AEQ callback for a specific event
+ * @aeqs: pointer to Async eqs of the chip
+ * @event: the aeq event to register a callback for
+ * @handle: private data that will be passed to the callback
+ * @hwe_handler: callback function
+ **/
+void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
+ enum hinic_aeq_type event, void *handle,
+ void (*hwe_handler)(void *handle, void *data,
+ u8 size))
+{
+ struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
+
+ hwe_cb->hwe_handler = hwe_handler;
+ hwe_cb->handle = handle;
+ hwe_cb->hwe_state = HINIC_EQE_ENABLED;
+}
+
+/**
+ * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for a specific event
+ * @aeqs: pointer to Async eqs of the chip
+ * @event: the aeq event to unregister the callback for
+ **/
+void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
+ enum hinic_aeq_type event)
+{
+ struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
+
+ hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED;
+
+ while (hwe_cb->hwe_state & HINIC_EQE_RUNNING)
+ schedule();
+
+ hwe_cb->hwe_handler = NULL;
+}
+
+/**
+ * hinic_ceq_register_cb - register a CEQ callback for a specific event
+ * @ceqs: pointer to Completion eqs part of the chip
+ * @event: the ceq event to register a callback for
+ * @handle: private data that will be passed to the callback
+ * @handler: callback function
+ **/
+void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
+ enum hinic_ceq_type event, void *handle,
+ void (*handler)(void *handle, u32 ceqe_data))
+{
+ struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
+
+ ceq_cb->handler = handler;
+ ceq_cb->handle = handle;
+ ceq_cb->ceqe_state = HINIC_EQE_ENABLED;
+}
+
+/**
+ * hinic_ceq_unregister_cb - unregister the CEQ callback for a specific event
+ * @ceqs: pointer to Completion eqs part of the chip
+ * @event: the ceq event to unregister the callback for
+ **/
+void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
+ enum hinic_ceq_type event)
+{
+ struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
+
+ ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED;
+
+ while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING)
+ schedule();
+
+ ceq_cb->handler = NULL;
+}
+
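+/**
+ * eq_cons_idx_checksum_set - compute the XOR checksum for a cons idx value
+ * @val: the cons idx register value, with the checksum bits cleared
+ *
+ * Return the 4-bit XOR checksum over the nibbles of the value
+ **/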
+static u8 eq_cons_idx_checksum_set(u32 val)
+{
+ u8 checksum = 0;
+ int idx;
+
+ for (idx = 0; idx < 32; idx += 4)
+ checksum ^= ((val >> idx) & 0xF);
+
+ return (checksum & 0xF);
+}
+
+/**
+ * eq_update_ci - update the HW cons idx of event queue
+ * @eq: the event queue to update the cons idx for
+ * @arm_state: the arm bit value of eq's interrupt
+ **/
+static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
+{
+ u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
+
+ /* Read Modify Write */
+ val = hinic_hwif_read_reg(eq->hwif, addr);
+
+ val = HINIC_EQ_CI_CLEAR(val, IDX) &
+ HINIC_EQ_CI_CLEAR(val, WRAPPED) &
+ HINIC_EQ_CI_CLEAR(val, INT_ARMED) &
+ HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM);
+
+ val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) |
+ HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
+ HINIC_EQ_CI_SET(arm_state, INT_ARMED);
+
+ val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
+
+ hinic_hwif_write_reg(eq->hwif, addr, val);
+}
+
+/**
+ * aeq_irq_handler - handler for the AEQ event
+ * @eq: the Async Event Queue that received the event
+ **/
+static void aeq_irq_handler(struct hinic_eq *eq)
+{
+ struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
+ struct hinic_hwif *hwif = aeqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_aeq_elem *aeqe_curr;
+ struct hinic_hw_event_cb *hwe_cb;
+ enum hinic_aeq_type event;
+ unsigned long eqe_state;
+ u32 aeqe_desc;
+ int i, size;
+
+ for (i = 0; i < eq->q_len; i++) {
+ aeqe_curr = GET_CURR_AEQ_ELEM(eq);
+
+ /* Data in HW is in Big endian Format */
+ aeqe_desc = be32_to_cpu(aeqe_curr->desc);
+
+ /* HW toggles the wrapped bit, when it adds eq element */
+ if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
+ break;
+
+ dma_rmb();
+
+ event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
+ if (event >= HINIC_MAX_AEQ_EVENTS) {
+ dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
+ return;
+ }
+
+ if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
+ hwe_cb = &aeqs->hwe_cb[event];
+
+ size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
+
+ eqe_state = cmpxchg(&hwe_cb->hwe_state,
+ HINIC_EQE_ENABLED,
+ HINIC_EQE_ENABLED |
+ HINIC_EQE_RUNNING);
+ if (eqe_state == HINIC_EQE_ENABLED &&
+ hwe_cb->hwe_handler)
+ hwe_cb->hwe_handler(hwe_cb->handle,
+ aeqe_curr->data, size);
+ else
+ dev_err(&pdev->dev, "Unhandled AEQ Event %d\n",
+ event);
+
+ hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING;
+ }
+
+ eq->cons_idx++;
+
+ if (eq->cons_idx == eq->q_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+ }
+}
+
+/**
+ * ceq_event_handler - handler for the ceq events
+ * @ceqs: ceqs part of the chip
+ * @ceqe: ceq element that describes the event
+ **/
+static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
+{
+ struct hinic_hwif *hwif = ceqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_ceq_cb *ceq_cb;
+ enum hinic_ceq_type event;
+ unsigned long eqe_state;
+
+ event = CEQE_TYPE(ceqe);
+ if (event >= HINIC_MAX_CEQ_EVENTS) {
+ dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event);
+ return;
+ }
+
+ ceq_cb = &ceqs->ceq_cb[event];
+
+ eqe_state = cmpxchg(&ceq_cb->ceqe_state,
+ HINIC_EQE_ENABLED,
+ HINIC_EQE_ENABLED | HINIC_EQE_RUNNING);
+
+ if (eqe_state == HINIC_EQE_ENABLED && ceq_cb->handler)
+ ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe));
+ else
+ dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);
+
+ ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING;
+}
+
+/**
+ * ceq_irq_handler - handler for the CEQ event
+ * @eq: the Completion Event Queue that received the event
+ **/
+static void ceq_irq_handler(struct hinic_eq *eq)
+{
+ struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
+ u32 ceqe;
+ int i;
+
+ for (i = 0; i < eq->q_len; i++) {
+ ceqe = *(GET_CURR_CEQ_ELEM(eq));
+
+ /* Data in HW is in Big endian Format */
+ ceqe = be32_to_cpu(ceqe);
+
+ /* HW toggles the wrapped bit, when it adds eq element event */
+ if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
+ break;
+
+ ceq_event_handler(ceqs, ceqe);
+
+ eq->cons_idx++;
+
+ if (eq->cons_idx == eq->q_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+ }
+}
+
+/**
+ * eq_irq_handler - handler for the EQ event
+ * @data: the Event Queue that received the event
+ **/
+static void eq_irq_handler(void *data)
+{
+ struct hinic_eq *eq = data;
+
+ if (eq->type == HINIC_AEQ)
+ aeq_irq_handler(eq);
+ else if (eq->type == HINIC_CEQ)
+ ceq_irq_handler(eq);
+
+ eq_update_ci(eq, EQ_ARMED);
+}
+
+/**
+ * eq_irq_work - the work of the EQ that received the event
+ * @work: the work struct that is associated with the EQ
+ **/
+static void eq_irq_work(struct work_struct *work)
+{
+ struct hinic_eq_work *aeq_work = work_to_aeq_work(work);
+ struct hinic_eq *aeq;
+
+ aeq = aeq_work->data;
+ eq_irq_handler(aeq);
+}
+
+/**
+ * ceq_tasklet - the tasklet of the EQ that received the event
+ * @t: the tasklet struct pointer
+ **/
+static void ceq_tasklet(struct tasklet_struct *t)
+{
+ struct hinic_eq *ceq = from_tasklet(ceq, t, ceq_tasklet);
+
+ eq_irq_handler(ceq);
+}
+
+/**
+ * aeq_interrupt - aeq interrupt handler
+ * @irq: irq number
+ * @data: the Async Event Queue that collected the event
+ **/
+static irqreturn_t aeq_interrupt(int irq, void *data)
+{
+ struct hinic_eq_work *aeq_work;
+ struct hinic_eq *aeq = data;
+ struct hinic_aeqs *aeqs;
+
+ /* clear resend timer cnt register */
+ hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);
+
+ aeq_work = &aeq->aeq_work;
+ aeq_work->data = aeq;
+
+ aeqs = aeq_to_aeqs(aeq);
+ queue_work(aeqs->workq, &aeq_work->work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ceq_interrupt - ceq interrupt handler
+ * @irq: irq number
+ * @data: the Completion Event Queue that collected the event
+ **/
+static irqreturn_t ceq_interrupt(int irq, void *data)
+{
+ struct hinic_eq *ceq = data;
+
+ /* clear resend timer cnt register */
+ hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);
+
+ tasklet_schedule(&ceq->ceq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
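+/**
+ * get_ctrl0_val - build the value to be written to the eq's CTRL0 register
+ * @eq: the Event Queue
+ * @addr: the CTRL0 register address (recomputed internally for the eq type)
+ *
+ * Return the merged read-modify-write value for the CTRL0 register
+ **/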
+static u32 get_ctrl0_val(struct hinic_eq *eq, u32 addr)
+{
+ struct msix_entry *msix_entry = &eq->msix_entry;
+ enum hinic_eq_type type = eq->type;
+ u32 val, ctrl0;
+
+ if (type == HINIC_AEQ) {
+ /* RMW Ctrl0 */
+ addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+
+ val = hinic_hwif_read_reg(eq->hwif, addr);
+
+ val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) &
+ HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
+ HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
+ HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE);
+
+ ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) |
+ HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) |
+ HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
+ PCI_INTF_IDX) |
+ HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);
+
+ val |= ctrl0;
+ } else {
+ /* RMW Ctrl0 */
+ addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
+
+ val = hinic_hwif_read_reg(eq->hwif, addr);
+
+ val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) &
+ HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
+ HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) &
+ HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
+ HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE);
+
+ ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) |
+ HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) |
+ HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) |
+ HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
+ PCI_INTF_IDX) |
+ HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);
+
+ val |= ctrl0;
+ }
+ return val;
+}
+
+static void set_ctrl0(struct hinic_eq *eq)
+{
+ u32 val, addr;
+
+ if (eq->type == HINIC_AEQ)
+ addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+ else
+ addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
+
+ val = get_ctrl0_val(eq, addr);
+
+ hinic_hwif_write_reg(eq->hwif, addr, val);
+}
+
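+/**
+ * get_ctrl1_val - build the value to be written to the eq's CTRL1 register
+ * @eq: the Event Queue
+ * @addr: the CTRL1 register address (recomputed internally for the eq type)
+ *
+ * Return the merged read-modify-write value for the CTRL1 register
+ **/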
+static u32 get_ctrl1_val(struct hinic_eq *eq, u32 addr)
+{
+ u32 page_size_val, elem_size, val, ctrl1;
+ enum hinic_eq_type type = eq->type;
+
+ if (type == HINIC_AEQ) {
+ /* RMW Ctrl1 */
+ addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+
+ page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+ elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
+
+ val = hinic_hwif_read_reg(eq->hwif, addr);
+
+ val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) &
+ HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) &
+ HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
+
+ ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) |
+ HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
+ HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+ val |= ctrl1;
+ } else {
+ /* RMW Ctrl1 */
+ addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
+
+ page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+
+ val = hinic_hwif_read_reg(eq->hwif, addr);
+
+ val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) &
+ HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
+
+ ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) |
+ HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+ val |= ctrl1;
+ }
+ return val;
+}
+
+static void set_ctrl1(struct hinic_eq *eq)
+{
+ u32 addr, val;
+
+ if (eq->type == HINIC_AEQ)
+ addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+ else
+ addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
+
+ val = get_ctrl1_val(eq, addr);
+
+ hinic_hwif_write_reg(eq->hwif, addr, val);
+}
+
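+/**
+ * set_ceq_ctrl_reg - set the ceq's ctrl registers through the MGMT cpu
+ * @eq: the Completion Event Queue
+ *
+ * Return 0 - Success, negative - Failure
+ **/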
+static int set_ceq_ctrl_reg(struct hinic_eq *eq)
+{
+ struct hinic_ceq_ctrl_reg ceq_ctrl = {0};
+ struct hinic_hwdev *hwdev = eq->hwdev;
+ u16 out_size = sizeof(ceq_ctrl);
+ u16 in_size = sizeof(ceq_ctrl);
+ struct hinic_pfhwdev *pfhwdev;
+ u32 addr;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
+ ceq_ctrl.ctrl0 = get_ctrl0_val(eq, addr);
+ addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
+ ceq_ctrl.ctrl1 = get_ctrl1_val(eq, addr);
+
+ ceq_ctrl.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ ceq_ctrl.q_id = eq->q_id;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP,
+ &ceq_ctrl, in_size,
+ &ceq_ctrl, &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || ceq_ctrl.status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
+ eq->q_id, err, ceq_ctrl.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * set_eq_ctrls - setting eq's ctrl registers
+ * @eq: the Event Queue for setting
+ **/
+static int set_eq_ctrls(struct hinic_eq *eq)
+{
+ if (HINIC_IS_VF(eq->hwif) && eq->type == HINIC_CEQ)
+ return set_ceq_ctrl_reg(eq);
+
+ set_ctrl0(eq);
+ set_ctrl1(eq);
+ return 0;
+}
+
+/**
+ * aeq_elements_init - initialize all the elements in the aeq
+ * @eq: the Async Event Queue
+ * @init_val: the value to initialize the elements with
+ **/
+static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+ struct hinic_aeq_elem *aeqe;
+ int i;
+
+ for (i = 0; i < eq->q_len; i++) {
+ aeqe = GET_AEQ_ELEM(eq, i);
+ aeqe->desc = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Write the initialization values */
+}
+
+/**
+ * ceq_elements_init - Initialize all the elements in the ceq
+ * @eq: the event queue
+ * @init_val: the value to initialize the elements with
+ **/
+static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+ u32 *ceqe;
+ int i;
+
+ for (i = 0; i < eq->q_len; i++) {
+ ceqe = GET_CEQ_ELEM(eq, i);
+ *(ceqe) = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Write the initialization values */
+}
+
+/**
+ * alloc_eq_pages - allocate the pages for the queue
+ * @eq: the event queue
+ *
+ * Return 0 - Success, Negative - Failure
+ **/
+static int alloc_eq_pages(struct hinic_eq *eq)
+{
+ struct hinic_hwif *hwif = eq->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u32 init_val, addr, val;
+ int err, pg;
+
+ eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->dma_addr), GFP_KERNEL);
+ if (!eq->dma_addr)
+ return -ENOMEM;
+
+ eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->virt_addr), GFP_KERNEL);
+ if (!eq->virt_addr) {
+ err = -ENOMEM;
+ goto err_virt_addr_alloc;
+ }
+
+ for (pg = 0; pg < eq->num_pages; pg++) {
+ eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
+ eq->page_size,
+ &eq->dma_addr[pg],
+ GFP_KERNEL);
+ if (!eq->virt_addr[pg]) {
+ err = -ENOMEM;
+ goto err_dma_alloc;
+ }
+
+ addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
+ val = upper_32_bits(eq->dma_addr[pg]);
+
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
+ val = lower_32_bits(eq->dma_addr[pg]);
+
+ hinic_hwif_write_reg(hwif, addr, val);
+ }
+
+ init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED);
+
+ if (eq->type == HINIC_AEQ)
+ aeq_elements_init(eq, init_val);
+ else if (eq->type == HINIC_CEQ)
+ ceq_elements_init(eq, init_val);
+
+ return 0;
+
+err_dma_alloc:
+ while (--pg >= 0)
+ dma_free_coherent(&pdev->dev, eq->page_size,
+ eq->virt_addr[pg],
+ eq->dma_addr[pg]);
+
+ devm_kfree(&pdev->dev, eq->virt_addr);
+
+err_virt_addr_alloc:
+ devm_kfree(&pdev->dev, eq->dma_addr);
+ return err;
+}
+
+/**
+ * free_eq_pages - free the pages of the queue
+ * @eq: the Event Queue
+ **/
+static void free_eq_pages(struct hinic_eq *eq)
+{
+ struct hinic_hwif *hwif = eq->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int pg;
+
+ for (pg = 0; pg < eq->num_pages; pg++)
+ dma_free_coherent(&pdev->dev, eq->page_size,
+ eq->virt_addr[pg],
+ eq->dma_addr[pg]);
+
+ devm_kfree(&pdev->dev, eq->virt_addr);
+ devm_kfree(&pdev->dev, eq->dma_addr);
+}
+
+/**
+ * init_eq - initialize Event Queue
+ * @eq: the event queue
+ * @hwif: the HW interface of a PCI function device
+ * @type: the type of the event queue, aeq or ceq
+ * @q_id: Queue id number
+ * @q_len: the number of EQ elements
+ * @page_size: the page size of the pages in the event queue
+ * @entry: msix entry associated with the event queue
+ *
+ * Return 0 - Success, Negative - Failure
+ **/
+static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
+ enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size,
+ struct msix_entry entry)
+{
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ eq->hwif = hwif;
+ eq->type = type;
+ eq->q_id = q_id;
+ eq->q_len = q_len;
+ eq->page_size = page_size;
+
+ /* Clear PI and CI, also clear the ARM bit */
+ hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
+ hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+
+ eq->cons_idx = 0;
+ eq->wrapped = 0;
+
+ if (type == HINIC_AEQ) {
+ eq->elem_size = HINIC_AEQE_SIZE;
+ } else if (type == HINIC_CEQ) {
+ eq->elem_size = HINIC_CEQE_SIZE;
+ } else {
+ dev_err(&pdev->dev, "Invalid EQ type\n");
+ return -EINVAL;
+ }
+
+ eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
+ eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size);
+
+ eq->msix_entry = entry;
+
+ if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
+ dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
+ return -EINVAL;
+ }
+
+ if (eq->num_pages > EQ_MAX_PAGES) {
+ dev_err(&pdev->dev, "too many pages for eq\n");
+ return -EINVAL;
+ }
+
+ err = set_eq_ctrls(eq);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set eq ctrls\n");
+ return err;
+ }
+
+ eq_update_ci(eq, EQ_ARMED);
+
+ err = alloc_eq_pages(eq);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
+ return err;
+ }
+
+ if (type == HINIC_AEQ) {
+ struct hinic_eq_work *aeq_work = &eq->aeq_work;
+
+ INIT_WORK(&aeq_work->work, eq_irq_work);
+ } else if (type == HINIC_CEQ) {
+ tasklet_setup(&eq->ceq_tasklet, ceq_tasklet);
+ }
+
+ /* set the attributes of the msix entry */
+ hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
+ HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT,
+ HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT,
+ HINIC_EQ_MSIX_LLI_TIMER_DEFAULT,
+ HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
+ HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);
+
+ if (type == HINIC_AEQ) {
+ snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_aeq%d@pci:%s", eq->q_id,
+ pci_name(pdev));
+ err = request_irq(entry.vector, aeq_interrupt, 0, eq->irq_name, eq);
+ } else if (type == HINIC_CEQ) {
+ snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_ceq%d@pci:%s", eq->q_id,
+ pci_name(pdev));
+ err = request_irq(entry.vector, ceq_interrupt, 0, eq->irq_name, eq);
+ }
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
+ goto err_req_irq;
+ }
+
+ return 0;
+
+err_req_irq:
+ free_eq_pages(eq);
+ return err;
+}
+
+/**
+ * remove_eq - remove Event Queue
+ * @eq: the event queue
+ **/
+static void remove_eq(struct hinic_eq *eq)
+{
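+	/*
+	 * Teardown order matters: mask and free the vector first so no new
+	 * events arrive, flush any deferred handling, zero the queue length
+	 * so hardware stops writing to host memory, then sync the consumer
+	 * index without re-arming the interrupt.
+	 */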
+ hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
+ HINIC_MSIX_DISABLE);
+ free_irq(eq->msix_entry.vector, eq);
+
+ if (eq->type == HINIC_AEQ) {
+ struct hinic_eq_work *aeq_work = &eq->aeq_work;
+
+ cancel_work_sync(&aeq_work->work);
+ /* clear aeq_len to avoid hw access host memory */
+ hinic_hwif_write_reg(eq->hwif,
+ HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+ } else if (eq->type == HINIC_CEQ) {
+ tasklet_kill(&eq->ceq_tasklet);
+ /* clear ceq_len to avoid hw access host memory */
+ hinic_hwif_write_reg(eq->hwif,
+ HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
+ }
+
+ /* update cons_idx to avoid invalid interrupt */
+ eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
+ eq_update_ci(eq, EQ_NOT_ARMED);
+
+ free_eq_pages(eq);
+}
+
+/**
+ * hinic_aeqs_init - initialize all the aeqs
+ * @aeqs: pointer to Async eqs of the chip
+ * @hwif: the HW interface of a PCI function device
+ * @num_aeqs: number of AEQs
+ * @q_len: number of EQ elements
+ * @page_size: the page size of the pages in the event queue
+ * @msix_entries: msix entries associated with the event queues
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
+ int num_aeqs, u32 q_len, u32 page_size,
+ struct msix_entry *msix_entries)
+{
+ struct pci_dev *pdev = hwif->pdev;
+ int err, i, q_id;
+
+ aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
+ if (!aeqs->workq)
+ return -ENOMEM;
+
+ aeqs->hwif = hwif;
+ aeqs->num_aeqs = num_aeqs;
+
+ for (q_id = 0; q_id < num_aeqs; q_id++) {
+ err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len,
+ page_size, msix_entries[q_id]);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id);
+ goto err_init_aeq;
+ }
+ }
+
+ return 0;
+
+err_init_aeq:
+ for (i = 0; i < q_id; i++)
+ remove_eq(&aeqs->aeq[i]);
+
+ destroy_workqueue(aeqs->workq);
+ return err;
+}
+
+/**
+ * hinic_aeqs_free - free all the aeqs
+ * @aeqs: pointer to Async eqs of the chip
+ **/
+void hinic_aeqs_free(struct hinic_aeqs *aeqs)
+{
+ int q_id;
+
+	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
+ remove_eq(&aeqs->aeq[q_id]);
+
+ destroy_workqueue(aeqs->workq);
+}
+
+/**
+ * hinic_ceqs_init - init all the ceqs
+ * @ceqs: ceqs part of the chip
+ * @hwif: the hardware interface of a pci function device
+ * @num_ceqs: number of CEQs
+ * @q_len: number of EQ elements
+ * @page_size: the page size of the event queue
+ * @msix_entries: msix entries associated with the event queues
+ *
+ * Return 0 - Success, Negative - Failure
+ **/
+int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
+ int num_ceqs, u32 q_len, u32 page_size,
+ struct msix_entry *msix_entries)
+{
+ struct pci_dev *pdev = hwif->pdev;
+ int i, q_id, err;
+
+ ceqs->hwif = hwif;
+ ceqs->num_ceqs = num_ceqs;
+
+ for (q_id = 0; q_id < num_ceqs; q_id++) {
+ ceqs->ceq[q_id].hwdev = ceqs->hwdev;
+ err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
+ page_size, msix_entries[q_id]);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id);
+ goto err_init_ceq;
+ }
+ }
+
+ return 0;
+
+err_init_ceq:
+ for (i = 0; i < q_id; i++)
+ remove_eq(&ceqs->ceq[i]);
+
+ return err;
+}
+
+/**
+ * hinic_ceqs_free - free all the ceqs
+ * @ceqs: ceqs part of the chip
+ **/
+void hinic_ceqs_free(struct hinic_ceqs *ceqs)
+{
+ int q_id;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
+ remove_eq(&ceqs->ceq[q_id]);
+}
+
+void hinic_dump_ceq_info(struct hinic_hwdev *hwdev)
+{
+ struct hinic_eq *eq = NULL;
+ u32 addr, ci, pi;
+ int q_id;
+
+ for (q_id = 0; q_id < hwdev->func_to_io.ceqs.num_ceqs; q_id++) {
+ eq = &hwdev->func_to_io.ceqs.ceq[q_id];
+ addr = EQ_CONS_IDX_REG_ADDR(eq);
+ ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+ addr = EQ_PROD_IDX_REG_ADDR(eq);
+ pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+ dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n",
+ q_id, ci, eq->cons_idx, pi,
+ eq->ceq_tasklet.state,
+ eq->wrapped, be32_to_cpu(*(__be32 *)(GET_CURR_CEQ_ELEM(eq))));
+ }
+}
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
+{
+ struct hinic_aeq_elem *aeqe_pos = NULL;
+ struct hinic_eq *eq = NULL;
+ u32 addr, ci, pi;
+ int q_id;
+
+ for (q_id = 0; q_id < hwdev->aeqs.num_aeqs; q_id++) {
+ eq = &hwdev->aeqs.aeq[q_id];
+ addr = EQ_CONS_IDX_REG_ADDR(eq);
+ ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+ addr = EQ_PROD_IDX_REG_ADDR(eq);
+ pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+ aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+ dev_err(&hwdev->hwif->pdev->dev, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n",
+ q_id, ci, pi, work_busy(&eq->aeq_work.work),
+ eq->wrapped, be32_to_cpu(aeqe_pos->desc));
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
new file mode 100644
index 0000000000..2f3222174f
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_EQS_H
+#define HINIC_HW_EQS_H
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+
+#include "hinic_hw_if.h"
+
+#define HINIC_AEQ_CTRL_0_INT_IDX_SHIFT 0
+#define HINIC_AEQ_CTRL_0_DMA_ATTR_SHIFT 12
+#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20
+#define HINIC_AEQ_CTRL_0_INT_MODE_SHIFT 31
+
+#define HINIC_AEQ_CTRL_0_INT_IDX_MASK 0x3FF
+#define HINIC_AEQ_CTRL_0_DMA_ATTR_MASK 0x3F
+#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3
+#define HINIC_AEQ_CTRL_0_INT_MODE_MASK 0x1
+
+#define HINIC_AEQ_CTRL_0_SET(val, member) \
+ (((u32)(val) & HINIC_AEQ_CTRL_0_##member##_MASK) << \
+ HINIC_AEQ_CTRL_0_##member##_SHIFT)
+
+#define HINIC_AEQ_CTRL_0_CLEAR(val, member) \
+ ((val) & (~(HINIC_AEQ_CTRL_0_##member##_MASK \
+ << HINIC_AEQ_CTRL_0_##member##_SHIFT)))
+
+#define HINIC_AEQ_CTRL_1_LEN_SHIFT 0
+#define HINIC_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
+#define HINIC_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
+
+#define HINIC_AEQ_CTRL_1_LEN_MASK 0x1FFFFF
+#define HINIC_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3
+#define HINIC_AEQ_CTRL_1_PAGE_SIZE_MASK 0xF
+
+#define HINIC_AEQ_CTRL_1_SET(val, member) \
+ (((u32)(val) & HINIC_AEQ_CTRL_1_##member##_MASK) << \
+ HINIC_AEQ_CTRL_1_##member##_SHIFT)
+
+#define HINIC_AEQ_CTRL_1_CLEAR(val, member) \
+ ((val) & (~(HINIC_AEQ_CTRL_1_##member##_MASK \
+ << HINIC_AEQ_CTRL_1_##member##_SHIFT)))
+
+#define HINIC_CEQ_CTRL_0_INTR_IDX_SHIFT 0
+#define HINIC_CEQ_CTRL_0_DMA_ATTR_SHIFT 12
+#define HINIC_CEQ_CTRL_0_KICK_THRESH_SHIFT 20
+#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24
+#define HINIC_CEQ_CTRL_0_INTR_MODE_SHIFT 31
+
+#define HINIC_CEQ_CTRL_0_INTR_IDX_MASK 0x3FF
+#define HINIC_CEQ_CTRL_0_DMA_ATTR_MASK 0x3F
+#define HINIC_CEQ_CTRL_0_KICK_THRESH_MASK 0xF
+#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3
+#define HINIC_CEQ_CTRL_0_INTR_MODE_MASK 0x1
+
+#define HINIC_CEQ_CTRL_0_SET(val, member) \
+ (((u32)(val) & HINIC_CEQ_CTRL_0_##member##_MASK) << \
+ HINIC_CEQ_CTRL_0_##member##_SHIFT)
+
+#define HINIC_CEQ_CTRL_0_CLEAR(val, member) \
+ ((val) & (~(HINIC_CEQ_CTRL_0_##member##_MASK \
+ << HINIC_CEQ_CTRL_0_##member##_SHIFT)))
+
+#define HINIC_CEQ_CTRL_1_LEN_SHIFT 0
+#define HINIC_CEQ_CTRL_1_PAGE_SIZE_SHIFT 28
+
+#define HINIC_CEQ_CTRL_1_LEN_MASK 0x1FFFFF
+#define HINIC_CEQ_CTRL_1_PAGE_SIZE_MASK 0xF
+
+#define HINIC_CEQ_CTRL_1_SET(val, member) \
+ (((u32)(val) & HINIC_CEQ_CTRL_1_##member##_MASK) << \
+ HINIC_CEQ_CTRL_1_##member##_SHIFT)
+
+#define HINIC_CEQ_CTRL_1_CLEAR(val, member) \
+ ((val) & (~(HINIC_CEQ_CTRL_1_##member##_MASK \
+ << HINIC_CEQ_CTRL_1_##member##_SHIFT)))
+
+#define HINIC_EQ_ELEM_DESC_TYPE_SHIFT 0
+#define HINIC_EQ_ELEM_DESC_SRC_SHIFT 7
+#define HINIC_EQ_ELEM_DESC_SIZE_SHIFT 8
+#define HINIC_EQ_ELEM_DESC_WRAPPED_SHIFT 31
+
+#define HINIC_EQ_ELEM_DESC_TYPE_MASK 0x7F
+#define HINIC_EQ_ELEM_DESC_SRC_MASK 0x1
+#define HINIC_EQ_ELEM_DESC_SIZE_MASK 0xFF
+#define HINIC_EQ_ELEM_DESC_WRAPPED_MASK 0x1
+
+#define HINIC_EQ_ELEM_DESC_SET(val, member) \
+ (((u32)(val) & HINIC_EQ_ELEM_DESC_##member##_MASK) << \
+ HINIC_EQ_ELEM_DESC_##member##_SHIFT)
+
+#define HINIC_EQ_ELEM_DESC_GET(val, member) \
+ (((val) >> HINIC_EQ_ELEM_DESC_##member##_SHIFT) & \
+ HINIC_EQ_ELEM_DESC_##member##_MASK)
+
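+/*
+ * Consumer-index register layout implied by the shifts and masks below:
+ * idx[19:0], wrapped[20], xor_chksum[27:24] and int_armed[31].
+ */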
+#define HINIC_EQ_CI_IDX_SHIFT 0
+#define HINIC_EQ_CI_WRAPPED_SHIFT 20
+#define HINIC_EQ_CI_XOR_CHKSUM_SHIFT 24
+#define HINIC_EQ_CI_INT_ARMED_SHIFT 31
+
+#define HINIC_EQ_CI_IDX_MASK 0xFFFFF
+#define HINIC_EQ_CI_WRAPPED_MASK 0x1
+#define HINIC_EQ_CI_XOR_CHKSUM_MASK 0xF
+#define HINIC_EQ_CI_INT_ARMED_MASK 0x1
+
+#define HINIC_EQ_CI_SET(val, member) \
+ (((u32)(val) & HINIC_EQ_CI_##member##_MASK) << \
+ HINIC_EQ_CI_##member##_SHIFT)
+
+#define HINIC_EQ_CI_CLEAR(val, member) \
+ ((val) & (~(HINIC_EQ_CI_##member##_MASK \
+ << HINIC_EQ_CI_##member##_SHIFT)))
+
+#define HINIC_MAX_AEQS 4
+#define HINIC_MAX_CEQS 32
+
+#define HINIC_AEQE_SIZE 64
+#define HINIC_CEQE_SIZE 4
+
+#define HINIC_AEQE_DESC_SIZE 4
+#define HINIC_AEQE_DATA_SIZE \
+ (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)
+
+#define HINIC_DEFAULT_AEQ_LEN 64
+#define HINIC_DEFAULT_CEQ_LEN 1024
+
+#define HINIC_EQ_PAGE_SIZE SZ_4K
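+
+/*
+ * Derived capacities (illustrative): a 4 KB page holds 64 AEQEs (64 B
+ * each) or 1024 CEQEs (4 B each), so a default-length AEQ (64 entries)
+ * or CEQ (1024 entries) fits in a single page.
+ */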
+
+#define HINIC_CEQ_ID_CMDQ 0
+
+enum hinic_eq_type {
+ HINIC_AEQ,
+ HINIC_CEQ,
+};
+
+enum hinic_aeq_type {
+ HINIC_MBX_FROM_FUNC = 1,
+ HINIC_MSG_FROM_MGMT_CPU = 2,
+ HINIC_MBX_SEND_RSLT = 5,
+ HINIC_MAX_AEQ_EVENTS,
+};
+
+enum hinic_ceq_type {
+ HINIC_CEQ_CMDQ = 3,
+
+ HINIC_MAX_CEQ_EVENTS,
+};
+
+enum hinic_eqe_state {
+ HINIC_EQE_ENABLED = BIT(0),
+ HINIC_EQE_RUNNING = BIT(1),
+};
+
+struct hinic_aeq_elem {
+ u8 data[HINIC_AEQE_DATA_SIZE];
+ __be32 desc;
+};
+
+struct hinic_eq_work {
+ struct work_struct work;
+ void *data;
+};
+
+struct hinic_eq {
+ struct hinic_hwif *hwif;
+ struct hinic_hwdev *hwdev;
+ enum hinic_eq_type type;
+ int q_id;
+ u32 q_len;
+ u32 page_size;
+
+ u32 cons_idx;
+ int wrapped;
+
+ size_t elem_size;
+ int num_pages;
+ int num_elem_in_pg;
+
+ struct msix_entry msix_entry;
+ char irq_name[64];
+
+ dma_addr_t *dma_addr;
+ void **virt_addr;
+
+ struct hinic_eq_work aeq_work;
+
+ struct tasklet_struct ceq_tasklet;
+};
+
+struct hinic_hw_event_cb {
+ void (*hwe_handler)(void *handle, void *data, u8 size);
+ void *handle;
+ unsigned long hwe_state;
+};
+
+struct hinic_aeqs {
+ struct hinic_hwif *hwif;
+
+ struct hinic_eq aeq[HINIC_MAX_AEQS];
+ int num_aeqs;
+
+ struct hinic_hw_event_cb hwe_cb[HINIC_MAX_AEQ_EVENTS];
+
+ struct workqueue_struct *workq;
+};
+
+struct hinic_ceq_cb {
+ void (*handler)(void *handle, u32 ceqe_data);
+ void *handle;
+ enum hinic_eqe_state ceqe_state;
+};
+
+struct hinic_ceqs {
+ struct hinic_hwif *hwif;
+ struct hinic_hwdev *hwdev;
+ struct hinic_eq ceq[HINIC_MAX_CEQS];
+ int num_ceqs;
+
+ struct hinic_ceq_cb ceq_cb[HINIC_MAX_CEQ_EVENTS];
+};
+
+void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
+ enum hinic_aeq_type event, void *handle,
+ void (*hwe_handler)(void *handle, void *data,
+ u8 size));
+
+void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
+ enum hinic_aeq_type event);
+
+void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
+ enum hinic_ceq_type event, void *handle,
+ void (*ceq_cb)(void *handle, u32 ceqe_data));
+
+void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
+ enum hinic_ceq_type event);
+
+int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
+ int num_aeqs, u32 q_len, u32 page_size,
+ struct msix_entry *msix_entries);
+
+void hinic_aeqs_free(struct hinic_aeqs *aeqs);
+
+int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
+ int num_ceqs, u32 q_len, u32 page_size,
+ struct msix_entry *msix_entries);
+
+void hinic_ceqs_free(struct hinic_ceqs *ceqs);
+
+void hinic_dump_ceq_info(struct hinic_hwdev *hwdev);
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
new file mode 100644
index 0000000000..88567305d0
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+#include "hinic_hw_csr.h"
+#include "hinic_hw_if.h"
+
+#define PCIE_ATTR_ENTRY 0
+
+#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
+
+#define WAIT_HWIF_READY_TIMEOUT 10000
+
+#define HINIC_SELFTEST_RESULT 0x883C
+
+/**
+ * hinic_msix_attr_set - set message attribute for msix entry
+ * @hwif: the HW interface of a pci function device
+ * @msix_index: msix_index
+ * @pending_limit: the maximum pending interrupt events (unit 8)
+ * @coalesc_timer: coalesc period for interrupt (unit 8 us)
+ * @lli_timer: replenishing period for low latency credit (unit 8 us)
+ * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
+ * @resend_timer: maximum wait for resending msix (unit coalesc period)
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
+ u8 pending_limit, u8 coalesc_timer,
+ u8 lli_timer, u8 lli_credit_limit,
+ u8 resend_timer)
+{
+ u32 msix_ctrl, addr;
+
+ if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
+ return -EINVAL;
+
+ msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) |
+ HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) |
+ HINIC_MSIX_ATTR_SET(lli_timer, LLI_TIMER) |
+ HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) |
+ HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER);
+
+ addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
+
+ hinic_hwif_write_reg(hwif, addr, msix_ctrl);
+ return 0;
+}
+
+/**
+ * hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry
+ * @hwif: the HW interface of a pci function device
+ * @msix_index: msix_index
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
+{
+ u32 msix_ctrl, addr;
+
+ if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
+ return -EINVAL;
+
+ msix_ctrl = HINIC_MSIX_CNT_SET(1, RESEND_TIMER);
+ addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index);
+
+ hinic_hwif_write_reg(hwif, addr, msix_ctrl);
+ return 0;
+}
+
+/**
+ * hinic_set_pf_action - set action on pf channel
+ * @hwif: the HW interface of a pci function device
+ * @action: action on pf channel
+ **/
+void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
+{
+ u32 attr5;
+
+ if (HINIC_IS_VF(hwif))
+ return;
+
+ attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
+ attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION);
+ attr5 |= HINIC_FA5_SET(action, PF_ACTION);
+
+ hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5);
+}
+
+enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif)
+{
+ u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
+
+ return HINIC_FA4_GET(attr4, OUTBOUND_STATE);
+}
+
+void hinic_outbound_state_set(struct hinic_hwif *hwif,
+ enum hinic_outbound_state outbound_state)
+{
+ u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
+
+ attr4 = HINIC_FA4_CLEAR(attr4, OUTBOUND_STATE);
+ attr4 |= HINIC_FA4_SET(outbound_state, OUTBOUND_STATE);
+
+ hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
+}
+
+enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif)
+{
+ u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
+
+ return HINIC_FA4_GET(attr4, DB_STATE);
+}
+
+void hinic_db_state_set(struct hinic_hwif *hwif,
+ enum hinic_db_state db_state)
+{
+ u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
+
+ attr4 = HINIC_FA4_CLEAR(attr4, DB_STATE);
+ attr4 |= HINIC_FA4_SET(db_state, DB_STATE);
+
+ hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
+}
+
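+/*
+ * Mask or unmask one interrupt by toggling the per-vector mask bit in the
+ * device's MSI-X table: each table entry is 16 bytes and the vector
+ * control word sits at offset 12, matching the standard PCI MSI-X layout.
+ */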
+void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
+ enum hinic_msix_state flag)
+{
+ u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE +
+ HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL;
+ u32 mask_bits;
+
+ mask_bits = readl(hwif->intr_regs_base + offset);
+ mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ if (flag)
+ mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ writel(mask_bits, hwif->intr_regs_base + offset);
+}
+
+/**
+ * hwif_ready - test if the HW is ready for use
+ * @hwif: the HW interface of a pci function device
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int hwif_ready(struct hinic_hwif *hwif)
+{
+ u32 addr, attr1;
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(hwif, addr);
+
+ if (!HINIC_FA1_GET(attr1, MGMT_INIT_STATUS))
+ return -EBUSY;
+
+ if (HINIC_IS_VF(hwif)) {
+ if (!HINIC_FA1_GET(attr1, PF_INIT_STATUS))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
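+/*
+ * Poll hwif_ready() roughly once per millisecond; with
+ * WAIT_HWIF_READY_TIMEOUT (10000) iterations this gives the management
+ * CPU about ten seconds to finish initializing before we give up.
+ */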
+static int wait_hwif_ready(struct hinic_hwif *hwif)
+{
+ unsigned long timeout = 0;
+
+ do {
+ if (!hwif_ready(hwif))
+ return 0;
+
+ usleep_range(999, 1000);
+ timeout++;
+ } while (timeout <= WAIT_HWIF_READY_TIMEOUT);
+
+ dev_err(&hwif->pdev->dev, "Wait for hwif timeout\n");
+
+ return -EBUSY;
+}
+
+/**
+ * set_hwif_attr - set the attributes in the relevant members in hwif
+ * @hwif: the HW interface of a pci function device
+ * @attr0: the first attribute that was read from the hw
+ * @attr1: the second attribute that was read from the hw
+ * @attr2: the third attribute that was read from the hw
+ **/
+static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
+ u32 attr2)
+{
+ hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX);
+ hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX);
+ hwif->attr.pci_intf_idx = HINIC_FA0_GET(attr0, PCI_INTF_IDX);
+ hwif->attr.func_type = HINIC_FA0_GET(attr0, FUNC_TYPE);
+
+ hwif->attr.num_aeqs = BIT(HINIC_FA1_GET(attr1, AEQS_PER_FUNC));
+ hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC));
+ hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC));
+ hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC));
+ hwif->attr.global_vf_id_of_pf = HINIC_FA2_GET(attr2,
+ GLOBAL_VF_ID_OF_PF);
+}
+
+/**
+ * read_hwif_attr - read the attributes and set members in hwif
+ * @hwif: the HW interface of a pci function device
+ **/
+static void read_hwif_attr(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0, attr1, attr2;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(hwif, addr);
+
+ addr = HINIC_CSR_FUNC_ATTR2_ADDR;
+ attr2 = hinic_hwif_read_reg(hwif, addr);
+
+ set_hwif_attr(hwif, attr0, attr1, attr2);
+}
+
+/**
+ * set_ppf - try to set hwif as ppf and set the type of hwif in this case
+ * @hwif: the HW interface of a pci function device
+ **/
+static void set_ppf(struct hinic_hwif *hwif)
+{
+ struct hinic_func_attr *attr = &hwif->attr;
+ u32 addr, val, ppf_election;
+
+ /* Read Modify Write */
+ addr = HINIC_CSR_PPF_ELECTION_ADDR(HINIC_HWIF_PCI_INTF(hwif));
+
+ val = hinic_hwif_read_reg(hwif, addr);
+ val = HINIC_PPF_ELECTION_CLEAR(val, IDX);
+
+ ppf_election = HINIC_PPF_ELECTION_SET(HINIC_HWIF_FUNC_IDX(hwif), IDX);
+
+ val |= ppf_election;
+ hinic_hwif_write_reg(hwif, addr, val);
+
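+	/*
+	 * Every PF writes its own index and then reads the register back;
+	 * whichever index the register retains identifies the elected PPF
+	 * (assumed to be latched by hardware on a first-writer-wins basis).
+	 */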
+ /* check PPF */
+ val = hinic_hwif_read_reg(hwif, addr);
+
+ attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX);
+ if (attr->ppf_idx == HINIC_HWIF_FUNC_IDX(hwif))
+ attr->func_type = HINIC_PPF;
+}
+
+/**
+ * set_dma_attr - set the dma attributes in the HW
+ * @hwif: the HW interface of a pci function device
+ * @entry_idx: the entry index in the dma table
+ * @st: PCIE TLP steering tag
+ * @at: PCIE TLP AT field
+ * @ph: PCIE TLP Processing Hint field
+ * @no_snooping: PCIE TLP No snooping
+ * @tph_en: PCIE TLP Processing Hint Enable
+ **/
+static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx,
+ u8 st, u8 at, u8 ph,
+ enum hinic_pcie_nosnoop no_snooping,
+ enum hinic_pcie_tph tph_en)
+{
+ u32 addr, val, dma_attr_entry;
+
+ /* Read Modify Write */
+ addr = HINIC_CSR_DMA_ATTR_ADDR(entry_idx);
+
+ val = hinic_hwif_read_reg(hwif, addr);
+ val = HINIC_DMA_ATTR_CLEAR(val, ST) &
+ HINIC_DMA_ATTR_CLEAR(val, AT) &
+ HINIC_DMA_ATTR_CLEAR(val, PH) &
+ HINIC_DMA_ATTR_CLEAR(val, NO_SNOOPING) &
+ HINIC_DMA_ATTR_CLEAR(val, TPH_EN);
+
+ dma_attr_entry = HINIC_DMA_ATTR_SET(st, ST) |
+ HINIC_DMA_ATTR_SET(at, AT) |
+ HINIC_DMA_ATTR_SET(ph, PH) |
+ HINIC_DMA_ATTR_SET(no_snooping, NO_SNOOPING) |
+ HINIC_DMA_ATTR_SET(tph_en, TPH_EN);
+
+ val |= dma_attr_entry;
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * dma_attr_init - initialize the default dma attributes
+ * @hwif: the HW interface of a pci function device
+ **/
+static void dma_attr_init(struct hinic_hwif *hwif)
+{
+ set_dma_attr(hwif, PCIE_ATTR_ENTRY, HINIC_PCIE_ST_DISABLE,
+ HINIC_PCIE_AT_DISABLE, HINIC_PCIE_PH_DISABLE,
+ HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE);
+}
+
+u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif)
+{
+ if (!hwif)
+ return 0;
+
+ return hwif->attr.global_vf_id_of_pf;
+}
+
+u16 hinic_global_func_id_hw(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ return HINIC_FA0_GET(attr0, FUNC_IDX);
+}
+
+u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ return HINIC_FA0_GET(attr0, PF_IDX);
+}
+
+static void __print_selftest_reg(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0, attr1;
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(hwif, addr);
+
+ if (attr1 == HINIC_PCIE_LINK_DOWN) {
+		dev_err(&hwif->pdev->dev, "PCIe link is down\n");
+ return;
+ }
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+ if (HINIC_FA0_GET(attr0, FUNC_TYPE) != HINIC_VF &&
+ !HINIC_FA0_GET(attr0, PCI_INTF_IDX))
+ dev_err(&hwif->pdev->dev, "Selftest reg: 0x%08x\n",
+ hinic_hwif_read_reg(hwif, HINIC_SELFTEST_RESULT));
+}
+
+/**
+ * hinic_init_hwif - initialize the hw interface
+ * @hwif: the HW interface of a pci function device
+ * @pdev: the pci device for accessing PCI resources
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
+{
+ int err;
+
+ hwif->pdev = pdev;
+
+ hwif->cfg_regs_bar = pci_ioremap_bar(pdev, HINIC_PCI_CFG_REGS_BAR);
+ if (!hwif->cfg_regs_bar) {
+ dev_err(&pdev->dev, "Failed to map configuration regs\n");
+ return -ENOMEM;
+ }
+
+ hwif->intr_regs_base = pci_ioremap_bar(pdev, HINIC_PCI_INTR_REGS_BAR);
+ if (!hwif->intr_regs_base) {
+		dev_err(&pdev->dev, "Failed to map interrupt regs\n");
+ err = -ENOMEM;
+ goto err_map_intr_bar;
+ }
+
+ err = wait_hwif_ready(hwif);
+ if (err) {
+ dev_err(&pdev->dev, "HW interface is not ready\n");
+ __print_selftest_reg(hwif);
+ goto err_hwif_ready;
+ }
+
+ read_hwif_attr(hwif);
+
+ if (HINIC_IS_PF(hwif))
+ set_ppf(hwif);
+
+	/* No transactions before DMA is initialized */
+ dma_attr_init(hwif);
+ return 0;
+
+err_hwif_ready:
+ iounmap(hwif->intr_regs_base);
+
+err_map_intr_bar:
+ iounmap(hwif->cfg_regs_bar);
+
+ return err;
+}
+
+/**
+ * hinic_free_hwif - free the HW interface
+ * @hwif: the HW interface of a pci function device
+ **/
+void hinic_free_hwif(struct hinic_hwif *hwif)
+{
+ iounmap(hwif->intr_regs_base);
+ iounmap(hwif->cfg_regs_bar);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
new file mode 100644
index 0000000000..3d588896a3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_IF_H
+#define HINIC_HW_IF_H
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#define HINIC_PCIE_LINK_DOWN 0xFFFFFFFF
+
+#define HINIC_DMA_ATTR_ST_SHIFT 0
+#define HINIC_DMA_ATTR_AT_SHIFT 8
+#define HINIC_DMA_ATTR_PH_SHIFT 10
+#define HINIC_DMA_ATTR_NO_SNOOPING_SHIFT 12
+#define HINIC_DMA_ATTR_TPH_EN_SHIFT 13
+
+#define HINIC_DMA_ATTR_ST_MASK 0xFF
+#define HINIC_DMA_ATTR_AT_MASK 0x3
+#define HINIC_DMA_ATTR_PH_MASK 0x3
+#define HINIC_DMA_ATTR_NO_SNOOPING_MASK 0x1
+#define HINIC_DMA_ATTR_TPH_EN_MASK 0x1
+
+#define HINIC_DMA_ATTR_SET(val, member) \
+ (((u32)(val) & HINIC_DMA_ATTR_##member##_MASK) << \
+ HINIC_DMA_ATTR_##member##_SHIFT)
+
+#define HINIC_DMA_ATTR_CLEAR(val, member) \
+ ((val) & (~(HINIC_DMA_ATTR_##member##_MASK \
+ << HINIC_DMA_ATTR_##member##_SHIFT)))
+
+#define HINIC_FA0_FUNC_IDX_SHIFT 0
+#define HINIC_FA0_PF_IDX_SHIFT 10
+#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14
+#define HINIC_FA0_VF_IN_PF_SHIFT 16
+/* reserved members - off 16 */
+#define HINIC_FA0_FUNC_TYPE_SHIFT 24
+
+#define HINIC_FA0_FUNC_IDX_MASK 0x3FF
+#define HINIC_FA0_PF_IDX_MASK 0xF
+#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3
+#define HINIC_FA0_FUNC_TYPE_MASK 0x1
+#define HINIC_FA0_VF_IN_PF_MASK 0xFF
+
+#define HINIC_FA0_GET(val, member) \
+ (((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK)
+
+#define HINIC_FA1_AEQS_PER_FUNC_SHIFT 8
+/* reserved members - off 10 */
+#define HINIC_FA1_CEQS_PER_FUNC_SHIFT 12
+/* reserved members - off 15 */
+#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20
+#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24
+/* reserved members - off 27 */
+#define HINIC_FA1_MGMT_INIT_STATUS_SHIFT 30
+#define HINIC_FA1_PF_INIT_STATUS_SHIFT 31
+
+#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3
+#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7
+#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF
+#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7
+#define HINIC_FA1_MGMT_INIT_STATUS_MASK 0x1
+#define HINIC_FA1_PF_INIT_STATUS_MASK 0x1
+
+#define HINIC_FA1_GET(val, member) \
+ (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK)
+
+#define HINIC_FA2_GLOBAL_VF_ID_OF_PF_SHIFT 16
+#define HINIC_FA2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF
+
+#define HINIC_FA2_GET(val, member) \
+ (((val) >> HINIC_FA2_##member##_SHIFT) & HINIC_FA2_##member##_MASK)
+
+#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0
+#define HINIC_FA4_DB_STATE_SHIFT 1
+
+#define HINIC_FA4_OUTBOUND_STATE_MASK 0x1
+#define HINIC_FA4_DB_STATE_MASK 0x1
+
+#define HINIC_FA4_GET(val, member) \
+ (((val) >> HINIC_FA4_##member##_SHIFT) & HINIC_FA4_##member##_MASK)
+
+#define HINIC_FA4_SET(val, member) \
+	(((u32)(val) & HINIC_FA4_##member##_MASK) << HINIC_FA4_##member##_SHIFT)
+
+#define HINIC_FA4_CLEAR(val, member) \
+ ((val) & (~(HINIC_FA4_##member##_MASK << HINIC_FA4_##member##_SHIFT)))
+
+#define HINIC_FA5_PF_ACTION_SHIFT 0
+#define HINIC_FA5_PF_ACTION_MASK 0xFFFF
+
+#define HINIC_FA5_SET(val, member) \
+ (((u32)(val) & HINIC_FA5_##member##_MASK) << HINIC_FA5_##member##_SHIFT)
+
+#define HINIC_FA5_CLEAR(val, member) \
+ ((val) & (~(HINIC_FA5_##member##_MASK << HINIC_FA5_##member##_SHIFT)))
+
+#define HINIC_PPF_ELECTION_IDX_SHIFT 0
+#define HINIC_PPF_ELECTION_IDX_MASK 0x1F
+
+#define HINIC_PPF_ELECTION_SET(val, member) \
+ (((u32)(val) & HINIC_PPF_ELECTION_##member##_MASK) << \
+ HINIC_PPF_ELECTION_##member##_SHIFT)
+
+#define HINIC_PPF_ELECTION_GET(val, member) \
+ (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \
+ HINIC_PPF_ELECTION_##member##_MASK)
+
+#define HINIC_PPF_ELECTION_CLEAR(val, member) \
+ ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \
+ << HINIC_PPF_ELECTION_##member##_SHIFT)))
+
+#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0
+#define HINIC_MSIX_COALESC_TIMER_SHIFT 8
+#define HINIC_MSIX_LLI_TIMER_SHIFT 16
+#define HINIC_MSIX_LLI_CREDIT_SHIFT 24
+#define HINIC_MSIX_RESEND_TIMER_SHIFT 29
+
+#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFF
+#define HINIC_MSIX_COALESC_TIMER_MASK 0xFF
+#define HINIC_MSIX_LLI_TIMER_MASK 0xFF
+#define HINIC_MSIX_LLI_CREDIT_MASK 0x1F
+#define HINIC_MSIX_RESEND_TIMER_MASK 0x7
+
+#define HINIC_MSIX_ATTR_SET(val, member) \
+ (((u32)(val) & HINIC_MSIX_##member##_MASK) << \
+ HINIC_MSIX_##member##_SHIFT)
+
+#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
+
+#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1
+
+#define HINIC_MSIX_CNT_SET(val, member) \
+ (((u32)(val) & HINIC_MSIX_CNT_##member##_MASK) << \
+ HINIC_MSIX_CNT_##member##_SHIFT)
+
+#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs)
+#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs)
+#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs)
+#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx)
+#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx)
+#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx)
+#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
+
+#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
+#define HINIC_IS_VF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_VF)
+#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
+#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF)
+
+#define HINIC_PCI_CFG_REGS_BAR 0
+#define HINIC_PCI_INTR_REGS_BAR 2
+#define HINIC_PCI_DB_BAR 4
+
+#define HINIC_PCIE_ST_DISABLE 0
+#define HINIC_PCIE_AT_DISABLE 0
+#define HINIC_PCIE_PH_DISABLE 0
+
+#define HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT 0 /* Disabled */
+#define HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT 0xFF /* max */
+#define HINIC_EQ_MSIX_LLI_TIMER_DEFAULT 0 /* Disabled */
+#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */
+#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */
+
+#define HINIC_PCI_MSIX_ENTRY_SIZE 16
+#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+
+enum hinic_pcie_nosnoop {
+ HINIC_PCIE_SNOOP = 0,
+ HINIC_PCIE_NO_SNOOP = 1,
+};
+
+enum hinic_pcie_tph {
+ HINIC_PCIE_TPH_DISABLE = 0,
+ HINIC_PCIE_TPH_ENABLE = 1,
+};
+
+enum hinic_func_type {
+ HINIC_PF = 0,
+ HINIC_VF = 1,
+ HINIC_PPF = 2,
+};
+
+enum hinic_mod_type {
+ HINIC_MOD_COMM = 0, /* HW communication module */
+ HINIC_MOD_L2NIC = 1, /* L2NIC module */
+ HINIC_MOD_CFGM = 7, /* Configuration module */
+ HINIC_MOD_HILINK = 14, /* Hilink module */
+ HINIC_MOD_MAX = 15
+};
+
+enum hinic_node_id {
+ HINIC_NODE_ID_MGMT = 21,
+};
+
+enum hinic_pf_action {
+ HINIC_PF_MGMT_INIT = 0x0,
+
+ HINIC_PF_MGMT_ACTIVE = 0x11,
+};
+
+enum hinic_outbound_state {
+ HINIC_OUTBOUND_ENABLE = 0,
+ HINIC_OUTBOUND_DISABLE = 1,
+};
+
+enum hinic_db_state {
+ HINIC_DB_ENABLE = 0,
+ HINIC_DB_DISABLE = 1,
+};
+
+enum hinic_msix_state {
+ HINIC_MSIX_ENABLE,
+ HINIC_MSIX_DISABLE,
+};
+
+struct hinic_func_attr {
+ u16 func_idx;
+ u8 pf_idx;
+ u8 pci_intf_idx;
+
+ enum hinic_func_type func_type;
+
+ u8 ppf_idx;
+
+ u16 num_irqs;
+ u8 num_aeqs;
+ u8 num_ceqs;
+
+ u8 num_dma_attr;
+
+ u16 global_vf_id_of_pf;
+};
+
+struct hinic_hwif {
+ struct pci_dev *pdev;
+ void __iomem *cfg_regs_bar;
+ void __iomem *intr_regs_base;
+
+ struct hinic_func_attr attr;
+};
+
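+/*
+ * Register accessors: the configuration CSRs apparently hold big-endian
+ * 32-bit values, so the word moved by readl()/writel() is byte-swapped
+ * to/from host order around the MMIO access.
+ */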
+static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg)
+{
+ u32 out = readl(hwif->cfg_regs_bar + reg);
+
+ return be32_to_cpu(*(__be32 *)&out);
+}
+
+static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg,
+ u32 val)
+{
+ __be32 in = cpu_to_be32(val);
+
+ writel(*(u32 *)&in, hwif->cfg_regs_bar + reg);
+}
+
+int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
+ u8 pending_limit, u8 coalesc_timer,
+			u8 lli_timer, u8 lli_credit_limit,
+ u8 resend_timer);
+
+void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
+ enum hinic_msix_state flag);
+
+int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index);
+
+void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action);
+
+enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif);
+
+void hinic_outbound_state_set(struct hinic_hwif *hwif,
+ enum hinic_outbound_state outbound_state);
+
+enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif);
+
+void hinic_db_state_set(struct hinic_hwif *hwif,
+ enum hinic_db_state db_state);
+
+u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif);
+
+u16 hinic_global_func_id_hw(struct hinic_hwif *hwif);
+
+u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif);
+
+int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev);
+
+void hinic_free_hwif(struct hinic_hwif *hwif);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
new file mode 100644
index 0000000000..c4a0ba6e18
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -0,0 +1,634 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include "hinic_hw_dev.h"
+#include "hinic_hw_if.h"
+#include "hinic_hw_eqs.h"
+#include "hinic_hw_wqe.h"
+#include "hinic_hw_wq.h"
+#include "hinic_hw_cmdq.h"
+#include "hinic_hw_qp_ctxt.h"
+#include "hinic_hw_qp.h"
+#include "hinic_hw_io.h"
+
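+/*
+ * Consumer-index table: a single DMA-coherent table holds one 4-byte CI
+ * slot per qp; CI_ADDR() resolves the slot for a given queue id in either
+ * the virtual or the DMA view of the table.
+ */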
+#define CI_Q_ADDR_SIZE sizeof(u32)
+
+#define CI_ADDR(base_addr, q_id) ((base_addr) + \
+ (q_id) * CI_Q_ADDR_SIZE)
+
+#define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE)
+
+#define DB_IDX(db, db_base) \
+ (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
+
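+/*
+ * Encode a page size as log2(size / 4 KB) for the hardware, e.g. the
+ * 256 KB default WQ page (HINIC_DEFAULT_WQ_PAGE_SIZE) becomes
+ * ilog2(256K >> 12) = ilog2(64) = 6.
+ */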
+#define HINIC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12)))
+
+enum io_cmd {
+ IO_CMD_MODIFY_QUEUE_CTXT = 0,
+ IO_CMD_CLEAN_QUEUE_CTXT,
+};
+
+static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
+{
+ int i;
+
+ for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
+ free_db_area->db_idx[i] = i;
+
+ free_db_area->alloc_pos = 0;
+ free_db_area->return_pos = HINIC_DB_MAX_AREAS;
+
+ free_db_area->num_free = HINIC_DB_MAX_AREAS;
+
+ sema_init(&free_db_area->idx_lock, 1);
+}
+
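+/*
+ * The free doorbell pages are handed out through a circular index pool:
+ * alloc_pos and return_pos only ever advance and are wrapped with
+ * "& (HINIC_DB_MAX_AREAS - 1)", which works because HINIC_DB_MAX_AREAS
+ * (4 MB doorbell BAR / 4 KB pages = 1024) is a power of two.
+ */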
+static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
+{
+ struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
+ int pos, idx;
+
+ down(&free_db_area->idx_lock);
+
+ free_db_area->num_free--;
+
+ if (free_db_area->num_free < 0) {
+ free_db_area->num_free++;
+ up(&free_db_area->idx_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pos = free_db_area->alloc_pos++;
+ pos &= HINIC_DB_MAX_AREAS - 1;
+
+ idx = free_db_area->db_idx[pos];
+
+ free_db_area->db_idx[pos] = -1;
+
+ up(&free_db_area->idx_lock);
+
+ return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
+}
+
+static void return_db_area(struct hinic_func_to_io *func_to_io,
+ void __iomem *db_base)
+{
+ struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
+ int pos, idx = DB_IDX(db_base, func_to_io->db_base);
+
+ down(&free_db_area->idx_lock);
+
+ pos = free_db_area->return_pos++;
+ pos &= HINIC_DB_MAX_AREAS - 1;
+
+ free_db_area->db_idx[pos] = idx;
+
+ free_db_area->num_free++;
+
+ up(&free_db_area->idx_lock);
+}
+
+static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
+ u16 num_sqs)
+{
+ struct hinic_hwif *hwif = func_to_io->hwif;
+ struct hinic_sq_ctxt_block *sq_ctxt_block;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_cmdq_buf cmdq_buf;
+ struct hinic_sq_ctxt *sq_ctxt;
+ struct hinic_qp *qp;
+ u64 out_param;
+ int err, i;
+
+ err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
+ return err;
+ }
+
+ sq_ctxt_block = cmdq_buf.buf;
+ sq_ctxt = sq_ctxt_block->sq_ctxt;
+
+ hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
+ num_sqs, func_to_io->max_qps);
+ for (i = 0; i < num_sqs; i++) {
+ qp = &func_to_io->qps[i];
+
+ hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
+ base_qpn + qp->q_id);
+ }
+
+ cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);
+
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
+ &out_param);
+ if (err || out_param != 0) {
+ dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
+ err = -EFAULT;
+ }
+
+ hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
+ return err;
+}
+
+static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
+ u16 num_rqs)
+{
+ struct hinic_hwif *hwif = func_to_io->hwif;
+ struct hinic_rq_ctxt_block *rq_ctxt_block;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_cmdq_buf cmdq_buf;
+ struct hinic_rq_ctxt *rq_ctxt;
+ struct hinic_qp *qp;
+ u64 out_param;
+ int err, i;
+
+ err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
+ return err;
+ }
+
+ rq_ctxt_block = cmdq_buf.buf;
+ rq_ctxt = rq_ctxt_block->rq_ctxt;
+
+ hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
+ num_rqs, func_to_io->max_qps);
+ for (i = 0; i < num_rqs; i++) {
+ qp = &func_to_io->qps[i];
+
+ hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
+ base_qpn + qp->q_id);
+ }
+
+ cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);
+
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
+ &out_param);
+ if (err || out_param != 0) {
+ dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
+ err = -EFAULT;
+ }
+
+ hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
+ return err;
+}
+
+/**
+ * write_qp_ctxts - write the qp ctxt to HW
+ * @func_to_io: func to io channel that holds the IO components
+ * @base_qpn: first qp number
+ * @num_qps: number of qps to write
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
+ u16 num_qps)
+{
+ return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
+ write_rq_ctxts(func_to_io, base_qpn, num_qps));
+}
+
+static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io *func_to_io,
+ enum hinic_qp_ctxt_type ctxt_type)
+{
+ struct hinic_hwif *hwif = func_to_io->hwif;
+ struct hinic_clean_queue_ctxt *ctxt_block;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_cmdq_buf cmdq_buf;
+ u64 out_param = 0;
+ int err;
+
+ err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
+ return err;
+ }
+
+ ctxt_block = cmdq_buf.buf;
+ ctxt_block->cmdq_hdr.num_queues = func_to_io->max_qps;
+ ctxt_block->cmdq_hdr.queue_type = ctxt_type;
+ ctxt_block->cmdq_hdr.addr_offset = 0;
+
+ /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
+ ctxt_block->ctxt_size = 0x3;
+
+ hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
+
+ cmdq_buf.size = sizeof(*ctxt_block);
+
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ IO_CMD_CLEAN_QUEUE_CTXT,
+ &cmdq_buf, &out_param);
+
+ if (err || out_param) {
+ dev_err(&pdev->dev, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+
+ err = -EFAULT;
+ }
+
+ hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
+
+ return err;
+}
+
+static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io *func_to_io)
+{
+ /* clean LRO/TSO context space */
+ return (hinic_clean_queue_offload_ctxt(func_to_io,
+ HINIC_QP_CTXT_TYPE_SQ) ||
+ hinic_clean_queue_offload_ctxt(func_to_io,
+ HINIC_QP_CTXT_TYPE_RQ));
+}
+
+/**
+ * init_qp - Initialize a Queue Pair
+ * @func_to_io: func to io channel that holds the IO components
+ * @qp: pointer to the qp to initialize
+ * @q_id: the id of the qp
+ * @sq_msix_entry: msix entry for sq
+ * @rq_msix_entry: msix entry for rq
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int init_qp(struct hinic_func_to_io *func_to_io,
+ struct hinic_qp *qp, int q_id,
+ struct msix_entry *sq_msix_entry,
+ struct msix_entry *rq_msix_entry)
+{
+ struct hinic_hwif *hwif = func_to_io->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ void __iomem *db_base;
+ int err;
+
+ qp->q_id = q_id;
+
+ err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
+ HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
+ func_to_io->sq_depth, HINIC_SQ_WQE_MAX_SIZE);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
+ return err;
+ }
+
+ err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
+ HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
+ func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
+ goto err_rq_alloc;
+ }
+
+ db_base = get_db_area(func_to_io);
+ if (IS_ERR(db_base)) {
+ dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
+ err = PTR_ERR(db_base);
+ goto err_get_db;
+ }
+
+ func_to_io->sq_db[q_id] = db_base;
+
+ qp->sq.qid = q_id;
+ err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
+ sq_msix_entry,
+ CI_ADDR(func_to_io->ci_addr_base, q_id),
+ CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init SQ\n");
+ goto err_sq_init;
+ }
+
+ qp->rq.qid = q_id;
+ err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
+ rq_msix_entry);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init RQ\n");
+ goto err_rq_init;
+ }
+
+ return 0;
+
+err_rq_init:
+ hinic_clean_sq(&qp->sq);
+
+err_sq_init:
+ return_db_area(func_to_io, db_base);
+
+err_get_db:
+ hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
+
+err_rq_alloc:
+ hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
+ return err;
+}
+
+/**
+ * destroy_qp - Clean the resources of a Queue Pair
+ * @func_to_io: func to io channel that holds the IO components
+ * @qp: pointer to the qp to clean
+ **/
+static void destroy_qp(struct hinic_func_to_io *func_to_io,
+ struct hinic_qp *qp)
+{
+ int q_id = qp->q_id;
+
+ hinic_clean_rq(&qp->rq);
+ hinic_clean_sq(&qp->sq);
+
+ return_db_area(func_to_io, func_to_io->sq_db[q_id]);
+
+ hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
+ hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
+}
+
+/**
+ * hinic_io_create_qps - Create Queue Pairs
+ * @func_to_io: func to io channel that holds the IO components
+ * @base_qpn: base qp number
+ * @num_qps: number of queue pairs to create
+ * @sq_msix_entries: msix entries for sq
+ * @rq_msix_entries: msix entries for rq
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
+ u16 base_qpn, int num_qps,
+ struct msix_entry *sq_msix_entries,
+ struct msix_entry *rq_msix_entries)
+{
+ struct hinic_hwif *hwif = func_to_io->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ void *ci_addr_base;
+ int i, j, err;
+
+ func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->qps), GFP_KERNEL);
+ if (!func_to_io->qps)
+ return -ENOMEM;
+
+ func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->sq_wq), GFP_KERNEL);
+ if (!func_to_io->sq_wq) {
+ err = -ENOMEM;
+ goto err_sq_wq;
+ }
+
+ func_to_io->rq_wq = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->rq_wq), GFP_KERNEL);
+ if (!func_to_io->rq_wq) {
+ err = -ENOMEM;
+ goto err_rq_wq;
+ }
+
+ func_to_io->sq_db = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->sq_db), GFP_KERNEL);
+ if (!func_to_io->sq_db) {
+ err = -ENOMEM;
+ goto err_sq_db;
+ }
+
+ ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
+ &func_to_io->ci_dma_base,
+ GFP_KERNEL);
+ if (!ci_addr_base) {
+ dev_err(&pdev->dev, "Failed to allocate CI area\n");
+ err = -ENOMEM;
+ goto err_ci_base;
+ }
+
+ func_to_io->ci_addr_base = ci_addr_base;
+
+ for (i = 0; i < num_qps; i++) {
+ err = init_qp(func_to_io, &func_to_io->qps[i], i,
+ &sq_msix_entries[i], &rq_msix_entries[i]);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to create QP %d\n", i);
+ goto err_init_qp;
+ }
+ }
+
+ err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init QP ctxts\n");
+ goto err_write_qp_ctxts;
+ }
+
+ err = hinic_clean_qp_offload_ctxt(func_to_io);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to clean QP contexts space\n");
+ goto err_write_qp_ctxts;
+ }
+
+ return 0;
+
+err_write_qp_ctxts:
+err_init_qp:
+ for (j = 0; j < i; j++)
+ destroy_qp(func_to_io, &func_to_io->qps[j]);
+
+ dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
+ func_to_io->ci_addr_base, func_to_io->ci_dma_base);
+
+err_ci_base:
+ devm_kfree(&pdev->dev, func_to_io->sq_db);
+
+err_sq_db:
+ devm_kfree(&pdev->dev, func_to_io->rq_wq);
+
+err_rq_wq:
+ devm_kfree(&pdev->dev, func_to_io->sq_wq);
+
+err_sq_wq:
+ devm_kfree(&pdev->dev, func_to_io->qps);
+ return err;
+}
+
+/**
+ * hinic_io_destroy_qps - Destroy the IO Queue Pairs
+ * @func_to_io: func to io channel that holds the IO components
+ * @num_qps: number of queue pairs to destroy
+ **/
+void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
+{
+ struct hinic_hwif *hwif = func_to_io->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ size_t ci_table_size;
+ int i;
+
+ ci_table_size = CI_TABLE_SIZE(num_qps);
+
+ for (i = 0; i < num_qps; i++)
+ destroy_qp(func_to_io, &func_to_io->qps[i]);
+
+ dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
+ func_to_io->ci_dma_base);
+
+ devm_kfree(&pdev->dev, func_to_io->sq_db);
+
+ devm_kfree(&pdev->dev, func_to_io->rq_wq);
+ devm_kfree(&pdev->dev, func_to_io->sq_wq);
+
+ devm_kfree(&pdev->dev, func_to_io->qps);
+}
+
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct hinic_wq_page_size page_size_info = {0};
+ u16 out_size = sizeof(page_size_info);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ page_size_info.func_idx = func_idx;
+ page_size_info.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_PAGESIZE_SET, &page_size_info,
+ sizeof(page_size_info), &page_size_info,
+ &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || page_size_info.status) {
+		dev_err(&hwdev->hwif->pdev->dev, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, page_size_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_io_init - Initialize the IO components
+ * @func_to_io: func to io channel that holds the IO components
+ * @hwif: HW interface for accessing IO
+ * @max_qps: maximum QPs in HW
+ * @num_ceqs: number of completion event queues
+ * @ceq_msix_entries: msix entries for ceqs
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_io_init(struct hinic_func_to_io *func_to_io,
+ struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
+ struct msix_entry *ceq_msix_entries)
+{
+ struct pci_dev *pdev = hwif->pdev;
+ enum hinic_cmdq_type cmdq, type;
+ void __iomem *db_area;
+ int err;
+
+ func_to_io->hwif = hwif;
+ func_to_io->qps = NULL;
+ func_to_io->max_qps = max_qps;
+ func_to_io->ceqs.hwdev = func_to_io->hwdev;
+
+ err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
+ HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
+ ceq_msix_entries);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init CEQs\n");
+ return err;
+ }
+
+ err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
+ goto err_wqs_alloc;
+ }
+
+ func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
+ if (!func_to_io->db_base) {
+ dev_err(&pdev->dev, "Failed to remap IO DB area\n");
+ err = -ENOMEM;
+ goto err_db_ioremap;
+ }
+
+ init_db_area_idx(&func_to_io->free_db_area);
+
+ for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
+ db_area = get_db_area(func_to_io);
+ if (IS_ERR(db_area)) {
+ dev_err(&pdev->dev, "Failed to get cmdq db area\n");
+ err = PTR_ERR(db_area);
+ goto err_db_area;
+ }
+
+ func_to_io->cmdq_db_area[cmdq] = db_area;
+ }
+
+ err = hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(hwif),
+ HINIC_DEFAULT_WQ_PAGE_SIZE);
+ if (err) {
+ dev_err(&func_to_io->hwif->pdev->dev, "Failed to set wq page size\n");
+		goto err_set_wq_page_size;
+ }
+
+ err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
+ func_to_io->cmdq_db_area);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
+ goto err_init_cmdqs;
+ }
+
+ return 0;
+
+err_init_cmdqs:
+ if (!HINIC_IS_VF(func_to_io->hwif))
+ hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(hwif),
+ HINIC_HW_WQ_PAGE_SIZE);
+err_set_wq_page_size:
+err_db_area:
+ for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
+ return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);
+
+ iounmap(func_to_io->db_base);
+
+err_db_ioremap:
+ hinic_wqs_free(&func_to_io->wqs);
+
+err_wqs_alloc:
+ hinic_ceqs_free(&func_to_io->ceqs);
+ return err;
+}
+
+/**
+ * hinic_io_free - Free the IO components
+ * @func_to_io: func to io channel that holds the IO components
+ **/
+void hinic_io_free(struct hinic_func_to_io *func_to_io)
+{
+ enum hinic_cmdq_type cmdq;
+
+ hinic_free_cmdqs(&func_to_io->cmdqs);
+
+ if (!HINIC_IS_VF(func_to_io->hwif))
+ hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(func_to_io->hwif),
+ HINIC_HW_WQ_PAGE_SIZE);
+
+ for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
+ return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
+
+ iounmap(func_to_io->db_base);
+ hinic_wqs_free(&func_to_io->wqs);
+ hinic_ceqs_free(&func_to_io->ceqs);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
new file mode 100644
index 0000000000..52159a9027
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_IO_H
+#define HINIC_HW_IO_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/sizes.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_eqs.h"
+#include "hinic_hw_wq.h"
+#include "hinic_hw_cmdq.h"
+#include "hinic_hw_qp.h"
+
+#define HINIC_DB_PAGE_SIZE SZ_4K
+#define HINIC_DB_SIZE SZ_4M
+#define HINIC_HW_WQ_PAGE_SIZE SZ_4K
+#define HINIC_DEFAULT_WQ_PAGE_SIZE SZ_256K
+
+#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE)
+
+enum hinic_db_type {
+ HINIC_DB_CMDQ_TYPE,
+ HINIC_DB_SQ_TYPE,
+};
+
+enum hinic_io_path {
+ HINIC_CTRL_PATH,
+ HINIC_DATA_PATH,
+};
+
+struct hinic_free_db_area {
+ int db_idx[HINIC_DB_MAX_AREAS];
+
+ int alloc_pos;
+ int return_pos;
+
+ int num_free;
+
+ /* Lock for getting db area */
+ struct semaphore idx_lock;
+};
+
+struct hinic_nic_cfg {
+ /* lock for getting nic cfg */
+ struct mutex cfg_mutex;
+ bool pause_set;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
+struct hinic_func_to_io {
+ struct hinic_hwif *hwif;
+ struct hinic_hwdev *hwdev;
+ u16 global_qpn;
+ struct hinic_ceqs ceqs;
+
+ struct hinic_wqs wqs;
+
+ struct hinic_wq *sq_wq;
+ struct hinic_wq *rq_wq;
+
+ struct hinic_qp *qps;
+ u16 max_qps;
+
+ u16 sq_depth;
+ u16 rq_depth;
+
+ void __iomem **sq_db;
+ void __iomem *db_base;
+
+ void *ci_addr_base;
+ dma_addr_t ci_dma_base;
+
+ struct hinic_free_db_area free_db_area;
+
+ void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES];
+
+ struct hinic_cmdqs cmdqs;
+
+ u16 max_vfs;
+ struct vf_data_storage *vf_infos;
+ u8 link_status;
+ struct hinic_nic_cfg nic_cfg;
+};
+
+struct hinic_wq_page_size {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 page_size;
+
+ u32 rsvd1;
+};
+
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+
+int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
+ u16 base_qpn, int num_qps,
+ struct msix_entry *sq_msix_entries,
+ struct msix_entry *rq_msix_entries);
+
+void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io,
+ int num_qps);
+
+int hinic_io_init(struct hinic_func_to_io *func_to_io,
+ struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
+ struct msix_entry *ceq_msix_entries);
+
+void hinic_io_free(struct hinic_func_to_io *func_to_io);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
new file mode 100644
index 0000000000..3f9c31d292
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -0,0 +1,1515 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw_csr.h"
+#include "hinic_hw_dev.h"
+#include "hinic_hw_mbox.h"
+
+#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0
+#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12
+#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14
+/* The size of the data to be sent (in units of 4 bytes) */
+#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20
+/* SO_RO(strong order, relax order) */
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25
+#define HINIC_MBOX_INT_WB_EN_SHIFT 28
+
+#define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF
+#define HINIC_MBOX_INT_DST_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F
+#define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3
+#define HINIC_MBOX_INT_WB_EN_MASK 0x1
+
+#define HINIC_MBOX_INT_SET(val, field) \
+ (((val) & HINIC_MBOX_INT_##field##_MASK) << \
+ HINIC_MBOX_INT_##field##_SHIFT)
+
+enum hinic_mbox_tx_status {
+ TX_NOT_DONE = 1,
+};
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0
+
+/* specifies the issue request for the message data.
+ * 0 - Tx request is done;
+ * 1 - Tx request is in process.
+ */
+#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1
+#define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1
+
+#define HINIC_MBOX_CTRL_SET(val, field) \
+ (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
+ HINIC_MBOX_CTRL_##field##_SHIFT)
+
+#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MBOX_HEADER_MODULE_SHIFT 11
+#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MBOX_HEADER_SEQID_SHIFT 24
+#define HINIC_MBOX_HEADER_LAST_SHIFT 30
+
+/* specifies the mailbox message direction
+ * 0 - send
+ * 1 - receive
+ */
+#define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MBOX_HEADER_CMD_SHIFT 32
+#define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40
+#define HINIC_MBOX_HEADER_STATUS_SHIFT 48
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54
+
+#define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MBOX_HEADER_MODULE_MASK 0x1F
+#define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MBOX_HEADER_SEQID_MASK 0x3F
+#define HINIC_MBOX_HEADER_LAST_MASK 0x1
+#define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MBOX_HEADER_CMD_MASK 0xFF
+#define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF
+#define HINIC_MBOX_HEADER_STATUS_MASK 0x3F
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF
+
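+/*
+ * Putting the shifts and masks together, the 64-bit mailbox header packs
+ * (low to high): msg_len[10:0], module[15:11], seg_len[21:16], no_ack[22],
+ * seqid[29:24], last[30], direction[31], cmd[39:32], msg_id[47:40],
+ * status[53:48] and src_glb_func_idx[63:54].
+ */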
+#define HINIC_MBOX_HEADER_GET(val, field) \
+ (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
+ HINIC_MBOX_HEADER_##field##_MASK)
+#define HINIC_MBOX_HEADER_SET(val, field) \
+ ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
+ HINIC_MBOX_HEADER_##field##_SHIFT)
+
+#define MBOX_SEGLEN_MASK \
+ HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)
+
+#define HINIC_MBOX_SEG_LEN 48
+#define HINIC_MBOX_COMP_TIME 8000U
+#define MBOX_MSG_POLLING_TIMEOUT 8000
+
+#define HINIC_MBOX_DATA_SIZE 2040
+
+#define MBOX_MAX_BUF_SZ 2048UL
+#define MBOX_HEADER_SZ 8
+
+#define MBOX_INFO_SZ 4
+
+/* MBOX size is 64B, 8B for mbox_header, 4B reserved */
+#define MBOX_SEG_LEN 48
+#define MBOX_SEG_LEN_ALIGN 4
+#define MBOX_WB_STATUS_LEN 16UL
+
+/* mbox write back status is 16B, only the first 4B are used */
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
+#define MBOX_WB_STATUS_MASK 0xFF
+#define MBOX_WB_ERROR_CODE_MASK 0xFF00
+#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_NOT_FINISHED 0x00
+
+#define MBOX_STATUS_FINISHED(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
+#define MBOX_STATUS_SUCCESS(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
+#define MBOX_STATUS_ERRCODE(wb) \
+ ((wb) & MBOX_WB_ERROR_CODE_MASK)
+
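+/*
+ * Segment sequence ids: a message buffer is at most MBOX_MAX_BUF_SZ
+ * (2048) bytes and travels in MBOX_SEG_LEN (48) byte segments, so up to
+ * ceil(2048 / 48) = 43 segments, numbered 0 through 42, make up one
+ * message.
+ */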
+#define SEQ_ID_START_VAL 0
+#define SEQ_ID_MAX_VAL 42
+
+#define NO_DMA_ATTRIBUTE_VAL 0
+
+#define HINIC_MBOX_RSP_AEQN 2
+#define HINIC_MBOX_RECV_AEQN 0
+
+#define MBOX_MSG_NO_DATA_LEN 1
+
+#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ)
+#define MBOX_AREA(hwif) \
+ ((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)
+
+#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
+
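+/* message ids are 8 bits wide in the mailbox header and wrap around at 256 */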
+#define MBOX_MSG_ID_MASK 0xFF
+#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
+#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
+ (MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)
+
+#define FUNC_ID_OFF_SET_8B 8
+
+/* max number of messages waiting to be processed for one function */
+#define HINIC_MAX_MSG_CNT_TO_PROCESS 10
+
+#define HINIC_QUEUE_MIN_DEPTH 6
+#define HINIC_QUEUE_MAX_DEPTH 12
+#define HINIC_MAX_RX_BUFFER_SIZE 15
+
+enum hinic_hwif_direction_type {
+ HINIC_HWIF_DIRECT_SEND = 0,
+ HINIC_HWIF_RESPONSE = 1,
+};
+
+enum mbox_send_mod {
+ MBOX_SEND_MSG_INT,
+};
+
+enum mbox_seg_type {
+ NOT_LAST_SEG,
+ LAST_SEG,
+};
+
+enum mbox_ordering_type {
+ STRONG_ORDER,
+};
+
+enum mbox_write_back_type {
+ WRITE_BACK = 1,
+};
+
+enum mbox_aeq_trig_type {
+ NOT_TRIGGER,
+ TRIGGER,
+};
+
+static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx,
+ const void *buf_in, u16 in_size, u16 offset)
+{
+ u16 func_idx;
+
+ if (in_size < offset + sizeof(func_idx)) {
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "Receive mailbox msg len: %d less than %d Bytes is invalid\n",
+ in_size, offset);
+ return false;
+ }
+
+ func_idx = *((u16 *)((u8 *)buf_in + offset));
+
+ if (src_func_idx != func_idx) {
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "Receive mailbox function id: 0x%x not equal to msg function id: 0x%x\n",
+ src_func_idx, func_idx);
+ return false;
+ }
+
+ return true;
+}
+
+bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ return check_func_id(hwdev, func_idx, buf_in, in_size,
+ FUNC_ID_OFF_SET_8B);
+}
+
+/**
+ * hinic_register_pf_mbox_cb - register mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->pf_mbox_cb[mod] = callback;
+
+ set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+ return 0;
+}
+
+/**
+ * hinic_register_vf_mbox_cb - register mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->vf_mbox_cb[mod] = callback;
+
+ set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+ return 0;
+}
+
+/**
+ * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ func_to_func->pf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ func_to_func->vf_mbox_cb[mod] = NULL;
+}
+
+static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ void *buf_out, u16 *out_size)
+{
+ hinic_vf_mbox_cb cb;
+ int ret = 0;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ set_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
+ cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
+ recv_mbox->mbox_len, buf_out, out_size);
+ } else {
+ dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n");
+ ret = -EINVAL;
+ }
+
+ clear_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
+}
+
+static int
+recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx, void *buf_out,
+ u16 *out_size)
+{
+ hinic_pf_mbox_cb cb;
+ u16 vf_id = 0;
+ int ret;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ set_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
+ vf_id = src_func_idx -
+ hinic_glb_pf_vf_offset(func_to_func->hwif);
+ ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
+ recv_mbox->mbox, recv_mbox->mbox_len,
+ buf_out, out_size);
+ } else {
+ dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n",
+ recv_mbox->mod);
+ ret = -EINVAL;
+ }
+
+ clear_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
+}
+
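+/* Segments must arrive in order: seq_id 0 (re)starts reassembly and every
+ * following segment must increment seq_id by exactly one.
+ */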
+static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_mbox->seq_id = seq_id;
+ } else {
+ if (seq_id != recv_mbox->seq_id + 1)
+ return false;
+
+ recv_mbox->seq_id = seq_id;
+ }
+
+ return true;
+}
+
+static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
+ func_to_func->event_flag == EVENT_START)
+ complete(&recv_mbox->recv_done);
+ else
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
+ func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
+ recv_mbox->msg_info.status);
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx);
+
+static void recv_func_mbox_work_handler(struct work_struct *work)
+{
+ struct hinic_mbox_work *mbox_work =
+ container_of(work, struct hinic_mbox_work, work);
+ struct hinic_recv_mbox *recv_mbox;
+
+ recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
+ mbox_work->src_func_idx);
+
+ recv_mbox =
+ &mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];
+
+ atomic_dec(&recv_mbox->msg_cnt);
+
+ kfree(mbox_work);
+}
+
+static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ void *header, struct hinic_recv_mbox *recv_mbox)
+{
+ void *mbox_body = MBOX_BODY_FROM_HDR(header);
+ struct hinic_recv_mbox *rcv_mbox_temp = NULL;
+ u64 mbox_header = *((u64 *)header);
+ struct hinic_mbox_work *mbox_work;
+ u8 seq_id, seg_len;
+ u16 src_func_idx;
+ int pos;
+
+ seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
+ seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
+ src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
+ src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
+ recv_mbox->seq_id = SEQ_ID_MAX_VAL;
+ return;
+ }
+
+ pos = seq_id * MBOX_SEG_LEN;
+ memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
+ HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));
+
+ if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
+ return;
+
+ recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
+ recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
+ recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
+ recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
+ recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
+ recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
+ recv_mbox->seq_id = SEQ_ID_MAX_VAL;
+
+ if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
+ HINIC_HWIF_RESPONSE) {
+ resp_mbox_handler(func_to_func, recv_mbox);
+ return;
+ }
+
+ if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
+ dev_warn(&func_to_func->hwif->pdev->dev,
+ "This function(%u) have %d message wait to process,can't add to work queue\n",
+ src_func_idx, atomic_read(&recv_mbox->msg_cnt));
+ return;
+ }
+
+ rcv_mbox_temp = kmemdup(recv_mbox, sizeof(*rcv_mbox_temp), GFP_KERNEL);
+ if (!rcv_mbox_temp)
+ return;
+
+ rcv_mbox_temp->mbox = kmemdup(recv_mbox->mbox, MBOX_MAX_BUF_SZ,
+ GFP_KERNEL);
+ if (!rcv_mbox_temp->mbox)
+ goto err_alloc_rcv_mbox_msg;
+
+ rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!rcv_mbox_temp->buf_out)
+ goto err_alloc_rcv_mbox_buf;
+
+ mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
+ if (!mbox_work)
+ goto err_alloc_mbox_work;
+
+ mbox_work->func_to_func = func_to_func;
+ mbox_work->recv_mbox = rcv_mbox_temp;
+ mbox_work->src_func_idx = src_func_idx;
+
+ atomic_inc(&recv_mbox->msg_cnt);
+ INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
+ queue_work(func_to_func->workq, &mbox_work->work);
+
+ return;
+
+err_alloc_mbox_work:
+ kfree(rcv_mbox_temp->buf_out);
+
+err_alloc_rcv_mbox_buf:
+ kfree(rcv_mbox_temp->mbox);
+
+err_alloc_rcv_mbox_msg:
+ kfree(rcv_mbox_temp);
+}
+
+static int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ struct hinic_set_random_id rand_info = {0};
+ u16 out_size = sizeof(rand_info);
+ struct hinic_pfhwdev *pfhwdev;
+ int ret;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ rand_info.version = HINIC_CMD_VER_FUNC_ID;
+ rand_info.func_idx = func_id;
+ rand_info.vf_in_pf = func_id - hinic_glb_pf_vf_offset(hwdev->hwif);
+ rand_info.random_id = get_random_u32();
+
+ func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id;
+
+ ret = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_SET_VF_RANDOM_ID,
+ &rand_info, sizeof(rand_info),
+ &rand_info, &out_size, HINIC_MGMT_MSG_SYNC);
+ if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ rand_info.status) || !out_size || ret) {
+ dev_err(&hwdev->hwif->pdev->dev, "Set VF random id failed, err: %d, status: 0x%x, out size: 0x%x\n",
+ ret, rand_info.status, out_size);
+ return -EIO;
+ }
+
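+	/* an UNSUPPORTED status is passed back to the caller so that it can
+	 * disable the VF random id feature altogether
+	 */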
+ if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED)
+ return rand_info.status;
+
+ func_to_func->vf_mbx_old_rand_id[func_id] =
+ func_to_func->vf_mbx_rand_id[func_id];
+
+ return 0;
+}
+
+static void update_random_id_work_handler(struct work_struct *work)
+{
+ struct hinic_mbox_work *mbox_work =
+ container_of(work, struct hinic_mbox_work, work);
+ struct hinic_mbox_func_to_func *func_to_func;
+ u16 src = mbox_work->src_func_idx;
+
+ func_to_func = mbox_work->func_to_func;
+
+ if (set_vf_mbox_random_id(func_to_func->hwdev, src))
+ dev_warn(&func_to_func->hwdev->hwif->pdev->dev, "Update VF id: 0x%x random id failed\n",
+ mbox_work->src_func_idx);
+
+ kfree(mbox_work);
+}
+
+static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func,
+ u8 *header)
+{
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct hinic_mbox_work *mbox_work = NULL;
+ u64 mbox_header = *((u64 *)header);
+ u16 offset, src;
+ u32 random_id;
+ int vf_in_pf;
+
+ src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random)
+ return true;
+
+ if (!HINIC_IS_PPF(hwdev->hwif)) {
+ offset = hinic_glb_pf_vf_offset(hwdev->hwif);
+ vf_in_pf = src - offset;
+
+ if (vf_in_pf < 1 || vf_in_pf > hwdev->nic_cap.max_vf) {
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "Receive vf id(0x%x) is invalid, vf id should be from 0x%x to 0x%x\n",
+ src, offset + 1,
+ hwdev->nic_cap.max_vf + offset);
+ return false;
+ }
+ }
+
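+	/* the random id travels in the 4-byte info area that follows the
+	 * 8-byte header and the 48-byte segment data
+	 */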
+ random_id = be32_to_cpu(*(u32 *)(header + MBOX_SEG_LEN +
+ MBOX_HEADER_SZ));
+
+ if (random_id == func_to_func->vf_mbx_rand_id[src] ||
+ random_id == func_to_func->vf_mbx_old_rand_id[src])
+ return true;
+
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "The mailbox random id(0x%x) of func_id(0x%x) doesn't match with pf reservation(0x%x)\n",
+ random_id, src, func_to_func->vf_mbx_rand_id[src]);
+
+ mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
+ if (!mbox_work)
+ return false;
+
+ mbox_work->func_to_func = func_to_func;
+ mbox_work->src_func_idx = src;
+
+ INIT_WORK(&mbox_work->work, update_random_id_work_handler);
+ queue_work(func_to_func->workq, &mbox_work->work);
+
+ return false;
+}
+
+static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ u64 mbox_header = *((u64 *)header);
+ struct hinic_recv_mbox *recv_mbox;
+ u64 src, dir;
+
+ func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
+
+ dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
+ src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (src >= HINIC_MAX_FUNCTIONS) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mailbox source function id:%u is invalid\n", (u32)src);
+ return;
+ }
+
+ if (!check_vf_mbox_random_id(func_to_func, header))
+ return;
+
+ recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
+ &func_to_func->mbox_send[src] :
+ &func_to_func->mbox_resp[src];
+
+ recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
+}
+
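+/* AEQ event reported once a mailbox segment has been sent; it completes the
+ * send_done that wait_for_mbox_seg_completion() sleeps on in the
+ * event-driven (non-polling) path.
+ */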
+static void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_send_mbox *send_mbox;
+
+ func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
+ send_mbox = &func_to_func->send_mbox;
+
+ complete(&send_mbox->send_done);
+}
+
+static void clear_mbox_status(struct hinic_send_mbox *mbox)
+{
+ *mbox->wb_status = 0;
+
+ /* clear mailbox write back status */
+ wmb();
+}
+
+static void mbox_copy_header(struct hinic_hwdev *hwdev,
+ struct hinic_send_mbox *mbox, u64 *header)
+{
+ u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
+ u32 *data = (u32 *)header;
+
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i), mbox->data + i * sizeof(u32));
+}
+
+static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
+ struct hinic_send_mbox *mbox, void *seg,
+ u16 seg_len)
+{
+ u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
+ u32 data_len, chk_sz = sizeof(u32);
+ u32 *data = seg;
+ u32 i, idx_max;
+
+	/* The mbox message should be aligned to 4 bytes. */
+ if (seg_len % chk_sz) {
+ memcpy(mbox_max_buf, seg, seg_len);
+ data = (u32 *)mbox_max_buf;
+ }
+
+ data_len = seg_len;
+ idx_max = ALIGN(data_len, chk_sz) / chk_sz;
+
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i),
+ mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
+}
+
+static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
+ u16 dst_func, u16 dst_aeqn, u16 seg_len,
+ int poll)
+{
+ u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
+ u32 mbox_int, mbox_ctrl;
+
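+	/* TX_SIZE is in 4-byte units: header (8B) + segment (48B) + info (4B)
+	 * gives 60B, i.e. a TX_SIZE value of 15
+	 */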
+ mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
+ HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
+ HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) |
+ HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
+ HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ +
+ MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2,
+ TX_SIZE) |
+ HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
+ HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);
+
+ hinic_hwif_write_reg(func_to_func->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);
+
+ wmb(); /* writing the mbox int attributes */
+ mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);
+
+ if (poll)
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
+ else
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);
+
+ hinic_hwif_write_reg(func_to_func->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
+}
+
+static void dump_mbox_reg(struct hinic_hwdev *hwdev)
+{
+ u32 val;
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
+ dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val);
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
+ dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n",
+ val);
+}
+
+static u16 get_mbox_status(struct hinic_send_mbox *mbox)
+{
+ /* write back is 16B, but only use first 4B */
+ u64 wb_val = be64_to_cpu(*mbox->wb_status);
+
+	rmb(); /* ensure the write back status is read before checking it */
+
+ return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
+}
+
+static int
+wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
+ int poll, u16 *wb_status)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct completion *done = &send_mbox->send_done;
+ u32 cnt = 0;
+ unsigned long jif;
+
+ if (poll) {
+ while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
+ *wb_status = get_mbox_status(send_mbox);
+ if (MBOX_STATUS_FINISHED(*wb_status))
+ break;
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
+ *wb_status);
+			dump_mbox_reg(hwdev);
+ return -ETIMEDOUT;
+ }
+ } else {
+ jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(done, jif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
+			dump_mbox_reg(hwdev);
+ hinic_dump_aeq_info(hwdev);
+ return -ETIMEDOUT;
+ }
+
+ *wb_status = get_mbox_status(send_mbox);
+ }
+
+ return 0;
+}
+
+static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
+ u64 header, u16 dst_func, void *seg, u16 seg_len,
+ int poll, void *msg_info)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct completion *done = &send_mbox->send_done;
+ u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
+ u16 dst_aeqn, wb_status = 0, errcode;
+
+ if (num_aeqs >= 4)
+ dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
+ HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
+ else
+ dst_aeqn = 0;
+
+ if (!poll)
+ init_completion(done);
+
+ clear_mbox_status(send_mbox);
+
+ mbox_copy_header(hwdev, send_mbox, &header);
+
+ mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
+
+ write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);
+
+ wmb(); /* writing the mbox msg attributes */
+
+ if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status))
+ return -ETIMEDOUT;
+
+ if (!MBOX_STATUS_SUCCESS(wb_status)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n",
+ dst_func, wb_status);
+ errcode = MBOX_STATUS_ERRCODE(wb_status);
+ return errcode ? errcode : -EFAULT;
+ }
+
+ return 0;
+}
+
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, void *msg,
+ u16 msg_len, u16 dst_func,
+ enum hinic_hwif_direction_type direction,
+ enum hinic_mbox_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
+{
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u16 seg_len = MBOX_SEG_LEN;
+ u8 *msg_seg = (u8 *)msg;
+ u16 left = msg_len;
+ u32 seq_id = 0;
+ u64 header = 0;
+ int err = 0;
+
+ down(&func_to_func->msg_send_sem);
+
+ header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MBOX_HEADER_SET(mod, MODULE) |
+ HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
+ HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
+ HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
+ HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
+ HINIC_MBOX_HEADER_SET(cmd, CMD) |
+		  /* The vf's offset to its associated pf */
+ HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
+ HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
+ HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
+ SRC_GLB_FUNC_IDX);
+
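+	/* send the message 48 bytes at a time; the final segment carries the
+	 * LAST flag and its actual (possibly shorter) length
+	 */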
+ while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
+ if (left <= HINIC_MBOX_SEG_LEN) {
+ header &= ~MBOX_SEGLEN_MASK;
+ header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
+ header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);
+
+ seg_len = left;
+ }
+
+ err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
+ seg_len, MBOX_SEND_MSG_INT, msg_info);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
+ HINIC_MBOX_HEADER_GET(header, SEQID));
+ goto err_send_mbox_seg;
+ }
+
+ left -= HINIC_MBOX_SEG_LEN;
+ msg_seg += HINIC_MBOX_SEG_LEN;
+
+ seq_id++;
+ header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
+ SEQID));
+ header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
+ }
+
+err_send_mbox_seg:
+ up(&func_to_func->msg_send_sem);
+
+ return err;
+}
+
+static void
+response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox, int err,
+ u16 out_size, u16 src_func_idx)
+{
+ struct mbox_msg_info msg_info = {0};
+
+ if (recv_mbox->ack_type == MBOX_ACK) {
+ msg_info.msg_id = recv_mbox->msg_info.msg_id;
+ if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
+ else if (err == HINIC_MBOX_VF_CMD_ERROR)
+ msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
+ else if (err)
+ msg_info.status = HINIC_MBOX_PF_SEND_ERR;
+
+		/* if no data needs to be sent in the response, set out_size to 1 */
+ if (!out_size || err)
+ out_size = MBOX_MSG_NO_DATA_LEN;
+
+ send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
+ recv_mbox->buf_out, out_size, src_func_idx,
+ HINIC_HWIF_RESPONSE, MBOX_ACK,
+ &msg_info);
+ }
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx)
+{
+ void *buf_out = recv_mbox->buf_out;
+ u16 out_size = MBOX_MAX_BUF_SZ;
+ int err = 0;
+
+ if (HINIC_IS_VF(func_to_func->hwif)) {
+ err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
+ &out_size);
+ } else {
+ if (IS_PF_OR_PPF_SRC(src_func_idx))
+ dev_warn(&func_to_func->hwif->pdev->dev,
+ "Unsupported pf2pf mbox msg\n");
+ else
+ err = recv_pf_from_vf_mbox_handler(func_to_func,
+ recv_mbox,
+ src_func_idx,
+ buf_out, &out_size);
+ }
+
+ response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size,
+ src_func_idx);
+ kfree(recv_mbox->buf_out);
+ kfree(recv_mbox->mbox);
+ kfree(recv_mbox);
+}
+
+static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
+ enum mbox_event_state event_flag)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ func_to_func->event_flag = event_flag;
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
+static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *mbox_for_resp,
+ enum hinic_mod_type mod, u16 cmd,
+ void *buf_out, u16 *out_size)
+{
+ int err;
+
+ if (mbox_for_resp->msg_info.status) {
+ err = mbox_for_resp->msg_info.status;
+ if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n",
+ mbox_for_resp->msg_info.status);
+ return err;
+ }
+
+ if (buf_out && out_size) {
+ if (*out_size < mbox_for_resp->mbox_len) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Invalid response mbox message length: %d for mod %d cmd %d, should less than: %d\n",
+ mbox_for_resp->mbox_len, mod, cmd, *out_size);
+ return -EFAULT;
+ }
+
+ if (mbox_for_resp->mbox_len)
+ memcpy(buf_out, mbox_for_resp->mbox,
+ mbox_for_resp->mbox_len);
+
+ *out_size = mbox_for_resp->mbox_len;
+ }
+
+ return 0;
+}
+
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_recv_mbox *mbox_for_resp;
+ struct mbox_msg_info msg_info = {0};
+ unsigned long timeo;
+ int err;
+
+ mbox_for_resp = &func_to_func->mbox_resp[dst_func];
+
+ down(&func_to_func->mbox_send_sem);
+
+ init_completion(&mbox_for_resp->recv_done);
+
+ msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);
+
+ set_mbox_to_func_event(func_to_func, EVENT_START);
+
+ err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
+ dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
+ &msg_info);
+ if (err) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n",
+ msg_info.msg_id);
+ set_mbox_to_func_event(func_to_func, EVENT_FAIL);
+ goto err_send_mbox;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
+ set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
+ hinic_dump_aeq_info(func_to_func->hwdev);
+ err = -ETIMEDOUT;
+ goto err_send_mbox;
+ }
+
+ set_mbox_to_func_event(func_to_func, EVENT_END);
+
+ err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd,
+ buf_out, out_size);
+
+err_send_mbox:
+ up(&func_to_func->mbox_send_sem);
+
+ return err;
+}
+
+static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
+ void *buf_in, u16 in_size)
+{
+ if (in_size > HINIC_MBOX_DATA_SIZE) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mbox msg len(%d) exceed limit(%d)\n",
+ in_size, HINIC_MBOX_DATA_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+
+ if (err)
+ return err;
+
+ if (!HINIC_IS_VF(hwdev->hwif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
+ HINIC_FUNC_TYPE(hwdev->hwif));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd,
+ hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
+int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ u16 dst_func_idx;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ func_to_func = hwdev->func_to_func;
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
+ HINIC_FUNC_TYPE(hwdev->hwif));
+ return -EINVAL;
+ }
+
+ if (!vf_id) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "VF id(%d) error!\n", vf_id);
+ return -EINVAL;
+ }
+
+	/* vf_offset_to_pf + vf_id is the global function id of the vf in
+ * this pf
+ */
+ dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
+static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ int err;
+
+ mbox_info->seq_id = SEQ_ID_MAX_VAL;
+
+ mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->mbox)
+ return -ENOMEM;
+
+ mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->buf_out) {
+ err = -ENOMEM;
+ goto err_alloc_buf_out;
+ }
+
+ atomic_set(&mbox_info->msg_cnt, 0);
+
+ return 0;
+
+err_alloc_buf_out:
+ kfree(mbox_info->mbox);
+
+ return err;
+}
+
+static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ kfree(mbox_info->buf_out);
+ kfree(mbox_info->mbox);
+}
+
+static int alloc_mbox_info(struct hinic_hwdev *hwdev,
+ struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx, i;
+ int err;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
+ err = init_mbox_info(&mbox_info[func_idx]);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n",
+ func_idx);
+ goto err_init_mbox_info;
+ }
+ }
+
+ return 0;
+
+err_init_mbox_info:
+ for (i = 0; i < func_idx; i++)
+ clean_mbox_info(&mbox_info[i]);
+
+ return err;
+}
+
+static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
+ clean_mbox_info(&mbox_info[func_idx]);
+}
+
+static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+
+ send_mbox->data = MBOX_AREA(func_to_func->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u32 addr_h, addr_l;
+
+ send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev,
+ MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr,
+ GFP_KERNEL);
+ if (!send_mbox->wb_vaddr)
+ return -ENOMEM;
+
+ send_mbox->wb_status = send_mbox->wb_vaddr;
+
+ addr_h = upper_32_bits(send_mbox->wb_paddr);
+ addr_l = lower_32_bits(send_mbox->wb_paddr);
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ addr_h);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ addr_l);
+
+ return 0;
+}
+
+static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ 0);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ 0);
+
+ dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN,
+ send_mbox->wb_vaddr,
+ send_mbox->wb_paddr);
+}
+
+bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
+ struct vf_cmd_check_handle *cmd_handle,
+ u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, u8 size)
+{
+ u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev->hwif);
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (cmd == cmd_handle[i].cmd) {
+ if (cmd_handle[i].check_cmd)
+ return cmd_handle[i].check_cmd(hwdev, src_idx,
+ buf_in, in_size);
+ else
+ return true;
+ }
+ }
+
+ dev_err(&hwdev->hwif->pdev->dev,
+ "PF Receive VF(%d) unsupported cmd(0x%x)\n",
+ vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd);
+
+ return false;
+}
+
+static bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq_ctxt *cmdq_ctxt)
+{
+ struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+ u64 curr_pg_pfn, wq_block_pfn;
+
+ if (cmdq_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif) ||
+ cmdq_ctxt->cmdq_type > HINIC_MAX_CMDQ_TYPES)
+ return false;
+
+ curr_pg_pfn = HINIC_CMDQ_CTXT_PAGE_INFO_GET
+ (ctxt_info->curr_wqe_page_pfn, CURR_WQE_PAGE_PFN);
+ wq_block_pfn = HINIC_CMDQ_CTXT_BLOCK_INFO_GET
+ (ctxt_info->wq_block_pfn, WQ_BLOCK_PFN);
+ /* VF must use 0-level CLA */
+ if (curr_pg_pfn != wq_block_pfn)
+ return false;
+
+ return true;
+}
+
+static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ return hinic_cmdq_check_vf_ctxt(hwdev, buf_in);
+}
+
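+/* the depth fields appear to be log2-encoded (6..12, i.e. 64..4096 entries);
+ * rx_buf_sz_idx is an index into the device's rx buffer size table
+ */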
+#define HW_CTX_QPS_VALID(hw_ctxt) \
+ ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \
+ (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \
+ (hw_ctxt)->sq_depth >= HINIC_QUEUE_MIN_DEPTH && \
+ (hw_ctxt)->sq_depth <= HINIC_QUEUE_MAX_DEPTH && \
+ (hw_ctxt)->rx_buf_sz_idx <= HINIC_MAX_RX_BUFFER_SIZE)
+
+static bool hw_ctxt_qps_param_valid(struct hinic_cmd_hw_ioctxt *hw_ctxt)
+{
+ if (HW_CTX_QPS_VALID(hw_ctxt))
+ return true;
+
+ if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&
+ !hw_ctxt->rx_buf_sz_idx)
+ return true;
+
+ return false;
+}
+
+static bool check_hwctxt(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_cmd_hw_ioctxt *hw_ctxt = buf_in;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ if (hw_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
+ return false;
+
+ if (hw_ctxt->set_cmdq_depth) {
+ if (hw_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH &&
+ hw_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH)
+ return true;
+
+ return false;
+ }
+
+ return hw_ctxt_qps_param_valid(hw_ctxt);
+}
+
+static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_wq_page_size *page_size_info = buf_in;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ if (page_size_info->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
+ return false;
+
+ if (((1U << page_size_info->page_size) * SZ_4K) !=
+ HINIC_DEFAULT_WQ_PAGE_SIZE)
+ return false;
+
+ return true;
+}
+
+static struct vf_cmd_check_handle hw_cmd_support_vf[] = {
+ {HINIC_COMM_CMD_START_FLR, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt},
+ {HINIC_COMM_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt},
+ {HINIC_COMM_CMD_HWCTXT_SET, check_hwctxt},
+ {HINIC_COMM_CMD_HWCTXT_GET, check_hwctxt},
+ {HINIC_COMM_CMD_SQ_HI_CI_SET, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_IO_RES_CLEAR, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B},
+ {HINIC_COMM_CMD_PAGESIZE_SET, check_set_wq_page_size},
+};
+
+static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ u8 size = ARRAY_SIZE(hw_cmd_support_vf);
+ struct hinic_hwdev *hwdev = handle;
+ struct hinic_pfhwdev *pfhwdev;
+ int err = 0;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd,
+ buf_in, in_size, size)) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "PF Receive VF: %d common cmd: 0x%x or mbox len: 0x%x is invalid\n",
+ vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd,
+ in_size);
+ return HINIC_MBOX_VF_CMD_ERROR;
+ }
+
+ if (cmd == HINIC_COMM_CMD_START_FLR) {
+ *out_size = 0;
+ } else {
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ cmd, buf_in, in_size, buf_out, out_size,
+ HINIC_MGMT_MSG_SYNC);
+ if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&hwdev->hwif->pdev->dev,
+ "PF mbox common callback handler err: %d\n",
+ err);
+ }
+
+ return err;
+}
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+ func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
+ if (!func_to_func)
+ return -ENOMEM;
+
+ hwdev->func_to_func = func_to_func;
+ func_to_func->hwdev = hwdev;
+ func_to_func->hwif = hwdev->hwif;
+ sema_init(&func_to_func->mbox_send_sem, 1);
+ sema_init(&func_to_func->msg_send_sem, 1);
+ spin_lock_init(&func_to_func->mbox_lock);
+ func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
+ if (!func_to_func->workq) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n");
+ err = -ENOMEM;
+ goto err_create_mbox_workq;
+ }
+
+ err = alloc_mbox_info(hwdev, func_to_func->mbox_send);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n");
+ goto err_alloc_mbox_for_send;
+ }
+
+ err = alloc_mbox_info(hwdev, func_to_func->mbox_resp);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n");
+ goto err_alloc_mbox_for_resp;
+ }
+
+ err = alloc_mbox_wb_status(func_to_func);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n");
+ goto err_alloc_wb_status;
+ }
+
+ prepare_send_mbox(func_to_func);
+
+ hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC,
+ &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler);
+ hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT,
+ &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler);
+
+ if (!HINIC_IS_VF(hwdev->hwif))
+ hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
+ comm_pf_mbox_handler);
+
+ return 0;
+
+err_alloc_wb_status:
+ free_mbox_info(func_to_func->mbox_resp);
+
+err_alloc_mbox_for_resp:
+ free_mbox_info(func_to_func->mbox_send);
+
+err_alloc_mbox_for_send:
+ destroy_workqueue(func_to_func->workq);
+
+err_create_mbox_workq:
+ kfree(func_to_func);
+
+ return err;
+}
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC);
+ hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT);
+
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
+ /* destroy workqueue before free related mbox resources in case of
+ * illegal resource access
+ */
+ destroy_workqueue(func_to_func->workq);
+
+ free_mbox_wb_status(func_to_func);
+ free_mbox_info(func_to_func->mbox_resp);
+ free_mbox_info(func_to_func->mbox_send);
+
+ kfree(func_to_func);
+}
+
+int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev)
+{
+ u16 vf_offset;
+ u8 vf_in_pf;
+ int err = 0;
+
+ if (HINIC_IS_VF(hwdev->hwif))
+ return 0;
+
+ vf_offset = hinic_glb_pf_vf_offset(hwdev->hwif);
+
+ for (vf_in_pf = 1; vf_in_pf <= hwdev->nic_cap.max_vf; vf_in_pf++) {
+ err = set_vf_mbox_random_id(hwdev, vf_offset + vf_in_pf);
+ if (err)
+ break;
+ }
+
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ hwdev->func_to_func->support_vf_random = false;
+ err = 0;
+ dev_warn(&hwdev->hwif->pdev->dev, "Mgmt is unsupported to set VF%d random id\n",
+ vf_in_pf - 1);
+ } else if (!err) {
+ hwdev->func_to_func->support_vf_random = true;
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
new file mode 100644
index 0000000000..33ac7814d3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_MBOX_H_
+#define HINIC_MBOX_H_
+
+#define HINIC_MBOX_PF_SEND_ERR 0x1
+#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2
+#define HINIC_MBOX_VF_CMD_ERROR 0x3
+
+#define HINIC_MAX_FUNCTIONS 512
+
+#define HINIC_MAX_PF_FUNCS 16
+
+#define HINIC_MBOX_WQ_NAME "hinic_mbox"
+
+#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100
+#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C
+
+#define MAX_FUNCTION_NUM 512
+
+struct vf_cmd_check_handle {
+ u8 cmd;
+ bool (*check_cmd)(struct hinic_hwdev *hwdev, u16 src_func_idx,
+ void *buf_in, u16 in_size);
+};
+
+enum hinic_mbox_ack_type {
+ MBOX_ACK,
+ MBOX_NO_ACK,
+};
+
+struct mbox_msg_info {
+ u8 msg_id;
+ u8 status;
+};
+
+struct hinic_recv_mbox {
+ struct completion recv_done;
+ void *mbox;
+ u8 cmd;
+ enum hinic_mod_type mod;
+ u16 mbox_len;
+ void *buf_out;
+ enum hinic_mbox_ack_type ack_type;
+ struct mbox_msg_info msg_info;
+ u8 seq_id;
+ atomic_t msg_cnt;
+};
+
+struct hinic_send_mbox {
+ struct completion send_done;
+ u8 *data;
+
+ u64 *wb_status;
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+typedef void (*hinic_vf_mbox_cb)(void *handle, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+typedef int (*hinic_pf_mbox_cb)(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+enum mbox_event_state {
+ EVENT_START = 0,
+ EVENT_FAIL,
+ EVENT_TIMEOUT,
+ EVENT_END,
+};
+
+enum hinic_mbox_cb_state {
+ HINIC_VF_MBOX_CB_REG = 0,
+ HINIC_VF_MBOX_CB_RUNNING,
+ HINIC_PF_MBOX_CB_REG,
+ HINIC_PF_MBOX_CB_RUNNING,
+ HINIC_PPF_MBOX_CB_REG,
+ HINIC_PPF_MBOX_CB_RUNNING,
+ HINIC_PPF_TO_PF_MBOX_CB_REG,
+	HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+};
+
+struct hinic_mbox_func_to_func {
+ struct hinic_hwdev *hwdev;
+ struct hinic_hwif *hwif;
+
+ struct semaphore mbox_send_sem;
+ struct semaphore msg_send_sem;
+ struct hinic_send_mbox send_mbox;
+
+ struct workqueue_struct *workq;
+
+ struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS];
+ struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS];
+
+ hinic_vf_mbox_cb vf_mbox_cb[HINIC_MOD_MAX];
+ hinic_pf_mbox_cb pf_mbox_cb[HINIC_MOD_MAX];
+ unsigned long pf_mbox_cb_state[HINIC_MOD_MAX];
+ unsigned long vf_mbox_cb_state[HINIC_MOD_MAX];
+
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+
+ /* lock for mbox event flag */
+ spinlock_t mbox_lock;
+
+ u32 vf_mbx_old_rand_id[MAX_FUNCTION_NUM];
+ u32 vf_mbx_rand_id[MAX_FUNCTION_NUM];
+ bool support_vf_random;
+};
+
+struct hinic_mbox_work {
+ struct work_struct work;
+ u16 src_func_idx;
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_recv_mbox *recv_mbox;
+};
+
+struct vf_cmd_msg_handle {
+ u8 cmd;
+ int (*cmd_msg_handler)(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+};
+
+bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size);
+
+bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
+ struct vf_cmd_check_handle *cmd_handle,
+ u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, u8 size);
+
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback);
+
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback);
+
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
+
+int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
new file mode 100644
index 0000000000..4aa1f433ed
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <net/devlink.h>
+#include <asm/barrier.h>
+
+#include "hinic_devlink.h"
+#include "hinic_hw_if.h"
+#include "hinic_hw_eqs.h"
+#include "hinic_hw_api_cmd.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw_dev.h"
+
+#define SYNC_MSG_ID_MASK 0x1FF
+
+#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
+
+#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
+ ((SYNC_MSG_ID(pf_to_mgmt) + 1) & \
+ SYNC_MSG_ID_MASK))
+
+#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_LEN)
+
+#define MGMT_MSG_LEN_MIN 20
+#define MGMT_MSG_LEN_STEP 16
+#define MGMT_MSG_RSVD_FOR_DEV 8
+
+#define SEGMENT_LEN 48
+
+#define MAX_PF_MGMT_BUF_SIZE 2048
+
+/* Data should be SEG LEN size aligned */
+#define MAX_MSG_LEN 2016
+
+#define MSG_NOT_RESP 0xFFFF
+
+#define MGMT_MSG_TIMEOUT 5000
+
+#define SET_FUNC_PORT_MBOX_TIMEOUT 30000
+
+#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
+
+#define UPDATE_FW_MGMT_TIMEOUT 20000
+
+#define mgmt_to_pfhwdev(pf_mgmt) \
+ container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
+
+enum msg_segment_type {
+ NOT_LAST_SEGMENT = 0,
+ LAST_SEGMENT = 1,
+};
+
+enum mgmt_direction_type {
+ MGMT_DIRECT_SEND = 0,
+ MGMT_RESP = 1,
+};
+
+enum msg_ack_type {
+ MSG_ACK = 0,
+ MSG_NO_ACK = 1,
+};
+
+/**
+ * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that this handler will handle its messages
+ * @handle: private data for the callback
+ * @callback: the handler that will handle messages
+ **/
+void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod,
+ void *handle,
+ void (*callback)(void *handle,
+ u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out,
+ u16 *out_size))
+{
+ struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];
+
+ mgmt_cb->cb = callback;
+ mgmt_cb->handle = handle;
+ mgmt_cb->state = HINIC_MGMT_CB_ENABLED;
+}
+
+/**
+ * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that this handler handles its messages
+ **/
+void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];
+
+ mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED;
+
+ while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING)
+ schedule();
+
+ mgmt_cb->cb = NULL;
+}
+
+/**
+ * prepare_header - prepare the header of the message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @msg_len: the length of the message
+ * @mod: module in the chip that will get the message
+ * @ack_type: whether the message requires a response
+ * @direction: the direction of the message
+ * @cmd: command of the message
+ * @msg_id: message id
+ *
+ * Return the prepared header value
+ **/
+static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ u16 msg_len, enum hinic_mod_type mod,
+ enum msg_ack_type ack_type,
+ enum mgmt_direction_type direction,
+ u16 cmd, u16 msg_id)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwif;
+
+ return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MSG_HEADER_SET(mod, MODULE) |
+ HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) |
+ HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+ HINIC_MSG_HEADER_SET(0, SEQID) |
+ HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ HINIC_MSG_HEADER_SET(direction, DIRECTION) |
+ HINIC_MSG_HEADER_SET(cmd, CMD) |
+ HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) |
+ HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) |
+ HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
+/**
+ * prepare_mgmt_cmd - prepare the mgmt command
+ * @mgmt_cmd: pointer to the command to prepare
+ * @header: pointer of the header for the message
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ **/
+static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len)
+{
+ memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
+
+ mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
+ memcpy(mgmt_cmd, header, sizeof(*header));
+
+ mgmt_cmd += sizeof(*header);
+ memcpy(mgmt_cmd, msg, msg_len);
+}
+
+/**
+ * mgmt_msg_len - calculate the total message length
+ * @msg_data_len: the length of the message data
+ *
+ * Return the total message length
+ **/
+static u16 mgmt_msg_len(u16 msg_data_len)
+{
+ /* RSVD + HEADER_SIZE + DATA_LEN */
+ u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len;
+
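+	/* e.g. a 50-byte payload: 8 + 8 + 50 = 66 bytes, rounded up to
+	 * 20 + ALIGN(66 - 20, 16) = 68 bytes
+	 */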
+ if (msg_len > MGMT_MSG_LEN_MIN)
+ msg_len = MGMT_MSG_LEN_MIN +
+ ALIGN((msg_len - MGMT_MSG_LEN_MIN),
+ MGMT_MSG_LEN_STEP);
+ else
+ msg_len = MGMT_MSG_LEN_MIN;
+
+ return msg_len;
+}
+
+/**
+ * send_msg_to_mgmt - send message to mgmt by API CMD
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @data: the msg data
+ * @data_len: the msg data length
+ * @ack_type: whether the message requires a response
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to response for
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ u8 *data, u16 data_len,
+ enum msg_ack_type ack_type,
+ enum mgmt_direction_type direction,
+ u16 resp_msg_id)
+{
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 msg_id;
+
+ msg_id = SYNC_MSG_ID(pf_to_mgmt);
+
+ if (direction == MGMT_RESP) {
+ header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
+ direction, cmd, resp_msg_id);
+ } else {
+ SYNC_MSG_ID_INC(pf_to_mgmt);
+ header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
+ direction, cmd, msg_id);
+ }
+
+ prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU];
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT,
+ pf_to_mgmt->sync_msg_buf,
+ mgmt_msg_len(data_len));
+}
+
+/**
+ * msg_to_mgmt_sync - send sync message to mgmt
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @buf_in: the msg data
+ * @in_size: the msg data length
+ * @buf_out: response
+ * @out_size: response length
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to response for
+ * @timeout: time-out period of waiting for response
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ u8 *buf_in, u16 in_size,
+ u8 *buf_out, u16 *out_size,
+ enum mgmt_direction_type direction,
+ u16 resp_msg_id, u32 timeout)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_recv_msg *recv_msg;
+ struct completion *recv_done;
+ unsigned long timeo;
+ u16 msg_id;
+ int err;
+
+ /* Lock the sync_msg_buf */
+ down(&pf_to_mgmt->sync_msg_lock);
+
+ recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
+ recv_done = &recv_msg->recv_done;
+
+ if (resp_msg_id == MSG_NOT_RESP)
+ msg_id = SYNC_MSG_ID(pf_to_mgmt);
+ else
+ msg_id = resp_msg_id;
+
+ init_completion(recv_done);
+
+ err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ MSG_ACK, direction, resp_msg_id);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n");
+ goto unlock_sync_msg;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+
+ if (!wait_for_completion_timeout(recv_done, timeo)) {
+ dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
+ hinic_dump_aeq_info(pf_to_mgmt->hwdev);
+ err = -ETIMEDOUT;
+ goto unlock_sync_msg;
+ }
+
+	smp_rmb(); /* ensure the response is read only after the completion fires */
+
+ if (recv_msg->msg_id != msg_id) {
+ dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id);
+ err = -EFAULT;
+ goto unlock_sync_msg;
+ }
+
+ if (buf_out && recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE) {
+ memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
+ *out_size = recv_msg->msg_len;
+ }
+
+unlock_sync_msg:
+ up(&pf_to_mgmt->sync_msg_lock);
+ return err;
+}
+
+/**
+ * msg_to_mgmt_async - send message to mgmt without response
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @buf_in: the msg data
+ * @in_size: the msg data length
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to response for
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ u8 *buf_in, u16 in_size,
+ enum mgmt_direction_type direction,
+ u16 resp_msg_id)
+{
+ int err;
+
+ /* Lock the sync_msg_buf */
+ down(&pf_to_mgmt->sync_msg_lock);
+
+ err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ MSG_NO_ACK, direction, resp_msg_id);
+
+ up(&pf_to_mgmt->sync_msg_lock);
+ return err;
+}
+
+/**
+ * hinic_msg_to_mgmt - send message to mgmt
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @buf_in: the msg data
+ * @in_size: the msg data length
+ * @buf_out: response
+ * @out_size: returned response length
+ * @sync: sync msg or async msg
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
+ enum hinic_mgmt_msg_type sync)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u32 timeout = 0;
+
+ if (sync != HINIC_MGMT_MSG_SYNC) {
+ dev_err(&pdev->dev, "Invalid MGMT msg type\n");
+ return -EINVAL;
+ }
+
+ if (!MSG_SZ_IS_VALID(in_size)) {
+ dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n");
+ return -EINVAL;
+ }
+
+ if (HINIC_IS_VF(hwif)) {
+ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+ timeout = SET_FUNC_PORT_MBOX_TIMEOUT;
+
+ return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
+ in_size, buf_out, out_size, timeout);
+ } else {
+ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+ timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
+ else if (cmd == HINIC_PORT_CMD_UPDATE_FW)
+ timeout = UPDATE_FW_MGMT_TIMEOUT;
+
+ return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ buf_out, out_size, MGMT_DIRECT_SEND,
+ MSG_NOT_RESP, timeout);
+ }
+}
+
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
+{
+ struct hinic_mgmt_msg_handle_work *mgmt_work =
+ container_of(work, struct hinic_mgmt_msg_handle_work, work);
+ struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt;
+ struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+ u8 *buf_out = pf_to_mgmt->mgmt_ack_buf;
+ struct hinic_mgmt_cb *mgmt_cb;
+ unsigned long cb_state;
+ u16 out_size = 0;
+
+ memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+ if (mgmt_work->mod >= HINIC_MOD_MAX) {
+ dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
+ mgmt_work->mod);
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
+ return;
+ }
+
+ mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod];
+
+ cb_state = cmpxchg(&mgmt_cb->state,
+ HINIC_MGMT_CB_ENABLED,
+ HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
+
+ if (cb_state == HINIC_MGMT_CB_ENABLED && mgmt_cb->cb)
+ mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
+ mgmt_work->msg, mgmt_work->msg_len,
+ buf_out, &out_size);
+ else
+ dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
+ mgmt_work->mod, mgmt_work->cmd);
+
+ mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
+
+ if (!mgmt_work->async_mgmt_to_pf)
+ /* MGMT sent sync msg, send the response */
+ msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd,
+ buf_out, out_size, MGMT_RESP,
+ mgmt_work->msg_id);
+
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
+}
+
+/**
+ * mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ **/
+static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg)
+{
+ struct hinic_mgmt_msg_handle_work *mgmt_work = NULL;
+
+ mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+ if (!mgmt_work)
+ return;
+
+ if (recv_msg->msg_len) {
+ mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+ if (!mgmt_work->msg) {
+ kfree(mgmt_work);
+ return;
+ }
+ }
+
+ mgmt_work->pf_to_mgmt = pf_to_mgmt;
+ mgmt_work->msg_len = recv_msg->msg_len;
+ memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+ mgmt_work->msg_id = recv_msg->msg_id;
+ mgmt_work->mod = recv_msg->mod;
+ mgmt_work->cmd = recv_msg->cmd;
+ mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+ INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+ queue_work(pf_to_mgmt->workq, &mgmt_work->work);
+}
+
+/**
+ * mgmt_resp_msg_handler - handler for a response message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ **/
+static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg)
+{
+	wmb(); /* ensure all writes complete before the waiter reads */
+
+ complete(&recv_msg->recv_done);
+}
+
+/**
+ * recv_mgmt_msg_handler - handler for a message from the mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: the header of the message
+ * @recv_msg: received message details
+ **/
+static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ u64 *header, struct hinic_recv_msg *recv_msg)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int seq_id, seg_len;
+ u8 *msg_body;
+
+ seq_id = HINIC_MSG_HEADER_GET(*header, SEQID);
+ seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN);
+
+ if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) {
+ dev_err(&pdev->dev, "recv big mgmt msg\n");
+ return;
+ }
+
+ msg_body = (u8 *)header + sizeof(*header);
+ memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len);
+
+ if (!HINIC_MSG_HEADER_GET(*header, LAST))
+ return;
+
+ recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD);
+ recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE);
+ recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header,
+ ASYNC_MGMT_TO_PF);
+ recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN);
+ recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID);
+
+ if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP)
+ mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
+ else
+ mgmt_recv_msg_handler(pf_to_mgmt, recv_msg);
+}
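+
+/*
+ * Reassembly sketch (illustrative): a mgmt message longer than one
+ * segment arrives as several AEQ events.  The event with SEQID == i
+ * carries up to SEGMENT_LEN payload bytes, copied above to
+ * recv_msg->msg + i * SEGMENT_LEN; only the event with the LAST bit
+ * set fills in cmd/mod/msg_len and dispatches the complete message.
+ */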
+
+/**
+ * mgmt_msg_aeqe_handler - handler for a mgmt message event
+ * @handle: PF to MGMT channel
+ * @data: the header of the message
+ * @size: unused
+ **/
+static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size)
+{
+ struct hinic_pf_to_mgmt *pf_to_mgmt = handle;
+ struct hinic_recv_msg *recv_msg;
+ u64 *header = (u64 *)data;
+
+ recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) ==
+ MGMT_DIRECT_SEND ?
+ &pf_to_mgmt->recv_msg_from_mgmt :
+ &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
+}
+
+/**
+ * alloc_recv_msg - allocate receive message memory
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: pointer that will hold the allocated data
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
+ GFP_KERNEL);
+ if (!recv_msg->msg)
+ return -ENOMEM;
+
+ recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
+ GFP_KERNEL);
+ if (!recv_msg->buf_out)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ err = alloc_recv_msg(pf_to_mgmt,
+ &pf_to_mgmt->recv_msg_from_mgmt);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate recv msg\n");
+ return err;
+ }
+
+ err = alloc_recv_msg(pf_to_mgmt,
+ &pf_to_mgmt->recv_resp_msg_from_mgmt);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate resp recv msg\n");
+ return err;
+ }
+
+ pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev,
+ MAX_PF_MGMT_BUF_SIZE,
+ GFP_KERNEL);
+ if (!pf_to_mgmt->sync_msg_buf)
+ return -ENOMEM;
+
+ pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev,
+ MAX_PF_MGMT_BUF_SIZE,
+ GFP_KERNEL);
+ if (!pf_to_mgmt->mgmt_ack_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ * @hwif: HW interface the PF to MGMT will use for accessing HW
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_hwif *hwif)
+{
+ struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
+ struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ pf_to_mgmt->hwif = hwif;
+ pf_to_mgmt->hwdev = hwdev;
+
+ if (HINIC_IS_VF(hwif))
+ return 0;
+
+ err = hinic_health_reporters_create(hwdev->devlink_dev);
+ if (err)
+ return err;
+
+ sema_init(&pf_to_mgmt->sync_msg_lock, 1);
+ pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt");
+ if (!pf_to_mgmt->workq) {
+ dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n");
+ hinic_health_reporters_destroy(hwdev->devlink_dev);
+ return -ENOMEM;
+ }
+ pf_to_mgmt->sync_msg_id = 0;
+
+ err = alloc_msg_buf(pf_to_mgmt);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
+ destroy_workqueue(pf_to_mgmt->workq);
+ hinic_health_reporters_destroy(hwdev->devlink_dev);
+ return err;
+ }
+
+ err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
+ destroy_workqueue(pf_to_mgmt->workq);
+ hinic_health_reporters_destroy(hwdev->devlink_dev);
+ return err;
+ }
+
+ hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU,
+ pf_to_mgmt,
+ mgmt_msg_aeqe_handler);
+ return 0;
+}
+
+/**
+ * hinic_pf_to_mgmt_free - free PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ **/
+void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
+{
+ struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
+ struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+
+ if (HINIC_IS_VF(hwdev->hwif))
+ return;
+
+ hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
+ hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+ destroy_workqueue(pf_to_mgmt->workq);
+ hinic_health_reporters_destroy(hwdev->devlink_dev);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
new file mode 100644
index 0000000000..4ca81cc838
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_MGMT_H
+#define HINIC_HW_MGMT_H
+
+#include <linux/types.h>
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+#include <linux/bitops.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_api_cmd.h"
+
+#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MSG_HEADER_MODULE_SHIFT 11
+#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23
+#define HINIC_MSG_HEADER_SEQID_SHIFT 24
+#define HINIC_MSG_HEADER_LAST_SHIFT 30
+#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MSG_HEADER_CMD_SHIFT 32
+#define HINIC_MSG_HEADER_ZEROS_SHIFT 40
+#define HINIC_MSG_HEADER_PCI_INTF_SHIFT 48
+#define HINIC_MSG_HEADER_PF_IDX_SHIFT 50
+#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54
+
+#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MSG_HEADER_MODULE_MASK 0x1F
+#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1
+#define HINIC_MSG_HEADER_SEQID_MASK 0x3F
+#define HINIC_MSG_HEADER_LAST_MASK 0x1
+#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MSG_HEADER_CMD_MASK 0xFF
+#define HINIC_MSG_HEADER_ZEROS_MASK 0xFF
+#define HINIC_MSG_HEADER_PCI_INTF_MASK 0x3
+#define HINIC_MSG_HEADER_PF_IDX_MASK 0xF
+#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF
+
+#define HINIC_MSG_HEADER_SET(val, member) \
+ ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \
+ HINIC_MSG_HEADER_##member##_SHIFT)
+
+#define HINIC_MSG_HEADER_GET(val, member) \
+ (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \
+ HINIC_MSG_HEADER_##member##_MASK)
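+
+/*
+ * Usage example (illustrative, not driver code): for the SEQID field
+ * (shift 24, mask 0x3F),
+ *
+ *	u64 hdr = HINIC_MSG_HEADER_SET(5, SEQID);   yields hdr == 0x5000000
+ *	u8 seq = HINIC_MSG_HEADER_GET(hdr, SEQID);  yields seq == 5
+ *
+ * Values wider than the field mask are silently truncated.
+ */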
+
+enum hinic_mgmt_msg_type {
+ HINIC_MGMT_MSG_SYNC = 1,
+};
+
+enum hinic_cfg_cmd {
+ HINIC_CFG_NIC_CAP = 0,
+};
+
+enum hinic_comm_cmd {
+ HINIC_COMM_CMD_START_FLR = 0x1,
+ HINIC_COMM_CMD_IO_STATUS_GET = 0x3,
+ HINIC_COMM_CMD_DMA_ATTR_SET = 0x4,
+
+ HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10,
+ HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11,
+
+ HINIC_COMM_CMD_HWCTXT_SET = 0x12,
+ HINIC_COMM_CMD_HWCTXT_GET = 0x13,
+
+ HINIC_COMM_CMD_SQ_HI_CI_SET = 0x14,
+
+ HINIC_COMM_CMD_RES_STATE_SET = 0x24,
+
+ HINIC_COMM_CMD_IO_RES_CLEAR = 0x29,
+
+ HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
+
+ HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP,
+ HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP,
+
+ HINIC_COMM_CMD_FAULT_REPORT = 0x37,
+
+ HINIC_COMM_CMD_SET_LED_STATUS = 0x4a,
+
+ HINIC_COMM_CMD_L2NIC_RESET = 0x4b,
+
+ HINIC_COMM_CMD_PAGESIZE_SET = 0x50,
+
+ HINIC_COMM_CMD_GET_BOARD_INFO = 0x52,
+
+ HINIC_COMM_CMD_WATCHDOG_INFO = 0x56,
+
+ HINIC_MGMT_CMD_SET_VF_RANDOM_ID = 0x61,
+
+ HINIC_COMM_CMD_MAX,
+};
+
+enum hinic_mgmt_cb_state {
+ HINIC_MGMT_CB_ENABLED = BIT(0),
+ HINIC_MGMT_CB_RUNNING = BIT(1),
+};
+
+struct hinic_recv_msg {
+ u8 *msg;
+ u8 *buf_out;
+
+ struct completion recv_done;
+
+ u16 cmd;
+ enum hinic_mod_type mod;
+ int async_mgmt_to_pf;
+
+ u16 msg_len;
+ u16 msg_id;
+};
+
+struct hinic_mgmt_cb {
+ void (*cb)(void *handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+ void *handle;
+ unsigned long state;
+};
+
+struct hinic_pf_to_mgmt {
+ struct hinic_hwif *hwif;
+ struct hinic_hwdev *hwdev;
+ struct semaphore sync_msg_lock;
+ u16 sync_msg_id;
+ u8 *sync_msg_buf;
+ void *mgmt_ack_buf;
+
+ struct hinic_recv_msg recv_resp_msg_from_mgmt;
+ struct hinic_recv_msg recv_msg_from_mgmt;
+
+ struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX];
+
+ struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX];
+
+ struct workqueue_struct *workq;
+};
+
+struct hinic_mgmt_msg_handle_work {
+ struct work_struct work;
+ struct hinic_pf_to_mgmt *pf_to_mgmt;
+
+ void *msg;
+ u16 msg_len;
+
+ enum hinic_mod_type mod;
+ u8 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
+};
+
+void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod,
+ void *handle,
+ void (*callback)(void *handle,
+ u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out,
+ u16 *out_size));
+
+void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod);
+
+int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
+ enum hinic_mgmt_msg_type sync);
+
+int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_hwif *hwif);
+
+void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
new file mode 100644
index 0000000000..537a8098bc
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -0,0 +1,971 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/sizes.h>
+#include <linux/atomic.h>
+#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <asm/barrier.h>
+#include <asm/byteorder.h>
+
+#include "hinic_common.h"
+#include "hinic_hw_if.h"
+#include "hinic_hw_wqe.h"
+#include "hinic_hw_wq.h"
+#include "hinic_hw_qp_ctxt.h"
+#include "hinic_hw_qp.h"
+#include "hinic_hw_io.h"
+
+#define SQ_DB_OFF SZ_2K
+
+/* The number of cache lines to prefetch until the threshold state */
+#define WQ_PREFETCH_MAX 2
+/* The number of cache lines to prefetch after the threshold state */
+#define WQ_PREFETCH_MIN 1
+/* Threshold state */
+#define WQ_PREFETCH_THRESHOLD 256
+
+/* sizes of the SQ/RQ ctxt */
+#define Q_CTXT_SIZE 48
+#define CTXT_RSVD 240
+
+#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
+ (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)
+
+#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
+ (((max_rqs) + (max_sqs)) * CTXT_RSVD + \
+ (max_sqs + (q_id)) * Q_CTXT_SIZE)
+
+#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4)
+#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3)
+#define SECT_SIZE_FROM_8BYTES(size) ((size) << 3)
+
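+/*
+ * Layout example (illustrative): with max_sqs == max_rqs == 16, the SQ
+ * contexts start at (16 + 16) * CTXT_RSVD == 7680 bytes, and RQ 0 sits
+ * after all 16 SQ contexts at 7680 + 16 * Q_CTXT_SIZE == 8448 bytes.
+ * hinic_qp_prepare_header() converts these byte offsets into 16-byte
+ * units with SIZE_16BYTES().
+ */
+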
+#define SQ_DB_PI_HI_SHIFT 8
+#define SQ_DB_PI_HI(prod_idx) ((prod_idx) >> SQ_DB_PI_HI_SHIFT)
+
+#define SQ_DB_PI_LOW_MASK 0xFF
+#define SQ_DB_PI_LOW(prod_idx) ((prod_idx) & SQ_DB_PI_LOW_MASK)
+
+#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))
+
+#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
+#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
+
+enum sq_wqe_type {
+ SQ_NORMAL_WQE = 0,
+};
+
+enum rq_completion_fmt {
+ RQ_COMPLETE_SGE = 1
+};
+
+void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
+ enum hinic_qp_ctxt_type ctxt_type,
+ u16 num_queues, u16 max_queues)
+{
+ u16 max_sqs = max_queues;
+ u16 max_rqs = max_queues;
+
+ qp_ctxt_hdr->num_queues = num_queues;
+ qp_ctxt_hdr->queue_type = ctxt_type;
+
+ if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
+ qp_ctxt_hdr->addr_offset = SQ_CTXT_OFFSET(max_sqs, max_rqs, 0);
+ else
+ qp_ctxt_hdr->addr_offset = RQ_CTXT_OFFSET(max_sqs, max_rqs, 0);
+
+ qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);
+
+ hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
+}
+
+void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
+ struct hinic_sq *sq, u16 global_qid)
+{
+ u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
+ u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+ u16 pi_start, ci_start;
+ struct hinic_wq *wq;
+
+ wq = sq->wq;
+ ci_start = atomic_read(&wq->cons_idx);
+ pi_start = atomic_read(&wq->prod_idx);
+
+ /* Read the first page paddr from the WQ page paddr ptrs */
+ wq_page_addr = be64_to_cpu(*wq->block_vaddr);
+
+ wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
+ wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ /* If only one page, use 0-level CLA */
+ if (wq->num_q_pages == 1)
+ wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq_page_addr);
+ else
+ wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
+
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+ sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid,
+ GLOBAL_SQ_ID) |
+ HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN);
+
+ sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) |
+ HINIC_SQ_CTXT_CI_SET(1, WRAPPED);
+
+ sq_ctxt->wq_hi_pfn_pi =
+ HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
+
+ sq_ctxt->wq_lo_pfn = wq_page_pfn_lo;
+
+ sq_ctxt->pref_cache =
+ HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+ HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+ HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+ sq_ctxt->pref_wrapped = 1;
+
+ sq_ctxt->pref_wq_hi_pfn_ci =
+ HINIC_SQ_CTXT_PREF_SET(ci_start, CI) |
+ HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN);
+
+ sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;
+
+ sq_ctxt->wq_block_hi_pfn =
+ HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);
+
+ sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;
+
+ hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
+}
+
+void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
+ struct hinic_rq *rq, u16 global_qid)
+{
+ u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
+ u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+ u16 pi_start, ci_start;
+ struct hinic_wq *wq;
+
+ wq = rq->wq;
+ ci_start = atomic_read(&wq->cons_idx);
+ pi_start = atomic_read(&wq->prod_idx);
+
+ /* Read the first page paddr from the WQ page paddr ptrs */
+ wq_page_addr = be64_to_cpu(*wq->block_vaddr);
+
+ wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
+ wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+ rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) |
+ HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED);
+
+ rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) |
+ HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
+
+ rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi,
+ HI_PFN) |
+ HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI);
+
+ rq_ctxt->wq_lo_pfn = wq_page_pfn_lo;
+
+ rq_ctxt->pref_cache =
+ HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+ HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+ HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+ rq_ctxt->pref_wrapped = 1;
+
+ rq_ctxt->pref_wq_hi_pfn_ci =
+ HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) |
+ HINIC_RQ_CTXT_PREF_SET(ci_start, CI);
+
+ rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;
+
+ rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
+ rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
+
+ rq_ctxt->wq_block_hi_pfn =
+ HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);
+
+ rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;
+
+ hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
+}
+
+/**
+ * alloc_sq_skb_arr - allocate sq array for saved skb
+ * @sq: HW Send Queue
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int alloc_sq_skb_arr(struct hinic_sq *sq)
+{
+ struct hinic_wq *wq = sq->wq;
+ size_t skb_arr_size;
+
+ skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
+ sq->saved_skb = vzalloc(skb_arr_size);
+ if (!sq->saved_skb)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * free_sq_skb_arr - free sq array for saved skb
+ * @sq: HW Send Queue
+ **/
+static void free_sq_skb_arr(struct hinic_sq *sq)
+{
+ vfree(sq->saved_skb);
+}
+
+/**
+ * alloc_rq_skb_arr - allocate rq array for saved skb
+ * @rq: HW Receive Queue
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int alloc_rq_skb_arr(struct hinic_rq *rq)
+{
+ struct hinic_wq *wq = rq->wq;
+ size_t skb_arr_size;
+
+ skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
+ rq->saved_skb = vzalloc(skb_arr_size);
+ if (!rq->saved_skb)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * free_rq_skb_arr - free rq array for saved skb
+ * @rq: HW Receive Queue
+ **/
+static void free_rq_skb_arr(struct hinic_rq *rq)
+{
+ vfree(rq->saved_skb);
+}
+
+/**
+ * hinic_init_sq - Initialize HW Send Queue
+ * @sq: HW Send Queue
+ * @hwif: HW Interface for accessing HW
+ * @wq: Work Queue for the data of the SQ
+ * @entry: msix entry for sq
+ * @ci_addr: address for reading the current HW consumer index
+ * @ci_dma_addr: dma address for reading the current HW consumer index
+ * @db_base: doorbell base address
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
+ struct hinic_wq *wq, struct msix_entry *entry,
+ void *ci_addr, dma_addr_t ci_dma_addr,
+ void __iomem *db_base)
+{
+ sq->hwif = hwif;
+
+ sq->wq = wq;
+
+ sq->irq = entry->vector;
+ sq->msix_entry = entry->entry;
+
+ sq->hw_ci_addr = ci_addr;
+ sq->hw_ci_dma_addr = ci_dma_addr;
+
+ sq->db_base = db_base + SQ_DB_OFF;
+
+ return alloc_sq_skb_arr(sq);
+}
+
+/**
+ * hinic_clean_sq - Clean HW Send Queue's Resources
+ * @sq: Send Queue
+ **/
+void hinic_clean_sq(struct hinic_sq *sq)
+{
+ free_sq_skb_arr(sq);
+}
+
+/**
+ * alloc_rq_cqe - allocate rq completion queue elements
+ * @rq: HW Receive Queue
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int alloc_rq_cqe(struct hinic_rq *rq)
+{
+ struct hinic_hwif *hwif = rq->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ size_t cqe_dma_size, cqe_size;
+ struct hinic_wq *wq = rq->wq;
+ int j, i;
+
+ cqe_size = wq->q_depth * sizeof(*rq->cqe);
+ rq->cqe = vzalloc(cqe_size);
+ if (!rq->cqe)
+ return -ENOMEM;
+
+ cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
+ rq->cqe_dma = vzalloc(cqe_dma_size);
+ if (!rq->cqe_dma)
+ goto err_cqe_dma_arr_alloc;
+
+ for (i = 0; i < wq->q_depth; i++) {
+ rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
+ sizeof(*rq->cqe[i]),
+ &rq->cqe_dma[i], GFP_KERNEL);
+ if (!rq->cqe[i])
+ goto err_cqe_alloc;
+ }
+
+ return 0;
+
+err_cqe_alloc:
+ for (j = 0; j < i; j++)
+ dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
+ rq->cqe_dma[j]);
+
+ vfree(rq->cqe_dma);
+
+err_cqe_dma_arr_alloc:
+ vfree(rq->cqe);
+ return -ENOMEM;
+}
+
+/**
+ * free_rq_cqe - free rq completion queue elements
+ * @rq: HW Receive Queue
+ **/
+static void free_rq_cqe(struct hinic_rq *rq)
+{
+ struct hinic_hwif *hwif = rq->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_wq *wq = rq->wq;
+ int i;
+
+ for (i = 0; i < wq->q_depth; i++)
+ dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],
+ rq->cqe_dma[i]);
+
+ vfree(rq->cqe_dma);
+ vfree(rq->cqe);
+}
+
+/**
+ * hinic_init_rq - Initialize HW Receive Queue
+ * @rq: HW Receive Queue
+ * @hwif: HW Interface for accessing HW
+ * @wq: Work Queue for the data of the RQ
+ * @entry: msix entry for rq
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
+ struct hinic_wq *wq, struct msix_entry *entry)
+{
+ struct pci_dev *pdev = hwif->pdev;
+ size_t pi_size;
+ int err;
+
+ rq->hwif = hwif;
+
+ rq->wq = wq;
+
+ rq->irq = entry->vector;
+ rq->msix_entry = entry->entry;
+
+ rq->buf_sz = HINIC_RX_BUF_SZ;
+
+ err = alloc_rq_skb_arr(rq);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate rq priv data\n");
+ return err;
+ }
+
+ err = alloc_rq_cqe(rq);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate rq cqe\n");
+ goto err_alloc_rq_cqe;
+ }
+
+ /* HW requirement: must be at least 32 bits */
+ pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
+ rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
+ &rq->pi_dma_addr, GFP_KERNEL);
+ if (!rq->pi_virt_addr) {
+ err = -ENOMEM;
+ goto err_pi_virt;
+ }
+
+ return 0;
+
+err_pi_virt:
+ free_rq_cqe(rq);
+
+err_alloc_rq_cqe:
+ free_rq_skb_arr(rq);
+ return err;
+}
+
+/**
+ * hinic_clean_rq - Clean HW Receive Queue's Resources
+ * @rq: HW Receive Queue
+ **/
+void hinic_clean_rq(struct hinic_rq *rq)
+{
+ struct hinic_hwif *hwif = rq->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ size_t pi_size;
+
+ pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
+ dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr,
+ rq->pi_dma_addr);
+
+ free_rq_cqe(rq);
+ free_rq_skb_arr(rq);
+}
+
+/**
+ * hinic_get_sq_free_wqebbs - return number of free wqebbs for use
+ * @sq: send queue
+ *
+ * Return number of free wqebbs
+ **/
+int hinic_get_sq_free_wqebbs(struct hinic_sq *sq)
+{
+ struct hinic_wq *wq = sq->wq;
+
+ return atomic_read(&wq->delta) - 1;
+}
+
+/**
+ * hinic_get_rq_free_wqebbs - return number of free wqebbs for use
+ * @rq: recv queue
+ *
+ * Return number of free wqebbs
+ **/
+int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
+{
+ struct hinic_wq *wq = rq->wq;
+
+ return atomic_read(&wq->delta) - 1;
+}
+
+static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, int nr_descs)
+{
+ u32 ctrl_size, task_size, bufdesc_size;
+
+ ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
+ task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
+ bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc);
+ bufdesc_size = SIZE_8BYTES(bufdesc_size);
+
+ ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
+ HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) |
+ HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ HINIC_SQ_CTRL_SET(ctrl_size, LEN);
+
+ ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT,
+ QUEUE_INFO_MSS) |
+ HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC);
+}
+
+static void sq_prepare_task(struct hinic_sq_task *task)
+{
+ task->pkt_info0 = 0;
+ task->pkt_info1 = 0;
+ task->pkt_info2 = 0;
+
+ task->ufo_v6_identify = 0;
+
+ task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE);
+
+ task->zero_pad = 0;
+}
+
+void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len)
+{
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN);
+}
+
+void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len)
+{
+ task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
+ HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
+}
+
+void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len)
+{
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
+ task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
+}
+
+void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+ enum hinic_l4_tunnel_type l4_type,
+ u32 tunnel_len)
+{
+ task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
+ HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
+}
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len, u32 offset)
+{
+ u32 tcp_udp_cs = 0, sctp = 0;
+ u32 mss = HINIC_MSS_DEFAULT;
+
+ if (l4_offload == TCP_OFFLOAD_ENABLE ||
+ l4_offload == UDP_OFFLOAD_ENABLE)
+ tcp_udp_cs = 1;
+ else if (l4_offload == SCTP_OFFLOAD_ENABLE)
+ sctp = 1;
+
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
+ task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+ *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
+ HINIC_SQ_CTRL_SET(tcp_udp_cs, QUEUE_INFO_TCPUDP_CS) |
+ HINIC_SQ_CTRL_SET(sctp, QUEUE_INFO_SCTP);
+
+ *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+ *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
+}
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len, u32 offset, u32 ip_ident, u32 mss)
+{
+ u32 tso = 0, ufo = 0;
+
+ if (l4_offload == TCP_OFFLOAD_ENABLE)
+ tso = 1;
+ else if (l4_offload == UDP_OFFLOAD_ENABLE)
+ ufo = 1;
+
+ task->ufo_v6_identify = ip_ident;
+
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG);
+ task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+ *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
+ HINIC_SQ_CTRL_SET(tso, QUEUE_INFO_TSO) |
+ HINIC_SQ_CTRL_SET(ufo, QUEUE_INFO_UFO) |
+ HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS);
+
+ /* set MSS value */
+ *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+ *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
+}
+
+/**
+ * hinic_sq_prepare_wqe - prepare wqe before inserting it into the queue
+ * @sq: send queue
+ * @sq_wqe: wqe to prepare
+ * @sges: sges holding the buffer addresses for the wqe
+ * @nr_sges: number of sges
+ **/
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe,
+ struct hinic_sge *sges, int nr_sges)
+{
+ int i;
+
+ sq_prepare_ctrl(&sq_wqe->ctrl, nr_sges);
+
+ sq_prepare_task(&sq_wqe->task);
+
+ for (i = 0; i < nr_sges; i++)
+ sq_wqe->buf_descs[i].sge = sges[i];
+}
+
+/**
+ * sq_prepare_db - prepare doorbell to write
+ * @sq: send queue
+ * @prod_idx: pi value for the doorbell
+ * @cos: cos of the doorbell
+ *
+ * Return db value
+ **/
+static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos)
+{
+ struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
+ u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx));
+
+ /* Data should be written to HW in Big Endian Format */
+ return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) |
+ HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) |
+ HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) |
+ HINIC_SQ_DB_INFO_SET(cos, COS) |
+ HINIC_SQ_DB_INFO_SET(qp->q_id, QID));
+}
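+
+/*
+ * Illustrative example (not driver code): the masked 16-bit producer
+ * index is split across the doorbell.  For prod_idx == 0x1234, the low
+ * byte (0x34) selects the doorbell address via SQ_DB_ADDR(), while the
+ * high byte (0x12) is carried in the doorbell payload as the PI_HI
+ * field built above.
+ */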
+
+/**
+ * hinic_sq_write_db- write doorbell
+ * @sq: send queue
+ * @prod_idx: pi value for the doorbell
+ * @wqe_size: wqe size
+ * @cos: cos of the wqe
+ **/
+void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
+ unsigned int cos)
+{
+ struct hinic_wq *wq = sq->wq;
+
+ /* advance prod_idx past the wqe being posted */
+ prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ prod_idx = SQ_MASKED_IDX(sq, prod_idx);
+
+ wmb(); /* Write all before the doorbell */
+
+ writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx));
+}
+
+/**
+ * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi
+ * @sq: sq to get wqe from
+ * @wqe_size: wqe size
+ * @prod_idx: returned pi
+ *
+ * Return wqe pointer
+ **/
+struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
+ unsigned int wqe_size, u16 *prod_idx)
+{
+ struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size,
+ prod_idx);
+
+ if (IS_ERR(hw_wqe))
+ return NULL;
+
+ return &hw_wqe->sq_wqe;
+}
+
+/**
+ * hinic_sq_return_wqe - return the wqe to the sq
+ * @sq: send queue
+ * @wqe_size: the size of the wqe
+ **/
+void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size)
+{
+ hinic_return_wqe(sq->wq, wqe_size);
+}
+
+/**
+ * hinic_sq_write_wqe - write the wqe to the sq
+ * @sq: send queue
+ * @prod_idx: pi of the wqe
+ * @sq_wqe: the wqe to write
+ * @skb: skb to save
+ * @wqe_size: the size of the wqe
+ **/
+void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
+ struct hinic_sq_wqe *sq_wqe,
+ struct sk_buff *skb, unsigned int wqe_size)
+{
+ struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe;
+
+ sq->saved_skb[prod_idx] = skb;
+
+ /* The data in the HW should be in Big Endian Format */
+ hinic_cpu_to_be32(sq_wqe, wqe_size);
+
+ hinic_write_wqe(sq->wq, hw_wqe, wqe_size);
+}
+
+/**
+ * hinic_sq_read_wqebb - read the wqe ptr at the current ci and update the ci;
+ * the wqe occupies only one wqebb
+ * @sq: send queue
+ * @skb: return skb that was saved
+ * @wqe_size: the wqe size ptr
+ * @cons_idx: consumer index of the wqe
+ *
+ * Return wqe in ci position
+ **/
+struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int *wqe_size, u16 *cons_idx)
+{
+ struct hinic_hw_wqe *hw_wqe;
+ struct hinic_sq_wqe *sq_wqe;
+ struct hinic_sq_ctrl *ctrl;
+ unsigned int buf_sect_len;
+ u32 ctrl_info;
+
+ /* read the ctrl section to get the wqe size */
+ hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx);
+ if (IS_ERR(hw_wqe))
+ return NULL;
+
+ *skb = sq->saved_skb[*cons_idx];
+
+ sq_wqe = &hw_wqe->sq_wqe;
+ ctrl = &sq_wqe->ctrl;
+ ctrl_info = be32_to_cpu(ctrl->ctrl_info);
+ buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN);
+
+ *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task);
+ *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len);
+ *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
+
+ return &hw_wqe->sq_wqe;
+}
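+
+/*
+ * Sizing note (illustrative): buf_sect_len is stored in 8-byte units
+ * (see sq_prepare_ctrl()), so the reconstruction above is the exact
+ * inverse of the writer's computation: ctrl + task + buffer
+ * descriptors, rounded up to whole wqebbs.
+ */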
+
+/**
+ * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
+ * @sq: send queue
+ * @skb: return skb that was saved
+ * @wqe_size: the size of the wqe
+ * @cons_idx: consumer index of the wqe
+ *
+ * Return wqe in ci position
+ **/
+struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int wqe_size, u16 *cons_idx)
+{
+ struct hinic_hw_wqe *hw_wqe;
+
+ hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
+ *skb = sq->saved_skb[*cons_idx];
+
+ return &hw_wqe->sq_wqe;
+}
+
+/**
+ * hinic_sq_put_wqe - release the ci for new wqes
+ * @sq: send queue
+ * @wqe_size: the size of the wqe
+ **/
+void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size)
+{
+ hinic_put_wqe(sq->wq, wqe_size);
+}
+
+/**
+ * hinic_sq_get_sges - get sges from the wqe
+ * @sq_wqe: wqe to get the sges from its buffer addresses
+ * @sges: returned sges
+ * @nr_sges: number of sges to return
+ **/
+void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
+ int nr_sges)
+{
+ int i;
+
+ for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) {
+ sges[i] = sq_wqe->buf_descs[i].sge;
+ hinic_be32_to_cpu(&sges[i], sizeof(sges[i]));
+ }
+}
+
+/**
+ * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi
+ * @rq: rq to get wqe from
+ * @wqe_size: wqe size
+ * @prod_idx: returned pi
+ *
+ * Return wqe pointer
+ **/
+struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
+ unsigned int wqe_size, u16 *prod_idx)
+{
+ struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
+ prod_idx);
+
+ if (IS_ERR(hw_wqe))
+ return NULL;
+
+ return &hw_wqe->rq_wqe;
+}
+
+/**
+ * hinic_rq_write_wqe - write the wqe to the rq
+ * @rq: recv queue
+ * @prod_idx: pi of the wqe
+ * @rq_wqe: the wqe to write
+ * @skb: skb to save
+ **/
+void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
+ struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb)
+{
+ struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe;
+
+ rq->saved_skb[prod_idx] = skb;
+
+ /* The data in the HW should be in Big Endian Format */
+ hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe));
+
+ hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe));
+}
+
+/**
+ * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci
+ * @rq: recv queue
+ * @wqe_size: the size of the wqe
+ * @skb: return saved skb
+ * @cons_idx: consumer index of the wqe
+ *
+ * Return wqe in ci position
+ **/
+struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
+ unsigned int wqe_size,
+ struct sk_buff **skb, u16 *cons_idx)
+{
+ struct hinic_hw_wqe *hw_wqe;
+ struct hinic_rq_cqe *cqe;
+ int rx_done;
+ u32 status;
+
+ hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
+ if (IS_ERR(hw_wqe))
+ return NULL;
+
+ cqe = rq->cqe[*cons_idx];
+
+ status = be32_to_cpu(cqe->status);
+
+ rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE);
+ if (!rx_done)
+ return NULL;
+
+ *skb = rq->saved_skb[*cons_idx];
+
+ return &hw_wqe->rq_wqe;
+}
+
+/**
+ * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position
+ * @rq: recv queue
+ * @wqe_size: the size of the wqe
+ * @skb: return saved skb
+ * @cons_idx: consumer index in the wq
+ *
+ * Return wqe in incremented ci position
+ **/
+struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
+ unsigned int wqe_size,
+ struct sk_buff **skb,
+ u16 *cons_idx)
+{
+ struct hinic_wq *wq = rq->wq;
+ struct hinic_hw_wqe *hw_wqe;
+ unsigned int num_wqebbs;
+
+ wqe_size = ALIGN(wqe_size, wq->wqebb_size);
+ num_wqebbs = wqe_size / wq->wqebb_size;
+
+ *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs);
+
+ *skb = rq->saved_skb[*cons_idx];
+
+ hw_wqe = hinic_read_wqe_direct(wq, *cons_idx);
+
+ return &hw_wqe->rq_wqe;
+}
+
+/**
+ * hinic_rq_put_wqe - release the ci for new wqes
+ * @rq: recv queue
+ * @cons_idx: consumer index of the wqe
+ * @wqe_size: the size of the wqe
+ **/
+void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
+ unsigned int wqe_size)
+{
+ struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
+ u32 status = be32_to_cpu(cqe->status);
+
+ status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE);
+
+ /* Rx WQE size is 1 WQEBB, no wq shadow */
+ cqe->status = cpu_to_be32(status);
+
+ wmb(); /* clear done flag */
+
+ hinic_put_wqe(rq->wq, wqe_size);
+}
+
+/**
+ * hinic_rq_get_sge - get sge from the wqe
+ * @rq: recv queue
+ * @rq_wqe: wqe to get the sge from its buf address
+ * @cons_idx: consumer index
+ * @sge: returned sge
+ **/
+void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
+ u16 cons_idx, struct hinic_sge *sge)
+{
+ struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
+ u32 len = be32_to_cpu(cqe->len);
+
+ sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr);
+ sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr);
+ sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN);
+}
+
+/**
+ * hinic_rq_prepare_wqe - prepare wqe before inserting it into the queue
+ * @rq: recv queue
+ * @prod_idx: pi value
+ * @rq_wqe: the wqe
+ * @sge: sge holding the recv buffer address for the wqe
+ **/
+void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
+ struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge)
+{
+ struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
+ struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
+ struct hinic_rq_cqe *cqe = rq->cqe[prod_idx];
+ struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
+ dma_addr_t cqe_dma = rq->cqe_dma[prod_idx];
+
+ ctrl->ctrl_info =
+ HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
+ HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)),
+ COMPLETE_LEN) |
+ HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)),
+ BUFDESC_SECT_LEN) |
+ HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
+
+ hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe));
+
+ buf_desc->hi_addr = sge->hi_addr;
+ buf_desc->lo_addr = sge->lo_addr;
+}
+
+/**
+ * hinic_rq_update - update pi of the rq
+ * @rq: recv queue
+ * @prod_idx: pi value
+ **/
+void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)
+{
+ *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1));
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
new file mode 100644
index 0000000000..178dcc8743
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_QP_H
+#define HINIC_HW_QP_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+
+#include "hinic_common.h"
+#include "hinic_hw_if.h"
+#include "hinic_hw_wqe.h"
+#include "hinic_hw_wq.h"
+#include "hinic_hw_qp_ctxt.h"
+
+#define HINIC_SQ_DB_INFO_PI_HI_SHIFT 0
+#define HINIC_SQ_DB_INFO_QID_SHIFT 8
+#define HINIC_SQ_DB_INFO_PATH_SHIFT 23
+#define HINIC_SQ_DB_INFO_COS_SHIFT 24
+#define HINIC_SQ_DB_INFO_TYPE_SHIFT 27
+
+#define HINIC_SQ_DB_INFO_PI_HI_MASK 0xFF
+#define HINIC_SQ_DB_INFO_QID_MASK 0x3FF
+#define HINIC_SQ_DB_INFO_PATH_MASK 0x1
+#define HINIC_SQ_DB_INFO_COS_MASK 0x7
+#define HINIC_SQ_DB_INFO_TYPE_MASK 0x1F
+
+#define HINIC_SQ_DB_INFO_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_DB_INFO_##member##_MASK) \
+ << HINIC_SQ_DB_INFO_##member##_SHIFT)
+
+#define HINIC_SQ_WQEBB_SIZE 64
+#define HINIC_RQ_WQEBB_SIZE 32
+
+#define HINIC_SQ_PAGE_SIZE SZ_256K
+#define HINIC_RQ_PAGE_SIZE SZ_256K
+
+#define HINIC_SQ_DEPTH SZ_4K
+#define HINIC_RQ_DEPTH SZ_4K
+
+#define HINIC_MAX_QUEUE_DEPTH SZ_4K
+#define HINIC_MIN_QUEUE_DEPTH 128
+
+/* HINIC_RX_BUF_SZ_IDX must be kept in sync with HINIC_RX_BUF_SZ */
+#define HINIC_RX_BUF_SZ 2048
+#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX
+
+#define HINIC_MIN_TX_WQE_SIZE(wq) \
+ ALIGN(HINIC_SQ_WQE_SIZE(1), (wq)->wqebb_size)
+
+#define HINIC_MIN_TX_NUM_WQEBBS(sq) \
+ (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
+
+enum hinic_rx_buf_sz_idx {
+ HINIC_RX_BUF_SZ_32_IDX,
+ HINIC_RX_BUF_SZ_64_IDX,
+ HINIC_RX_BUF_SZ_96_IDX,
+ HINIC_RX_BUF_SZ_128_IDX,
+ HINIC_RX_BUF_SZ_192_IDX,
+ HINIC_RX_BUF_SZ_256_IDX,
+ HINIC_RX_BUF_SZ_384_IDX,
+ HINIC_RX_BUF_SZ_512_IDX,
+ HINIC_RX_BUF_SZ_768_IDX,
+ HINIC_RX_BUF_SZ_1024_IDX,
+ HINIC_RX_BUF_SZ_1536_IDX,
+ HINIC_RX_BUF_SZ_2048_IDX,
+ HINIC_RX_BUF_SZ_3072_IDX,
+ HINIC_RX_BUF_SZ_4096_IDX,
+ HINIC_RX_BUF_SZ_8192_IDX,
+ HINIC_RX_BUF_SZ_16384_IDX,
+};
+
+struct hinic_sq {
+ struct hinic_hwif *hwif;
+
+ struct hinic_wq *wq;
+
+ u16 qid;
+
+ u32 irq;
+ u16 msix_entry;
+
+ void *hw_ci_addr;
+ dma_addr_t hw_ci_dma_addr;
+
+ void __iomem *db_base;
+
+ struct sk_buff **saved_skb;
+ struct hinic_debug_priv *dbg;
+};
+
+struct hinic_rq {
+ struct hinic_hwif *hwif;
+
+ struct hinic_wq *wq;
+
+ u16 qid;
+
+ struct cpumask affinity_mask;
+ u32 irq;
+ u16 msix_entry;
+
+ size_t buf_sz;
+
+ struct sk_buff **saved_skb;
+
+ struct hinic_rq_cqe **cqe;
+ dma_addr_t *cqe_dma;
+
+ u16 *pi_virt_addr;
+ dma_addr_t pi_dma_addr;
+ struct hinic_debug_priv *dbg;
+};
+
+struct hinic_qp {
+ struct hinic_sq sq;
+ struct hinic_rq rq;
+
+ u16 q_id;
+};
+
+void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
+ enum hinic_qp_ctxt_type ctxt_type,
+ u16 num_queues, u16 max_queues);
+
+void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
+ struct hinic_sq *sq, u16 global_qid);
+
+void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
+ struct hinic_rq *rq, u16 global_qid);
+
+int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
+ struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr,
+ dma_addr_t ci_dma_addr, void __iomem *db_base);
+
+void hinic_clean_sq(struct hinic_sq *sq);
+
+int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
+ struct hinic_wq *wq, struct msix_entry *entry);
+
+void hinic_clean_rq(struct hinic_rq *rq);
+
+int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
+
+int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
+
+void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len);
+
+void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len);
+
+void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len);
+
+void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+ enum hinic_l4_tunnel_type l4_type,
+ u32 tunnel_len);
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
+ u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len, u32 offset);
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
+ u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len,
+ u32 offset, u32 ip_ident, u32 mss);
+
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *wqe,
+ struct hinic_sge *sges, int nr_sges);
+
+void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
+ unsigned int cos);
+
+struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
+ unsigned int wqe_size, u16 *prod_idx);
+
+void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size);
+
+void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
+ struct hinic_sq_wqe *wqe, struct sk_buff *skb,
+ unsigned int wqe_size);
+
+struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int wqe_size, u16 *cons_idx);
+
+struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int *wqe_size, u16 *cons_idx);
+
+void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
+
+void hinic_sq_get_sges(struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
+ int nr_sges);
+
+struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
+ unsigned int wqe_size, u16 *prod_idx);
+
+void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
+ struct hinic_rq_wqe *wqe, struct sk_buff *skb);
+
+struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
+ unsigned int wqe_size,
+ struct sk_buff **skb, u16 *cons_idx);
+
+struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
+ unsigned int wqe_size,
+ struct sk_buff **skb,
+ u16 *cons_idx);
+
+void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
+ unsigned int wqe_size);
+
+void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe,
+ u16 cons_idx, struct hinic_sge *sge);
+
+void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
+ struct hinic_rq_wqe *wqe, struct hinic_sge *sge);
+
+void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
new file mode 100644
index 0000000000..00900a6640
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_QP_CTXT_H
+#define HINIC_HW_QP_CTXT_H
+
+#include <linux/types.h>
+
+#include "hinic_hw_cmdq.h"
+
+#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13
+#define HINIC_SQ_CTXT_CEQ_ATTR_EN_SHIFT 23
+
+#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FF
+#define HINIC_SQ_CTXT_CEQ_ATTR_EN_MASK 0x1
+
+#define HINIC_SQ_CTXT_CEQ_ATTR_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_CTXT_CEQ_ATTR_##member##_MASK) \
+ << HINIC_SQ_CTXT_CEQ_ATTR_##member##_SHIFT)
+
+#define HINIC_SQ_CTXT_CI_IDX_SHIFT 11
+#define HINIC_SQ_CTXT_CI_WRAPPED_SHIFT 23
+
+#define HINIC_SQ_CTXT_CI_IDX_MASK 0xFFF
+#define HINIC_SQ_CTXT_CI_WRAPPED_MASK 0x1
+
+#define HINIC_SQ_CTXT_CI_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_CTXT_CI_##member##_MASK) \
+ << HINIC_SQ_CTXT_CI_##member##_SHIFT)
+
+#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
+#define HINIC_SQ_CTXT_WQ_PAGE_PI_SHIFT 20
+
+#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF
+#define HINIC_SQ_CTXT_WQ_PAGE_PI_MASK 0xFFF
+
+#define HINIC_SQ_CTXT_WQ_PAGE_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_CTXT_WQ_PAGE_##member##_MASK) \
+ << HINIC_SQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
+#define HINIC_SQ_CTXT_PREF_CACHE_MAX_SHIFT 14
+#define HINIC_SQ_CTXT_PREF_CACHE_MIN_SHIFT 25
+
+#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF
+#define HINIC_SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF
+#define HINIC_SQ_CTXT_PREF_CACHE_MIN_MASK 0x7F
+
+#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0
+#define HINIC_SQ_CTXT_PREF_CI_SHIFT 20
+
+#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF
+#define HINIC_SQ_CTXT_PREF_CI_MASK 0xFFF
+
+#define HINIC_SQ_CTXT_PREF_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_CTXT_PREF_##member##_MASK) \
+ << HINIC_SQ_CTXT_PREF_##member##_SHIFT)
+
+#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0
+
+#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF
+
+#define HINIC_SQ_CTXT_WQ_BLOCK_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_CTXT_WQ_BLOCK_##member##_MASK) \
+ << HINIC_SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define HINIC_RQ_CTXT_CEQ_ATTR_EN_SHIFT 0
+#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_SHIFT 1
+
+#define HINIC_RQ_CTXT_CEQ_ATTR_EN_MASK 0x1
+#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_MASK 0x1
+
+#define HINIC_RQ_CTXT_CEQ_ATTR_SET(val, member) \
+ (((u32)(val) & HINIC_RQ_CTXT_CEQ_ATTR_##member##_MASK) \
+ << HINIC_RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
+
+#define HINIC_RQ_CTXT_PI_IDX_SHIFT 0
+#define HINIC_RQ_CTXT_PI_INTR_SHIFT 22
+
+#define HINIC_RQ_CTXT_PI_IDX_MASK 0xFFF
+#define HINIC_RQ_CTXT_PI_INTR_MASK 0x3FF
+
+#define HINIC_RQ_CTXT_PI_SET(val, member) \
+ (((u32)(val) & HINIC_RQ_CTXT_PI_##member##_MASK) << \
+ HINIC_RQ_CTXT_PI_##member##_SHIFT)
+
+#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
+#define HINIC_RQ_CTXT_WQ_PAGE_CI_SHIFT 20
+
+#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF
+#define HINIC_RQ_CTXT_WQ_PAGE_CI_MASK 0xFFF
+
+#define HINIC_RQ_CTXT_WQ_PAGE_SET(val, member) \
+ (((u32)(val) & HINIC_RQ_CTXT_WQ_PAGE_##member##_MASK) << \
+ HINIC_RQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
+#define HINIC_RQ_CTXT_PREF_CACHE_MAX_SHIFT 14
+#define HINIC_RQ_CTXT_PREF_CACHE_MIN_SHIFT 25
+
+#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF
+#define HINIC_RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF
+#define HINIC_RQ_CTXT_PREF_CACHE_MIN_MASK 0x7F
+
+#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0
+#define HINIC_RQ_CTXT_PREF_CI_SHIFT 20
+
+#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF
+#define HINIC_RQ_CTXT_PREF_CI_MASK 0xFFF
+
+#define HINIC_RQ_CTXT_PREF_SET(val, member) \
+ (((u32)(val) & HINIC_RQ_CTXT_PREF_##member##_MASK) << \
+ HINIC_RQ_CTXT_PREF_##member##_SHIFT)
+
+#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0
+
+#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF
+
+#define HINIC_RQ_CTXT_WQ_BLOCK_SET(val, member) \
+ (((u32)(val) & HINIC_RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
+ HINIC_RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define HINIC_SQ_CTXT_SIZE(num_sqs) (sizeof(struct hinic_qp_ctxt_header) \
+ + (num_sqs) * sizeof(struct hinic_sq_ctxt))
+
+#define HINIC_RQ_CTXT_SIZE(num_rqs) (sizeof(struct hinic_qp_ctxt_header) \
+ + (num_rqs) * sizeof(struct hinic_rq_ctxt))
+
+#define HINIC_WQ_PAGE_PFN_SHIFT 12
+#define HINIC_WQ_BLOCK_PFN_SHIFT 9
+
+#define HINIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> HINIC_WQ_PAGE_PFN_SHIFT)
+#define HINIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> \
+ HINIC_WQ_BLOCK_PFN_SHIFT)
+
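+/*
+ * Illustrative example: for a 4 KB-aligned page address such as
+ * 0x12345000, HINIC_WQ_PAGE_PFN() yields 0x12345 (address >> 12), while
+ * HINIC_WQ_BLOCK_PFN() works at a 512-byte granularity (address >> 9)
+ * for the CLA block pointer.
+ */
+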
+#define HINIC_Q_CTXT_MAX \
+ ((HINIC_CMDQ_BUF_SIZE - sizeof(struct hinic_qp_ctxt_header)) \
+ / sizeof(struct hinic_sq_ctxt))
+
+enum hinic_qp_ctxt_type {
+ HINIC_QP_CTXT_TYPE_SQ,
+ HINIC_QP_CTXT_TYPE_RQ
+};
+
+struct hinic_qp_ctxt_header {
+ u16 num_queues;
+ u16 queue_type;
+ u32 addr_offset;
+};
+
+struct hinic_sq_ctxt {
+ u32 ceq_attr;
+
+ u32 ci_wrapped;
+
+ u32 wq_hi_pfn_pi;
+ u32 wq_lo_pfn;
+
+ u32 pref_cache;
+ u32 pref_wrapped;
+ u32 pref_wq_hi_pfn_ci;
+ u32 pref_wq_lo_pfn;
+
+ u32 rsvd0;
+ u32 rsvd1;
+
+ u32 wq_block_hi_pfn;
+ u32 wq_block_lo_pfn;
+};
+
+struct hinic_rq_ctxt {
+ u32 ceq_attr;
+
+ u32 pi_intr_attr;
+
+ u32 wq_hi_pfn_ci;
+ u32 wq_lo_pfn;
+
+ u32 pref_cache;
+ u32 pref_wrapped;
+
+ u32 pref_wq_hi_pfn_ci;
+ u32 pref_wq_lo_pfn;
+
+ u32 pi_paddr_hi;
+ u32 pi_paddr_lo;
+
+ u32 wq_block_hi_pfn;
+ u32 wq_block_lo_pfn;
+};
+
+struct hinic_clean_queue_ctxt {
+ struct hinic_qp_ctxt_header cmdq_hdr;
+ u32 ctxt_size;
+};
+
+struct hinic_sq_ctxt_block {
+ struct hinic_qp_ctxt_header hdr;
+ struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
+};
+
+struct hinic_rq_ctxt_block {
+ struct hinic_qp_ctxt_header hdr;
+ struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX];
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
new file mode 100644
index 0000000000..e1a1735c00
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -0,0 +1,904 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/semaphore.h>
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+#include <linux/err.h>
+#include <asm/byteorder.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_wqe.h"
+#include "hinic_hw_wq.h"
+#include "hinic_hw_cmdq.h"
+
+#define WQS_BLOCKS_PER_PAGE 4
+
+#define WQ_BLOCK_SIZE 4096
+#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
+
+#define WQS_MAX_NUM_BLOCKS 128
+#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \
+ sizeof((wqs)->free_blocks[0]))
+
+#define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
+
+#define WQ_PAGE_ADDR_SIZE sizeof(u64)
+#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
+
+#define CMDQ_BLOCK_SIZE 512
+#define CMDQ_PAGE_SIZE 4096
+
+#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
+
+#define WQ_BASE_VADDR(wqs, wq) \
+ ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define WQ_BASE_PADDR(wqs, wq) \
+ ((wqs)->page_paddr[(wq)->page_idx] \
+ + (wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define WQ_BASE_ADDR(wqs, wq) \
+ ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
+ ((void *)((cmdq_pages)->page_vaddr) \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
+ ((cmdq_pages)->page_paddr \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
+ ((void *)((cmdq_pages)->shadow_page_vaddr) \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define WQ_PAGE_ADDR(wq, idx) \
+ ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
+
+#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
+
+#define WQE_IN_RANGE(wqe, start, end) \
+ (((unsigned long)(wqe) >= (unsigned long)(start)) && \
+ ((unsigned long)(wqe) < (unsigned long)(end)))
+
+#define WQE_SHADOW_PAGE(wq, wqe) \
+ (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
+ / (wq)->max_wqe_size)
+
+static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
+{
+ return (((idx) & ((wq)->num_wqebbs_per_page - 1))
+ << (wq)->wqebb_size_shift);
+}
+
+static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
+{
+ return (((idx) >> ((wq)->wqebbs_per_page_shift))
+ & ((wq)->num_q_pages - 1));
+}
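+
+/*
+ * Illustrative example (the constants are assumptions, not taken from
+ * this file): with a 64-byte wqebb (wqebb_size_shift == 6), 4096 wqebbs
+ * per page (wqebbs_per_page_shift == 12) and at least two queue pages,
+ * index 5000 maps to page WQE_PAGE_NUM() == 1 at byte offset
+ * WQE_PAGE_OFF() == (5000 & 4095) << 6 == 57856.  Both helpers rely on
+ * the counts being powers of two.
+ */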
+
+/**
+ * queue_alloc_page - allocate page for Queue
+ * @hwif: HW interface for allocating DMA
+ * @vaddr: virtual address returned here
+ * @paddr: physical address returned here
+ * @shadow_vaddr: VM area returned here for holding WQ page addresses
+ * @page_sz: page size of each WQ page
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
+ void ***shadow_vaddr, size_t page_sz)
+{
+ struct pci_dev *pdev = hwif->pdev;
+ dma_addr_t dma_addr;
+
+ *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
+ GFP_KERNEL);
+ if (!*vaddr) {
+ dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
+ return -ENOMEM;
+ }
+
+ *paddr = (u64)dma_addr;
+
+ /* use vzalloc for the potentially large shadow area */
+ *shadow_vaddr = vzalloc(page_sz);
+ if (!*shadow_vaddr)
+ goto err_shadow_vaddr;
+
+ return 0;
+
+err_shadow_vaddr:
+ dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr);
+ return -ENOMEM;
+}
+
+/**
+ * wqs_allocate_page - allocate page for WQ set
+ * @wqs: Work Queue Set
+ * @page_idx: index of the page to be allocated
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx)
+{
+ return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx],
+ &wqs->page_paddr[page_idx],
+ &wqs->shadow_page_vaddr[page_idx],
+ WQS_PAGE_SIZE);
+}
+
+/**
+ * wqs_free_page - free page of WQ set
+ * @wqs: Work Queue Set
+ * @page_idx: index of the page to be freed
+ **/
+static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
+{
+ struct hinic_hwif *hwif = wqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE,
+ wqs->page_vaddr[page_idx],
+ (dma_addr_t)wqs->page_paddr[page_idx]);
+ vfree(wqs->shadow_page_vaddr[page_idx]);
+}
+
+/**
+ * cmdq_allocate_page - allocate page for cmdq
+ * @cmdq_pages: the pages of the cmdq queue struct to hold the page
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
+{
+ return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
+ &cmdq_pages->page_paddr,
+ &cmdq_pages->shadow_page_vaddr,
+ CMDQ_PAGE_SIZE);
+}
+
+/**
+ * cmdq_free_page - free page from cmdq
+ * @cmdq_pages: the pages of the cmdq queue struct that hold the page
+ **/
+static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
+{
+ struct hinic_hwif *hwif = cmdq_pages->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
+ cmdq_pages->page_vaddr,
+ (dma_addr_t)cmdq_pages->page_paddr);
+ vfree(cmdq_pages->shadow_page_vaddr);
+}
+
+static int alloc_page_arrays(struct hinic_wqs *wqs)
+{
+ struct hinic_hwif *hwif = wqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_paddr), GFP_KERNEL);
+ if (!wqs->page_paddr)
+ return -ENOMEM;
+
+ wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_vaddr), GFP_KERNEL);
+ if (!wqs->page_vaddr)
+ goto err_page_vaddr;
+
+ wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->shadow_page_vaddr),
+ GFP_KERNEL);
+ if (!wqs->shadow_page_vaddr)
+ goto err_page_shadow_vaddr;
+
+ return 0;
+
+err_page_shadow_vaddr:
+ devm_kfree(&pdev->dev, wqs->page_vaddr);
+
+err_page_vaddr:
+ devm_kfree(&pdev->dev, wqs->page_paddr);
+ return -ENOMEM;
+}
+
+static void free_page_arrays(struct hinic_wqs *wqs)
+{
+ struct hinic_hwif *hwif = wqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ devm_kfree(&pdev->dev, wqs->shadow_page_vaddr);
+ devm_kfree(&pdev->dev, wqs->page_vaddr);
+ devm_kfree(&pdev->dev, wqs->page_paddr);
+}
+
+static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx,
+ int *block_idx)
+{
+ int pos;
+
+ down(&wqs->alloc_blocks_lock);
+
+ wqs->num_free_blks--;
+
+ if (wqs->num_free_blks < 0) {
+ wqs->num_free_blks++;
+ up(&wqs->alloc_blocks_lock);
+ return -ENOMEM;
+ }
+
+ pos = wqs->alloc_blk_pos++;
+ pos &= WQS_MAX_NUM_BLOCKS - 1;
+
+ *page_idx = wqs->free_blocks[pos].page_idx;
+ *block_idx = wqs->free_blocks[pos].block_idx;
+
+ wqs->free_blocks[pos].page_idx = -1;
+ wqs->free_blocks[pos].block_idx = -1;
+
+ up(&wqs->alloc_blocks_lock);
+ return 0;
+}
+
+static void wqs_return_block(struct hinic_wqs *wqs, int page_idx,
+ int block_idx)
+{
+ int pos;
+
+ down(&wqs->alloc_blocks_lock);
+
+ pos = wqs->return_blk_pos++;
+ pos &= WQS_MAX_NUM_BLOCKS - 1;
+
+ wqs->free_blocks[pos].page_idx = page_idx;
+ wqs->free_blocks[pos].block_idx = block_idx;
+
+ wqs->num_free_blks++;
+
+ up(&wqs->alloc_blocks_lock);
+}
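+
+/*
+ * Free-list sketch (illustrative): free_blocks is treated as a ring of
+ * WQS_MAX_NUM_BLOCKS slots.  alloc_blk_pos chases return_blk_pos, and
+ * num_free_blks, guarded by alloc_blocks_lock, keeps the consumer from
+ * overtaking the producer, so a block returned by wqs_return_block()
+ * is eventually handed out again by wqs_next_block().
+ */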
+
+static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
+{
+ int page_idx, blk_idx, pos = 0;
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
+ for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
+ wqs->free_blocks[pos].page_idx = page_idx;
+ wqs->free_blocks[pos].block_idx = blk_idx;
+ pos++;
+ }
+ }
+
+ wqs->alloc_blk_pos = 0;
+ wqs->return_blk_pos = pos;
+ wqs->num_free_blks = pos;
+
+ sema_init(&wqs->alloc_blocks_lock, 1);
+}
+
+/**
+ * hinic_wqs_alloc - allocate Work Queues set
+ * @wqs: Work Queue Set
+ * @max_wqs: maximum wqs to allocate
+ * @hwif: HW interface to use for the allocation
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs,
+ struct hinic_hwif *hwif)
+{
+ struct pci_dev *pdev = hwif->pdev;
+ int err, i, page_idx;
+
+ max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE);
+ if (max_wqs > WQS_MAX_NUM_BLOCKS) {
+ dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs);
+ return -EINVAL;
+ }
+
+ wqs->hwif = hwif;
+ wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE;
+
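+	/* Each WQS page is carved into WQS_BLOCKS_PER_PAGE blocks; a WQ
+	 * later claims one (page, block) slot via wqs_next_block().
+	 */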
+ if (alloc_page_arrays(wqs)) {
+ dev_err(&pdev->dev,
+ "Failed to allocate mem for page addresses\n");
+ return -ENOMEM;
+ }
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
+ err = wqs_allocate_page(wqs, page_idx);
+ if (err) {
+ dev_err(&pdev->dev, "Failed wq page allocation\n");
+ goto err_wq_allocate_page;
+ }
+ }
+
+ wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs),
+ GFP_KERNEL);
+ if (!wqs->free_blocks) {
+ err = -ENOMEM;
+ goto err_alloc_blocks;
+ }
+
+ init_wqs_blocks_arr(wqs);
+ return 0;
+
+err_alloc_blocks:
+err_wq_allocate_page:
+ for (i = 0; i < page_idx; i++)
+ wqs_free_page(wqs, i);
+
+ free_page_arrays(wqs);
+ return err;
+}
+
+/**
+ * hinic_wqs_free - free Work Queues set
+ * @wqs: Work Queue Set
+ **/
+void hinic_wqs_free(struct hinic_wqs *wqs)
+{
+ struct hinic_hwif *hwif = wqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int page_idx;
+
+ devm_kfree(&pdev->dev, wqs->free_blocks);
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
+ wqs_free_page(wqs, page_idx);
+
+ free_page_arrays(wqs);
+}
+
+/**
+ * alloc_wqes_shadow - allocate WQE shadows for WQ
+ * @wq: WQ to allocate shadows for
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int alloc_wqes_shadow(struct hinic_wq *wq)
+{
+ struct hinic_hwif *hwif = wq->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
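+	/* one shadow wqe of max_wqe_size, and one saved producer index,
+	 * per queue page
+	 */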
+ wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ wq->max_wqe_size, GFP_KERNEL);
+ if (!wq->shadow_wqe)
+ return -ENOMEM;
+
+ wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ sizeof(*wq->shadow_idx), GFP_KERNEL);
+ if (!wq->shadow_idx)
+ goto err_shadow_idx;
+
+ return 0;
+
+err_shadow_idx:
+ devm_kfree(&pdev->dev, wq->shadow_wqe);
+ return -ENOMEM;
+}
+
+/**
+ * free_wqes_shadow - free WQE shadows of WQ
+ * @wq: WQ to free shadows from
+ **/
+static void free_wqes_shadow(struct hinic_wq *wq)
+{
+ struct hinic_hwif *hwif = wq->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ devm_kfree(&pdev->dev, wq->shadow_idx);
+ devm_kfree(&pdev->dev, wq->shadow_wqe);
+}
+
+/**
+ * free_wq_pages - free pages of WQ
+ * @wq: WQ to free pages from
+ * @hwif: HW interface for releasing dma addresses
+ * @num_q_pages: number of pages to free
+ **/
+static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
+ int num_q_pages)
+{
+ struct pci_dev *pdev = hwif->pdev;
+ int i;
+
+ for (i = 0; i < num_q_pages; i++) {
+ void **vaddr = &wq->shadow_block_vaddr[i];
+ u64 *paddr = &wq->block_vaddr[i];
+ dma_addr_t dma_addr;
+
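+		/* the address was stored big-endian for the HW; convert back */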
+ dma_addr = (dma_addr_t)be64_to_cpu(*paddr);
+ dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
+ dma_addr);
+ }
+
+ free_wqes_shadow(wq);
+}
+
+/**
+ * alloc_wq_pages - alloc pages for WQ
+ * @wq: WQ to allocate pages for
+ * @hwif: HW interface for allocating dma addresses
+ * @max_pages: maximum pages allowed
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
+ int max_pages)
+{
+ struct pci_dev *pdev = hwif->pdev;
+ int i, err, num_q_pages;
+
+ num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
+ if (num_q_pages > max_pages) {
+		dev_err(&pdev->dev, "Number of wq pages exceeds the limit\n");
+ return -EINVAL;
+ }
+
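+	/* sizes must be powers of 2 so the index/page math can use the
+	 * *_shift fields below
+	 */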
+ if (num_q_pages & (num_q_pages - 1)) {
+		dev_err(&pdev->dev, "Number of wq pages must be power of 2\n");
+ return -EINVAL;
+ }
+
+ wq->num_q_pages = num_q_pages;
+
+ err = alloc_wqes_shadow(wq);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate wqe shadow\n");
+ return err;
+ }
+
+ for (i = 0; i < num_q_pages; i++) {
+ void **vaddr = &wq->shadow_block_vaddr[i];
+ u64 *paddr = &wq->block_vaddr[i];
+ dma_addr_t dma_addr;
+
+ *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
+ &dma_addr, GFP_KERNEL);
+ if (!*vaddr) {
+ dev_err(&pdev->dev, "Failed to allocate wq page\n");
+ goto err_alloc_wq_pages;
+ }
+
+ /* HW uses Big Endian Format */
+ *paddr = cpu_to_be64(dma_addr);
+ }
+
+ return 0;
+
+err_alloc_wq_pages:
+ free_wq_pages(wq, hwif, i);
+ return -ENOMEM;
+}
+
+/**
+ * hinic_wq_allocate - Allocate the WQ resources from the WQS
+ * @wqs: WQ set from which to allocate the WQ resources
+ * @wq: WQ to allocate resources for from the WQ set
+ * @wqebb_size: Work Queue Block Byte Size
+ * @wq_page_size: the page size in the Work Queue
+ * @q_depth: number of wqebbs in WQ
+ * @max_wqe_size: maximum WQE size that will be used in the WQ
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
+ u16 wqebb_size, u32 wq_page_size, u16 q_depth,
+ u16 max_wqe_size)
+{
+ struct hinic_hwif *hwif = wqs->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 num_wqebbs_per_page;
+ u16 wqebb_size_shift;
+ int err;
+
+ if (!is_power_of_2(wqebb_size)) {
+ dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
+ return -EINVAL;
+ }
+
+ if (wq_page_size == 0) {
+ dev_err(&pdev->dev, "wq_page_size must be > 0\n");
+ return -EINVAL;
+ }
+
+ if (q_depth & (q_depth - 1)) {
+ dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
+ return -EINVAL;
+ }
+
+ wqebb_size_shift = ilog2(wqebb_size);
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+ >> wqebb_size_shift;
+
+ if (!is_power_of_2(num_wqebbs_per_page)) {
+ dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
+ return -EINVAL;
+ }
+
+ wq->hwif = hwif;
+
+ err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get free wqs next block\n");
+ return err;
+ }
+
+ wq->wqebb_size = wqebb_size;
+ wq->wq_page_size = wq_page_size;
+ wq->q_depth = q_depth;
+ wq->max_wqe_size = max_wqe_size;
+ wq->num_wqebbs_per_page = num_wqebbs_per_page;
+ wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
+ wq->wqebb_size_shift = wqebb_size_shift;
+ wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
+ wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
+ wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
+
+ err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate wq pages\n");
+ goto err_alloc_wq_pages;
+ }
+
+ atomic_set(&wq->cons_idx, 0);
+ atomic_set(&wq->prod_idx, 0);
+ atomic_set(&wq->delta, q_depth);
+ wq->mask = q_depth - 1;
+
+ return 0;
+
+err_alloc_wq_pages:
+ wqs_return_block(wqs, wq->page_idx, wq->block_idx);
+ return err;
+}
+
+/**
+ * hinic_wq_free - return the WQ resources to the WQS
+ * @wqs: WQ set to return the WQ resources to
+ * @wq: WQ whose resources are returned to the WQ set
+ **/
+void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
+{
+ free_wq_pages(wq, wqs->hwif, wq->num_q_pages);
+
+ wqs_return_block(wqs, wq->page_idx, wq->block_idx);
+}
+
+/**
+ * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
+ * @cmdq_pages: will hold the pages of the cmdq
+ * @wq: returned wqs
+ * @hwif: HW interface
+ * @cmdq_blocks: number of cmdq blocks/wq to allocate
+ * @wqebb_size: Work Queue Block Byte Size
+ * @wq_page_size: the page size in the Work Queue
+ * @q_depth: number of wqebbs in WQ
+ * @max_wqe_size: maximum WQE size that will be used in the WQ
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, struct hinic_hwif *hwif,
+ int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
+ u16 q_depth, u16 max_wqe_size)
+{
+ struct pci_dev *pdev = hwif->pdev;
+ u16 num_wqebbs_per_page_shift;
+ u16 num_wqebbs_per_page;
+ u16 wqebb_size_shift;
+ int i, j, err = -ENOMEM;
+
+ if (!is_power_of_2(wqebb_size)) {
+ dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
+ return -EINVAL;
+ }
+
+ if (wq_page_size == 0) {
+ dev_err(&pdev->dev, "wq_page_size must be > 0\n");
+ return -EINVAL;
+ }
+
+ if (q_depth & (q_depth - 1)) {
+ dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
+ return -EINVAL;
+ }
+
+ wqebb_size_shift = ilog2(wqebb_size);
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+ >> wqebb_size_shift;
+
+ if (!is_power_of_2(num_wqebbs_per_page)) {
+ dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
+ return -EINVAL;
+ }
+
+ cmdq_pages->hwif = hwif;
+
+ err = cmdq_allocate_page(cmdq_pages);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
+ return err;
+ }
+ num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
+
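+	/* all command queues share the single CMDQ page; cmdq i uses
+	 * block i within it
+	 */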
+ for (i = 0; i < cmdq_blocks; i++) {
+ wq[i].hwif = hwif;
+ wq[i].page_idx = 0;
+ wq[i].block_idx = i;
+
+ wq[i].wqebb_size = wqebb_size;
+ wq[i].wq_page_size = wq_page_size;
+ wq[i].q_depth = q_depth;
+ wq[i].max_wqe_size = max_wqe_size;
+ wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
+ wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
+ wq[i].wqebb_size_shift = wqebb_size_shift;
+ wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
+ wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
+ wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
+
+ err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
+ CMDQ_WQ_MAX_PAGES);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
+ goto err_cmdq_block;
+ }
+
+ atomic_set(&wq[i].cons_idx, 0);
+ atomic_set(&wq[i].prod_idx, 0);
+ atomic_set(&wq[i].delta, q_depth);
+ wq[i].mask = q_depth - 1;
+ }
+
+ return 0;
+
+err_cmdq_block:
+ for (j = 0; j < i; j++)
+ free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);
+
+ cmdq_free_page(cmdq_pages);
+ return err;
+}
+
+/**
+ * hinic_wqs_cmdq_free - Free wqs from cmdqs
+ * @cmdq_pages: hold the pages of the cmdq
+ * @wq: wqs to free
+ * @cmdq_blocks: number of wqs to free
+ **/
+void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, int cmdq_blocks)
+{
+ int i;
+
+ for (i = 0; i < cmdq_blocks; i++)
+ free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);
+
+ cmdq_free_page(cmdq_pages);
+}
+
+static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
+ int num_wqebbs, u16 idx)
+{
+ void *wqebb_addr;
+ int i;
+
+ for (i = 0; i < num_wqebbs; i++, idx++) {
+ idx = MASKED_WQE_IDX(wq, idx);
+ wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
+ WQE_PAGE_OFF(wq, idx);
+
+ memcpy(shadow_addr, wqebb_addr, wq->wqebb_size);
+
+ shadow_addr += wq->wqebb_size;
+ }
+}
+
+static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
+ int num_wqebbs, u16 idx)
+{
+ void *wqebb_addr;
+ int i;
+
+ for (i = 0; i < num_wqebbs; i++, idx++) {
+ idx = MASKED_WQE_IDX(wq, idx);
+ wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
+ WQE_PAGE_OFF(wq, idx);
+
+ memcpy(wqebb_addr, shadow_addr, wq->wqebb_size);
+ shadow_addr += wq->wqebb_size;
+ }
+}
+
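+/* A wqe that wraps past the end of a queue page is not contiguous in
+ * memory, so it is assembled in, and written back from, a per-page
+ * shadow buffer rather than being accessed in place.
+ */
+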
+/**
+ * hinic_get_wqe - get a wqe pointer at the current pi and advance the pi
+ * @wq: wq to get wqe from
+ * @wqe_size: wqe size
+ * @prod_idx: returned pi
+ *
+ * Return wqe pointer
+ **/
+struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
+ u16 *prod_idx)
+{
+ int curr_pg, end_pg, num_wqebbs;
+ u16 curr_prod_idx, end_prod_idx;
+
+ *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
+
+ num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;
+
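+	/* delta counts the unused wqebbs; reserve ours up front and back
+	 * off if that would drain the queue completely
+	 */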
+ if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
+ atomic_add(num_wqebbs, &wq->delta);
+ return ERR_PTR(-EBUSY);
+ }
+
+ end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);
+
+ end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx);
+ curr_prod_idx = end_prod_idx - num_wqebbs;
+ curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
+
+ /* end prod index points to the next wqebb, therefore minus 1 */
+ end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1);
+
+ curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
+ end_pg = WQE_PAGE_NUM(wq, end_prod_idx);
+
+ *prod_idx = curr_prod_idx;
+
+	/* Even if we only have one page, we still need to use the shadow
+	 * wqe when the wqe rolls over the page boundary
+	 */
+ if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
+ void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
+
+ copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
+
+ wq->shadow_idx[curr_pg] = *prod_idx;
+ return shadow_addr;
+ }
+
+ return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
+}
+
+/**
+ * hinic_return_wqe - return the wqe when transmit failed
+ * @wq: wq to return the wqe to
+ * @wqe_size: wqe size
+ **/
+void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
+{
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+
+ atomic_sub(num_wqebbs, &wq->prod_idx);
+
+ atomic_add(num_wqebbs, &wq->delta);
+}
+
+/**
+ * hinic_put_wqe - release the space of a consumed wqe for a new wqe
+ * @wq: wq to return the wqe to
+ * @wqe_size: wqe size
+ **/
+void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
+{
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+ >> wq->wqebb_size_shift;
+
+ atomic_add(num_wqebbs, &wq->cons_idx);
+
+ atomic_add(num_wqebbs, &wq->delta);
+}
+
+/**
+ * hinic_read_wqe - read the wqe pointer at the current ci
+ * @wq: wq to read from
+ * @wqe_size: wqe size
+ * @cons_idx: returned ci
+ *
+ * Return wqe pointer
+ **/
+struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
+ u16 *cons_idx)
+{
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+ >> wq->wqebb_size_shift;
+ u16 curr_cons_idx, end_cons_idx;
+ int curr_pg, end_pg;
+
+ if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
+ return ERR_PTR(-EBUSY);
+
+ curr_cons_idx = atomic_read(&wq->cons_idx);
+
+ curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
+ end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);
+
+ curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
+ end_pg = WQE_PAGE_NUM(wq, end_cons_idx);
+
+ *cons_idx = curr_cons_idx;
+
+	/* Even if we only have one page, we still need to use the shadow
+	 * wqe when the wqe rolls over the page boundary
+	 */
+ if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
+ void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
+
+ copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
+ return shadow_addr;
+ }
+
+ return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
+}
+
+/**
+ * hinic_read_wqe_direct - read wqe directly from ci position
+ * @wq: wq
+ * @cons_idx: ci position
+ *
+ * Return wqe
+ **/
+struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx)
+{
+ return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx);
+}
+
+/**
+ * wqe_shadow - check if a wqe is a shadow wqe
+ * @wq: wq of the wqe
+ * @wqe: the wqe for shadow checking
+ *
+ * Return true - shadow, false - Not shadow
+ **/
+static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe)
+{
+ size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;
+
+ return WQE_IN_RANGE(wqe, wq->shadow_wqe,
+ &wq->shadow_wqe[wqe_shadow_size]);
+}
+
+/**
+ * hinic_write_wqe - write the wqe to the wq
+ * @wq: wq to write wqe to
+ * @wqe: wqe to write
+ * @wqe_size: wqe size
+ **/
+void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
+ unsigned int wqe_size)
+{
+ int curr_pg, num_wqebbs;
+ void *shadow_addr;
+ u16 prod_idx;
+
+ if (wqe_shadow(wq, wqe)) {
+ curr_pg = WQE_SHADOW_PAGE(wq, wqe);
+
+ prod_idx = wq->shadow_idx[curr_pg];
+ num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
+
+ copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx);
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
new file mode 100644
index 0000000000..b06f8c0255
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_WQ_H
+#define HINIC_HW_WQ_H
+
+#include <linux/types.h>
+#include <linux/semaphore.h>
+#include <linux/atomic.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_wqe.h"
+
+struct hinic_free_block {
+ int page_idx;
+ int block_idx;
+};
+
+struct hinic_wq {
+ struct hinic_hwif *hwif;
+
+ int page_idx;
+ int block_idx;
+
+ u16 wqebb_size;
+ u32 wq_page_size;
+ u16 q_depth;
+ u16 max_wqe_size;
+ u16 num_wqebbs_per_page;
+ u16 wqebbs_per_page_shift;
+ u16 wqebb_size_shift;
+ /* The addresses are 64 bit in the HW */
+ u64 block_paddr;
+ void **shadow_block_vaddr;
+ u64 *block_vaddr;
+
+ int num_q_pages;
+ u8 *shadow_wqe;
+ u16 *shadow_idx;
+
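+	/* ring state: delta is the count of unused wqebbs */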
+ atomic_t cons_idx;
+ atomic_t prod_idx;
+ atomic_t delta;
+ u16 mask;
+};
+
+struct hinic_wqs {
+ struct hinic_hwif *hwif;
+ int num_pages;
+
+ /* The addresses are 64 bit in the HW */
+ u64 *page_paddr;
+ u64 **page_vaddr;
+ void ***shadow_page_vaddr;
+
+ struct hinic_free_block *free_blocks;
+ int alloc_blk_pos;
+ int return_blk_pos;
+ int num_free_blks;
+
+ /* Lock for getting a free block from the WQ set */
+ struct semaphore alloc_blocks_lock;
+};
+
+struct hinic_cmdq_pages {
+ /* The addresses are 64 bit in the HW */
+ u64 page_paddr;
+ u64 *page_vaddr;
+ void **shadow_page_vaddr;
+
+ struct hinic_hwif *hwif;
+};
+
+int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, struct hinic_hwif *hwif,
+ int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
+ u16 q_depth, u16 max_wqe_size);
+
+void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, int cmdq_blocks);
+
+int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
+ struct hinic_hwif *hwif);
+
+void hinic_wqs_free(struct hinic_wqs *wqs);
+
+int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
+ u16 wqebb_size, u32 wq_page_size, u16 q_depth,
+ u16 max_wqe_size);
+
+void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
+
+struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
+ u16 *prod_idx);
+
+void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size);
+
+void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
+
+struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
+ u16 *cons_idx);
+
+struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx);
+
+void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
+ unsigned int wqe_size);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
new file mode 100644
index 0000000000..c6bdeed560
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_HW_WQE_H
+#define HINIC_HW_WQE_H
+
+#include "hinic_common.h"
+
+#define HINIC_CMDQ_CTRL_PI_SHIFT 0
+#define HINIC_CMDQ_CTRL_CMD_SHIFT 16
+#define HINIC_CMDQ_CTRL_MOD_SHIFT 24
+#define HINIC_CMDQ_CTRL_ACK_TYPE_SHIFT 29
+#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
+
+#define HINIC_CMDQ_CTRL_PI_MASK 0xFFFF
+#define HINIC_CMDQ_CTRL_CMD_MASK 0xFF
+#define HINIC_CMDQ_CTRL_MOD_MASK 0x1F
+#define HINIC_CMDQ_CTRL_ACK_TYPE_MASK 0x3
+#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1
+
+#define HINIC_CMDQ_CTRL_SET(val, member) \
+ (((u32)(val) & HINIC_CMDQ_CTRL_##member##_MASK) \
+ << HINIC_CMDQ_CTRL_##member##_SHIFT)
+
+#define HINIC_CMDQ_CTRL_GET(val, member) \
+ (((val) >> HINIC_CMDQ_CTRL_##member##_SHIFT) \
+ & HINIC_CMDQ_CTRL_##member##_MASK)
+
+#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
+#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
+#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
+#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
+#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
+#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
+#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_SHIFT 31
+
+#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFF
+#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1
+#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1
+#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1
+#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3
+#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3
+#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_MASK 0x1
+
+#define HINIC_CMDQ_WQE_HEADER_SET(val, member) \
+ (((u32)(val) & HINIC_CMDQ_WQE_HEADER_##member##_MASK) \
+ << HINIC_CMDQ_WQE_HEADER_##member##_SHIFT)
+
+#define HINIC_CMDQ_WQE_HEADER_GET(val, member) \
+ (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \
+ & HINIC_CMDQ_WQE_HEADER_##member##_MASK)
+
+#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
+#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16
+#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22
+#define HINIC_SQ_CTRL_LEN_SHIFT 29
+
+#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
+#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F
+#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1
+#define HINIC_SQ_CTRL_LEN_MASK 0x3
+
+#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2
+#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10
+#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11
+#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12
+#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
+#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27
+#define HINIC_SQ_CTRL_QUEUE_INFO_UC_SHIFT 28
+#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29
+
+#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFF
+#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF
+#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_UC_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7
+
+#define HINIC_SQ_CTRL_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \
+ << HINIC_SQ_CTRL_##member##_SHIFT)
+
+#define HINIC_SQ_CTRL_GET(val, member) \
+ (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \
+ & HINIC_SQ_CTRL_##member##_MASK)
+
+#define HINIC_SQ_CTRL_CLEAR(val, member) \
+ ((u32)(val) & (~(HINIC_SQ_CTRL_##member##_MASK \
+ << HINIC_SQ_CTRL_##member##_SHIFT)))
+
+#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0
+#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8
+#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10
+#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12
+#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13
+/* 1 bit reserved */
+#define HINIC_SQ_TASK_INFO0_TSO_FLAG_SHIFT 15
+#define HINIC_SQ_TASK_INFO0_VLAN_TAG_SHIFT 16
+
+#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_MASK 0x3
+#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3
+#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1
+#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1
+/* 1 bit reserved */
+#define HINIC_SQ_TASK_INFO0_TSO_FLAG_MASK 0x1
+#define HINIC_SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFF
+
+#define HINIC_SQ_TASK_INFO0_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_TASK_INFO0_##member##_MASK) << \
+ HINIC_SQ_TASK_INFO0_##member##_SHIFT)
+
+/* 8 bits reserved */
+#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8
+#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16
+#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24
+
+/* 8 bits reserved */
+#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF
+#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFF
+
+#define HINIC_SQ_TASK_INFO1_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \
+ HINIC_SQ_TASK_INFO1_##member##_SHIFT)
+
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0
+#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16
+/* 1 bit reserved */
+#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24
+/* 8 bits reserved */
+
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7
+/* 1 bit reserved */
+#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3
+/* 8 bits reserved */
+
+#define HINIC_SQ_TASK_INFO2_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_TASK_INFO2_##member##_MASK) << \
+ HINIC_SQ_TASK_INFO2_##member##_SHIFT)
+
+/* 31 bits reserved */
+#define HINIC_SQ_TASK_INFO4_L2TYPE_SHIFT 31
+
+/* 31 bits reserved */
+#define HINIC_SQ_TASK_INFO4_L2TYPE_MASK 0x1
+
+#define HINIC_SQ_TASK_INFO4_SET(val, member) \
+ (((u32)(val) & HINIC_SQ_TASK_INFO4_##member##_MASK) << \
+ HINIC_SQ_TASK_INFO4_##member##_SHIFT)
+
+#define HINIC_RQ_CQE_STATUS_RXDONE_SHIFT 31
+
+#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1
+
+#define HINIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT 0
+
+#define HINIC_RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU
+
+#define HINIC_RQ_CQE_STATUS_GET(val, member) \
+ (((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \
+ HINIC_RQ_CQE_STATUS_##member##_MASK)
+
+#define HINIC_RQ_CQE_STATUS_CLEAR(val, member) \
+ ((val) & (~(HINIC_RQ_CQE_STATUS_##member##_MASK << \
+ HINIC_RQ_CQE_STATUS_##member##_SHIFT)))
+
+#define HINIC_RQ_CQE_SGE_LEN_SHIFT 16
+
+#define HINIC_RQ_CQE_SGE_LEN_MASK 0xFFFF
+
+#define HINIC_RQ_CQE_SGE_GET(val, member) \
+ (((val) >> HINIC_RQ_CQE_SGE_##member##_SHIFT) & \
+ HINIC_RQ_CQE_SGE_##member##_MASK)
+
+#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
+#define HINIC_RQ_CTRL_COMPLETE_FORMAT_SHIFT 15
+#define HINIC_RQ_CTRL_COMPLETE_LEN_SHIFT 27
+#define HINIC_RQ_CTRL_LEN_SHIFT 29
+
+#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
+#define HINIC_RQ_CTRL_COMPLETE_FORMAT_MASK 0x1
+#define HINIC_RQ_CTRL_COMPLETE_LEN_MASK 0x3
+#define HINIC_RQ_CTRL_LEN_MASK 0x3
+
+#define HINIC_RQ_CTRL_SET(val, member) \
+ (((u32)(val) & HINIC_RQ_CTRL_##member##_MASK) << \
+ HINIC_RQ_CTRL_##member##_SHIFT)
+
+#define HINIC_SQ_WQE_SIZE(nr_sges) \
+ (sizeof(struct hinic_sq_ctrl) + \
+ sizeof(struct hinic_sq_task) + \
+ (nr_sges) * sizeof(struct hinic_sq_bufdesc))
+
+#define HINIC_SCMD_DATA_LEN 16
+
+#define HINIC_MAX_SQ_BUFDESCS 17
+
+#define HINIC_SQ_WQE_MAX_SIZE 320
+#define HINIC_RQ_WQE_SIZE 32
+
+#define HINIC_MSS_DEFAULT 0x3E00
+#define HINIC_MSS_MIN 0x50
+
+#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16
+#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU
+
+#define RQ_CQE_STATUS_GET(val, member) (((val) >> \
+ RQ_CQE_STATUS_##member##_SHIFT) & \
+ RQ_CQE_STATUS_##member##_MASK)
+
+#define HINIC_GET_RX_NUM_LRO(status) \
+ RQ_CQE_STATUS_GET(status, NUM_LRO)
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0xFFFU
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U
+
+#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) (((val) >> \
+ RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \
+ RQ_CQE_OFFOLAD_TYPE_##member##_MASK)
+
+#define HINIC_GET_RX_PKT_TYPE(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)
+
+#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN)
+
+#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU
+#define RQ_CQE_SGE_VLAN_SHIFT 0
+
+#define RQ_CQE_SGE_GET(val, member) (((val) >> \
+ RQ_CQE_SGE_##member##_SHIFT) & \
+ RQ_CQE_SGE_##member##_MASK)
+
+#define HINIC_GET_RX_VLAN_TAG(vlan_len) \
+ RQ_CQE_SGE_GET(vlan_len, VLAN)
+
+#define HINIC_RSS_TYPE_VALID_SHIFT 23
+#define HINIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24
+#define HINIC_RSS_TYPE_IPV6_EXT_SHIFT 25
+#define HINIC_RSS_TYPE_TCP_IPV6_SHIFT 26
+#define HINIC_RSS_TYPE_IPV6_SHIFT 27
+#define HINIC_RSS_TYPE_TCP_IPV4_SHIFT 28
+#define HINIC_RSS_TYPE_IPV4_SHIFT 29
+#define HINIC_RSS_TYPE_UDP_IPV6_SHIFT 30
+#define HINIC_RSS_TYPE_UDP_IPV4_SHIFT 31
+
+#define HINIC_RSS_TYPE_SET(val, member) \
+ (((u32)(val) & 0x1) << HINIC_RSS_TYPE_##member##_SHIFT)
+
+#define HINIC_RSS_TYPE_GET(val, member) \
+ (((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
+
+enum hinic_l3_offload_type {
+ L3TYPE_UNKNOWN = 0,
+ IPV6_PKT = 1,
+ IPV4_PKT_NO_CHKSUM_OFFLOAD = 2,
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3,
+};
+
+enum hinic_l4_offload_type {
+ OFFLOAD_DISABLE = 0,
+ TCP_OFFLOAD_ENABLE = 1,
+ SCTP_OFFLOAD_ENABLE = 2,
+ UDP_OFFLOAD_ENABLE = 3,
+};
+
+enum hinic_l4_tunnel_type {
+ NOT_TUNNEL,
+ TUNNEL_UDP_NO_CSUM,
+ TUNNEL_UDP_CSUM,
+};
+
+enum hinic_outer_l3type {
+ HINIC_OUTER_L3TYPE_UNKNOWN = 0,
+ HINIC_OUTER_L3TYPE_IPV6 = 1,
+ HINIC_OUTER_L3TYPE_IPV4_NO_CHKSUM = 2,
+ HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3,
+};
+
+enum hinic_l2type {
+ HINIC_L2TYPE_ETH = 0,
+};
+
+struct hinic_cmdq_header {
+ u32 header_info;
+ u32 saved_data;
+};
+
+struct hinic_status {
+ u32 status_info;
+};
+
+struct hinic_ctrl {
+ u32 ctrl_info;
+};
+
+struct hinic_sge_resp {
+ struct hinic_sge sge;
+ u32 rsvd;
+};
+
+struct hinic_cmdq_completion {
+ /* HW Format */
+ union {
+ struct hinic_sge_resp sge_resp;
+ u64 direct_resp;
+ };
+};
+
+struct hinic_scmd_bufdesc {
+ u32 buf_len;
+ u32 rsvd;
+ u8 data[HINIC_SCMD_DATA_LEN];
+};
+
+struct hinic_lcmd_bufdesc {
+ struct hinic_sge sge;
+ u32 rsvd1;
+ u64 rsvd2;
+ u64 rsvd3;
+};
+
+struct hinic_cmdq_wqe_scmd {
+ struct hinic_cmdq_header header;
+ u64 rsvd;
+ struct hinic_status status;
+ struct hinic_ctrl ctrl;
+ struct hinic_cmdq_completion completion;
+ struct hinic_scmd_bufdesc buf_desc;
+};
+
+struct hinic_cmdq_wqe_lcmd {
+ struct hinic_cmdq_header header;
+ struct hinic_status status;
+ struct hinic_ctrl ctrl;
+ struct hinic_cmdq_completion completion;
+ struct hinic_lcmd_bufdesc buf_desc;
+};
+
+struct hinic_cmdq_direct_wqe {
+ struct hinic_cmdq_wqe_scmd wqe_scmd;
+};
+
+struct hinic_cmdq_wqe {
+ /* HW Format */
+ union {
+ struct hinic_cmdq_direct_wqe direct_wqe;
+ struct hinic_cmdq_wqe_lcmd wqe_lcmd;
+ };
+};
+
+struct hinic_sq_ctrl {
+ u32 ctrl_info;
+ u32 queue_info;
+};
+
+struct hinic_sq_task {
+ u32 pkt_info0;
+ u32 pkt_info1;
+ u32 pkt_info2;
+ u32 ufo_v6_identify;
+ u32 pkt_info4;
+ u32 zero_pad;
+};
+
+struct hinic_sq_bufdesc {
+ struct hinic_sge sge;
+ u32 rsvd;
+};
+
+struct hinic_sq_wqe {
+ struct hinic_sq_ctrl ctrl;
+ struct hinic_sq_task task;
+ struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS];
+};
+
+struct hinic_rq_cqe {
+ u32 status;
+ u32 len;
+
+ u32 offload_type;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+ u32 rsvd6;
+ u32 rsvd7;
+};
+
+struct hinic_rq_ctrl {
+ u32 ctrl_info;
+};
+
+struct hinic_rq_cqe_sect {
+ struct hinic_sge sge;
+ u32 rsvd;
+};
+
+struct hinic_rq_bufdesc {
+ u32 hi_addr;
+ u32 lo_addr;
+};
+
+struct hinic_rq_wqe {
+ struct hinic_rq_ctrl ctrl;
+ u32 rsvd;
+ struct hinic_rq_cqe_sect cqe_sect;
+ struct hinic_rq_bufdesc buf_desc;
+};
+
+struct hinic_hw_wqe {
+ /* HW Format */
+ union {
+ struct hinic_cmdq_wqe cmdq_wqe;
+ struct hinic_sq_wqe sq_wqe;
+ struct hinic_rq_wqe rq_wqe;
+ };
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
new file mode 100644
index 0000000000..499c657d37
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -0,0 +1,1506 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <net/ip.h>
+#include <net/devlink.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include "hinic_debugfs.h"
+#include "hinic_hw_qp.h"
+#include "hinic_hw_dev.h"
+#include "hinic_devlink.h"
+#include "hinic_port.h"
+#include "hinic_tx.h"
+#include "hinic_rx.h"
+#include "hinic_dev.h"
+#include "hinic_sriov.h"
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int tx_weight = 64;
+module_param(tx_weight, uint, 0644);
+MODULE_PARM_DESC(tx_weight, "Number of Tx packets for NAPI budget (default=64)");
+
+static unsigned int rx_weight = 64;
+module_param(rx_weight, uint, 0644);
+MODULE_PARM_DESC(rx_weight, "Number of Rx packets for NAPI budget (default=64)");
+
+#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822
+#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200
+#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205
+#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210
+#define HINIC_DEV_ID_VF 0x375e
+
+#define HINIC_WQ_NAME "hinic_dev"
+
+#define MSG_ENABLE_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+
+#define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8
+
+#define HINIC_LRO_RX_TIMER_DEFAULT 16
+
+#define work_to_rx_mode_work(work) \
+ container_of(work, struct hinic_rx_mode_work, work)
+
+#define rx_mode_work_to_nic_dev(rx_mode_work) \
+ container_of(rx_mode_work, struct hinic_dev, rx_mode_work)
+
+#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
+
+#define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 32
+#define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
+
+static int change_mac_addr(struct net_device *netdev, const u8 *addr);
+
+static int set_features(struct hinic_dev *nic_dev,
+ netdev_features_t pre_features,
+ netdev_features_t features, bool force_change);
+
+static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq)
+{
+ struct hinic_rxq_stats rx_stats;
+
+ hinic_rxq_get_stats(rxq, &rx_stats);
+
+ nic_rx_stats->bytes += rx_stats.bytes;
+ nic_rx_stats->pkts += rx_stats.pkts;
+ nic_rx_stats->errors += rx_stats.errors;
+ nic_rx_stats->csum_errors += rx_stats.csum_errors;
+ nic_rx_stats->other_errors += rx_stats.other_errors;
+}
+
+static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq)
+{
+ struct hinic_txq_stats tx_stats;
+
+ hinic_txq_get_stats(txq, &tx_stats);
+
+ nic_tx_stats->bytes += tx_stats.bytes;
+ nic_tx_stats->pkts += tx_stats.pkts;
+ nic_tx_stats->tx_busy += tx_stats.tx_busy;
+ nic_tx_stats->tx_wake += tx_stats.tx_wake;
+ nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
+ nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
+}
+
+static void gather_nic_stats(struct hinic_dev *nic_dev,
+ struct hinic_rxq_stats *nic_rx_stats,
+ struct hinic_txq_stats *nic_tx_stats)
+{
+ int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
+
+ for (i = 0; i < num_qps; i++)
+ gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]);
+
+ for (i = 0; i < num_qps; i++)
+ gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]);
+}
+
+/**
+ * create_txqs - Create the Logical Tx Queues of a specific NIC device
+ * @nic_dev: the specific NIC device
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int create_txqs(struct hinic_dev *nic_dev)
+{
+ int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ struct net_device *netdev = nic_dev->netdev;
+
+ if (nic_dev->txqs)
+ return -EINVAL;
+
+ nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs,
+ sizeof(*nic_dev->txqs), GFP_KERNEL);
+ if (!nic_dev->txqs)
+ return -ENOMEM;
+
+ hinic_sq_dbgfs_init(nic_dev);
+
+ for (i = 0; i < num_txqs; i++) {
+ struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
+
+ err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to init Txq\n");
+ goto err_init_txq;
+ }
+
+ err = hinic_sq_debug_add(nic_dev, i);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to add SQ%d debug\n", i);
+ goto err_add_sq_dbg;
+ }
+ }
+
+ return 0;
+
+err_add_sq_dbg:
+ hinic_clean_txq(&nic_dev->txqs[i]);
+err_init_txq:
+ for (j = 0; j < i; j++) {
+ hinic_sq_debug_rem(nic_dev->txqs[j].sq);
+ hinic_clean_txq(&nic_dev->txqs[j]);
+ }
+
+ hinic_sq_dbgfs_uninit(nic_dev);
+
+ devm_kfree(&netdev->dev, nic_dev->txqs);
+ return err;
+}
+
+static void enable_txqs_napi(struct hinic_dev *nic_dev)
+{
+ int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ int i;
+
+ for (i = 0; i < num_txqs; i++)
+ napi_enable(&nic_dev->txqs[i].napi);
+}
+
+static void disable_txqs_napi(struct hinic_dev *nic_dev)
+{
+ int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ int i;
+
+ for (i = 0; i < num_txqs; i++)
+ napi_disable(&nic_dev->txqs[i].napi);
+}
+
+/**
+ * free_txqs - Free the Logical Tx Queues of a specific NIC device
+ * @nic_dev: the specific NIC device
+ **/
+static void free_txqs(struct hinic_dev *nic_dev)
+{
+ int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ struct net_device *netdev = nic_dev->netdev;
+
+ if (!nic_dev->txqs)
+ return;
+
+ for (i = 0; i < num_txqs; i++) {
+ hinic_sq_debug_rem(nic_dev->txqs[i].sq);
+ hinic_clean_txq(&nic_dev->txqs[i]);
+ }
+
+ hinic_sq_dbgfs_uninit(nic_dev);
+
+ devm_kfree(&netdev->dev, nic_dev->txqs);
+ nic_dev->txqs = NULL;
+}
+
+/**
+ * create_rxqs - Create the Logical Rx Queues of a specific NIC device
+ * @nic_dev: the specific NIC device
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int create_rxqs(struct hinic_dev *nic_dev)
+{
+ int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ struct net_device *netdev = nic_dev->netdev;
+
+ if (nic_dev->rxqs)
+ return -EINVAL;
+
+ nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs,
+ sizeof(*nic_dev->rxqs), GFP_KERNEL);
+ if (!nic_dev->rxqs)
+ return -ENOMEM;
+
+ hinic_rq_dbgfs_init(nic_dev);
+
+ for (i = 0; i < num_rxqs; i++) {
+ struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
+
+ err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to init rxq\n");
+ goto err_init_rxq;
+ }
+
+ err = hinic_rq_debug_add(nic_dev, i);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to add RQ%d debug\n", i);
+ goto err_add_rq_dbg;
+ }
+ }
+
+ return 0;
+
+err_add_rq_dbg:
+ hinic_clean_rxq(&nic_dev->rxqs[i]);
+err_init_rxq:
+ for (j = 0; j < i; j++) {
+ hinic_rq_debug_rem(nic_dev->rxqs[j].rq);
+ hinic_clean_rxq(&nic_dev->rxqs[j]);
+ }
+
+ hinic_rq_dbgfs_uninit(nic_dev);
+
+ devm_kfree(&netdev->dev, nic_dev->rxqs);
+ return err;
+}
+
+/**
+ * free_rxqs - Free the Logical Rx Queues of a specific NIC device
+ * @nic_dev: the specific NIC device
+ **/
+static void free_rxqs(struct hinic_dev *nic_dev)
+{
+ int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ struct net_device *netdev = nic_dev->netdev;
+
+ if (!nic_dev->rxqs)
+ return;
+
+ for (i = 0; i < num_rxqs; i++) {
+ hinic_rq_debug_rem(nic_dev->rxqs[i].rq);
+ hinic_clean_rxq(&nic_dev->rxqs[i]);
+ }
+
+ hinic_rq_dbgfs_uninit(nic_dev);
+
+ devm_kfree(&netdev->dev, nic_dev->rxqs);
+ nic_dev->rxqs = NULL;
+}
+
+static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
+{
+ return hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
+}
+
+static int hinic_rss_init(struct hinic_dev *nic_dev)
+{
+ u8 default_rss_key[HINIC_RSS_KEY_SIZE];
+ u8 tmpl_idx = nic_dev->rss_tmpl_idx;
+ u32 *indir_tbl;
+ int err, i;
+
+ indir_tbl = kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32), GFP_KERNEL);
+ if (!indir_tbl)
+ return -ENOMEM;
+
+ netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key));
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+ indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss);
+
+ err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key);
+ if (err)
+ goto out;
+
+ err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl);
+ if (err)
+ goto out;
+
+ err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type);
+ if (err)
+ goto out;
+
+ err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx,
+ nic_dev->rss_hash_engine);
+ if (err)
+ goto out;
+
+ err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);
+ if (err)
+ goto out;
+
+out:
+ kfree(indir_tbl);
+ return err;
+}
+
+static void hinic_rss_deinit(struct hinic_dev *nic_dev)
+{
+ hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx);
+}
+
+static void hinic_init_rss_parameters(struct hinic_dev *nic_dev)
+{
+ nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
+ nic_dev->rss_type.tcp_ipv6_ext = 1;
+ nic_dev->rss_type.ipv6_ext = 1;
+ nic_dev->rss_type.tcp_ipv6 = 1;
+ nic_dev->rss_type.ipv6 = 1;
+ nic_dev->rss_type.tcp_ipv4 = 1;
+ nic_dev->rss_type.ipv4 = 1;
+ nic_dev->rss_type.udp_ipv6 = 1;
+ nic_dev->rss_type.udp_ipv4 = 1;
+}
+
+static void hinic_enable_rss(struct hinic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int i, node, err = 0;
+ u16 num_cpus = 0;
+
+ if (nic_dev->max_qps <= 1) {
+ nic_dev->flags &= ~HINIC_RSS_ENABLE;
+ nic_dev->rss_limit = nic_dev->max_qps;
+ nic_dev->num_qps = nic_dev->max_qps;
+ nic_dev->num_rss = nic_dev->max_qps;
+
+ return;
+ }
+
+ err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
+ nic_dev->flags &= ~HINIC_RSS_ENABLE;
+ nic_dev->max_qps = 1;
+ nic_dev->rss_limit = nic_dev->max_qps;
+ nic_dev->num_qps = nic_dev->max_qps;
+ nic_dev->num_rss = nic_dev->max_qps;
+
+ return;
+ }
+
+ nic_dev->flags |= HINIC_RSS_ENABLE;
+
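+	/* prefer one queue per online CPU on the device's NUMA node,
+	 * falling back to all online CPUs if none is local
+	 */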
+ for (i = 0; i < num_online_cpus(); i++) {
+ node = cpu_to_node(i);
+ if (node == dev_to_node(&pdev->dev))
+ num_cpus++;
+ }
+
+ if (!num_cpus)
+ num_cpus = num_online_cpus();
+
+ nic_dev->num_qps = hinic_hwdev_num_qps(hwdev);
+ nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
+
+ nic_dev->rss_limit = nic_dev->num_qps;
+ nic_dev->num_rss = nic_dev->num_qps;
+
+ hinic_init_rss_parameters(nic_dev);
+ err = hinic_rss_init(nic_dev);
+ if (err)
+ netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
+}
+
+int hinic_open(struct net_device *netdev)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ enum hinic_port_link_state link_state;
+ int err, ret;
+
+ if (!(nic_dev->flags & HINIC_INTF_UP)) {
+ err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth,
+ nic_dev->rq_depth);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+				  "Failed to bring the HW interface up\n");
+ return err;
+ }
+ }
+
+ err = create_txqs(nic_dev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to create Tx queues\n");
+ goto err_create_txqs;
+ }
+
+ enable_txqs_napi(nic_dev);
+
+ err = create_rxqs(nic_dev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to create Rx queues\n");
+ goto err_create_rxqs;
+ }
+
+ hinic_enable_rss(nic_dev);
+
+ err = hinic_configure_max_qnum(nic_dev);
+ if (err) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to configure the maximum number of queues\n");
+ goto err_port_state;
+ }
+
+ netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
+ netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);
+
+ err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to set port state\n");
+ goto err_port_state;
+ }
+
+ err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to set func port state\n");
+ goto err_func_port_state;
+ }
+
+ down(&nic_dev->mgmt_lock);
+
+ err = hinic_port_link_state(nic_dev, &link_state);
+ if (err) {
+ netif_err(nic_dev, drv, netdev, "Failed to get link state\n");
+ goto err_port_link;
+ }
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state);
+
+ if (link_state == HINIC_LINK_STATE_UP) {
+ nic_dev->flags |= HINIC_LINK_UP;
+ nic_dev->cable_unplugged = false;
+ nic_dev->module_unrecognized = false;
+ }
+
+ nic_dev->flags |= HINIC_INTF_UP;
+
+ if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
+ (HINIC_LINK_UP | HINIC_INTF_UP)) {
+ netif_info(nic_dev, drv, netdev, "link + intf UP\n");
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ }
+
+ up(&nic_dev->mgmt_lock);
+
+ netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n");
+ return 0;
+
+err_port_link:
+ up(&nic_dev->mgmt_lock);
+ ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
+ if (ret)
+ netif_warn(nic_dev, drv, netdev,
+ "Failed to revert func port state\n");
+
+err_func_port_state:
+ ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
+ if (ret)
+ netif_warn(nic_dev, drv, netdev,
+ "Failed to revert port state\n");
+err_port_state:
+ free_rxqs(nic_dev);
+ if (nic_dev->flags & HINIC_RSS_ENABLE) {
+ hinic_rss_deinit(nic_dev);
+ hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
+ }
+
+err_create_rxqs:
+ disable_txqs_napi(nic_dev);
+ free_txqs(nic_dev);
+
+err_create_txqs:
+ if (!(nic_dev->flags & HINIC_INTF_UP))
+ hinic_hwdev_ifdown(nic_dev->hwdev);
+ return err;
+}
+
+int hinic_close(struct net_device *netdev)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ unsigned int flags;
+
+	/* Disable txq napi first to avoid rewaking the txq in free_tx_poll */
+ disable_txqs_napi(nic_dev);
+
+ down(&nic_dev->mgmt_lock);
+
+ flags = nic_dev->flags;
+ nic_dev->flags &= ~HINIC_INTF_UP;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ up(&nic_dev->mgmt_lock);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
+
+ hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
+
+ hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
+
+ if (nic_dev->flags & HINIC_RSS_ENABLE) {
+ hinic_rss_deinit(nic_dev);
+ hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
+ }
+
+ free_rxqs(nic_dev);
+ free_txqs(nic_dev);
+
+ if (flags & HINIC_INTF_UP)
+ hinic_hwdev_ifdown(nic_dev->hwdev);
+
+ netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n");
+ return 0;
+}
+
+static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu);
+
+ err = hinic_port_set_mtu(nic_dev, new_mtu);
+ if (err)
+ netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
+ else
+ netdev->mtu = new_mtu;
+
+ return err;
+}
+
+/**
+ * change_mac_addr - change the main mac address of network device
+ * @netdev: network device
+ * @addr: mac address to set
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int change_mac_addr(struct net_device *netdev, const u8 *addr)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 vid = 0;
+ int err;
+
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+
+ netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ down(&nic_dev->mgmt_lock);
+
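+	/* the MAC is programmed per vlan: update the entry for vid 0 and
+	 * for every vid set in vlan_bitmap
+	 */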
+ do {
+ err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to delete mac\n");
+ break;
+ }
+
+ err = hinic_port_add_mac(nic_dev, addr, vid);
+ if (err) {
+ netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
+ break;
+ }
+
+ vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
+ } while (vid != VLAN_N_VID);
+
+ up(&nic_dev->mgmt_lock);
+ return err;
+}
+
+static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
+{
+ unsigned char new_mac[ETH_ALEN];
+ struct sockaddr *saddr = addr;
+ int err;
+
+ memcpy(new_mac, saddr->sa_data, ETH_ALEN);
+
+ err = change_mac_addr(netdev, new_mac);
+ if (!err)
+ eth_hw_addr_set(netdev, new_mac);
+
+ return err;
+}
+
+/**
+ * add_mac_addr - add mac address to network device
+ * @netdev: network device
+ * @addr: mac address to add
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int add_mac_addr(struct net_device *netdev, const u8 *addr)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 vid = 0;
+ int err;
+
+ netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ down(&nic_dev->mgmt_lock);
+
+ do {
+ err = hinic_port_add_mac(nic_dev, addr, vid);
+ if (err) {
+ netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
+ break;
+ }
+
+ vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
+ } while (vid != VLAN_N_VID);
+
+ up(&nic_dev->mgmt_lock);
+ return err;
+}
+
+/**
+ * remove_mac_addr - remove mac address from network device
+ * @netdev: network device
+ * @addr: mac address to remove
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int remove_mac_addr(struct net_device *netdev, const u8 *addr)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 vid = 0;
+ int err;
+
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+
+ netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ down(&nic_dev->mgmt_lock);
+
+ do {
+ err = hinic_port_del_mac(nic_dev, addr, vid);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to delete mac\n");
+ break;
+ }
+
+ vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
+ } while (vid != VLAN_N_VID);
+
+ up(&nic_dev->mgmt_lock);
+ return err;
+}
+
+static int hinic_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int ret, err;
+
+ netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid);
+
+ down(&nic_dev->mgmt_lock);
+
+ err = hinic_port_add_vlan(nic_dev, vid);
+ if (err) {
+ netif_err(nic_dev, drv, netdev, "Failed to add vlan\n");
+ goto err_vlan_add;
+ }
+
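+	/* HINIC_PF_SET_VF_ALREADY is tolerated: the PF has already
+	 * programmed this VF's MAC
+	 */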
+ err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
+ if (err && err != HINIC_PF_SET_VF_ALREADY) {
+ netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
+ goto err_add_mac;
+ }
+
+ bitmap_set(nic_dev->vlan_bitmap, vid, 1);
+
+ up(&nic_dev->mgmt_lock);
+ return 0;
+
+err_add_mac:
+ ret = hinic_port_del_vlan(nic_dev, vid);
+ if (ret)
+ netif_err(nic_dev, drv, netdev,
+ "Failed to revert by removing vlan\n");
+
+err_vlan_add:
+ up(&nic_dev->mgmt_lock);
+ return err;
+}
+
+static int hinic_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid);
+
+ down(&nic_dev->mgmt_lock);
+
+ err = hinic_port_del_vlan(nic_dev, vid);
+ if (err) {
+ netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
+ goto err_del_vlan;
+ }
+
+ bitmap_clear(nic_dev->vlan_bitmap, vid, 1);
+
+ up(&nic_dev->mgmt_lock);
+ return 0;
+
+err_del_vlan:
+ up(&nic_dev->mgmt_lock);
+ return err;
+}
+
+static void set_rx_mode(struct work_struct *work)
+{
+ struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
+ struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
+
+ hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);
+
+ __dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
+ __dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
+}
+
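+/* ndo_set_rx_mode runs in atomic context, while programming the rx mode
+ * and syncing the MAC lists may sleep, so the work is deferred to the
+ * driver workqueue.
+ */
+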
+static void hinic_set_rx_mode(struct net_device *netdev)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_rx_mode_work *rx_mode_work;
+ u32 rx_mode;
+
+ rx_mode_work = &nic_dev->rx_mode_work;
+
+ rx_mode = HINIC_RX_MODE_UC |
+ HINIC_RX_MODE_MC |
+ HINIC_RX_MODE_BC;
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ rx_mode |= HINIC_RX_MODE_PROMISC;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ rx_mode |= HINIC_RX_MODE_MC_ALL;
+ }
+
+ rx_mode_work->rx_mode = rx_mode;
+
+ queue_work(nic_dev->workq, &rx_mode_work->work);
+}
+
+static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 sw_pi, hw_ci, sw_ci;
+ struct hinic_sq *sq;
+ u16 num_sqs, q_id;
+
+ num_sqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+
+ netif_err(nic_dev, drv, netdev, "Tx timeout\n");
+
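+	/* dump the SW/HW ring indices of every stopped txq */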
+ for (q_id = 0; q_id < num_sqs; q_id++) {
+ if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
+ continue;
+
+ sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);
+ sw_pi = atomic_read(&sq->wq->prod_idx) & sq->wq->mask;
+ hw_ci = be16_to_cpu(*(u16 *)(sq->hw_ci_addr)) & sq->wq->mask;
+ sw_ci = atomic_read(&sq->wq->cons_idx) & sq->wq->mask;
+ netif_err(nic_dev, drv, netdev, "Txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n",
+ q_id, sw_pi, hw_ci, sw_ci,
+ nic_dev->txqs[q_id].napi.state);
+ }
+}
+
+static void hinic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_rxq_stats nic_rx_stats = {};
+ struct hinic_txq_stats nic_tx_stats = {};
+
+ if (nic_dev->flags & HINIC_INTF_UP)
+ gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats);
+
+ stats->rx_bytes = nic_rx_stats.bytes;
+ stats->rx_packets = nic_rx_stats.pkts;
+ stats->rx_errors = nic_rx_stats.errors;
+
+ stats->tx_bytes = nic_tx_stats.bytes;
+ stats->tx_packets = nic_tx_stats.pkts;
+ stats->tx_errors = nic_tx_stats.tx_dropped;
+}
+
+static int hinic_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ return set_features(nic_dev, nic_dev->netdev->features,
+ features, false);
+}
+
+static netdev_features_t hinic_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features & NETIF_F_RXCSUM)) {
+ netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n");
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
+static const struct net_device_ops hinic_netdev_ops = {
+ .ndo_open = hinic_open,
+ .ndo_stop = hinic_close,
+ .ndo_change_mtu = hinic_change_mtu,
+ .ndo_set_mac_address = hinic_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
+ .ndo_set_rx_mode = hinic_set_rx_mode,
+ .ndo_start_xmit = hinic_xmit_frame,
+ .ndo_tx_timeout = hinic_tx_timeout,
+ .ndo_get_stats64 = hinic_get_stats64,
+ .ndo_fix_features = hinic_fix_features,
+ .ndo_set_features = hinic_set_features,
+ .ndo_set_vf_mac = hinic_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
+ .ndo_get_vf_config = hinic_ndo_get_vf_config,
+ .ndo_set_vf_trust = hinic_ndo_set_vf_trust,
+ .ndo_set_vf_rate = hinic_ndo_set_vf_bw,
+ .ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
+ .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
+};
+
+static const struct net_device_ops hinicvf_netdev_ops = {
+ .ndo_open = hinic_open,
+ .ndo_stop = hinic_close,
+ .ndo_change_mtu = hinic_change_mtu,
+ .ndo_set_mac_address = hinic_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
+ .ndo_set_rx_mode = hinic_set_rx_mode,
+ .ndo_start_xmit = hinic_xmit_frame,
+ .ndo_tx_timeout = hinic_tx_timeout,
+ .ndo_get_stats64 = hinic_get_stats64,
+ .ndo_fix_features = hinic_fix_features,
+ .ndo_set_features = hinic_set_features,
+};
+
+static void netdev_features_init(struct net_device *netdev)
+{
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM | NETIF_F_LRO |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->vlan_features = netdev->hw_features;
+
+ netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_UDP_TUNNEL;
+}
+
+static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
+{
+ struct hinic_nic_cfg *nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
+ struct hinic_pause_config pause_info = {0};
+ struct hinic_port_cap port_cap = {0};
+
+ if (hinic_port_get_cap(nic_dev, &port_cap))
+ return;
+
+ mutex_lock(&nic_cfg->cfg_mutex);
+ if (nic_cfg->pause_set || !port_cap.autoneg_state) {
+ nic_cfg->auto_neg = port_cap.autoneg_state;
+ pause_info.auto_neg = nic_cfg->auto_neg;
+ pause_info.rx_pause = nic_cfg->rx_pause;
+ pause_info.tx_pause = nic_cfg->tx_pause;
+ hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
+ }
+ mutex_unlock(&nic_cfg->cfg_mutex);
+}
+
+/**
+ * link_status_event_handler - link event handler
+ * @handle: nic device for the handler
+ * @buf_in: input buffer
+ * @in_size: input size
+ * @buf_out: output buffer
+ * @out_size: returned output size
+ **/
+static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_link_status *link_status, *ret_link_status;
+ struct hinic_dev *nic_dev = handle;
+
+ link_status = buf_in;
+
+ if (link_status->link == HINIC_LINK_STATE_UP) {
+ down(&nic_dev->mgmt_lock);
+
+ nic_dev->flags |= HINIC_LINK_UP;
+ nic_dev->cable_unplugged = false;
+ nic_dev->module_unrecognized = false;
+
+ if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
+ (HINIC_LINK_UP | HINIC_INTF_UP)) {
+ netif_carrier_on(nic_dev->netdev);
+ netif_tx_wake_all_queues(nic_dev->netdev);
+ }
+
+ up(&nic_dev->mgmt_lock);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_refresh_nic_cfg(nic_dev);
+
+ netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n");
+ } else {
+ down(&nic_dev->mgmt_lock);
+
+ nic_dev->flags &= ~HINIC_LINK_UP;
+
+ netif_carrier_off(nic_dev->netdev);
+ netif_tx_disable(nic_dev->netdev);
+
+ up(&nic_dev->mgmt_lock);
+
+ netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
+ }
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev,
+ link_status->link);
+
+ ret_link_status = buf_out;
+ ret_link_status->status = 0;
+
+ *out_size = sizeof(*ret_link_status);
+}
+
+static void cable_plug_event(void *handle,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_cable_plug_event *plug_event = buf_in;
+ struct hinic_dev *nic_dev = handle;
+
+	nic_dev->cable_unplugged = !plug_event->plugged;
+
+ *out_size = sizeof(*plug_event);
+ plug_event = buf_out;
+ plug_event->status = 0;
+}
+
+static void link_err_event(void *handle,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_link_err_event *link_err = buf_in;
+ struct hinic_dev *nic_dev = handle;
+
+ if (link_err->err_type >= LINK_ERR_NUM)
+ netif_info(nic_dev, link, nic_dev->netdev,
+			   "Link failed, unknown error type: 0x%x\n",
+ link_err->err_type);
+ else
+ nic_dev->module_unrecognized = true;
+
+ *out_size = sizeof(*link_err);
+ link_err = buf_out;
+ link_err->status = 0;
+}
+
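+/*
+ * When force_change is set, every offload is (re)programmed regardless of the
+ * previous state; otherwise only the bits that differ between pre_features
+ * and features (their XOR) are touched. Any offload the hardware rejects is
+ * collected in failed_features and flipped back in netdev->features.
+ */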
+static int set_features(struct hinic_dev *nic_dev,
+ netdev_features_t pre_features,
+ netdev_features_t features, bool force_change)
+{
+ netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
+ u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
+ netdev_features_t failed_features = 0;
+ int ret = 0;
+ int err = 0;
+
+ if (changed & NETIF_F_TSO) {
+ ret = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
+ HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
+ if (ret) {
+ err = ret;
+ failed_features |= NETIF_F_TSO;
+ }
+ }
+
+ if (changed & NETIF_F_RXCSUM) {
+ ret = hinic_set_rx_csum_offload(nic_dev, csum_en);
+ if (ret) {
+ err = ret;
+ failed_features |= NETIF_F_RXCSUM;
+ }
+ }
+
+ if (changed & NETIF_F_LRO) {
+ ret = hinic_set_rx_lro_state(nic_dev,
+ !!(features & NETIF_F_LRO),
+ HINIC_LRO_RX_TIMER_DEFAULT,
+ HINIC_LRO_MAX_WQE_NUM_DEFAULT);
+ if (ret) {
+ err = ret;
+ failed_features |= NETIF_F_LRO;
+ }
+ }
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+ ret = hinic_set_rx_vlan_offload(nic_dev,
+ !!(features &
+ NETIF_F_HW_VLAN_CTAG_RX));
+ if (ret) {
+ err = ret;
+ failed_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+ }
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ ret = hinic_set_vlan_fliter(nic_dev,
+ !!(features &
+ NETIF_F_HW_VLAN_CTAG_FILTER));
+ if (ret) {
+ err = ret;
+ failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+ }
+
+ if (err) {
+ nic_dev->netdev->features = features ^ failed_features;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hinic_init_intr_coalesce(struct hinic_dev *nic_dev)
+{
+ u64 size;
+ u16 i;
+
+ size = sizeof(struct hinic_intr_coal_info) * nic_dev->max_qps;
+ nic_dev->rx_intr_coalesce = kzalloc(size, GFP_KERNEL);
+ if (!nic_dev->rx_intr_coalesce)
+ return -ENOMEM;
+ nic_dev->tx_intr_coalesce = kzalloc(size, GFP_KERNEL);
+ if (!nic_dev->tx_intr_coalesce) {
+ kfree(nic_dev->rx_intr_coalesce);
+ return -ENOMEM;
+ }
+
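+	/* Seed both the RX and TX side of every queue pair with the
+	 * driver's default interrupt moderation settings.
+	 */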
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ nic_dev->rx_intr_coalesce[i].pending_limt =
+ HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
+ nic_dev->rx_intr_coalesce[i].coalesce_timer_cfg =
+ HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+ nic_dev->rx_intr_coalesce[i].resend_timer_cfg =
+ HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ nic_dev->tx_intr_coalesce[i].pending_limt =
+ HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
+ nic_dev->tx_intr_coalesce[i].coalesce_timer_cfg =
+ HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+ nic_dev->tx_intr_coalesce[i].resend_timer_cfg =
+ HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ }
+
+ return 0;
+}
+
+static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev)
+{
+ kfree(nic_dev->tx_intr_coalesce);
+ kfree(nic_dev->rx_intr_coalesce);
+}
+
+/**
+ * nic_dev_init - Initialize the NIC device
+ * @pdev: the NIC pci device
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int nic_dev_init(struct pci_dev *pdev)
+{
+ struct hinic_rx_mode_work *rx_mode_work;
+ struct hinic_dev *nic_dev;
+ struct net_device *netdev;
+ struct hinic_hwdev *hwdev;
+ struct devlink *devlink;
+ u8 addr[ETH_ALEN];
+ int err, num_qps;
+
+ devlink = hinic_devlink_alloc(&pdev->dev);
+ if (!devlink) {
+ dev_err(&pdev->dev, "Hinic devlink alloc failed\n");
+ return -ENOMEM;
+ }
+
+ hwdev = hinic_init_hwdev(pdev, devlink);
+ if (IS_ERR(hwdev)) {
+ dev_err(&pdev->dev, "Failed to initialize HW device\n");
+ hinic_devlink_free(devlink);
+ return PTR_ERR(hwdev);
+ }
+
+ num_qps = hinic_hwdev_num_qps(hwdev);
+ if (num_qps <= 0) {
+		dev_err(&pdev->dev, "Invalid number of QPs\n");
+ err = -EINVAL;
+ goto err_num_qps;
+ }
+
+ netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate Ethernet device\n");
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ if (!HINIC_IS_VF(hwdev->hwif))
+ netdev->netdev_ops = &hinic_netdev_ops;
+ else
+ netdev->netdev_ops = &hinicvf_netdev_ops;
+
+ netdev->max_mtu = HINIC_MAX_MTU_SIZE;
+ netdev->min_mtu = HINIC_MIN_MTU_SIZE;
+
+ nic_dev = netdev_priv(netdev);
+ nic_dev->netdev = netdev;
+ nic_dev->hwdev = hwdev;
+ nic_dev->msg_enable = MSG_ENABLE_DEFAULT;
+ nic_dev->flags = 0;
+ nic_dev->txqs = NULL;
+ nic_dev->rxqs = NULL;
+ nic_dev->tx_weight = tx_weight;
+ nic_dev->rx_weight = rx_weight;
+ nic_dev->sq_depth = HINIC_SQ_DEPTH;
+ nic_dev->rq_depth = HINIC_RQ_DEPTH;
+ nic_dev->sriov_info.hwdev = hwdev;
+ nic_dev->sriov_info.pdev = pdev;
+ nic_dev->max_qps = num_qps;
+ nic_dev->devlink = devlink;
+
+ hinic_set_ethtool_ops(netdev);
+
+ sema_init(&nic_dev->mgmt_lock, 1);
+
+ nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID,
+ GFP_KERNEL);
+ if (!nic_dev->vlan_bitmap) {
+ err = -ENOMEM;
+ goto err_vlan_bitmap;
+ }
+
+ nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME);
+ if (!nic_dev->workq) {
+ err = -ENOMEM;
+ goto err_workq;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+
+ err = hinic_port_get_mac(nic_dev, addr);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get mac address\n");
+ goto err_get_mac;
+ }
+ eth_hw_addr_set(netdev, addr);
+
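+	/*
+	 * A PF must expose a valid factory MAC. A VF may legitimately start
+	 * with an unassigned (invalid) MAC, in which case a random address
+	 * is generated instead.
+	 */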
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ dev_err(&pdev->dev, "Invalid MAC address\n");
+ err = -EIO;
+ goto err_add_mac;
+ }
+
+		dev_info(&pdev->dev, "Invalid MAC address %pM, using a random one\n",
+ netdev->dev_addr);
+ eth_hw_addr_random(netdev);
+ }
+
+ err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
+ if (err && err != HINIC_PF_SET_VF_ALREADY) {
+ dev_err(&pdev->dev, "Failed to add mac\n");
+ goto err_add_mac;
+ }
+
+ err = hinic_port_set_mtu(nic_dev, netdev->mtu);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set mtu\n");
+ goto err_set_mtu;
+ }
+
+ rx_mode_work = &nic_dev->rx_mode_work;
+ INIT_WORK(&rx_mode_work->work, set_rx_mode);
+
+ netdev_features_init(netdev);
+
+ netif_carrier_off(netdev);
+
+ hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
+ nic_dev, link_status_event_handler);
+ hinic_hwdev_cb_register(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT,
+ nic_dev, cable_plug_event);
+ hinic_hwdev_cb_register(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT,
+ nic_dev, link_err_event);
+
+ err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
+ if (err)
+ goto err_set_features;
+
+ /* enable pause and disable pfc by default */
+ err = hinic_dcb_set_pfc(nic_dev->hwdev, 0, 0);
+ if (err)
+ goto err_set_pfc;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ err = hinic_init_intr_coalesce(nic_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init_intr_coalesce\n");
+ goto err_init_intr;
+ }
+
+ hinic_dbg_init(nic_dev);
+
+ hinic_func_tbl_dbgfs_init(nic_dev);
+
+ err = hinic_func_table_debug_add(nic_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add func_table debug\n");
+ goto err_add_func_table_dbg;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto err_reg_netdev;
+ }
+
+ return 0;
+
+err_reg_netdev:
+ hinic_func_table_debug_rem(nic_dev);
+err_add_func_table_dbg:
+ hinic_func_tbl_dbgfs_uninit(nic_dev);
+ hinic_dbg_uninit(nic_dev);
+ hinic_free_intr_coalesce(nic_dev);
+err_init_intr:
+err_set_pfc:
+err_set_features:
+ hinic_hwdev_cb_unregister(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT);
+ hinic_hwdev_cb_unregister(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT);
+ hinic_hwdev_cb_unregister(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_LINK_STATUS);
+ cancel_work_sync(&rx_mode_work->work);
+
+err_set_mtu:
+ hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
+err_add_mac:
+err_get_mac:
+ pci_set_drvdata(pdev, NULL);
+ destroy_workqueue(nic_dev->workq);
+err_workq:
+err_vlan_bitmap:
+ free_netdev(netdev);
+
+err_alloc_etherdev:
+err_num_qps:
+ hinic_free_hwdev(hwdev);
+ hinic_devlink_free(devlink);
+ return err;
+}
+
+static int hinic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err = pci_enable_device(pdev);
+
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n");
+
+ err = pci_request_regions(pdev, HINIC_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request PCI regions\n");
+ goto err_pci_regions;
+ }
+
+ pci_set_master(pdev);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto err_dma_mask;
+ }
+
+ err = nic_dev_init(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize NIC device\n");
+ goto err_nic_dev_init;
+ }
+
+ dev_info(&pdev->dev, "HiNIC driver - probed\n");
+ return 0;
+
+err_nic_dev_init:
+err_dma_mask:
+ pci_release_regions(pdev);
+
+err_pci_regions:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
+{
+ struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
+ u32 loop_cnt = 0;
+
+ set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
+ usleep_range(9900, 10000);
+
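+	/* Poll in ~10ms steps until any in-flight SR-IOV enable/disable
+	 * finishes, giving up after HINIC_WAIT_SRIOV_CFG_TIMEOUT iterations.
+	 */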
+ while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) {
+ if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
+ !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
+ return;
+
+ usleep_range(9900, 10000);
+ loop_cnt++;
+ }
+}
+
+static void hinic_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct devlink *devlink = nic_dev->devlink;
+ struct hinic_rx_mode_work *rx_mode_work;
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ wait_sriov_cfg_complete(nic_dev);
+ hinic_pci_sriov_disable(pdev);
+ }
+
+ unregister_netdev(netdev);
+
+ hinic_func_table_debug_rem(nic_dev);
+
+ hinic_func_tbl_dbgfs_uninit(nic_dev);
+
+ hinic_dbg_uninit(nic_dev);
+
+ hinic_free_intr_coalesce(nic_dev);
+
+ hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
+
+ hinic_hwdev_cb_unregister(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT);
+ hinic_hwdev_cb_unregister(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT);
+ hinic_hwdev_cb_unregister(nic_dev->hwdev,
+ HINIC_MGMT_MSG_CMD_LINK_STATUS);
+
+ rx_mode_work = &nic_dev->rx_mode_work;
+ cancel_work_sync(&rx_mode_work->work);
+
+ pci_set_drvdata(pdev, NULL);
+
+ destroy_workqueue(nic_dev->workq);
+
+ hinic_free_hwdev(nic_dev->hwdev);
+
+ free_netdev(netdev);
+
+ hinic_devlink_free(devlink);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ dev_info(&pdev->dev, "HiNIC driver - removed\n");
+}
+
+static void hinic_shutdown(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id hinic_pci_table[] = {
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_VF), 0},
+ { 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, hinic_pci_table);
+
+static struct pci_driver hinic_driver = {
+ .name = HINIC_DRV_NAME,
+ .id_table = hinic_pci_table,
+ .probe = hinic_probe,
+ .remove = hinic_remove,
+ .shutdown = hinic_shutdown,
+ .sriov_configure = hinic_pci_sriov_configure,
+};
+
+static int __init hinic_module_init(void)
+{
+ int ret;
+
+ hinic_dbg_register_debugfs(HINIC_DRV_NAME);
+
+ ret = pci_register_driver(&hinic_driver);
+ if (ret)
+ hinic_dbg_unregister_debugfs();
+
+ return ret;
+}
+
+static void __exit hinic_module_exit(void)
+{
+ pci_unregister_driver(&hinic_driver);
+ hinic_dbg_unregister_debugfs();
+}
+
+module_init(hinic_module_init);
+module_exit(hinic_module_exit);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
new file mode 100644
index 0000000000..f81a43d2cd
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -0,0 +1,1412 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_dev.h"
+#include "hinic_port.h"
+#include "hinic_dev.h"
+
+enum mac_op {
+ MAC_DEL,
+ MAC_SET,
+};
+
+/**
+ * change_mac - change (add or delete) a MAC address
+ * @nic_dev: nic device
+ * @addr: mac address
+ * @vlan_id: vlan number to set with the mac
+ * @op: add or delete the mac
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
+ u16 vlan_id, enum mac_op op)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_port_mac_cmd port_mac_cmd;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mac_cmd);
+ struct pci_dev *pdev = hwif->pdev;
+ enum hinic_port_cmd cmd;
+ int err;
+
+ if (op == MAC_SET)
+ cmd = HINIC_PORT_CMD_SET_MAC;
+ else
+ cmd = HINIC_PORT_CMD_DEL_MAC;
+
+ port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ port_mac_cmd.vlan_id = vlan_id;
+ memcpy(port_mac_cmd.mac, addr, ETH_ALEN);
+
+ err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd,
+ sizeof(port_mac_cmd),
+ &port_mac_cmd, &out_size);
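+	/*
+	 * Two non-zero statuses are tolerated here: HINIC_PF_SET_VF_ALREADY
+	 * on a VF (the PF owns the VF's MAC) and HINIC_MGMT_STATUS_EXIST
+	 * (the address is already programmed); both are warned about below
+	 * rather than treated as failures.
+	 */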
+ if (err || out_size != sizeof(port_mac_cmd) ||
+ (port_mac_cmd.status &&
+ (port_mac_cmd.status != HINIC_PF_SET_VF_ALREADY || !HINIC_IS_VF(hwif)) &&
+ port_mac_cmd.status != HINIC_MGMT_STATUS_EXIST)) {
+ dev_err(&pdev->dev, "Failed to change MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_mac_cmd.status, out_size);
+ return -EFAULT;
+ }
+
+ if (port_mac_cmd.status == HINIC_PF_SET_VF_ALREADY) {
+		dev_warn(&pdev->dev, "PF has already set the VF MAC, ignoring %s operation\n",
+ (op == MAC_SET) ? "set" : "del");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ if (cmd == HINIC_PORT_CMD_SET_MAC && port_mac_cmd.status ==
+ HINIC_MGMT_STATUS_EXIST)
+		dev_warn(&pdev->dev, "MAC address already exists, ignoring set operation\n");
+
+ return 0;
+}
+
+/**
+ * hinic_port_add_mac - add mac address
+ * @nic_dev: nic device
+ * @addr: mac address
+ * @vlan_id: vlan number to set with the mac
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_add_mac(struct hinic_dev *nic_dev,
+ const u8 *addr, u16 vlan_id)
+{
+ return change_mac(nic_dev, addr, vlan_id, MAC_SET);
+}
+
+/**
+ * hinic_port_del_mac - remove mac address
+ * @nic_dev: nic device
+ * @addr: mac address
+ * @vlan_id: vlan number that is connected to the mac
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr,
+ u16 vlan_id)
+{
+ return change_mac(nic_dev, addr, vlan_id, MAC_DEL);
+}
+
+/**
+ * hinic_port_get_mac - get the mac address of the nic device
+ * @nic_dev: nic device
+ * @addr: returned mac address
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_port_mac_cmd port_mac_cmd;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mac_cmd);
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC,
+ &port_mac_cmd, sizeof(port_mac_cmd),
+ &port_mac_cmd, &out_size);
+ if (err || out_size != sizeof(port_mac_cmd) || port_mac_cmd.status) {
+ dev_err(&pdev->dev, "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_mac_cmd.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(addr, port_mac_cmd.mac, ETH_ALEN);
+ return 0;
+}
+
+/**
+ * hinic_port_set_mtu - set mtu
+ * @nic_dev: nic device
+ * @new_mtu: new mtu
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_port_mtu_cmd port_mtu_cmd;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mtu_cmd);
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ port_mtu_cmd.mtu = new_mtu;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU,
+ &port_mtu_cmd, sizeof(port_mtu_cmd),
+ &port_mtu_cmd, &out_size);
+ if (err || out_size != sizeof(port_mtu_cmd) || port_mtu_cmd.status) {
+ dev_err(&pdev->dev, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_mtu_cmd.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_port_add_vlan - add vlan to the nic device
+ * @nic_dev: nic device
+ * @vlan_id: the vlan number to add
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_port_vlan_cmd port_vlan_cmd;
+
+ port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ port_vlan_cmd.vlan_id = vlan_id;
+
+ return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ADD_VLAN,
+ &port_vlan_cmd, sizeof(port_vlan_cmd),
+ NULL, NULL);
+}
+
+/**
+ * hinic_port_del_vlan - delete vlan from the nic device
+ * @nic_dev: nic device
+ * @vlan_id: the vlan number to delete
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_port_vlan_cmd port_vlan_cmd;
+
+ port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ port_vlan_cmd.vlan_id = vlan_id;
+
+ return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_DEL_VLAN,
+ &port_vlan_cmd, sizeof(port_vlan_cmd),
+ NULL, NULL);
+}
+
+/**
+ * hinic_port_set_rx_mode - set rx mode in the nic device
+ * @nic_dev: nic device
+ * @rx_mode: the rx mode to set
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_port_rx_mode_cmd rx_mode_cmd;
+
+ rx_mode_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ rx_mode_cmd.rx_mode = rx_mode;
+
+ return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_MODE,
+ &rx_mode_cmd, sizeof(rx_mode_cmd),
+ NULL, NULL);
+}
+
+/**
+ * hinic_port_link_state - get the link state
+ * @nic_dev: nic device
+ * @link_state: the returned link state
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_link_state(struct hinic_dev *nic_dev,
+ enum hinic_port_link_state *link_state)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_port_link_cmd link_cmd;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(link_cmd);
+ int err;
+
+ link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
+ &link_cmd, sizeof(link_cmd),
+ &link_cmd, &out_size);
+ if (err || out_size != sizeof(link_cmd) || link_cmd.status) {
+ dev_err(&pdev->dev, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_cmd.status, out_size);
+ return -EINVAL;
+ }
+
+ *link_state = link_cmd.state;
+ return 0;
+}
+
+/**
+ * hinic_port_set_state - set port state
+ * @nic_dev: nic device
+ * @state: the state to set
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_port_state_cmd port_state;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(port_state);
+ int err;
+
+ if (HINIC_IS_VF(hwdev->hwif))
+ return 0;
+
+ port_state.state = state;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE,
+ &port_state, sizeof(port_state),
+ &port_state, &out_size);
+ if (err || out_size != sizeof(port_state) || port_state.status) {
+ dev_err(&pdev->dev, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_state.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_port_set_func_state - set func device state
+ * @nic_dev: nic device
+ * @state: the state to set
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_set_func_state(struct hinic_dev *nic_dev,
+ enum hinic_func_port_state state)
+{
+ struct hinic_port_func_state_cmd func_state;
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(func_state);
+ int err;
+
+ func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ func_state.state = state;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE,
+ &func_state, sizeof(func_state),
+ &func_state, &out_size);
+ if (err || out_size != sizeof(func_state) || func_state.status) {
+ dev_err(&pdev->dev, "Failed to set port func state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, func_state.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_port_get_cap - get port capabilities
+ * @nic_dev: nic device
+ * @port_cap: returned port capabilities
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_get_cap(struct hinic_dev *nic_dev,
+ struct hinic_port_cap *port_cap)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(*port_cap);
+ int err;
+
+ port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP,
+ port_cap, sizeof(*port_cap),
+ port_cap, &out_size);
+ if (err || out_size != sizeof(*port_cap) || port_cap->status) {
+ dev_err(&pdev->dev,
+ "Failed to get port capabilities, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_cap->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_port_set_tso - set port tso configuration
+ * @nic_dev: nic device
+ * @state: the tso state to set
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_tso_config tso_cfg = {0};
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(tso_cfg);
+ int err;
+
+ tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ tso_cfg.tso_en = state;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_TSO,
+ &tso_cfg, sizeof(tso_cfg),
+ &tso_cfg, &out_size);
+ if (err || out_size != sizeof(tso_cfg) || tso_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set port tso, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, tso_cfg.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
+{
+ struct hinic_checksum_offload rx_csum_cfg = {0};
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(rx_csum_cfg);
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+ rx_csum_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rx_csum_cfg.rx_csum_offload = en;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,
+ &rx_csum_cfg, sizeof(rx_csum_cfg),
+ &rx_csum_cfg, &out_size);
+ if (err || !out_size || rx_csum_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rx_csum_cfg.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_vlan_cfg vlan_cfg;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u16 out_size;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ out_size = sizeof(vlan_cfg);
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+ vlan_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ vlan_cfg.vlan_rx_offload = en;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD,
+ &vlan_cfg, sizeof(vlan_cfg),
+ &vlan_cfg, &out_size);
+ if (err || !out_size || vlan_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_vlan_filter vlan_filter;
+ u16 out_size = sizeof(vlan_filter);
+ int err;
+
+ vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ vlan_filter.enable = en;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER,
+ &vlan_filter, sizeof(vlan_filter),
+ &vlan_filter, &out_size);
+ if (vlan_filter.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+ HINIC_IS_VF(hwif)) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || vlan_filter.status) {
+ dev_err(&pdev->dev,
+			"Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_filter.status, out_size);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_rq_num rq_num = { 0 };
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(rq_num);
+ int err;
+
+ rq_num.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rq_num.num_rqs = num_rqs;
+ rq_num.rq_depth = ilog2(nic_dev->rq_depth);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP,
+ &rq_num, sizeof(rq_num),
+ &rq_num, &out_size);
+ if (err || !out_size || rq_num.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rxq number, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rq_num.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en,
+ u8 max_wqe_num)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_lro_config lro_cfg = { 0 };
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(lro_cfg);
+ int err;
+
+ lro_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ lro_cfg.lro_ipv4_en = ipv4_en;
+ lro_cfg.lro_ipv6_en = ipv6_en;
+ lro_cfg.lro_max_wqe_num = max_wqe_num;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO,
+ &lro_cfg, sizeof(lro_cfg),
+ &lro_cfg, &out_size);
+ if (err || !out_size || lro_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_cfg.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hinic_set_rx_lro_timer(struct hinic_dev *nic_dev, u32 timer_value)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_lro_timer lro_timer = { 0 };
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(lro_timer);
+ int err;
+
+ lro_timer.status = 0;
+ lro_timer.type = 0;
+ lro_timer.enable = 1;
+ lro_timer.timer = timer_value;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO_TIMER,
+ &lro_timer, sizeof(lro_timer),
+ &lro_timer, &out_size);
+ if (lro_timer.status == 0xFF) {
+		/* Status 0xFF means the firmware does not support this
+		 * command; treat it as success.
+		 */
+		lro_timer.status = 0;
+		dev_dbg(&pdev->dev,
+			"Setting the lro timer is not supported by the current FW version, the 1ms default will be used\n");
+ }
+
+ if (err || !out_size || lro_timer.status) {
+ dev_err(&pdev->dev,
+ "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_timer.status, out_size);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
+ u32 lro_timer, u32 wqe_num)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u8 ipv4_en;
+ u8 ipv6_en;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ ipv4_en = lro_en ? 1 : 0;
+ ipv6_en = lro_en ? 1 : 0;
+
+ err = hinic_set_rx_lro(nic_dev, ipv4_en, ipv6_en, (u8)wqe_num);
+ if (err)
+ return err;
+
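+	/* VFs cannot configure the LRO timer, so they are done once the
+	 * per-function LRO state is set.
+	 */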
+ if (HINIC_IS_VF(nic_dev->hwdev->hwif))
+ return 0;
+
+ err = hinic_set_rx_lro_timer(nic_dev, lro_timer);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int hinic_rss_set_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ const u32 *indir_table)
+{
+ struct hinic_rss_indirect_tbl *indir_tbl;
+ struct hinic_func_to_io *func_to_io;
+ struct hinic_cmdq_buf cmd_buf;
+ struct hinic_hwdev *hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u32 indir_size;
+ u64 out_param;
+ int err, i;
+ u32 *temp;
+
+ hwdev = nic_dev->hwdev;
+ func_to_io = &hwdev->func_to_io;
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
+ return err;
+ }
+
+ cmd_buf.size = sizeof(*indir_tbl);
+
+ indir_tbl = cmd_buf.buf;
+ indir_tbl->group_index = cpu_to_be32(tmpl_idx);
+
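+	/*
+	 * The table entries are bytes packed four to a 32-bit word; once a
+	 * group of four is filled, the whole word is converted to the
+	 * big-endian layout the hardware expects.
+	 */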
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
+ indir_tbl->entry[i] = indir_table[i];
+
+		if ((i & 0x3) == 0x3) {
+ temp = (u32 *)&indir_tbl->entry[i - 3];
+ *temp = cpu_to_be32(*temp);
+ }
+ }
+
+ /* cfg the rss indirect table by command queue */
+ indir_size = HINIC_RSS_INDIR_SIZE / 2;
+ indir_tbl->offset = 0;
+ indir_tbl->size = cpu_to_be32(indir_size);
+
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ &cmd_buf, &out_param);
+ if (err || out_param != 0) {
+ dev_err(&pdev->dev, "Failed to set rss indir table\n");
+ err = -EFAULT;
+ goto free_buf;
+ }
+
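+	/* Move the second half of the table (entries 128-255) to the start
+	 * of the same command buffer and write it at the matching offset.
+	 */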
+ indir_tbl->offset = cpu_to_be32(indir_size);
+ indir_tbl->size = cpu_to_be32(indir_size);
+ memcpy(&indir_tbl->entry[0], &indir_tbl->entry[indir_size], indir_size);
+
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ &cmd_buf, &out_param);
+ if (err || out_param != 0) {
+ dev_err(&pdev->dev, "Failed to set rss indir table\n");
+ err = -EFAULT;
+ }
+
+free_buf:
+ hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
+
+ return err;
+}
+
+int hinic_rss_get_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ u32 *indir_table)
+{
+ struct hinic_rss_indir_table rss_cfg = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(rss_cfg);
+ int err = 0, i;
+
+ rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rss_cfg.template_id = tmpl_idx;
+
+ err = hinic_port_msg_cmd(hwdev,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
+ &rss_cfg, sizeof(rss_cfg), &rss_cfg,
+ &out_size);
+ if (err || !out_size || rss_cfg.status) {
+ dev_err(&pdev->dev, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rss_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE);
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+ indir_table[i] = rss_cfg.indir[i];
+
+ return 0;
+}
+
+int hinic_set_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ struct hinic_rss_type rss_type)
+{
+ struct hinic_rss_context_tbl *ctx_tbl;
+ struct hinic_func_to_io *func_to_io;
+ struct hinic_cmdq_buf cmd_buf;
+ struct hinic_hwdev *hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u64 out_param;
+ u32 ctx = 0;
+ int err;
+
+ hwdev = nic_dev->hwdev;
+ func_to_io = &hwdev->func_to_io;
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
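+	/* Pack all enabled hash types, plus a valid bit, into the single
+	 * 32-bit context word that is written to the RSS context table.
+	 */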
+ ctx |= HINIC_RSS_TYPE_SET(1, VALID) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
+ HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
+
+ cmd_buf.size = sizeof(struct hinic_rss_context_tbl);
+
+ ctx_tbl = (struct hinic_rss_context_tbl *)cmd_buf.buf;
+ ctx_tbl->group_index = cpu_to_be32(tmpl_idx);
+ ctx_tbl->offset = 0;
+ ctx_tbl->size = sizeof(u32);
+ ctx_tbl->size = cpu_to_be32(ctx_tbl->size);
+ ctx_tbl->rsvd = 0;
+ ctx_tbl->ctx = cpu_to_be32(ctx);
+
+ /* cfg the rss context table by command queue */
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+ &cmd_buf, &out_param);
+
+ hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
+
+ if (err || out_param != 0) {
+ dev_err(&pdev->dev, "Failed to set rss context table, err: %d\n",
+ err);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ struct hinic_rss_type *rss_type)
+{
+ struct hinic_rss_context_table ctx_tbl = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(ctx_tbl);
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ int err;
+
+ if (!hwdev || !rss_type)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ ctx_tbl.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ ctx_tbl.template_id = tmpl_idx;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL,
+ &ctx_tbl, sizeof(ctx_tbl),
+ &ctx_tbl, &out_size);
+ if (err || !out_size || ctx_tbl.status) {
+ dev_err(&pdev->dev, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, ctx_tbl.status, out_size);
+ return -EINVAL;
+ }
+
+ rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4);
+ rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6);
+ rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT);
+ rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4);
+ rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6);
+ rss_type->tcp_ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context,
+ TCP_IPV6_EXT);
+ rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4);
+ rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6);
+
+ return 0;
+}
+
+int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id,
+ const u8 *temp)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_rss_key rss_key = { 0 };
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(rss_key);
+ int err;
+
+ rss_key.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rss_key.template_id = template_id;
+ memcpy(rss_key.key, temp, HINIC_RSS_KEY_SIZE);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL,
+ &rss_key, sizeof(rss_key),
+ &rss_key, &out_size);
+ if (err || !out_size || rss_key.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rss hash key, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rss_key.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ u8 *temp)
+{
+ struct hinic_rss_template_key temp_key = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(temp_key);
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ int err;
+
+ if (!hwdev || !temp)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ temp_key.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ temp_key.template_id = tmpl_idx;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,
+ &temp_key, sizeof(temp_key),
+ &temp_key, &out_size);
+ if (err || !out_size || temp_key.status) {
+		dev_err(&pdev->dev, "Failed to get hash key, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, temp_key.status, out_size);
+ return -EINVAL;
+ }
+
+ memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE);
+
+ return 0;
+}
+
+int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id,
+ u8 type)
+{
+ struct hinic_rss_engine_type rss_engine = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(rss_engine);
+ int err;
+
+ rss_engine.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rss_engine.hash_engine = type;
+ rss_engine.template_id = template_id;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,
+ &rss_engine, sizeof(rss_engine),
+ &rss_engine, &out_size);
+ if (err || !out_size || rss_engine.status) {
+ dev_err(&pdev->dev,
+ "Failed to set hash engine, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rss_engine.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx, u8 *type)
+{
+ struct hinic_rss_engine_type hash_type = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(hash_type);
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ int err;
+
+ if (!hwdev || !type)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ hash_type.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ hash_type.template_id = tmpl_idx;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_HASH_ENGINE,
+ &hash_type, sizeof(hash_type),
+ &hash_type, &out_size);
+ if (err || !out_size || hash_type.status) {
+ dev_err(&pdev->dev, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, hash_type.status, out_size);
+ return -EINVAL;
+ }
+
+ *type = hash_type.hash_engine;
+ return 0;
+}
+
+int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_rss_config rss_cfg = { 0 };
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(rss_cfg);
+ int err;
+
+ rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rss_cfg.rss_en = rss_en;
+ rss_cfg.template_id = template_id;
+ rss_cfg.rq_priority_number = 0;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_CFG,
+ &rss_cfg, sizeof(rss_cfg),
+ &rss_cfg, &out_size);
+ if (err || !out_size || rss_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rss_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx)
+{
+ struct hinic_rss_template_mgmt template_mgmt = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(template_mgmt);
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+ &template_mgmt, sizeof(template_mgmt),
+ &template_mgmt, &out_size);
+ if (err || !out_size || template_mgmt.status) {
+ dev_err(&pdev->dev, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, template_mgmt.status, out_size);
+ return -EINVAL;
+ }
+
+ *tmpl_idx = template_mgmt.template_id;
+
+ return 0;
+}
+
+int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx)
+{
+ struct hinic_rss_template_mgmt template_mgmt = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(template_mgmt);
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ template_mgmt.template_id = tmpl_idx;
+ template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+ &template_mgmt, sizeof(template_mgmt),
+ &template_mgmt, &out_size);
+ if (err || !out_size || template_mgmt.status) {
+ dev_err(&pdev->dev, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, template_mgmt.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_get_vport_stats(struct hinic_dev *nic_dev,
+ struct hinic_vport_stats *stats)
+{
+ struct hinic_cmd_vport_stats vport_stats = { 0 };
+ struct hinic_port_stats_info stats_info = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(vport_stats);
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ stats_info.stats_version = HINIC_PORT_STATS_VERSION;
+ stats_info.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ stats_info.stats_size = sizeof(vport_stats);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT,
+ &stats_info, sizeof(stats_info),
+ &vport_stats, &out_size);
+ if (err || !out_size || vport_stats.status) {
+ dev_err(&pdev->dev,
+ "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vport_stats.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(stats, &vport_stats.stats, sizeof(*stats));
+ return 0;
+}
+
+int hinic_get_phy_port_stats(struct hinic_dev *nic_dev,
+ struct hinic_phy_port_stats *stats)
+{
+ struct hinic_port_stats_info stats_info = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_port_stats *port_stats;
+ u16 out_size = sizeof(*port_stats);
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
+ if (!port_stats)
+ return -ENOMEM;
+
+ stats_info.stats_version = HINIC_PORT_STATS_VERSION;
+ stats_info.stats_size = sizeof(*port_stats);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS,
+ &stats_info, sizeof(stats_info),
+ port_stats, &out_size);
+ if (err || !out_size || port_stats->status) {
+ dev_err(&pdev->dev,
+ "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_stats->status, out_size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(stats, &port_stats->stats, sizeof(*stats));
+
+out:
+ kfree(port_stats);
+
+ return err;
+}
+
+int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_version_info up_ver = {0};
+ u16 out_size = sizeof(up_ver);
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,
+ &up_ver, sizeof(up_ver), &up_ver,
+ &out_size);
+ if (err || !out_size || up_ver.status) {
+ dev_err(&pdev->dev,
+ "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, up_ver.status, out_size);
+ return -EINVAL;
+ }
+
+ snprintf(mgmt_ver, HINIC_MGMT_VERSION_MAX_LEN, "%s", up_ver.ver);
+
+ return 0;
+}
+
+int hinic_get_link_mode(struct hinic_hwdev *hwdev,
+ struct hinic_link_mode_cmd *link_mode)
+{
+ u16 out_size;
+ int err;
+
+ if (!hwdev || !link_mode)
+ return -EINVAL;
+
+ link_mode->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ out_size = sizeof(*link_mode);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
+ link_mode, sizeof(*link_mode),
+ link_mode, &out_size);
+ if (err || !out_size || link_mode->status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_mode->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable)
+{
+ struct hinic_set_autoneg_cmd autoneg = {0};
+ u16 out_size = sizeof(autoneg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ autoneg.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ autoneg.enable = enable;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_AUTONEG,
+ &autoneg, sizeof(autoneg),
+ &autoneg, &out_size);
+ if (err || !out_size || autoneg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n",
+ enable ? "enable" : "disable", err, autoneg.status,
+ out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed)
+{
+ struct hinic_speed_cmd speed_info = {0};
+ u16 out_size = sizeof(speed_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ speed_info.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ speed_info.speed = speed;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_SPEED,
+ &speed_info, sizeof(speed_info),
+ &speed_info, &out_size);
+ if (err || !out_size || speed_info.status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, speed_info.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_link_settings(struct hinic_hwdev *hwdev,
+ struct hinic_link_ksettings_info *info)
+{
+ u16 out_size = sizeof(*info);
+ int err;
+
+ err = hinic_hilink_msg_cmd(hwdev, HINIC_HILINK_CMD_SET_LINK_SETTINGS,
+ info, sizeof(*info), info, &out_size);
+ if ((info->status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ info->status) || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, info->status, out_size);
+ return -EFAULT;
+ }
+
+ return info->status;
+}
+
+int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info)
+{
+ u16 out_size = sizeof(*pause_info);
+ int err;
+
+ pause_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO,
+ pause_info, sizeof(*pause_info),
+ pause_info, &out_size);
+ if (err || !out_size || pause_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info)
+{
+ u16 out_size = sizeof(*pause_info);
+ int err;
+
+ pause_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
+ pause_info, sizeof(*pause_info),
+ pause_info, &out_size);
+ if (err || !out_size || pause_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap)
+{
+ struct hinic_nic_cfg *nic_cfg = &hwdev->func_to_io.nic_cfg;
+ struct hinic_set_pfc pfc = {0};
+ u16 out_size = sizeof(pfc);
+ int err;
+
+ if (HINIC_IS_VF(hwdev->hwif))
+ return 0;
+
+ mutex_lock(&nic_cfg->cfg_mutex);
+
+ pfc.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ pfc.pfc_bitmap = pfc_bitmap;
+ pfc.pfc_en = pfc_en;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PFC,
+ &pfc, sizeof(pfc), &pfc, &out_size);
+ if (err || pfc.status || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to %s pfc, err: %d, status: 0x%x, out size: 0x%x\n",
+ pfc_en ? "enable" : "disable", err, pfc.status,
+ out_size);
+ mutex_unlock(&nic_cfg->cfg_mutex);
+ return -EIO;
+ }
+
+	/* Pause and PFC are mutually exclusive: enabling PFC disables global pause */
+ nic_cfg->rx_pause = pfc_en ? 0 : 1;
+ nic_cfg->tx_pause = pfc_en ? 0 : 1;
+
+ mutex_unlock(&nic_cfg->cfg_mutex);
+
+ return 0;
+}
+
+int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable)
+{
+ struct hinic_port_loopback lb = {0};
+ u16 out_size = sizeof(lb);
+ int err;
+
+ lb.mode = mode;
+ lb.en = enable;
+
+ if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Invalid loopback mode %d to set\n", mode);
+ return -EINVAL;
+ }
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LOOPBACK_MODE,
+ &lb, sizeof(lb), &lb, &out_size);
+ if (err || !out_size || lb.status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set loopback mode %d en %d, err: %d, status: 0x%x, out size: 0x%x\n",
+ mode, enable, err, lb.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int _set_led_status(struct hinic_hwdev *hwdev, u8 port,
+ enum hinic_led_type type,
+ enum hinic_led_mode mode, u8 reset)
+{
+ struct hinic_led_info led_info = {0};
+ u16 out_size = sizeof(led_info);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ led_info.port = port;
+ led_info.reset = reset;
+
+ led_info.type = type;
+ led_info.mode = mode;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_SET_LED_STATUS,
+ &led_info, sizeof(led_info),
+ &led_info, &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || led_info.status || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, led_info.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_led_status(struct hinic_hwdev *hwdev, u8 port,
+ enum hinic_led_type type, enum hinic_led_mode mode)
+{
+ if (!hwdev)
+ return -EINVAL;
+
+ return _set_led_status(hwdev, port, type, mode, 0);
+}
+
+int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port)
+{
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = _set_led_status(hwdev, port, HINIC_LED_TYPE_INVALID,
+ HINIC_LED_MODE_INVALID, 1);
+ if (err)
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to reset led status\n");
+
+ return err;
+}
+
+static bool hinic_if_sfp_absent(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmd_get_light_module_abs sfp_abs = {0};
+ u16 out_size = sizeof(sfp_abs);
+ u8 port_id = hwdev->port_id;
+ int err;
+
+ sfp_abs.port_id = port_id;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_SFP_ABS,
+ &sfp_abs, sizeof(sfp_abs), &sfp_abs,
+ &out_size);
+ if (sfp_abs.status || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to get port%d sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n",
+ port_id, err, sfp_abs.status, out_size);
+ return true;
+ }
+
+	return sfp_abs.abs_status != 0;
+}
+
+int hinic_get_sfp_eeprom(struct hinic_hwdev *hwdev, u8 *data, u16 *len)
+{
+ struct hinic_cmd_get_std_sfp_info sfp_info = {0};
+ u16 out_size = sizeof(sfp_info);
+ u8 port_id;
+ int err;
+
+ if (!hwdev || !data || !len)
+ return -EINVAL;
+
+ port_id = hwdev->port_id;
+
+ if (hinic_if_sfp_absent(hwdev))
+ return -ENXIO;
+
+ sfp_info.port_id = port_id;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_STD_SFP_INFO,
+ &sfp_info, sizeof(sfp_info), &sfp_info,
+ &out_size);
+ if (sfp_info.status || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to get port%d sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n",
+ port_id, err, sfp_info.status, out_size);
+ return -EIO;
+ }
+
+ *len = min_t(u16, sfp_info.eeprom_len, STD_SFP_INFO_MAX_SIZE);
+ memcpy(data, sfp_info.sfp_info, STD_SFP_INFO_MAX_SIZE);
+
+ return 0;
+}
+
+int hinic_get_sfp_type(struct hinic_hwdev *hwdev, u8 *data0, u8 *data1)
+{
+ u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
+ u16 len;
+ int err;
+
+ if (hinic_if_sfp_absent(hwdev))
+ return -ENXIO;
+
+ err = hinic_get_sfp_eeprom(hwdev, sfp_data, &len);
+ if (err)
+ return err;
+
+ *data0 = sfp_data[0];
+ *data1 = sfp_data[1];
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
new file mode 100644
index 0000000000..c8694ac7c7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -0,0 +1,900 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_PORT_H
+#define HINIC_PORT_H
+
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/bitops.h>
+
+#include "hinic_dev.h"
+
+#define HINIC_RSS_KEY_SIZE 40
+#define HINIC_RSS_INDIR_SIZE 256
+#define HINIC_PORT_STATS_VERSION 0
+#define HINIC_FW_VERSION_NAME 16
+#define HINIC_COMPILE_TIME_LEN 20
+#define HINIC_MGMT_VERSION_MAX_LEN 32
+
+struct hinic_version_info {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u8 ver[HINIC_FW_VERSION_NAME];
+ u8 time[HINIC_COMPILE_TIME_LEN];
+};
+
+enum hinic_rx_mode {
+ HINIC_RX_MODE_UC = BIT(0),
+ HINIC_RX_MODE_MC = BIT(1),
+ HINIC_RX_MODE_BC = BIT(2),
+ HINIC_RX_MODE_MC_ALL = BIT(3),
+ HINIC_RX_MODE_PROMISC = BIT(4),
+};
+
+enum hinic_port_link_state {
+ HINIC_LINK_STATE_DOWN,
+ HINIC_LINK_STATE_UP,
+};
+
+enum hinic_port_state {
+ HINIC_PORT_DISABLE = 0,
+ HINIC_PORT_ENABLE = 3,
+};
+
+enum hinic_func_port_state {
+ HINIC_FUNC_PORT_DISABLE = 0,
+ HINIC_FUNC_PORT_ENABLE = 2,
+};
+
+enum hinic_autoneg_cap {
+ HINIC_AUTONEG_UNSUPPORTED,
+ HINIC_AUTONEG_SUPPORTED,
+};
+
+enum hinic_autoneg_state {
+ HINIC_AUTONEG_DISABLED,
+ HINIC_AUTONEG_ACTIVE,
+};
+
+enum hinic_duplex {
+ HINIC_DUPLEX_HALF,
+ HINIC_DUPLEX_FULL,
+};
+
+enum hinic_speed {
+ HINIC_SPEED_10MB_LINK = 0,
+ HINIC_SPEED_100MB_LINK,
+ HINIC_SPEED_1000MB_LINK,
+ HINIC_SPEED_10GB_LINK,
+ HINIC_SPEED_25GB_LINK,
+ HINIC_SPEED_40GB_LINK,
+ HINIC_SPEED_100GB_LINK,
+
+ HINIC_SPEED_UNKNOWN = 0xFF,
+};
+
+enum hinic_link_mode {
+ HINIC_10GE_BASE_KR = 0,
+ HINIC_40GE_BASE_KR4 = 1,
+ HINIC_40GE_BASE_CR4 = 2,
+ HINIC_100GE_BASE_KR4 = 3,
+ HINIC_100GE_BASE_CR4 = 4,
+ HINIC_25GE_BASE_KR_S = 5,
+ HINIC_25GE_BASE_CR_S = 6,
+ HINIC_25GE_BASE_KR = 7,
+ HINIC_25GE_BASE_CR = 8,
+ HINIC_GE_BASE_KX = 9,
+ HINIC_LINK_MODE_NUMBERS,
+
+ HINIC_SUPPORTED_UNKNOWN = 0xFFFF,
+};
+
+enum hinic_port_type {
+ HINIC_PORT_TP, /* BASET */
+ HINIC_PORT_AUI,
+ HINIC_PORT_MII,
+ HINIC_PORT_FIBRE, /* OPTICAL */
+ HINIC_PORT_BNC,
+ HINIC_PORT_ELEC,
+ HINIC_PORT_COPPER, /* PORT_DA */
+ HINIC_PORT_AOC,
+ HINIC_PORT_BACKPLANE,
+ HINIC_PORT_NONE = 0xEF,
+ HINIC_PORT_OTHER = 0xFF,
+};
+
+enum hinic_valid_link_settings {
+ HILINK_LINK_SET_SPEED = 0x1,
+ HILINK_LINK_SET_AUTONEG = 0x2,
+ HILINK_LINK_SET_FEC = 0x4,
+};
+
+enum hinic_tso_state {
+ HINIC_TSO_DISABLE = 0,
+ HINIC_TSO_ENABLE = 1,
+};
+
+struct hinic_port_mac_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 vlan_id;
+ u16 rsvd1;
+ unsigned char mac[ETH_ALEN];
+};
+
+struct hinic_port_mtu_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 rsvd1;
+ u32 mtu;
+};
+
+struct hinic_port_vlan_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 vlan_id;
+};
+
+struct hinic_port_rx_mode_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 rsvd;
+ u32 rx_mode;
+};
+
+struct hinic_port_link_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 state;
+ u8 rsvd1;
+};
+
+struct hinic_port_state_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 state;
+ u8 rsvd1[3];
+};
+
+struct hinic_port_link_status {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 link;
+ u8 port_id;
+};
+
+struct hinic_cable_plug_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 plugged; /* 0: unplugged, 1: plugged */
+ u8 port_id;
+};
+
+enum link_err_type {
+ LINK_ERR_MODULE_UNRECOGENIZED,
+ LINK_ERR_NUM,
+};
+
+struct hinic_link_err_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 err_type;
+ u8 port_id;
+};
+
+struct hinic_port_func_state_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 rsvd1;
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct hinic_port_cap {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 rsvd1;
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+ u8 rsvd2[3];
+};
+
+struct hinic_link_mode_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u16 supported; /* 0xFFFF represents invalid value */
+ u16 advertised;
+};
+
+struct hinic_speed_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 speed;
+};
+
+struct hinic_set_autoneg_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+	u16 enable; /* 1: enable, 0: disable */
+};
+
+struct hinic_link_ksettings_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+
+ u32 valid_bitmap;
+ u32 speed; /* enum nic_speed_level */
+ u8 autoneg; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u8 rsvd2[18]; /* reserved for duplex, port, etc. */
+};
+
+struct hinic_tso_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 tso_en;
+ u8 resv2[3];
+};
+
+struct hinic_checksum_offload {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_csum_offload;
+};
+
+struct hinic_rq_num {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1[33];
+ u32 num_rqs;
+ u32 rq_depth;
+};
+
+struct hinic_lro_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ u8 lro_max_wqe_num;
+ u8 resv2[13];
+};
+
+struct hinic_lro_timer {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 type; /* 0: set timer value, 1: get timer value */
+	u8	enable;	/* when setting the lro timer, enable should be 1 */
+ u16 rsvd1;
+ u32 timer;
+};
+
+struct hinic_vlan_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 vlan_rx_offload;
+ u8 rsvd1[5];
+};
+
+struct hinic_vlan_filter {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 rsvd1[2];
+ u32 enable;
+};
+
+struct hinic_rss_template_mgmt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 cmd;
+ u8 template_id;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_template_key {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 key[HINIC_RSS_KEY_SIZE];
+};
+
+struct hinic_rss_context_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u32 ctx;
+};
+
+struct hinic_rss_context_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u32 context;
+};
+
+struct hinic_rss_indirect_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u8 entry[HINIC_RSS_INDIR_SIZE];
+};
+
+struct hinic_rss_indir_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 indir[HINIC_RSS_INDIR_SIZE];
+};
+
+struct hinic_rss_key {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 key[HINIC_RSS_KEY_SIZE];
+};
+
+struct hinic_rss_engine_type {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 rss_en;
+ u8 template_id;
+ u8 rq_priority_number;
+ u8 rsvd1[11];
+};
+
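+/* Descriptor for one ethtool statistic: the display name plus the size and
+ * byte offset of the backing counter within the structure it is read from.
+ */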
+struct hinic_stats {
+ char name[ETH_GSTRING_LEN];
+ u32 size;
+ int offset;
+};
+
+struct hinic_vport_stats {
+ u64 tx_unicast_pkts_vport;
+ u64 tx_unicast_bytes_vport;
+ u64 tx_multicast_pkts_vport;
+ u64 tx_multicast_bytes_vport;
+ u64 tx_broadcast_pkts_vport;
+ u64 tx_broadcast_bytes_vport;
+
+ u64 rx_unicast_pkts_vport;
+ u64 rx_unicast_bytes_vport;
+ u64 rx_multicast_pkts_vport;
+ u64 rx_multicast_bytes_vport;
+ u64 rx_broadcast_pkts_vport;
+ u64 rx_broadcast_bytes_vport;
+
+ u64 tx_discard_vport;
+ u64 rx_discard_vport;
+ u64 tx_err_vport;
+ u64 rx_err_vport;
+};
+
+struct hinic_phy_port_stats {
+ u64 mac_rx_total_pkt_num;
+ u64 mac_rx_total_oct_num;
+ u64 mac_rx_bad_pkt_num;
+ u64 mac_rx_bad_oct_num;
+ u64 mac_rx_good_pkt_num;
+ u64 mac_rx_good_oct_num;
+ u64 mac_rx_uni_pkt_num;
+ u64 mac_rx_multi_pkt_num;
+ u64 mac_rx_broad_pkt_num;
+
+ u64 mac_tx_total_pkt_num;
+ u64 mac_tx_total_oct_num;
+ u64 mac_tx_bad_pkt_num;
+ u64 mac_tx_bad_oct_num;
+ u64 mac_tx_good_pkt_num;
+ u64 mac_tx_good_oct_num;
+ u64 mac_tx_uni_pkt_num;
+ u64 mac_tx_multi_pkt_num;
+ u64 mac_tx_broad_pkt_num;
+
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undersize_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_64_oct_pkt_num;
+ u64 mac_rx_65_127_oct_pkt_num;
+ u64 mac_rx_128_255_oct_pkt_num;
+ u64 mac_rx_256_511_oct_pkt_num;
+ u64 mac_rx_512_1023_oct_pkt_num;
+ u64 mac_rx_1024_1518_oct_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_9216_oct_pkt_num;
+ u64 mac_rx_9217_12287_oct_pkt_num;
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_pkt_num;
+ u64 mac_rx_1519_max_good_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+
+ u64 mac_rx_pause_num;
+ u64 mac_rx_pfc_pkt_num;
+ u64 mac_rx_pfc_pri0_pkt_num;
+ u64 mac_rx_pfc_pri1_pkt_num;
+ u64 mac_rx_pfc_pri2_pkt_num;
+ u64 mac_rx_pfc_pri3_pkt_num;
+ u64 mac_rx_pfc_pri4_pkt_num;
+ u64 mac_rx_pfc_pri5_pkt_num;
+ u64 mac_rx_pfc_pri6_pkt_num;
+ u64 mac_rx_pfc_pri7_pkt_num;
+ u64 mac_rx_control_pkt_num;
+ u64 mac_rx_y1731_pkt_num;
+ u64 mac_rx_sym_err_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undersize_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_64_oct_pkt_num;
+ u64 mac_tx_65_127_oct_pkt_num;
+ u64 mac_tx_128_255_oct_pkt_num;
+ u64 mac_tx_256_511_oct_pkt_num;
+ u64 mac_tx_512_1023_oct_pkt_num;
+ u64 mac_tx_1024_1518_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_9216_oct_pkt_num;
+ u64 mac_tx_9217_12287_oct_pkt_num;
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_pkt_num;
+ u64 mac_tx_1519_max_good_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+
+ u64 mac_tx_pause_num;
+ u64 mac_tx_pfc_pkt_num;
+ u64 mac_tx_pfc_pri0_pkt_num;
+ u64 mac_tx_pfc_pri1_pkt_num;
+ u64 mac_tx_pfc_pri2_pkt_num;
+ u64 mac_tx_pfc_pri3_pkt_num;
+ u64 mac_tx_pfc_pri4_pkt_num;
+ u64 mac_tx_pfc_pri5_pkt_num;
+ u64 mac_tx_pfc_pri6_pkt_num;
+ u64 mac_tx_pfc_pri7_pkt_num;
+ u64 mac_tx_control_pkt_num;
+ u64 mac_tx_y1731_pkt_num;
+ u64 mac_tx_1588_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+
+ u64 mac_rx_higig2_ext_pkt_num;
+ u64 mac_rx_higig2_message_pkt_num;
+ u64 mac_rx_higig2_error_pkt_num;
+ u64 mac_rx_higig2_cpu_ctrl_pkt_num;
+ u64 mac_rx_higig2_unicast_pkt_num;
+ u64 mac_rx_higig2_broadcast_pkt_num;
+ u64 mac_rx_higig2_l2_multicast_pkt_num;
+ u64 mac_rx_higig2_l3_multicast_pkt_num;
+
+ u64 mac_tx_higig2_message_pkt_num;
+ u64 mac_tx_higig2_ext_pkt_num;
+ u64 mac_tx_higig2_cpu_ctrl_pkt_num;
+ u64 mac_tx_higig2_unicast_pkt_num;
+ u64 mac_tx_higig2_broadcast_pkt_num;
+ u64 mac_tx_higig2_l2_multicast_pkt_num;
+ u64 mac_tx_higig2_l3_multicast_pkt_num;
+};
+
+struct hinic_port_stats_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_port_stats {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ struct hinic_phy_port_stats stats;
+};
+
+struct hinic_cmd_vport_stats {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_vport_stats stats;
+};
+
+struct hinic_tx_rate_cfg_max_min {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 min_rate;
+ u32 max_rate;
+ u8 rsvd2[8];
+};
+
+struct hinic_tx_rate_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 tx_rate;
+};
+
+enum nic_speed_level {
+ LINK_SPEED_10MB = 0,
+ LINK_SPEED_100MB,
+ LINK_SPEED_1GB,
+ LINK_SPEED_10GB,
+ LINK_SPEED_25GB,
+ LINK_SPEED_40GB,
+ LINK_SPEED_100GB,
+ LINK_SPEED_LEVELS,
+};
+
+struct hinic_spoofchk_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 state;
+ u8 rsvd1;
+ u16 func_id;
+};
+
+struct hinic_pause_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
+struct hinic_set_pfc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 pfc_en;
+ u8 pfc_bitmap;
+ u8 rsvd1[4];
+};
+
+/* get or set loopback mode, needs to be modified via the base API */
+#define HINIC_INTERNAL_LP_MODE 5
+#define LOOP_MODE_MIN 1
+#define LOOP_MODE_MAX 6
+
+struct hinic_port_loopback {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u32 mode;
+ u32 en;
+};
+
+struct hinic_led_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port;
+ u8 type;
+ u8 mode;
+ u8 reset;
+};
+
+#define STD_SFP_INFO_MAX_SIZE 640
+
+struct hinic_cmd_get_light_module_abs {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+ u8 abs_status; /* 0:present, 1:absent */
+ u8 rsv[2];
+};
+
+struct hinic_cmd_get_std_sfp_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+ u8 wire_type;
+ u16 eeprom_len;
+ u32 rsvd;
+ u8 sfp_info[STD_SFP_INFO_MAX_SIZE];
+};
+
+struct hinic_cmd_update_fw {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct {
+ u32 SL:1;
+ u32 SF:1;
+ u32 flag:1;
+ u32 reserved:13;
+ u32 fragment_len:16;
+ } ctl_info;
+
+ struct {
+ u32 FW_section_CRC;
+ u32 FW_section_type;
+ } section_info;
+
+ u32 total_len;
+ u32 setion_total_len;
+ u32 fw_section_version;
+ u32 section_offset;
+ u32 data[384];
+};
+
+int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
+ u16 vlan_id);
+
+int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr,
+ u16 vlan_id);
+
+int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr);
+
+int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu);
+
+int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id);
+
+int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id);
+
+int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode);
+
+int hinic_port_link_state(struct hinic_dev *nic_dev,
+ enum hinic_port_link_state *link_state);
+
+int hinic_port_set_state(struct hinic_dev *nic_dev,
+ enum hinic_port_state state);
+
+int hinic_port_set_func_state(struct hinic_dev *nic_dev,
+ enum hinic_func_port_state state);
+
+int hinic_port_get_cap(struct hinic_dev *nic_dev,
+ struct hinic_port_cap *port_cap);
+
+int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs);
+
+int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
+
+int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en);
+
+int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
+ u32 lro_timer, u32 wqe_num);
+
+int hinic_set_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ struct hinic_rss_type rss_type);
+
+int hinic_rss_set_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ const u32 *indir_table);
+
+int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id,
+ const u8 *temp);
+
+int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id,
+ u8 type);
+
+int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id);
+
+int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx);
+
+int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx);
+
+void hinic_set_ethtool_ops(struct net_device *netdev);
+
+int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ struct hinic_rss_type *rss_type);
+
+int hinic_rss_get_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ u32 *indir_table);
+
+int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ u8 *temp);
+
+int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx,
+ u8 *type);
+
+int hinic_get_phy_port_stats(struct hinic_dev *nic_dev,
+ struct hinic_phy_port_stats *stats);
+
+int hinic_get_vport_stats(struct hinic_dev *nic_dev,
+ struct hinic_vport_stats *stats);
+
+int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en);
+
+int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en);
+
+int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver);
+
+int hinic_set_link_settings(struct hinic_hwdev *hwdev,
+ struct hinic_link_ksettings_info *info);
+
+int hinic_get_link_mode(struct hinic_hwdev *hwdev,
+ struct hinic_link_mode_cmd *link_mode);
+
+int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable);
+
+int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed);
+
+int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info);
+
+int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info);
+
+int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap);
+
+int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable);
+
+enum hinic_led_mode {
+ HINIC_LED_MODE_ON,
+ HINIC_LED_MODE_OFF,
+ HINIC_LED_MODE_FORCE_1HZ,
+ HINIC_LED_MODE_FORCE_2HZ,
+ HINIC_LED_MODE_FORCE_4HZ,
+ HINIC_LED_MODE_1HZ,
+ HINIC_LED_MODE_2HZ,
+ HINIC_LED_MODE_4HZ,
+ HINIC_LED_MODE_INVALID,
+};
+
+enum hinic_led_type {
+ HINIC_LED_TYPE_LINK,
+ HINIC_LED_TYPE_LOW_SPEED,
+ HINIC_LED_TYPE_HIGH_SPEED,
+ HINIC_LED_TYPE_INVALID,
+};
+
+int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port);
+
+int hinic_set_led_status(struct hinic_hwdev *hwdev, u8 port,
+ enum hinic_led_type type, enum hinic_led_mode mode);
+
+int hinic_get_sfp_type(struct hinic_hwdev *hwdev, u8 *data0, u8 *data1);
+
+int hinic_get_sfp_eeprom(struct hinic_hwdev *hwdev, u8 *data, u16 *len);
+
+int hinic_open(struct net_device *netdev);
+
+int hinic_close(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
new file mode 100644
index 0000000000..ceec8be2a7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
+#include <linux/cpumask.h>
+#include <linux/if_vlan.h>
+#include <asm/barrier.h>
+
+#include "hinic_common.h"
+#include "hinic_hw_if.h"
+#include "hinic_hw_wqe.h"
+#include "hinic_hw_wq.h"
+#include "hinic_hw_qp.h"
+#include "hinic_hw_dev.h"
+#include "hinic_rx.h"
+#include "hinic_dev.h"
+
+#define RX_IRQ_NO_PENDING 0
+#define RX_IRQ_NO_COALESC 0
+#define RX_IRQ_NO_LLI_TIMER 0
+#define RX_IRQ_NO_CREDIT 0
+#define RX_IRQ_NO_RESEND_TIMER 0
+#define HINIC_RX_BUFFER_WRITE 16
+
+#define HINIC_RX_IPV6_PKT 7
+#define LRO_PKT_HDR_LEN_IPV4 66
+#define LRO_PKT_HDR_LEN_IPV6 86
+#define LRO_REPLENISH_THLD 256
+
+#define LRO_PKT_HDR_LEN(cqe) \
+ (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
+ HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
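+
+/* LRO_PKT_HDR_LEN() estimates the per-segment header length of a coalesced
+ * packet (IPv6 headers are 20 bytes longer than IPv4), which rxq_recv() uses
+ * to reconstruct the byte count of LRO-aggregated frames.
+ */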
+
+/**
+ * hinic_rxq_clean_stats - Clean the statistics of the specified queue
+ * @rxq: Logical Rx Queue
+ **/
+static void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
+{
+ struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
+
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->pkts = 0;
+ rxq_stats->bytes = 0;
+ rxq_stats->errors = 0;
+ rxq_stats->csum_errors = 0;
+ rxq_stats->other_errors = 0;
+ u64_stats_update_end(&rxq_stats->syncp);
+}
+
+/**
+ * hinic_rxq_get_stats - get statistics of Rx Queue
+ * @rxq: Logical Rx Queue
+ * @stats: return updated stats here
+ **/
+void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
+{
+ struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ stats->pkts = rxq_stats->pkts;
+ stats->bytes = rxq_stats->bytes;
+ stats->errors = rxq_stats->csum_errors +
+ rxq_stats->other_errors;
+ stats->csum_errors = rxq_stats->csum_errors;
+ stats->other_errors = rxq_stats->other_errors;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+}
+
+/**
+ * rxq_stats_init - Initialize the statistics of the specified queue
+ * @rxq: Logical Rx Queue
+ **/
+static void rxq_stats_init(struct hinic_rxq *rxq)
+{
+ struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
+
+ u64_stats_init(&rxq_stats->syncp);
+ hinic_rxq_clean_stats(rxq);
+}
+
+static void rx_csum(struct hinic_rxq *rxq, u32 status,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rxq->netdev;
+ u32 csum_err;
+
+ csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (!csum_err) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
+ HINIC_RX_CSUM_IPSU_OTHER_ERR)))
+ rxq->rxq_stats.csum_errors++;
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
+
+/**
+ * rx_alloc_skb - allocate skb and map it to dma address
+ * @rxq: rx queue
+ * @dma_addr: returned dma address for the skb
+ *
+ * Return skb
+ **/
+static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
+ dma_addr_t *dma_addr)
+{
+ struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct sk_buff *skb;
+ dma_addr_t addr;
+ int err;
+
+ skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
+ if (!skb)
+ return NULL;
+
+ addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
+ DMA_FROM_DEVICE);
+ err = dma_mapping_error(&pdev->dev, addr);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
+ goto err_rx_map;
+ }
+
+ *dma_addr = addr;
+ return skb;
+
+err_rx_map:
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+/**
+ * rx_unmap_skb - unmap the dma address of the skb
+ * @rxq: rx queue
+ * @dma_addr: dma address of the skb
+ **/
+static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
+{
+ struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+
+ dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
+ DMA_FROM_DEVICE);
+}
+
+/**
+ * rx_free_skb - unmap and free skb
+ * @rxq: rx queue
+ * @skb: skb to free
+ * @dma_addr: dma address of the skb
+ **/
+static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
+ dma_addr_t dma_addr)
+{
+ rx_unmap_skb(rxq, dma_addr);
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * rx_alloc_pkts - allocate pkts in rx queue
+ * @rxq: rx queue
+ *
+ * Return number of skbs allocated
+ **/
+static int rx_alloc_pkts(struct hinic_rxq *rxq)
+{
+ struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct hinic_rq_wqe *rq_wqe;
+ unsigned int free_wqebbs;
+ struct hinic_sge sge;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ u16 prod_idx;
+ int i;
+
+ free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
+
+ /* Limit the allocation chunks */
+ if (free_wqebbs > nic_dev->rx_weight)
+ free_wqebbs = nic_dev->rx_weight;
+
+ for (i = 0; i < free_wqebbs; i++) {
+ skb = rx_alloc_skb(rxq, &dma_addr);
+ if (!skb)
+ goto skb_out;
+
+ hinic_set_sge(&sge, dma_addr, skb->len);
+
+ rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
+ &prod_idx);
+ if (!rq_wqe) {
+ rx_free_skb(rxq, skb, dma_addr);
+ goto skb_out;
+ }
+
+ hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
+
+ hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
+ }
+
+skb_out:
+ if (i) {
+		wmb();	/* write all the wqes before updating the PI */
+
+ hinic_rq_update(rxq->rq, prod_idx);
+ }
+
+ return i;
+}
+
+/**
+ * free_all_rx_skbs - free all skbs in rx queue
+ * @rxq: rx queue
+ **/
+static void free_all_rx_skbs(struct hinic_rxq *rxq)
+{
+ struct hinic_rq *rq = rxq->rq;
+ struct hinic_hw_wqe *hw_wqe;
+ struct hinic_sge sge;
+ u16 ci;
+
+ while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
+ if (IS_ERR(hw_wqe))
+ break;
+
+ hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);
+
+ hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);
+
+ rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
+ }
+}
+
+/**
+ * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
+ * @rxq: rx queue
+ * @head_skb: the first skb in the list
+ * @left_pkt_len: remaining size of the pkt, excluding the head skb
+ * @ci: consumer index
+ *
+ * Return number of wqes used for the remainder of the pkt
+ **/
+static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
+ unsigned int left_pkt_len, u16 ci)
+{
+ struct sk_buff *skb, *curr_skb = head_skb;
+ struct hinic_rq_wqe *rq_wqe;
+ unsigned int curr_len;
+ struct hinic_sge sge;
+ int num_wqes = 0;
+
+ while (left_pkt_len > 0) {
+ rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
+ &skb, &ci);
+
+ num_wqes++;
+
+ hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
+
+ rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
+
+ prefetch(skb->data);
+
+ curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
+ left_pkt_len;
+
+ left_pkt_len -= curr_len;
+
+ __skb_put(skb, curr_len);
+
+ if (curr_skb == head_skb)
+ skb_shinfo(head_skb)->frag_list = skb;
+ else
+ curr_skb->next = skb;
+
+ head_skb->len += skb->len;
+ head_skb->data_len += skb->len;
+ head_skb->truesize += skb->truesize;
+
+ curr_skb = skb;
+ }
+
+ return num_wqes;
+}
+
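+/* During a loopback self-test, received test frames are copied into
+ * lb_test_rx_buf so the test path can later compare them against the frames
+ * it transmitted.
+ */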
+static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ u8 *lb_buf = nic_dev->lb_test_rx_buf;
+ int lb_len = nic_dev->lb_pkt_len;
+ int pkt_offset, frag_len, i;
+ void *frag_data = NULL;
+
+ if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
+ nic_dev->lb_test_rx_idx = 0;
+ netif_warn(nic_dev, drv, netdev, "Loopback test warning, receive too more test pkts\n");
+ }
+
+ if (skb->len != nic_dev->lb_pkt_len) {
+ netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
+ nic_dev->lb_test_rx_idx++;
+ return;
+ }
+
+ pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
+ frag_len = (int)skb_headlen(skb);
+ memcpy(lb_buf + pkt_offset, skb->data, frag_len);
+ pkt_offset += frag_len;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
+ frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ memcpy((lb_buf + pkt_offset), frag_data, frag_len);
+ pkt_offset += frag_len;
+ }
+ nic_dev->lb_test_rx_idx++;
+}
+
+/**
+ * rxq_recv - Rx handler
+ * @rxq: rx queue
+ * @budget: maximum pkts to process
+ *
+ * Return number of pkts received
+ **/
+static int rxq_recv(struct hinic_rxq *rxq, int budget)
+{
+ struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
+ struct net_device *netdev = rxq->netdev;
+ u64 pkt_len = 0, rx_bytes = 0;
+ struct hinic_rq *rq = rxq->rq;
+ struct hinic_rq_wqe *rq_wqe;
+ struct hinic_dev *nic_dev;
+ unsigned int free_wqebbs;
+ struct hinic_rq_cqe *cqe;
+ int num_wqes, pkts = 0;
+ struct hinic_sge sge;
+ unsigned int status;
+ struct sk_buff *skb;
+ u32 offload_type;
+ u16 ci, num_lro;
+ u16 num_wqe = 0;
+ u32 vlan_len;
+ u16 vid;
+
+ nic_dev = netdev_priv(netdev);
+
+ while (pkts < budget) {
+ num_wqes = 0;
+
+ rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
+ &ci);
+ if (!rq_wqe)
+ break;
+
+ /* make sure we read rx_done before packet length */
+ dma_rmb();
+
+ cqe = rq->cqe[ci];
+ status = be32_to_cpu(cqe->status);
+ hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
+
+ rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
+
+ rx_csum(rxq, status, skb);
+
+ prefetch(skb->data);
+
+ pkt_len = sge.len;
+
+ if (pkt_len <= HINIC_RX_BUF_SZ) {
+ __skb_put(skb, pkt_len);
+ } else {
+ __skb_put(skb, HINIC_RX_BUF_SZ);
+ num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
+ HINIC_RX_BUF_SZ, ci);
+ }
+
+ hinic_rq_put_wqe(rq, ci,
+ (num_wqes + 1) * HINIC_RQ_WQE_SIZE);
+
+ offload_type = be32_to_cpu(cqe->offload_type);
+ vlan_len = be32_to_cpu(cqe->len);
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
+ vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+
+ if (unlikely(nic_dev->flags & HINIC_LP_TEST))
+ hinic_copy_lp_data(nic_dev, skb);
+
+ skb_record_rx_queue(skb, qp->q_id);
+ skb->protocol = eth_type_trans(skb, rxq->netdev);
+
+ napi_gro_receive(&rxq->napi, skb);
+
+ pkts++;
+ rx_bytes += pkt_len;
+
+ num_lro = HINIC_GET_RX_NUM_LRO(status);
+ if (num_lro) {
+ rx_bytes += ((num_lro - 1) *
+ LRO_PKT_HDR_LEN(cqe));
+
+ num_wqe +=
+ (u16)(pkt_len >> rxq->rx_buff_shift) +
+ ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
+ }
+
+ cqe->status = 0;
+
+ if (num_wqe >= LRO_REPLENISH_THLD)
+ break;
+ }
+
+ free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
+ if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
+ rx_alloc_pkts(rxq);
+
+ u64_stats_update_begin(&rxq->rxq_stats.syncp);
+ rxq->rxq_stats.pkts += pkts;
+ rxq->rxq_stats.bytes += rx_bytes;
+ u64_stats_update_end(&rxq->rxq_stats.syncp);
+
+ return pkts;
+}
+
+static int rx_poll(struct napi_struct *napi, int budget)
+{
+ struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
+ struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct hinic_rq *rq = rxq->rq;
+ int pkts;
+
+ pkts = rxq_recv(rxq, budget);
+ if (pkts >= budget)
+ return budget;
+
+ napi_complete(napi);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_ENABLE);
+
+ return pkts;
+}
+
+static void rx_add_napi(struct hinic_rxq *rxq)
+{
+ struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
+
+ netif_napi_add_weight(rxq->netdev, &rxq->napi, rx_poll,
+ nic_dev->rx_weight);
+ napi_enable(&rxq->napi);
+}
+
+static void rx_del_napi(struct hinic_rxq *rxq)
+{
+ napi_disable(&rxq->napi);
+ netif_napi_del(&rxq->napi);
+}
+
+static irqreturn_t rx_irq(int irq, void *data)
+{
+ struct hinic_rxq *rxq = (struct hinic_rxq *)data;
+ struct hinic_rq *rq = rxq->rq;
+ struct hinic_dev *nic_dev;
+
+	/* Disable the interrupt until napi is complete */
+ nic_dev = netdev_priv(rxq->netdev);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_DISABLE);
+
+ hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
+
+ napi_schedule(&rxq->napi);
+ return IRQ_HANDLED;
+}
+
+static int rx_request_irq(struct hinic_rxq *rxq)
+{
+ struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct hinic_msix_config interrupt_info = {0};
+ struct hinic_intr_coal_info *intr_coal = NULL;
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_rq *rq = rxq->rq;
+ struct hinic_qp *qp;
+ int err;
+
+ qp = container_of(rq, struct hinic_qp, rq);
+
+ rx_add_napi(rxq);
+
+ hinic_hwdev_msix_set(hwdev, rq->msix_entry,
+ RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
+ RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
+ RX_IRQ_NO_RESEND_TIMER);
+
+ intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
+ interrupt_info.msix_index = rq->msix_entry;
+ interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
+ interrupt_info.pending_cnt = intr_coal->pending_limt;
+ interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
+
+ err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
+ if (err) {
+ netif_err(nic_dev, drv, rxq->netdev,
+ "Failed to set RX interrupt coalescing attribute\n");
+ goto err_req_irq;
+ }
+
+ err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
+ if (err)
+ goto err_req_irq;
+
+ cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
+ err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
+ if (err)
+ goto err_irq_affinity;
+
+ return 0;
+
+err_irq_affinity:
+ free_irq(rq->irq, rxq);
+err_req_irq:
+ rx_del_napi(rxq);
+ return err;
+}
+
+static void rx_free_irq(struct hinic_rxq *rxq)
+{
+ struct hinic_rq *rq = rxq->rq;
+
+ irq_update_affinity_hint(rq->irq, NULL);
+ free_irq(rq->irq, rxq);
+ rx_del_napi(rxq);
+}
+
+/**
+ * hinic_init_rxq - Initialize the Rx Queue
+ * @rxq: Logical Rx Queue
+ * @rq: Hardware Rx Queue to connect the Logical queue with
+ * @netdev: network device to connect the Logical queue with
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
+ struct net_device *netdev)
+{
+ struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
+ int err, pkts;
+
+ rxq->netdev = netdev;
+ rxq->rq = rq;
+ rxq->buf_len = HINIC_RX_BUF_SZ;
+ rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);
+
+ rxq_stats_init(rxq);
+
+ rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
+ "%s_rxq%d", netdev->name, qp->q_id);
+ if (!rxq->irq_name)
+ return -ENOMEM;
+
+ pkts = rx_alloc_pkts(rxq);
+ if (!pkts) {
+ err = -ENOMEM;
+ goto err_rx_pkts;
+ }
+
+ err = rx_request_irq(rxq);
+ if (err) {
+ netdev_err(netdev, "Failed to request Rx irq\n");
+ goto err_req_rx_irq;
+ }
+
+ return 0;
+
+err_req_rx_irq:
+err_rx_pkts:
+ free_all_rx_skbs(rxq);
+ devm_kfree(&netdev->dev, rxq->irq_name);
+ return err;
+}
+
+/**
+ * hinic_clean_rxq - Clean the Rx Queue
+ * @rxq: Logical Rx Queue
+ **/
+void hinic_clean_rxq(struct hinic_rxq *rxq)
+{
+ struct net_device *netdev = rxq->netdev;
+
+ rx_free_irq(rxq);
+
+ free_all_rx_skbs(rxq);
+ devm_kfree(&netdev->dev, rxq->irq_name);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
new file mode 100644
index 0000000000..8f7bd6a049
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_RX_H
+#define HINIC_RX_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/interrupt.h>
+
+#include "hinic_hw_qp.h"
+
+#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF
+#define HINIC_RX_CSUM_HW_CHECK_NONE BIT(7)
+#define HINIC_RX_CSUM_IPSU_OTHER_ERR BIT(8)
+
+struct hinic_rxq_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 errors;
+ u64 csum_errors;
+ u64 other_errors;
+ u64 alloc_skb_err;
+ struct u64_stats_sync syncp;
+};
+
+struct hinic_rxq {
+ struct net_device *netdev;
+ struct hinic_rq *rq;
+
+ struct hinic_rxq_stats rxq_stats;
+
+ char *irq_name;
+ u16 buf_len;
+ u32 rx_buff_shift;
+
+ struct napi_struct napi;
+};
+
+void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
+
+int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
+ struct net_device *netdev);
+
+void hinic_clean_rxq(struct hinic_rxq *rxq);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
new file mode 100644
index 0000000000..ee357088d0
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -0,0 +1,1364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include "hinic_hw_dev.h"
+#include "hinic_dev.h"
+#include "hinic_hw_mbox.h"
+#include "hinic_hw_cmdq.h"
+#include "hinic_port.h"
+#include "hinic_sriov.h"
+
+static unsigned char set_vf_link_state;
+module_param(set_vf_link_state, byte, 0444);
+MODULE_PARM_DESC(set_vf_link_state, "Set VF link state, 0 represents link auto, 1 represents link always up, 2 represents link always down. Default is 0.");
+
+#define HINIC_VLAN_PRIORITY_SHIFT 13
+#define HINIC_ADD_VLAN_IN_MAC 0x8000
+#define HINIC_TX_RATE_TABLE_FULL 12
+#define HINIC_MAX_QOS 7
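+
+/* A 16-bit "vlanprio" value packs the 12-bit VLAN ID in the low bits and the
+ * 3-bit 802.1p priority (QoS) above HINIC_VLAN_PRIORITY_SHIFT, mirroring the
+ * VLAN TCI layout; e.g. vlan 100 with qos 3 encodes as 0x6064.
+ */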
+
+static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
+ u16 vlan_id, u16 func_id)
+{
+ struct hinic_port_mac_cmd mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ mac_info.func_idx = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+ if (err || out_size != sizeof(mac_info) ||
+ (mac_info.status && mac_info.status != HINIC_MGMT_STATUS_EXIST)) {
+ dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id,
+ u8 link_status)
+{
+ struct vf_data_storage *vf_infos = hwdev->func_to_io.vf_infos;
+ struct hinic_port_link_status link = {0};
+ u16 out_size = sizeof(link);
+ int err;
+
+ if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) {
+ link.link = link_status;
+ link.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
+ vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT,
+ &link, sizeof(link),
+ &link, &out_size, 0);
+ if (err || !out_size || link.status)
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err,
+ link.status, out_size);
+ }
+}
+
+/* send link change event mbox msg to active vfs under the pf */
+void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
+ u8 link_status)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ u16 i;
+
+ nic_io->link_status = link_status;
+ for (i = 1; i <= nic_io->max_vfs; i++) {
+ if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced)
+ hinic_notify_vf_link_status(hwdev, i, link_status);
+ }
+}
+
+static u16 hinic_vf_info_vlanprio(struct hinic_hwdev *hwdev, int vf_id)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ u16 pf_vlan, vlanprio;
+ u8 pf_qos;
+
+ pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan;
+ pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos;
+ vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT;
+
+ return vlanprio;
+}
+
+static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid,
+ u8 qos, int vf_id)
+{
+ struct hinic_vf_vlan_config vf_vlan = {0};
+ u16 out_size = sizeof(vf_vlan);
+ int err;
+ u8 cmd;
+
+ /* VLAN 0 is a special case, don't allow it to be removed */
+ if (!vid && !add)
+ return 0;
+
+ vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ vf_vlan.vlan_id = vid;
+ vf_vlan.qos = qos;
+
+ if (add)
+ cmd = HINIC_PORT_CMD_SET_VF_VLAN;
+ else
+ cmd = HINIC_PORT_CMD_CLR_VF_VLAN;
+
+ err = hinic_port_msg_cmd(hwdev, cmd, &vf_vlan,
+ sizeof(vf_vlan), &vf_vlan, &out_size);
+ if (err || !out_size || vf_vlan.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, vf_vlan.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic_set_vf_tx_rate_max_min(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct hinic_tx_rate_cfg_max_min rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ rate_cfg.max_rate = max_rate;
+ rate_cfg.min_rate = min_rate;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size);
+ if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ rate_cfg.status) || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err,
+ rate_cfg.status, out_size);
+ return -EIO;
+ }
+
+ if (!rate_cfg.status) {
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate;
+ }
+
+ return rate_cfg.status;
+}
+
+static int hinic_set_vf_rate_limit(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 tx_rate)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct hinic_tx_rate_cfg rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ rate_cfg.tx_rate = tx_rate;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size);
+ if (err || !out_size || rate_cfg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status,
+ out_size);
+ if (rate_cfg.status)
+ return rate_cfg.status;
+
+ return -EIO;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0;
+
+ return 0;
+}
+
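+/* Prefer the combined max/min rate command; if firmware reports it as
+ * unsupported, fall back to the plain rate-limit command, which can only
+ * enforce a max rate.
+ */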
+static int hinic_set_vf_tx_rate(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ int err;
+
+ err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED)
+ return err;
+
+ if (min_rate) {
+ dev_err(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate\n");
+ return -EOPNOTSUPP;
+ }
+
+ dev_info(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate, force min_tx_rate = max_tx_rate\n");
+
+ return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate);
+}
+
+static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id)
+{
+ struct vf_data_storage *vf_info;
+ u16 func_id, vlan_id;
+ int err = 0;
+
+ vf_info = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac) {
+ func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+
+ vlan_id = 0;
+
+ err = hinic_set_mac(hwdev, vf_info->vf_mac_addr, vlan_id,
+ func_id);
+ if (err) {
+ dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set VF %d MAC\n",
+ HW_VF_ID_TO_OS(vf_id));
+ return err;
+ }
+ }
+
+ if (hinic_vf_info_vlanprio(hwdev, vf_id)) {
+ err = hinic_set_vf_vlan(hwdev, true, vf_info->pf_vlan,
+ vf_info->pf_qos, vf_id);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to add VF %d VLAN_QOS\n",
+ HW_VF_ID_TO_OS(vf_id));
+ return err;
+ }
+ }
+
+ if (vf_info->max_rate) {
+ err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate,
+ vf_info->min_rate);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d max rate: %d, min rate: %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->max_rate,
+ vf_info->min_rate);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int hinic_register_vf_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_register_vf *register_info = buf_out;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ if (vf_id > nic_io->max_vfs) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Register VF id %d exceed limit[0-%d]\n",
+ HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs));
+ register_info->status = EFAULT;
+ return -EFAULT;
+ }
+
+ *out_size = sizeof(*register_info);
+ err = hinic_init_vf_config(hw_dev, vf_id);
+ if (err) {
+ register_info->status = EFAULT;
+ return err;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true;
+
+ return 0;
+}
+
+static int hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+
+ nic_io = &hw_dev->func_to_io;
+ *out_size = 0;
+ if (vf_id > nic_io->max_vfs)
+ return 0;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = false;
+
+ return 0;
+}
+
+static int hinic_change_vf_mtu_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, buf_in,
+ in_size, buf_out, out_size);
+ if (err) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Failed to set VF %u mtu\n",
+ vf_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic_get_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_info = buf_out;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+
+ nic_io = &dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+ memcpy(mac_info->mac, vf_info->vf_mac_addr, ETH_ALEN);
+ mac_info->status = 0;
+ *out_size = sizeof(*mac_info);
+
+ return 0;
+}
+
+static int hinic_set_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_out = buf_out;
+ struct hinic_port_mac_cmd *mac_in = buf_in;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac && !(vf_info->trust) &&
+ is_valid_ether_addr(mac_in->mac)) {
+ dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF %d MAC address\n",
+ HW_VF_ID_TO_OS(vf_id));
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_SET_MAC, buf_in,
+ in_size, buf_out, out_size);
+ if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+ dev_err(&hw_dev->hwif->pdev->dev,
+ "Failed to set VF %d MAC address, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static int hinic_del_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_out = buf_out;
+ struct hinic_port_mac_cmd *mac_in = buf_in;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac) &&
+ !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) {
+ dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF mac.\n");
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_DEL_MAC, buf_in,
+ in_size, buf_out, out_size);
+ if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static int hinic_get_vf_link_status_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_link_cmd *get_link = buf_out;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct vf_data_storage *vf_infos;
+ struct hinic_func_to_io *nic_io;
+ bool link_forced, link_up;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_infos = nic_io->vf_infos;
+ link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced;
+ link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up;
+
+ if (link_forced)
+ get_link->state = link_up ?
+ HINIC_LINK_STATE_UP : HINIC_LINK_STATE_DOWN;
+ else
+ get_link->state = nic_io->link_status;
+
+ get_link->status = 0;
+ *out_size = sizeof(*get_link);
+
+ return 0;
+}
+
+static bool check_func_table(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_cmd_fw_ctxt *function_table = buf_in;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size) ||
+ !function_table->rx_buf_sz)
+ return false;
+
+ return true;
+}
+
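+/* Mailbox commands sent by a VF and the PF-side handler that services each */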
+static struct vf_cmd_msg_handle nic_vf_cmd_msg_handler[] = {
+ {HINIC_PORT_CMD_VF_REGISTER, hinic_register_vf_msg_handler},
+ {HINIC_PORT_CMD_VF_UNREGISTER, hinic_unregister_vf_msg_handler},
+ {HINIC_PORT_CMD_CHANGE_MTU, hinic_change_vf_mtu_msg_handler},
+ {HINIC_PORT_CMD_GET_MAC, hinic_get_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_SET_MAC, hinic_set_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_DEL_MAC, hinic_del_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_GET_LINK_STATE, hinic_get_vf_link_status_msg_handler},
+};
+
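+/* Commands a VF is permitted to send; the check callback, when present,
+ * validates the request (typically that the embedded function id belongs to
+ * the sending VF).
+ */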
+static struct vf_cmd_check_handle nic_cmd_support_vf[] = {
+ {HINIC_PORT_CMD_VF_REGISTER, NULL},
+ {HINIC_PORT_CMD_VF_UNREGISTER, NULL},
+ {HINIC_PORT_CMD_CHANGE_MTU, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_ADD_VLAN, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_DEL_VLAN, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_MAC, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_MAC, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_DEL_MAC, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RX_MODE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_PAUSE_INFO, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_LINK_STATE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_LRO, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RX_CSUM, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_VPORT_STAT, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_CLEAN_VPORT_STAT, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
+ hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_RSS_TEMP_MGR, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_RSS_CFG, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_FWCTXT_INIT, check_func_table},
+ {HINIC_PORT_CMD_GET_MGMT_VERSION, NULL},
+ {HINIC_PORT_CMD_SET_FUNC_STATE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_GLOBAL_QPN, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_TSO, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_RQ_IQ_MAP, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_LINK_STATUS_REPORT, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_CAP, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_LINK_MODE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_VF_COS, NULL},
+ {HINIC_PORT_CMD_SET_VHD_CFG, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_VLAN_FILTER, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_Q_FILTER, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_TCAM_FILTER, NULL},
+ {HINIC_PORT_CMD_UP_TC_ADD_FLOW, NULL},
+ {HINIC_PORT_CMD_UP_TC_DEL_FLOW, NULL},
+ {HINIC_PORT_CMD_UP_TC_FLUSH_TCAM, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_UP_TC_ENABLE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_CABLE_PLUG_EVENT, NULL},
+ {HINIC_PORT_CMD_LINK_ERR_EVENT, NULL},
+ {HINIC_PORT_CMD_SET_PORT_STATE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_ETS, NULL},
+ {HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE, NULL},
+ {HINIC_PORT_CMD_RESET_LINK_CFG, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_LINK_FOLLOW, NULL},
+ {HINIC_PORT_CMD_CLEAR_QP_RES, NULL},
+};
+
+#define CHECK_IPSU_15BIT	0x8000
+
+static
+struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ return &nic_dev->sriov_info;
+}
+
+static int hinic_check_mac_info(u8 status, u16 vlan_id)
+{
+ if ((status && status != HINIC_MGMT_STATUS_EXIST) ||
+ (vlan_id & CHECK_IPSU_15BIT &&
+ status == HINIC_MGMT_STATUS_EXIST))
+ return -EINVAL;
+
+ return 0;
+}
+
+#define HINIC_VLAN_ID_MASK 0x7FFF
+
+static int hinic_update_mac(struct hinic_hwdev *hwdev, u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id)
+{
+ struct hinic_port_mac_update mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !old_mac || !new_mac)
+ return -EINVAL;
+
+ if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(&hwdev->hwif->pdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.old_mac, old_mac, ETH_ALEN);
+ memcpy(mac_info.new_mac, new_mac, ETH_ALEN);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_UPDATE_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+
+ if (err || !out_size ||
+ hinic_check_mac_info(mac_info.status, mac_info.vlan_id)) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+
+ if (mac_info.status == HINIC_MGMT_STATUS_EXIST)
+ dev_warn(&hwdev->hwif->pdev->dev, "MAC is repeated. Ignore update operation\n");
+
+ return 0;
+}
+
+static void hinic_get_vf_config(struct hinic_hwdev *hwdev, u16 vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct vf_data_storage *vfinfo;
+
+ vfinfo = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+ ivi->vf = HW_VF_ID_TO_OS(vf_id);
+ memcpy(ivi->mac, vfinfo->vf_mac_addr, ETH_ALEN);
+ ivi->vlan = vfinfo->pf_vlan;
+ ivi->qos = vfinfo->pf_qos;
+ ivi->spoofchk = vfinfo->spoofchk;
+ ivi->trusted = vfinfo->trust;
+ ivi->max_tx_rate = vfinfo->max_rate;
+ ivi->min_tx_rate = vfinfo->min_rate;
+
+ if (!vfinfo->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vfinfo->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+}
+
+int hinic_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi);
+
+ return 0;
+}
+
+static int hinic_set_vf_mac(struct hinic_hwdev *hwdev, int vf,
+ unsigned char *mac_addr)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct vf_data_storage *vf_info;
+ u16 func_id;
+ int err;
+
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+
+ /* duplicate request, so just return success */
+ if (vf_info->pf_set_mac &&
+ !memcmp(vf_info->vf_mac_addr, mac_addr, ETH_ALEN))
+ return 0;
+
+ vf_info->pf_set_mac = true;
+
+ func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf;
+ err = hinic_update_mac(hwdev, vf_info->vf_mac_addr,
+ mac_addr, 0, func_id);
+ if (err) {
+ vf_info->pf_set_mac = false;
+ return err;
+ }
+
+ memcpy(vf_info->vf_mac_addr, mac_addr, ETH_ALEN);
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ int err;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (!is_valid_ether_addr(mac) || vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac);
+ if (err)
+ return err;
+
+ netif_info(nic_dev, drv, netdev, "Setting MAC %pM on VF %d\n", mac, vf);
+ netif_info(nic_dev, drv, netdev, "Reload the VF driver to make this change effective.");
+
+ return 0;
+}
+
+static int hinic_add_vf_vlan(struct hinic_hwdev *hwdev, int vf_id,
+ u16 vlan, u8 qos)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hwdev, true, vlan, qos, vf_id);
+ if (err)
+ return err;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos;
+
+ dev_info(&hwdev->hwif->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan, qos, HW_VF_ID_TO_OS(vf_id));
+ return 0;
+}
+
+static int hinic_kill_vf_vlan(struct hinic_hwdev *hwdev, int vf_id)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hwdev, false,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos,
+ vf_id);
+ if (err)
+ return err;
+
+ dev_info(&hwdev->hwif->pdev->dev, "Remove VLAN %d on VF %d\n",
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ HW_VF_ID_TO_OS(vf_id));
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0;
+
+ return 0;
+}
+
+static int hinic_update_mac_vlan(struct hinic_dev *nic_dev, u16 old_vlan,
+ u16 new_vlan, int vf_id)
+{
+ struct vf_data_storage *vf_info;
+ u16 vlan_id;
+ int err;
+
+ if (!nic_dev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID)
+ return -EINVAL;
+
+ vf_info = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (!vf_info->pf_set_mac)
+ return 0;
+
+ vlan_id = old_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+
+ err = hinic_port_del_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+ if (err) {
+ dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to delete VF %d MAC %pM vlan %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, old_vlan);
+ return err;
+ }
+
+ vlan_id = new_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+
+ err = hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+ if (err) {
+ dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to add VF %d MAC %pM vlan %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, new_vlan);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ vlan_id = old_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+ hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+
+ return err;
+}
+
+static int set_hw_vf_vlan(struct hinic_dev *nic_dev,
+ u16 cur_vlanprio, int vf, u16 vlan, u8 qos)
+{
+ u16 old_vlan = cur_vlanprio & VLAN_VID_MASK;
+ int err = 0;
+
+ if (vlan || qos) {
+ if (cur_vlanprio) {
+ err = hinic_kill_vf_vlan(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d old vlan %d\n",
+ vf, old_vlan);
+ goto out;
+ }
+ }
+ err = hinic_add_vf_vlan(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf), vlan, qos);
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to add vf %d new vlan %d\n",
+ vf, vlan);
+ goto out;
+ }
+ } else {
+ err = hinic_kill_vf_vlan(nic_dev->hwdev, OS_VF_ID_TO_HW(vf));
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d vlan %d\n",
+ vf, old_vlan);
+ goto out;
+ }
+ }
+
+ err = hinic_update_mac_vlan(nic_dev, old_vlan, vlan,
+ OS_VF_ID_TO_HW(vf));
+
+out:
+ return err;
+}
+
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ u16 vlanprio, cur_vlanprio;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs || vlan >= VLAN_N_VID || qos > HINIC_MAX_QOS)
+ return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+ vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT;
+ cur_vlanprio = hinic_vf_info_vlanprio(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ /* duplicate request, so just return success */
+ if (vlanprio == cur_vlanprio)
+ return 0;
+
+ return set_hw_vf_vlan(nic_dev, cur_vlanprio, vf, vlan, qos);
+}
+
+static int hinic_set_vf_trust(struct hinic_hwdev *hwdev, u16 vf_id,
+ bool trust)
+{
+ struct vf_data_storage *vf_infos;
+ struct hinic_func_to_io *nic_io;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_io = &hwdev->func_to_io;
+ vf_infos = nic_io->vf_infos;
+ vf_infos[vf_id].trust = trust;
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+ struct hinic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ struct hinic_func_to_io *nic_io;
+ bool cur_trust;
+ int err;
+
+ sriov_info = &adapter->sriov_info;
+ nic_io = &adapter->hwdev->func_to_io;
+
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_trust = nic_io->vf_infos[vf].trust;
+ /* same request, so just return success */
+ if (setting == cur_trust)
+ return 0;
+
+ err = hinic_set_vf_trust(adapter->hwdev, vf, setting);
+ if (!err)
+ dev_info(&sriov_info->pdev->dev, "Set VF %d trusted %s succeed\n",
+ vf, setting ? "on" : "off");
+ else
+ dev_err(&sriov_info->pdev->dev, "Failed set VF %d trusted %s\n",
+ vf, setting ? "on" : "off");
+
+ return err;
+}
+
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate)
+{
+ static const u32 speeds[] = {
+ SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000, SPEED_100000
+ };
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_port_cap port_cap = { 0 };
+ enum hinic_port_link_state link_state;
+ int err;
+
+ if (vf >= nic_dev->sriov_info.num_vfs) {
+ netif_err(nic_dev, drv, netdev, "VF number must be less than %d\n",
+ nic_dev->sriov_info.num_vfs);
+ return -EINVAL;
+ }
+
+ err = hinic_port_link_state(nic_dev, &link_state);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Get link status failed when setting vf tx rate\n");
+ return -EIO;
+ }
+
+ if (link_state == HINIC_LINK_STATE_DOWN) {
+ netif_err(nic_dev, drv, netdev,
+ "Link status must be up when setting vf tx rate\n");
+ return -EPERM;
+ }
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err || port_cap.speed > LINK_SPEED_100GB)
+ return -EIO;
+
+	/* rate limit cannot be less than 0 or greater than the link speed */
+ if (max_tx_rate < 0 || max_tx_rate > speeds[port_cap.speed]) {
+ netif_err(nic_dev, drv, netdev, "Max tx rate must be in [0 - %d]\n",
+ speeds[port_cap.speed]);
+ return -EINVAL;
+ }
+
+ err = hinic_set_vf_tx_rate(nic_dev->hwdev, OS_VF_ID_TO_HW(vf),
+ max_tx_rate, min_tx_rate);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Unable to set VF %d max rate %d min rate %d%s\n",
+ vf, max_tx_rate, min_tx_rate,
+ err == HINIC_TX_RATE_TABLE_FULL ?
+ ", tx rate profile is full" : "");
+ return -EIO;
+ }
+
+ netif_info(nic_dev, drv, netdev,
+ "Set VF %d max tx rate %d min tx rate %d successfully\n",
+ vf, max_tx_rate, min_tx_rate);
+
+ return 0;
+}
+
+static int hinic_set_vf_spoofchk(struct hinic_hwdev *hwdev, u16 vf_id,
+ bool spoofchk)
+{
+ struct hinic_spoofchk_set spoofchk_cfg = {0};
+ struct vf_data_storage *vf_infos = NULL;
+ u16 out_size = sizeof(spoofchk_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vf_infos = hwdev->func_to_io.vf_infos;
+
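+	/* the firmware addresses a function by its global id: the
+	 * PF/VF offset plus the HW VF id
+	 */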
+ spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ spoofchk_cfg.state = spoofchk ? 1 : 0;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ENABLE_SPOOFCHK,
+ &spoofchk_cfg, sizeof(spoofchk_cfg),
+ &spoofchk_cfg, &out_size);
+ if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || spoofchk_cfg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status,
+ out_size);
+ err = -EIO;
+ }
+
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk;
+
+ return err;
+}
+
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ bool cur_spoofchk;
+ int err;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
+
+ /* same request, so just return success */
+ if (setting == cur_spoofchk)
+ return 0;
+
+ err = hinic_set_vf_spoofchk(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf), setting);
+ if (!err) {
+ netif_info(nic_dev, drv, netdev, "Set VF %d spoofchk %s successfully\n",
+ vf, setting ? "on" : "off");
+ } else if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+		netif_err(nic_dev, drv, netdev,
+			  "Current firmware doesn't support setting VF spoofchk; please upgrade to the latest firmware version\n");
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int hinic_set_vf_link_state(struct hinic_hwdev *hwdev, u16 vf_id,
+ int link)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+ u8 link_status = 0;
+
+ switch (link) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ?
+ true : false;
+ link_status = nic_io->link_status;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true;
+ link_status = HINIC_LINK_UP;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false;
+ link_status = HINIC_LINK_DOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Notify the VF of its new link state */
+ hinic_notify_vf_link_status(hwdev, vf_id, link_status);
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = &nic_dev->sriov_info;
+
+ if (vf_id >= sriov_info->num_vfs) {
+ netif_err(nic_dev, drv, netdev,
+ "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ return hinic_set_vf_link_state(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf_id), link);
+}
+
+/* PF receives and dispatches messages sent by a VF */
+static int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ u8 size = ARRAY_SIZE(nic_cmd_support_vf);
+ struct vf_cmd_msg_handle *vf_msg_handle;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct hinic_pfhwdev *pfhwdev;
+ int err = 0;
+ u32 i;
+
+ if (!hwdev)
+ return -EINVAL;
+
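+	/* only commands whitelisted in nic_cmd_support_vf are accepted
+	 * from a VF
+	 */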
+ if (!hinic_mbox_check_cmd_valid(hwdev, nic_cmd_support_vf, vf_id, cmd,
+ buf_in, in_size, size)) {
+ dev_err(&dev->hwif->pdev->dev,
+ "PF Receive VF nic cmd: 0x%x, mbox len: 0x%x is invalid\n",
+ cmd, in_size);
+ return HINIC_MBOX_VF_CMD_ERROR;
+ }
+
+ pfhwdev = container_of(dev, struct hinic_pfhwdev, hwdev);
+ nic_io = &dev->func_to_io;
+ for (i = 0; i < ARRAY_SIZE(nic_vf_cmd_msg_handler); i++) {
+ vf_msg_handle = &nic_vf_cmd_msg_handler[i];
+ if (cmd == vf_msg_handle->cmd &&
+ vf_msg_handle->cmd_msg_handler) {
+ err = vf_msg_handle->cmd_msg_handler(hwdev, vf_id,
+ buf_in, in_size,
+ buf_out,
+ out_size);
+ break;
+ }
+ }
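+	/* no PF handler matched: forward the command to the management
+	 * firmware
+	 */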
+ if (i == ARRAY_SIZE(nic_vf_cmd_msg_handler))
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
+ cmd, buf_in, in_size, buf_out,
+ out_size, HINIC_MGMT_MSG_SYNC);
+
+ if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&nic_io->hwif->pdev->dev, "PF receive VF L2NIC cmd: %d process error, err:%d\n",
+ cmd, err);
+ return err;
+}
+
+static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_dev_cap *dev_cap = buf_out;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_cap *cap;
+
+ cap = &dev->nic_cap;
+ memset(dev_cap, 0, sizeof(*dev_cap));
+
+ dev_cap->max_vf = cap->max_vf;
+ dev_cap->max_sqs = cap->max_vf_qps;
+ dev_cap->max_rqs = cap->max_vf_qps;
+ dev_cap->port_id = dev->port_id;
+
+ *out_size = sizeof(*dev_cap);
+
+ return 0;
+}
+
+static int hinic_init_vf_infos(struct hinic_func_to_io *nic_io, u16 vf_id)
+{
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+
+ if (set_vf_link_state > HINIC_IFLA_VF_LINK_STATE_DISABLE) {
+ dev_warn(&nic_io->hwif->pdev->dev, "Module Parameter set_vf_link_state value %d is out of range, resetting to %d\n",
+ set_vf_link_state, HINIC_IFLA_VF_LINK_STATE_AUTO);
+ set_vf_link_state = HINIC_IFLA_VF_LINK_STATE_AUTO;
+ }
+
+ switch (set_vf_link_state) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[vf_id].link_forced = false;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[vf_id].link_forced = true;
+ vf_infos[vf_id].link_up = true;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[vf_id].link_forced = true;
+ vf_infos[vf_id].link_up = false;
+ break;
+ default:
+ dev_err(&nic_io->hwif->pdev->dev, "Invalid input parameter set_vf_link_state: %d\n",
+ set_vf_link_state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
+{
+ struct vf_data_storage *vf_infos;
+
+ vf_infos = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_infos->pf_set_mac)
+ hinic_port_del_mac(nic_dev, vf_infos->vf_mac_addr, 0);
+
+ if (hinic_vf_info_vlanprio(nic_dev->hwdev, vf_id))
+ hinic_kill_vf_vlan(nic_dev->hwdev, vf_id);
+
+ if (vf_infos->max_rate)
+ hinic_set_vf_tx_rate(nic_dev->hwdev, vf_id, 0, 0);
+
+ if (vf_infos->spoofchk)
+ hinic_set_vf_spoofchk(nic_dev->hwdev, vf_id, false);
+
+ if (vf_infos->trust)
+ hinic_set_vf_trust(nic_dev->hwdev, vf_id, false);
+
+ memset(vf_infos, 0, sizeof(*vf_infos));
+ /* set vf_infos to default */
+ hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id));
+}
+
+static void hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
+ u16 start_vf_id, u16 end_vf_id)
+{
+ struct hinic_dev *nic_dev;
+ u16 func_idx, idx;
+
+ nic_dev = container_of(sriov_info, struct hinic_dev, sriov_info);
+
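+	/* restore the HW default WQ page size and clear all per-VF
+	 * configuration
+	 */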
+ for (idx = start_vf_id; idx <= end_vf_id; idx++) {
+ func_idx = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + idx;
+ hinic_set_wq_page_size(nic_dev->hwdev, func_idx,
+ HINIC_HW_WQ_PAGE_SIZE);
+ hinic_clear_vf_infos(nic_dev, idx);
+ }
+}
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_register_vf register_info = {0};
+ u16 out_size = sizeof(register_info);
+ struct hinic_func_to_io *nic_io;
+ int err = 0;
+ u32 size, i;
+
+ err = hinic_vf_mbox_random_id_init(hwdev);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to init vf mbox random id, err: %d\n",
+ err);
+ return err;
+ }
+
+ nic_io = &hwdev->func_to_io;
+
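+	/* a VF registers itself with the PF over the mailbox; the PF
+	 * instead allocates per-VF bookkeeping and registers the mailbox
+	 * handlers that service VF requests
+	 */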
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_REGISTER,
+ &register_info, sizeof(register_info),
+ &register_info, &out_size, 0);
+ if (err || register_info.status || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, register_info.status, out_size);
+ return -EIO;
+ }
+ } else {
+ err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_CFGM,
+ cfg_mbx_pf_proc_vf_msg);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Register PF mailbox callback failed\n");
+ return err;
+ }
+ nic_io->max_vfs = hwdev->nic_cap.max_vf;
+ size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs;
+ if (size != 0) {
+ nic_io->vf_infos = kzalloc(size, GFP_KERNEL);
+ if (!nic_io->vf_infos) {
+ err = -ENOMEM;
+ goto out_free_nic_io;
+ }
+
+ for (i = 0; i < nic_io->max_vfs; i++) {
+ err = hinic_init_vf_infos(nic_io, i);
+ if (err)
+ goto err_init_vf_infos;
+ }
+
+ err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ nic_pf_mbox_handler);
+ if (err)
+ goto err_register_pf_mbox_cb;
+ }
+ }
+
+ return 0;
+
+err_register_pf_mbox_cb:
+err_init_vf_infos:
+ kfree(nic_io->vf_infos);
+out_free_nic_io:
+ return err;
+}
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_register_vf unregister = {0};
+ u16 out_size = sizeof(unregister);
+ int err;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_UNREGISTER,
+ &unregister, sizeof(unregister),
+ &unregister, &out_size, 0);
+ if (err || !out_size || unregister.status)
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, unregister.status, out_size);
+ } else {
+ if (hwdev->func_to_io.vf_infos) {
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ kfree(hwdev->func_to_io.vf_infos);
+ }
+ }
+}
+
+static int hinic_init_vf_hw(struct hinic_hwdev *hwdev, u16 start_vf_id,
+ u16 end_vf_id)
+{
+ u16 i, func_idx;
+ int err;
+
+	/* VFs use 256K as the default WQ page size and cannot change it */
+ for (i = start_vf_id; i <= end_vf_id; i++) {
+ func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + i;
+ err = hinic_set_wq_page_size(hwdev, func_idx,
+ HINIC_DEFAULT_WQ_PAGE_SIZE);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic_pci_sriov_disable(struct pci_dev *pdev)
+{
+ struct hinic_sriov_info *sriov_info;
+ u16 tmp_vfs;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+ /* if SR-IOV is already disabled then nothing will be done */
+ if (!sriov_info->sriov_enabled)
+ return 0;
+
+ set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(sriov_info->pdev)) {
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+ dev_warn(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return -EPERM;
+ }
+ sriov_info->sriov_enabled = false;
+
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(sriov_info->pdev);
+
+ tmp_vfs = (u16)sriov_info->num_vfs;
+ sriov_info->num_vfs = 0;
+ hinic_deinit_vf_hw(sriov_info, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW(tmp_vfs - 1));
+
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+
+ return 0;
+}
+
+static int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct hinic_sriov_info *sriov_info;
+ int err;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+
+ if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) {
+		dev_err(&pdev->dev,
+			"SR-IOV enable in progress, please wait, num_vfs %d\n",
+			num_vfs);
+ return -EPERM;
+ }
+
+ err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW((u16)num_vfs - 1));
+ if (err) {
+		dev_err(&sriov_info->pdev->dev,
+			"Failed to init VFs in hardware before enabling SR-IOV, error %d\n",
+			err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ err = pci_enable_sriov(sriov_info->pdev, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to enable SR-IOV, error %d\n", err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ sriov_info->sriov_enabled = true;
+ sriov_info->num_vfs = num_vfs;
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+
+ return num_vfs;
+}
+
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+
+ if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state))
+ return -EBUSY;
+
+ if (!num_vfs)
+ return hinic_pci_sriov_disable(dev);
+ else
+ return hinic_pci_sriov_enable(dev, num_vfs);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
new file mode 100644
index 0000000000..d4d4e63d31
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_SRIOV_H
+#define HINIC_SRIOV_H
+
+#include "hinic_hw_dev.h"
+
+#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)
+#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)
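+
+/* HW VF ids are 1-based; OS (netdev) VF ids are 0-based */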
+
+enum hinic_sriov_state {
+ HINIC_SRIOV_DISABLE,
+ HINIC_SRIOV_ENABLE,
+ HINIC_FUNC_REMOVE,
+};
+
+enum {
+ HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
+ HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */
+ HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */
+};
+
+struct hinic_sriov_info {
+ struct pci_dev *pdev;
+ struct hinic_hwdev *hwdev;
+ bool sriov_enabled;
+ unsigned int num_vfs;
+ unsigned long state;
+};
+
+struct vf_data_storage {
+ u8 vf_mac_addr[ETH_ALEN];
+ bool registered;
+ bool pf_set_mac;
+ u16 pf_vlan;
+ u8 pf_qos;
+ u32 max_rate;
+ u32 min_rate;
+
+ bool link_forced;
+ bool link_up; /* only valid if VF link is forced */
+ bool spoofchk;
+ bool trust;
+};
+
+struct hinic_register_vf {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct hinic_port_mac_update {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct hinic_vf_vlan_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u8 qos;
+ u8 rsvd1[7];
+};
+
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto);
+
+int hinic_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
+
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate);
+
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+
+void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
+ u8 link_status);
+
+int hinic_pci_sriov_disable(struct pci_dev *dev);
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
new file mode 100644
index 0000000000..ad47ac51a1
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -0,0 +1,934 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/smp.h>
+#include <asm/byteorder.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include "hinic_common.h"
+#include "hinic_hw_if.h"
+#include "hinic_hw_wqe.h"
+#include "hinic_hw_wq.h"
+#include "hinic_hw_qp.h"
+#include "hinic_hw_dev.h"
+#include "hinic_dev.h"
+#include "hinic_tx.h"
+
+#define TX_IRQ_NO_PENDING 0
+#define TX_IRQ_NO_COALESC 0
+#define TX_IRQ_NO_LLI_TIMER 0
+#define TX_IRQ_NO_CREDIT 0
+#define TX_IRQ_NO_RESEND_TIMER 0
+
+#define CI_UPDATE_NO_PENDING 0
+#define CI_UPDATE_NO_COALESC 0
+
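+/* the HW writes its consumer index at hw_ci_addr in big-endian */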
+#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
+
+#define MIN_SKB_LEN 32
+
+#define MAX_PAYLOAD_OFFSET 221
+#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
+
+union hinic_l3 {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union hinic_l4 {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+};
+
+enum hinic_offload_type {
+ TX_OFFLOAD_TSO = BIT(0),
+ TX_OFFLOAD_CSUM = BIT(1),
+ TX_OFFLOAD_VLAN = BIT(2),
+ TX_OFFLOAD_INVALID = BIT(3),
+};
+
+/**
+ * hinic_txq_clean_stats - Clean the statistics of the specified queue
+ * @txq: Logical Tx Queue
+ **/
+static void hinic_txq_clean_stats(struct hinic_txq *txq)
+{
+ struct hinic_txq_stats *txq_stats = &txq->txq_stats;
+
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->pkts = 0;
+ txq_stats->bytes = 0;
+ txq_stats->tx_busy = 0;
+ txq_stats->tx_wake = 0;
+ txq_stats->tx_dropped = 0;
+ txq_stats->big_frags_pkts = 0;
+ u64_stats_update_end(&txq_stats->syncp);
+}
+
+/**
+ * hinic_txq_get_stats - get statistics of Tx Queue
+ * @txq: Logical Tx Queue
+ * @stats: return updated stats here
+ **/
+void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
+{
+ struct hinic_txq_stats *txq_stats = &txq->txq_stats;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ stats->pkts = txq_stats->pkts;
+ stats->bytes = txq_stats->bytes;
+ stats->tx_busy = txq_stats->tx_busy;
+ stats->tx_wake = txq_stats->tx_wake;
+ stats->tx_dropped = txq_stats->tx_dropped;
+ stats->big_frags_pkts = txq_stats->big_frags_pkts;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+}
+
+/**
+ * txq_stats_init - Initialize the statistics of the specified queue
+ * @txq: Logical Tx Queue
+ **/
+static void txq_stats_init(struct hinic_txq *txq)
+{
+ struct hinic_txq_stats *txq_stats = &txq->txq_stats;
+
+ u64_stats_init(&txq_stats->syncp);
+ hinic_txq_clean_stats(txq);
+}
+
+/**
+ * tx_map_skb - dma mapping for skb and return sges
+ * @nic_dev: nic device
+ * @skb: the skb
+ * @sges: returned sges
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
+ struct hinic_sge *sges)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ skb_frag_t *frag;
+ dma_addr_t dma_addr;
+ int i, j;
+
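+	/* sges[0] maps the linear part of the skb; the page fragments
+	 * are mapped into sges[1..nr_frags] below
+	 */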
+ dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_addr)) {
+ dev_err(&pdev->dev, "Failed to map Tx skb data\n");
+ return -EFAULT;
+ }
+
+ hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+
+ dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_addr)) {
+ dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
+ goto err_tx_map;
+ }
+
+ hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
+ }
+
+ return 0;
+
+err_tx_map:
+ for (j = 0; j < i; j++)
+ dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
+ sges[j + 1].len, DMA_TO_DEVICE);
+
+ dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
+ DMA_TO_DEVICE);
+ return -EFAULT;
+}
+
+/**
+ * tx_unmap_skb - unmap the dma address of the skb
+ * @nic_dev: nic device
+ * @skb: the skb
+ * @sges: the sges that are connected to the skb
+ **/
+static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
+ struct hinic_sge *sges)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int i;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
+ sges[i + 1].len, DMA_TO_DEVICE);
+
+ dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
+ DMA_TO_DEVICE);
+}
+
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
+ union hinic_l4 *l4,
+ enum hinic_offload_type offload_type,
+ enum hinic_l3_offload_type *l3_type,
+ u8 *l4_proto)
+{
+ u8 *exthdr;
+
+ if (ip->v4->version == 4) {
+ *l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
+ IPV4_PKT_NO_CHKSUM_OFFLOAD :
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ *l4_proto = ip->v4->protocol;
+ } else if (ip->v4->version == 6) {
+ *l3_type = IPV6_PKT;
+ exthdr = ip->hdr + sizeof(*ip->v6);
+ *l4_proto = ip->v6->nexthdr;
+ if (exthdr != l4->hdr) {
+ int start = exthdr - skb->data;
+ __be16 frag_off;
+
+ ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
+ }
+ } else {
+ *l3_type = L3TYPE_UNKNOWN;
+ *l4_proto = 0;
+ }
+}
+
+static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
+ enum hinic_offload_type offload_type, u8 l4_proto,
+ enum hinic_l4_offload_type *l4_offload,
+ u32 *l4_len, u32 *offset)
+{
+ *l4_offload = OFFLOAD_DISABLE;
+ *offset = 0;
+ *l4_len = 0;
+
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ *l4_offload = TCP_OFFLOAD_ENABLE;
+		/* doff is in units of 4 bytes */
+ *l4_len = l4->tcp->doff * 4;
+ *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_UDP:
+ *l4_offload = UDP_OFFLOAD_ENABLE;
+ *l4_len = sizeof(struct udphdr);
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_SCTP:
+		/* only csum offload supports SCTP */
+ if (offload_type != TX_OFFLOAD_CSUM)
+ break;
+
+ *l4_offload = SCTP_OFFLOAD_ENABLE;
+ *l4_len = sizeof(struct sctphdr);
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ default:
+ break;
+ }
+}
+
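+/* pseudo-header checksum with zero length and payload sum; callers
+ * invert it to seed the HW TCP/UDP checksum computation
+ */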
+static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
+{
+ return (ip->v4->version == 4) ?
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+ csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
+
+static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ u32 offset, l4_len, ip_identify, network_hdr_len;
+ enum hinic_l3_offload_type l3_offload;
+ enum hinic_l4_offload_type l4_offload;
+ union hinic_l3 ip;
+ union hinic_l4 l4;
+ u8 l4_proto;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_cow_head(skb, 0) < 0)
+ return -EPROTONOSUPPORT;
+
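+	/* tunnel packet: program the outer L3 and tunnel L4 headers
+	 * first, then switch ip/l4 to the inner headers for the TSO
+	 * fields set up below
+	 */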
+ if (skb->encapsulation) {
+ u32 gso_type = skb_shinfo(skb)->gso_type;
+ u32 tunnel_type = 0;
+ u32 l4_tunnel_len;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ } else if (ip.v4->version == 6) {
+ l3_offload = IPV6_PKT;
+ } else {
+ l3_offload = 0;
+ }
+
+ hinic_task_set_outter_l3(task, l3_offload,
+ skb_network_header_len(skb));
+
+ if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ tunnel_type = TUNNEL_UDP_CSUM;
+ } else if (gso_type & SKB_GSO_UDP_TUNNEL) {
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
+ }
+
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ }
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4)
+ ip.v4->tot_len = 0;
+ else
+ ip.v6->payload_len = 0;
+
+ get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
+ &l4_proto);
+
+ hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);
+
+ ip_identify = 0;
+ if (l4_proto == IPPROTO_TCP)
+ l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+ get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
+ &l4_len, &offset);
+
+ hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
+ ip_identify, skb_shinfo(skb)->gso_size);
+
+ return 1;
+}
+
+static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ enum hinic_l4_offload_type l4_offload;
+ u32 offset, l4_len, network_hdr_len;
+ enum hinic_l3_offload_type l3_type;
+ u32 tunnel_type = NOT_TUNNEL;
+ union hinic_l3 ip;
+ union hinic_l4 l4;
+ u8 l4_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->encapsulation) {
+ u32 l4_tunnel_len;
+
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
+ ip.hdr = skb_network_header(skb);
+
+ if (ip.v4->version == 4) {
+ l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
+ l4_proto = ip.v4->protocol;
+ } else if (ip.v4->version == 6) {
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ l3_type = IPV6_PKT;
+ tunnel_type = TUNNEL_UDP_CSUM;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ } else {
+ l3_type = L3TYPE_UNKNOWN;
+ l4_proto = IPPROTO_RAW;
+ }
+
+ hinic_task_set_outter_l3(task, l3_type,
+ skb_network_header_len(skb));
+
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ tunnel_type = NOT_TUNNEL;
+ l4_tunnel_len = 0;
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ break;
+ default:
+ /* Unsupported tunnel packet, disable csum offload */
+ skb_checksum_help(skb);
+ return 0;
+ }
+
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ }
+
+ get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
+ &l4_proto);
+
+ hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
+
+ get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
+ &l4_len, &offset);
+
+ hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);
+
+ return 1;
+}
+
+static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
+ u16 vlan_tag, u16 vlan_pri)
+{
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
+ HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);
+
+ *queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
+}
+
+static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
+ u32 *queue_info)
+{
+ enum hinic_offload_type offload = 0;
+ u16 vlan_tag;
+ int enabled;
+
+ enabled = offload_tso(task, queue_info, skb);
+ if (enabled > 0) {
+ offload |= TX_OFFLOAD_TSO;
+ } else if (enabled == 0) {
+ enabled = offload_csum(task, queue_info, skb);
+ if (enabled)
+ offload |= TX_OFFLOAD_CSUM;
+ } else {
+ return -EPROTONOSUPPORT;
+ }
+
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ vlan_tag = skb_vlan_tag_get(skb);
+ offload_vlan(task, queue_info, vlan_tag,
+ vlan_tag >> VLAN_PRIO_SHIFT);
+ offload |= TX_OFFLOAD_VLAN;
+ }
+
+ if (offload)
+ hinic_task_set_l2hdr(task, skb_network_offset(skb));
+
+	/* the payload offset must not exceed 221 bytes */
+ if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
+ MAX_PAYLOAD_OFFSET) {
+ return -EPROTONOSUPPORT;
+ }
+
+	/* the MSS must not be less than 80 */
+ if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
+ *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+ *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
+ }
+
+ return 0;
+}
+
+netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 prod_idx, q_id = skb->queue_mapping;
+ struct netdev_queue *netdev_txq;
+ int nr_sges, err = NETDEV_TX_OK;
+ struct hinic_sq_wqe *sq_wqe;
+ unsigned int wqe_size;
+ struct hinic_txq *txq;
+ struct hinic_qp *qp;
+
+ txq = &nic_dev->txqs[q_id];
+ qp = container_of(txq->sq, struct hinic_qp, sq);
+ nr_sges = skb_shinfo(skb)->nr_frags + 1;
+
+ err = tx_map_skb(nic_dev, skb, txq->sges);
+ if (err)
+ goto skb_error;
+
+ wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);
+
+ sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+ if (!sq_wqe) {
+ netif_stop_subqueue(netdev, qp->q_id);
+
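+		/* retry: free_tx_poll may have freed WQEBBs on another
+		 * CPU after we stopped the subqueue
+		 */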
+ sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+ if (sq_wqe) {
+ netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+ goto process_sq_wqe;
+ }
+
+ tx_unmap_skb(nic_dev, skb, txq->sges);
+
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.tx_busy++;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+ err = NETDEV_TX_BUSY;
+ wqe_size = 0;
+ goto flush_skbs;
+ }
+
+process_sq_wqe:
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
+
+flush_skbs:
+ netdev_txq = netdev_get_tx_queue(netdev, q_id);
+	if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
+ hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
+
+ return err;
+
+skb_error:
+ dev_kfree_skb_any(skb);
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.tx_dropped++;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 prod_idx, q_id = skb->queue_mapping;
+ struct netdev_queue *netdev_txq;
+ int nr_sges, err = NETDEV_TX_OK;
+ struct hinic_sq_wqe *sq_wqe;
+ unsigned int wqe_size;
+ struct hinic_txq *txq;
+ struct hinic_qp *qp;
+
+ txq = &nic_dev->txqs[q_id];
+ qp = container_of(txq->sq, struct hinic_qp, sq);
+
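+	/* pad short frames up to the minimum length the HW accepts */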
+ if (skb->len < MIN_SKB_LEN) {
+ if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
+ netdev_err(netdev, "Failed to pad skb\n");
+ goto update_error_stats;
+ }
+
+ skb->len = MIN_SKB_LEN;
+ }
+
+ nr_sges = skb_shinfo(skb)->nr_frags + 1;
+ if (nr_sges > 17) {
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.big_frags_pkts++;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+ }
+
+ if (nr_sges > txq->max_sges) {
+ netdev_err(netdev, "Too many Tx sges\n");
+ goto skb_error;
+ }
+
+ err = tx_map_skb(nic_dev, skb, txq->sges);
+ if (err)
+ goto skb_error;
+
+ wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);
+
+ sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+ if (!sq_wqe) {
+ netif_stop_subqueue(netdev, qp->q_id);
+
+		/* Check for the case where free_tx_poll ran on another
+		 * CPU and freed WQEBBs after we stopped the subqueue.
+		 */
+ sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+ if (sq_wqe) {
+ netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+ goto process_sq_wqe;
+ }
+
+ tx_unmap_skb(nic_dev, skb, txq->sges);
+
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.tx_busy++;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+ err = NETDEV_TX_BUSY;
+ wqe_size = 0;
+ goto flush_skbs;
+ }
+
+process_sq_wqe:
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
+
+ err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
+ if (err)
+ goto offload_error;
+
+ hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
+
+flush_skbs:
+ netdev_txq = netdev_get_tx_queue(netdev, q_id);
+	if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
+ hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
+
+ return err;
+
+offload_error:
+ hinic_sq_return_wqe(txq->sq, wqe_size);
+ tx_unmap_skb(nic_dev, skb, txq->sges);
+
+skb_error:
+ dev_kfree_skb_any(skb);
+
+update_error_stats:
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.tx_dropped++;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * tx_free_skb - unmap and free skb
+ * @nic_dev: nic device
+ * @skb: the skb
+ * @sges: the sges that are connected to the skb
+ **/
+static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
+ struct hinic_sge *sges)
+{
+ tx_unmap_skb(nic_dev, skb, sges);
+
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * free_all_tx_skbs - free all skbs in tx queue
+ * @txq: tx queue
+ **/
+static void free_all_tx_skbs(struct hinic_txq *txq)
+{
+ struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
+ struct hinic_sq *sq = txq->sq;
+ struct hinic_sq_wqe *sq_wqe;
+ unsigned int wqe_size;
+ struct sk_buff *skb;
+ int nr_sges;
+ u16 ci;
+
+ while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
+ sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
+ if (!sq_wqe)
+ break;
+
+ nr_sges = skb_shinfo(skb)->nr_frags + 1;
+
+ hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
+
+ hinic_sq_put_wqe(sq, wqe_size);
+
+ tx_free_skb(nic_dev, skb, txq->free_sges);
+ }
+}
+
+/**
+ * free_tx_poll - free finished Tx skbs in the Tx queue connected to the napi
+ * @napi: napi
+ * @budget: maximum number of Tx packets to process
+ *
+ * Return: number of packets processed
+ **/
+static int free_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
+ struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
+ struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
+ struct netdev_queue *netdev_txq;
+ struct hinic_sq *sq = txq->sq;
+ struct hinic_wq *wq = sq->wq;
+ struct hinic_sq_wqe *sq_wqe;
+ unsigned int wqe_size;
+ int nr_sges, pkts = 0;
+ struct sk_buff *skb;
+ u64 tx_bytes = 0;
+ u16 hw_ci, sw_ci;
+
+ do {
+ hw_ci = HW_CONS_IDX(sq) & wq->mask;
+
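+		/* ensure the HW consumer index is read before the WQE
+		 * contents it covers
+		 */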
+ dma_rmb();
+
+		/* Read one WQEBB to get the real WQE size and the consumer index. */
+ sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
+ if (!sq_wqe ||
+ (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
+ break;
+
+		/* If this WQE has multiple WQEBBs, read again to get the
+		 * full-size WQE.
+		 */
+ if (wqe_size > wq->wqebb_size) {
+ sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
+ if (unlikely(!sq_wqe))
+ break;
+ }
+
+ tx_bytes += skb->len;
+ pkts++;
+
+ nr_sges = skb_shinfo(skb)->nr_frags + 1;
+
+ hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
+
+ hinic_sq_put_wqe(sq, wqe_size);
+
+ tx_free_skb(nic_dev, skb, txq->free_sges);
+ } while (pkts < budget);
+
+ if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
+ hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
+ netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);
+
+ __netif_tx_lock(netdev_txq, smp_processor_id());
+ if (!netif_testing(nic_dev->netdev))
+ netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+
+ __netif_tx_unlock(netdev_txq);
+
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.tx_wake++;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+ }
+
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.bytes += tx_bytes;
+ txq->txq_stats.pkts += pkts;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+
+ if (pkts < budget) {
+ napi_complete(napi);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ sq->msix_entry,
+ HINIC_MSIX_ENABLE);
+
+ return pkts;
+ }
+
+ return budget;
+}
+
+static irqreturn_t tx_irq(int irq, void *data)
+{
+ struct hinic_txq *txq = data;
+ struct hinic_dev *nic_dev;
+
+ nic_dev = netdev_priv(txq->netdev);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+		/* Disable the interrupt until NAPI processing completes */
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ txq->sq->msix_entry,
+ HINIC_MSIX_DISABLE);
+
+ hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
+
+ napi_schedule(&txq->napi);
+ return IRQ_HANDLED;
+}
+
+static int tx_request_irq(struct hinic_txq *txq)
+{
+ struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
+ struct hinic_msix_config interrupt_info = {0};
+ struct hinic_intr_coal_info *intr_coal = NULL;
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_sq *sq = txq->sq;
+ struct hinic_qp *qp;
+ int err;
+
+ qp = container_of(sq, struct hinic_qp, sq);
+
+ netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll,
+ nic_dev->tx_weight);
+
+ hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
+ TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
+ TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
+ TX_IRQ_NO_RESEND_TIMER);
+
+ intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
+ interrupt_info.msix_index = sq->msix_entry;
+ interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
+ interrupt_info.pending_cnt = intr_coal->pending_limt;
+ interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
+
+ err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
+ if (err) {
+ netif_err(nic_dev, drv, txq->netdev,
+ "Failed to set TX interrupt coalescing attribute\n");
+ netif_napi_del(&txq->napi);
+ return err;
+ }
+
+ err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request Tx irq\n");
+ netif_napi_del(&txq->napi);
+ return err;
+ }
+
+ return 0;
+}
+
+static void tx_free_irq(struct hinic_txq *txq)
+{
+ struct hinic_sq *sq = txq->sq;
+
+ free_irq(sq->irq, txq);
+ netif_napi_del(&txq->napi);
+}
+
+/**
+ * hinic_init_txq - Initialize the Tx Queue
+ * @txq: Logical Tx Queue
+ * @sq: Hardware Tx Queue to connect the Logical queue with
+ * @netdev: network device to connect the Logical queue with
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
+ struct net_device *netdev)
+{
+ struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ int err, irqname_len;
+
+ txq->netdev = netdev;
+ txq->sq = sq;
+
+ txq_stats_init(txq);
+
+ txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
+
+ txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->sges), GFP_KERNEL);
+ if (!txq->sges)
+ return -ENOMEM;
+
+ txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->free_sges), GFP_KERNEL);
+ if (!txq->free_sges) {
+ err = -ENOMEM;
+ goto err_alloc_free_sges;
+ }
+
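+	/* snprintf(NULL, 0, ...) returns the formatted length; +1 for the NUL */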
+ irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
+ txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
+ if (!txq->irq_name) {
+ err = -ENOMEM;
+ goto err_alloc_irqname;
+ }
+
+ sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);
+
+ err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
+ CI_UPDATE_NO_COALESC);
+ if (err)
+ goto err_hw_ci;
+
+ err = tx_request_irq(txq);
+ if (err) {
+ netdev_err(netdev, "Failed to request Tx irq\n");
+ goto err_req_tx_irq;
+ }
+
+ return 0;
+
+err_req_tx_irq:
+err_hw_ci:
+ devm_kfree(&netdev->dev, txq->irq_name);
+
+err_alloc_irqname:
+ devm_kfree(&netdev->dev, txq->free_sges);
+
+err_alloc_free_sges:
+ devm_kfree(&netdev->dev, txq->sges);
+ return err;
+}
+
+/**
+ * hinic_clean_txq - Clean the Tx Queue
+ * @txq: Logical Tx Queue
+ **/
+void hinic_clean_txq(struct hinic_txq *txq)
+{
+ struct net_device *netdev = txq->netdev;
+
+ tx_free_irq(txq);
+
+ free_all_tx_skbs(txq);
+
+ devm_kfree(&netdev->dev, txq->irq_name);
+ devm_kfree(&netdev->dev, txq->free_sges);
+ devm_kfree(&netdev->dev, txq->sges);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
new file mode 100644
index 0000000000..91dc778362
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_TX_H
+#define HINIC_TX_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+
+#include "hinic_common.h"
+#include "hinic_hw_qp.h"
+
+struct hinic_txq_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 tx_busy;
+ u64 tx_wake;
+ u64 tx_dropped;
+ u64 big_frags_pkts;
+
+ struct u64_stats_sync syncp;
+};
+
+struct hinic_txq {
+ struct net_device *netdev;
+ struct hinic_sq *sq;
+
+ struct hinic_txq_stats txq_stats;
+
+ int max_sges;
+ struct hinic_sge *sges;
+ struct hinic_sge *free_sges;
+
+ char *irq_name;
+ struct napi_struct napi;
+};
+
+void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
+
+netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+
+netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+
+int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
+ struct net_device *netdev);
+
+void hinic_clean_txq(struct hinic_txq *txq);
+
+#endif