path: root/drivers/net/ethernet/microsoft
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/microsoft
parent    Initial commit. (diff)
download  linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
          linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/microsoft')
-rw-r--r--  drivers/net/ethernet/microsoft/Kconfig               30
-rw-r--r--  drivers/net/ethernet/microsoft/Makefile               5
-rw-r--r--  drivers/net/ethernet/microsoft/mana/Makefile           6
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma.h           695
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c     1506
-rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.c     848
-rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.h     195
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana.h           634
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_bpf.c       226
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c       2273
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_ethtool.c   268
-rw-r--r--  drivers/net/ethernet/microsoft/mana/shm_channel.c    291
-rw-r--r--  drivers/net/ethernet/microsoft/mana/shm_channel.h     21
13 files changed, 6998 insertions, 0 deletions
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
new file mode 100644
index 000000000..8b6c4cc37
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -0,0 +1,30 @@
+#
+# Microsoft Azure network device configuration
+#
+
+config NET_VENDOR_MICROSOFT
+ bool "Microsoft Network Devices"
+ default y
+ help
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip the
+ question about Microsoft network devices. If you say Y, you will be
+ asked for your specific device in the following question.
+
+if NET_VENDOR_MICROSOFT
+
+config MICROSOFT_MANA
+ tristate "Microsoft Azure Network Adapter (MANA) support"
+ depends on PCI_MSI && X86_64
+ depends on PCI_HYPERV
+ select PAGE_POOL
+ help
+ This driver supports Microsoft Azure Network Adapter (MANA).
+ So far, the driver is only supported on X86_64.
+
+ To compile this driver as a module, choose M here.
+ The module will be called mana.
+
+endif #NET_VENDOR_MICROSOFT
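
The Kconfig help above says the driver builds as a module named mana. As a rough illustration only (not part of this commit), a configuration fragment that satisfies the dependencies listed for MICROSOFT_MANA on an x86_64 kernel might look like the following; PAGE_POOL is selected automatically, and the exact values depend on the rest of the kernel configuration.

    # Illustrative .config fragment; dependency symbols taken from the Kconfig above
    CONFIG_PCI_MSI=y
    CONFIG_PCI_HYPERV=m
    CONFIG_NET_VENDOR_MICROSOFT=y
    CONFIG_MICROSOFT_MANA=m
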
diff --git a/drivers/net/ethernet/microsoft/Makefile b/drivers/net/ethernet/microsoft/Makefile
new file mode 100644
index 000000000..d2ddc2181
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Microsoft Azure network device driver.
+#
+
+obj-$(CONFIG_MICROSOFT_MANA) += mana/
diff --git a/drivers/net/ethernet/microsoft/mana/Makefile b/drivers/net/ethernet/microsoft/mana/Makefile
new file mode 100644
index 000000000..e16a4221f
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+#
+# Makefile for the Microsoft Azure Network Adapter driver
+
+obj-$(CONFIG_MICROSOFT_MANA) += mana.o
+mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o mana_bpf.o
diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
new file mode 100644
index 000000000..48b0ab56b
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/gdma.h
@@ -0,0 +1,695 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _GDMA_H
+#define _GDMA_H
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+
+#include "shm_channel.h"
+
+/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+enum gdma_request_type {
+ GDMA_VERIFY_VF_DRIVER_VERSION = 1,
+ GDMA_QUERY_MAX_RESOURCES = 2,
+ GDMA_LIST_DEVICES = 3,
+ GDMA_REGISTER_DEVICE = 4,
+ GDMA_DEREGISTER_DEVICE = 5,
+ GDMA_GENERATE_TEST_EQE = 10,
+ GDMA_CREATE_QUEUE = 12,
+ GDMA_DISABLE_QUEUE = 13,
+ GDMA_CREATE_DMA_REGION = 25,
+ GDMA_DMA_REGION_ADD_PAGES = 26,
+ GDMA_DESTROY_DMA_REGION = 27,
+};
+
+enum gdma_queue_type {
+ GDMA_INVALID_QUEUE,
+ GDMA_SQ,
+ GDMA_RQ,
+ GDMA_CQ,
+ GDMA_EQ,
+};
+
+enum gdma_work_request_flags {
+ GDMA_WR_NONE = 0,
+ GDMA_WR_OOB_IN_SGL = BIT(0),
+ GDMA_WR_PAD_BY_SGE0 = BIT(1),
+};
+
+enum gdma_eqe_type {
+ GDMA_EQE_COMPLETION = 3,
+ GDMA_EQE_TEST_EVENT = 64,
+ GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
+ GDMA_EQE_HWC_INIT_DATA = 130,
+ GDMA_EQE_HWC_INIT_DONE = 131,
+};
+
+enum {
+ GDMA_DEVICE_NONE = 0,
+ GDMA_DEVICE_HWC = 1,
+ GDMA_DEVICE_MANA = 2,
+};
+
+struct gdma_resource {
+ /* Protect the bitmap */
+ spinlock_t lock;
+
+ /* The bitmap size in bits. */
+ u32 size;
+
+ /* The bitmap tracks the resources. */
+ unsigned long *map;
+};
+
+union gdma_doorbell_entry {
+ u64 as_uint64;
+
+ struct {
+ u64 id : 24;
+ u64 reserved : 8;
+ u64 tail_ptr : 31;
+ u64 arm : 1;
+ } cq;
+
+ struct {
+ u64 id : 24;
+ u64 wqe_cnt : 8;
+ u64 tail_ptr : 32;
+ } rq;
+
+ struct {
+ u64 id : 24;
+ u64 reserved : 8;
+ u64 tail_ptr : 32;
+ } sq;
+
+ struct {
+ u64 id : 16;
+ u64 reserved : 16;
+ u64 tail_ptr : 31;
+ u64 arm : 1;
+ } eq;
+}; /* HW DATA */
+
+struct gdma_msg_hdr {
+ u32 hdr_type;
+ u32 msg_type;
+ u16 msg_version;
+ u16 hwc_msg_id;
+ u32 msg_size;
+}; /* HW DATA */
+
+struct gdma_dev_id {
+ union {
+ struct {
+ u16 type;
+ u16 instance;
+ };
+
+ u32 as_uint32;
+ };
+}; /* HW DATA */
+
+struct gdma_req_hdr {
+ struct gdma_msg_hdr req;
+ struct gdma_msg_hdr resp; /* The expected response */
+ struct gdma_dev_id dev_id;
+ u32 activity_id;
+}; /* HW DATA */
+
+struct gdma_resp_hdr {
+ struct gdma_msg_hdr response;
+ struct gdma_dev_id dev_id;
+ u32 activity_id;
+ u32 status;
+ u32 reserved;
+}; /* HW DATA */
+
+struct gdma_general_req {
+ struct gdma_req_hdr hdr;
+}; /* HW DATA */
+
+#define GDMA_MESSAGE_V1 1
+
+struct gdma_general_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+#define GDMA_STANDARD_HEADER_TYPE 0
+
+static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
+ u32 req_size, u32 resp_size)
+{
+ hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
+ hdr->req.msg_type = code;
+ hdr->req.msg_version = GDMA_MESSAGE_V1;
+ hdr->req.msg_size = req_size;
+
+ hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
+ hdr->resp.msg_type = code;
+ hdr->resp.msg_version = GDMA_MESSAGE_V1;
+ hdr->resp.msg_size = resp_size;
+}
+
+/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
+struct gdma_sge {
+ u64 address;
+ u32 mem_key;
+ u32 size;
+}; /* HW DATA */
+
+struct gdma_wqe_request {
+ struct gdma_sge *sgl;
+ u32 num_sge;
+
+ u32 inline_oob_size;
+ const void *inline_oob_data;
+
+ u32 flags;
+ u32 client_data_unit;
+};
+
+enum gdma_page_type {
+ GDMA_PAGE_TYPE_4K,
+};
+
+#define GDMA_INVALID_DMA_REGION 0
+
+struct gdma_mem_info {
+ struct device *dev;
+
+ dma_addr_t dma_handle;
+ void *virt_addr;
+ u64 length;
+
+ /* Allocated by the PF driver */
+ u64 gdma_region;
+};
+
+#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
+
+struct gdma_dev {
+ struct gdma_context *gdma_context;
+
+ struct gdma_dev_id dev_id;
+
+ u32 pdid;
+ u32 doorbell;
+ u32 gpa_mkey;
+
+ /* GDMA driver specific pointer */
+ void *driver_data;
+};
+
+#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
+
+#define GDMA_CQE_SIZE 64
+#define GDMA_EQE_SIZE 16
+#define GDMA_MAX_SQE_SIZE 512
+#define GDMA_MAX_RQE_SIZE 256
+
+#define GDMA_COMP_DATA_SIZE 0x3C
+
+#define GDMA_EVENT_DATA_SIZE 0xC
+
+/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
+#define GDMA_WQE_BU_SIZE 32
+
+#define INVALID_PDID UINT_MAX
+#define INVALID_DOORBELL UINT_MAX
+#define INVALID_MEM_KEY UINT_MAX
+#define INVALID_QUEUE_ID UINT_MAX
+#define INVALID_PCI_MSIX_INDEX UINT_MAX
+
+struct gdma_comp {
+ u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
+ u32 wq_num;
+ bool is_sq;
+};
+
+struct gdma_event {
+ u32 details[GDMA_EVENT_DATA_SIZE / 4];
+ u8 type;
+};
+
+struct gdma_queue;
+
+struct mana_eq {
+ struct gdma_queue *eq;
+};
+
+typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
+ struct gdma_event *e);
+
+typedef void gdma_cq_callback(void *context, struct gdma_queue *q);
+
+/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
+ * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
+ * driver increases the 'head' in BUs rather than in bytes, and notifies
+ * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
+ * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
+ *
+ * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
+ * processed, the driver increases the 'tail' to indicate that WQEs have
+ * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
+ *
+ * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
+ * that the EQ/CQ is big enough so they can't overflow, and the driver uses
+ * the owner bits mechanism to detect if the queue has become empty.
+ */
+struct gdma_queue {
+ struct gdma_dev *gdma_dev;
+
+ enum gdma_queue_type type;
+ u32 id;
+
+ struct gdma_mem_info mem_info;
+
+ void *queue_mem_ptr;
+ u32 queue_size;
+
+ bool monitor_avl_buf;
+
+ u32 head;
+ u32 tail;
+
+ /* Extra fields specific to EQ/CQ. */
+ union {
+ struct {
+ bool disable_needed;
+
+ gdma_eq_callback *callback;
+ void *context;
+
+ unsigned int msix_index;
+
+ u32 log2_throttle_limit;
+ } eq;
+
+ struct {
+ gdma_cq_callback *callback;
+ void *context;
+
+ struct gdma_queue *parent; /* For CQ/EQ relationship */
+ } cq;
+ };
+};
+
+struct gdma_queue_spec {
+ enum gdma_queue_type type;
+ bool monitor_avl_buf;
+ unsigned int queue_size;
+
+ /* Extra fields specific to EQ/CQ. */
+ union {
+ struct {
+ gdma_eq_callback *callback;
+ void *context;
+
+ unsigned long log2_throttle_limit;
+ } eq;
+
+ struct {
+ gdma_cq_callback *callback;
+ void *context;
+
+ struct gdma_queue *parent_eq;
+
+ } cq;
+ };
+};
+
+#define MANA_IRQ_NAME_SZ 32
+
+struct gdma_irq_context {
+ void (*handler)(void *arg);
+ void *arg;
+ char name[MANA_IRQ_NAME_SZ];
+};
+
+struct gdma_context {
+ struct device *dev;
+
+ /* Per-vPort max number of queues */
+ unsigned int max_num_queues;
+ unsigned int max_num_msix;
+ unsigned int num_msix_usable;
+ struct gdma_resource msix_resource;
+ struct gdma_irq_context *irq_contexts;
+
+ /* This maps a CQ index to the queue structure. */
+ unsigned int max_num_cqs;
+ struct gdma_queue **cq_table;
+
+ /* Protect eq_test_event and test_event_eq_id */
+ struct mutex eq_test_event_mutex;
+ struct completion eq_test_event;
+ u32 test_event_eq_id;
+
+ bool is_pf;
+ void __iomem *bar0_va;
+ void __iomem *shm_base;
+ void __iomem *db_page_base;
+ u32 db_page_size;
+
+ /* Shared memory channel (used to bootstrap HWC) */
+ struct shm_channel shm_channel;
+
+ /* Hardware communication channel (HWC) */
+ struct gdma_dev hwc;
+
+ /* Azure network adapter */
+ struct gdma_dev mana;
+};
+
+#define MAX_NUM_GDMA_DEVICES 4
+
+static inline bool mana_gd_is_mana(struct gdma_dev *gd)
+{
+ return gd->dev_id.type == GDMA_DEVICE_MANA;
+}
+
+static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
+{
+ return gd->dev_id.type == GDMA_DEVICE_HWC;
+}
+
+u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
+u32 mana_gd_wq_avail_space(struct gdma_queue *wq);
+
+int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);
+
+int mana_gd_create_hwc_queue(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr);
+
+int mana_gd_create_mana_eq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr);
+
+int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr);
+
+void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);
+
+int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);
+
+void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
+
+struct gdma_wqe {
+ u32 reserved :24;
+ u32 last_vbytes :8;
+
+ union {
+ u32 flags;
+
+ struct {
+ u32 num_sge :8;
+ u32 inline_oob_size_div4:3;
+ u32 client_oob_in_sgl :1;
+ u32 reserved1 :4;
+ u32 client_data_unit :14;
+ u32 reserved2 :2;
+ };
+ };
+}; /* HW DATA */
+
+#define INLINE_OOB_SMALL_SIZE 8
+#define INLINE_OOB_LARGE_SIZE 24
+
+#define MAX_TX_WQE_SIZE 512
+#define MAX_RX_WQE_SIZE 256
+
+struct gdma_cqe {
+ u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
+
+ union {
+ u32 as_uint32;
+
+ struct {
+ u32 wq_num : 24;
+ u32 is_sq : 1;
+ u32 reserved : 4;
+ u32 owner_bits : 3;
+ };
+ } cqe_info;
+}; /* HW DATA */
+
+#define GDMA_CQE_OWNER_BITS 3
+
+#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
+
+#define SET_ARM_BIT 1
+
+#define GDMA_EQE_OWNER_BITS 3
+
+union gdma_eqe_info {
+ u32 as_uint32;
+
+ struct {
+ u32 type : 8;
+ u32 reserved1 : 8;
+ u32 client_id : 2;
+ u32 reserved2 : 11;
+ u32 owner_bits : 3;
+ };
+}; /* HW DATA */
+
+#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
+#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))
+
+struct gdma_eqe {
+ u32 details[GDMA_EVENT_DATA_SIZE / 4];
+ u32 eqe_info;
+}; /* HW DATA */
+
+#define GDMA_REG_DB_PAGE_OFFSET 8
+#define GDMA_REG_DB_PAGE_SIZE 0x10
+#define GDMA_REG_SHM_OFFSET 0x18
+
+#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
+#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
+#define GDMA_PF_REG_SHM_OFF 0x70
+
+#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108
+
+#define MANA_PF_DEVICE_ID 0x00B9
+#define MANA_VF_DEVICE_ID 0x00BA
+
+struct gdma_posted_wqe_info {
+ u32 wqe_size_in_bu;
+};
+
+/* GDMA_GENERATE_TEST_EQE */
+struct gdma_generate_test_event_req {
+ struct gdma_req_hdr hdr;
+ u32 queue_index;
+}; /* HW DATA */
+
+/* GDMA_VERIFY_VF_DRIVER_VERSION */
+enum {
+ GDMA_PROTOCOL_V1 = 1,
+ GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
+ GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
+};
+
+#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)
+
+/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
+ * so the driver is able to reliably support features like busy_poll.
+ */
+#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
+
+#define GDMA_DRV_CAP_FLAGS1 \
+ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
+ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)
+
+#define GDMA_DRV_CAP_FLAGS2 0
+
+#define GDMA_DRV_CAP_FLAGS3 0
+
+#define GDMA_DRV_CAP_FLAGS4 0
+
+struct gdma_verify_ver_req {
+ struct gdma_req_hdr hdr;
+
+ /* Mandatory fields required for protocol establishment */
+ u64 protocol_ver_min;
+ u64 protocol_ver_max;
+
+ /* Gdma Driver Capability Flags */
+ u64 gd_drv_cap_flags1;
+ u64 gd_drv_cap_flags2;
+ u64 gd_drv_cap_flags3;
+ u64 gd_drv_cap_flags4;
+
+ /* Advisory fields */
+ u64 drv_ver;
+ u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
+ u32 reserved;
+ u32 os_ver_major;
+ u32 os_ver_minor;
+ u32 os_ver_build;
+ u32 os_ver_platform;
+ u64 reserved_2;
+ u8 os_ver_str1[128];
+ u8 os_ver_str2[128];
+ u8 os_ver_str3[128];
+ u8 os_ver_str4[128];
+}; /* HW DATA */
+
+struct gdma_verify_ver_resp {
+ struct gdma_resp_hdr hdr;
+ u64 gdma_protocol_ver;
+ u64 pf_cap_flags1;
+ u64 pf_cap_flags2;
+ u64 pf_cap_flags3;
+ u64 pf_cap_flags4;
+}; /* HW DATA */
+
+/* GDMA_QUERY_MAX_RESOURCES */
+struct gdma_query_max_resources_resp {
+ struct gdma_resp_hdr hdr;
+ u32 status;
+ u32 max_sq;
+ u32 max_rq;
+ u32 max_cq;
+ u32 max_eq;
+ u32 max_db;
+ u32 max_mst;
+ u32 max_cq_mod_ctx;
+ u32 max_mod_cq;
+ u32 max_msix;
+}; /* HW DATA */
+
+/* GDMA_LIST_DEVICES */
+struct gdma_list_devices_resp {
+ struct gdma_resp_hdr hdr;
+ u32 num_of_devs;
+ u32 reserved;
+ struct gdma_dev_id devs[64];
+}; /* HW DATA */
+
+/* GDMA_REGISTER_DEVICE */
+struct gdma_register_device_resp {
+ struct gdma_resp_hdr hdr;
+ u32 pdid;
+ u32 gpa_mkey;
+ u32 db_id;
+}; /* HW DATA */
+
+/* GDMA_CREATE_QUEUE */
+struct gdma_create_queue_req {
+ struct gdma_req_hdr hdr;
+ u32 type;
+ u32 reserved1;
+ u32 pdid;
+ u32 doorbell_id;
+ u64 gdma_region;
+ u32 reserved2;
+ u32 queue_size;
+ u32 log2_throttle_limit;
+ u32 eq_pci_msix_index;
+ u32 cq_mod_ctx_id;
+ u32 cq_parent_eq_id;
+ u8 rq_drop_on_overrun;
+ u8 rq_err_on_wqe_overflow;
+ u8 rq_chain_rec_wqes;
+ u8 sq_hw_db;
+ u32 reserved3;
+}; /* HW DATA */
+
+struct gdma_create_queue_resp {
+ struct gdma_resp_hdr hdr;
+ u32 queue_index;
+}; /* HW DATA */
+
+/* GDMA_DISABLE_QUEUE */
+struct gdma_disable_queue_req {
+ struct gdma_req_hdr hdr;
+ u32 type;
+ u32 queue_index;
+ u32 alloc_res_id_on_creation;
+}; /* HW DATA */
+
+/* GDMA_CREATE_DMA_REGION */
+struct gdma_create_dma_region_req {
+ struct gdma_req_hdr hdr;
+
+ /* The total size of the DMA region */
+ u64 length;
+
+ /* The offset in the first page */
+ u32 offset_in_page;
+
+ /* enum gdma_page_type */
+ u32 gdma_page_type;
+
+ /* The total number of pages */
+ u32 page_count;
+
+ /* If page_addr_list_len is smaller than page_count,
+ * the remaining page addresses will be added via the
+ * message GDMA_DMA_REGION_ADD_PAGES.
+ */
+ u32 page_addr_list_len;
+ u64 page_addr_list[];
+}; /* HW DATA */
+
+struct gdma_create_dma_region_resp {
+ struct gdma_resp_hdr hdr;
+ u64 gdma_region;
+}; /* HW DATA */
+
+/* GDMA_DMA_REGION_ADD_PAGES */
+struct gdma_dma_region_add_pages_req {
+ struct gdma_req_hdr hdr;
+
+ u64 gdma_region;
+
+ u32 page_addr_list_len;
+ u32 reserved3;
+
+ u64 page_addr_list[];
+}; /* HW DATA */
+
+/* GDMA_DESTROY_DMA_REGION */
+struct gdma_destroy_dma_region_req {
+ struct gdma_req_hdr hdr;
+
+ u64 gdma_region;
+}; /* HW DATA */
+
+int mana_gd_verify_vf_version(struct pci_dev *pdev);
+
+int mana_gd_register_device(struct gdma_dev *gd);
+int mana_gd_deregister_device(struct gdma_dev *gd);
+
+int mana_gd_post_work_request(struct gdma_queue *wq,
+ const struct gdma_wqe_request *wqe_req,
+ struct gdma_posted_wqe_info *wqe_info);
+
+int mana_gd_post_and_ring(struct gdma_queue *queue,
+ const struct gdma_wqe_request *wqe,
+ struct gdma_posted_wqe_info *wqe_info);
+
+int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
+void mana_gd_free_res_map(struct gdma_resource *r);
+
+void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
+ struct gdma_queue *queue);
+
+int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
+ struct gdma_mem_info *gmi);
+
+void mana_gd_free_memory(struct gdma_mem_info *gmi);
+
+int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
+ u32 resp_len, void *resp);
+#endif /* _GDMA_H */
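
The header above establishes the request/response pattern used for every GDMA message: fill a request header with mana_gd_init_req_hdr(), send it over the hardware channel with mana_gd_send_request(), then check both the return value and resp.hdr.status. As a minimal sketch (the wrapper function itself is hypothetical and not part of this patch; the types and helpers are the ones declared above), a query for the maximum number of EQs could be issued like this:

    /* Hypothetical helper: issue GDMA_QUERY_MAX_RESOURCES and return one field.
     * Mirrors the init/send/check sequence used throughout gdma_main.c below.
     */
    static int example_query_max_eqs(struct gdma_context *gc, u32 *max_eq)
    {
            struct gdma_query_max_resources_resp resp = {};
            struct gdma_general_req req = {};
            int err;

            mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
                                 sizeof(req), sizeof(resp));

            err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
            if (err || resp.hdr.status)
                    return err ? err : -EPROTO; /* transport error or HW status */

            *max_eq = resp.max_eq;
            return 0;
    }

The same init/send/check sequence appears in mana_gd_query_max_resources() and mana_gd_detect_devices() in gdma_main.c below.
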
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
new file mode 100644
index 000000000..d674ebda2
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -0,0 +1,1506 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+
+#include "mana.h"
+
+static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
+{
+ return readl(g->bar0_va + offset);
+}
+
+static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
+{
+ return readq(g->bar0_va + offset);
+}
+
+static void mana_gd_init_pf_regs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ void __iomem *sriov_base_va;
+ u64 sriov_base_off;
+
+ gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
+ gc->db_page_base = gc->bar0_va +
+ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
+ sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
+
+ sriov_base_va = gc->bar0_va + sriov_base_off;
+ gc->shm_base = sriov_base_va +
+ mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
+}
+
+static void mana_gd_init_vf_regs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;
+
+ gc->db_page_base = gc->bar0_va +
+ mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
+
+ gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
+}
+
+static void mana_gd_init_registers(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ if (gc->is_pf)
+ mana_gd_init_pf_regs(pdev);
+ else
+ mana_gd_init_vf_regs(pdev);
+}
+
+static int mana_gd_query_max_resources(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_query_max_resources_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
+ sizeof(req), sizeof(resp));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ if (gc->num_msix_usable > resp.max_msix)
+ gc->num_msix_usable = resp.max_msix;
+
+ if (gc->num_msix_usable <= 1)
+ return -ENOSPC;
+
+ gc->max_num_queues = num_online_cpus();
+ if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
+ gc->max_num_queues = MANA_MAX_NUM_QUEUES;
+
+ if (gc->max_num_queues > resp.max_eq)
+ gc->max_num_queues = resp.max_eq;
+
+ if (gc->max_num_queues > resp.max_cq)
+ gc->max_num_queues = resp.max_cq;
+
+ if (gc->max_num_queues > resp.max_sq)
+ gc->max_num_queues = resp.max_sq;
+
+ if (gc->max_num_queues > resp.max_rq)
+ gc->max_num_queues = resp.max_rq;
+
+ /* The Hardware Channel (HWC) uses 1 MSI-X */
+ if (gc->max_num_queues > gc->num_msix_usable - 1)
+ gc->max_num_queues = gc->num_msix_usable - 1;
+
+ return 0;
+}
+
+static int mana_gd_detect_devices(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_list_devices_resp resp = {};
+ struct gdma_general_req req = {};
+ struct gdma_dev_id dev;
+ u32 i, max_num_devs;
+ u16 dev_type;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
+ sizeof(resp));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
+
+ for (i = 0; i < max_num_devs; i++) {
+ dev = resp.devs[i];
+ dev_type = dev.type;
+
+ /* HWC is already detected in mana_hwc_create_channel(). */
+ if (dev_type == GDMA_DEVICE_HWC)
+ continue;
+
+ if (dev_type == GDMA_DEVICE_MANA) {
+ gc->mana.gdma_context = gc;
+ gc->mana.dev_id = dev;
+ }
+ }
+
+ return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
+}
+
+int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
+ u32 resp_len, void *resp)
+{
+ struct hw_channel_context *hwc = gc->hwc.driver_data;
+
+ return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
+}
+
+int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
+ struct gdma_mem_info *gmi)
+{
+ dma_addr_t dma_handle;
+ void *buf;
+
+ if (length < PAGE_SIZE || !is_power_of_2(length))
+ return -EINVAL;
+
+ gmi->dev = gc->dev;
+ buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ gmi->dma_handle = dma_handle;
+ gmi->virt_addr = buf;
+ gmi->length = length;
+
+ return 0;
+}
+
+void mana_gd_free_memory(struct gdma_mem_info *gmi)
+{
+ dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
+ gmi->dma_handle);
+}
+
+static int mana_gd_create_hw_eq(struct gdma_context *gc,
+ struct gdma_queue *queue)
+{
+ struct gdma_create_queue_resp resp = {};
+ struct gdma_create_queue_req req = {};
+ int err;
+
+ if (queue->type != GDMA_EQ)
+ return -EINVAL;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
+ sizeof(req), sizeof(resp));
+
+ req.hdr.dev_id = queue->gdma_dev->dev_id;
+ req.type = queue->type;
+ req.pdid = queue->gdma_dev->pdid;
+ req.doorbell_id = queue->gdma_dev->doorbell;
+ req.gdma_region = queue->mem_info.gdma_region;
+ req.queue_size = queue->queue_size;
+ req.log2_throttle_limit = queue->eq.log2_throttle_limit;
+ req.eq_pci_msix_index = queue->eq.msix_index;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ queue->id = resp.queue_index;
+ queue->eq.disable_needed = true;
+ queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ return 0;
+}
+
+static int mana_gd_disable_queue(struct gdma_queue *queue)
+{
+ struct gdma_context *gc = queue->gdma_dev->gdma_context;
+ struct gdma_disable_queue_req req = {};
+ struct gdma_general_resp resp = {};
+ int err;
+
+ WARN_ON(queue->type != GDMA_EQ);
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
+ sizeof(req), sizeof(resp));
+
+ req.hdr.dev_id = queue->gdma_dev->dev_id;
+ req.type = queue->type;
+ req.queue_index = queue->id;
+ req.alloc_res_id_on_creation = 1;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ return 0;
+}
+
+#define DOORBELL_OFFSET_SQ 0x0
+#define DOORBELL_OFFSET_RQ 0x400
+#define DOORBELL_OFFSET_CQ 0x800
+#define DOORBELL_OFFSET_EQ 0xFF8
+
+static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
+ enum gdma_queue_type q_type, u32 qid,
+ u32 tail_ptr, u8 num_req)
+{
+ void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
+ union gdma_doorbell_entry e = {};
+
+ switch (q_type) {
+ case GDMA_EQ:
+ e.eq.id = qid;
+ e.eq.tail_ptr = tail_ptr;
+ e.eq.arm = num_req;
+
+ addr += DOORBELL_OFFSET_EQ;
+ break;
+
+ case GDMA_CQ:
+ e.cq.id = qid;
+ e.cq.tail_ptr = tail_ptr;
+ e.cq.arm = num_req;
+
+ addr += DOORBELL_OFFSET_CQ;
+ break;
+
+ case GDMA_RQ:
+ e.rq.id = qid;
+ e.rq.tail_ptr = tail_ptr;
+ e.rq.wqe_cnt = num_req;
+
+ addr += DOORBELL_OFFSET_RQ;
+ break;
+
+ case GDMA_SQ:
+ e.sq.id = qid;
+ e.sq.tail_ptr = tail_ptr;
+
+ addr += DOORBELL_OFFSET_SQ;
+ break;
+
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ /* Ensure all writes are done before ringing the doorbell */
+ wmb();
+
+ writeq(e.as_uint64, addr);
+}
+
+void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
+{
+ mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
+ queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
+}
+
+void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
+{
+ struct gdma_context *gc = cq->gdma_dev->gdma_context;
+
+ u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
+
+ u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);
+
+ mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
+ head, arm_bit);
+}
+
+static void mana_gd_process_eqe(struct gdma_queue *eq)
+{
+ u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
+ struct gdma_context *gc = eq->gdma_dev->gdma_context;
+ struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
+ union gdma_eqe_info eqe_info;
+ enum gdma_eqe_type type;
+ struct gdma_event event;
+ struct gdma_queue *cq;
+ struct gdma_eqe *eqe;
+ u32 cq_id;
+
+ eqe = &eq_eqe_ptr[head];
+ eqe_info.as_uint32 = eqe->eqe_info;
+ type = eqe_info.type;
+
+ switch (type) {
+ case GDMA_EQE_COMPLETION:
+ cq_id = eqe->details[0] & 0xFFFFFF;
+ if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
+ break;
+
+ cq = gc->cq_table[cq_id];
+ if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
+ break;
+
+ if (cq->cq.callback)
+ cq->cq.callback(cq->cq.context, cq);
+
+ break;
+
+ case GDMA_EQE_TEST_EVENT:
+ gc->test_event_eq_id = eq->id;
+ complete(&gc->eq_test_event);
+ break;
+
+ case GDMA_EQE_HWC_INIT_EQ_ID_DB:
+ case GDMA_EQE_HWC_INIT_DATA:
+ case GDMA_EQE_HWC_INIT_DONE:
+ if (!eq->eq.callback)
+ break;
+
+ event.type = type;
+ memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
+ eq->eq.callback(eq->eq.context, eq, &event);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void mana_gd_process_eq_events(void *arg)
+{
+ u32 owner_bits, new_bits, old_bits;
+ union gdma_eqe_info eqe_info;
+ struct gdma_eqe *eq_eqe_ptr;
+ struct gdma_queue *eq = arg;
+ struct gdma_context *gc;
+ struct gdma_eqe *eqe;
+ u32 head, num_eqe;
+ int i;
+
+ gc = eq->gdma_dev->gdma_context;
+
+ num_eqe = eq->queue_size / GDMA_EQE_SIZE;
+ eq_eqe_ptr = eq->queue_mem_ptr;
+
+ /* Process up to 5 EQEs at a time, and update the HW head. */
+ for (i = 0; i < 5; i++) {
+ eqe = &eq_eqe_ptr[eq->head % num_eqe];
+ eqe_info.as_uint32 = eqe->eqe_info;
+ owner_bits = eqe_info.owner_bits;
+
+ old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
+ /* No more entries */
+ if (owner_bits == old_bits)
+ break;
+
+ new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
+ if (owner_bits != new_bits) {
+ dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
+ break;
+ }
+
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading eqe.
+ */
+ rmb();
+
+ mana_gd_process_eqe(eq);
+
+ eq->head++;
+ }
+
+ head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
+
+ mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
+ head, SET_ARM_BIT);
+}
+
+static int mana_gd_register_irq(struct gdma_queue *queue,
+ const struct gdma_queue_spec *spec)
+{
+ struct gdma_dev *gd = queue->gdma_dev;
+ struct gdma_irq_context *gic;
+ struct gdma_context *gc;
+ struct gdma_resource *r;
+ unsigned int msi_index;
+ unsigned long flags;
+ struct device *dev;
+ int err = 0;
+
+ gc = gd->gdma_context;
+ r = &gc->msix_resource;
+ dev = gc->dev;
+
+ spin_lock_irqsave(&r->lock, flags);
+
+ msi_index = find_first_zero_bit(r->map, r->size);
+ if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
+ err = -ENOSPC;
+ } else {
+ bitmap_set(r->map, msi_index, 1);
+ queue->eq.msix_index = msi_index;
+ }
+
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ if (err) {
+ dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
+ err, msi_index, r->size, gc->num_msix_usable);
+
+ return err;
+ }
+
+ gic = &gc->irq_contexts[msi_index];
+
+ WARN_ON(gic->handler || gic->arg);
+
+ gic->arg = queue;
+
+ gic->handler = mana_gd_process_eq_events;
+
+ return 0;
+}
+
+static void mana_gd_deregister_irq(struct gdma_queue *queue)
+{
+ struct gdma_dev *gd = queue->gdma_dev;
+ struct gdma_irq_context *gic;
+ struct gdma_context *gc;
+ struct gdma_resource *r;
+ unsigned int msix_index;
+ unsigned long flags;
+
+ gc = gd->gdma_context;
+ r = &gc->msix_resource;
+
+ /* At most num_online_cpus() + 1 interrupts are used. */
+ msix_index = queue->eq.msix_index;
+ if (WARN_ON(msix_index >= gc->num_msix_usable))
+ return;
+
+ gic = &gc->irq_contexts[msix_index];
+ gic->handler = NULL;
+ gic->arg = NULL;
+
+ spin_lock_irqsave(&r->lock, flags);
+ bitmap_clear(r->map, msix_index, 1);
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+}
+
+int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
+{
+ struct gdma_generate_test_event_req req = {};
+ struct gdma_general_resp resp = {};
+ struct device *dev = gc->dev;
+ int err;
+
+ mutex_lock(&gc->eq_test_event_mutex);
+
+ init_completion(&gc->eq_test_event);
+ gc->test_event_eq_id = INVALID_QUEUE_ID;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
+ sizeof(req), sizeof(resp));
+
+ req.hdr.dev_id = eq->gdma_dev->dev_id;
+ req.queue_index = eq->id;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ dev_err(dev, "test_eq failed: %d\n", err);
+ goto out;
+ }
+
+ err = -EPROTO;
+
+ if (resp.hdr.status) {
+ dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) {
+ dev_err(dev, "test_eq timed out on queue %d\n", eq->id);
+ goto out;
+ }
+
+ if (eq->id != gc->test_event_eq_id) {
+ dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n",
+ gc->test_event_eq_id, eq->id);
+ goto out;
+ }
+
+ err = 0;
+out:
+ mutex_unlock(&gc->eq_test_event_mutex);
+ return err;
+}
+
+static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
+ struct gdma_queue *queue)
+{
+ int err;
+
+ if (flush_events) {
+ err = mana_gd_test_eq(gc, queue);
+ if (err)
+ dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
+ }
+
+ mana_gd_deregister_irq(queue);
+
+ if (queue->eq.disable_needed)
+ mana_gd_disable_queue(queue);
+}
+
+static int mana_gd_create_eq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ bool create_hwq, struct gdma_queue *queue)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct device *dev = gc->dev;
+ u32 log2_num_entries;
+ int err;
+
+ queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+
+ log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
+
+ if (spec->eq.log2_throttle_limit > log2_num_entries) {
+ dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n",
+ spec->eq.log2_throttle_limit, log2_num_entries);
+ return -EINVAL;
+ }
+
+ err = mana_gd_register_irq(queue, spec);
+ if (err) {
+ dev_err(dev, "Failed to register irq: %d\n", err);
+ return err;
+ }
+
+ queue->eq.callback = spec->eq.callback;
+ queue->eq.context = spec->eq.context;
+ queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
+ queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;
+
+ if (create_hwq) {
+ err = mana_gd_create_hw_eq(gc, queue);
+ if (err)
+ goto out;
+
+ err = mana_gd_test_eq(gc, queue);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(dev, "Failed to create EQ: %d\n", err);
+ mana_gd_destroy_eq(gc, false, queue);
+ return err;
+}
+
+static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
+ struct gdma_queue *queue)
+{
+ u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);
+
+ queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
+ queue->cq.parent = spec->cq.parent_eq;
+ queue->cq.context = spec->cq.context;
+ queue->cq.callback = spec->cq.callback;
+}
+
+static void mana_gd_destroy_cq(struct gdma_context *gc,
+ struct gdma_queue *queue)
+{
+ u32 id = queue->id;
+
+ if (id >= gc->max_num_cqs)
+ return;
+
+ if (!gc->cq_table[id])
+ return;
+
+ gc->cq_table[id] = NULL;
+}
+
+int mana_gd_create_hwc_queue(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_mem_info *gmi;
+ struct gdma_queue *queue;
+ int err;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return -ENOMEM;
+
+ gmi = &queue->mem_info;
+ err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
+ if (err)
+ goto free_q;
+
+ queue->head = 0;
+ queue->tail = 0;
+ queue->queue_mem_ptr = gmi->virt_addr;
+ queue->queue_size = spec->queue_size;
+ queue->monitor_avl_buf = spec->monitor_avl_buf;
+ queue->type = spec->type;
+ queue->gdma_dev = gd;
+
+ if (spec->type == GDMA_EQ)
+ err = mana_gd_create_eq(gd, spec, false, queue);
+ else if (spec->type == GDMA_CQ)
+ mana_gd_create_cq(spec, queue);
+
+ if (err)
+ goto out;
+
+ *queue_ptr = queue;
+ return 0;
+out:
+ mana_gd_free_memory(gmi);
+free_q:
+ kfree(queue);
+ return err;
+}
+
+static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
+{
+ struct gdma_destroy_dma_region_req req = {};
+ struct gdma_general_resp resp = {};
+ int err;
+
+ if (gdma_region == GDMA_INVALID_DMA_REGION)
+ return;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
+ sizeof(resp));
+ req.gdma_region = gdma_region;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status)
+ dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
+ err, resp.hdr.status);
+}
+
+static int mana_gd_create_dma_region(struct gdma_dev *gd,
+ struct gdma_mem_info *gmi)
+{
+ unsigned int num_page = gmi->length / PAGE_SIZE;
+ struct gdma_create_dma_region_req *req = NULL;
+ struct gdma_create_dma_region_resp resp = {};
+ struct gdma_context *gc = gd->gdma_context;
+ struct hw_channel_context *hwc;
+ u32 length = gmi->length;
+ size_t req_msg_size;
+ int err;
+ int i;
+
+ if (length < PAGE_SIZE || !is_power_of_2(length))
+ return -EINVAL;
+
+ if (offset_in_page(gmi->virt_addr) != 0)
+ return -EINVAL;
+
+ hwc = gc->hwc.driver_data;
+ req_msg_size = struct_size(req, page_addr_list, num_page);
+ if (req_msg_size > hwc->max_req_msg_size)
+ return -EINVAL;
+
+ req = kzalloc(req_msg_size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
+ req_msg_size, sizeof(resp));
+ req->length = length;
+ req->offset_in_page = 0;
+ req->gdma_page_type = GDMA_PAGE_TYPE_4K;
+ req->page_count = num_page;
+ req->page_addr_list_len = num_page;
+
+ for (i = 0; i < num_page; i++)
+ req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
+
+ err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
+ if (err)
+ goto out;
+
+ if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
+ dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
+ resp.hdr.status);
+ err = -EPROTO;
+ goto out;
+ }
+
+ gmi->gdma_region = resp.gdma_region;
+out:
+ kfree(req);
+ return err;
+}
+
+int mana_gd_create_mana_eq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_mem_info *gmi;
+ struct gdma_queue *queue;
+ int err;
+
+ if (spec->type != GDMA_EQ)
+ return -EINVAL;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return -ENOMEM;
+
+ gmi = &queue->mem_info;
+ err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
+ if (err)
+ goto free_q;
+
+ err = mana_gd_create_dma_region(gd, gmi);
+ if (err)
+ goto out;
+
+ queue->head = 0;
+ queue->tail = 0;
+ queue->queue_mem_ptr = gmi->virt_addr;
+ queue->queue_size = spec->queue_size;
+ queue->monitor_avl_buf = spec->monitor_avl_buf;
+ queue->type = spec->type;
+ queue->gdma_dev = gd;
+
+ err = mana_gd_create_eq(gd, spec, true, queue);
+ if (err)
+ goto out;
+
+ *queue_ptr = queue;
+ return 0;
+out:
+ mana_gd_free_memory(gmi);
+free_q:
+ kfree(queue);
+ return err;
+}
+
+int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
+ const struct gdma_queue_spec *spec,
+ struct gdma_queue **queue_ptr)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_mem_info *gmi;
+ struct gdma_queue *queue;
+ int err;
+
+ if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
+ spec->type != GDMA_RQ)
+ return -EINVAL;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return -ENOMEM;
+
+ gmi = &queue->mem_info;
+ err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
+ if (err)
+ goto free_q;
+
+ err = mana_gd_create_dma_region(gd, gmi);
+ if (err)
+ goto out;
+
+ queue->head = 0;
+ queue->tail = 0;
+ queue->queue_mem_ptr = gmi->virt_addr;
+ queue->queue_size = spec->queue_size;
+ queue->monitor_avl_buf = spec->monitor_avl_buf;
+ queue->type = spec->type;
+ queue->gdma_dev = gd;
+
+ if (spec->type == GDMA_CQ)
+ mana_gd_create_cq(spec, queue);
+
+ *queue_ptr = queue;
+ return 0;
+out:
+ mana_gd_free_memory(gmi);
+free_q:
+ kfree(queue);
+ return err;
+}
+
+void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
+{
+ struct gdma_mem_info *gmi = &queue->mem_info;
+
+ switch (queue->type) {
+ case GDMA_EQ:
+ mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
+ break;
+
+ case GDMA_CQ:
+ mana_gd_destroy_cq(gc, queue);
+ break;
+
+ case GDMA_RQ:
+ break;
+
+ case GDMA_SQ:
+ break;
+
+ default:
+ dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n",
+ queue->type);
+ return;
+ }
+
+ mana_gd_destroy_dma_region(gc, gmi->gdma_region);
+ mana_gd_free_memory(gmi);
+ kfree(queue);
+}
+
+int mana_gd_verify_vf_version(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_verify_ver_resp resp = {};
+ struct gdma_verify_ver_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
+ sizeof(req), sizeof(resp));
+
+ req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
+ req.protocol_ver_max = GDMA_PROTOCOL_LAST;
+
+ req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
+ req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
+ req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
+ req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
+
+ req.drv_ver = 0; /* Unused */
+ req.os_type = 0x10; /* Linux */
+ req.os_ver_major = LINUX_VERSION_MAJOR;
+ req.os_ver_minor = LINUX_VERSION_PATCHLEVEL;
+ req.os_ver_build = LINUX_VERSION_SUBLEVEL;
+ strscpy(req.os_ver_str1, utsname()->sysname, sizeof(req.os_ver_str1));
+ strscpy(req.os_ver_str2, utsname()->release, sizeof(req.os_ver_str2));
+ strscpy(req.os_ver_str3, utsname()->version, sizeof(req.os_ver_str3));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ return 0;
+}
+
+int mana_gd_register_device(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_register_device_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ gd->pdid = INVALID_PDID;
+ gd->doorbell = INVALID_DOORBELL;
+ gd->gpa_mkey = INVALID_MEM_KEY;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
+ sizeof(resp));
+
+ req.hdr.dev_id = gd->dev_id;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ gd->pdid = resp.pdid;
+ gd->gpa_mkey = resp.gpa_mkey;
+ gd->doorbell = resp.db_id;
+
+ return 0;
+}
+
+int mana_gd_deregister_device(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_general_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ if (gd->pdid == INVALID_PDID)
+ return -EINVAL;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
+ sizeof(resp));
+
+ req.hdr.dev_id = gd->dev_id;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
+ err, resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+ }
+
+ gd->pdid = INVALID_PDID;
+ gd->doorbell = INVALID_DOORBELL;
+ gd->gpa_mkey = INVALID_MEM_KEY;
+
+ return err;
+}
+
+u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
+{
+ u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
+ u32 wq_size = wq->queue_size;
+
+ WARN_ON_ONCE(used_space > wq_size);
+
+ return wq_size - used_space;
+}
+
+u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
+{
+ u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);
+
+ WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size);
+
+ return wq->queue_mem_ptr + offset;
+}
+
+static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
+ enum gdma_queue_type q_type,
+ u32 client_oob_size, u32 sgl_data_size,
+ u8 *wqe_ptr)
+{
+ bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
+ bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
+ struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
+ u8 *ptr;
+
+ memset(header, 0, sizeof(struct gdma_wqe));
+ header->num_sge = wqe_req->num_sge;
+ header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
+
+ if (oob_in_sgl) {
+ WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
+
+ header->client_oob_in_sgl = 1;
+
+ if (pad_data)
+ header->last_vbytes = wqe_req->sgl[0].size;
+ }
+
+ if (q_type == GDMA_SQ)
+ header->client_data_unit = wqe_req->client_data_unit;
+
+ /* The size of gdma_wqe + client_oob_size must be less than or equal
+ * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
+ * the queue memory buffer boundary.
+ */
+ ptr = wqe_ptr + sizeof(header);
+
+ if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
+ memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);
+
+ if (client_oob_size > wqe_req->inline_oob_size)
+ memset(ptr + wqe_req->inline_oob_size, 0,
+ client_oob_size - wqe_req->inline_oob_size);
+ }
+
+ return sizeof(header) + client_oob_size;
+}
+
+static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
+ const struct gdma_wqe_request *wqe_req)
+{
+ u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
+ const u8 *address = (u8 *)wqe_req->sgl;
+ u8 *base_ptr, *end_ptr;
+ u32 size_to_end;
+
+ base_ptr = wq->queue_mem_ptr;
+ end_ptr = base_ptr + wq->queue_size;
+ size_to_end = (u32)(end_ptr - wqe_ptr);
+
+ if (size_to_end < sgl_size) {
+ memcpy(wqe_ptr, address, size_to_end);
+
+ wqe_ptr = base_ptr;
+ address += size_to_end;
+ sgl_size -= size_to_end;
+ }
+
+ memcpy(wqe_ptr, address, sgl_size);
+}
+
+int mana_gd_post_work_request(struct gdma_queue *wq,
+ const struct gdma_wqe_request *wqe_req,
+ struct gdma_posted_wqe_info *wqe_info)
+{
+ u32 client_oob_size = wqe_req->inline_oob_size;
+ struct gdma_context *gc;
+ u32 sgl_data_size;
+ u32 max_wqe_size;
+ u32 wqe_size;
+ u8 *wqe_ptr;
+
+ if (wqe_req->num_sge == 0)
+ return -EINVAL;
+
+ if (wq->type == GDMA_RQ) {
+ if (client_oob_size != 0)
+ return -EINVAL;
+
+ client_oob_size = INLINE_OOB_SMALL_SIZE;
+
+ max_wqe_size = GDMA_MAX_RQE_SIZE;
+ } else {
+ if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
+ client_oob_size != INLINE_OOB_LARGE_SIZE)
+ return -EINVAL;
+
+ max_wqe_size = GDMA_MAX_SQE_SIZE;
+ }
+
+ sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
+ wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
+ sgl_data_size, GDMA_WQE_BU_SIZE);
+ if (wqe_size > max_wqe_size)
+ return -EINVAL;
+
+ if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
+ gc = wq->gdma_dev->gdma_context;
+ dev_err(gc->dev, "unsuccessful flow control!\n");
+ return -ENOSPC;
+ }
+
+ if (wqe_info)
+ wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
+
+ wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
+ wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
+ sgl_data_size, wqe_ptr);
+ if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
+ wqe_ptr -= wq->queue_size;
+
+ mana_gd_write_sgl(wq, wqe_ptr, wqe_req);
+
+ wq->head += wqe_size / GDMA_WQE_BU_SIZE;
+
+ return 0;
+}
+
+int mana_gd_post_and_ring(struct gdma_queue *queue,
+ const struct gdma_wqe_request *wqe_req,
+ struct gdma_posted_wqe_info *wqe_info)
+{
+ struct gdma_context *gc = queue->gdma_dev->gdma_context;
+ int err;
+
+ err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
+ if (err)
+ return err;
+
+ mana_gd_wq_ring_doorbell(gc, queue);
+
+ return 0;
+}
+
+static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
+{
+ unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
+ struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
+ u32 owner_bits, new_bits, old_bits;
+ struct gdma_cqe *cqe;
+
+ cqe = &cq_cqe[cq->head % num_cqe];
+ owner_bits = cqe->cqe_info.owner_bits;
+
+ old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
+ /* Return 0 if no more entries. */
+ if (owner_bits == old_bits)
+ return 0;
+
+ new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
+ /* Return -1 if overflow detected. */
+ if (WARN_ON_ONCE(owner_bits != new_bits))
+ return -1;
+
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading completion info
+ */
+ rmb();
+
+ comp->wq_num = cqe->cqe_info.wq_num;
+ comp->is_sq = cqe->cqe_info.is_sq;
+ memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
+
+ return 1;
+}
+
+int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
+{
+ int cqe_idx;
+ int ret;
+
+ for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
+ ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);
+
+ if (ret < 0) {
+ cq->head -= cqe_idx;
+ return ret;
+ }
+
+ if (ret == 0)
+ break;
+
+ cq->head++;
+ }
+
+ return cqe_idx;
+}
+
+static irqreturn_t mana_gd_intr(int irq, void *arg)
+{
+ struct gdma_irq_context *gic = arg;
+
+ if (gic->handler)
+ gic->handler(gic->arg);
+
+ return IRQ_HANDLED;
+}
+
+int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
+{
+ r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
+ if (!r->map)
+ return -ENOMEM;
+
+ r->size = res_avail;
+ spin_lock_init(&r->lock);
+
+ return 0;
+}
+
+void mana_gd_free_res_map(struct gdma_resource *r)
+{
+ bitmap_free(r->map);
+ r->map = NULL;
+ r->size = 0;
+}
+
+static int mana_gd_setup_irqs(struct pci_dev *pdev)
+{
+ unsigned int max_queues_per_port = num_online_cpus();
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_irq_context *gic;
+ unsigned int max_irqs;
+ int nvec, irq;
+ int err, i, j;
+
+ if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
+ max_queues_per_port = MANA_MAX_NUM_QUEUES;
+
+ /* Need 1 interrupt for the Hardware Communication Channel (HWC) */
+ max_irqs = max_queues_per_port + 1;
+
+ nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
+ if (nvec < 0)
+ return nvec;
+
+ gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
+ GFP_KERNEL);
+ if (!gc->irq_contexts) {
+ err = -ENOMEM;
+ goto free_irq_vector;
+ }
+
+ for (i = 0; i < nvec; i++) {
+ gic = &gc->irq_contexts[i];
+ gic->handler = NULL;
+ gic->arg = NULL;
+
+ if (!i)
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
+ pci_name(pdev));
+ else
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+ i - 1, pci_name(pdev));
+
+ irq = pci_irq_vector(pdev, i);
+ if (irq < 0) {
+ err = irq;
+ goto free_irq;
+ }
+
+ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_irq;
+ }
+
+ err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
+ if (err)
+ goto free_irq;
+
+ gc->max_num_msix = nvec;
+ gc->num_msix_usable = nvec;
+
+ return 0;
+
+free_irq:
+ for (j = i - 1; j >= 0; j--) {
+ irq = pci_irq_vector(pdev, j);
+ gic = &gc->irq_contexts[j];
+ free_irq(irq, gic);
+ }
+
+ kfree(gc->irq_contexts);
+ gc->irq_contexts = NULL;
+free_irq_vector:
+ pci_free_irq_vectors(pdev);
+ return err;
+}
+
+static void mana_gd_remove_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_irq_context *gic;
+ int irq, i;
+
+ if (gc->max_num_msix < 1)
+ return;
+
+ mana_gd_free_res_map(&gc->msix_resource);
+
+ for (i = 0; i < gc->max_num_msix; i++) {
+ irq = pci_irq_vector(pdev, i);
+ if (irq < 0)
+ continue;
+
+ gic = &gc->irq_contexts[i];
+ free_irq(irq, gic);
+ }
+
+ pci_free_irq_vectors(pdev);
+
+ gc->max_num_msix = 0;
+ gc->num_msix_usable = 0;
+ kfree(gc->irq_contexts);
+ gc->irq_contexts = NULL;
+}
+
+static int mana_gd_setup(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ int err;
+
+ mana_gd_init_registers(pdev);
+ mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
+
+ err = mana_gd_setup_irqs(pdev);
+ if (err)
+ return err;
+
+ err = mana_hwc_create_channel(gc);
+ if (err)
+ goto remove_irq;
+
+ err = mana_gd_verify_vf_version(pdev);
+ if (err)
+ goto destroy_hwc;
+
+ err = mana_gd_query_max_resources(pdev);
+ if (err)
+ goto destroy_hwc;
+
+ err = mana_gd_detect_devices(pdev);
+ if (err)
+ goto destroy_hwc;
+
+ return 0;
+
+destroy_hwc:
+ mana_hwc_destroy_channel(gc);
+remove_irq:
+ mana_gd_remove_irqs(pdev);
+ return err;
+}
+
+static void mana_gd_cleanup(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ mana_hwc_destroy_channel(gc);
+
+ mana_gd_remove_irqs(pdev);
+}
+
+static bool mana_is_pf(unsigned short dev_id)
+{
+ return dev_id == MANA_PF_DEVICE_ID;
+}
+
+static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct gdma_context *gc;
+ void __iomem *bar0_va;
+ int bar = 0;
+ int err;
+
+ /* Each port has 2 CQs, each CQ has at most 1 EQE at a time */
+ BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return -ENXIO;
+
+ pci_set_master(pdev);
+
+ err = pci_request_regions(pdev, "mana");
+ if (err)
+ goto disable_dev;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ goto release_region;
+
+ err = -ENOMEM;
+ gc = vzalloc(sizeof(*gc));
+ if (!gc)
+ goto release_region;
+
+ mutex_init(&gc->eq_test_event_mutex);
+ pci_set_drvdata(pdev, gc);
+
+ bar0_va = pci_iomap(pdev, bar, 0);
+ if (!bar0_va)
+ goto free_gc;
+
+ gc->is_pf = mana_is_pf(pdev->device);
+ gc->bar0_va = bar0_va;
+ gc->dev = &pdev->dev;
+
+ err = mana_gd_setup(pdev);
+ if (err)
+ goto unmap_bar;
+
+ err = mana_probe(&gc->mana, false);
+ if (err)
+ goto cleanup_gd;
+
+ return 0;
+
+cleanup_gd:
+ mana_gd_cleanup(pdev);
+unmap_bar:
+ pci_iounmap(pdev, bar0_va);
+free_gc:
+ pci_set_drvdata(pdev, NULL);
+ vfree(gc);
+release_region:
+ pci_release_regions(pdev);
+disable_dev:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
+ return err;
+}
+
+static void mana_gd_remove(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ mana_remove(&gc->mana, false);
+
+ mana_gd_cleanup(pdev);
+
+ pci_iounmap(pdev, gc->bar0_va);
+
+ vfree(gc);
+
+ pci_release_regions(pdev);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+/* The 'state' parameter is not used. */
+static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ mana_remove(&gc->mana, true);
+
+ mana_gd_cleanup(pdev);
+
+ return 0;
+}
+
+/* In case the NIC hardware stops working, the suspend and resume callbacks will
+ * fail -- if this happens, it's safer to just report an error than try to undo
+ * what has been done.
+ */
+static int mana_gd_resume(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ int err;
+
+ err = mana_gd_setup(pdev);
+ if (err)
+ return err;
+
+ err = mana_probe(&gc->mana, true);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Quiesce the device for kexec. This is also called upon reboot/shutdown. */
+static void mana_gd_shutdown(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "Shutdown was called\n");
+
+ mana_remove(&gc->mana, true);
+
+ mana_gd_cleanup(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id mana_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
+ { }
+};
+
+static struct pci_driver mana_driver = {
+ .name = "mana",
+ .id_table = mana_id_table,
+ .probe = mana_gd_probe,
+ .remove = mana_gd_remove,
+ .suspend = mana_gd_suspend,
+ .resume = mana_gd_resume,
+ .shutdown = mana_gd_shutdown,
+};
+
+module_pci_driver(mana_driver);
+
+MODULE_DEVICE_TABLE(pci, mana_id_table);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");
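
gdma_main.c exposes mana_gd_poll_cq() and mana_gd_ring_cq() as the building blocks a completion-queue callback is expected to use: drain a batch of CQEs, handle them, and re-arm the CQ so the hardware raises a new EQE when more completions arrive. The sketch below is illustrative only; the callback name, the batch size and the per-completion handling are assumptions, not code from this patch.

    #define EXAMPLE_CQE_BATCH 8 /* illustrative batch size, not from the driver */

    /* Hypothetical gdma_cq_callback implementation: drain completions in
     * batches, then re-arm the CQ.
     */
    static void example_cq_handler(void *context, struct gdma_queue *cq)
    {
            struct gdma_comp comps[EXAMPLE_CQE_BATCH];
            int i, n;

            do {
                    n = mana_gd_poll_cq(cq, comps, EXAMPLE_CQE_BATCH);
                    for (i = 0; i < n; i++) {
                            /* comps[i].wq_num and comps[i].is_sq identify the
                             * SQ/RQ this completion belongs to; a real client
                             * such as the MANA Ethernet driver dispatches on
                             * them and uses 'context' for its own state.
                             */
                    }
            } while (n == EXAMPLE_CQE_BATCH);

            mana_gd_ring_cq(cq, SET_ARM_BIT);
    }

The hardware channel code that follows (hw_channel.c) builds its request/response queues on the same primitives.
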
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
new file mode 100644
index 000000000..543a5d5c3
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include "gdma.h"
+#include "hw_channel.h"
+
+static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
+{
+ struct gdma_resource *r = &hwc->inflight_msg_res;
+ unsigned long flags;
+ u32 index;
+
+ down(&hwc->sema);
+
+ spin_lock_irqsave(&r->lock, flags);
+
+ index = find_first_zero_bit(hwc->inflight_msg_res.map,
+ hwc->inflight_msg_res.size);
+
+ bitmap_set(hwc->inflight_msg_res.map, index, 1);
+
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ *msg_id = index;
+
+ return 0;
+}
+
+static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
+{
+ struct gdma_resource *r = &hwc->inflight_msg_res;
+ unsigned long flags;
+
+ spin_lock_irqsave(&r->lock, flags);
+ bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ up(&hwc->sema);
+}
+
+static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
+ const struct gdma_resp_hdr *resp_msg,
+ u32 resp_len)
+{
+ if (resp_len < sizeof(*resp_msg))
+ return -EPROTO;
+
+ if (resp_len > caller_ctx->output_buflen)
+ return -EPROTO;
+
+ return 0;
+}
+
+static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+ const struct gdma_resp_hdr *resp_msg)
+{
+ struct hwc_caller_ctx *ctx;
+ int err;
+
+ if (!test_bit(resp_msg->response.hwc_msg_id,
+ hwc->inflight_msg_res.map)) {
+ dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
+ resp_msg->response.hwc_msg_id);
+ return;
+ }
+
+ ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
+ err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
+ if (err)
+ goto out;
+
+ ctx->status_code = resp_msg->status;
+
+ memcpy(ctx->output_buf, resp_msg, resp_len);
+out:
+ ctx->error = err;
+ complete(&ctx->comp_event);
+}
+
+static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
+ struct hwc_work_request *req)
+{
+ struct device *dev = hwc_rxq->hwc->dev;
+ struct gdma_sge *sge;
+ int err;
+
+ sge = &req->sge;
+ sge->address = (u64)req->buf_sge_addr;
+ sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
+ sge->size = req->buf_len;
+
+ memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
+ req->wqe_req.sgl = sge;
+ req->wqe_req.num_sge = 1;
+ req->wqe_req.client_data_unit = 0;
+
+ err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
+ if (err)
+ dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
+ return err;
+}
+
+static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
+ struct gdma_event *event)
+{
+ struct hw_channel_context *hwc = ctx;
+ struct gdma_dev *gd = hwc->gdma_dev;
+ union hwc_init_type_data type_data;
+ union hwc_init_eq_id_db eq_db;
+ u32 type, val;
+
+ switch (event->type) {
+ case GDMA_EQE_HWC_INIT_EQ_ID_DB:
+ eq_db.as_uint32 = event->details[0];
+ hwc->cq->gdma_eq->id = eq_db.eq_id;
+ gd->doorbell = eq_db.doorbell;
+ break;
+
+ case GDMA_EQE_HWC_INIT_DATA:
+ type_data.as_uint32 = event->details[0];
+ type = type_data.type;
+ val = type_data.value;
+
+ switch (type) {
+ case HWC_INIT_DATA_CQID:
+ hwc->cq->gdma_cq->id = val;
+ break;
+
+ case HWC_INIT_DATA_RQID:
+ hwc->rxq->gdma_wq->id = val;
+ break;
+
+ case HWC_INIT_DATA_SQID:
+ hwc->txq->gdma_wq->id = val;
+ break;
+
+ case HWC_INIT_DATA_QUEUE_DEPTH:
+ hwc->hwc_init_q_depth_max = (u16)val;
+ break;
+
+ case HWC_INIT_DATA_MAX_REQUEST:
+ hwc->hwc_init_max_req_msg_size = val;
+ break;
+
+ case HWC_INIT_DATA_MAX_RESPONSE:
+ hwc->hwc_init_max_resp_msg_size = val;
+ break;
+
+ case HWC_INIT_DATA_MAX_NUM_CQS:
+ gd->gdma_context->max_num_cqs = val;
+ break;
+
+ case HWC_INIT_DATA_PDID:
+ hwc->gdma_dev->pdid = val;
+ break;
+
+ case HWC_INIT_DATA_GPA_MKEY:
+ hwc->rxq->msg_buf->gpa_mkey = val;
+ hwc->txq->msg_buf->gpa_mkey = val;
+ break;
+
+ case HWC_INIT_DATA_PF_DEST_RQ_ID:
+ hwc->pf_dest_vrq_id = val;
+ break;
+
+ case HWC_INIT_DATA_PF_DEST_CQ_ID:
+ hwc->pf_dest_vrcq_id = val;
+ break;
+ }
+
+ break;
+
+ case GDMA_EQE_HWC_INIT_DONE:
+ complete(&hwc->hwc_init_eqe_comp);
+ break;
+
+ default:
+ /* Ignore unknown events, which should never happen. */
+ break;
+ }
+}
+
+static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
+ const struct hwc_rx_oob *rx_oob)
+{
+ struct hw_channel_context *hwc = ctx;
+ struct hwc_wq *hwc_rxq = hwc->rxq;
+ struct hwc_work_request *rx_req;
+ struct gdma_resp_hdr *resp;
+ struct gdma_wqe *dma_oob;
+ struct gdma_queue *rq;
+ struct gdma_sge *sge;
+ u64 rq_base_addr;
+ u64 rx_req_idx;
+ u8 *wqe;
+
+ if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
+ return;
+
+ rq = hwc_rxq->gdma_wq;
+ wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
+ dma_oob = (struct gdma_wqe *)wqe;
+
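+	/* The SGE follows the 8-byte WQE header and the inline OOB, whose
+	 * size is stored in units of 4 bytes.
+	 */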
+ sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);
+
+	/* Select the RX work request for its virtual address and for reposting. */
+ rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
+ rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;
+
+ rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
+ resp = (struct gdma_resp_hdr *)rx_req->buf_va;
+
+ if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
+ dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
+ resp->response.hwc_msg_id);
+ return;
+ }
+
+ mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
+
+	/* Stop using 'resp' here: the buffer is reposted to the HW by
+	 * mana_hwc_post_rx_wqe() below.
+ */
+ resp = NULL;
+
+ mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
+}
+
+static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
+ const struct hwc_rx_oob *rx_oob)
+{
+ struct hw_channel_context *hwc = ctx;
+ struct hwc_wq *hwc_txq = hwc->txq;
+
+ WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
+}
+
+static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
+ enum gdma_queue_type type, u64 queue_size,
+ struct gdma_queue **queue)
+{
+ struct gdma_queue_spec spec = {};
+
+ if (type != GDMA_SQ && type != GDMA_RQ)
+ return -EINVAL;
+
+ spec.type = type;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = queue_size;
+
+ return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
+}
+
+static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
+ u64 queue_size,
+ void *ctx, gdma_cq_callback *cb,
+ struct gdma_queue *parent_eq,
+ struct gdma_queue **queue)
+{
+ struct gdma_queue_spec spec = {};
+
+ spec.type = GDMA_CQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = queue_size;
+ spec.cq.context = ctx;
+ spec.cq.callback = cb;
+ spec.cq.parent_eq = parent_eq;
+
+ return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
+}
+
+static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
+ u64 queue_size,
+ void *ctx, gdma_eq_callback *cb,
+ struct gdma_queue **queue)
+{
+ struct gdma_queue_spec spec = {};
+
+ spec.type = GDMA_EQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = queue_size;
+ spec.eq.context = ctx;
+ spec.eq.callback = cb;
+ spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
+
+ return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
+}
+
+static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
+{
+ struct hwc_rx_oob comp_data = {};
+ struct gdma_comp *completions;
+ struct hwc_cq *hwc_cq = ctx;
+ int comp_read, i;
+
+ WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);
+
+ completions = hwc_cq->comp_buf;
+ comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
+ WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);
+
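+	/* Dispatch each completion to the TX or RX handler depending on
+	 * which work queue generated it.
+	 */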
+ for (i = 0; i < comp_read; ++i) {
+ comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;
+
+ if (completions[i].is_sq)
+ hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
+ completions[i].wq_num,
+ &comp_data);
+ else
+ hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
+ completions[i].wq_num,
+ &comp_data);
+ }
+
+ mana_gd_ring_cq(q_self, SET_ARM_BIT);
+}
+
+static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
+{
+ kfree(hwc_cq->comp_buf);
+
+ if (hwc_cq->gdma_cq)
+ mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);
+
+ if (hwc_cq->gdma_eq)
+ mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);
+
+ kfree(hwc_cq);
+}
+
+static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
+ gdma_eq_callback *callback, void *ctx,
+ hwc_rx_event_handler_t *rx_ev_hdlr,
+ void *rx_ev_ctx,
+ hwc_tx_event_handler_t *tx_ev_hdlr,
+ void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
+{
+ struct gdma_queue *eq, *cq;
+ struct gdma_comp *comp_buf;
+ struct hwc_cq *hwc_cq;
+ u32 eq_size, cq_size;
+ int err;
+
+ eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
+ if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+ eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+
+ cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
+ if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+ cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+
+ hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
+ if (!hwc_cq)
+ return -ENOMEM;
+
+ err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
+ if (err) {
+ dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
+ goto out;
+ }
+ hwc_cq->gdma_eq = eq;
+
+ err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
+ eq, &cq);
+ if (err) {
+ dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
+ goto out;
+ }
+ hwc_cq->gdma_cq = cq;
+
+ comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
+ if (!comp_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ hwc_cq->hwc = hwc;
+ hwc_cq->comp_buf = comp_buf;
+ hwc_cq->queue_depth = q_depth;
+ hwc_cq->rx_event_handler = rx_ev_hdlr;
+ hwc_cq->rx_event_ctx = rx_ev_ctx;
+ hwc_cq->tx_event_handler = tx_ev_hdlr;
+ hwc_cq->tx_event_ctx = tx_ev_ctx;
+
+ *hwc_cq_ptr = hwc_cq;
+ return 0;
+out:
+ mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
+ return err;
+}
+
+static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
+ u32 max_msg_size,
+ struct hwc_dma_buf **dma_buf_ptr)
+{
+ struct gdma_context *gc = hwc->gdma_dev->gdma_context;
+ struct hwc_work_request *hwc_wr;
+ struct hwc_dma_buf *dma_buf;
+ struct gdma_mem_info *gmi;
+ void *virt_addr;
+ u32 buf_size;
+ u8 *base_pa;
+ int err;
+ u16 i;
+
+ dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+
+ dma_buf->num_reqs = q_depth;
+
+ buf_size = PAGE_ALIGN(q_depth * max_msg_size);
+
+ gmi = &dma_buf->mem_info;
+ err = mana_gd_alloc_memory(gc, buf_size, gmi);
+ if (err) {
+ dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+ goto out;
+ }
+
+ virt_addr = dma_buf->mem_info.virt_addr;
+ base_pa = (u8 *)dma_buf->mem_info.dma_handle;
+
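+	/* Carve the single DMA region into q_depth fixed-size slices, one
+	 * per work request.
+	 */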
+ for (i = 0; i < q_depth; i++) {
+ hwc_wr = &dma_buf->reqs[i];
+
+ hwc_wr->buf_va = virt_addr + i * max_msg_size;
+ hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;
+
+ hwc_wr->buf_len = max_msg_size;
+ }
+
+ *dma_buf_ptr = dma_buf;
+ return 0;
+out:
+ kfree(dma_buf);
+ return err;
+}
+
+static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
+ struct hwc_dma_buf *dma_buf)
+{
+ if (!dma_buf)
+ return;
+
+ mana_gd_free_memory(&dma_buf->mem_info);
+
+ kfree(dma_buf);
+}
+
+static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
+ struct hwc_wq *hwc_wq)
+{
+ mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);
+
+ if (hwc_wq->gdma_wq)
+ mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
+ hwc_wq->gdma_wq);
+
+ kfree(hwc_wq);
+}
+
+static int mana_hwc_create_wq(struct hw_channel_context *hwc,
+ enum gdma_queue_type q_type, u16 q_depth,
+ u32 max_msg_size, struct hwc_cq *hwc_cq,
+ struct hwc_wq **hwc_wq_ptr)
+{
+ struct gdma_queue *queue;
+ struct hwc_wq *hwc_wq;
+ u32 queue_size;
+ int err;
+
+ WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);
+
+ if (q_type == GDMA_RQ)
+ queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
+ else
+ queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
+
+ if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+ queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+
+ hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
+ if (!hwc_wq)
+ return -ENOMEM;
+
+ err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
+ if (err)
+ goto out;
+
+ hwc_wq->hwc = hwc;
+ hwc_wq->gdma_wq = queue;
+ hwc_wq->queue_depth = q_depth;
+ hwc_wq->hwc_cq = hwc_cq;
+
+ err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
+ &hwc_wq->msg_buf);
+ if (err)
+ goto out;
+
+ *hwc_wq_ptr = hwc_wq;
+ return 0;
+out:
+ if (err)
+ mana_hwc_destroy_wq(hwc, hwc_wq);
+ return err;
+}
+
+static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
+ struct hwc_work_request *req,
+ u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
+ bool dest_pf)
+{
+ struct device *dev = hwc_txq->hwc->dev;
+ struct hwc_tx_oob *tx_oob;
+ struct gdma_sge *sge;
+ int err;
+
+ if (req->msg_size == 0 || req->msg_size > req->buf_len) {
+ dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
+ req->msg_size, req->buf_len);
+ return -EINVAL;
+ }
+
+ tx_oob = &req->tx_oob;
+
+ tx_oob->vrq_id = dest_virt_rq_id;
+ tx_oob->dest_vfid = 0;
+ tx_oob->vrcq_id = dest_virt_rcq_id;
+ tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
+ tx_oob->loopback = false;
+ tx_oob->lso_override = false;
+ tx_oob->dest_pf = dest_pf;
+ tx_oob->vsq_id = hwc_txq->gdma_wq->id;
+
+ sge = &req->sge;
+ sge->address = (u64)req->buf_sge_addr;
+ sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
+ sge->size = req->msg_size;
+
+ memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
+ req->wqe_req.sgl = sge;
+ req->wqe_req.num_sge = 1;
+ req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
+ req->wqe_req.inline_oob_data = tx_oob;
+ req->wqe_req.client_data_unit = 0;
+
+ err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
+ if (err)
+ dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
+ return err;
+}
+
+static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
+ u16 num_msg)
+{
+ int err;
+
+ sema_init(&hwc->sema, num_msg);
+
+ err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
+ if (err)
+ dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
+ return err;
+}
+
+static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
+ u32 max_req_msg_size, u32 max_resp_msg_size)
+{
+ struct gdma_context *gc = hwc->gdma_dev->gdma_context;
+ struct hwc_wq *hwc_rxq = hwc->rxq;
+ struct hwc_work_request *req;
+ struct hwc_caller_ctx *ctx;
+ int err;
+ int i;
+
+ /* Post all WQEs on the RQ */
+ for (i = 0; i < q_depth; i++) {
+ req = &hwc_rxq->msg_buf->reqs[i];
+ err = mana_hwc_post_rx_wqe(hwc_rxq, req);
+ if (err)
+ return err;
+ }
+
+ ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < q_depth; ++i)
+ init_completion(&ctx[i].comp_event);
+
+ hwc->caller_ctx = ctx;
+
+ return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
+}
+
+static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
+ u32 *max_req_msg_size,
+ u32 *max_resp_msg_size)
+{
+ struct hw_channel_context *hwc = gc->hwc.driver_data;
+ struct gdma_queue *rq = hwc->rxq->gdma_wq;
+ struct gdma_queue *sq = hwc->txq->gdma_wq;
+ struct gdma_queue *eq = hwc->cq->gdma_eq;
+ struct gdma_queue *cq = hwc->cq->gdma_cq;
+ int err;
+
+ init_completion(&hwc->hwc_init_eqe_comp);
+
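+	/* Pass the queue addresses to the HW over the shared memory channel.
+	 * The HW replies with EQEs handled by mana_hwc_init_event_handler()
+	 * and signals GDMA_EQE_HWC_INIT_DONE when it is done.
+	 */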
+ err = mana_smc_setup_hwc(&gc->shm_channel, false,
+ eq->mem_info.dma_handle,
+ cq->mem_info.dma_handle,
+ rq->mem_info.dma_handle,
+ sq->mem_info.dma_handle,
+ eq->eq.msix_index);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
+ return -ETIMEDOUT;
+
+ *q_depth = hwc->hwc_init_q_depth_max;
+ *max_req_msg_size = hwc->hwc_init_max_req_msg_size;
+ *max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;
+
+	/* Both cq->id and gc->max_num_cqs were set in
+	 * mana_hwc_init_event_handler().
+	 */
+ if (WARN_ON(cq->id >= gc->max_num_cqs))
+ return -EPROTO;
+
+ gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *));
+ if (!gc->cq_table)
+ return -ENOMEM;
+
+ gc->cq_table[cq->id] = cq;
+
+ return 0;
+}
+
+static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
+ u32 max_req_msg_size, u32 max_resp_msg_size)
+{
+ int err;
+
+ err = mana_hwc_init_inflight_msg(hwc, q_depth);
+ if (err)
+ return err;
+
+ /* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
+ * queue depth and RQ queue depth.
+ */
+ err = mana_hwc_create_cq(hwc, q_depth * 2,
+ mana_hwc_init_event_handler, hwc,
+ mana_hwc_rx_event_handler, hwc,
+ mana_hwc_tx_event_handler, hwc, &hwc->cq);
+ if (err) {
+ dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
+ goto out;
+ }
+
+ err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
+ hwc->cq, &hwc->rxq);
+ if (err) {
+ dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
+ goto out;
+ }
+
+ err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
+ hwc->cq, &hwc->txq);
+ if (err) {
+ dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
+ goto out;
+ }
+
+ hwc->num_inflight_msg = q_depth;
+ hwc->max_req_msg_size = max_req_msg_size;
+
+ return 0;
+out:
+	/* mana_hwc_create_channel() will do the cleanup. */
+ return err;
+}
+
+int mana_hwc_create_channel(struct gdma_context *gc)
+{
+ u32 max_req_msg_size, max_resp_msg_size;
+ struct gdma_dev *gd = &gc->hwc;
+ struct hw_channel_context *hwc;
+ u16 q_depth_max;
+ int err;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+ if (!hwc)
+ return -ENOMEM;
+
+ gd->gdma_context = gc;
+ gd->driver_data = hwc;
+ hwc->gdma_dev = gd;
+ hwc->dev = gc->dev;
+
+ /* HWC's instance number is always 0. */
+ gd->dev_id.as_uint32 = 0;
+ gd->dev_id.type = GDMA_DEVICE_HWC;
+
+ gd->pdid = INVALID_PDID;
+ gd->doorbell = INVALID_DOORBELL;
+
+ /* mana_hwc_init_queues() only creates the required data structures,
+ * and doesn't touch the HWC device.
+ */
+ err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
+ HW_CHANNEL_MAX_REQUEST_SIZE,
+ HW_CHANNEL_MAX_RESPONSE_SIZE);
+ if (err) {
+ dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
+ goto out;
+ }
+
+ err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
+ &max_resp_msg_size);
+ if (err) {
+ dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
+ goto out;
+ }
+
+ err = mana_hwc_test_channel(gc->hwc.driver_data,
+ HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
+ max_req_msg_size, max_resp_msg_size);
+ if (err) {
+ dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
+ goto out;
+ }
+
+ return 0;
+out:
+ mana_hwc_destroy_channel(gc);
+ return err;
+}
+
+void mana_hwc_destroy_channel(struct gdma_context *gc)
+{
+ struct hw_channel_context *hwc = gc->hwc.driver_data;
+
+ if (!hwc)
+ return;
+
+ /* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
+ * non-zero, the HWC worked and we should tear down the HWC here.
+ */
+ if (gc->max_num_cqs > 0) {
+ mana_smc_teardown_hwc(&gc->shm_channel, false);
+ gc->max_num_cqs = 0;
+ }
+
+ kfree(hwc->caller_ctx);
+ hwc->caller_ctx = NULL;
+
+ if (hwc->txq)
+ mana_hwc_destroy_wq(hwc, hwc->txq);
+
+ if (hwc->rxq)
+ mana_hwc_destroy_wq(hwc, hwc->rxq);
+
+ if (hwc->cq)
+ mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
+
+ mana_gd_free_res_map(&hwc->inflight_msg_res);
+
+ hwc->num_inflight_msg = 0;
+
+ hwc->gdma_dev->doorbell = INVALID_DOORBELL;
+ hwc->gdma_dev->pdid = INVALID_PDID;
+
+ kfree(hwc);
+ gc->hwc.driver_data = NULL;
+ gc->hwc.gdma_context = NULL;
+
+ vfree(gc->cq_table);
+ gc->cq_table = NULL;
+}
+
+int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
+ const void *req, u32 resp_len, void *resp)
+{
+ struct gdma_context *gc = hwc->gdma_dev->gdma_context;
+ struct hwc_work_request *tx_wr;
+ struct hwc_wq *txq = hwc->txq;
+ struct gdma_req_hdr *req_msg;
+ struct hwc_caller_ctx *ctx;
+ u32 dest_vrcq = 0;
+ u32 dest_vrq = 0;
+ u16 msg_id;
+ int err;
+
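+	/* This blocks until a message slot is free and, as implemented,
+	 * always succeeds, so the return value is not checked.
+	 */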
+ mana_hwc_get_msg_index(hwc, &msg_id);
+
+ tx_wr = &txq->msg_buf->reqs[msg_id];
+
+ if (req_len > tx_wr->buf_len) {
+ dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
+ tx_wr->buf_len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ ctx = hwc->caller_ctx + msg_id;
+ ctx->output_buf = resp;
+ ctx->output_buflen = resp_len;
+
+ req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
+ if (req)
+ memcpy(req_msg, req, req_len);
+
+ req_msg->req.hwc_msg_id = msg_id;
+
+ tx_wr->msg_size = req_len;
+
+ if (gc->is_pf) {
+ dest_vrq = hwc->pf_dest_vrq_id;
+ dest_vrcq = hwc->pf_dest_vrcq_id;
+ }
+
+ err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
+ if (err) {
+ dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
+ dev_err(hwc->dev, "HWC: Request timed out!\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (ctx->error) {
+ err = ctx->error;
+ goto out;
+ }
+
+ if (ctx->status_code) {
+ dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
+ ctx->status_code);
+ err = -EPROTO;
+ goto out;
+ }
+out:
+ mana_hwc_put_msg_index(hwc, msg_id);
+ return err;
+}
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h
new file mode 100644
index 000000000..6a757a6e2
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _HW_CHANNEL_H
+#define _HW_CHANNEL_H
+
+#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4
+
+#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000
+#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000
+
+#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1
+
+#define HWC_INIT_DATA_CQID 1
+#define HWC_INIT_DATA_RQID 2
+#define HWC_INIT_DATA_SQID 3
+#define HWC_INIT_DATA_QUEUE_DEPTH 4
+#define HWC_INIT_DATA_MAX_REQUEST 5
+#define HWC_INIT_DATA_MAX_RESPONSE 6
+#define HWC_INIT_DATA_MAX_NUM_CQS 7
+#define HWC_INIT_DATA_PDID 8
+#define HWC_INIT_DATA_GPA_MKEY 9
+#define HWC_INIT_DATA_PF_DEST_RQ_ID 10
+#define HWC_INIT_DATA_PF_DEST_CQ_ID 11
+
+/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+union hwc_init_eq_id_db {
+ u32 as_uint32;
+
+ struct {
+ u32 eq_id : 16;
+ u32 doorbell : 16;
+ };
+}; /* HW DATA */
+
+union hwc_init_type_data {
+ u32 as_uint32;
+
+ struct {
+ u32 value : 24;
+ u32 type : 8;
+ };
+}; /* HW DATA */
+
+struct hwc_rx_oob {
+ u32 type : 6;
+ u32 eom : 1;
+ u32 som : 1;
+ u32 vendor_err : 8;
+ u32 reserved1 : 16;
+
+ u32 src_virt_wq : 24;
+ u32 src_vfid : 8;
+
+ u32 reserved2;
+
+ union {
+ u32 wqe_addr_low;
+ u32 wqe_offset;
+ };
+
+ u32 wqe_addr_high;
+
+ u32 client_data_unit : 14;
+ u32 reserved3 : 18;
+
+ u32 tx_oob_data_size;
+
+ u32 chunk_offset : 21;
+ u32 reserved4 : 11;
+}; /* HW DATA */
+
+struct hwc_tx_oob {
+ u32 reserved1;
+
+ u32 reserved2;
+
+ u32 vrq_id : 24;
+ u32 dest_vfid : 8;
+
+ u32 vrcq_id : 24;
+ u32 reserved3 : 8;
+
+ u32 vscq_id : 24;
+ u32 loopback : 1;
+ u32 lso_override: 1;
+ u32 dest_pf : 1;
+ u32 reserved4 : 5;
+
+ u32 vsq_id : 24;
+ u32 reserved5 : 8;
+}; /* HW DATA */
+
+struct hwc_work_request {
+ void *buf_va;
+ void *buf_sge_addr;
+ u32 buf_len;
+ u32 msg_size;
+
+ struct gdma_wqe_request wqe_req;
+ struct hwc_tx_oob tx_oob;
+
+ struct gdma_sge sge;
+};
+
+/* hwc_dma_buf represents the array of in-flight WQEs.
+ * mem_info, i.e. the GDMA-mapped memory, is partitioned and used by the
+ * in-flight WQEs.
+ * The number of WQEs is determined by the number of in-flight messages.
+ */
+struct hwc_dma_buf {
+ struct gdma_mem_info mem_info;
+
+ u32 gpa_mkey;
+
+ u32 num_reqs;
+ struct hwc_work_request reqs[];
+};
+
+typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id,
+ const struct hwc_rx_oob *rx_oob);
+
+typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id,
+ const struct hwc_rx_oob *rx_oob);
+
+struct hwc_cq {
+ struct hw_channel_context *hwc;
+
+ struct gdma_queue *gdma_cq;
+ struct gdma_queue *gdma_eq;
+ struct gdma_comp *comp_buf;
+ u16 queue_depth;
+
+ hwc_rx_event_handler_t *rx_event_handler;
+ void *rx_event_ctx;
+
+ hwc_tx_event_handler_t *tx_event_handler;
+ void *tx_event_ctx;
+};
+
+struct hwc_wq {
+ struct hw_channel_context *hwc;
+
+ struct gdma_queue *gdma_wq;
+ struct hwc_dma_buf *msg_buf;
+ u16 queue_depth;
+
+ struct hwc_cq *hwc_cq;
+};
+
+struct hwc_caller_ctx {
+ struct completion comp_event;
+ void *output_buf;
+ u32 output_buflen;
+
+ u32 error; /* Linux error code */
+ u32 status_code;
+};
+
+struct hw_channel_context {
+ struct gdma_dev *gdma_dev;
+ struct device *dev;
+
+ u16 num_inflight_msg;
+ u32 max_req_msg_size;
+
+ u16 hwc_init_q_depth_max;
+ u32 hwc_init_max_req_msg_size;
+ u32 hwc_init_max_resp_msg_size;
+
+ struct completion hwc_init_eqe_comp;
+
+ struct hwc_wq *rxq;
+ struct hwc_wq *txq;
+ struct hwc_cq *cq;
+
+ struct semaphore sema;
+ struct gdma_resource inflight_msg_res;
+
+ u32 pf_dest_vrq_id;
+ u32 pf_dest_vrcq_id;
+
+ struct hwc_caller_ctx *caller_ctx;
+};
+
+int mana_hwc_create_channel(struct gdma_context *gc);
+void mana_hwc_destroy_channel(struct gdma_context *gc);
+
+int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
+ const void *req, u32 resp_len, void *resp);
+
+#endif /* _HW_CHANNEL_H */
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
new file mode 100644
index 000000000..d58be6437
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _MANA_H
+#define _MANA_H
+
+#include "gdma.h"
+#include "hw_channel.h"
+
+/* Microsoft Azure Network Adapter (MANA)'s definitions
+ *
+ * Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+/* MANA protocol version */
+#define MANA_MAJOR_VERSION 0
+#define MANA_MINOR_VERSION 1
+#define MANA_MICRO_VERSION 1
+
+typedef u64 mana_handle_t;
+#define INVALID_MANA_HANDLE ((mana_handle_t)-1)
+
+enum TRI_STATE {
+ TRI_STATE_UNKNOWN = -1,
+ TRI_STATE_FALSE = 0,
+ TRI_STATE_TRUE = 1
+};
+
+/* The number of entries in the hardware indirection table must be a power of 2 */
+#define MANA_INDIRECT_TABLE_SIZE 64
+#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
+
+/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
+#define MANA_HASH_KEY_SIZE 40
+
+#define COMP_ENTRY_SIZE 64
+
+#define ADAPTER_MTU_SIZE 1500
+#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)
+
+#define RX_BUFFERS_PER_QUEUE 512
+
+#define MAX_SEND_BUFFERS_PER_QUEUE 256
+
+#define EQ_SIZE (8 * PAGE_SIZE)
+#define LOG2_EQ_THROTTLE 3
+
+#define MAX_PORTS_IN_MANA_DEV 256
+
+struct mana_stats_rx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
+ u64 xdp_redirect;
+ struct u64_stats_sync syncp;
+};
+
+struct mana_stats_tx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_xmit;
+ struct u64_stats_sync syncp;
+};
+
+struct mana_txq {
+ struct gdma_queue *gdma_sq;
+
+ union {
+ u32 gdma_txq_id;
+ struct {
+ u32 reserved1 : 10;
+ u32 vsq_frame : 14;
+ u32 reserved2 : 8;
+ };
+ };
+
+ u16 vp_offset;
+
+ struct net_device *ndev;
+
+	/* SKBs that have been sent to the HW and are waiting for the CQEs. */
+ struct sk_buff_head pending_skbs;
+ struct netdev_queue *net_txq;
+
+ atomic_t pending_sends;
+
+ struct mana_stats_tx stats;
+};
+
+/* skb data and frags dma mappings */
+struct mana_skb_head {
+ dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];
+
+ u32 size[MAX_SKB_FRAGS + 1];
+};
+
+#define MANA_HEADROOM sizeof(struct mana_skb_head)
+
+enum mana_tx_pkt_format {
+ MANA_SHORT_PKT_FMT = 0,
+ MANA_LONG_PKT_FMT = 1,
+};
+
+struct mana_tx_short_oob {
+ u32 pkt_fmt : 2;
+ u32 is_outer_ipv4 : 1;
+ u32 is_outer_ipv6 : 1;
+ u32 comp_iphdr_csum : 1;
+ u32 comp_tcp_csum : 1;
+ u32 comp_udp_csum : 1;
+ u32 supress_txcqe_gen : 1;
+ u32 vcq_num : 24;
+
+ u32 trans_off : 10; /* Transport header offset */
+ u32 vsq_frame : 14;
+ u32 short_vp_offset : 8;
+}; /* HW DATA */
+
+struct mana_tx_long_oob {
+ u32 is_encap : 1;
+ u32 inner_is_ipv6 : 1;
+ u32 inner_tcp_opt : 1;
+ u32 inject_vlan_pri_tag : 1;
+ u32 reserved1 : 12;
+ u32 pcp : 3; /* 802.1Q */
+ u32 dei : 1; /* 802.1Q */
+ u32 vlan_id : 12; /* 802.1Q */
+
+ u32 inner_frame_offset : 10;
+ u32 inner_ip_rel_offset : 6;
+ u32 long_vp_offset : 12;
+ u32 reserved2 : 4;
+
+ u32 reserved3;
+ u32 reserved4;
+}; /* HW DATA */
+
+struct mana_tx_oob {
+ struct mana_tx_short_oob s_oob;
+ struct mana_tx_long_oob l_oob;
+}; /* HW DATA */
+
+enum mana_cq_type {
+ MANA_CQ_TYPE_RX,
+ MANA_CQ_TYPE_TX,
+};
+
+enum mana_cqe_type {
+ CQE_INVALID = 0,
+ CQE_RX_OKAY = 1,
+ CQE_RX_COALESCED_4 = 2,
+ CQE_RX_OBJECT_FENCE = 3,
+ CQE_RX_TRUNCATED = 4,
+
+ CQE_TX_OKAY = 32,
+ CQE_TX_SA_DROP = 33,
+ CQE_TX_MTU_DROP = 34,
+ CQE_TX_INVALID_OOB = 35,
+ CQE_TX_INVALID_ETH_TYPE = 36,
+ CQE_TX_HDR_PROCESSING_ERROR = 37,
+ CQE_TX_VF_DISABLED = 38,
+ CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
+ CQE_TX_VPORT_DISABLED = 40,
+ CQE_TX_VLAN_TAGGING_VIOLATION = 41,
+};
+
+#define MANA_CQE_COMPLETION 1
+
+struct mana_cqe_header {
+ u32 cqe_type : 6;
+ u32 client_type : 2;
+ u32 vendor_err : 24;
+}; /* HW DATA */
+
+/* NDIS HASH Types */
+#define NDIS_HASH_IPV4 BIT(0)
+#define NDIS_HASH_TCP_IPV4 BIT(1)
+#define NDIS_HASH_UDP_IPV4 BIT(2)
+#define NDIS_HASH_IPV6 BIT(3)
+#define NDIS_HASH_TCP_IPV6 BIT(4)
+#define NDIS_HASH_UDP_IPV6 BIT(5)
+#define NDIS_HASH_IPV6_EX BIT(6)
+#define NDIS_HASH_TCP_IPV6_EX BIT(7)
+#define NDIS_HASH_UDP_IPV6_EX BIT(8)
+
+#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
+#define MANA_HASH_L4 \
+ (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
+ NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
+
+struct mana_rxcomp_perpkt_info {
+ u32 pkt_len : 16;
+ u32 reserved1 : 16;
+ u32 reserved2;
+ u32 pkt_hash;
+}; /* HW DATA */
+
+#define MANA_RXCOMP_OOB_NUM_PPI 4
+
+/* Receive completion OOB */
+struct mana_rxcomp_oob {
+ struct mana_cqe_header cqe_hdr;
+
+ u32 rx_vlan_id : 12;
+ u32 rx_vlantag_present : 1;
+ u32 rx_outer_iphdr_csum_succeed : 1;
+ u32 rx_outer_iphdr_csum_fail : 1;
+ u32 reserved1 : 1;
+ u32 rx_hashtype : 9;
+ u32 rx_iphdr_csum_succeed : 1;
+ u32 rx_iphdr_csum_fail : 1;
+ u32 rx_tcp_csum_succeed : 1;
+ u32 rx_tcp_csum_fail : 1;
+ u32 rx_udp_csum_succeed : 1;
+ u32 rx_udp_csum_fail : 1;
+ u32 reserved2 : 1;
+
+ struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];
+
+ u32 rx_wqe_offset;
+}; /* HW DATA */
+
+struct mana_tx_comp_oob {
+ struct mana_cqe_header cqe_hdr;
+
+ u32 tx_data_offset;
+
+ u32 tx_sgl_offset : 5;
+ u32 tx_wqe_offset : 27;
+
+ u32 reserved[12];
+}; /* HW DATA */
+
+struct mana_rxq;
+
+#define CQE_POLLING_BUFFER 512
+
+struct mana_cq {
+ struct gdma_queue *gdma_cq;
+
+	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
+ u32 gdma_id;
+
+ /* Type of the CQ: TX or RX */
+ enum mana_cq_type type;
+
+ /* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
+	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
+ */
+ struct mana_rxq *rxq;
+
+ /* Pointer to the mana_txq that is pushing TX CQEs to the queue.
+	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
+ */
+ struct mana_txq *txq;
+
+	/* Buffer into which the CQ handler can copy the CQEs. */
+ struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];
+
+ /* NAPI data */
+ struct napi_struct napi;
+ int work_done;
+ int budget;
+};
+
+#define GDMA_MAX_RQE_SGES 15
+
+struct mana_recv_buf_oob {
+ /* A valid GDMA work request representing the data buffer. */
+ struct gdma_wqe_request wqe_req;
+
+ void *buf_va;
+ dma_addr_t buf_dma_addr;
+
+	/* SGL of the buffer going to be sent as part of the work request. */
+ u32 num_sge;
+ struct gdma_sge sgl[GDMA_MAX_RQE_SGES];
+
+ /* Required to store the result of mana_gd_post_work_request.
+ * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
+ * work queue when the WQE is consumed.
+ */
+ struct gdma_posted_wqe_info wqe_inf;
+};
+
+struct mana_rxq {
+ struct gdma_queue *gdma_rq;
+ /* Cache the gdma receive queue id */
+ u32 gdma_id;
+
+ /* Index of RQ in the vPort, not gdma receive queue id */
+ u32 rxq_idx;
+
+ u32 datasize;
+
+ mana_handle_t rxobj;
+
+ struct mana_cq rx_cq;
+
+ struct completion fence_event;
+
+ struct net_device *ndev;
+
+ /* Total number of receive buffers to be allocated */
+ u32 num_rx_buf;
+
+ u32 buf_index;
+
+ struct mana_stats_rx stats;
+
+ struct bpf_prog __rcu *bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
+ struct page *xdp_save_page;
+ bool xdp_flush;
+ int xdp_rc; /* XDP redirect return code */
+
+ /* MUST BE THE LAST MEMBER:
+ * Each receive buffer has an associated mana_recv_buf_oob.
+ */
+ struct mana_recv_buf_oob rx_oobs[];
+};
+
+struct mana_tx_qp {
+ struct mana_txq txq;
+
+ struct mana_cq tx_cq;
+
+ mana_handle_t tx_object;
+};
+
+struct mana_ethtool_stats {
+ u64 stop_queue;
+ u64 wake_queue;
+};
+
+struct mana_context {
+ struct gdma_dev *gdma_dev;
+
+ u16 num_ports;
+
+ struct mana_eq *eqs;
+
+ struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
+};
+
+struct mana_port_context {
+ struct mana_context *ac;
+ struct net_device *ndev;
+
+ u8 mac_addr[ETH_ALEN];
+
+ enum TRI_STATE rss_state;
+
+ mana_handle_t default_rxobj;
+ bool tx_shortform_allowed;
+ u16 tx_vp_offset;
+
+ struct mana_tx_qp *tx_qp;
+
+ /* Indirection Table for RX & TX. The values are queue indexes */
+ u32 indir_table[MANA_INDIRECT_TABLE_SIZE];
+
+ /* Indirection table containing RxObject Handles */
+ mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];
+
+ /* Hash key used by the NIC */
+ u8 hashkey[MANA_HASH_KEY_SIZE];
+
+	/* This points to an array of num_queues RQ pointers. */
+ struct mana_rxq **rxqs;
+
+ struct bpf_prog *bpf_prog;
+
+ /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
+ unsigned int max_queues;
+ unsigned int num_queues;
+
+ mana_handle_t port_handle;
+ mana_handle_t pf_filter_handle;
+
+ u16 port_idx;
+
+ bool port_is_up;
+ bool port_st_save; /* Saved port state */
+
+ struct mana_ethtool_stats eth_stats;
+};
+
+int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
+ bool update_hash, bool update_tab);
+
+int mana_alloc_queues(struct net_device *ndev);
+int mana_attach(struct net_device *ndev);
+int mana_detach(struct net_device *ndev, bool from_close);
+
+int mana_probe(struct gdma_dev *gd, bool resuming);
+void mana_remove(struct gdma_dev *gd, bool suspending);
+
+void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+ u32 flags);
+u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
+ struct xdp_buff *xdp, void *buf_va, uint pkt_len);
+struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
+void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
+int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+
+extern const struct ethtool_ops mana_ethtool_ops;
+
+struct mana_obj_spec {
+ u32 queue_index;
+ u64 gdma_region;
+ u32 queue_size;
+ u32 attached_eq;
+ u32 modr_ctx_id;
+};
+
+enum mana_command_code {
+ MANA_QUERY_DEV_CONFIG = 0x20001,
+ MANA_QUERY_GF_STAT = 0x20002,
+ MANA_CONFIG_VPORT_TX = 0x20003,
+ MANA_CREATE_WQ_OBJ = 0x20004,
+ MANA_DESTROY_WQ_OBJ = 0x20005,
+ MANA_FENCE_RQ = 0x20006,
+ MANA_CONFIG_VPORT_RX = 0x20007,
+ MANA_QUERY_VPORT_CONFIG = 0x20008,
+
+ /* Privileged commands for the PF mode */
+ MANA_REGISTER_FILTER = 0x28000,
+ MANA_DEREGISTER_FILTER = 0x28001,
+ MANA_REGISTER_HW_PORT = 0x28003,
+ MANA_DEREGISTER_HW_PORT = 0x28004,
+};
+
+/* Query Device Configuration */
+struct mana_query_device_cfg_req {
+ struct gdma_req_hdr hdr;
+
+	/* MANA NIC Driver Capability flags */
+ u64 mn_drv_cap_flags1;
+ u64 mn_drv_cap_flags2;
+ u64 mn_drv_cap_flags3;
+ u64 mn_drv_cap_flags4;
+
+ u32 proto_major_ver;
+ u32 proto_minor_ver;
+ u32 proto_micro_ver;
+
+ u32 reserved;
+}; /* HW DATA */
+
+struct mana_query_device_cfg_resp {
+ struct gdma_resp_hdr hdr;
+
+ u64 pf_cap_flags1;
+ u64 pf_cap_flags2;
+ u64 pf_cap_flags3;
+ u64 pf_cap_flags4;
+
+ u16 max_num_vports;
+ u16 reserved;
+ u32 max_num_eqs;
+}; /* HW DATA */
+
+/* Query vPort Configuration */
+struct mana_query_vport_cfg_req {
+ struct gdma_req_hdr hdr;
+ u32 vport_index;
+}; /* HW DATA */
+
+struct mana_query_vport_cfg_resp {
+ struct gdma_resp_hdr hdr;
+ u32 max_num_sq;
+ u32 max_num_rq;
+ u32 num_indirection_ent;
+ u32 reserved1;
+ u8 mac_addr[6];
+ u8 reserved2[2];
+ mana_handle_t vport;
+}; /* HW DATA */
+
+/* Configure vPort */
+struct mana_config_vport_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u32 pdid;
+ u32 doorbell_pageid;
+}; /* HW DATA */
+
+struct mana_config_vport_resp {
+ struct gdma_resp_hdr hdr;
+ u16 tx_vport_offset;
+ u8 short_form_allowed;
+ u8 reserved;
+}; /* HW DATA */
+
+/* Create WQ Object */
+struct mana_create_wqobj_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u32 wq_type;
+ u32 reserved;
+ u64 wq_gdma_region;
+ u64 cq_gdma_region;
+ u32 wq_size;
+ u32 cq_size;
+ u32 cq_moderation_ctx_id;
+ u32 cq_parent_qid;
+}; /* HW DATA */
+
+struct mana_create_wqobj_resp {
+ struct gdma_resp_hdr hdr;
+ u32 wq_id;
+ u32 cq_id;
+ mana_handle_t wq_obj;
+}; /* HW DATA */
+
+/* Destroy WQ Object */
+struct mana_destroy_wqobj_req {
+ struct gdma_req_hdr hdr;
+ u32 wq_type;
+ u32 reserved;
+ mana_handle_t wq_obj_handle;
+}; /* HW DATA */
+
+struct mana_destroy_wqobj_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Fence RQ */
+struct mana_fence_rq_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t wq_obj_handle;
+}; /* HW DATA */
+
+struct mana_fence_rq_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Configure vPort Rx Steering */
+struct mana_cfg_rx_steer_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u16 num_indir_entries;
+ u16 indir_tab_offset;
+ u32 rx_enable;
+ u32 rss_enable;
+ u8 update_default_rxobj;
+ u8 update_hashkey;
+ u8 update_indir_tab;
+ u8 reserved;
+ mana_handle_t default_rxobj;
+ u8 hashkey[MANA_HASH_KEY_SIZE];
+}; /* HW DATA */
+
+struct mana_cfg_rx_steer_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register HW vPort */
+struct mana_register_hw_vport_req {
+ struct gdma_req_hdr hdr;
+ u16 attached_gfid;
+ u8 is_pf_default_vport;
+ u8 reserved1;
+ u8 allow_all_ether_types;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+}; /* HW DATA */
+
+struct mana_register_hw_vport_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+/* Deregister HW vPort */
+struct mana_deregister_hw_vport_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+struct mana_deregister_hw_vport_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register filter */
+struct mana_register_filter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u8 mac_addr[6];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+ u16 reserved5;
+ u32 reserved6;
+ u32 reserved7;
+ u32 reserved8;
+}; /* HW DATA */
+
+struct mana_register_filter_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t filter_handle;
+}; /* HW DATA */
+
+/* Deregister filter */
+struct mana_deregister_filter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t filter_handle;
+}; /* HW DATA */
+
+struct mana_deregister_filter_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+#define MANA_MAX_NUM_QUEUES 64
+
+#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
+
+struct mana_tx_package {
+ struct gdma_wqe_request wqe_req;
+ struct gdma_sge sgl_array[5];
+ struct gdma_sge *sgl_ptr;
+
+ struct mana_tx_oob tx_oob;
+
+ struct gdma_posted_wqe_info wqe_info;
+};
+
+#endif /* _MANA_H */
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
new file mode 100644
index 000000000..421fd39ff
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mm.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+
+#include "mana.h"
+
+void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+ u16 txq_idx = skb_get_queue_mapping(skb);
+ struct netdev_queue *ndevtxq;
+ int rc;
+
+ __skb_push(skb, ETH_HLEN);
+
+ ndevtxq = netdev_get_tx_queue(ndev, txq_idx);
+ __netif_tx_lock(ndevtxq, smp_processor_id());
+
+ rc = mana_start_xmit(skb, ndev);
+
+ __netif_tx_unlock(ndevtxq);
+
+ if (dev_xmit_complete(rc))
+ return;
+
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+}
+
+static int mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame,
+ u16 q_idx)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_frame(frame, ndev);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_set_queue_mapping(skb, q_idx);
+
+ mana_xdp_tx(skb, ndev);
+
+ return 0;
+}
+
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct mana_stats_tx *tx_stats;
+ int i, count = 0;
+ u16 q_idx;
+
+ if (unlikely(!apc->port_is_up))
+ return 0;
+
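+	/* Spread redirected frames across the TX queues by the current CPU. */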
+ q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+ for (i = 0; i < n; i++) {
+ if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))
+ break;
+
+ count++;
+ }
+
+ tx_stats = &apc->tx_qp[q_idx].txq.stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->xdp_xmit += count;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ return count;
+}
+
+u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
+ struct xdp_buff *xdp, void *buf_va, uint pkt_len)
+{
+ struct mana_stats_rx *rx_stats;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+ prog = rcu_dereference(rxq->bpf_prog);
+
+ if (!prog)
+ goto out;
+
+ xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ rx_stats = &rxq->stats;
+
+ switch (act) {
+ case XDP_PASS:
+ case XDP_TX:
+ case XDP_DROP:
+ break;
+
+ case XDP_REDIRECT:
+ rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
+ if (!rxq->xdp_rc) {
+ rxq->xdp_flush = true;
+
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += pkt_len;
+ rx_stats->xdp_redirect++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ break;
+ }
+
+ fallthrough;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ }
+
+out:
+ rcu_read_unlock();
+
+ return act;
+}
+
+static unsigned int mana_xdp_fraglen(unsigned int len)
+{
+ return SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
+{
+ ASSERT_RTNL();
+
+ return apc->bpf_prog;
+}
+
+static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc)
+{
+ return rtnl_dereference(apc->rxqs[0]->bpf_prog);
+}
+
+/* Set xdp program on channels */
+void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog = mana_chn_xdp_get(apc);
+ unsigned int num_queues = apc->num_queues;
+ int i;
+
+ ASSERT_RTNL();
+
+ if (old_prog == prog)
+ return;
+
+ if (prog)
+ bpf_prog_add(prog, num_queues);
+
+ for (i = 0; i < num_queues; i++)
+ rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog);
+
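+	/* Each RX queue held one reference on the old program; drop them. */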
+ if (old_prog)
+ for (i = 0; i < num_queues; i++)
+ bpf_prog_put(old_prog);
+}
+
+static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+ int buf_max;
+
+ old_prog = mana_xdp_get(apc);
+
+ if (!old_prog && !prog)
+ return 0;
+
+ buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
+ if (prog && buf_max > PAGE_SIZE) {
+ netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
+ ndev->mtu, buf_max);
+ NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
+
+ return -EOPNOTSUPP;
+ }
+
+	/* One refcnt of the prog is already held by the caller, so
+	 * don't take another one here.
+ */
+ apc->bpf_prog = prog;
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (apc->port_is_up)
+ mana_chn_setxdp(apc, prog);
+
+ return 0;
+}
+
+int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct netlink_ext_ack *extack = bpf->extack;
+ int ret;
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return mana_xdp_set(ndev, bpf->prog, extack);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
new file mode 100644
index 000000000..b751b03ed
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -0,0 +1,2273 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <uapi/linux/bpf.h>
+
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/filter.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include "mana.h"
+
+/* Microsoft Azure Network Adapter (MANA) functions */
+
+static int mana_open(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ err = mana_alloc_queues(ndev);
+ if (err)
+ return err;
+
+ apc->port_is_up = true;
+
+	/* Ensure the port state is updated before the txq state */
+ smp_wmb();
+
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+
+ return 0;
+}
+
+static int mana_close(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ if (!apc->port_is_up)
+ return 0;
+
+ return mana_detach(ndev, true);
+}
+
+static bool mana_can_tx(struct gdma_queue *wq)
+{
+ return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
+}
+
+static unsigned int mana_checksum_info(struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip = ip_hdr(skb);
+
+ if (ip->protocol == IPPROTO_TCP)
+ return IPPROTO_TCP;
+
+ if (ip->protocol == IPPROTO_UDP)
+ return IPPROTO_UDP;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6 = ipv6_hdr(skb);
+
+ if (ip6->nexthdr == IPPROTO_TCP)
+ return IPPROTO_TCP;
+
+ if (ip6->nexthdr == IPPROTO_UDP)
+ return IPPROTO_UDP;
+ }
+
+ /* No csum offloading */
+ return 0;
+}
+
+static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
+ struct mana_tx_package *tp)
+{
+ struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ struct gdma_context *gc;
+ struct device *dev;
+ skb_frag_t *frag;
+ dma_addr_t da;
+ int i;
+
+ gc = gd->gdma_context;
+ dev = gc->dev;
+ da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, da))
+ return -ENOMEM;
+
+ ash->dma_handle[0] = da;
+ ash->size[0] = skb_headlen(skb);
+
+ tp->wqe_req.sgl[0].address = ash->dma_handle[0];
+ tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
+ tp->wqe_req.sgl[0].size = ash->size[0];
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, da))
+ goto frag_err;
+
+ ash->dma_handle[i + 1] = da;
+ ash->size[i + 1] = skb_frag_size(frag);
+
+ tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
+ tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
+ tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
+ }
+
+ return 0;
+
+frag_err:
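+	/* Unmap the fragments mapped so far, then the linear part. */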
+ for (i = i - 1; i >= 0; i--)
+ dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
+ DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
+
+int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
+ struct mana_port_context *apc = netdev_priv(ndev);
+ u16 txq_idx = skb_get_queue_mapping(skb);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ bool ipv4 = false, ipv6 = false;
+ struct mana_tx_package pkg = {};
+ struct netdev_queue *net_txq;
+ struct mana_stats_tx *tx_stats;
+ struct gdma_queue *gdma_sq;
+ unsigned int csum_type;
+ struct mana_txq *txq;
+ struct mana_cq *cq;
+ int err, len;
+
+ if (unlikely(!apc->port_is_up))
+ goto tx_drop;
+
+ if (skb_cow_head(skb, MANA_HEADROOM))
+ goto tx_drop_count;
+
+ txq = &apc->tx_qp[txq_idx].txq;
+ gdma_sq = txq->gdma_sq;
+ cq = &apc->tx_qp[txq_idx].tx_cq;
+
+ pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
+ pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
+
+ if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
+ pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
+ pkt_fmt = MANA_LONG_PKT_FMT;
+ } else {
+ pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
+ }
+
+ pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
+
+ if (pkt_fmt == MANA_SHORT_PKT_FMT)
+ pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
+ else
+ pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
+
+ pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
+ pkg.wqe_req.flags = 0;
+ pkg.wqe_req.client_data_unit = 0;
+
+ pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
+ WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
+
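+	/* Use the on-stack SGL array when it is large enough; otherwise
+	 * allocate a temporary one.
+	 */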
+ if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
+ pkg.wqe_req.sgl = pkg.sgl_array;
+ } else {
+ pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
+ sizeof(struct gdma_sge),
+ GFP_ATOMIC);
+ if (!pkg.sgl_ptr)
+ goto tx_drop_count;
+
+ pkg.wqe_req.sgl = pkg.sgl_ptr;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP))
+ ipv4 = true;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ ipv6 = true;
+
+ if (skb_is_gso(skb)) {
+ pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
+ pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
+
+ pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
+ pkg.tx_oob.s_oob.comp_tcp_csum = 1;
+ pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
+
+ pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
+ pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
+ if (ipv4) {
+ ip_hdr(skb)->tot_len = 0;
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0,
+ IPPROTO_TCP, 0);
+ } else {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, 0,
+ IPPROTO_TCP, 0);
+ }
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ csum_type = mana_checksum_info(skb);
+
+ if (csum_type == IPPROTO_TCP) {
+ pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
+ pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
+
+ pkg.tx_oob.s_oob.comp_tcp_csum = 1;
+ pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
+
+ } else if (csum_type == IPPROTO_UDP) {
+ pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
+ pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
+
+ pkg.tx_oob.s_oob.comp_udp_csum = 1;
+ } else {
+ /* Can't do offload of this type of checksum */
+ if (skb_checksum_help(skb))
+ goto free_sgl_ptr;
+ }
+ }
+
+ if (mana_map_skb(skb, apc, &pkg))
+ goto free_sgl_ptr;
+
+ skb_queue_tail(&txq->pending_skbs, skb);
+
+ len = skb->len;
+ net_txq = netdev_get_tx_queue(ndev, txq_idx);
+
+ err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
+ (struct gdma_posted_wqe_info *)skb->cb);
+ if (!mana_can_tx(gdma_sq)) {
+ netif_tx_stop_queue(net_txq);
+ apc->eth_stats.stop_queue++;
+ }
+
+ if (err) {
+ (void)skb_dequeue_tail(&txq->pending_skbs);
+ netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
+ err = NETDEV_TX_BUSY;
+ goto tx_busy;
+ }
+
+ err = NETDEV_TX_OK;
+ atomic_inc(&txq->pending_sends);
+
+ mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
+
+ /* skb may be freed after mana_gd_post_work_request. Do not use it. */
+ skb = NULL;
+
+ tx_stats = &txq->stats;
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->packets++;
+ tx_stats->bytes += len;
+ u64_stats_update_end(&tx_stats->syncp);
+
+tx_busy:
+ if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
+ netif_tx_wake_queue(net_txq);
+ apc->eth_stats.wake_queue++;
+ }
+
+ kfree(pkg.sgl_ptr);
+ return err;
+
+free_sgl_ptr:
+ kfree(pkg.sgl_ptr);
+tx_drop_count:
+ ndev->stats.tx_dropped++;
+tx_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void mana_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *st)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned int num_queues = apc->num_queues;
+ struct mana_stats_rx *rx_stats;
+ struct mana_stats_tx *tx_stats;
+ unsigned int start;
+ u64 packets, bytes;
+ int q;
+
+ if (!apc->port_is_up)
+ return;
+
+ netdev_stats_to_stats64(st, &ndev->stats);
+
+ for (q = 0; q < num_queues; q++) {
+ rx_stats = &apc->rxqs[q]->stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+
+ st->rx_packets += packets;
+ st->rx_bytes += bytes;
+ }
+
+ for (q = 0; q < num_queues; q++) {
+ tx_stats = &apc->tx_qp[q].txq.stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+
+ st->tx_packets += packets;
+ st->tx_bytes += bytes;
+ }
+}
+
+static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
+ int old_q)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ u32 hash = skb_get_hash(skb);
+ struct sock *sk = skb->sk;
+ int txq;
+
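+	/* Pick the TX queue from the RSS indirection table using the skb hash. */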
+ txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
+
+ if (txq != old_q && sk && sk_fullsock(sk) &&
+ rcu_access_pointer(sk->sk_dst_cache))
+ sk_tx_queue_set(sk, txq);
+
+ return txq;
+}
+
+static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ int txq;
+
+ if (ndev->real_num_tx_queues == 1)
+ return 0;
+
+ txq = sk_tx_queue_get(skb->sk);
+
+ if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
+ if (skb_rx_queue_recorded(skb))
+ txq = skb_get_rx_queue(skb);
+ else
+ txq = mana_get_tx_queue(ndev, skb, txq);
+ }
+
+ return txq;
+}
+
+static const struct net_device_ops mana_devops = {
+ .ndo_open = mana_open,
+ .ndo_stop = mana_close,
+ .ndo_select_queue = mana_select_queue,
+ .ndo_start_xmit = mana_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = mana_get_stats64,
+ .ndo_bpf = mana_bpf,
+ .ndo_xdp_xmit = mana_xdp_xmit,
+};
+
+static void mana_cleanup_port_context(struct mana_port_context *apc)
+{
+ kfree(apc->rxqs);
+ apc->rxqs = NULL;
+}
+
+static int mana_init_port_context(struct mana_port_context *apc)
+{
+ apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
+ GFP_KERNEL);
+
+ return !apc->rxqs ? -ENOMEM : 0;
+}
+
+static int mana_send_request(struct mana_context *ac, void *in_buf,
+ u32 in_len, void *out_buf, u32 out_len)
+{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
+ struct gdma_resp_hdr *resp = out_buf;
+ struct gdma_req_hdr *req = in_buf;
+ struct device *dev = gc->dev;
+ static atomic_t activity_id;
+ int err;
+
+ req->dev_id = gc->mana.dev_id;
+ req->activity_id = atomic_inc_return(&activity_id);
+
+ err = mana_gd_send_request(gc, in_len, in_buf, out_len,
+ out_buf);
+ if (err || resp->status) {
+ dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
+ err, resp->status);
+ return err ? err : -EPROTO;
+ }
+
+ if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
+ req->activity_id != resp->activity_id) {
+ dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
+ req->dev_id.as_uint32, resp->dev_id.as_uint32,
+ req->activity_id, resp->activity_id);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
+ const enum mana_command_code expected_code,
+ const u32 min_size)
+{
+ if (resp_hdr->response.msg_type != expected_code)
+ return -EPROTO;
+
+ if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
+ return -EPROTO;
+
+ if (resp_hdr->response.msg_size < min_size)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int mana_pf_register_hw_vport(struct mana_port_context *apc)
+{
+ struct mana_register_hw_vport_resp resp = {};
+ struct mana_register_hw_vport_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
+ sizeof(req), sizeof(resp));
+ req.attached_gfid = 1;
+ req.is_pf_default_vport = 1;
+ req.allow_all_ether_types = 1;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ apc->port_handle = resp.hw_vport_handle;
+ return 0;
+}
+
+static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
+{
+ struct mana_deregister_hw_vport_resp resp = {};
+ struct mana_deregister_hw_vport_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
+ sizeof(req), sizeof(resp));
+ req.hw_vport_handle = apc->port_handle;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+ err);
+ return;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
+ sizeof(resp));
+ if (err || resp.hdr.status)
+ netdev_err(apc->ndev,
+ "Failed to deregister hw vPort: %d, 0x%x\n",
+ err, resp.hdr.status);
+}
+
+static int mana_pf_register_filter(struct mana_port_context *apc)
+{
+ struct mana_register_filter_resp resp = {};
+ struct mana_register_filter_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ apc->pf_filter_handle = resp.filter_handle;
+ return 0;
+}
+
+static void mana_pf_deregister_filter(struct mana_port_context *apc)
+{
+ struct mana_deregister_filter_resp resp = {};
+ struct mana_deregister_filter_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
+ sizeof(req), sizeof(resp));
+ req.filter_handle = apc->pf_filter_handle;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+ err);
+ return;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
+ sizeof(resp));
+ if (err || resp.hdr.status)
+ netdev_err(apc->ndev,
+ "Failed to deregister filter: %d, 0x%x\n",
+ err, resp.hdr.status);
+}
+
+static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
+ u32 proto_minor_ver, u32 proto_micro_ver,
+ u16 *max_num_vports)
+{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
+ struct mana_query_device_cfg_resp resp = {};
+ struct mana_query_device_cfg_req req = {};
+ struct device *dev = gc->dev;
+ int err = 0;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
+ sizeof(req), sizeof(resp));
+ req.proto_major_ver = proto_major_ver;
+ req.proto_minor_ver = proto_minor_ver;
+ req.proto_micro_ver = proto_micro_ver;
+
+ err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
+ if (err) {
+ dev_err(dev, "Failed to query config: %d", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+ return err;
+ }
+
+ *max_num_vports = resp.max_num_vports;
+
+ return 0;
+}
+
+static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
+ u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
+{
+ struct mana_query_vport_cfg_resp resp = {};
+ struct mana_query_vport_cfg_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
+ sizeof(req), sizeof(resp));
+
+ req.vport_index = vport_index;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err)
+ return err;
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
+ sizeof(resp));
+ if (err)
+ return err;
+
+ if (resp.hdr.status)
+ return -EPROTO;
+
+ *max_sq = resp.max_num_sq;
+ *max_rq = resp.max_num_rq;
+ *num_indir_entry = resp.num_indirection_ent;
+
+ apc->port_handle = resp.vport;
+ ether_addr_copy(apc->mac_addr, resp.mac_addr);
+
+ return 0;
+}
+
+static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
+ u32 doorbell_pg_id)
+{
+ struct mana_config_vport_resp resp = {};
+ struct mana_config_vport_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ req.pdid = protection_dom_id;
+ req.doorbell_pageid = doorbell_pg_id;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
+ goto out;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
+ err, resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+
+ goto out;
+ }
+
+ apc->tx_shortform_allowed = resp.short_form_allowed;
+ apc->tx_vp_offset = resp.tx_vport_offset;
+out:
+ return err;
+}
+
+static int mana_cfg_vport_steering(struct mana_port_context *apc,
+ enum TRI_STATE rx,
+ bool update_default_rxobj, bool update_key,
+ bool update_tab)
+{
+ u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
+ struct mana_cfg_rx_steer_req *req = NULL;
+ struct mana_cfg_rx_steer_resp resp = {};
+ struct net_device *ndev = apc->ndev;
+ mana_handle_t *req_indir_tab;
+ u32 req_buf_size;
+ int err;
+
+ req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
+ req = kzalloc(req_buf_size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
+ sizeof(resp));
+
+ req->vport = apc->port_handle;
+ req->num_indir_entries = num_entries;
+ req->indir_tab_offset = sizeof(*req);
+ req->rx_enable = rx;
+ req->rss_enable = apc->rss_state;
+ req->update_default_rxobj = update_default_rxobj;
+ req->update_hashkey = update_key;
+ req->update_indir_tab = update_tab;
+ req->default_rxobj = apc->default_rxobj;
+
+ if (update_key)
+ memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
+
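+ /* The RX object handles are copied into the buffer immediately after the
+ * request struct, which is what indir_tab_offset above advertises.
+ */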
+ if (update_tab) {
+ req_indir_tab = (mana_handle_t *)(req + 1);
+ memcpy(req_indir_tab, apc->rxobj_table,
+ req->num_indir_entries * sizeof(mana_handle_t));
+ }
+
+ err = mana_send_request(apc->ac, req, req_buf_size, &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+ goto out;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
+ sizeof(resp));
+ if (err) {
+ netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
+ goto out;
+ }
+
+ if (resp.hdr.status) {
+ netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
+ resp.hdr.status);
+ err = -EPROTO;
+ }
+out:
+ kfree(req);
+ return err;
+}
+
+static int mana_create_wq_obj(struct mana_port_context *apc,
+ mana_handle_t vport,
+ u32 wq_type, struct mana_obj_spec *wq_spec,
+ struct mana_obj_spec *cq_spec,
+ mana_handle_t *wq_obj)
+{
+ struct mana_create_wqobj_resp resp = {};
+ struct mana_create_wqobj_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
+ sizeof(req), sizeof(resp));
+ req.vport = vport;
+ req.wq_type = wq_type;
+ req.wq_gdma_region = wq_spec->gdma_region;
+ req.cq_gdma_region = cq_spec->gdma_region;
+ req.wq_size = wq_spec->queue_size;
+ req.cq_size = cq_spec->queue_size;
+ req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
+ req.cq_parent_qid = cq_spec->attached_eq;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(ndev, "Failed to create WQ object: %d\n", err);
+ goto out;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+ goto out;
+ }
+
+ if (resp.wq_obj == INVALID_MANA_HANDLE) {
+ netdev_err(ndev, "Got an invalid WQ object handle\n");
+ err = -EPROTO;
+ goto out;
+ }
+
+ *wq_obj = resp.wq_obj;
+ wq_spec->queue_index = resp.wq_id;
+ cq_spec->queue_index = resp.cq_id;
+
+ return 0;
+out:
+ return err;
+}
+
+static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
+ mana_handle_t wq_obj)
+{
+ struct mana_destroy_wqobj_resp resp = {};
+ struct mana_destroy_wqobj_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
+ sizeof(req), sizeof(resp));
+ req.wq_type = wq_type;
+ req.wq_obj_handle = wq_obj;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+ return;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
+ sizeof(resp));
+ if (err || resp.hdr.status)
+ netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
+ resp.hdr.status);
+}
+
+static void mana_destroy_eq(struct mana_context *ac)
+{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
+ struct gdma_queue *eq;
+ int i;
+
+ if (!ac->eqs)
+ return;
+
+ for (i = 0; i < gc->max_num_queues; i++) {
+ eq = ac->eqs[i].eq;
+ if (!eq)
+ continue;
+
+ mana_gd_destroy_queue(gc, eq);
+ }
+
+ kfree(ac->eqs);
+ ac->eqs = NULL;
+}
+
+static int mana_create_eq(struct mana_context *ac)
+{
+ struct gdma_dev *gd = ac->gdma_dev;
+ struct gdma_context *gc = gd->gdma_context;
+ struct gdma_queue_spec spec = {};
+ int err;
+ int i;
+
+ ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
+ GFP_KERNEL);
+ if (!ac->eqs)
+ return -ENOMEM;
+
+ spec.type = GDMA_EQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = EQ_SIZE;
+ spec.eq.callback = NULL;
+ spec.eq.context = ac->eqs;
+ spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
+
+ for (i = 0; i < gc->max_num_queues; i++) {
+ err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+out:
+ mana_destroy_eq(ac);
+ return err;
+}
+
+static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
+{
+ struct mana_fence_rq_resp resp = {};
+ struct mana_fence_rq_req req = {};
+ int err;
+
+ init_completion(&rxq->fence_event);
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
+ sizeof(req), sizeof(resp));
+ req.wq_obj_handle = rxq->rxobj;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
+ rxq->rxq_idx, err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
+ rxq->rxq_idx, err, resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+
+ return err;
+ }
+
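+ /* The completion is signaled from mana_process_rx_cqe() when the
+ * CQE_RX_OBJECT_FENCE completion for this RQ arrives.
+ */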
+ if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
+ netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
+ rxq->rxq_idx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void mana_fence_rqs(struct mana_port_context *apc)
+{
+ unsigned int rxq_idx;
+ struct mana_rxq *rxq;
+ int err;
+
+ for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
+ rxq = apc->rxqs[rxq_idx];
+ err = mana_fence_rq(apc, rxq);
+
+ /* If the fence request failed or timed out, fall back to a short
+ * sleep to give in-flight RX packets a chance to drain.
+ */
+ if (err)
+ msleep(100);
+ }
+}
+
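+/* Advance the WQ tail by the number of work queue units consumed. The head
+ * and tail only ever grow, so only their difference (the used space) matters;
+ * the WARN catches an attempt to move the tail past the head.
+ */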
+static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
+{
+ u32 used_space_old;
+ u32 used_space_new;
+
+ used_space_old = wq->head - wq->tail;
+ used_space_new = wq->head - (wq->tail + num_units);
+
+ if (WARN_ON_ONCE(used_space_new > used_space_old))
+ return -ERANGE;
+
+ wq->tail += num_units;
+ return 0;
+}
+
+static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+{
+ struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
+ struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
+ struct device *dev = gc->dev;
+ int i;
+
+ dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
+
+ for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
+ DMA_TO_DEVICE);
+}
+
+static void mana_poll_tx_cq(struct mana_cq *cq)
+{
+ struct gdma_comp *completions = cq->gdma_comp_buf;
+ struct gdma_posted_wqe_info *wqe_info;
+ unsigned int pkt_transmitted = 0;
+ unsigned int wqe_unit_cnt = 0;
+ struct mana_txq *txq = cq->txq;
+ struct mana_port_context *apc;
+ struct netdev_queue *net_txq;
+ struct gdma_queue *gdma_wq;
+ unsigned int avail_space;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ bool txq_stopped;
+ int comp_read;
+ int i;
+
+ ndev = txq->ndev;
+ apc = netdev_priv(ndev);
+
+ comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
+ CQE_POLLING_BUFFER);
+
+ if (comp_read < 1)
+ return;
+
+ for (i = 0; i < comp_read; i++) {
+ struct mana_tx_comp_oob *cqe_oob;
+
+ if (WARN_ON_ONCE(!completions[i].is_sq))
+ return;
+
+ cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
+ if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
+ MANA_CQE_COMPLETION))
+ return;
+
+ switch (cqe_oob->cqe_hdr.cqe_type) {
+ case CQE_TX_OKAY:
+ break;
+
+ case CQE_TX_SA_DROP:
+ case CQE_TX_MTU_DROP:
+ case CQE_TX_INVALID_OOB:
+ case CQE_TX_INVALID_ETH_TYPE:
+ case CQE_TX_HDR_PROCESSING_ERROR:
+ case CQE_TX_VF_DISABLED:
+ case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
+ case CQE_TX_VPORT_DISABLED:
+ case CQE_TX_VLAN_TAGGING_VIOLATION:
+ if (net_ratelimit())
+ netdev_err(ndev, "TX: CQE error %d\n",
+ cqe_oob->cqe_hdr.cqe_type);
+
+ break;
+
+ default:
+ /* If the CQE type is unknown, log an error,
+ * and still free the SKB, update tail, etc.
+ */
+ if (net_ratelimit())
+ netdev_err(ndev, "TX: unknown CQE type %d\n",
+ cqe_oob->cqe_hdr.cqe_type);
+
+ break;
+ }
+
+ if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
+ return;
+
+ skb = skb_dequeue(&txq->pending_skbs);
+ if (WARN_ON_ONCE(!skb))
+ return;
+
+ wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
+ wqe_unit_cnt += wqe_info->wqe_size_in_bu;
+
+ mana_unmap_skb(skb, apc);
+
+ napi_consume_skb(skb, cq->budget);
+
+ pkt_transmitted++;
+ }
+
+ if (WARN_ON_ONCE(wqe_unit_cnt == 0))
+ return;
+
+ mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
+
+ gdma_wq = txq->gdma_sq;
+ avail_space = mana_gd_wq_avail_space(gdma_wq);
+
+ /* Ensure tail updated before checking q stop */
+ smp_mb();
+
+ net_txq = txq->net_txq;
+ txq_stopped = netif_tx_queue_stopped(net_txq);
+
+ /* Ensure checking txq_stopped before apc->port_is_up. */
+ smp_rmb();
+
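+ /* Wake the queue only if there is room for at least one maximum-size WQE */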
+ if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
+ netif_tx_wake_queue(net_txq);
+ apc->eth_stats.wake_queue++;
+ }
+
+ if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
+ WARN_ON_ONCE(1);
+
+ cq->work_done = pkt_transmitted;
+}
+
+static void mana_post_pkt_rxq(struct mana_rxq *rxq)
+{
+ struct mana_recv_buf_oob *recv_buf_oob;
+ u32 curr_index;
+ int err;
+
+ curr_index = rxq->buf_index++;
+ if (rxq->buf_index == rxq->num_rx_buf)
+ rxq->buf_index = 0;
+
+ recv_buf_oob = &rxq->rx_oobs[curr_index];
+
+ err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
+ &recv_buf_oob->wqe_inf);
+ if (WARN_ON_ONCE(err))
+ return;
+
+ WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
+}
+
+static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
+ struct xdp_buff *xdp)
+{
+ struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
+
+ if (!skb)
+ return NULL;
+
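+ /* If an XDP program ran, data_hard_start is set and any headroom or
+ * length adjustments made by the program are honored; otherwise use the
+ * default headroom and the hardware-reported packet length.
+ */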
+ if (xdp->data_hard_start) {
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ skb_put(skb, xdp->data_end - xdp->data);
+ } else {
+ skb_reserve(skb, XDP_PACKET_HEADROOM);
+ skb_put(skb, pkt_len);
+ }
+
+ return skb;
+}
+
+static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
+ struct mana_rxq *rxq)
+{
+ struct mana_stats_rx *rx_stats = &rxq->stats;
+ struct net_device *ndev = rxq->ndev;
+ uint pkt_len = cqe->ppi[0].pkt_len;
+ u16 rxq_idx = rxq->rxq_idx;
+ struct napi_struct *napi;
+ struct xdp_buff xdp = {};
+ struct sk_buff *skb;
+ u32 hash_value;
+ u32 act;
+
+ rxq->rx_cq.work_done++;
+ napi = &rxq->rx_cq.napi;
+
+ if (!buf_va) {
+ ++ndev->stats.rx_dropped;
+ return;
+ }
+
+ act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
+
+ if (act == XDP_REDIRECT && !rxq->xdp_rc)
+ return;
+
+ if (act != XDP_PASS && act != XDP_TX)
+ goto drop_xdp;
+
+ skb = mana_build_skb(buf_va, pkt_len, &xdp);
+
+ if (!skb)
+ goto drop;
+
+ skb->dev = napi->dev;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_checksum_none_assert(skb);
+ skb_record_rx_queue(skb, rxq_idx);
+
+ if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
+ if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
+ hash_value = cqe->ppi[0].pkt_hash;
+
+ if (cqe->rx_hashtype & MANA_HASH_L4)
+ skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
+ else
+ skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
+ }
+
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += pkt_len;
+
+ if (act == XDP_TX)
+ rx_stats->xdp_tx++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ if (act == XDP_TX) {
+ skb_set_queue_mapping(skb, rxq_idx);
+ mana_xdp_tx(skb, ndev);
+ return;
+ }
+
+ napi_gro_receive(napi, skb);
+
+ return;
+
+drop_xdp:
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+drop:
+ WARN_ON_ONCE(rxq->xdp_save_page);
+ rxq->xdp_save_page = virt_to_page(buf_va);
+
+ ++ndev->stats.rx_dropped;
+
+ return;
+}
+
+static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
+ struct gdma_comp *cqe)
+{
+ struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
+ struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
+ struct net_device *ndev = rxq->ndev;
+ struct mana_recv_buf_oob *rxbuf_oob;
+ struct device *dev = gc->dev;
+ void *new_buf, *old_buf;
+ struct page *new_page;
+ u32 curr, pktlen;
+ dma_addr_t da;
+
+ switch (oob->cqe_hdr.cqe_type) {
+ case CQE_RX_OKAY:
+ break;
+
+ case CQE_RX_TRUNCATED:
+ ++ndev->stats.rx_dropped;
+ rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
+ netdev_warn_once(ndev, "Dropped a truncated packet\n");
+ goto drop;
+
+ case CQE_RX_COALESCED_4:
+ netdev_err(ndev, "RX coalescing is unsupported\n");
+ return;
+
+ case CQE_RX_OBJECT_FENCE:
+ complete(&rxq->fence_event);
+ return;
+
+ default:
+ netdev_err(ndev, "Unknown RX CQE type = %d\n",
+ oob->cqe_hdr.cqe_type);
+ return;
+ }
+
+ pktlen = oob->ppi[0].pkt_len;
+
+ if (pktlen == 0) {
+ /* Data packets should never have a packet length of zero */
+ netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
+ rxq->gdma_id, cq->gdma_id, rxq->rxobj);
+ return;
+ }
+
+ curr = rxq->buf_index;
+ rxbuf_oob = &rxq->rx_oobs[curr];
+ WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
+
+ /* Reuse XDP dropped page if available */
+ if (rxq->xdp_save_page) {
+ new_page = rxq->xdp_save_page;
+ rxq->xdp_save_page = NULL;
+ } else {
+ new_page = alloc_page(GFP_ATOMIC);
+ }
+
+ if (new_page) {
+ da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, da)) {
+ __free_page(new_page);
+ new_page = NULL;
+ }
+ }
+
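+ /* If no replacement buffer could be set up, this packet is dropped and
+ * the old buffer stays attached to the WQE so it can be re-posted below.
+ */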
+ new_buf = new_page ? page_to_virt(new_page) : NULL;
+
+ if (new_buf) {
+ dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
+ DMA_FROM_DEVICE);
+
+ old_buf = rxbuf_oob->buf_va;
+
+ /* refresh the rxbuf_oob with the new page */
+ rxbuf_oob->buf_va = new_buf;
+ rxbuf_oob->buf_dma_addr = da;
+ rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
+ } else {
+ old_buf = NULL; /* drop the packet if no memory */
+ }
+
+ mana_rx_skb(old_buf, oob, rxq);
+
+drop:
+ mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
+
+ mana_post_pkt_rxq(rxq);
+}
+
+static void mana_poll_rx_cq(struct mana_cq *cq)
+{
+ struct gdma_comp *comp = cq->gdma_comp_buf;
+ struct mana_rxq *rxq = cq->rxq;
+ int comp_read, i;
+
+ comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
+ WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
+
+ rxq->xdp_flush = false;
+
+ for (i = 0; i < comp_read; i++) {
+ if (WARN_ON_ONCE(comp[i].is_sq))
+ return;
+
+ /* verify recv cqe references the right rxq */
+ if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
+ return;
+
+ mana_process_rx_cqe(rxq, cq, &comp[i]);
+ }
+
+ if (rxq->xdp_flush)
+ xdp_do_flush();
+}
+
+static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
+{
+ struct mana_cq *cq = context;
+ u8 arm_bit;
+ int w;
+
+ WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
+
+ if (cq->type == MANA_CQ_TYPE_RX)
+ mana_poll_rx_cq(cq);
+ else
+ mana_poll_tx_cq(cq);
+
+ w = cq->work_done;
+
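+ /* Re-arm the CQ only when the NAPI poll is fully done; otherwise NAPI
+ * keeps polling without needing another interrupt.
+ */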
+ if (w < cq->budget &&
+ napi_complete_done(&cq->napi, w)) {
+ arm_bit = SET_ARM_BIT;
+ } else {
+ arm_bit = 0;
+ }
+
+ mana_gd_ring_cq(gdma_queue, arm_bit);
+
+ return w;
+}
+
+static int mana_poll(struct napi_struct *napi, int budget)
+{
+ struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
+ int w;
+
+ cq->work_done = 0;
+ cq->budget = budget;
+
+ w = mana_cq_handler(cq, cq->gdma_cq);
+
+ return min(w, budget);
+}
+
+static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
+{
+ struct mana_cq *cq = context;
+
+ napi_schedule_irqoff(&cq->napi);
+}
+
+static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+
+ if (!cq->gdma_cq)
+ return;
+
+ mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
+}
+
+static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+
+ if (!txq->gdma_sq)
+ return;
+
+ mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
+}
+
+static void mana_destroy_txq(struct mana_port_context *apc)
+{
+ struct napi_struct *napi;
+ int i;
+
+ if (!apc->tx_qp)
+ return;
+
+ for (i = 0; i < apc->num_queues; i++) {
+ napi = &apc->tx_qp[i].tx_cq.napi;
+ napi_synchronize(napi);
+ napi_disable(napi);
+ netif_napi_del(napi);
+
+ mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
+
+ mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
+
+ mana_deinit_txq(apc, &apc->tx_qp[i].txq);
+ }
+
+ kfree(apc->tx_qp);
+ apc->tx_qp = NULL;
+}
+
+static int mana_create_txq(struct mana_port_context *apc,
+ struct net_device *net)
+{
+ struct mana_context *ac = apc->ac;
+ struct gdma_dev *gd = ac->gdma_dev;
+ struct mana_obj_spec wq_spec;
+ struct mana_obj_spec cq_spec;
+ struct gdma_queue_spec spec;
+ struct gdma_context *gc;
+ struct mana_txq *txq;
+ struct mana_cq *cq;
+ u32 txq_size;
+ u32 cq_size;
+ int err;
+ int i;
+
+ apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
+ GFP_KERNEL);
+ if (!apc->tx_qp)
+ return -ENOMEM;
+
+ /* The minimum size of the WQE is 32 bytes, hence
+ * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
+ * the SQ can store. This value is then used to size other queues
+ * to prevent overflow.
+ */
+ txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
+ BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
+
+ cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
+ cq_size = PAGE_ALIGN(cq_size);
+
+ gc = gd->gdma_context;
+
+ for (i = 0; i < apc->num_queues; i++) {
+ apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
+
+ /* Create SQ */
+ txq = &apc->tx_qp[i].txq;
+
+ u64_stats_init(&txq->stats.syncp);
+ txq->ndev = net;
+ txq->net_txq = netdev_get_tx_queue(net, i);
+ txq->vp_offset = apc->tx_vp_offset;
+ skb_queue_head_init(&txq->pending_skbs);
+
+ memset(&spec, 0, sizeof(spec));
+ spec.type = GDMA_SQ;
+ spec.monitor_avl_buf = true;
+ spec.queue_size = txq_size;
+ err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
+ if (err)
+ goto out;
+
+ /* Create SQ's CQ */
+ cq = &apc->tx_qp[i].tx_cq;
+ cq->type = MANA_CQ_TYPE_TX;
+
+ cq->txq = txq;
+
+ memset(&spec, 0, sizeof(spec));
+ spec.type = GDMA_CQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = cq_size;
+ spec.cq.callback = mana_schedule_napi;
+ spec.cq.parent_eq = ac->eqs[i].eq;
+ spec.cq.context = cq;
+ err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
+ if (err)
+ goto out;
+
+ memset(&wq_spec, 0, sizeof(wq_spec));
+ memset(&cq_spec, 0, sizeof(cq_spec));
+
+ wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
+ wq_spec.queue_size = txq->gdma_sq->queue_size;
+
+ cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+ cq_spec.queue_size = cq->gdma_cq->queue_size;
+ cq_spec.modr_ctx_id = 0;
+ cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
+
+ err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
+ &wq_spec, &cq_spec,
+ &apc->tx_qp[i].tx_object);
+
+ if (err)
+ goto out;
+
+ txq->gdma_sq->id = wq_spec.queue_index;
+ cq->gdma_cq->id = cq_spec.queue_index;
+
+ txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+
+ txq->gdma_txq_id = txq->gdma_sq->id;
+
+ cq->gdma_id = cq->gdma_cq->id;
+
+ if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ gc->cq_table[cq->gdma_id] = cq->gdma_cq;
+
+ netif_napi_add_tx(net, &cq->napi, mana_poll);
+ napi_enable(&cq->napi);
+
+ mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
+ }
+
+ return 0;
+out:
+ mana_destroy_txq(apc);
+ return err;
+}
+
+static void mana_destroy_rxq(struct mana_port_context *apc,
+ struct mana_rxq *rxq, bool validate_state)
+{
+ struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
+ struct mana_recv_buf_oob *rx_oob;
+ struct device *dev = gc->dev;
+ struct napi_struct *napi;
+ int i;
+
+ if (!rxq)
+ return;
+
+ napi = &rxq->rx_cq.napi;
+
+ if (validate_state)
+ napi_synchronize(napi);
+
+ napi_disable(napi);
+
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ netif_napi_del(napi);
+
+ mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
+
+ mana_deinit_cq(apc, &rxq->rx_cq);
+
+ if (rxq->xdp_save_page)
+ __free_page(rxq->xdp_save_page);
+
+ for (i = 0; i < rxq->num_rx_buf; i++) {
+ rx_oob = &rxq->rx_oobs[i];
+
+ if (!rx_oob->buf_va)
+ continue;
+
+ dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
+ DMA_FROM_DEVICE);
+
+ free_page((unsigned long)rx_oob->buf_va);
+ rx_oob->buf_va = NULL;
+ }
+
+ if (rxq->gdma_rq)
+ mana_gd_destroy_queue(gc, rxq->gdma_rq);
+
+ kfree(rxq);
+}
+
+#define MANA_WQE_HEADER_SIZE 16
+#define MANA_WQE_SGE_SIZE 16
+
+static int mana_alloc_rx_wqe(struct mana_port_context *apc,
+ struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
+{
+ struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
+ struct mana_recv_buf_oob *rx_oob;
+ struct device *dev = gc->dev;
+ struct page *page;
+ dma_addr_t da;
+ u32 buf_idx;
+
+ WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
+
+ *rxq_size = 0;
+ *cq_size = 0;
+
+ for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
+ rx_oob = &rxq->rx_oobs[buf_idx];
+ memset(rx_oob, 0, sizeof(*rx_oob));
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, da)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ rx_oob->buf_va = page_to_virt(page);
+ rx_oob->buf_dma_addr = da;
+
+ rx_oob->num_sge = 1;
+ rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
+ rx_oob->sgl[0].size = rxq->datasize;
+ rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
+
+ rx_oob->wqe_req.sgl = rx_oob->sgl;
+ rx_oob->wqe_req.num_sge = rx_oob->num_sge;
+ rx_oob->wqe_req.inline_oob_size = 0;
+ rx_oob->wqe_req.inline_oob_data = NULL;
+ rx_oob->wqe_req.flags = 0;
+ rx_oob->wqe_req.client_data_unit = 0;
+
+ *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
+ MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
+ *cq_size += COMP_ENTRY_SIZE;
+ }
+
+ return 0;
+}
+
+static int mana_push_wqe(struct mana_rxq *rxq)
+{
+ struct mana_recv_buf_oob *rx_oob;
+ u32 buf_idx;
+ int err;
+
+ for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
+ rx_oob = &rxq->rx_oobs[buf_idx];
+
+ err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
+ &rx_oob->wqe_inf);
+ if (err)
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
+ u32 rxq_idx, struct mana_eq *eq,
+ struct net_device *ndev)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ struct mana_obj_spec wq_spec;
+ struct mana_obj_spec cq_spec;
+ struct gdma_queue_spec spec;
+ struct mana_cq *cq = NULL;
+ struct gdma_context *gc;
+ u32 cq_size, rq_size;
+ struct mana_rxq *rxq;
+ int err;
+
+ gc = gd->gdma_context;
+
+ rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
+ GFP_KERNEL);
+ if (!rxq)
+ return NULL;
+
+ rxq->ndev = ndev;
+ rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
+ rxq->rxq_idx = rxq_idx;
+ rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
+ rxq->rxobj = INVALID_MANA_HANDLE;
+
+ err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
+ if (err)
+ goto out;
+
+ rq_size = PAGE_ALIGN(rq_size);
+ cq_size = PAGE_ALIGN(cq_size);
+
+ /* Create RQ */
+ memset(&spec, 0, sizeof(spec));
+ spec.type = GDMA_RQ;
+ spec.monitor_avl_buf = true;
+ spec.queue_size = rq_size;
+ err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
+ if (err)
+ goto out;
+
+ /* Create RQ's CQ */
+ cq = &rxq->rx_cq;
+ cq->type = MANA_CQ_TYPE_RX;
+ cq->rxq = rxq;
+
+ memset(&spec, 0, sizeof(spec));
+ spec.type = GDMA_CQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = cq_size;
+ spec.cq.callback = mana_schedule_napi;
+ spec.cq.parent_eq = eq->eq;
+ spec.cq.context = cq;
+ err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
+ if (err)
+ goto out;
+
+ memset(&wq_spec, 0, sizeof(wq_spec));
+ memset(&cq_spec, 0, sizeof(cq_spec));
+ wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
+ wq_spec.queue_size = rxq->gdma_rq->queue_size;
+
+ cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+ cq_spec.queue_size = cq->gdma_cq->queue_size;
+ cq_spec.modr_ctx_id = 0;
+ cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
+
+ err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
+ &wq_spec, &cq_spec, &rxq->rxobj);
+ if (err)
+ goto out;
+
+ rxq->gdma_rq->id = wq_spec.queue_index;
+ cq->gdma_cq->id = cq_spec.queue_index;
+
+ rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+
+ rxq->gdma_id = rxq->gdma_rq->id;
+ cq->gdma_id = cq->gdma_cq->id;
+
+ err = mana_push_wqe(rxq);
+ if (err)
+ goto out;
+
+ if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ gc->cq_table[cq->gdma_id] = cq->gdma_cq;
+
+ netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
+
+ WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
+ cq->napi.napi_id));
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+
+ napi_enable(&cq->napi);
+
+ mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
+out:
+ if (!err)
+ return rxq;
+
+ netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
+
+ mana_destroy_rxq(apc, rxq, false);
+
+ if (cq)
+ mana_deinit_cq(apc, cq);
+
+ return NULL;
+}
+
+static int mana_add_rx_queues(struct mana_port_context *apc,
+ struct net_device *ndev)
+{
+ struct mana_context *ac = apc->ac;
+ struct mana_rxq *rxq;
+ int err = 0;
+ int i;
+
+ for (i = 0; i < apc->num_queues; i++) {
+ rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
+ if (!rxq) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ u64_stats_init(&rxq->stats.syncp);
+
+ apc->rxqs[i] = rxq;
+ }
+
+ apc->default_rxobj = apc->rxqs[0]->rxobj;
+out:
+ return err;
+}
+
+static void mana_destroy_vport(struct mana_port_context *apc)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ struct mana_rxq *rxq;
+ u32 rxq_idx;
+
+ for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
+ rxq = apc->rxqs[rxq_idx];
+ if (!rxq)
+ continue;
+
+ mana_destroy_rxq(apc, rxq, true);
+ apc->rxqs[rxq_idx] = NULL;
+ }
+
+ mana_destroy_txq(apc);
+
+ if (gd->gdma_context->is_pf)
+ mana_pf_deregister_hw_vport(apc);
+}
+
+static int mana_create_vport(struct mana_port_context *apc,
+ struct net_device *net)
+{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ int err;
+
+ apc->default_rxobj = INVALID_MANA_HANDLE;
+
+ if (gd->gdma_context->is_pf) {
+ err = mana_pf_register_hw_vport(apc);
+ if (err)
+ return err;
+ }
+
+ err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
+ if (err)
+ return err;
+
+ return mana_create_txq(apc, net);
+}
+
+static void mana_rss_table_init(struct mana_port_context *apc)
+{
+ int i;
+
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ apc->indir_table[i] =
+ ethtool_rxfh_indir_default(i, apc->num_queues);
+}
+
+int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
+ bool update_hash, bool update_tab)
+{
+ u32 queue_idx;
+ int err;
+ int i;
+
+ if (update_tab) {
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ queue_idx = apc->indir_table[i];
+ apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
+ }
+ }
+
+ err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
+ if (err)
+ return err;
+
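+ /* Fence the RQs so that RX traffic already in flight under the previous
+ * steering settings has finished before we return.
+ */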
+ mana_fence_rqs(apc);
+
+ return 0;
+}
+
+static int mana_init_port(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ u32 max_txq, max_rxq, max_queues;
+ int port_idx = apc->port_idx;
+ u32 num_indirect_entries;
+ int err;
+
+ err = mana_init_port_context(apc);
+ if (err)
+ return err;
+
+ err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
+ &num_indirect_entries);
+ if (err) {
+ netdev_err(ndev, "Failed to query info for vPort %d\n",
+ port_idx);
+ goto reset_apc;
+ }
+
+ max_queues = min_t(u32, max_txq, max_rxq);
+ if (apc->max_queues > max_queues)
+ apc->max_queues = max_queues;
+
+ if (apc->num_queues > apc->max_queues)
+ apc->num_queues = apc->max_queues;
+
+ eth_hw_addr_set(ndev, apc->mac_addr);
+
+ return 0;
+
+reset_apc:
+ kfree(apc->rxqs);
+ apc->rxqs = NULL;
+ return err;
+}
+
+int mana_alloc_queues(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ int err;
+
+ err = mana_create_vport(apc, ndev);
+ if (err)
+ return err;
+
+ err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
+ if (err)
+ goto destroy_vport;
+
+ err = mana_add_rx_queues(apc, ndev);
+ if (err)
+ goto destroy_vport;
+
+ apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
+
+ err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
+ if (err)
+ goto destroy_vport;
+
+ mana_rss_table_init(apc);
+
+ err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
+ if (err)
+ goto destroy_vport;
+
+ if (gd->gdma_context->is_pf) {
+ err = mana_pf_register_filter(apc);
+ if (err)
+ goto destroy_vport;
+ }
+
+ mana_chn_setxdp(apc, mana_xdp_get(apc));
+
+ return 0;
+
+destroy_vport:
+ mana_destroy_vport(apc);
+ return err;
+}
+
+int mana_attach(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ ASSERT_RTNL();
+
+ err = mana_init_port(ndev);
+ if (err)
+ return err;
+
+ if (apc->port_st_save) {
+ err = mana_alloc_queues(ndev);
+ if (err) {
+ mana_cleanup_port_context(apc);
+ return err;
+ }
+ }
+
+ apc->port_is_up = apc->port_st_save;
+
+ /* Ensure port state updated before txq state */
+ smp_wmb();
+
+ if (apc->port_is_up)
+ netif_carrier_on(ndev);
+
+ netif_device_attach(ndev);
+
+ return 0;
+}
+
+static int mana_dealloc_queues(struct net_device *ndev)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned long timeout = jiffies + 120 * HZ;
+ struct gdma_dev *gd = apc->ac->gdma_dev;
+ struct mana_txq *txq;
+ struct sk_buff *skb;
+ int i, err;
+ u32 tsleep;
+
+ if (apc->port_is_up)
+ return -EINVAL;
+
+ mana_chn_setxdp(apc, NULL);
+
+ if (gd->gdma_context->is_pf)
+ mana_pf_deregister_filter(apc);
+
+ /* No packet can be transmitted now since apc->port_is_up is false.
+ * There is still a tiny chance that mana_poll_tx_cq() can re-enable
+ * a txq because it may not timely see apc->port_is_up being cleared
+ * to false, but it doesn't matter since mana_start_xmit() drops any
+ * new packets due to apc->port_is_up being false.
+ *
+ * Drain all the in-flight TX packets.
+ * A single 120-second timeout covers all the queues; it breaks the
+ * while loop if the hardware stops responding, and was sized to
+ * accommodate the maximum number of queues.
+ */
+
+ for (i = 0; i < apc->num_queues; i++) {
+ txq = &apc->tx_qp[i].txq;
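+ /* Poll for the queue to drain with exponential backoff, starting at ~1 ms */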
+ tsleep = 1000;
+ while (atomic_read(&txq->pending_sends) > 0 &&
+ time_before(jiffies, timeout)) {
+ usleep_range(tsleep, tsleep + 1000);
+ tsleep <<= 1;
+ }
+ if (atomic_read(&txq->pending_sends)) {
+ err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
+ if (err) {
+ netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
+ err, atomic_read(&txq->pending_sends),
+ txq->gdma_txq_id);
+ }
+ break;
+ }
+ }
+
+ for (i = 0; i < apc->num_queues; i++) {
+ txq = &apc->tx_qp[i].txq;
+ while ((skb = skb_dequeue(&txq->pending_skbs))) {
+ mana_unmap_skb(skb, apc);
+ dev_kfree_skb_any(skb);
+ }
+ atomic_set(&txq->pending_sends, 0);
+ }
+ /* At this point the queues can no longer be woken up, because
+ * mana_poll_tx_cq() can no longer be running.
+ */
+
+ apc->rss_state = TRI_STATE_FALSE;
+ err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
+ if (err) {
+ netdev_err(ndev, "Failed to disable vPort: %d\n", err);
+ return err;
+ }
+
+ mana_destroy_vport(apc);
+
+ return 0;
+}
+
+int mana_detach(struct net_device *ndev, bool from_close)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ ASSERT_RTNL();
+
+ apc->port_st_save = apc->port_is_up;
+ apc->port_is_up = false;
+
+ /* Ensure port state updated before txq state */
+ smp_wmb();
+
+ netif_tx_disable(ndev);
+ netif_carrier_off(ndev);
+
+ if (apc->port_st_save) {
+ err = mana_dealloc_queues(ndev);
+ if (err)
+ return err;
+ }
+
+ if (!from_close) {
+ netif_device_detach(ndev);
+ mana_cleanup_port_context(apc);
+ }
+
+ return 0;
+}
+
+static int mana_probe_port(struct mana_context *ac, int port_idx,
+ struct net_device **ndev_storage)
+{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
+ struct mana_port_context *apc;
+ struct net_device *ndev;
+ int err;
+
+ ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
+ gc->max_num_queues);
+ if (!ndev)
+ return -ENOMEM;
+
+ *ndev_storage = ndev;
+
+ apc = netdev_priv(ndev);
+ apc->ac = ac;
+ apc->ndev = ndev;
+ apc->max_queues = gc->max_num_queues;
+ apc->num_queues = gc->max_num_queues;
+ apc->port_handle = INVALID_MANA_HANDLE;
+ apc->pf_filter_handle = INVALID_MANA_HANDLE;
+ apc->port_idx = port_idx;
+
+ ndev->netdev_ops = &mana_devops;
+ ndev->ethtool_ops = &mana_ethtool_ops;
+ ndev->mtu = ETH_DATA_LEN;
+ ndev->max_mtu = ndev->mtu;
+ ndev->min_mtu = ndev->mtu;
+ ndev->needed_headroom = MANA_HEADROOM;
+ SET_NETDEV_DEV(ndev, gc->dev);
+
+ netif_carrier_off(ndev);
+
+ netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
+
+ err = mana_init_port(ndev);
+ if (err)
+ goto free_net;
+
+ netdev_lockdep_set_classes(ndev);
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ ndev->hw_features |= NETIF_F_RXCSUM;
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features = ndev->hw_features;
+ ndev->vlan_features = 0;
+
+ err = register_netdev(ndev);
+ if (err) {
+ netdev_err(ndev, "Unable to register netdev.\n");
+ goto reset_apc;
+ }
+
+ return 0;
+
+reset_apc:
+ kfree(apc->rxqs);
+ apc->rxqs = NULL;
+free_net:
+ *ndev_storage = NULL;
+ netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
+ free_netdev(ndev);
+ return err;
+}
+
+int mana_probe(struct gdma_dev *gd, bool resuming)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct mana_context *ac = gd->driver_data;
+ struct device *dev = gc->dev;
+ u16 num_ports = 0;
+ int err;
+ int i;
+
+ dev_info(dev,
+ "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
+ MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
+
+ err = mana_gd_register_device(gd);
+ if (err)
+ return err;
+
+ if (!resuming) {
+ ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return -ENOMEM;
+
+ ac->gdma_dev = gd;
+ gd->driver_data = ac;
+ }
+
+ err = mana_create_eq(ac);
+ if (err)
+ goto out;
+
+ err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
+ MANA_MICRO_VERSION, &num_ports);
+ if (err)
+ goto out;
+
+ if (!resuming) {
+ ac->num_ports = num_ports;
+ } else {
+ if (ac->num_ports != num_ports) {
+ dev_err(dev, "The number of vPorts changed: %d->%d\n",
+ ac->num_ports, num_ports);
+ err = -EPROTO;
+ goto out;
+ }
+ }
+
+ if (ac->num_ports == 0)
+ dev_err(dev, "Failed to detect any vPort\n");
+
+ if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
+ ac->num_ports = MAX_PORTS_IN_MANA_DEV;
+
+ if (!resuming) {
+ for (i = 0; i < ac->num_ports; i++) {
+ err = mana_probe_port(ac, i, &ac->ports[i]);
+ if (err)
+ break;
+ }
+ } else {
+ for (i = 0; i < ac->num_ports; i++) {
+ rtnl_lock();
+ err = mana_attach(ac->ports[i]);
+ rtnl_unlock();
+ if (err)
+ break;
+ }
+ }
+out:
+ if (err)
+ mana_remove(gd, false);
+
+ return err;
+}
+
+void mana_remove(struct gdma_dev *gd, bool suspending)
+{
+ struct gdma_context *gc = gd->gdma_context;
+ struct mana_context *ac = gd->driver_data;
+ struct device *dev = gc->dev;
+ struct net_device *ndev;
+ int err;
+ int i;
+
+ for (i = 0; i < ac->num_ports; i++) {
+ ndev = ac->ports[i];
+ if (!ndev) {
+ if (i == 0)
+ dev_err(dev, "No net device to remove\n");
+ goto out;
+ }
+
+ /* All cleanup actions should stay after rtnl_lock(), otherwise
+ * other functions may access partially cleaned up data.
+ */
+ rtnl_lock();
+
+ err = mana_detach(ndev, false);
+ if (err)
+ netdev_err(ndev, "Failed to detach vPort %d: %d\n",
+ i, err);
+
+ if (suspending) {
+ /* No need to unregister the ndev. */
+ rtnl_unlock();
+ continue;
+ }
+
+ unregister_netdevice(ndev);
+
+ rtnl_unlock();
+
+ free_netdev(ndev);
+ }
+
+ mana_destroy_eq(ac);
+
+out:
+ mana_gd_deregister_device(gd);
+
+ if (suspending)
+ return;
+
+ gd->driver_data = NULL;
+ gd->gdma_context = NULL;
+ kfree(ac);
+}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
new file mode 100644
index 000000000..c530db768
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "mana.h"
+
+static const struct {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} mana_eth_stats[] = {
+ {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
+ {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+};
+
+static int mana_get_sset_count(struct net_device *ndev, int stringset)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned int num_queues = apc->num_queues;
+
+ if (stringset != ETH_SS_STATS)
+ return -EINVAL;
+
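+ /* Per-queue stats: 5 RX counters and 3 TX counters per queue,
+ * matching mana_get_strings().
+ */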
+ return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
+}
+
+static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned int num_queues = apc->num_queues;
+ u8 *p = data;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) {
+ memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ sprintf(p, "rx_%d_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_drop", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_tx", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_redirect", i);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ sprintf(p, "tx_%d_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_xdp_xmit", i);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+static void mana_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *e_stats, u64 *data)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned int num_queues = apc->num_queues;
+ void *eth_stats = &apc->eth_stats;
+ struct mana_stats_rx *rx_stats;
+ struct mana_stats_tx *tx_stats;
+ unsigned int start;
+ u64 packets, bytes;
+ u64 xdp_redirect;
+ u64 xdp_xmit;
+ u64 xdp_drop;
+ u64 xdp_tx;
+ int q, i = 0;
+
+ if (!apc->port_is_up)
+ return;
+
+ for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
+ data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
+
+ for (q = 0; q < num_queues; q++) {
+ rx_stats = &apc->rxqs[q]->stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ xdp_drop = rx_stats->xdp_drop;
+ xdp_tx = rx_stats->xdp_tx;
+ xdp_redirect = rx_stats->xdp_redirect;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+
+ data[i++] = packets;
+ data[i++] = bytes;
+ data[i++] = xdp_drop;
+ data[i++] = xdp_tx;
+ data[i++] = xdp_redirect;
+ }
+
+ for (q = 0; q < num_queues; q++) {
+ tx_stats = &apc->tx_qp[q].txq.stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+
+ data[i++] = packets;
+ data[i++] = bytes;
+ data[i++] = xdp_xmit;
+ }
+}
+
+static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
+ u32 *rules)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = apc->num_queues;
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static u32 mana_get_rxfh_key_size(struct net_device *ndev)
+{
+ return MANA_HASH_KEY_SIZE;
+}
+
+static u32 mana_rss_indir_size(struct net_device *ndev)
+{
+ return MANA_INDIRECT_TABLE_SIZE;
+}
+
+static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+
+ if (indir) {
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ indir[i] = apc->indir_table[i];
+ }
+
+ if (key)
+ memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE);
+
+ return 0;
+}
+
+static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ bool update_hash = false, update_table = false;
+ u32 save_table[MANA_INDIRECT_TABLE_SIZE];
+ u8 save_key[MANA_HASH_KEY_SIZE];
+ int i, err;
+
+ if (!apc->port_is_up)
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ if (indir[i] >= apc->num_queues)
+ return -EINVAL;
+
+ update_table = true;
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ save_table[i] = apc->indir_table[i];
+ apc->indir_table[i] = indir[i];
+ }
+ }
+
+ if (key) {
+ update_hash = true;
+ memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
+ memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE);
+ }
+
+ err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
+
+ if (err) { /* recover to original values */
+ if (update_table) {
+ for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ apc->indir_table[i] = save_table[i];
+ }
+
+ if (update_hash)
+ memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);
+
+ mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
+ }
+
+ return err;
+}
+
+static void mana_get_channels(struct net_device *ndev,
+ struct ethtool_channels *channel)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ channel->max_combined = apc->max_queues;
+ channel->combined_count = apc->num_queues;
+}
+
+static int mana_set_channels(struct net_device *ndev,
+ struct ethtool_channels *channels)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned int new_count = channels->combined_count;
+ unsigned int old_count = apc->num_queues;
+ int err, err2;
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev, "mana_detach failed: %d\n", err);
+ return err;
+ }
+
+ apc->num_queues = new_count;
+ err = mana_attach(ndev);
+ if (!err)
+ return 0;
+
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+
+ /* Try to roll it back to the old configuration. */
+ apc->num_queues = old_count;
+ err2 = mana_attach(ndev);
+ if (err2)
+ netdev_err(ndev, "mana re-attach failed: %d\n", err2);
+
+ return err;
+}
+
+const struct ethtool_ops mana_ethtool_ops = {
+ .get_ethtool_stats = mana_get_ethtool_stats,
+ .get_sset_count = mana_get_sset_count,
+ .get_strings = mana_get_strings,
+ .get_rxnfc = mana_get_rxnfc,
+ .get_rxfh_key_size = mana_get_rxfh_key_size,
+ .get_rxfh_indir_size = mana_rss_indir_size,
+ .get_rxfh = mana_get_rxfh,
+ .set_rxfh = mana_set_rxfh,
+ .get_channels = mana_get_channels,
+ .set_channels = mana_set_channels,
+};
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c
new file mode 100644
index 000000000..da255da62
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
+#include "shm_channel.h"
+
+#define PAGE_FRAME_L48_WIDTH_BYTES 6
+#define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8)
+#define PAGE_FRAME_L48_MASK 0x0000FFFFFFFFFFFF
+#define PAGE_FRAME_H4_WIDTH_BITS 4
+#define VECTOR_MASK 0xFFFF
+#define SHMEM_VF_RESET_STATE ((u32)-1)
+
+#define SMC_MSG_TYPE_ESTABLISH_HWC 1
+#define SMC_MSG_TYPE_ESTABLISH_HWC_VERSION 0
+
+#define SMC_MSG_TYPE_DESTROY_HWC 2
+#define SMC_MSG_TYPE_DESTROY_HWC_VERSION 0
+
+#define SMC_MSG_DIRECTION_REQUEST 0
+#define SMC_MSG_DIRECTION_RESPONSE 1
+
+/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
+ * them are naturally aligned and hence don't need __packed.
+ */
+
+/* Shared memory channel protocol header
+ *
+ * msg_type: set on request and response; response matches request.
+ * msg_version: a newer PF writes back the version that matches the request;
+ *              an older PF acts on the latest version it knows and reports
+ *              that (lower) version in the result.
+ * direction: 0 for request, VF->PF; 1 for response, PF->VF.
+ * status: 0 on request,
+ * operation result on response (success = 0, failure = 1 or greater).
+ * reset_vf: if set on an establish or destroy request, indicates that an FLR
+ *           is performed before/after the operation.
+ * owner_is_pf: 1 indicates PF owned, 0 indicates VF owned.
+ */
+union smc_proto_hdr {
+ u32 as_uint32;
+
+ struct {
+ u8 msg_type : 3;
+ u8 msg_version : 3;
+ u8 reserved_1 : 1;
+ u8 direction : 1;
+
+ u8 status;
+
+ u8 reserved_2;
+
+ u8 reset_vf : 1;
+ u8 reserved_3 : 6;
+ u8 owner_is_pf : 1;
+ };
+}; /* HW DATA */
+
+#define SMC_APERTURE_BITS 256
+#define SMC_BASIC_UNIT (sizeof(u32))
+#define SMC_APERTURE_DWORDS (SMC_APERTURE_BITS / (SMC_BASIC_UNIT * 8))
+#define SMC_LAST_DWORD (SMC_APERTURE_DWORDS - 1)
+
+static int mana_smc_poll_register(void __iomem *base, bool reset)
+{
+ void __iomem *ptr = base + SMC_LAST_DWORD * SMC_BASIC_UNIT;
+ u32 last_dword;
+ int i;
+
+ /* Poll the hardware for the ownership bit. This should be pretty fast,
+ * but do it in a loop just in case the hardware or the PF driver
+ * is temporarily busy.
+ */
+ for (i = 0; i < 20 * 1000; i++) {
+ last_dword = readl(ptr);
+
+ /* shmem reads as 0xFFFFFFFF in the reset case */
+ if (reset && last_dword == SHMEM_VF_RESET_STATE)
+ return 0;
+
+ /* If bit_31 is set, the PF currently owns the SMC. */
+ if (!(last_dword & BIT(31)))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mana_smc_read_response(struct shm_channel *sc, u32 msg_type,
+ u32 msg_version, bool reset_vf)
+{
+ void __iomem *base = sc->base;
+ union smc_proto_hdr hdr;
+ int err;
+
+ /* Wait for PF to respond. */
+ err = mana_smc_poll_register(base, reset_vf);
+ if (err)
+ return err;
+
+ hdr.as_uint32 = readl(base + SMC_LAST_DWORD * SMC_BASIC_UNIT);
+
+ if (reset_vf && hdr.as_uint32 == SHMEM_VF_RESET_STATE)
+ return 0;
+
+ /* Validate protocol fields from the PF driver */
+ if (hdr.msg_type != msg_type || hdr.msg_version > msg_version ||
+ hdr.direction != SMC_MSG_DIRECTION_RESPONSE) {
+ dev_err(sc->dev, "Wrong SMC response 0x%x, type=%d, ver=%d\n",
+ hdr.as_uint32, msg_type, msg_version);
+ return -EPROTO;
+ }
+
+ /* Validate the operation result */
+ if (hdr.status != 0) {
+ dev_err(sc->dev, "SMC operation failed: 0x%x\n", hdr.status);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+void mana_smc_init(struct shm_channel *sc, struct device *dev,
+ void __iomem *base)
+{
+ sc->dev = dev;
+ sc->base = base;
+}
+
+int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+ u64 cq_addr, u64 rq_addr, u64 sq_addr,
+ u32 eq_msix_index)
+{
+ union smc_proto_hdr *hdr;
+ u16 all_addr_h4bits = 0;
+ u16 frame_addr_seq = 0;
+ u64 frame_addr = 0;
+ u8 shm_buf[32];
+ u64 *shmem;
+ u32 *dword;
+ u8 *ptr;
+ int err;
+ int i;
+
+ /* Ensure VF already has possession of shared memory */
+ err = mana_smc_poll_register(sc->base, false);
+ if (err) {
+ dev_err(sc->dev, "Timeout when setting up HWC: %d\n", err);
+ return err;
+ }
+
+ if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
+ !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
+ return -EINVAL;
+
+ if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
+ return -EINVAL;
+
+ /* Scheme for packing four addresses and extra info into 256 bits.
+ *
+ * Addresses must be page frame aligned, so only frame address bits
+ * are transferred.
+ *
+ * 52-bit frame addresses are split into the lower 48 bits and upper
+ * 4 bits. The lower 48 bits of the 4 addresses are written sequentially from
+ * the start of the 256-bit shared memory region followed by 16 bits
+ * containing the upper 4 bits of the 4 addresses in sequence.
+ *
+ * A 16-bit EQ vector number fills out the next-to-last 32-bit dword.
+ *
+ * The final 32-bit dword is used for protocol control information as
+ * defined in smc_proto_hdr.
+ */
+
+ memset(shm_buf, 0, sizeof(shm_buf));
+ ptr = shm_buf;
+
+ /* EQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+ frame_addr = PHYS_PFN(eq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+ ptr += PAGE_FRAME_L48_WIDTH_BYTES;
+
+ /* CQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+ frame_addr = PHYS_PFN(cq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+ ptr += PAGE_FRAME_L48_WIDTH_BYTES;
+
+ /* RQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+ frame_addr = PHYS_PFN(rq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+ ptr += PAGE_FRAME_L48_WIDTH_BYTES;
+
+ /* SQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+ frame_addr = PHYS_PFN(sq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+ ptr += PAGE_FRAME_L48_WIDTH_BYTES;
+
+ /* High 4 bits of the four frame addresses */
+ *((u16 *)ptr) = all_addr_h4bits;
+ ptr += sizeof(u16);
+
+ /* EQ MSIX vector number */
+ *((u16 *)ptr) = (u16)eq_msix_index;
+ ptr += sizeof(u16);
+
+ /* 32-bit protocol header in final dword */
+ *((u32 *)ptr) = 0;
+
+ hdr = (union smc_proto_hdr *)ptr;
+ hdr->msg_type = SMC_MSG_TYPE_ESTABLISH_HWC;
+ hdr->msg_version = SMC_MSG_TYPE_ESTABLISH_HWC_VERSION;
+ hdr->direction = SMC_MSG_DIRECTION_REQUEST;
+ hdr->reset_vf = reset_vf;
+
+ /* Write the 256-bit message buffer to shared memory (the final 32-bit
+ * write triggers the HW to hand possession over to the PF).
+ */
+ dword = (u32 *)shm_buf;
+ for (i = 0; i < SMC_APERTURE_DWORDS; i++)
+ writel(*dword++, sc->base + i * SMC_BASIC_UNIT);
+
+ /* Read shmem response (polling for VF possession) and validate.
+ * For setup, waiting for response on shared memory is not strictly
+ * necessary, since wait occurs later for results to appear in EQE's.
+ */
+ err = mana_smc_read_response(sc, SMC_MSG_TYPE_ESTABLISH_HWC,
+ SMC_MSG_TYPE_ESTABLISH_HWC_VERSION,
+ reset_vf);
+ if (err) {
+ dev_err(sc->dev, "Error when setting up HWC: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf)
+{
+ union smc_proto_hdr hdr = {};
+ int err;
+
+ /* Ensure VF already has possession of shared memory */
+ err = mana_smc_poll_register(sc->base, false);
+ if (err) {
+ dev_err(sc->dev, "Timeout when tearing down HWC\n");
+ return err;
+ }
+
+ /* Set up protocol header for HWC destroy message */
+ hdr.msg_type = SMC_MSG_TYPE_DESTROY_HWC;
+ hdr.msg_version = SMC_MSG_TYPE_DESTROY_HWC_VERSION;
+ hdr.direction = SMC_MSG_DIRECTION_REQUEST;
+ hdr.reset_vf = reset_vf;
+
+ /* Write message in high 32 bits of 256-bit shared memory, causing HW
+ * to set possession bit to PF.
+ */
+ writel(hdr.as_uint32, sc->base + SMC_LAST_DWORD * SMC_BASIC_UNIT);
+
+ /* Read shmem response (polling for VF possession) and validate.
+ * For teardown, waiting for response is required to ensure hardware
+ * invalidates MST entries before software frees memory.
+ */
+ err = mana_smc_read_response(sc, SMC_MSG_TYPE_DESTROY_HWC,
+ SMC_MSG_TYPE_DESTROY_HWC_VERSION,
+ reset_vf);
+ if (err) {
+ dev_err(sc->dev, "Error when tearing down HWC: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.h b/drivers/net/ethernet/microsoft/mana/shm_channel.h
new file mode 100644
index 000000000..5199b4149
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#ifndef _SHM_CHANNEL_H
+#define _SHM_CHANNEL_H
+
+struct shm_channel {
+ struct device *dev;
+ void __iomem *base;
+};
+
+void mana_smc_init(struct shm_channel *sc, struct device *dev,
+ void __iomem *base);
+
+int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+ u64 cq_addr, u64 rq_addr, u64 sq_addr,
+ u32 eq_msix_index);
+
+int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf);
+
+#endif /* _SHM_CHANNEL_H */