From e6918187568dbd01842d8d1d2c808ce16a894239 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 21 Apr 2024 13:54:28 +0200 Subject: Adding upstream version 18.2.2. Signed-off-by: Daniel Baumann --- src/spdk/dpdk/drivers/net/ionic/Makefile | 31 + src/spdk/dpdk/drivers/net/ionic/ionic.h | 82 + src/spdk/dpdk/drivers/net/ionic/ionic_dev.c | 579 +++++ src/spdk/dpdk/drivers/net/ionic/ionic_dev.h | 271 +++ src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.c | 1327 +++++++++++ src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.h | 22 + src/spdk/dpdk/drivers/net/ionic/ionic_if.h | 2491 ++++++++++++++++++++ src/spdk/dpdk/drivers/net/ionic/ionic_lif.c | 1696 +++++++++++++ src/spdk/dpdk/drivers/net/ionic/ionic_lif.h | 190 ++ src/spdk/dpdk/drivers/net/ionic/ionic_logs.h | 26 + src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.c | 63 + src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.h | 13 + src/spdk/dpdk/drivers/net/ionic/ionic_main.c | 443 ++++ src/spdk/dpdk/drivers/net/ionic/ionic_osdep.h | 58 + src/spdk/dpdk/drivers/net/ionic/ionic_regs.h | 142 ++ src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.c | 140 ++ src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.h | 47 + src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.c | 1082 +++++++++ src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.h | 44 + src/spdk/dpdk/drivers/net/ionic/meson.build | 12 + .../drivers/net/ionic/rte_pmd_ionic_version.map | 4 + 21 files changed, 8763 insertions(+) create mode 100644 src/spdk/dpdk/drivers/net/ionic/Makefile create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_dev.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_dev.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_if.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_lif.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_lif.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_logs.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_main.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_osdep.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_regs.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/meson.build create mode 100644 src/spdk/dpdk/drivers/net/ionic/rte_pmd_ionic_version.map (limited to 'src/spdk/dpdk/drivers/net/ionic') diff --git a/src/spdk/dpdk/drivers/net/ionic/Makefile b/src/spdk/dpdk/drivers/net/ionic/Makefile new file mode 100644 index 000000000..7442e2c5c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/Makefile @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +# Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ionic.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_ionic_version.map + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net +LDLIBS += -lrte_bus_pci + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_mac_api.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_rx_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_lif.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_main.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic.h b/src/spdk/dpdk/drivers/net/ionic/ionic.h new file mode 100644 index 000000000..1538df309 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_H_ +#define _IONIC_H_ + +#include +#include + +#include + +#include "ionic_dev.h" +#include "ionic_if.h" +#include "ionic_osdep.h" + +#define IONIC_DRV_NAME "ionic" +#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver" +#define IONIC_DRV_VERSION "0.11.0-49" + +/* Vendor ID */ +#define IONIC_PENSANDO_VENDOR_ID 0x1dd8 + +/* Device IDs */ +#define IONIC_DEV_ID_ETH_PF 0x1002 +#define IONIC_DEV_ID_ETH_VF 0x1003 +#define IONIC_DEV_ID_ETH_MGMT 0x1004 + +enum ionic_mac_type { + IONIC_MAC_UNKNOWN = 0, + IONIC_MAC_CAPRI, + IONIC_NUM_MACS +}; + +struct ionic_mac_info { + enum ionic_mac_type type; +}; + +struct ionic_hw { + struct ionic_mac_info mac; + uint16_t device_id; + uint16_t vendor_id; +}; + +/* + * Structure to store private data for each driver instance (for each adapter). + */ +struct ionic_adapter { + struct ionic_hw hw; + struct ionic_dev idev; + struct ionic_dev_bar bars[IONIC_BARS_MAX]; + struct ionic_identity ident; + struct ionic_lif *lifs[IONIC_LIFS_MAX]; + uint32_t num_bars; + uint32_t nlifs; + uint32_t max_ntxqs_per_lif; + uint32_t max_nrxqs_per_lif; + uint32_t max_mac_addrs; + uint32_t link_speed; + uint32_t nintrs; + bool intrs[IONIC_INTR_CTRL_REGS_MAX]; + bool is_mgmt_nic; + bool link_up; + char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN]; + struct rte_pci_device *pci_dev; + LIST_ENTRY(ionic_adapter) pci_adapters; +}; + +int ionic_adminq_check_err(struct ionic_admin_ctx *ctx, bool timeout); +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +int ionic_dev_cmd_wait_check(struct ionic_dev *idev, unsigned long max_wait); +int ionic_setup(struct ionic_adapter *adapter); + +int ionic_identify(struct ionic_adapter *adapter); +int ionic_init(struct ionic_adapter *adapter); +int ionic_reset(struct ionic_adapter *adapter); + +int ionic_port_identify(struct ionic_adapter *adapter); +int ionic_port_init(struct ionic_adapter *adapter); +int ionic_port_reset(struct ionic_adapter *adapter); + +#endif /* _IONIC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_dev.c b/src/spdk/dpdk/drivers/net/ionic/ionic_dev.c new file mode 100644 index 000000000..5c2820b7a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_dev.c @@ -0,0 +1,579 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ */ + +#include + +#include + +#include "ionic_dev.h" +#include "ionic_lif.h" +#include "ionic.h" + +int +ionic_dev_setup(struct ionic_adapter *adapter) +{ + struct ionic_dev_bar *bar = adapter->bars; + unsigned int num_bars = adapter->num_bars; + struct ionic_dev *idev = &adapter->idev; + uint32_t sig; + u_char *bar0_base; + unsigned int i; + + /* BAR0: dev_cmd and interrupts */ + if (num_bars < 1) { + IONIC_PRINT(ERR, "No bars found, aborting"); + return -EFAULT; + } + + if (bar->len < IONIC_BAR0_SIZE) { + IONIC_PRINT(ERR, + "Resource bar size %lu too small, aborting", + bar->len); + return -EFAULT; + } + + bar0_base = bar->vaddr; + idev->dev_info = (union ionic_dev_info_regs *) + &bar0_base[IONIC_BAR0_DEV_INFO_REGS_OFFSET]; + idev->dev_cmd = (union ionic_dev_cmd_regs *) + &bar0_base[IONIC_BAR0_DEV_CMD_REGS_OFFSET]; + idev->intr_status = (struct ionic_intr_status *) + &bar0_base[IONIC_BAR0_INTR_STATUS_OFFSET]; + idev->intr_ctrl = (struct ionic_intr *) + &bar0_base[IONIC_BAR0_INTR_CTRL_OFFSET]; + + sig = ioread32(&idev->dev_info->signature); + if (sig != IONIC_DEV_INFO_SIGNATURE) { + IONIC_PRINT(ERR, "Incompatible firmware signature %" PRIx32 "", + sig); + return -EFAULT; + } + + for (i = 0; i < IONIC_DEVINFO_FWVERS_BUFLEN; i++) + adapter->fw_version[i] = + ioread8(&idev->dev_info->fw_version[i]); + adapter->fw_version[IONIC_DEVINFO_FWVERS_BUFLEN - 1] = '\0'; + + IONIC_PRINT(DEBUG, "Firmware version: %s", adapter->fw_version); + + /* BAR1: doorbells */ + bar++; + if (num_bars < 2) { + IONIC_PRINT(ERR, "Doorbell bar missing, aborting"); + return -EFAULT; + } + + idev->db_pages = bar->vaddr; + idev->phy_db_pages = bar->bus_addr; + + return 0; +} + +/* Devcmd Interface */ + +uint8_t +ionic_dev_cmd_status(struct ionic_dev *idev) +{ + return ioread8(&idev->dev_cmd->comp.comp.status); +} + +bool +ionic_dev_cmd_done(struct ionic_dev *idev) +{ + return ioread32(&idev->dev_cmd->done) & IONIC_DEV_CMD_DONE; +} + +void +ionic_dev_cmd_comp(struct ionic_dev *idev, void *mem) +{ + union ionic_dev_cmd_comp *comp = mem; + unsigned int i; + uint32_t comp_size = sizeof(comp->words) / + sizeof(comp->words[0]); + + for (i = 0; i < comp_size; i++) + comp->words[i] = ioread32(&idev->dev_cmd->comp.words[i]); +} + +void +ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd) +{ + unsigned int i; + uint32_t cmd_size = sizeof(cmd->words) / + sizeof(cmd->words[0]); + + for (i = 0; i < cmd_size; i++) + iowrite32(cmd->words[i], &idev->dev_cmd->cmd.words[i]); + + iowrite32(0, &idev->dev_cmd->done); + iowrite32(1, &idev->dev_cmd->doorbell); +} + +/* Device commands */ + +void +ionic_dev_cmd_identify(struct ionic_dev *idev, uint8_t ver) +{ + union ionic_dev_cmd cmd = { + .identify.opcode = IONIC_CMD_IDENTIFY, + .identify.ver = ver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_init(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .init.opcode = IONIC_CMD_INIT, + .init.type = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_reset(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .reset.opcode = IONIC_CMD_RESET, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +/* Port commands */ + +void +ionic_dev_cmd_port_identify(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_init.opcode = IONIC_CMD_PORT_IDENTIFY, + .port_init.index = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_init(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_init.opcode = IONIC_CMD_PORT_INIT, + .port_init.index = 0, + .port_init.info_pa 
= idev->port_info_pa, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_reset(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_reset.opcode = IONIC_CMD_PORT_RESET, + .port_reset.index = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_state(struct ionic_dev *idev, uint8_t state) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_STATE, + .port_setattr.state = state, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_speed(struct ionic_dev *idev, uint32_t speed) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_SPEED, + .port_setattr.speed = speed, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_mtu(struct ionic_dev *idev, uint32_t mtu) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_MTU, + .port_setattr.mtu = mtu, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, uint8_t an_enable) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_AUTONEG, + .port_setattr.an_enable = an_enable, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_fec(struct ionic_dev *idev, uint8_t fec_type) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_FEC, + .port_setattr.fec_type = fec_type, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_pause(struct ionic_dev *idev, uint8_t pause_type) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_PAUSE, + .port_setattr.pause_type = pause_type, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_loopback(struct ionic_dev *idev, uint8_t loopback_mode) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_LOOPBACK, + .port_setattr.loopback_mode = loopback_mode, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +/* LIF commands */ + +void +ionic_dev_cmd_lif_identify(struct ionic_dev *idev, uint8_t type, uint8_t ver) +{ + union ionic_dev_cmd cmd = { + .lif_identify.opcode = IONIC_CMD_LIF_IDENTIFY, + .lif_identify.type = type, + .lif_identify.ver = ver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_lif_init(struct ionic_dev *idev, uint16_t lif_index, + rte_iova_t info_pa) +{ + union ionic_dev_cmd cmd = { + .lif_init.opcode = IONIC_CMD_LIF_INIT, + .lif_init.index = lif_index, + .lif_init.info_pa = info_pa, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_lif_reset(struct ionic_dev *idev, uint16_t lif_index) +{ + union ionic_dev_cmd cmd = { + .lif_init.opcode = IONIC_CMD_LIF_RESET, + .lif_init.index = lif_index, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +struct ionic_doorbell * +ionic_db_map(struct ionic_lif *lif, struct ionic_queue *q) +{ + return lif->kern_dbpage + q->hw_type; +} + +int +ionic_db_page_num(struct ionic_lif *lif, int pid) +{ + return (lif->index * 0) + pid; +} + +void +ionic_intr_init(struct ionic_dev *idev, struct ionic_intr_info *intr, + unsigned long index) 
+{ + ionic_intr_clean(idev->intr_ctrl, index); + intr->index = index; +} + +void +ionic_dev_cmd_adminq_init(struct ionic_dev *idev, + struct ionic_qcq *qcq, + uint16_t lif_index, uint16_t intr_index) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + + union ionic_dev_cmd cmd = { + .q_init.opcode = IONIC_CMD_Q_INIT, + .q_init.lif_index = lif_index, + .q_init.type = q->type, + .q_init.index = q->index, + .q_init.flags = IONIC_QINIT_F_ENA, + .q_init.pid = q->pid, + .q_init.intr_index = intr_index, + .q_init.ring_size = rte_log2_u32(q->num_descs), + .q_init.ring_base = q->base_pa, + .q_init.cq_ring_base = cq->base_pa, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +int +ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, + struct ionic_intr_info *intr, + uint32_t num_descs, size_t desc_size) +{ + if (desc_size == 0) { + IONIC_PRINT(ERR, "Descriptor size is %zu", desc_size); + return -EINVAL; + } + + if (!rte_is_power_of_2(num_descs) || + num_descs < IONIC_MIN_RING_DESC || + num_descs > IONIC_MAX_RING_DESC) { + IONIC_PRINT(ERR, "%u descriptors (min: %u max: %u)", + num_descs, IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC); + return -EINVAL; + } + + cq->lif = lif; + cq->bound_intr = intr; + cq->num_descs = num_descs; + cq->desc_size = desc_size; + cq->tail_idx = 0; + cq->done_color = 1; + + return 0; +} + +void +ionic_cq_map(struct ionic_cq *cq, void *base, rte_iova_t base_pa) +{ + cq->base = base; + cq->base_pa = base_pa; +} + +void +ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q) +{ + cq->bound_q = q; + q->bound_cq = cq; +} + +uint32_t +ionic_cq_service(struct ionic_cq *cq, uint32_t work_to_do, + ionic_cq_cb cb, void *cb_arg) +{ + uint32_t work_done = 0; + + if (work_to_do == 0) + return 0; + + while (cb(cq, cq->tail_idx, cb_arg)) { + cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); + if (cq->tail_idx == 0) + cq->done_color = !cq->done_color; + + if (++work_done == work_to_do) + break; + } + + return work_done; +} + +int +ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, + struct ionic_queue *q, uint32_t index, uint32_t num_descs, + size_t desc_size, size_t sg_desc_size, uint32_t pid) +{ + uint32_t ring_size; + + if (desc_size == 0 || !rte_is_power_of_2(num_descs)) + return -EINVAL; + + ring_size = rte_log2_u32(num_descs); + + if (ring_size < 2 || ring_size > 16) + return -EINVAL; + + q->lif = lif; + q->idev = idev; + q->index = index; + q->num_descs = num_descs; + q->desc_size = desc_size; + q->sg_desc_size = sg_desc_size; + q->head_idx = 0; + q->tail_idx = 0; + q->pid = pid; + + return 0; +} + +void +ionic_q_map(struct ionic_queue *q, void *base, rte_iova_t base_pa) +{ + q->base = base; + q->base_pa = base_pa; +} + +void +ionic_q_sg_map(struct ionic_queue *q, void *base, rte_iova_t base_pa) +{ + q->sg_base = base; + q->sg_base_pa = base_pa; +} + +void +ionic_q_flush(struct ionic_queue *q) +{ + writeq(IONIC_DBELL_QID(q->hw_index) | q->head_idx, q->db); +} + +void +ionic_q_post(struct ionic_queue *q, bool ring_doorbell, desc_cb cb, + void *cb_arg) +{ + struct ionic_desc_info *head = &q->info[q->head_idx]; + + head->cb = cb; + head->cb_arg = cb_arg; + + q->head_idx = (q->head_idx + 1) & (q->num_descs - 1); + + if (ring_doorbell) + ionic_q_flush(q); +} + +uint32_t +ionic_q_space_avail(struct ionic_queue *q) +{ + uint32_t avail = q->tail_idx; + + if (q->head_idx >= avail) + avail += q->num_descs - q->head_idx - 1; + else + avail -= q->head_idx + 1; + + return avail; +} + +bool +ionic_q_has_space(struct ionic_queue *q, uint32_t want) +{ + return 
ionic_q_space_avail(q) >= want; +} + +void +ionic_q_service(struct ionic_queue *q, uint32_t cq_desc_index, + uint32_t stop_index, void *service_cb_arg) +{ + struct ionic_desc_info *desc_info; + uint32_t curr_q_tail_idx; + + do { + desc_info = &q->info[q->tail_idx]; + + if (desc_info->cb) + desc_info->cb(q, q->tail_idx, cq_desc_index, + desc_info->cb_arg, service_cb_arg); + + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + + curr_q_tail_idx = q->tail_idx; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + + } while (curr_q_tail_idx != stop_index); +} + +static void +ionic_adminq_cb(struct ionic_queue *q, + uint32_t q_desc_index, uint32_t cq_desc_index, + void *cb_arg, void *service_cb_arg __rte_unused) +{ + struct ionic_admin_ctx *ctx = cb_arg; + struct ionic_admin_comp *cq_desc_base = q->bound_cq->base; + struct ionic_admin_comp *cq_desc = &cq_desc_base[cq_desc_index]; + + if (unlikely(cq_desc->comp_index != q_desc_index)) { + IONIC_WARN_ON(cq_desc->comp_index != q_desc_index); + return; + } + + memcpy(&ctx->comp, cq_desc, sizeof(*cq_desc)); + + ctx->pending_work = false; /* done */ +} + +/** ionic_adminq_post - Post an admin command. + * @lif: Handle to lif. + * @cmd_ctx: Api admin command context. + * + * Post the command to an admin queue in the ethernet driver. If this command + * succeeds, then the command has been posted, but that does not indicate a + * completion. If this command returns success, then the completion callback + * will eventually be called. + * + * Return: zero or negative error status. + */ +int +ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + struct ionic_queue *adminq = &lif->adminqcq->q; + struct ionic_admin_cmd *q_desc_base = adminq->base; + struct ionic_admin_cmd *q_desc; + int err = 0; + + rte_spinlock_lock(&lif->adminq_lock); + + if (!ionic_q_has_space(adminq, 1)) { + err = -ENOSPC; + goto err_out; + } + + q_desc = &q_desc_base[adminq->head_idx]; + + memcpy(q_desc, &ctx->cmd, sizeof(ctx->cmd)); + + ionic_q_post(adminq, true, ionic_adminq_cb, ctx); + +err_out: + rte_spinlock_unlock(&lif->adminq_lock); + + return err; +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_dev.h b/src/spdk/dpdk/drivers/net/ionic/ionic_dev.h new file mode 100644 index 000000000..532255a60 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_dev.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ */ + +#ifndef _IONIC_DEV_H_ +#define _IONIC_DEV_H_ + +#include + +#include "ionic_osdep.h" +#include "ionic_if.h" +#include "ionic_regs.h" + +#define IONIC_MIN_MTU RTE_ETHER_MIN_MTU +#define IONIC_MAX_MTU 9194 + +#define IONIC_MAX_RING_DESC 32768 +#define IONIC_MIN_RING_DESC 16 +#define IONIC_DEF_TXRX_DESC 4096 + +#define IONIC_LIFS_MAX 1024 + +#define IONIC_DEVCMD_TIMEOUT 30 /* devcmd_timeout */ +#define IONIC_ALIGN 4096 + +struct ionic_adapter; + +struct ionic_dev_bar { + void __iomem *vaddr; + rte_iova_t bus_addr; + unsigned long len; +}; + +static inline void ionic_struct_size_checks(void) +{ + RTE_BUILD_BUG_ON(sizeof(struct ionic_doorbell) != 8); + RTE_BUILD_BUG_ON(sizeof(struct ionic_intr) != 32); + RTE_BUILD_BUG_ON(sizeof(struct ionic_intr_status) != 8); + + RTE_BUILD_BUG_ON(sizeof(union ionic_dev_regs) != 4096); + RTE_BUILD_BUG_ON(sizeof(union ionic_dev_info_regs) != 2048); + RTE_BUILD_BUG_ON(sizeof(union ionic_dev_cmd_regs) != 2048); + + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_stats) != 1024); + + RTE_BUILD_BUG_ON(sizeof(struct ionic_admin_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_admin_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_nop_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_nop_comp) != 16); + + /* Device commands */ + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_identify_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_identify_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_init_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_init_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_reset_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_reset_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_getattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_getattr_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_setattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_setattr_comp) != 16); + + /* Port commands */ + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_identify_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_identify_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_init_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_init_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_reset_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_reset_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_getattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_getattr_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_setattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_setattr_comp) != 16); + + /* LIF commands */ + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_init_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_init_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_reset_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_getattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_getattr_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_setattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_setattr_comp) != 16); + + RTE_BUILD_BUG_ON(sizeof(struct ionic_q_init_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_q_init_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_q_control_cmd) != 64); + + RTE_BUILD_BUG_ON(sizeof(struct ionic_rx_mode_set_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rx_filter_add_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rx_filter_add_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rx_filter_del_cmd) != 64); + + /* RDMA commands */ + 
RTE_BUILD_BUG_ON(sizeof(struct ionic_rdma_reset_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rdma_queue_cmd) != 64); + + /* Events */ + RTE_BUILD_BUG_ON(sizeof(struct ionic_notifyq_cmd) != 4); + RTE_BUILD_BUG_ON(sizeof(union ionic_notifyq_comp) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_notifyq_event) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_link_change_event) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_reset_event) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_heartbeat_event) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_log_event) != 64); + + /* I/O */ + RTE_BUILD_BUG_ON(sizeof(struct ionic_txq_desc) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_txq_sg_desc) != 128); + RTE_BUILD_BUG_ON(sizeof(struct ionic_txq_comp) != 16); + + RTE_BUILD_BUG_ON(sizeof(struct ionic_rxq_desc) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rxq_sg_desc) != 128); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rxq_comp) != 16); +} + +struct ionic_dev { + union ionic_dev_info_regs __iomem *dev_info; + union ionic_dev_cmd_regs __iomem *dev_cmd; + + struct ionic_doorbell __iomem *db_pages; + rte_iova_t phy_db_pages; + + struct ionic_intr __iomem *intr_ctrl; + + struct ionic_intr_status __iomem *intr_status; + + struct ionic_port_info *port_info; + const struct rte_memzone *port_info_z; + rte_iova_t port_info_pa; + uint32_t port_info_sz; +}; + +struct ionic_queue; +struct ionic_desc_info; + +typedef void (*desc_cb)(struct ionic_queue *q, + uint32_t q_desc_index, + uint32_t cq_desc_index, + void *cb_arg, void *service_cb_arg); + +struct ionic_desc_info { + desc_cb cb; + void *cb_arg; +}; + +struct ionic_queue { + struct ionic_dev *idev; + struct ionic_lif *lif; + struct ionic_cq *bound_cq; + uint32_t index; + uint32_t type; + uint32_t hw_index; + uint32_t hw_type; + void *base; + void *sg_base; + rte_iova_t base_pa; + rte_iova_t sg_base_pa; + struct ionic_desc_info *info; + uint32_t tail_idx; + uint32_t head_idx; + uint32_t num_descs; + uint32_t desc_size; + uint32_t sg_desc_size; + uint32_t pid; + uint32_t qid; + uint32_t qtype; + struct ionic_doorbell __iomem *db; + void *nop_desc; +}; + +#define IONIC_INTR_INDEX_NOT_ASSIGNED (-1) +#define IONIC_INTR_NAME_MAX_SZ (32) + +struct ionic_intr_info { + char name[IONIC_INTR_NAME_MAX_SZ]; + int index; + uint32_t vector; + struct ionic_intr __iomem *ctrl; +}; + +struct ionic_cq { + struct ionic_lif *lif; + struct ionic_queue *bound_q; + uint32_t tail_idx; + uint32_t num_descs; + uint32_t desc_size; + bool done_color; + void *base; + rte_iova_t base_pa; + struct ionic_intr_info *bound_intr; +}; + +/** ionic_admin_ctx - Admin command context. + * @pending_work: Flag that indicates a completion. + * @cmd: Admin command (64B) to be copied to the queue. + * @comp: Admin completion (16B) copied from the queue. 
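+ *
+ * Illustrative usage sketch (the opcode/attr names below follow ionic_if.h
+ * and mirror the adminq call sites in ionic_lif.c; they are shown here for
+ * orientation only, not as part of this header's API):
+ *
+ *	struct ionic_admin_ctx ctx = {
+ *		.pending_work = true,
+ *		.cmd.lif_setattr = {
+ *			.opcode = IONIC_CMD_LIF_SETATTR,
+ *			.attr = IONIC_LIF_ATTR_MTU,
+ *			.mtu = new_mtu,
+ *		},
+ *	};
+ *	err = ionic_adminq_post_wait(lif, &ctx);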
+ */ +struct ionic_admin_ctx { + bool pending_work; + union ionic_adminq_cmd cmd; + union ionic_adminq_comp comp; +}; + +struct ionic_lif; +struct ionic_adapter; +struct ionic_qcq; + +void ionic_intr_init(struct ionic_dev *idev, struct ionic_intr_info *intr, + unsigned long index); + +int ionic_dev_setup(struct ionic_adapter *adapter); + +void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd); +uint8_t ionic_dev_cmd_status(struct ionic_dev *idev); +bool ionic_dev_cmd_done(struct ionic_dev *idev); +void ionic_dev_cmd_comp(struct ionic_dev *idev, void *mem); + +void ionic_dev_cmd_identify(struct ionic_dev *idev, uint8_t ver); +void ionic_dev_cmd_init(struct ionic_dev *idev); +void ionic_dev_cmd_reset(struct ionic_dev *idev); + +void ionic_dev_cmd_port_identify(struct ionic_dev *idev); +void ionic_dev_cmd_port_init(struct ionic_dev *idev); +void ionic_dev_cmd_port_reset(struct ionic_dev *idev); +void ionic_dev_cmd_port_state(struct ionic_dev *idev, uint8_t state); +void ionic_dev_cmd_port_speed(struct ionic_dev *idev, uint32_t speed); +void ionic_dev_cmd_port_mtu(struct ionic_dev *idev, uint32_t mtu); +void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, uint8_t an_enable); +void ionic_dev_cmd_port_fec(struct ionic_dev *idev, uint8_t fec_type); +void ionic_dev_cmd_port_pause(struct ionic_dev *idev, uint8_t pause_type); +void ionic_dev_cmd_port_loopback(struct ionic_dev *idev, + uint8_t loopback_mode); + +void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, uint8_t type, + uint8_t ver); +void ionic_dev_cmd_lif_init(struct ionic_dev *idev, uint16_t lif_index, + rte_iova_t addr); +void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, uint16_t lif_index); +void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq, + uint16_t lif_index, uint16_t intr_index); + +struct ionic_doorbell __iomem *ionic_db_map(struct ionic_lif *lif, + struct ionic_queue *q); +int ionic_db_page_num(struct ionic_lif *lif, int pid); + +int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, + struct ionic_intr_info *intr, uint32_t num_descs, + size_t desc_size); +void ionic_cq_map(struct ionic_cq *cq, void *base, rte_iova_t base_pa); +void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q); +typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, uint32_t cq_desc_index, + void *cb_arg); +uint32_t ionic_cq_service(struct ionic_cq *cq, uint32_t work_to_do, + ionic_cq_cb cb, void *cb_arg); + +int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, + struct ionic_queue *q, uint32_t index, uint32_t num_descs, + size_t desc_size, size_t sg_desc_size, uint32_t pid); +void ionic_q_map(struct ionic_queue *q, void *base, rte_iova_t base_pa); +void ionic_q_sg_map(struct ionic_queue *q, void *base, rte_iova_t base_pa); +void ionic_q_flush(struct ionic_queue *q); +void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, desc_cb cb, + void *cb_arg); +uint32_t ionic_q_space_avail(struct ionic_queue *q); +bool ionic_q_has_space(struct ionic_queue *q, uint32_t want); +void ionic_q_service(struct ionic_queue *q, uint32_t cq_desc_index, + uint32_t stop_index, void *service_cb_arg); + +int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); + +#endif /* _IONIC_DEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.c b/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.c new file mode 100644 index 000000000..363f0cf00 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.c @@ -0,0 +1,1327 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * 
Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "ionic_logs.h" +#include "ionic.h" +#include "ionic_dev.h" +#include "ionic_mac_api.h" +#include "ionic_lif.h" +#include "ionic_ethdev.h" +#include "ionic_rxtx.h" + +static int eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params); +static int eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev); +static int ionic_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info); +static int ionic_dev_configure(struct rte_eth_dev *dev); +static int ionic_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int ionic_dev_start(struct rte_eth_dev *dev); +static void ionic_dev_stop(struct rte_eth_dev *dev); +static void ionic_dev_close(struct rte_eth_dev *dev); +static int ionic_dev_set_link_up(struct rte_eth_dev *dev); +static int ionic_dev_set_link_down(struct rte_eth_dev *dev); +static int ionic_dev_link_update(struct rte_eth_dev *eth_dev, + int wait_to_complete); +static int ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf); +static int ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf); +static int ionic_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask); +static int ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); +static int ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); +static int ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf); +static int ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf); +static int ionic_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *stats); +static int ionic_dev_stats_reset(struct rte_eth_dev *eth_dev); +static int ionic_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned int n); +static int ionic_dev_xstats_get_by_id(struct rte_eth_dev *dev, + const uint64_t *ids, uint64_t *values, unsigned int n); +static int ionic_dev_xstats_reset(struct rte_eth_dev *dev); +static int ionic_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned int size); +static int ionic_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + unsigned int limit); +static int ionic_dev_fw_version_get(struct rte_eth_dev *eth_dev, + char *fw_version, size_t fw_size); + +int ionic_logtype; + +static const struct rte_pci_id pci_id_ionic_map[] = { + { RTE_PCI_DEVICE(IONIC_PENSANDO_VENDOR_ID, IONIC_DEV_ID_ETH_PF) }, + { RTE_PCI_DEVICE(IONIC_PENSANDO_VENDOR_ID, IONIC_DEV_ID_ETH_VF) }, + { RTE_PCI_DEVICE(IONIC_PENSANDO_VENDOR_ID, IONIC_DEV_ID_ETH_MGMT) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = IONIC_MAX_RING_DESC, + .nb_min = IONIC_MIN_RING_DESC, + .nb_align = 1, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = IONIC_MAX_RING_DESC, + .nb_min = IONIC_MIN_RING_DESC, + .nb_align = 1, + .nb_seg_max = IONIC_TX_MAX_SG_ELEMS, + .nb_mtu_seg_max = IONIC_TX_MAX_SG_ELEMS, +}; + +static const struct eth_dev_ops ionic_eth_dev_ops = { + .dev_infos_get = ionic_dev_info_get, + .dev_configure = ionic_dev_configure, + .mtu_set = ionic_dev_mtu_set, + .dev_start = ionic_dev_start, + .dev_stop = 
ionic_dev_stop, + .dev_close = ionic_dev_close, + .link_update = ionic_dev_link_update, + .dev_set_link_up = ionic_dev_set_link_up, + .dev_set_link_down = ionic_dev_set_link_down, + .mac_addr_add = ionic_dev_add_mac, + .mac_addr_remove = ionic_dev_remove_mac, + .mac_addr_set = ionic_dev_set_mac, + .vlan_filter_set = ionic_dev_vlan_filter_set, + .promiscuous_enable = ionic_dev_promiscuous_enable, + .promiscuous_disable = ionic_dev_promiscuous_disable, + .allmulticast_enable = ionic_dev_allmulticast_enable, + .allmulticast_disable = ionic_dev_allmulticast_disable, + .flow_ctrl_get = ionic_flow_ctrl_get, + .flow_ctrl_set = ionic_flow_ctrl_set, + .rxq_info_get = ionic_rxq_info_get, + .txq_info_get = ionic_txq_info_get, + .rx_queue_setup = ionic_dev_rx_queue_setup, + .rx_queue_release = ionic_dev_rx_queue_release, + .rx_queue_start = ionic_dev_rx_queue_start, + .rx_queue_stop = ionic_dev_rx_queue_stop, + .tx_queue_setup = ionic_dev_tx_queue_setup, + .tx_queue_release = ionic_dev_tx_queue_release, + .tx_queue_start = ionic_dev_tx_queue_start, + .tx_queue_stop = ionic_dev_tx_queue_stop, + .vlan_offload_set = ionic_vlan_offload_set, + .reta_update = ionic_dev_rss_reta_update, + .reta_query = ionic_dev_rss_reta_query, + .rss_hash_conf_get = ionic_dev_rss_hash_conf_get, + .rss_hash_update = ionic_dev_rss_hash_update, + .stats_get = ionic_dev_stats_get, + .stats_reset = ionic_dev_stats_reset, + .xstats_get = ionic_dev_xstats_get, + .xstats_get_by_id = ionic_dev_xstats_get_by_id, + .xstats_reset = ionic_dev_xstats_reset, + .xstats_get_names = ionic_dev_xstats_get_names, + .xstats_get_names_by_id = ionic_dev_xstats_get_names_by_id, + .fw_version_get = ionic_dev_fw_version_get, +}; + +struct rte_ionic_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned int offset; +}; + +static const struct rte_ionic_xstats_name_off rte_ionic_xstats_strings[] = { + /* RX */ + {"rx_ucast_bytes", offsetof(struct ionic_lif_stats, + rx_ucast_bytes)}, + {"rx_ucast_packets", offsetof(struct ionic_lif_stats, + rx_ucast_packets)}, + {"rx_mcast_bytes", offsetof(struct ionic_lif_stats, + rx_mcast_bytes)}, + {"rx_mcast_packets", offsetof(struct ionic_lif_stats, + rx_mcast_packets)}, + {"rx_bcast_bytes", offsetof(struct ionic_lif_stats, + rx_bcast_bytes)}, + {"rx_bcast_packets", offsetof(struct ionic_lif_stats, + rx_bcast_packets)}, + /* RX drops */ + {"rx_ucast_drop_bytes", offsetof(struct ionic_lif_stats, + rx_ucast_drop_bytes)}, + {"rx_ucast_drop_packets", offsetof(struct ionic_lif_stats, + rx_ucast_drop_packets)}, + {"rx_mcast_drop_bytes", offsetof(struct ionic_lif_stats, + rx_mcast_drop_bytes)}, + {"rx_mcast_drop_packets", offsetof(struct ionic_lif_stats, + rx_mcast_drop_packets)}, + {"rx_bcast_drop_bytes", offsetof(struct ionic_lif_stats, + rx_bcast_drop_bytes)}, + {"rx_bcast_drop_packets", offsetof(struct ionic_lif_stats, + rx_bcast_drop_packets)}, + {"rx_dma_error", offsetof(struct ionic_lif_stats, + rx_dma_error)}, + /* TX */ + {"tx_ucast_bytes", offsetof(struct ionic_lif_stats, + tx_ucast_bytes)}, + {"tx_ucast_packets", offsetof(struct ionic_lif_stats, + tx_ucast_packets)}, + {"tx_mcast_bytes", offsetof(struct ionic_lif_stats, + tx_mcast_bytes)}, + {"tx_mcast_packets", offsetof(struct ionic_lif_stats, + tx_mcast_packets)}, + {"tx_bcast_bytes", offsetof(struct ionic_lif_stats, + tx_bcast_bytes)}, + {"tx_bcast_packets", offsetof(struct ionic_lif_stats, + tx_bcast_packets)}, + /* TX drops */ + {"tx_ucast_drop_bytes", offsetof(struct ionic_lif_stats, + tx_ucast_drop_bytes)}, + {"tx_ucast_drop_packets", 
offsetof(struct ionic_lif_stats, + tx_ucast_drop_packets)}, + {"tx_mcast_drop_bytes", offsetof(struct ionic_lif_stats, + tx_mcast_drop_bytes)}, + {"tx_mcast_drop_packets", offsetof(struct ionic_lif_stats, + tx_mcast_drop_packets)}, + {"tx_bcast_drop_bytes", offsetof(struct ionic_lif_stats, + tx_bcast_drop_bytes)}, + {"tx_bcast_drop_packets", offsetof(struct ionic_lif_stats, + tx_bcast_drop_packets)}, + {"tx_dma_error", offsetof(struct ionic_lif_stats, + tx_dma_error)}, + /* Rx Queue/Ring drops */ + {"rx_queue_disabled", offsetof(struct ionic_lif_stats, + rx_queue_disabled)}, + {"rx_queue_empty", offsetof(struct ionic_lif_stats, + rx_queue_empty)}, + {"rx_queue_error", offsetof(struct ionic_lif_stats, + rx_queue_error)}, + {"rx_desc_fetch_error", offsetof(struct ionic_lif_stats, + rx_desc_fetch_error)}, + {"rx_desc_data_error", offsetof(struct ionic_lif_stats, + rx_desc_data_error)}, + /* Tx Queue/Ring drops */ + {"tx_queue_disabled", offsetof(struct ionic_lif_stats, + tx_queue_disabled)}, + {"tx_queue_error", offsetof(struct ionic_lif_stats, + tx_queue_error)}, + {"tx_desc_fetch_error", offsetof(struct ionic_lif_stats, + tx_desc_fetch_error)}, + {"tx_desc_data_error", offsetof(struct ionic_lif_stats, + tx_desc_data_error)}, +}; + +#define IONIC_NB_HW_STATS (sizeof(rte_ionic_xstats_strings) / \ + sizeof(rte_ionic_xstats_strings[0])) + +static int +ionic_dev_fw_version_get(struct rte_eth_dev *eth_dev, + char *fw_version, size_t fw_size) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + + if (fw_version == NULL || fw_size <= 0) + return -EINVAL; + + snprintf(fw_version, fw_size, "%s", + adapter->fw_version); + fw_version[fw_size - 1] = '\0'; + + return 0; +} + +/* + * Set device link up, enable tx. + */ +static int +ionic_dev_set_link_up(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_dev *idev = &adapter->idev; + int err; + + IONIC_PRINT_CALL(); + + ionic_dev_cmd_port_state(idev, IONIC_PORT_ADMIN_STATE_UP); + + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) { + IONIC_PRINT(WARNING, "Failed to bring port UP"); + return err; + } + + return 0; +} + +/* + * Set device link down, disable tx. 
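+ *
+ * The port is brought down with a PORT_SETATTR devcmd that sets the admin
+ * state to IONIC_PORT_ADMIN_STATE_DOWN, then the driver polls for devcmd
+ * completion via ionic_dev_cmd_wait_check().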
+ */
+static int
+ionic_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct ionic_adapter *adapter = lif->adapter;
+	struct ionic_dev *idev = &adapter->idev;
+	int err;
+
+	IONIC_PRINT_CALL();
+
+	ionic_dev_cmd_port_state(idev, IONIC_PORT_ADMIN_STATE_DOWN);
+
+	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
+	if (err) {
+		IONIC_PRINT(WARNING, "Failed to bring port DOWN");
+		return err;
+	}
+
+	return 0;
+}
+
+static int
+ionic_dev_link_update(struct rte_eth_dev *eth_dev,
+		int wait_to_complete __rte_unused)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct ionic_adapter *adapter = lif->adapter;
+	struct rte_eth_link link;
+
+	IONIC_PRINT_CALL();
+
+	/* Initialize */
+	memset(&link, 0, sizeof(link));
+	link.link_autoneg = ETH_LINK_AUTONEG;
+
+	if (!adapter->link_up) {
+		/* Interface is down */
+		link.link_status = ETH_LINK_DOWN;
+		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_speed = ETH_SPEED_NUM_NONE;
+	} else {
+		/* Interface is up */
+		link.link_status = ETH_LINK_UP;
+		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		switch (adapter->link_speed) {
+		case 10000:
+			link.link_speed = ETH_SPEED_NUM_10G;
+			break;
+		case 25000:
+			link.link_speed = ETH_SPEED_NUM_25G;
+			break;
+		case 40000:
+			link.link_speed = ETH_SPEED_NUM_40G;
+			break;
+		case 50000:
+			link.link_speed = ETH_SPEED_NUM_50G;
+			break;
+		case 100000:
+			link.link_speed = ETH_SPEED_NUM_100G;
+			break;
+		default:
+			link.link_speed = ETH_SPEED_NUM_NONE;
+			break;
+		}
+	}
+
+	return rte_eth_linkstatus_set(eth_dev, &link);
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * a specific interrupt.
+ *
+ * @param param
+ *  The address of parameter registered before.
+ *
+ * @return
+ *  void
+ */
+static void
+ionic_dev_interrupt_handler(void *param)
+{
+	struct ionic_adapter *adapter = (struct ionic_adapter *)param;
+	uint32_t i;
+
+	IONIC_PRINT(DEBUG, "->");
+
+	for (i = 0; i < adapter->nlifs; i++) {
+		if (adapter->lifs[i])
+			ionic_notifyq_handler(adapter->lifs[i], -1);
+	}
+}
+
+static int
+ionic_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	uint32_t max_frame_size;
+	int err;
+
+	IONIC_PRINT_CALL();
+
+	/*
+	 * Note: mtu check against IONIC_MIN_MTU, IONIC_MAX_MTU
+	 * is done by the API.
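+	 *
+	 * For example, with mtu = 1500 the max_frame_size computed below is
+	 * 1500 + RTE_ETHER_HDR_LEN (14) + 4 (VLAN) + 4 (QinQ) = 1522 bytes,
+	 * which must not exceed rxmode.max_rx_pkt_len.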
+ */ + + /* + * Max frame size is MTU + Ethernet header + VLAN + QinQ + * (plus ETHER_CRC_LEN if the adapter is able to keep CRC) + */ + max_frame_size = mtu + RTE_ETHER_HDR_LEN + 4 + 4; + + if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len < max_frame_size) + return -EINVAL; + + err = ionic_lif_change_mtu(lif, mtu); + if (err) + return err; + + return 0; +} + +static int +ionic_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_identity *ident = &adapter->ident; + + IONIC_PRINT_CALL(); + + dev_info->max_rx_queues = (uint16_t) + ident->lif.eth.config.queue_count[IONIC_QTYPE_RXQ]; + dev_info->max_tx_queues = (uint16_t) + ident->lif.eth.config.queue_count[IONIC_QTYPE_TXQ]; + /* Also add ETHER_CRC_LEN if the adapter is able to keep CRC */ + dev_info->min_rx_bufsize = IONIC_MIN_MTU + RTE_ETHER_HDR_LEN; + dev_info->max_rx_pktlen = IONIC_MAX_MTU + RTE_ETHER_HDR_LEN; + dev_info->max_mac_addrs = adapter->max_mac_addrs; + dev_info->min_mtu = IONIC_MIN_MTU; + dev_info->max_mtu = IONIC_MAX_MTU; + + dev_info->hash_key_size = IONIC_RSS_HASH_KEY_SIZE; + dev_info->reta_size = ident->lif.eth.rss_ind_tbl_sz; + dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL; + + dev_info->speed_capa = + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | + ETH_LINK_SPEED_100G; + + /* + * Per-queue capabilities. Actually most of the offloads are enabled + * by default on the port and can be used on selected queues (by adding + * packet flags at runtime when required) + */ + + dev_info->rx_queue_offload_capa = + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + 0; + + dev_info->tx_queue_offload_capa = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | + 0; + + /* + * Per-port capabilities + * See ionic_set_features to request and check supported features + */ + + dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_SCATTER | + 0; + + dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_TCP_TSO | + 0; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + /* Driver-preferred Rx/Tx parameters */ + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = IONIC_DEF_TXRX_DESC; + dev_info->default_txportconf.ring_size = IONIC_DEF_TXRX_DESC; + + return 0; +} + +static int +ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_dev *idev = &adapter->idev; + + if (idev->port_info) { + fc_conf->autoneg = idev->port_info->config.an_enable; + + if (idev->port_info->config.pause_type) + fc_conf->mode = RTE_FC_FULL; + else + fc_conf->mode = RTE_FC_NONE; + } + + return 0; +} + +static int +ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct 
ionic_adapter *adapter = lif->adapter; + struct ionic_dev *idev = &adapter->idev; + uint8_t pause_type = IONIC_PORT_PAUSE_TYPE_NONE; + uint8_t an_enable; + + switch (fc_conf->mode) { + case RTE_FC_NONE: + pause_type = IONIC_PORT_PAUSE_TYPE_NONE; + break; + case RTE_FC_FULL: + pause_type = IONIC_PORT_PAUSE_TYPE_LINK; + break; + case RTE_FC_RX_PAUSE: + case RTE_FC_TX_PAUSE: + return -ENOTSUP; + } + + an_enable = fc_conf->autoneg; + + ionic_dev_cmd_port_pause(idev, pause_type); + ionic_dev_cmd_port_autoneg(idev, an_enable); + + return 0; +} + +static int +ionic_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct rte_eth_rxmode *rxmode; + rxmode = ð_dev->data->dev_conf.rxmode; + int i; + + if (mask & ETH_VLAN_STRIP_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + struct ionic_qcq *rxq = + eth_dev->data->rx_queues[i]; + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } + lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP; + } else { + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + struct ionic_qcq *rxq = + eth_dev->data->rx_queues[i]; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } + lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP; + } + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + lif->features |= IONIC_ETH_HW_VLAN_RX_FILTER; + else + lif->features &= ~IONIC_ETH_HW_VLAN_RX_FILTER; + } + + ionic_lif_set_features(lif); + + return 0; +} + +static int +ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_identity *ident = &adapter->ident; + uint32_t i, j, index, num; + + IONIC_PRINT_CALL(); + + if (!lif->rss_ind_tbl) { + IONIC_PRINT(ERR, "RSS RETA not initialized, " + "can't update the table"); + return -EINVAL; + } + + if (reta_size != ident->lif.eth.rss_ind_tbl_sz) { + IONIC_PRINT(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", + reta_size, ident->lif.eth.rss_ind_tbl_sz); + return -EINVAL; + } + + num = lif->adapter->ident.lif.eth.rss_ind_tbl_sz / RTE_RETA_GROUP_SIZE; + + for (i = 0; i < num; i++) { + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) { + if (reta_conf[i].mask & ((uint64_t)1 << j)) { + index = (i * RTE_RETA_GROUP_SIZE) + j; + lif->rss_ind_tbl[index] = reta_conf[i].reta[j]; + } + } + } + + return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); +} + +static int +ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_identity *ident = &adapter->ident; + int i, num; + + IONIC_PRINT_CALL(); + + if (reta_size != ident->lif.eth.rss_ind_tbl_sz) { + IONIC_PRINT(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", + reta_size, ident->lif.eth.rss_ind_tbl_sz); + return -EINVAL; + } + + if (!lif->rss_ind_tbl) { + IONIC_PRINT(ERR, "RSS RETA has not been built yet"); + return -EINVAL; + } + + num = reta_size / RTE_RETA_GROUP_SIZE; + + for (i = 0; i < num; i++) { + memcpy(reta_conf->reta, + &lif->rss_ind_tbl[i * RTE_RETA_GROUP_SIZE], + RTE_RETA_GROUP_SIZE); + reta_conf++; + } + + return 0; +} + +static 
int +ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint64_t rss_hf = 0; + + IONIC_PRINT_CALL(); + + if (!lif->rss_ind_tbl) { + IONIC_PRINT(NOTICE, "RSS not enabled"); + return 0; + } + + /* Get key value (if not null, rss_key is 40-byte) */ + if (rss_conf->rss_key != NULL && + rss_conf->rss_key_len >= IONIC_RSS_HASH_KEY_SIZE) + memcpy(rss_conf->rss_key, lif->rss_hash_key, + IONIC_RSS_HASH_KEY_SIZE); + + if (lif->rss_types & IONIC_RSS_TYPE_IPV4) + rss_hf |= ETH_RSS_IPV4; + if (lif->rss_types & IONIC_RSS_TYPE_IPV4_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (lif->rss_types & IONIC_RSS_TYPE_IPV4_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + if (lif->rss_types & IONIC_RSS_TYPE_IPV6) + rss_hf |= ETH_RSS_IPV6; + if (lif->rss_types & IONIC_RSS_TYPE_IPV6_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (lif->rss_types & IONIC_RSS_TYPE_IPV6_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + + rss_conf->rss_hf = rss_hf; + + return 0; +} + +static int +ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t rss_types = 0; + uint8_t *key = NULL; + + IONIC_PRINT_CALL(); + + if (rss_conf->rss_key) + key = rss_conf->rss_key; + + if ((rss_conf->rss_hf & IONIC_ETH_RSS_OFFLOAD_ALL) == 0) { + /* + * Can't disable rss through hash flags, + * if it is enabled by default during init + */ + if (lif->rss_ind_tbl) + return -EINVAL; + } else { + /* Can't enable rss if disabled by default during init */ + if (!lif->rss_ind_tbl) + return -EINVAL; + + if (rss_conf->rss_hf & ETH_RSS_IPV4) + rss_types |= IONIC_RSS_TYPE_IPV4; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + rss_types |= IONIC_RSS_TYPE_IPV4_TCP; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + rss_types |= IONIC_RSS_TYPE_IPV4_UDP; + if (rss_conf->rss_hf & ETH_RSS_IPV6) + rss_types |= IONIC_RSS_TYPE_IPV6; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + rss_types |= IONIC_RSS_TYPE_IPV6_TCP; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + rss_types |= IONIC_RSS_TYPE_IPV6_UDP; + + ionic_lif_rss_config(lif, rss_types, key, NULL); + } + + return 0; +} + +static int +ionic_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *stats) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + + ionic_lif_get_stats(lif, stats); + + return 0; +} + +static int +ionic_dev_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + + IONIC_PRINT_CALL(); + + ionic_lif_reset_stats(lif); + + return 0; +} + +static int +ionic_dev_xstats_get_names(__rte_unused struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int size) +{ + unsigned int i; + + if (xstats_names != NULL) { + for (i = 0; i < IONIC_NB_HW_STATS; i++) { + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", rte_ionic_xstats_strings[i].name); + } + } + + return IONIC_NB_HW_STATS; +} + +static int +ionic_dev_xstats_get_names_by_id(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + unsigned int limit) +{ + struct rte_eth_xstat_name xstats_names_copy[IONIC_NB_HW_STATS]; + uint16_t i; + + if (!ids) { + if (xstats_names != NULL) { + for (i = 0; i < IONIC_NB_HW_STATS; i++) { + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", rte_ionic_xstats_strings[i].name); + } + } + + return IONIC_NB_HW_STATS; + } + + 
ionic_dev_xstats_get_names_by_id(eth_dev, xstats_names_copy, NULL, + IONIC_NB_HW_STATS); + + for (i = 0; i < limit; i++) { + if (ids[i] >= IONIC_NB_HW_STATS) { + IONIC_PRINT(ERR, "id value isn't valid"); + return -1; + } + + strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); + } + + return limit; +} + +static int +ionic_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_lif_stats hw_stats; + uint16_t i; + + if (n < IONIC_NB_HW_STATS) + return IONIC_NB_HW_STATS; + + ionic_lif_get_hw_stats(lif, &hw_stats); + + for (i = 0; i < IONIC_NB_HW_STATS; i++) { + xstats[i].value = *(uint64_t *)(((char *)&hw_stats) + + rte_ionic_xstats_strings[i].offset); + xstats[i].id = i; + } + + return IONIC_NB_HW_STATS; +} + +static int +ionic_dev_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_lif_stats hw_stats; + uint64_t values_copy[IONIC_NB_HW_STATS]; + uint16_t i; + + if (!ids) { + if (!ids && n < IONIC_NB_HW_STATS) + return IONIC_NB_HW_STATS; + + ionic_lif_get_hw_stats(lif, &hw_stats); + + for (i = 0; i < IONIC_NB_HW_STATS; i++) { + values[i] = *(uint64_t *)(((char *)&hw_stats) + + rte_ionic_xstats_strings[i].offset); + } + + return IONIC_NB_HW_STATS; + } + + ionic_dev_xstats_get_by_id(eth_dev, NULL, values_copy, + IONIC_NB_HW_STATS); + + for (i = 0; i < n; i++) { + if (ids[i] >= IONIC_NB_HW_STATS) { + IONIC_PRINT(ERR, "id value isn't valid"); + return -1; + } + + values[i] = values_copy[ids[i]]; + } + + return n; +} + +static int +ionic_dev_xstats_reset(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + + ionic_lif_reset_hw_stats(lif); + + return 0; +} + +static int +ionic_dev_configure(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + int err; + + IONIC_PRINT_CALL(); + + err = ionic_lif_configure(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot configure LIF: %d", err); + return err; + } + + return 0; +} + +static inline uint32_t +ionic_parse_link_speeds(uint16_t link_speeds) +{ + if (link_speeds & ETH_LINK_SPEED_100G) + return 100000; + else if (link_speeds & ETH_LINK_SPEED_50G) + return 50000; + else if (link_speeds & ETH_LINK_SPEED_40G) + return 40000; + else if (link_speeds & ETH_LINK_SPEED_25G) + return 25000; + else if (link_speeds & ETH_LINK_SPEED_10G) + return 10000; + else + return 0; +} + +/* + * Configure device link speed and setup link. + * It returns 0 on success. 
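+ *
+ * For example, link_speeds = ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_25G
+ * forces the port speed: ionic_parse_link_speeds() returns 25000 and
+ * ionic_dev_cmd_port_speed() is issued with that value.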
+ */ +static int +ionic_dev_start(struct rte_eth_dev *eth_dev) +{ + struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_dev *idev = &adapter->idev; + uint32_t allowed_speeds; + int err; + + IONIC_PRINT_CALL(); + + allowed_speeds = + ETH_LINK_SPEED_FIXED | + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | + ETH_LINK_SPEED_100G; + + if (dev_conf->link_speeds & ~allowed_speeds) { + IONIC_PRINT(ERR, "Invalid link setting"); + return -EINVAL; + } + + err = ionic_lif_start(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot start LIF: %d", err); + return err; + } + + if (eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { + uint32_t speed = ionic_parse_link_speeds(dev_conf->link_speeds); + + if (speed) + ionic_dev_cmd_port_speed(idev, speed); + } + + ionic_dev_link_update(eth_dev, 0); + + return 0; +} + +/* + * Stop device: disable rx and tx functions to allow for reconfiguring. + */ +static void +ionic_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + int err; + + IONIC_PRINT_CALL(); + + err = ionic_lif_stop(lif); + if (err) + IONIC_PRINT(ERR, "Cannot stop LIF: %d", err); +} + +/* + * Reset and stop device. + */ +static void +ionic_dev_close(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + int err; + + IONIC_PRINT_CALL(); + + err = ionic_lif_stop(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot stop LIF: %d", err); + return; + } + + err = eth_ionic_dev_uninit(eth_dev); + if (err) { + IONIC_PRINT(ERR, "Cannot destroy LIF: %d", err); + return; + } +} + +static int +eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = (struct ionic_adapter *)init_params; + int err; + + IONIC_PRINT_CALL(); + + eth_dev->dev_ops = &ionic_eth_dev_ops; + eth_dev->rx_pkt_burst = &ionic_recv_pkts; + eth_dev->tx_pkt_burst = &ionic_xmit_pkts; + eth_dev->tx_pkt_prepare = &ionic_prep_pkts; + + /* Multi-process not supported, primary does initialization anyway */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + lif->index = adapter->nlifs; + lif->eth_dev = eth_dev; + lif->adapter = adapter; + adapter->lifs[adapter->nlifs] = lif; + + IONIC_PRINT(DEBUG, "Up to %u MAC addresses supported", + adapter->max_mac_addrs); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("ionic", + RTE_ETHER_ADDR_LEN * adapter->max_mac_addrs, 0); + + if (eth_dev->data->mac_addrs == NULL) { + IONIC_PRINT(ERR, "Failed to allocate %u bytes needed to " + "store MAC addresses", + RTE_ETHER_ADDR_LEN * adapter->max_mac_addrs); + err = -ENOMEM; + goto err; + } + + err = ionic_lif_alloc(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot allocate LIFs: %d, aborting", + err); + goto err; + } + + err = ionic_lif_init(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot init LIFs: %d, aborting", err); + goto err_free_lif; + } + + /* Copy the MAC address */ + rte_ether_addr_copy((struct rte_ether_addr *)lif->mac_addr, + ð_dev->data->mac_addrs[0]); + + IONIC_PRINT(DEBUG, "Port %u initialized", eth_dev->data->port_id); + + return 0; + +err_free_lif: + ionic_lif_free(lif); +err: + return err; +} + +static int +eth_ionic_dev_uninit(struct rte_eth_dev 
*eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + + IONIC_PRINT_CALL(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + adapter->lifs[lif->index] = NULL; + + ionic_lif_deinit(lif); + ionic_lif_free(lif); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->tx_pkt_prepare = NULL; + + return 0; +} + +static int +ionic_configure_intr(struct ionic_adapter *adapter) +{ + struct rte_pci_device *pci_dev = adapter->pci_dev; + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int err; + + IONIC_PRINT(DEBUG, "Configuring %u intrs", adapter->nintrs); + + if (rte_intr_efd_enable(intr_handle, adapter->nintrs)) { + IONIC_PRINT(ERR, "Fail to create eventfd"); + return -1; + } + + if (rte_intr_dp_is_en(intr_handle)) + IONIC_PRINT(DEBUG, + "Packet I/O interrupt on datapath is enabled"); + + if (!intr_handle->intr_vec) { + intr_handle->intr_vec = rte_zmalloc("intr_vec", + adapter->nintrs * sizeof(int), 0); + + if (!intr_handle->intr_vec) { + IONIC_PRINT(ERR, "Failed to allocate %u vectors", + adapter->nintrs); + return -ENOMEM; + } + } + + err = rte_intr_callback_register(intr_handle, + ionic_dev_interrupt_handler, + adapter); + + if (err) { + IONIC_PRINT(ERR, + "Failure registering interrupts handler (%d)", + err); + return err; + } + + /* enable intr mapping */ + err = rte_intr_enable(intr_handle); + + if (err) { + IONIC_PRINT(ERR, "Failure enabling interrupts (%d)", err); + return err; + } + + return 0; +} + +static void +ionic_unconfigure_intr(struct ionic_adapter *adapter) +{ + struct rte_pci_device *pci_dev = adapter->pci_dev; + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + rte_intr_disable(intr_handle); + + rte_intr_callback_unregister(intr_handle, + ionic_dev_interrupt_handler, + adapter); +} + +static int +eth_ionic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_mem_resource *resource; + struct ionic_adapter *adapter; + struct ionic_hw *hw; + unsigned long i; + int err; + + /* Check structs (trigger error at compilation time) */ + ionic_struct_size_checks(); + + /* Multi-process not supported */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + err = -EPERM; + goto err; + } + + IONIC_PRINT(DEBUG, "Initializing device %s", + pci_dev->device.name); + + adapter = rte_zmalloc("ionic", sizeof(*adapter), 0); + if (!adapter) { + IONIC_PRINT(ERR, "OOM"); + err = -ENOMEM; + goto err; + } + + adapter->pci_dev = pci_dev; + hw = &adapter->hw; + + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + + err = ionic_init_mac(hw); + if (err != 0) { + IONIC_PRINT(ERR, "Mac init failed: %d", err); + err = -EIO; + goto err_free_adapter; + } + + adapter->is_mgmt_nic = (pci_dev->id.device_id == IONIC_DEV_ID_ETH_MGMT); + + adapter->num_bars = 0; + for (i = 0; i < PCI_MAX_RESOURCE && i < IONIC_BARS_MAX; i++) { + resource = &pci_dev->mem_resource[i]; + if (resource->phys_addr == 0 || resource->len == 0) + continue; + adapter->bars[adapter->num_bars].vaddr = resource->addr; + adapter->bars[adapter->num_bars].bus_addr = resource->phys_addr; + adapter->bars[adapter->num_bars].len = resource->len; + adapter->num_bars++; + } + + /* Discover ionic dev resources */ + + err = ionic_setup(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot setup device: %d, aborting", err); + goto err_free_adapter; + } + + err = ionic_identify(adapter); + 
if (err) { + IONIC_PRINT(ERR, "Cannot identify device: %d, aborting", + err); + goto err_free_adapter; + } + + err = ionic_init(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot init device: %d, aborting", err); + goto err_free_adapter; + } + + /* Configure the ports */ + err = ionic_port_identify(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot identify port: %d, aborting", + err); + goto err_free_adapter; + } + + err = ionic_port_init(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot init port: %d, aborting", err); + goto err_free_adapter; + } + + /* Configure LIFs */ + err = ionic_lif_identify(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot identify lif: %d, aborting", err); + goto err_free_adapter; + } + + /* Allocate and init LIFs */ + err = ionic_lifs_size(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot size LIFs: %d, aborting", err); + goto err_free_adapter; + } + + adapter->max_mac_addrs = adapter->ident.lif.eth.max_ucast_filters; + + adapter->nlifs = 0; + for (i = 0; i < adapter->ident.dev.nlifs; i++) { + snprintf(name, sizeof(name), "net_%s_lif_%lu", + pci_dev->device.name, i); + + err = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct ionic_lif), + NULL, NULL, + eth_ionic_dev_init, adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot create eth device for " + "ionic lif %s", name); + break; + } + + adapter->nlifs++; + } + + err = ionic_configure_intr(adapter); + + if (err) { + IONIC_PRINT(ERR, "Failed to configure interrupts"); + goto err_free_adapter; + } + + return 0; + +err_free_adapter: + rte_free(adapter); +err: + return err; +} + +static int +eth_ionic_pci_remove(struct rte_pci_device *pci_dev __rte_unused) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct ionic_adapter *adapter = NULL; + struct rte_eth_dev *eth_dev; + struct ionic_lif *lif; + uint32_t i; + + /* Adapter lookup is using (the first) eth_dev name */ + snprintf(name, sizeof(name), "net_%s_lif_0", + pci_dev->device.name); + + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev) { + lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + adapter = lif->adapter; + } + + if (adapter) { + ionic_unconfigure_intr(adapter); + + for (i = 0; i < adapter->nlifs; i++) { + lif = adapter->lifs[i]; + rte_eth_dev_destroy(lif->eth_dev, eth_ionic_dev_uninit); + } + + rte_free(adapter); + } + + return 0; +} + +static struct rte_pci_driver rte_ionic_pmd = { + .id_table = pci_id_ionic_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_ionic_pci_probe, + .remove = eth_ionic_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_ionic, rte_ionic_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_ionic, pci_id_ionic_map); +RTE_PMD_REGISTER_KMOD_DEP(net_ionic, "* igb_uio | uio_pci_generic | vfio-pci"); + +RTE_INIT(ionic_init_log) +{ + ionic_logtype = rte_log_register("pmd.net.ionic"); + if (ionic_logtype >= 0) + rte_log_set_level(ionic_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.h b/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.h new file mode 100644 index 000000000..578e2301f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ */ + +#ifndef _IONIC_ETHDEV_H_ +#define _IONIC_ETHDEV_H_ + +#define IONIC_ETH_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP) + +#define IONIC_ETH_DEV_TO_LIF(eth_dev) ((struct ionic_lif *) \ + (eth_dev)->data->dev_private) +#define IONIC_ETH_DEV_TO_ADAPTER(eth_dev) \ + (IONIC_ETH_DEV_TO_LIF(eth_dev)->adapter) + +#endif /* _IONIC_ETHDEV_H_ */ + diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_if.h b/src/spdk/dpdk/drivers/net/ionic/ionic_if.h new file mode 100644 index 000000000..f83c8711b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_if.h @@ -0,0 +1,2491 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-3-Clause */ +/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. */ + +#ifndef _IONIC_IF_H_ +#define _IONIC_IF_H_ + +#pragma pack(push, 1) + +#define IONIC_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */ +#define IONIC_DEV_INFO_VERSION 1 +#define IONIC_IFNAMSIZ 16 + +/** + * Commands + */ +enum ionic_cmd_opcode { + IONIC_CMD_NOP = 0, + + /* Device commands */ + IONIC_CMD_IDENTIFY = 1, + IONIC_CMD_INIT = 2, + IONIC_CMD_RESET = 3, + IONIC_CMD_GETATTR = 4, + IONIC_CMD_SETATTR = 5, + + /* Port commands */ + IONIC_CMD_PORT_IDENTIFY = 10, + IONIC_CMD_PORT_INIT = 11, + IONIC_CMD_PORT_RESET = 12, + IONIC_CMD_PORT_GETATTR = 13, + IONIC_CMD_PORT_SETATTR = 14, + + /* LIF commands */ + IONIC_CMD_LIF_IDENTIFY = 20, + IONIC_CMD_LIF_INIT = 21, + IONIC_CMD_LIF_RESET = 22, + IONIC_CMD_LIF_GETATTR = 23, + IONIC_CMD_LIF_SETATTR = 24, + + IONIC_CMD_RX_MODE_SET = 30, + IONIC_CMD_RX_FILTER_ADD = 31, + IONIC_CMD_RX_FILTER_DEL = 32, + + /* Queue commands */ + IONIC_CMD_Q_INIT = 40, + IONIC_CMD_Q_CONTROL = 41, + + /* RDMA commands */ + IONIC_CMD_RDMA_RESET_LIF = 50, + IONIC_CMD_RDMA_CREATE_EQ = 51, + IONIC_CMD_RDMA_CREATE_CQ = 52, + IONIC_CMD_RDMA_CREATE_ADMINQ = 53, + + /* QoS commands */ + IONIC_CMD_QOS_CLASS_IDENTIFY = 240, + IONIC_CMD_QOS_CLASS_INIT = 241, + IONIC_CMD_QOS_CLASS_RESET = 242, + + /* Firmware commands */ + IONIC_CMD_FW_DOWNLOAD = 254, + IONIC_CMD_FW_CONTROL = 255, +}; + +/** + * Command Return codes + */ +enum ionic_status_code { + IONIC_RC_SUCCESS = 0, /* Success */ + IONIC_RC_EVERSION = 1, /* Incorrect version for request */ + IONIC_RC_EOPCODE = 2, /* Invalid cmd opcode */ + IONIC_RC_EIO = 3, /* I/O error */ + IONIC_RC_EPERM = 4, /* Permission denied */ + IONIC_RC_EQID = 5, /* Bad qid */ + IONIC_RC_EQTYPE = 6, /* Bad qtype */ + IONIC_RC_ENOENT = 7, /* No such element */ + IONIC_RC_EINTR = 8, /* operation interrupted */ + IONIC_RC_EAGAIN = 9, /* Try again */ + IONIC_RC_ENOMEM = 10, /* Out of memory */ + IONIC_RC_EFAULT = 11, /* Bad address */ + IONIC_RC_EBUSY = 12, /* Device or resource busy */ + IONIC_RC_EEXIST = 13, /* object already exists */ + IONIC_RC_EINVAL = 14, /* Invalid argument */ + IONIC_RC_ENOSPC = 15, /* No space left or alloc failure */ + IONIC_RC_ERANGE = 16, /* Parameter out of range */ + IONIC_RC_BAD_ADDR = 17, /* Descriptor contains a bad ptr */ + IONIC_RC_DEV_CMD = 18, /* Device cmd attempted on AdminQ */ + IONIC_RC_ENOSUPP = 19, /* Operation not supported */ + IONIC_RC_ERROR = 29, /* Generic error */ + + IONIC_RC_ERDMA = 30, /* Generic RDMA error */ +}; + +enum ionic_notifyq_opcode { + IONIC_EVENT_LINK_CHANGE = 1, + IONIC_EVENT_RESET = 2, + IONIC_EVENT_HEARTBEAT = 3, + IONIC_EVENT_LOG = 4, +}; + +/** + * struct cmd - General admin command format + * @opcode: Opcode for the command + * @lif_index: LIF index 
+ * @cmd_data: Opcode-specific command bytes + */ +struct ionic_admin_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 cmd_data[60]; +}; + +/** + * struct ionic_admin_comp - General admin command completion format + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @cmd_data: Command-specific bytes. + * @color: Color bit. (Always 0 for commands issued to the + * Device Cmd Registers.) + */ +struct ionic_admin_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + u8 cmd_data[11]; + u8 color; +#define IONIC_COMP_COLOR_MASK 0x80 +}; + +static inline u8 color_match(u8 color, u8 done_color) +{ + return (!!(color & IONIC_COMP_COLOR_MASK)) == done_color; +} + +/** + * struct ionic_nop_cmd - NOP command + * @opcode: opcode + */ +struct ionic_nop_cmd { + u8 opcode; + u8 rsvd[63]; +}; + +/** + * struct ionic_nop_comp - NOP command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_nop_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct ionic_dev_init_cmd - Device init command + * @opcode: opcode + * @type: device type + */ +struct ionic_dev_init_cmd { + u8 opcode; + u8 type; + u8 rsvd[62]; +}; + +/** + * struct init_comp - Device init command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_dev_init_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct ionic_dev_reset_cmd - Device reset command + * @opcode: opcode + */ +struct ionic_dev_reset_cmd { + u8 opcode; + u8 rsvd[63]; +}; + +/** + * struct reset_comp - Reset command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_dev_reset_comp { + u8 status; + u8 rsvd[15]; +}; + +#define IONIC_IDENTITY_VERSION_1 1 + +/** + * struct ionic_dev_identify_cmd - Driver/device identify command + * @opcode: opcode + * @ver: Highest version of identify supported by driver + */ +struct ionic_dev_identify_cmd { + u8 opcode; + u8 ver; + u8 rsvd[62]; +}; + +/** + * struct dev_identify_comp - Driver/device identify command completion + * @status: The status of the command (enum status_code) + * @ver: Version of identify returned by device + */ +struct ionic_dev_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +enum ionic_os_type { + IONIC_OS_TYPE_LINUX = 1, + IONIC_OS_TYPE_WIN = 2, + IONIC_OS_TYPE_DPDK = 3, + IONIC_OS_TYPE_FREEBSD = 4, + IONIC_OS_TYPE_IPXE = 5, + IONIC_OS_TYPE_ESXI = 6, +}; + +/** + * union drv_identity - driver identity information + * @os_type: OS type (see enum os_type) + * @os_dist: OS distribution, numeric format + * @os_dist_str: OS distribution, string format + * @kernel_ver: Kernel version, numeric format + * @kernel_ver_str: Kernel version, string format + * @driver_ver_str: Driver version, string format + */ +union ionic_drv_identity { + struct { + __le32 os_type; + __le32 os_dist; + char os_dist_str[128]; + __le32 kernel_ver; + char kernel_ver_str[32]; + char driver_ver_str[32]; + }; + __le32 words[512]; +}; + +/** + * union dev_identity - device identity information + * @version: Version of device identify + * @type: Identify type (0 for now) + * @nports: Number of ports provisioned + * @nlifs: Number of LIFs provisioned + * @nintrs: Number of interrupts provisioned + * @ndbpgs_per_lif: Number of doorbell pages per LIF + * @intr_coal_mult: Interrupt coalescing multiplication factor. 
+ * Scale user-supplied interrupt coalescing + * value in usecs to device units using: + * device units = usecs * mult / div + * @intr_coal_div: Interrupt coalescing division factor. + * Scale user-supplied interrupt coalescing + * value in usecs to device units using: + * device units = usecs * mult / div + * + */ +union ionic_dev_identity { + struct { + u8 version; + u8 type; + u8 rsvd[2]; + u8 nports; + u8 rsvd2[3]; + __le32 nlifs; + __le32 nintrs; + __le32 ndbpgs_per_lif; + __le32 intr_coal_mult; + __le32 intr_coal_div; + }; + __le32 words[512]; +}; + +enum ionic_lif_type { + IONIC_LIF_TYPE_CLASSIC = 0, + IONIC_LIF_TYPE_MACVLAN = 1, + IONIC_LIF_TYPE_NETQUEUE = 2, +}; + +/** + * struct ionic_lif_identify_cmd - lif identify command + * @opcode: opcode + * @type: lif type (enum lif_type) + * @ver: version of identify returned by device + */ +struct ionic_lif_identify_cmd { + u8 opcode; + u8 type; + u8 ver; + u8 rsvd[61]; +}; + +/** + * struct ionic_lif_identify_comp - lif identify command completion + * @status: status of the command (enum status_code) + * @ver: version of identify returned by device + */ +struct ionic_lif_identify_comp { + u8 status; + u8 ver; + u8 rsvd2[14]; +}; + +enum ionic_lif_capability { + IONIC_LIF_CAP_ETH = BIT(0), + IONIC_LIF_CAP_RDMA = BIT(1), +}; + +/** + * Logical Queue Types + */ +enum ionic_logical_qtype { + IONIC_QTYPE_ADMINQ = 0, + IONIC_QTYPE_NOTIFYQ = 1, + IONIC_QTYPE_RXQ = 2, + IONIC_QTYPE_TXQ = 3, + IONIC_QTYPE_EQ = 4, + IONIC_QTYPE_MAX = 16, +}; + +/** + * struct ionic_lif_logical_qtype - Descriptor of logical to hardware queue + * type. + * @qtype: Hardware Queue Type. + * @qid_count: Number of Queue IDs of the logical type. + * @qid_base: Minimum Queue ID of the logical type. + */ +struct ionic_lif_logical_qtype { + u8 qtype; + u8 rsvd[3]; + __le32 qid_count; + __le32 qid_base; +}; + +enum ionic_lif_state { + IONIC_LIF_DISABLE = 0, + IONIC_LIF_ENABLE = 1, + IONIC_LIF_HANG_RESET = 2, +}; + +/** + * LIF configuration + * @state: lif state (enum lif_state) + * @name: lif name + * @mtu: mtu + * @mac: station mac address + * @features: features (enum ionic_eth_hw_features) + * @queue_count: queue counts per queue-type + */ +union ionic_lif_config { + struct { + u8 state; + u8 rsvd[3]; + char name[IONIC_IFNAMSIZ]; + __le32 mtu; + u8 mac[6]; + u8 rsvd2[2]; + __le64 features; + __le32 queue_count[IONIC_QTYPE_MAX]; + }; + __le32 words[64]; +}; + +/** + * struct ionic_lif_identity - lif identity information (type-specific) + * + * @capabilities LIF capabilities + * + * Ethernet: + * @version: Ethernet identify structure version. + * @features: Ethernet features supported on this lif type. + * @max_ucast_filters: Number of perfect unicast addresses supported. + * @max_mcast_filters: Number of perfect multicast addresses supported. + * @min_frame_size: Minimum size of frames to be sent + * @max_frame_size: Maximum size of frames to be sent + * @config: LIF config struct with features, mtu, mac, q counts + * + * RDMA: + * @version: RDMA version of opcodes and queue descriptors. + * @qp_opcodes: Number of rdma queue pair opcodes supported. + * @admin_opcodes: Number of rdma admin opcodes supported. + * @npts_per_lif: Page table size per lif + * @nmrs_per_lif: Number of memory regions per lif + * @nahs_per_lif: Number of address handles per lif + * @max_stride: Max work request stride. + * @cl_stride: Cache line stride. + * @pte_stride: Page table entry stride. + * @rrq_stride: Remote RQ work request stride. + * @rsq_stride: Remote SQ work request stride. 
+ * @dcqcn_profiles: Number of DCQCN profiles + * @aq_qtype: RDMA Admin Qtype. + * @sq_qtype: RDMA Send Qtype. + * @rq_qtype: RDMA Receive Qtype. + * @cq_qtype: RDMA Completion Qtype. + * @eq_qtype: RDMA Event Qtype. + */ +union ionic_lif_identity { + struct { + __le64 capabilities; + + struct { + u8 version; + u8 rsvd[3]; + __le32 max_ucast_filters; + __le32 max_mcast_filters; + __le16 rss_ind_tbl_sz; + __le32 min_frame_size; + __le32 max_frame_size; + u8 rsvd2[106]; + union ionic_lif_config config; + } eth; + + struct { + u8 version; + u8 qp_opcodes; + u8 admin_opcodes; + u8 rsvd; + __le32 npts_per_lif; + __le32 nmrs_per_lif; + __le32 nahs_per_lif; + u8 max_stride; + u8 cl_stride; + u8 pte_stride; + u8 rrq_stride; + u8 rsq_stride; + u8 dcqcn_profiles; + u8 rsvd_dimensions[10]; + struct ionic_lif_logical_qtype aq_qtype; + struct ionic_lif_logical_qtype sq_qtype; + struct ionic_lif_logical_qtype rq_qtype; + struct ionic_lif_logical_qtype cq_qtype; + struct ionic_lif_logical_qtype eq_qtype; + } rdma; + }; + __le32 words[512]; +}; + +/** + * struct ionic_lif_init_cmd - LIF init command + * @opcode: opcode + * @type: LIF type (enum lif_type) + * @index: LIF index + * @info_pa: destination address for lif info (struct ionic_lif_info) + */ +struct ionic_lif_init_cmd { + u8 opcode; + u8 type; + __le16 index; + __le32 rsvd; + __le64 info_pa; + u8 rsvd2[48]; +}; + +/** + * struct ionic_lif_init_comp - LIF init command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_lif_init_comp { + u8 status; + u8 rsvd; + __le16 hw_index; + u8 rsvd2[12]; +}; + +/** + * struct ionic_q_init_cmd - Queue init command + * @opcode: opcode + * @type: Logical queue type + * @ver: Queue version (defines opcode/descriptor scope) + * @lif_index: LIF index + * @index: (lif, qtype) relative admin queue index + * @intr_index: Interrupt control register index + * @pid: Process ID + * @flags: + * IRQ: Interrupt requested on completion + * ENA: Enable the queue. If ENA=0 the queue is initialized + * but remains disabled, to be later enabled with the + * Queue Enable command. If ENA=1, then queue is + * initialized and then enabled. + * SG: Enable Scatter-Gather on the queue. + * in number of descs. The actual ring size is + * (1 << ring_size). For example, to + * select a ring size of 64 descriptors write + * ring_size = 6. The minimum ring_size value is 2 + * for a ring size of 4 descriptors. The maximum + * ring_size value is 16 for a ring size of 64k + * descriptors. Values of ring_size <2 and >16 are + * reserved. + * EQ: Enable the Event Queue + * @cos: Class of service for this queue. 
+ * @ring_size: Queue ring size, encoded as a log2(size) + * @ring_base: Queue ring base address + * @cq_ring_base: Completion queue ring base address + * @sg_ring_base: Scatter/Gather ring base address + * @eq_index: Event queue index + */ +struct ionic_q_init_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 type; + u8 ver; + u8 rsvd1[2]; + __le32 index; + __le16 pid; + __le16 intr_index; + __le16 flags; +#define IONIC_QINIT_F_IRQ 0x01 /* Request interrupt on completion */ +#define IONIC_QINIT_F_ENA 0x02 /* Enable the queue */ +#define IONIC_QINIT_F_SG 0x04 /* Enable scatter/gather on the queue */ +#define IONIC_QINIT_F_EQ 0x08 /* Enable event queue */ +#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */ + u8 cos; + u8 ring_size; + __le64 ring_base; + __le64 cq_ring_base; + __le64 sg_ring_base; + __le32 eq_index; + u8 rsvd2[16]; +}; + +/** + * struct ionic_q_init_comp - Queue init command completion + * @status: The status of the command (enum status_code) + * @ver: Queue version (defines opcode/descriptor scope) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @hw_index: Hardware Queue ID + * @hw_type: Hardware Queue type + * @color: Color + */ +struct ionic_q_init_comp { + u8 status; + u8 ver; + __le16 comp_index; + __le32 hw_index; + u8 hw_type; + u8 rsvd2[6]; + u8 color; +}; + +/* the device's internal addressing uses up to 52 bits */ +#define IONIC_ADDR_LEN 52 +#define IONIC_ADDR_MASK (BIT_ULL(IONIC_ADDR_LEN) - 1) + +enum ionic_txq_desc_opcode { + IONIC_TXQ_DESC_OPCODE_CSUM_NONE = 0, + IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL = 1, + IONIC_TXQ_DESC_OPCODE_CSUM_HW = 2, + IONIC_TXQ_DESC_OPCODE_TSO = 3, +}; + +/** + * struct ionic_txq_desc - Ethernet Tx queue descriptor format + * @opcode: Tx operation, see TXQ_DESC_OPCODE_*: + * + * IONIC_TXQ_DESC_OPCODE_CSUM_NONE: + * + * Non-offload send. No segmentation, + * fragmentation or checksum calc/insertion is + * performed by device; packet is prepared + * to send by software stack and requires + * no further manipulation from device. + * + * IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL: + * + * Offload 16-bit L4 checksum + * calculation/insertion. The device will + * calculate the L4 checksum value and + * insert the result in the packet's L4 + * header checksum field. The L4 checksum + * is calculated starting at @csum_start bytes + * into the packet to the end of the packet. + * The checksum insertion position is given + * in @csum_offset. This feature is only + * applicable to protocols such as TCP, UDP + * and ICMP where a standard (i.e. the + * 'IP-style' checksum) one's complement + * 16-bit checksum is used, using an IP + * pseudo-header to seed the calculation. + * Software will preload the L4 checksum + * field with the IP pseudo-header checksum. + * + * For tunnel encapsulation, @csum_start and + * @csum_offset refer to the inner L4 + * header. Supported tunnels encapsulations + * are: IPIP, GRE, and UDP. If the @encap + * is clear, no further processing by the + * device is required; software will + * calculate the outer header checksums. If + * the @encap is set, the device will + * offload the outer header checksums using + * LCO (local checksum offload) (see + * Documentation/networking/checksum- + * offloads.txt for more info). + * + * IONIC_TXQ_DESC_OPCODE_CSUM_HW: + * + * Offload 16-bit checksum computation to hardware. + * If @csum_l3 is set then the packet's L3 checksum is + * updated. Similarly, if @csum_l4 is set the the L4 + * checksum is updated. 
If @encap is set then encap header + * checksums are also updated. + * + * IONIC_TXQ_DESC_OPCODE_TSO: + * + * Device performs TCP segmentation offload + * (TSO). @hdr_len is the number of bytes + * to the end of TCP header (the offset to + * the TCP payload). @mss is the desired + * MSS, the TCP payload length for each + * segment. The device will calculate/ + * insert IP (IPv4 only) and TCP checksums + * for each segment. In the first data + * buffer containing the header template, + * the driver will set IPv4 checksum to 0 + * and preload TCP checksum with the IP + * pseudo header calculated with IP length = 0. + * + * Supported tunnel encapsulations are IPIP, + * layer-3 GRE, and UDP. @hdr_len includes + * both outer and inner headers. The driver + * will set IPv4 checksum to zero and + * preload TCP checksum with IP pseudo + * header on the inner header. + * + * TCP ECN offload is supported. The device + * will set CWR flag in the first segment if + * CWR is set in the template header, and + * clear CWR in remaining segments. + * @flags: + * vlan: + * Insert an L2 VLAN header using @vlan_tci. + * encap: + * Calculate encap header checksum. + * csum_l3: + * Compute L3 header checksum. + * csum_l4: + * Compute L4 header checksum. + * tso_sot: + * TSO start + * tso_eot: + * TSO end + * @num_sg_elems: Number of scatter-gather elements in SG + * descriptor + * @addr: First data buffer's DMA address. + * (Subsequent data buffers are on txq_sg_desc). + * @len: First data buffer's length, in bytes + * @vlan_tci: VLAN tag to insert in the packet (if requested + * by @V-bit). Includes .1p and .1q tags + * @hdr_len: Length of packet headers, including + * encapsulating outer header, if applicable. + * Valid for opcodes TXQ_DESC_OPCODE_CALC_CSUM and + * TXQ_DESC_OPCODE_TSO. Should be set to zero for + * all other modes. For + * TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length + * of headers up to inner-most L4 header. For + * TXQ_DESC_OPCODE_TSO, @hdr_len is up to + * inner-most L4 payload, so inclusive of + * inner-most L4 header. + * @mss: Desired MSS value for TSO. Only applicable for + * TXQ_DESC_OPCODE_TSO. 
+ * @csum_start: Offset into inner-most L3 header of checksum + * @csum_offset: Offset into inner-most L4 header of checksum + */ + +#define IONIC_TXQ_DESC_OPCODE_MASK 0xf +#define IONIC_TXQ_DESC_OPCODE_SHIFT 4 +#define IONIC_TXQ_DESC_FLAGS_MASK 0xf +#define IONIC_TXQ_DESC_FLAGS_SHIFT 0 +#define IONIC_TXQ_DESC_NSGE_MASK 0xf +#define IONIC_TXQ_DESC_NSGE_SHIFT 8 +#define IONIC_TXQ_DESC_ADDR_MASK (BIT_ULL(IONIC_ADDR_LEN) - 1) +#define IONIC_TXQ_DESC_ADDR_SHIFT 12 + +/* common flags */ +#define IONIC_TXQ_DESC_FLAG_VLAN 0x1 +#define IONIC_TXQ_DESC_FLAG_ENCAP 0x2 + +/* flags for csum_hw opcode */ +#define IONIC_TXQ_DESC_FLAG_CSUM_L3 0x4 +#define IONIC_TXQ_DESC_FLAG_CSUM_L4 0x8 + +/* flags for tso opcode */ +#define IONIC_TXQ_DESC_FLAG_TSO_SOT 0x4 +#define IONIC_TXQ_DESC_FLAG_TSO_EOT 0x8 + +struct ionic_txq_desc { + __le64 cmd; + __le16 len; + union { + __le16 vlan_tci; + __le16 hword0; + }; + union { + __le16 csum_start; + __le16 hdr_len; + __le16 hword1; + }; + union { + __le16 csum_offset; + __le16 mss; + __le16 hword2; + }; +}; + +static inline u64 encode_txq_desc_cmd(u8 opcode, u8 flags, + u8 nsge, u64 addr) +{ + u64 cmd; + + cmd = (opcode & IONIC_TXQ_DESC_OPCODE_MASK) << + IONIC_TXQ_DESC_OPCODE_SHIFT; + cmd |= (flags & IONIC_TXQ_DESC_FLAGS_MASK) << + IONIC_TXQ_DESC_FLAGS_SHIFT; + cmd |= (nsge & IONIC_TXQ_DESC_NSGE_MASK) << IONIC_TXQ_DESC_NSGE_SHIFT; + cmd |= (addr & IONIC_TXQ_DESC_ADDR_MASK) << IONIC_TXQ_DESC_ADDR_SHIFT; + + return cmd; +}; + +static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags, + u8 *nsge, u64 *addr) +{ + *opcode = (cmd >> IONIC_TXQ_DESC_OPCODE_SHIFT) & + IONIC_TXQ_DESC_OPCODE_MASK; + *flags = (cmd >> IONIC_TXQ_DESC_FLAGS_SHIFT) & + IONIC_TXQ_DESC_FLAGS_MASK; + *nsge = (cmd >> IONIC_TXQ_DESC_NSGE_SHIFT) & IONIC_TXQ_DESC_NSGE_MASK; + *addr = (cmd >> IONIC_TXQ_DESC_ADDR_SHIFT) & IONIC_TXQ_DESC_ADDR_MASK; +}; + +#define IONIC_TX_MAX_SG_ELEMS 8 +#define IONIC_RX_MAX_SG_ELEMS 8 + +/** + * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list + * @addr: DMA address of SG element data buffer + * @len: Length of SG element data buffer, in bytes + */ +struct ionic_txq_sg_desc { + struct ionic_txq_sg_elem { + __le64 addr; + __le16 len; + __le16 rsvd[3]; + } elems[IONIC_TX_MAX_SG_ELEMS]; +}; + +/** + * struct ionic_txq_comp - Ethernet transmit queue completion descriptor + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @color: Color bit. + */ +struct ionic_txq_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + u8 rsvd2[11]; + u8 color; +}; + +enum ionic_rxq_desc_opcode { + IONIC_RXQ_DESC_OPCODE_SIMPLE = 0, + IONIC_RXQ_DESC_OPCODE_SG = 1, +}; + +/** + * struct ionic_rxq_desc - Ethernet Rx queue descriptor format + * @opcode: Rx operation, see RXQ_DESC_OPCODE_*: + * + * RXQ_DESC_OPCODE_SIMPLE: + * + * Receive full packet into data buffer + * starting at @addr. Results of + * receive, including actual bytes received, + * are recorded in Rx completion descriptor. + * + * @len: Data buffer's length, in bytes. 
+ * @addr: Data buffer's DMA address + */ +struct ionic_rxq_desc { + u8 opcode; + u8 rsvd[5]; + __le16 len; + __le64 addr; +}; + +/** + * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list + * @addr: DMA address of SG element data buffer + * @len: Length of SG element data buffer, in bytes + */ +struct ionic_rxq_sg_desc { + struct ionic_rxq_sg_elem { + __le64 addr; + __le16 len; + __le16 rsvd[3]; + } elems[IONIC_RX_MAX_SG_ELEMS]; +}; + +/** + * struct ionic_rxq_comp - Ethernet receive queue completion descriptor + * @status: The status of the command (enum status_code) + * @num_sg_elems: Number of SG elements used by this descriptor + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @rss_hash: 32-bit RSS hash + * @csum: 16-bit sum of the packet's L2 payload. + * If the packet's L2 payload is odd length, an extra + * zero-value byte is included in the @csum calculation but + * not included in @len. + * @vlan_tci: VLAN tag stripped from the packet. Valid if @VLAN is + * set. Includes .1p and .1q tags. + * @len: Received packet length, in bytes. Excludes FCS. + * @csum_calc L2 payload checksum is computed or not + * @csum_tcp_ok: The TCP checksum calculated by the device + * matched the checksum in the receive packet's + * TCP header + * @csum_tcp_bad: The TCP checksum calculated by the device did + * not match the checksum in the receive packet's + * TCP header. + * @csum_udp_ok: The UDP checksum calculated by the device + * matched the checksum in the receive packet's + * UDP header + * @csum_udp_bad: The UDP checksum calculated by the device did + * not match the checksum in the receive packet's + * UDP header. + * @csum_ip_ok: The IPv4 checksum calculated by the device + * matched the checksum in the receive packet's + * first IPv4 header. If the receive packet + * contains both a tunnel IPv4 header and a + * transport IPv4 header, the device validates the + * checksum for the both IPv4 headers. + * @csum_ip_bad: The IPv4 checksum calculated by the device did + * not match the checksum in the receive packet's + * first IPv4 header. If the receive packet + * contains both a tunnel IPv4 header and a + * transport IPv4 header, the device validates the + * checksum for both IP headers. + * @VLAN: VLAN header was stripped and placed in @vlan_tci. + * @pkt_type: Packet type + * @color: Color bit. 
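+ *
+ * A minimal receive-completion sketch (illustrative only; done_color and
+ * ol_flags are hypothetical driver state, byte-order helpers come from
+ * rte_byteorder.h, and mbuf flag names follow rte_mbuf.h):
+ *
+ *   if (color_match(comp->pkt_type_color, done_color)) {
+ *           uint8_t pkt_type = comp->pkt_type_color &
+ *                   IONIC_RXQ_COMP_PKT_TYPE_MASK;
+ *           uint16_t pkt_len = rte_le_to_cpu_16(comp->len);
+ *           if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK)
+ *                   ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ *   }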
+ */ +struct ionic_rxq_comp { + u8 status; + u8 num_sg_elems; + __le16 comp_index; + __le32 rss_hash; + __le16 csum; + __le16 vlan_tci; + __le16 len; + u8 csum_flags; +#define IONIC_RXQ_COMP_CSUM_F_TCP_OK 0x01 +#define IONIC_RXQ_COMP_CSUM_F_TCP_BAD 0x02 +#define IONIC_RXQ_COMP_CSUM_F_UDP_OK 0x04 +#define IONIC_RXQ_COMP_CSUM_F_UDP_BAD 0x08 +#define IONIC_RXQ_COMP_CSUM_F_IP_OK 0x10 +#define IONIC_RXQ_COMP_CSUM_F_IP_BAD 0x20 +#define IONIC_RXQ_COMP_CSUM_F_VLAN 0x40 +#define IONIC_RXQ_COMP_CSUM_F_CALC 0x80 + u8 pkt_type_color; +#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x7f +}; + +enum ionic_pkt_type { + IONIC_PKT_TYPE_NON_IP = 0x000, + IONIC_PKT_TYPE_IPV4 = 0x001, + IONIC_PKT_TYPE_IPV4_TCP = 0x003, + IONIC_PKT_TYPE_IPV4_UDP = 0x005, + IONIC_PKT_TYPE_IPV6 = 0x008, + IONIC_PKT_TYPE_IPV6_TCP = 0x018, + IONIC_PKT_TYPE_IPV6_UDP = 0x028, +}; + +enum ionic_eth_hw_features { + IONIC_ETH_HW_VLAN_TX_TAG = BIT(0), + IONIC_ETH_HW_VLAN_RX_STRIP = BIT(1), + IONIC_ETH_HW_VLAN_RX_FILTER = BIT(2), + IONIC_ETH_HW_RX_HASH = BIT(3), + IONIC_ETH_HW_RX_CSUM = BIT(4), + IONIC_ETH_HW_TX_SG = BIT(5), + IONIC_ETH_HW_RX_SG = BIT(6), + IONIC_ETH_HW_TX_CSUM = BIT(7), + IONIC_ETH_HW_TSO = BIT(8), + IONIC_ETH_HW_TSO_IPV6 = BIT(9), + IONIC_ETH_HW_TSO_ECN = BIT(10), + IONIC_ETH_HW_TSO_GRE = BIT(11), + IONIC_ETH_HW_TSO_GRE_CSUM = BIT(12), + IONIC_ETH_HW_TSO_IPXIP4 = BIT(13), + IONIC_ETH_HW_TSO_IPXIP6 = BIT(14), + IONIC_ETH_HW_TSO_UDP = BIT(15), + IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16), +}; + +/** + * struct ionic_q_control_cmd - Queue control command + * @opcode: opcode + * @type: Queue type + * @lif_index: LIF index + * @index: Queue index + * @oper: Operation (enum q_control_oper) + */ +struct ionic_q_control_cmd { + u8 opcode; + u8 type; + __le16 lif_index; + __le32 index; + u8 oper; + u8 rsvd[55]; +}; + +typedef struct ionic_admin_comp ionic_q_control_comp; + +enum q_control_oper { + IONIC_Q_DISABLE = 0, + IONIC_Q_ENABLE = 1, + IONIC_Q_HANG_RESET = 2, +}; + +/** + * Physical connection type + */ +enum ionic_phy_type { + IONIC_PHY_TYPE_NONE = 0, + IONIC_PHY_TYPE_COPPER = 1, + IONIC_PHY_TYPE_FIBER = 2, +}; + +/** + * Transceiver status + */ +enum ionic_xcvr_state { + IONIC_XCVR_STATE_REMOVED = 0, + IONIC_XCVR_STATE_INSERTED = 1, + IONIC_XCVR_STATE_PENDING = 2, + IONIC_XCVR_STATE_SPROM_READ = 3, + IONIC_XCVR_STATE_SPROM_READ_ERR = 4, +}; + +/** + * Supported link modes + */ +enum ionic_xcvr_pid { + IONIC_XCVR_PID_UNKNOWN = 0, + + /* CU */ + IONIC_XCVR_PID_QSFP_100G_CR4 = 1, + IONIC_XCVR_PID_QSFP_40GBASE_CR4 = 2, + IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3, + IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4, + IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5, + + /* Fiber */ + IONIC_XCVR_PID_QSFP_100G_AOC = 50, + IONIC_XCVR_PID_QSFP_100G_ACC = 51, + IONIC_XCVR_PID_QSFP_100G_SR4 = 52, + IONIC_XCVR_PID_QSFP_100G_LR4 = 53, + IONIC_XCVR_PID_QSFP_100G_ER4 = 54, + IONIC_XCVR_PID_QSFP_40GBASE_ER4 = 55, + IONIC_XCVR_PID_QSFP_40GBASE_SR4 = 56, + IONIC_XCVR_PID_QSFP_40GBASE_LR4 = 57, + IONIC_XCVR_PID_QSFP_40GBASE_AOC = 58, + IONIC_XCVR_PID_SFP_25GBASE_SR = 59, + IONIC_XCVR_PID_SFP_25GBASE_LR = 60, + IONIC_XCVR_PID_SFP_25GBASE_ER = 61, + IONIC_XCVR_PID_SFP_25GBASE_AOC = 62, + IONIC_XCVR_PID_SFP_10GBASE_SR = 63, + IONIC_XCVR_PID_SFP_10GBASE_LR = 64, + IONIC_XCVR_PID_SFP_10GBASE_LRM = 65, + IONIC_XCVR_PID_SFP_10GBASE_ER = 66, + IONIC_XCVR_PID_SFP_10GBASE_AOC = 67, + IONIC_XCVR_PID_SFP_10GBASE_CU = 68, + IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69, + IONIC_XCVR_PID_QSFP_100G_PSM4 = 70, +}; + +/** + * Port types + */ +enum ionic_port_type { + IONIC_PORT_TYPE_NONE = 0, /* port type not 
configured */ + IONIC_PORT_TYPE_ETH = 1, /* port carries ethernet traffic (inband) */ + IONIC_PORT_TYPE_MGMT = 2, /* port carries mgmt traffic (out-of-band) */ +}; + +/** + * Port config state + */ +enum ionic_port_admin_state { + IONIC_PORT_ADMIN_STATE_NONE = 0, /* port admin state not configured */ + IONIC_PORT_ADMIN_STATE_DOWN = 1, /* port is admin disabled */ + IONIC_PORT_ADMIN_STATE_UP = 2, /* port is admin enabled */ +}; + +/** + * Port operational status + */ +enum ionic_port_oper_status { + IONIC_PORT_OPER_STATUS_NONE = 0, /* port is disabled */ + IONIC_PORT_OPER_STATUS_UP = 1, /* port is linked up */ + IONIC_PORT_OPER_STATUS_DOWN = 2, /* port link status is down */ +}; + +/** + * Ethernet Forward error correction (fec) modes + */ +enum ionic_port_fec_type { + IONIC_PORT_FEC_TYPE_NONE = 0, /* Disabled */ + IONIC_PORT_FEC_TYPE_FC = 1, /* FireCode */ + IONIC_PORT_FEC_TYPE_RS = 2, /* ReedSolomon */ +}; + +/** + * Ethernet pause (flow control) modes + */ +enum ionic_port_pause_type { + IONIC_PORT_PAUSE_TYPE_NONE = 0, /* Disable Pause */ + IONIC_PORT_PAUSE_TYPE_LINK = 1, /* Link level pause */ + IONIC_PORT_PAUSE_TYPE_PFC = 2, /* Priority-Flow control */ +}; + +/** + * Loopback modes + */ +enum ionic_port_loopback_mode { + IONIC_PORT_LOOPBACK_MODE_NONE = 0, /* Disable loopback */ + IONIC_PORT_LOOPBACK_MODE_MAC = 1, /* MAC loopback */ + IONIC_PORT_LOOPBACK_MODE_PHY = 2, /* PHY/Serdes loopback */ +}; + +/** + * Transceiver Status information + * @state: Transceiver status (enum ionic_xcvr_state) + * @phy: Physical connection type (enum ionic_phy_type) + * @pid: Transceiver link mode (enum pid) + * @sprom: Transceiver sprom contents + */ +struct ionic_xcvr_status { + u8 state; + u8 phy; + __le16 pid; + u8 sprom[256]; +}; + +/** + * Port configuration + * @speed: port speed (in Mbps) + * @mtu: mtu + * @state: port admin state (enum port_admin_state) + * @an_enable: autoneg enable + * @fec_type: fec type (enum ionic_port_fec_type) + * @pause_type: pause type (enum ionic_port_pause_type) + * @loopback_mode: loopback mode (enum ionic_port_loopback_mode) + */ +union ionic_port_config { + struct { +#define IONIC_SPEED_100G 100000 /* 100G in Mbps */ +#define IONIC_SPEED_50G 50000 /* 50G in Mbps */ +#define IONIC_SPEED_40G 40000 /* 40G in Mbps */ +#define IONIC_SPEED_25G 25000 /* 25G in Mbps */ +#define IONIC_SPEED_10G 10000 /* 10G in Mbps */ +#define IONIC_SPEED_1G 1000 /* 1G in Mbps */ + __le32 speed; + __le32 mtu; + u8 state; + u8 an_enable; + u8 fec_type; +#define IONIC_PAUSE_TYPE_MASK 0x0f +#define IONIC_PAUSE_FLAGS_MASK 0xf0 +#define IONIC_PAUSE_F_TX 0x10 +#define IONIC_PAUSE_F_RX 0x20 + u8 pause_type; + u8 loopback_mode; + }; + __le32 words[64]; +}; + +/** + * Port Status information + * @status: link status (enum ionic_port_oper_status) + * @id: port id + * @speed: link speed (in Mbps) + * @xcvr: transceiver status + */ +struct ionic_port_status { + __le32 id; + __le32 speed; + u8 status; + u8 rsvd[51]; + struct ionic_xcvr_status xcvr; +}; + +/** + * struct ionic_port_identify_cmd - Port identify command + * @opcode: opcode + * @index: port index + * @ver: Highest version of identify supported by driver + */ +struct ionic_port_identify_cmd { + u8 opcode; + u8 index; + u8 ver; + u8 rsvd[61]; +}; + +/** + * struct ionic_port_identify_comp - Port identify command completion + * @status: The status of the command (enum status_code) + * @ver: Version of identify returned by device + */ +struct ionic_port_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +/** + * struct 
ionic_port_init_cmd - Port initialization command + * @opcode: opcode + * @index: port index + * @info_pa: destination address for port info (struct ionic_port_info) + */ +struct ionic_port_init_cmd { + u8 opcode; + u8 index; + u8 rsvd[6]; + __le64 info_pa; + u8 rsvd2[48]; +}; + +/** + * struct ionic_port_init_comp - Port initialization command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_port_init_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct ionic_port_reset_cmd - Port reset command + * @opcode: opcode + * @index: port index + */ +struct ionic_port_reset_cmd { + u8 opcode; + u8 index; + u8 rsvd[62]; +}; + +/** + * struct ionic_port_reset_comp - Port reset command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_port_reset_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * enum stats_ctl_cmd - List of commands for stats control + */ +enum ionic_stats_ctl_cmd { + IONIC_STATS_CTL_RESET = 0, +}; + + +/** + * enum ionic_port_attr - List of device attributes + */ +enum ionic_port_attr { + IONIC_PORT_ATTR_STATE = 0, + IONIC_PORT_ATTR_SPEED = 1, + IONIC_PORT_ATTR_MTU = 2, + IONIC_PORT_ATTR_AUTONEG = 3, + IONIC_PORT_ATTR_FEC = 4, + IONIC_PORT_ATTR_PAUSE = 5, + IONIC_PORT_ATTR_LOOPBACK = 6, + IONIC_PORT_ATTR_STATS_CTRL = 7, +}; + +/** + * struct ionic_port_setattr_cmd - Set port attributes on the NIC + * @opcode: Opcode + * @index: port index + * @attr: Attribute type (enum ionic_port_attr) + */ +struct ionic_port_setattr_cmd { + u8 opcode; + u8 index; + u8 attr; + u8 rsvd; + union { + u8 state; + __le32 speed; + __le32 mtu; + u8 an_enable; + u8 fec_type; + u8 pause_type; + u8 loopback_mode; + u8 stats_ctl; + u8 rsvd2[60]; + }; +}; + +/** + * struct ionic_port_setattr_comp - Port set attr command completion + * @status: The status of the command (enum status_code) + * @color: Color bit + */ +struct ionic_port_setattr_comp { + u8 status; + u8 rsvd[14]; + u8 color; +}; + +/** + * struct ionic_port_getattr_cmd - Get port attributes from the NIC + * @opcode: Opcode + * @index: port index + * @attr: Attribute type (enum ionic_port_attr) + */ +struct ionic_port_getattr_cmd { + u8 opcode; + u8 index; + u8 attr; + u8 rsvd[61]; +}; + +/** + * struct ionic_port_getattr_comp - Port get attr command completion + * @status: The status of the command (enum status_code) + * @color: Color bit + */ +struct ionic_port_getattr_comp { + u8 status; + u8 rsvd[3]; + union { + u8 state; + __le32 speed; + __le32 mtu; + u8 an_enable; + u8 fec_type; + u8 pause_type; + u8 loopback_mode; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * struct ionic_lif_status - Lif status register + * @eid: most recent NotifyQ event id + * @port_num: port the lif is connected to + * @link_status: port status (enum ionic_port_oper_status) + * @link_speed: speed of link in Mbps + * @link_down_count: number of times link status changes + */ +struct ionic_lif_status { + __le64 eid; + u8 port_num; + u8 rsvd; + __le16 link_status; + __le32 link_speed; /* units of 1Mbps: eg 10000 = 10Gbps */ + __le16 link_down_count; + u8 rsvd2[46]; +}; + +/** + * struct ionic_lif_reset_cmd - LIF reset command + * @opcode: opcode + * @index: LIF index + */ +struct ionic_lif_reset_cmd { + u8 opcode; + u8 rsvd; + __le16 index; + __le32 rsvd2[15]; +}; + +typedef struct ionic_admin_comp ionic_lif_reset_comp; + +enum ionic_dev_state { + IONIC_DEV_DISABLE = 0, + IONIC_DEV_ENABLE = 1, + IONIC_DEV_HANG_RESET = 2, +}; + +/** + * enum ionic_dev_attr - List of device attributes + */ 
+enum ionic_dev_attr { + IONIC_DEV_ATTR_STATE = 0, + IONIC_DEV_ATTR_NAME = 1, + IONIC_DEV_ATTR_FEATURES = 2, +}; + +/** + * struct ionic_dev_setattr_cmd - Set Device attributes on the NIC + * @opcode: Opcode + * @attr: Attribute type (enum ionic_dev_attr) + * @state: Device state (enum ionic_dev_state) + * @name: The bus info, e.g. PCI slot-device-function, 0 terminated + * @features: Device features + */ +struct ionic_dev_setattr_cmd { + u8 opcode; + u8 attr; + __le16 rsvd; + union { + u8 state; + char name[IONIC_IFNAMSIZ]; + __le64 features; + u8 rsvd2[60]; + }; +}; + +/** + * struct ionic_dev_setattr_comp - Device set attr command completion + * @status: The status of the command (enum status_code) + * @features: Device features + * @color: Color bit + */ +struct ionic_dev_setattr_comp { + u8 status; + u8 rsvd[3]; + union { + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * struct ionic_dev_getattr_cmd - Get Device attributes from the NIC + * @opcode: opcode + * @attr: Attribute type (enum ionic_dev_attr) + */ +struct ionic_dev_getattr_cmd { + u8 opcode; + u8 attr; + u8 rsvd[62]; +}; + +/** + * struct ionic_dev_setattr_comp - Device set attr command completion + * @status: The status of the command (enum status_code) + * @features: Device features + * @color: Color bit + */ +struct ionic_dev_getattr_comp { + u8 status; + u8 rsvd[3]; + union { + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * RSS parameters + */ +#define IONIC_RSS_HASH_KEY_SIZE 40 + +enum ionic_rss_hash_types { + IONIC_RSS_TYPE_IPV4 = BIT(0), + IONIC_RSS_TYPE_IPV4_TCP = BIT(1), + IONIC_RSS_TYPE_IPV4_UDP = BIT(2), + IONIC_RSS_TYPE_IPV6 = BIT(3), + IONIC_RSS_TYPE_IPV6_TCP = BIT(4), + IONIC_RSS_TYPE_IPV6_UDP = BIT(5), +}; + +/** + * enum ionic_lif_attr - List of LIF attributes + */ +enum ionic_lif_attr { + IONIC_LIF_ATTR_STATE = 0, + IONIC_LIF_ATTR_NAME = 1, + IONIC_LIF_ATTR_MTU = 2, + IONIC_LIF_ATTR_MAC = 3, + IONIC_LIF_ATTR_FEATURES = 4, + IONIC_LIF_ATTR_RSS = 5, + IONIC_LIF_ATTR_STATS_CTRL = 6, +}; + +/** + * struct ionic_lif_setattr_cmd - Set LIF attributes on the NIC + * @opcode: Opcode + * @type: Attribute type (enum ionic_lif_attr) + * @index: LIF index + * @state: lif state (enum lif_state) + * @name: The netdev name string, 0 terminated + * @mtu: Mtu + * @mac: Station mac + * @features: Features (enum ionic_eth_hw_features) + * @rss: RSS properties + * @types: The hash types to enable (see rss_hash_types). + * @key: The hash secret key. + * @addr: Address for the indirection table shared memory. + * @stats_ctl: stats control commands (enum stats_ctl_cmd) + */ +struct ionic_lif_setattr_cmd { + u8 opcode; + u8 attr; + __le16 index; + union { + u8 state; + char name[IONIC_IFNAMSIZ]; + __le32 mtu; + u8 mac[6]; + __le64 features; + struct { + __le16 types; + u8 key[IONIC_RSS_HASH_KEY_SIZE]; + u8 rsvd[6]; + __le64 addr; + } rss; + u8 stats_ctl; + u8 rsvd[60]; + }; +}; + +/** + * struct ionic_lif_setattr_comp - LIF set attr command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. 
+ * @features: features (enum ionic_eth_hw_features) + * @color: Color bit + */ +struct ionic_lif_setattr_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + union { + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * struct ionic_lif_getattr_cmd - Get LIF attributes from the NIC + * @opcode: Opcode + * @attr: Attribute type (enum ionic_lif_attr) + * @index: LIF index + */ +struct ionic_lif_getattr_cmd { + u8 opcode; + u8 attr; + __le16 index; + u8 rsvd[60]; +}; + +/** + * struct ionic_lif_getattr_comp - LIF get attr command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @state: lif state (enum lif_state) + * @name: The netdev name string, 0 terminated + * @mtu: Mtu + * @mac: Station mac + * @features: Features (enum ionic_eth_hw_features) + * @color: Color bit + */ +struct ionic_lif_getattr_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + union { + u8 state; + __le32 mtu; + u8 mac[6]; + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +enum ionic_rx_mode { + IONIC_RX_MODE_F_UNICAST = BIT(0), + IONIC_RX_MODE_F_MULTICAST = BIT(1), + IONIC_RX_MODE_F_BROADCAST = BIT(2), + IONIC_RX_MODE_F_PROMISC = BIT(3), + IONIC_RX_MODE_F_ALLMULTI = BIT(4), +}; + +/** + * struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command + * @opcode: opcode + * @lif_index: LIF index + * @rx_mode: Rx mode flags: + * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets. + * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets. + * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets. + * IONIC_RX_MODE_F_PROMISC: Accept any packets. + * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets. + */ +struct ionic_rx_mode_set_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le16 rx_mode; + __le16 rsvd2[29]; +}; + +typedef struct ionic_admin_comp ionic_rx_mode_set_comp; + +enum ionic_rx_filter_match_type { + IONIC_RX_FILTER_MATCH_VLAN = 0, + IONIC_RX_FILTER_MATCH_MAC, + IONIC_RX_FILTER_MATCH_MAC_VLAN, +}; + +/** + * struct ionic_rx_filter_add_cmd - Add LIF Rx filter command + * @opcode: opcode + * @qtype: Queue type + * @lif_index: LIF index + * @qid: Queue ID + * @match: Rx filter match type. (See IONIC_RX_FILTER_MATCH_xxx) + * @vlan: VLAN ID + * @addr: MAC address (network-byte order) + */ +struct ionic_rx_filter_add_cmd { + u8 opcode; + u8 qtype; + __le16 lif_index; + __le32 qid; + __le16 match; + union { + struct { + __le16 vlan; + } vlan; + struct { + u8 addr[6]; + } mac; + struct { + __le16 vlan; + u8 addr[6]; + } mac_vlan; + u8 rsvd[54]; + }; +}; + +/** + * struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @filter_id: Filter ID + * @color: Color bit. 
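+ *
+ * The @filter_id returned here is the handle a driver passes back in a
+ * later IONIC_CMD_RX_FILTER_DEL request. A sketch only, assuming
+ * hypothetical add_comp/del_cmd variables and the rte_byteorder.h helpers:
+ *
+ *   uint32_t fid = rte_le_to_cpu_32(add_comp->filter_id);
+ *
+ *   del_cmd.opcode = IONIC_CMD_RX_FILTER_DEL;
+ *   del_cmd.lif_index = rte_cpu_to_le_16(lif_index);
+ *   del_cmd.filter_id = rte_cpu_to_le_32(fid);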
+ */ +struct ionic_rx_filter_add_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + __le32 filter_id; + u8 rsvd2[7]; + u8 color; +}; + +/** + * struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command + * @opcode: opcode + * @lif_index: LIF index + * @filter_id: Filter ID + */ +struct ionic_rx_filter_del_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le32 filter_id; + u8 rsvd2[56]; +}; + +typedef struct ionic_admin_comp ionic_rx_filter_del_comp; + +/** + * struct ionic_qos_identify_cmd - QoS identify command + * @opcode: opcode + * @ver: Highest version of identify supported by driver + * + */ +struct ionic_qos_identify_cmd { + u8 opcode; + u8 ver; + u8 rsvd[62]; +}; + +/** + * struct ionic_qos_identify_comp - QoS identify command completion + * @status: The status of the command (enum status_code) + * @ver: Version of identify returned by device + */ +struct ionic_qos_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +#define IONIC_QOS_CLASS_MAX 7 +#define IONIC_QOS_CLASS_NAME_SZ 32 +#define IONIC_QOS_DSCP_MAX_VALUES 64 + +/** + * enum ionic_qos_class + */ +enum ionic_qos_class { + IONIC_QOS_CLASS_DEFAULT = 0, + IONIC_QOS_CLASS_USER_DEFINED_1 = 1, + IONIC_QOS_CLASS_USER_DEFINED_2 = 2, + IONIC_QOS_CLASS_USER_DEFINED_3 = 3, + IONIC_QOS_CLASS_USER_DEFINED_4 = 4, + IONIC_QOS_CLASS_USER_DEFINED_5 = 5, + IONIC_QOS_CLASS_USER_DEFINED_6 = 6, +}; + +/** + * enum ionic_qos_class_type - Traffic classification criteria + */ +enum ionic_qos_class_type { + IONIC_QOS_CLASS_TYPE_NONE = 0, + IONIC_QOS_CLASS_TYPE_PCP = 1, /* Dot1Q pcp */ + IONIC_QOS_CLASS_TYPE_DSCP = 2, /* IP dscp */ +}; + +/** + * enum ionic_qos_sched_type - Qos class scheduling type + */ +enum ionic_qos_sched_type { + /* Strict priority */ + IONIC_QOS_SCHED_TYPE_STRICT = 0, + /* Deficit weighted round-robin */ + IONIC_QOS_SCHED_TYPE_DWRR = 1, +}; + +/** + * union ionic_qos_config - Qos configuration structure + * @flags: Configuration flags + * IONIC_QOS_CONFIG_F_ENABLE enable + * IONIC_QOS_CONFIG_F_DROP drop/nodrop + * IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite + * IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite + * @sched_type: Qos class scheduling type (enum ionic_qos_sched_type) + * @class_type: Qos class type (enum ionic_qos_class_type) + * @pause_type: Qos pause type (enum qos_pause_type) + * @name: Qos class name + * @mtu: MTU of the class + * @pfc_dot1q_pcp: Pcp value for pause frames (valid iff F_NODROP) + * @dwrr_weight: Qos class scheduling weight + * @strict_rlmt: Rate limit for strict priority scheduling + * @rw_dot1q_pcp: Rewrite dot1q pcp to this value + * (valid iff F_RW_DOT1Q_PCP) + * @rw_ip_dscp: Rewrite ip dscp to this value + * (valid iff F_RW_IP_DSCP) + * @dot1q_pcp: Dot1q pcp value + * @ndscp: Number of valid dscp values in the ip_dscp field + * @ip_dscp: IP dscp values + */ +union ionic_qos_config { + struct { +#define IONIC_QOS_CONFIG_F_ENABLE BIT(0) +#define IONIC_QOS_CONFIG_F_DROP BIT(1) +#define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2) +#define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3) + u8 flags; + u8 sched_type; + u8 class_type; + u8 pause_type; + char name[IONIC_QOS_CLASS_NAME_SZ]; + __le32 mtu; + /* flow control */ + u8 pfc_cos; + /* scheduler */ + union { + u8 dwrr_weight; + __le64 strict_rlmt; + }; + /* marking */ + union { + u8 rw_dot1q_pcp; + u8 rw_ip_dscp; + }; + /* classification */ + union { + u8 dot1q_pcp; + struct { + u8 ndscp; + u8 ip_dscp[IONIC_QOS_DSCP_MAX_VALUES]; + }; + }; + }; + __le32 words[64]; +}; + +/** + * union ionic_qos_identity - QoS identity 
structure + * @version: Version of the identify structure + * @type: QoS system type + * @nclasses: Number of usable QoS classes + * @config: Current configuration of classes + */ +union ionic_qos_identity { + struct { + u8 version; + u8 type; + u8 rsvd[62]; + union ionic_qos_config config[IONIC_QOS_CLASS_MAX]; + }; + __le32 words[512]; +}; + +/** + * struct qos_init_cmd - QoS config init command + * @opcode: Opcode + * @group: Qos class id + * @info_pa: destination address for qos info + */ +struct ionic_qos_init_cmd { + u8 opcode; + u8 group; + u8 rsvd[6]; + __le64 info_pa; + u8 rsvd1[48]; +}; + +typedef struct ionic_admin_comp ionic_qos_init_comp; + +/** + * struct ionic_qos_reset_cmd - Qos config reset command + * @opcode: Opcode + */ +struct ionic_qos_reset_cmd { + u8 opcode; + u8 group; + u8 rsvd[62]; +}; + +typedef struct ionic_admin_comp ionic_qos_reset_comp; + +/** + * struct ionic_fw_download_cmd - Firmware download command + * @opcode: opcode + * @addr: dma address of the firmware buffer + * @offset: offset of the firmware buffer within the full image + * @length: number of valid bytes in the firmware buffer + */ +struct ionic_fw_download_cmd { + u8 opcode; + u8 rsvd[3]; + __le32 offset; + __le64 addr; + __le32 length; +}; + +typedef struct ionic_admin_comp ionic_fw_download_comp; + +enum ionic_fw_control_oper { + IONIC_FW_RESET = 0, /* Reset firmware */ + IONIC_FW_INSTALL = 1, /* Install firmware */ + IONIC_FW_ACTIVATE = 2, /* Activate firmware */ +}; + +/** + * struct ionic_fw_control_cmd - Firmware control command + * @opcode: opcode + * @oper: firmware control operation (enum ionic_fw_control_oper) + * @slot: slot to activate + */ +struct ionic_fw_control_cmd { + u8 opcode; + u8 rsvd[3]; + u8 oper; + u8 slot; + u8 rsvd1[58]; +}; + +/** + * struct ionic_fw_control_comp - Firmware control copletion + * @opcode: opcode + * @slot: slot where the firmware was installed + */ +struct ionic_fw_control_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + u8 slot; + u8 rsvd1[10]; + u8 color; +}; + +/****************************************************************** + ******************* RDMA Commands ******************************** + ******************************************************************/ + +/** + * struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd + * @opcode: opcode + * @lif_index: lif index + * + * There is no rdma specific dev command completion struct. Completion uses + * the common struct ionic_admin_comp. Only the status is indicated. + * Nonzero status means the LIF does not support rdma. + **/ +struct ionic_rdma_reset_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 rsvd2[60]; +}; + +/** + * struct ionic_rdma_queue_cmd - Create RDMA Queue command + * @opcode: opcode, 52, 53 + * @lif_index lif index + * @qid_ver: (qid | (rdma version << 24)) + * @cid: intr, eq_id, or cq_id + * @dbid: doorbell page id + * @depth_log2: log base two of queue depth + * @stride_log2: log base two of queue stride + * @dma_addr: address of the queue memory + * @xxx_table_index: temporary, but should not need pgtbl for contig. queues. + * + * The same command struct is used to create an rdma event queue, completion + * queue, or rdma admin queue. The cid is an interrupt number for an event + * queue, an event queue id for a completion queue, or a completion queue id + * for an rdma admin queue. + * + * The queue created via a dev command must be contiguous in dma space. 
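+ * For example (numbers purely illustrative), a 256-entry event queue of
+ * 64-byte entries in one physically contiguous buffer would be described
+ * with depth_log2 = 8 and stride_log2 = 6, since 256 = 1 << 8 and
+ * 64 = 1 << 6.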
+ * + * The dev commands are intended only to be used during driver initialization, + * to create queues supporting the rdma admin queue. Other queues, and other + * types of rdma resources like memory regions, will be created and registered + * via the rdma admin queue, and will support a more complete interface + * providing scatter gather lists for larger, scattered queue buffers and + * memory registration. + * + * There is no rdma specific dev command completion struct. Completion uses + * the common struct ionic_admin_comp. Only the status is indicated. + **/ +struct ionic_rdma_queue_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le32 qid_ver; + __le32 cid; + __le16 dbid; + u8 depth_log2; + u8 stride_log2; + __le64 dma_addr; + u8 rsvd2[36]; + __le32 xxx_table_index; +}; + +/****************************************************************** + ******************* Notify Events ******************************** + ******************************************************************/ + +/** + * struct ionic_notifyq_event + * @eid: event number + * @ecode: event code + * @data: unspecified data about the event + * + * This is the generic event report struct from which the other + * actual events will be formed. + */ +struct ionic_notifyq_event { + __le64 eid; + __le16 ecode; + u8 data[54]; +}; + +/** + * struct ionic_link_change_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_LINK_CHANGE + * @link_status: link up or down, with error bits (enum port_status) + * @link_speed: speed of the network link + * + * Sent when the network link state changes between UP and DOWN + */ +struct ionic_link_change_event { + __le64 eid; + __le16 ecode; + __le16 link_status; + __le32 link_speed; /* units of 1Mbps: e.g. 10000 = 10Gbps */ + u8 rsvd[48]; +}; + +/** + * struct ionic_reset_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_RESET + * @reset_code: reset type + * @state: 0=pending, 1=complete, 2=error + * + * Sent when the NIC or some subsystem is going to be or + * has been reset. + */ +struct ionic_reset_event { + __le64 eid; + __le16 ecode; + u8 reset_code; + u8 state; + u8 rsvd[52]; +}; + +/** + * struct ionic_heartbeat_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_HEARTBEAT + * + * Sent periodically by the NIC to indicate continued health + */ +struct ionic_heartbeat_event { + __le64 eid; + __le16 ecode; + u8 rsvd[54]; +}; + +/** + * struct ionic_log_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_LOG + * @data: log data + * + * Sent to notify the driver of an internal error. 
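+ *
+ * Like the other notify events above, this struct is normally consumed
+ * by a handler that switches on @ecode; a sketch only, with hypothetical
+ * handler names and ev pointing at the generic ionic_notifyq_event:
+ *
+ *   switch (rte_le_to_cpu_16(ev->ecode)) {
+ *   case IONIC_EVENT_LINK_CHANGE:
+ *           handle_link_change((struct ionic_link_change_event *)ev);
+ *           break;
+ *   case IONIC_EVENT_LOG:
+ *           handle_log((struct ionic_log_event *)ev);
+ *           break;
+ *   }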
+ */
+struct ionic_log_event {
+	__le64 eid;
+	__le16 ecode;
+	u8 data[54];
+};
+
+/**
+ * struct ionic_port_stats
+ */
+struct ionic_port_stats {
+	__le64 frames_rx_ok;
+	__le64 frames_rx_all;
+	__le64 frames_rx_bad_fcs;
+	__le64 frames_rx_bad_all;
+	__le64 octets_rx_ok;
+	__le64 octets_rx_all;
+	__le64 frames_rx_unicast;
+	__le64 frames_rx_multicast;
+	__le64 frames_rx_broadcast;
+	__le64 frames_rx_pause;
+	__le64 frames_rx_bad_length;
+	__le64 frames_rx_undersized;
+	__le64 frames_rx_oversized;
+	__le64 frames_rx_fragments;
+	__le64 frames_rx_jabber;
+	__le64 frames_rx_pripause;
+	__le64 frames_rx_stomped_crc;
+	__le64 frames_rx_too_long;
+	__le64 frames_rx_vlan_good;
+	__le64 frames_rx_dropped;
+	__le64 frames_rx_less_than_64b;
+	__le64 frames_rx_64b;
+	__le64 frames_rx_65b_127b;
+	__le64 frames_rx_128b_255b;
+	__le64 frames_rx_256b_511b;
+	__le64 frames_rx_512b_1023b;
+	__le64 frames_rx_1024b_1518b;
+	__le64 frames_rx_1519b_2047b;
+	__le64 frames_rx_2048b_4095b;
+	__le64 frames_rx_4096b_8191b;
+	__le64 frames_rx_8192b_9215b;
+	__le64 frames_rx_other;
+	__le64 frames_tx_ok;
+	__le64 frames_tx_all;
+	__le64 frames_tx_bad;
+	__le64 octets_tx_ok;
+	__le64 octets_tx_total;
+	__le64 frames_tx_unicast;
+	__le64 frames_tx_multicast;
+	__le64 frames_tx_broadcast;
+	__le64 frames_tx_pause;
+	__le64 frames_tx_pripause;
+	__le64 frames_tx_vlan;
+	__le64 frames_tx_less_than_64b;
+	__le64 frames_tx_64b;
+	__le64 frames_tx_65b_127b;
+	__le64 frames_tx_128b_255b;
+	__le64 frames_tx_256b_511b;
+	__le64 frames_tx_512b_1023b;
+	__le64 frames_tx_1024b_1518b;
+	__le64 frames_tx_1519b_2047b;
+	__le64 frames_tx_2048b_4095b;
+	__le64 frames_tx_4096b_8191b;
+	__le64 frames_tx_8192b_9215b;
+	__le64 frames_tx_other;
+	__le64 frames_tx_pri_0;
+	__le64 frames_tx_pri_1;
+	__le64 frames_tx_pri_2;
+	__le64 frames_tx_pri_3;
+	__le64 frames_tx_pri_4;
+	__le64 frames_tx_pri_5;
+	__le64 frames_tx_pri_6;
+	__le64 frames_tx_pri_7;
+	__le64 frames_rx_pri_0;
+	__le64 frames_rx_pri_1;
+	__le64 frames_rx_pri_2;
+	__le64 frames_rx_pri_3;
+	__le64 frames_rx_pri_4;
+	__le64 frames_rx_pri_5;
+	__le64 frames_rx_pri_6;
+	__le64 frames_rx_pri_7;
+	__le64 tx_pripause_0_1us_count;
+	__le64 tx_pripause_1_1us_count;
+	__le64 tx_pripause_2_1us_count;
+	__le64 tx_pripause_3_1us_count;
+	__le64 tx_pripause_4_1us_count;
+	__le64 tx_pripause_5_1us_count;
+	__le64 tx_pripause_6_1us_count;
+	__le64 tx_pripause_7_1us_count;
+	__le64 rx_pripause_0_1us_count;
+	__le64 rx_pripause_1_1us_count;
+	__le64 rx_pripause_2_1us_count;
+	__le64 rx_pripause_3_1us_count;
+	__le64 rx_pripause_4_1us_count;
+	__le64 rx_pripause_5_1us_count;
+	__le64 rx_pripause_6_1us_count;
+	__le64 rx_pripause_7_1us_count;
+	__le64 rx_pause_1us_count;
+	__le64 frames_tx_truncated;
+};
+
+struct ionic_mgmt_port_stats {
+	__le64 frames_rx_ok;
+	__le64 frames_rx_all;
+	__le64 frames_rx_bad_fcs;
+	__le64 frames_rx_bad_all;
+	__le64 octets_rx_ok;
+	__le64 octets_rx_all;
+	__le64 frames_rx_unicast;
+	__le64 frames_rx_multicast;
+	__le64 frames_rx_broadcast;
+	__le64 frames_rx_pause;
+	__le64 frames_rx_bad_length;
+	__le64 frames_rx_undersized;
+	__le64 frames_rx_oversized;
+	__le64 frames_rx_fragments;
+	__le64 frames_rx_jabber;
+	__le64 frames_rx_64b;
+	__le64 frames_rx_65b_127b;
+	__le64 frames_rx_128b_255b;
+	__le64 frames_rx_256b_511b;
+	__le64 frames_rx_512b_1023b;
+	__le64 frames_rx_1024b_1518b;
+	__le64 frames_rx_gt_1518b;
+	__le64 frames_rx_fifo_full;
+	__le64 frames_tx_ok;
+	__le64 frames_tx_all;
+	__le64 frames_tx_bad;
+	__le64 octets_tx_ok;
+	__le64 octets_tx_total;
+	__le64 frames_tx_unicast;
+	__le64 frames_tx_multicast;
+	__le64 frames_tx_broadcast;
+	__le64 frames_tx_pause;
+};
+
+/**
+ * struct ionic_port_identity - port identity structure
+ * @version: identity structure version
+ * @type: type of port (enum port_type)
+ * @num_lanes: number of lanes for the port
+ * @autoneg: autoneg supported
+ * @min_frame_size: minimum frame size supported
+ * @max_frame_size: maximum frame size supported
+ * @fec_type: supported fec types
+ * @pause_type: supported pause types
+ * @loopback_mode: supported loopback mode
+ * @speeds: supported speeds
+ * @config: current port configuration
+ */
+union ionic_port_identity {
+	struct {
+		u8 version;
+		u8 type;
+		u8 num_lanes;
+		u8 autoneg;
+		__le32 min_frame_size;
+		__le32 max_frame_size;
+		u8 fec_type[4];
+		u8 pause_type[2];
+		u8 loopback_mode[2];
+		__le32 speeds[16];
+		u8 rsvd2[44];
+		union ionic_port_config config;
+	};
+	__le32 words[512];
+};
+
+/**
+ * struct ionic_port_info - port info structure
+ * @port_status: port status
+ * @port_stats: port stats
+ */
+struct ionic_port_info {
+	union ionic_port_config config;
+	struct ionic_port_status status;
+	struct ionic_port_stats stats;
+};
+
+/**
+ * struct ionic_lif_stats
+ */
+struct ionic_lif_stats {
+	/* RX */
+	__le64 rx_ucast_bytes;
+	__le64 rx_ucast_packets;
+	__le64 rx_mcast_bytes;
+	__le64 rx_mcast_packets;
+	__le64 rx_bcast_bytes;
+	__le64 rx_bcast_packets;
+	__le64 rsvd0;
+	__le64 rsvd1;
+	/* RX drops */
+	__le64 rx_ucast_drop_bytes;
+	__le64 rx_ucast_drop_packets;
+	__le64 rx_mcast_drop_bytes;
+	__le64 rx_mcast_drop_packets;
+	__le64 rx_bcast_drop_bytes;
+	__le64 rx_bcast_drop_packets;
+	__le64 rx_dma_error;
+	__le64 rsvd2;
+	/* TX */
+	__le64 tx_ucast_bytes;
+	__le64 tx_ucast_packets;
+	__le64 tx_mcast_bytes;
+	__le64 tx_mcast_packets;
+	__le64 tx_bcast_bytes;
+	__le64 tx_bcast_packets;
+	__le64 rsvd3;
+	__le64 rsvd4;
+	/* TX drops */
+	__le64 tx_ucast_drop_bytes;
+	__le64 tx_ucast_drop_packets;
+	__le64 tx_mcast_drop_bytes;
+	__le64 tx_mcast_drop_packets;
+	__le64 tx_bcast_drop_bytes;
+	__le64 tx_bcast_drop_packets;
+	__le64 tx_dma_error;
+	__le64 rsvd5;
+	/* Rx Queue/Ring drops */
+	__le64 rx_queue_disabled;
+	__le64 rx_queue_empty;
+	__le64 rx_queue_error;
+	__le64 rx_desc_fetch_error;
+	__le64 rx_desc_data_error;
+	__le64 rsvd6;
+	__le64 rsvd7;
+	__le64 rsvd8;
+	/* Tx Queue/Ring drops */
+	__le64 tx_queue_disabled;
+	__le64 tx_queue_error;
+	__le64 tx_desc_fetch_error;
+	__le64 tx_desc_data_error;
+	__le64 rsvd9;
+	__le64 rsvd10;
+	__le64 rsvd11;
+	__le64 rsvd12;
+
+	/* RDMA/ROCE TX */
+	__le64 tx_rdma_ucast_bytes;
+	__le64 tx_rdma_ucast_packets;
+	__le64 tx_rdma_mcast_bytes;
+	__le64 tx_rdma_mcast_packets;
+	__le64 tx_rdma_cnp_packets;
+	__le64 rsvd13;
+	__le64 rsvd14;
+	__le64 rsvd15;
+
+	/* RDMA/ROCE RX */
+	__le64 rx_rdma_ucast_bytes;
+	__le64 rx_rdma_ucast_packets;
+	__le64 rx_rdma_mcast_bytes;
+	__le64 rx_rdma_mcast_packets;
+	__le64 rx_rdma_cnp_packets;
+	__le64 rx_rdma_ecn_packets;
+	__le64 rsvd16;
+	__le64 rsvd17;
+
+	__le64 rsvd18;
+	__le64 rsvd19;
+	__le64 rsvd20;
+	__le64 rsvd21;
+	__le64 rsvd22;
+	__le64 rsvd23;
+	__le64 rsvd24;
+	__le64 rsvd25;
+
+	__le64 rsvd26;
+	__le64 rsvd27;
+	__le64 rsvd28;
+	__le64 rsvd29;
+	__le64 rsvd30;
+	__le64 rsvd31;
+	__le64 rsvd32;
+	__le64 rsvd33;
+
+	__le64 rsvd34;
+	__le64 rsvd35;
+	__le64 rsvd36;
+	__le64 rsvd37;
+	__le64 rsvd38;
+	__le64 rsvd39;
+	__le64 rsvd40;
+	__le64 rsvd41;
+
+	__le64 rsvd42;
+	__le64 rsvd43;
+	__le64 rsvd44;
+	__le64 rsvd45;
+	__le64 rsvd46;
+
__le64 rsvd47; + __le64 rsvd48; + __le64 rsvd49; + + /* RDMA/ROCE REQ Error/Debugs (768 - 895) */ + __le64 rdma_req_rx_pkt_seq_err; + __le64 rdma_req_rx_rnr_retry_err; + __le64 rdma_req_rx_remote_access_err; + __le64 rdma_req_rx_remote_inv_req_err; + __le64 rdma_req_rx_remote_oper_err; + __le64 rdma_req_rx_implied_nak_seq_err; + __le64 rdma_req_rx_cqe_err; + __le64 rdma_req_rx_cqe_flush_err; + + __le64 rdma_req_rx_dup_responses; + __le64 rdma_req_rx_invalid_packets; + __le64 rdma_req_tx_local_access_err; + __le64 rdma_req_tx_local_oper_err; + __le64 rdma_req_tx_memory_mgmt_err; + __le64 rsvd52; + __le64 rsvd53; + __le64 rsvd54; + + /* RDMA/ROCE RESP Error/Debugs (896 - 1023) */ + __le64 rdma_resp_rx_dup_requests; + __le64 rdma_resp_rx_out_of_buffer; + __le64 rdma_resp_rx_out_of_seq_pkts; + __le64 rdma_resp_rx_cqe_err; + __le64 rdma_resp_rx_cqe_flush_err; + __le64 rdma_resp_rx_local_len_err; + __le64 rdma_resp_rx_inv_request_err; + __le64 rdma_resp_rx_local_qp_oper_err; + + __le64 rdma_resp_rx_out_of_atomic_resource; + __le64 rdma_resp_tx_pkt_seq_err; + __le64 rdma_resp_tx_remote_inv_req_err; + __le64 rdma_resp_tx_remote_access_err; + __le64 rdma_resp_tx_remote_oper_err; + __le64 rdma_resp_tx_rnr_retry_err; + __le64 rsvd57; + __le64 rsvd58; +}; + +/** + * struct ionic_lif_info - lif info structure + */ +struct ionic_lif_info { + union ionic_lif_config config; + struct ionic_lif_status status; + struct ionic_lif_stats stats; +}; + +union ionic_dev_cmd { + u32 words[16]; + struct ionic_admin_cmd cmd; + struct ionic_nop_cmd nop; + + struct ionic_dev_identify_cmd identify; + struct ionic_dev_init_cmd init; + struct ionic_dev_reset_cmd reset; + struct ionic_dev_getattr_cmd getattr; + struct ionic_dev_setattr_cmd setattr; + + struct ionic_port_identify_cmd port_identify; + struct ionic_port_init_cmd port_init; + struct ionic_port_reset_cmd port_reset; + struct ionic_port_getattr_cmd port_getattr; + struct ionic_port_setattr_cmd port_setattr; + + struct ionic_lif_identify_cmd lif_identify; + struct ionic_lif_init_cmd lif_init; + struct ionic_lif_reset_cmd lif_reset; + + struct ionic_qos_identify_cmd qos_identify; + struct ionic_qos_init_cmd qos_init; + struct ionic_qos_reset_cmd qos_reset; + + struct ionic_q_init_cmd q_init; +}; + +union ionic_dev_cmd_comp { + u32 words[4]; + u8 status; + struct ionic_admin_comp comp; + struct ionic_nop_comp nop; + + struct ionic_dev_identify_comp identify; + struct ionic_dev_init_comp init; + struct ionic_dev_reset_comp reset; + struct ionic_dev_getattr_comp getattr; + struct ionic_dev_setattr_comp setattr; + + struct ionic_port_identify_comp port_identify; + struct ionic_port_init_comp port_init; + struct ionic_port_reset_comp port_reset; + struct ionic_port_getattr_comp port_getattr; + struct ionic_port_setattr_comp port_setattr; + + struct ionic_lif_identify_comp lif_identify; + struct ionic_lif_init_comp lif_init; + ionic_lif_reset_comp lif_reset; + + struct ionic_qos_identify_comp qos_identify; + ionic_qos_init_comp qos_init; + ionic_qos_reset_comp qos_reset; + + struct ionic_q_init_comp q_init; +}; + +/** + * union dev_info - Device info register format (read-only) + * @signature: Signature value of 0x44455649 ('DEVI'). + * @version: Current version of info. + * @asic_type: Asic type. + * @asic_rev: Asic revision. + * @fw_status: Firmware status. + * @fw_heartbeat: Firmware heartbeat counter. + * @serial_num: Serial number. + * @fw_version: Firmware version. 
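+ *
+ * Editorial note, not part of the upstream header: a typical consumer of
+ * this block first verifies @signature against 0x44455649 before trusting
+ * any other field, then samples @fw_heartbeat periodically and treats a
+ * counter that stops advancing as a firmware hang. A minimal, hypothetical
+ * sketch (the 'info' pointer name is illustrative only):
+ *
+ *	if (ioread32(&info->signature) != 0x44455649)
+ *		return -EFAULT;
+ *	heartbeat = ioread32(&info->fw_heartbeat);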
+ */ +union ionic_dev_info_regs { +#define IONIC_DEVINFO_FWVERS_BUFLEN 32 +#define IONIC_DEVINFO_SERIAL_BUFLEN 32 + struct { + u32 signature; + u8 version; + u8 asic_type; + u8 asic_rev; + u8 fw_status; + u32 fw_heartbeat; + char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN]; + char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN]; + }; + u32 words[512]; +}; + +/** + * union ionic_dev_cmd_regs - Device command register format (read-write) + * @doorbell: Device Cmd Doorbell, write-only. + * Write a 1 to signal device to process cmd, + * poll done for completion. + * @done: Done indicator, bit 0 == 1 when command is complete. + * @cmd: Opcode-specific command bytes + * @comp: Opcode-specific response bytes + * @data: Opcode-specific side-data + */ +union ionic_dev_cmd_regs { + struct { + u32 doorbell; + u32 done; + union ionic_dev_cmd cmd; + union ionic_dev_cmd_comp comp; + u8 rsvd[48]; + u32 data[478]; + }; + u32 words[512]; +}; + +/** + * union ionic_dev_regs - Device register format in for bar 0 page 0 + * @info: Device info registers + * @devcmd: Device command registers + */ +union ionic_dev_regs { + struct { + union ionic_dev_info_regs info; + union ionic_dev_cmd_regs devcmd; + }; + __le32 words[1024]; +}; + +union ionic_adminq_cmd { + struct ionic_admin_cmd cmd; + struct ionic_nop_cmd nop; + struct ionic_q_init_cmd q_init; + struct ionic_q_control_cmd q_control; + struct ionic_lif_setattr_cmd lif_setattr; + struct ionic_lif_getattr_cmd lif_getattr; + struct ionic_rx_mode_set_cmd rx_mode_set; + struct ionic_rx_filter_add_cmd rx_filter_add; + struct ionic_rx_filter_del_cmd rx_filter_del; + struct ionic_rdma_reset_cmd rdma_reset; + struct ionic_rdma_queue_cmd rdma_queue; + struct ionic_fw_download_cmd fw_download; + struct ionic_fw_control_cmd fw_control; +}; + +union ionic_adminq_comp { + struct ionic_admin_comp comp; + struct ionic_nop_comp nop; + struct ionic_q_init_comp q_init; + struct ionic_lif_setattr_comp lif_setattr; + struct ionic_lif_getattr_comp lif_getattr; + struct ionic_rx_filter_add_comp rx_filter_add; + struct ionic_fw_control_comp fw_control; +}; + +#define IONIC_BARS_MAX 6 +#define IONIC_PCI_BAR_DBELL 1 + +/* BAR0 */ +#define IONIC_BAR0_SIZE 0x8000 + +#define IONIC_BAR0_DEV_INFO_REGS_OFFSET 0x0000 +#define IONIC_BAR0_DEV_CMD_REGS_OFFSET 0x0800 +#define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00 +#define IONIC_BAR0_INTR_STATUS_OFFSET 0x1000 +#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000 +#define IONIC_DEV_CMD_DONE 0x00000001 + +#define IONIC_ASIC_TYPE_CAPRI 0 + +/** + * struct ionic_doorbell - Doorbell register layout + * @p_index: Producer index + * @ring: Selects the specific ring of the queue to update. + * Type-specific meaning: + * ring=0: Default producer/consumer queue. + * ring=1: (CQ, EQ) Re-Arm queue. RDMA CQs + * send events to EQs when armed. EQs send + * interrupts when armed. + * @qid: The queue id selects the queue destination for the + * producer index and flags. 
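+ *
+ * Editorial sketch, not upstream text: given the field widths below, a
+ * 24-bit queue id is split across @qid_lo (low byte) and @qid_hi (upper
+ * bits), so ringing producer index 'p' on queue 'qid' would look roughly
+ * like the following field-by-field illustration:
+ *
+ *	db.p_index = rte_cpu_to_le_16(p);
+ *	db.ring    = 0;
+ *	db.qid_lo  = qid & 0xff;
+ *	db.qid_hi  = rte_cpu_to_le_16(qid >> 8);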
+ */ +struct ionic_doorbell { + __le16 p_index; + u8 ring; + u8 qid_lo; + __le16 qid_hi; + u16 rsvd2; +}; + +struct ionic_intr_status { + u32 status[2]; +}; + +struct ionic_notifyq_cmd { + __le32 data; /* Not used but needed for qcq structure */ +}; + +union ionic_notifyq_comp { + struct ionic_notifyq_event event; + struct ionic_link_change_event link_change; + struct ionic_reset_event reset; + struct ionic_heartbeat_event heartbeat; + struct ionic_log_event log; +}; + +/* Deprecate */ +struct ionic_identity { + union ionic_drv_identity drv; + union ionic_dev_identity dev; + union ionic_lif_identity lif; + union ionic_port_identity port; + union ionic_qos_identity qos; +}; + +#pragma pack(pop) + +#endif /* _IONIC_IF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_lif.c b/src/spdk/dpdk/drivers/net/ionic/ionic_lif.c new file mode 100644 index 000000000..60a5f3d53 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_lif.c @@ -0,0 +1,1696 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#include +#include + +#include "ionic.h" +#include "ionic_logs.h" +#include "ionic_lif.h" +#include "ionic_ethdev.h" +#include "ionic_rx_filter.h" +#include "ionic_rxtx.h" + +static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr); +static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr); + +int +ionic_qcq_enable(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = q->lif; + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .lif_index = lif->index, + .type = q->type, + .index = q->index, + .oper = IONIC_Q_ENABLE, + }, + }; + + if (qcq->flags & IONIC_QCQ_F_INTR) { + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + } + + return ionic_adminq_post_wait(lif, &ctx); +} + +int +ionic_qcq_disable(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = q->lif; + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .lif_index = lif->index, + .type = q->type, + .index = q->index, + .oper = IONIC_Q_DISABLE, + }, + }; + + if (qcq->flags & IONIC_QCQ_F_INTR) { + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + } + + return ionic_adminq_post_wait(lif, &ctx); +} + +int +ionic_lif_stop(struct ionic_lif *lif __rte_unused) +{ + /* Carrier OFF here */ + + return 0; +} + +void +ionic_lif_reset(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->adapter->idev; + + IONIC_PRINT_CALL(); + + ionic_dev_cmd_lif_reset(idev, lif->index); + ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); +} + +static void +ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats) +{ + struct ionic_lif_stats *ls = &lif->info->stats; + uint32_t i; + uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t) + RTE_ETHDEV_QUEUE_STAT_CNTRS); + uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t) + RTE_ETHDEV_QUEUE_STAT_CNTRS); + + memset(stats, 0, sizeof(*stats)); + + if (ls == NULL) { + IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized", + lif->port_id); + return; + } + + /* RX */ + + stats->ipackets = ls->rx_ucast_packets + + ls->rx_mcast_packets + + ls->rx_bcast_packets; + + stats->ibytes = ls->rx_ucast_bytes + + ls->rx_mcast_bytes + + 
ls->rx_bcast_bytes; + + for (i = 0; i < lif->nrxqcqs; i++) { + struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats.rx; + stats->imissed += + rx_stats->no_cb_arg + + rx_stats->bad_cq_status + + rx_stats->no_room + + rx_stats->bad_len; + } + + stats->imissed += + ls->rx_ucast_drop_packets + + ls->rx_mcast_drop_packets + + ls->rx_bcast_drop_packets; + + stats->imissed += + ls->rx_queue_empty + + ls->rx_dma_error + + ls->rx_queue_disabled + + ls->rx_desc_fetch_error + + ls->rx_desc_data_error; + + for (i = 0; i < num_rx_q_counters; i++) { + struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats.rx; + stats->q_ipackets[i] = rx_stats->packets; + stats->q_ibytes[i] = rx_stats->bytes; + stats->q_errors[i] = + rx_stats->no_cb_arg + + rx_stats->bad_cq_status + + rx_stats->no_room + + rx_stats->bad_len; + } + + /* TX */ + + stats->opackets = ls->tx_ucast_packets + + ls->tx_mcast_packets + + ls->tx_bcast_packets; + + stats->obytes = ls->tx_ucast_bytes + + ls->tx_mcast_bytes + + ls->tx_bcast_bytes; + + for (i = 0; i < lif->ntxqcqs; i++) { + struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats.tx; + stats->oerrors += tx_stats->drop; + } + + stats->oerrors += + ls->tx_ucast_drop_packets + + ls->tx_mcast_drop_packets + + ls->tx_bcast_drop_packets; + + stats->oerrors += + ls->tx_dma_error + + ls->tx_queue_disabled + + ls->tx_desc_fetch_error + + ls->tx_desc_data_error; + + for (i = 0; i < num_tx_q_counters; i++) { + struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats.tx; + stats->q_opackets[i] = tx_stats->packets; + stats->q_obytes[i] = tx_stats->bytes; + } +} + +void +ionic_lif_get_stats(const struct ionic_lif *lif, + struct rte_eth_stats *stats) +{ + ionic_lif_get_abs_stats(lif, stats); + + stats->ipackets -= lif->stats_base.ipackets; + stats->opackets -= lif->stats_base.opackets; + stats->ibytes -= lif->stats_base.ibytes; + stats->obytes -= lif->stats_base.obytes; + stats->imissed -= lif->stats_base.imissed; + stats->ierrors -= lif->stats_base.ierrors; + stats->oerrors -= lif->stats_base.oerrors; + stats->rx_nombuf -= lif->stats_base.rx_nombuf; +} + +void +ionic_lif_reset_stats(struct ionic_lif *lif) +{ + uint32_t i; + + for (i = 0; i < lif->nrxqcqs; i++) { + memset(&lif->rxqcqs[i]->stats.rx, 0, + sizeof(struct ionic_rx_stats)); + memset(&lif->txqcqs[i]->stats.tx, 0, + sizeof(struct ionic_tx_stats)); + } + + ionic_lif_get_abs_stats(lif, &lif->stats_base); +} + +void +ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats) +{ + uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t); + uint64_t *stats64 = (uint64_t *)stats; + uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats; + uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base; + + for (i = 0; i < count; i++) + stats64[i] = lif_stats64[i] - lif_stats64_base[i]; +} + +void +ionic_lif_reset_hw_stats(struct ionic_lif *lif) +{ + uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t); + uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats; + uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base; + + for (i = 0; i < count; i++) + lif_stats64_base[i] = lif_stats64[i]; +} + +static int +ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .match = IONIC_RX_FILTER_MATCH_MAC, + }, + }; + int err; + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, RTE_ETHER_ADDR_LEN); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + 
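+	/*
+	 * Editorial comment: the RX_FILTER_ADD completion carries the
+	 * firmware-assigned filter_id; it is logged below and cached via
+	 * ionic_rx_filter_save() so that ionic_lif_addr_del() can later
+	 * look the filter up by MAC address and delete it by id.
+	 */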
IONIC_PRINT(INFO, "rx_filter add (id %d)", + ctx.comp.rx_filter_add.filter_id); + + return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx); +} + +static int +ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + }, + }; + struct ionic_rx_filter *f; + int err; + + IONIC_PRINT_CALL(); + + rte_spinlock_lock(&lif->rx_filters.lock); + + f = ionic_rx_filter_by_addr(lif, addr); + if (!f) { + rte_spinlock_unlock(&lif->rx_filters.lock); + return -ENOENT; + } + + ctx.cmd.rx_filter_del.filter_id = f->filter_id; + ionic_rx_filter_free(f); + + rte_spinlock_unlock(&lif->rx_filters.lock); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + IONIC_PRINT(INFO, "rx_filter del (id %d)", + ctx.cmd.rx_filter_del.filter_id); + + return 0; +} + +int +ionic_dev_add_mac(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mac_addr, + uint32_t index __rte_unused, uint32_t pool __rte_unused) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + + IONIC_PRINT_CALL(); + + return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr); +} + +void +ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index __rte_unused) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + + IONIC_PRINT_CALL(); + + if (index >= adapter->max_mac_addrs) { + IONIC_PRINT(WARNING, + "Index %u is above MAC filter limit %u", + index, adapter->max_mac_addrs); + return; + } + + if (!rte_is_valid_assigned_ether_addr(ð_dev->data->mac_addrs[index])) + return; + + ionic_lif_addr_del(lif, (const uint8_t *) + ð_dev->data->mac_addrs[index]); +} + +int +ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + + IONIC_PRINT_CALL(); + + if (mac_addr == NULL) { + IONIC_PRINT(NOTICE, "New mac is null"); + return -1; + } + + if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) { + IONIC_PRINT(INFO, "Deleting mac addr %pM", + lif->mac_addr); + ionic_lif_addr_del(lif, lif->mac_addr); + memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN); + } + + IONIC_PRINT(INFO, "Updating mac addr"); + + rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr); + + return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr); +} + +static int +ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .match = IONIC_RX_FILTER_MATCH_VLAN, + .vlan.vlan = vid, + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid, + ctx.comp.rx_filter_add.filter_id); + + return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx); +} + +static int +ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + }, + }; + struct ionic_rx_filter *f; + int err; + + IONIC_PRINT_CALL(); + + rte_spinlock_lock(&lif->rx_filters.lock); + + f = ionic_rx_filter_by_vlan(lif, vid); + if (!f) { + rte_spinlock_unlock(&lif->rx_filters.lock); + return -ENOENT; + } + + ctx.cmd.rx_filter_del.filter_id = f->filter_id; + ionic_rx_filter_free(f); + rte_spinlock_unlock(&lif->rx_filters.lock); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return 
err; + + IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid, + ctx.cmd.rx_filter_del.filter_id); + + return 0; +} + +int +ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, + int on) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + int err; + + if (on) + err = ionic_vlan_rx_add_vid(lif, vlan_id); + else + err = ionic_vlan_rx_kill_vid(lif, vlan_id); + + return err; +} + +static void +ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_mode_set = { + .opcode = IONIC_CMD_RX_MODE_SET, + .lif_index = lif->index, + .rx_mode = rx_mode, + }, + }; + int err; + + if (rx_mode & IONIC_RX_MODE_F_UNICAST) + IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST"); + if (rx_mode & IONIC_RX_MODE_F_MULTICAST) + IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST"); + if (rx_mode & IONIC_RX_MODE_F_BROADCAST) + IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST"); + if (rx_mode & IONIC_RX_MODE_F_PROMISC) + IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC"); + if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) + IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI"); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + IONIC_PRINT(ERR, "Failure setting RX mode"); +} + +static void +ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode) +{ + if (lif->rx_mode != rx_mode) { + lif->rx_mode = rx_mode; + ionic_lif_rx_mode(lif, rx_mode); + } +} + +int +ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t rx_mode = lif->rx_mode; + + IONIC_PRINT_CALL(); + + rx_mode |= IONIC_RX_MODE_F_PROMISC; + + ionic_set_rx_mode(lif, rx_mode); + + return 0; +} + +int +ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t rx_mode = lif->rx_mode; + + rx_mode &= ~IONIC_RX_MODE_F_PROMISC; + + ionic_set_rx_mode(lif, rx_mode); + + return 0; +} + +int +ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t rx_mode = lif->rx_mode; + + rx_mode |= IONIC_RX_MODE_F_ALLMULTI; + + ionic_set_rx_mode(lif, rx_mode); + + return 0; +} + +int +ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t rx_mode = lif->rx_mode; + + rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; + + ionic_set_rx_mode(lif, rx_mode); + + return 0; +} + +int +ionic_lif_change_mtu(struct ionic_lif *lif, int new_mtu) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = lif->index, + .attr = IONIC_LIF_ATTR_MTU, + .mtu = new_mtu, + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + lif->mtu = new_mtu; + + return 0; +} + +int +ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) +{ + struct ionic_adapter *adapter = lif->adapter; + struct ionic_dev *idev = &adapter->idev; + unsigned long index; + + /* + * Note: interrupt handler is called for index = 0 only + * (we use interrupts for the notifyq only anyway, + * which hash index = 0) + */ + + for (index = 0; index < adapter->nintrs; index++) + if (!adapter->intrs[index]) + break; + + if (index == adapter->nintrs) + return -ENOSPC; + + adapter->intrs[index] = true; + + ionic_intr_init(idev, intr, index); + + return 0; +} + +void +ionic_intr_free(struct ionic_lif *lif, struct ionic_intr_info *intr) 
+{ + if (intr->index != IONIC_INTR_INDEX_NOT_ASSIGNED) + lif->adapter->intrs[intr->index] = false; +} + +static int +ionic_qcq_alloc(struct ionic_lif *lif, uint8_t type, + uint32_t index, + const char *base, uint32_t flags, + uint32_t num_descs, + uint32_t desc_size, + uint32_t cq_desc_size, + uint32_t sg_desc_size, + uint32_t pid, struct ionic_qcq **qcq) +{ + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_qcq *new; + uint32_t q_size, cq_size, sg_size, total_size; + void *q_base, *cq_base, *sg_base; + rte_iova_t q_base_pa = 0; + rte_iova_t cq_base_pa = 0; + rte_iova_t sg_base_pa = 0; + uint32_t socket_id = rte_socket_id(); + int err; + + *qcq = NULL; + + q_size = num_descs * desc_size; + cq_size = num_descs * cq_desc_size; + sg_size = num_descs * sg_desc_size; + + total_size = RTE_ALIGN(q_size, PAGE_SIZE) + + RTE_ALIGN(cq_size, PAGE_SIZE); + /* + * Note: aligning q_size/cq_size is not enough due to cq_base address + * aligning as q_base could be not aligned to the page. + * Adding PAGE_SIZE. + */ + total_size += PAGE_SIZE; + + if (flags & IONIC_QCQ_F_SG) { + total_size += RTE_ALIGN(sg_size, PAGE_SIZE); + total_size += PAGE_SIZE; + } + + new = rte_zmalloc("ionic", sizeof(*new), 0); + if (!new) { + IONIC_PRINT(ERR, "Cannot allocate queue structure"); + return -ENOMEM; + } + + new->lif = lif; + new->flags = flags; + + new->q.info = rte_zmalloc("ionic", sizeof(*new->q.info) * num_descs, 0); + if (!new->q.info) { + IONIC_PRINT(ERR, "Cannot allocate queue info"); + return -ENOMEM; + } + + new->q.type = type; + + err = ionic_q_init(lif, idev, &new->q, index, num_descs, + desc_size, sg_desc_size, pid); + if (err) { + IONIC_PRINT(ERR, "Queue initialization failed"); + return err; + } + + if (flags & IONIC_QCQ_F_INTR) { + err = ionic_intr_alloc(lif, &new->intr); + if (err) + return err; + + ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index, + IONIC_INTR_MASK_SET); + } else { + new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; + } + + err = ionic_cq_init(lif, &new->cq, &new->intr, + num_descs, cq_desc_size); + if (err) { + IONIC_PRINT(ERR, "Completion queue initialization failed"); + goto err_out_free_intr; + } + + new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev, + base /* name */, index /* queue_idx */, + total_size, IONIC_ALIGN, socket_id); + + if (!new->base_z) { + IONIC_PRINT(ERR, "Cannot reserve queue DMA memory"); + err = -ENOMEM; + goto err_out_free_intr; + } + + new->base = new->base_z->addr; + new->base_pa = new->base_z->iova; + new->total_size = total_size; + + q_base = new->base; + q_base_pa = new->base_pa; + + cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE); + cq_base_pa = RTE_ALIGN(q_base_pa + q_size, PAGE_SIZE); + + if (flags & IONIC_QCQ_F_SG) { + sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size, + PAGE_SIZE); + sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, PAGE_SIZE); + ionic_q_sg_map(&new->q, sg_base, sg_base_pa); + } + + IONIC_PRINT(DEBUG, "Q-Base-PA = %ju CQ-Base-PA = %ju " + "SG-base-PA = %ju", + q_base_pa, cq_base_pa, sg_base_pa); + + ionic_q_map(&new->q, q_base, q_base_pa); + ionic_cq_map(&new->cq, cq_base, cq_base_pa); + ionic_cq_bind(&new->cq, &new->q); + + *qcq = new; + + return 0; + +err_out_free_intr: + if (flags & IONIC_QCQ_F_INTR) + ionic_intr_free(lif, &new->intr); + + return err; +} + +void +ionic_qcq_free(struct ionic_qcq *qcq) +{ + if (qcq->base_z) { + qcq->base = NULL; + qcq->base_pa = 0; + rte_memzone_free(qcq->base_z); + qcq->base_z = NULL; + } + + if (qcq->q.info) { + rte_free(qcq->q.info); + qcq->q.info = NULL; + } 
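+
+	/*
+	 * Editorial comment: the DMA memzone and the per-descriptor info
+	 * array are released above; the qcq container itself, allocated
+	 * with rte_zmalloc() in ionic_qcq_alloc(), is freed last below.
+	 */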
+ + rte_free(qcq); +} + +int +ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t nrxq_descs, + struct ionic_qcq **qcq) +{ + uint32_t flags; + int err = -ENOMEM; + + flags = IONIC_QCQ_F_SG; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, index, "rx", flags, + nrxq_descs, + sizeof(struct ionic_rxq_desc), + sizeof(struct ionic_rxq_comp), + sizeof(struct ionic_rxq_sg_desc), + lif->kern_pid, &lif->rxqcqs[index]); + if (err) + return err; + + *qcq = lif->rxqcqs[index]; + + return 0; +} + +int +ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t ntxq_descs, + struct ionic_qcq **qcq) +{ + uint32_t flags; + int err = -ENOMEM; + + flags = IONIC_QCQ_F_SG; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, index, "tx", flags, + ntxq_descs, + sizeof(struct ionic_txq_desc), + sizeof(struct ionic_txq_comp), + sizeof(struct ionic_txq_sg_desc), + lif->kern_pid, &lif->txqcqs[index]); + if (err) + return err; + + *qcq = lif->txqcqs[index]; + + return 0; +} + +static int +ionic_admin_qcq_alloc(struct ionic_lif *lif) +{ + uint32_t flags; + int err = -ENOMEM; + + flags = 0; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, + IONIC_ADMINQ_LENGTH, + sizeof(struct ionic_admin_cmd), + sizeof(struct ionic_admin_comp), + 0, + lif->kern_pid, &lif->adminqcq); + if (err) + return err; + + return 0; +} + +static int +ionic_notify_qcq_alloc(struct ionic_lif *lif) +{ + uint32_t flags; + int err = -ENOMEM; + + flags = IONIC_QCQ_F_NOTIFYQ | IONIC_QCQ_F_INTR; + + err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notify", + flags, + IONIC_NOTIFYQ_LENGTH, + sizeof(struct ionic_notifyq_cmd), + sizeof(union ionic_notifyq_comp), + 0, + lif->kern_pid, &lif->notifyqcq); + if (err) + return err; + + return 0; +} + +static void * +ionic_bus_map_dbpage(struct ionic_adapter *adapter, int page_num) +{ + char *vaddr = adapter->bars[IONIC_PCI_BAR_DBELL].vaddr; + + if (adapter->num_bars <= IONIC_PCI_BAR_DBELL) + return NULL; + + return (void *)&vaddr[page_num << PAGE_SHIFT]; +} + +int +ionic_lif_alloc(struct ionic_lif *lif) +{ + struct ionic_adapter *adapter = lif->adapter; + uint32_t socket_id = rte_socket_id(); + int dbpage_num; + int err; + + snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); + + IONIC_PRINT(DEBUG, "Allocating Lif Info"); + + rte_spinlock_init(&lif->adminq_lock); + rte_spinlock_init(&lif->adminq_service_lock); + + lif->kern_pid = 0; + + dbpage_num = ionic_db_page_num(lif, 0); + + lif->kern_dbpage = ionic_bus_map_dbpage(adapter, dbpage_num); + if (!lif->kern_dbpage) { + IONIC_PRINT(ERR, "Cannot map dbpage, aborting"); + return -ENOMEM; + } + + lif->txqcqs = rte_zmalloc("ionic", sizeof(*lif->txqcqs) * + adapter->max_ntxqs_per_lif, 0); + + if (!lif->txqcqs) { + IONIC_PRINT(ERR, "Cannot allocate tx queues array"); + return -ENOMEM; + } + + lif->rxqcqs = rte_zmalloc("ionic", sizeof(*lif->rxqcqs) * + adapter->max_nrxqs_per_lif, 0); + + if (!lif->rxqcqs) { + IONIC_PRINT(ERR, "Cannot allocate rx queues array"); + return -ENOMEM; + } + + IONIC_PRINT(DEBUG, "Allocating Notify Queue"); + + err = ionic_notify_qcq_alloc(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot allocate notify queue"); + return err; + } + + IONIC_PRINT(DEBUG, "Allocating Admin Queue"); + + IONIC_PRINT(DEBUG, "Allocating Admin Queue"); + + err = ionic_admin_qcq_alloc(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot allocate admin queue"); + return err; + } + + IONIC_PRINT(DEBUG, "Allocating Lif Info"); + + lif->info_sz = RTE_ALIGN(sizeof(*lif->info), PAGE_SIZE); + + lif->info_z = 
rte_eth_dma_zone_reserve(lif->eth_dev, + "lif_info", 0 /* queue_idx*/, + lif->info_sz, IONIC_ALIGN, socket_id); + if (!lif->info_z) { + IONIC_PRINT(ERR, "Cannot allocate lif info memory"); + return -ENOMEM; + } + + lif->info = lif->info_z->addr; + lif->info_pa = lif->info_z->iova; + + return 0; +} + +void +ionic_lif_free(struct ionic_lif *lif) +{ + if (lif->notifyqcq) { + ionic_qcq_free(lif->notifyqcq); + lif->notifyqcq = NULL; + } + + if (lif->adminqcq) { + ionic_qcq_free(lif->adminqcq); + lif->adminqcq = NULL; + } + + if (lif->txqcqs) { + rte_free(lif->txqcqs); + lif->txqcqs = NULL; + } + + if (lif->rxqcqs) { + rte_free(lif->rxqcqs); + lif->rxqcqs = NULL; + } + + if (lif->info) { + rte_memzone_free(lif->info_z); + lif->info = NULL; + } +} + +int +ionic_lif_rss_config(struct ionic_lif *lif, + const uint16_t types, const uint8_t *key, const uint32_t *indir) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .attr = IONIC_LIF_ATTR_RSS, + .rss.types = types, + .rss.addr = lif->rss_ind_tbl_pa, + }, + }; + unsigned int i; + + IONIC_PRINT_CALL(); + + lif->rss_types = types; + + if (key) + memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); + + if (indir) + for (i = 0; i < lif->adapter->ident.lif.eth.rss_ind_tbl_sz; i++) + lif->rss_ind_tbl[i] = indir[i]; + + memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, + IONIC_RSS_HASH_KEY_SIZE); + + return ionic_adminq_post_wait(lif, &ctx); +} + +static int +ionic_lif_rss_setup(struct ionic_lif *lif) +{ + size_t tbl_size = sizeof(*lif->rss_ind_tbl) * + lif->adapter->ident.lif.eth.rss_ind_tbl_sz; + static const uint8_t toeplitz_symmetric_key[] = { + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + }; + uint32_t socket_id = rte_socket_id(); + uint32_t i; + int err; + + IONIC_PRINT_CALL(); + + lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev, + "rss_ind_tbl", + 0 /* queue_idx*/, tbl_size, IONIC_ALIGN, socket_id); + + if (!lif->rss_ind_tbl_z) { + IONIC_PRINT(ERR, "OOM"); + return -ENOMEM; + } + + lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr; + lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova; + + /* Fill indirection table with 'default' values */ + for (i = 0; i < lif->adapter->ident.lif.eth.rss_ind_tbl_sz; i++) + lif->rss_ind_tbl[i] = i % lif->nrxqcqs; + + err = ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL, + toeplitz_symmetric_key, NULL); + if (err) + return err; + + return 0; +} + +static void +ionic_lif_rss_teardown(struct ionic_lif *lif) +{ + if (!lif->rss_ind_tbl) + return; + + if (lif->rss_ind_tbl_z) { + /* Disable RSS on the NIC */ + ionic_lif_rss_config(lif, 0x0, NULL, NULL); + + lif->rss_ind_tbl = NULL; + lif->rss_ind_tbl_pa = 0; + rte_memzone_free(lif->rss_ind_tbl_z); + lif->rss_ind_tbl_z = NULL; + } +} + +static void +ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct ionic_dev *idev = &lif->adapter->idev; + + if (!(qcq->flags & IONIC_QCQ_F_INITED)) + return; + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + + qcq->flags &= ~IONIC_QCQ_F_INITED; +} + +void +ionic_lif_txq_deinit(struct ionic_qcq *qcq) +{ + ionic_lif_qcq_deinit(qcq->lif, qcq); +} + +void +ionic_lif_rxq_deinit(struct ionic_qcq *qcq) +{ + ionic_lif_qcq_deinit(qcq->lif, qcq); +} + +bool 
+ionic_adminq_service(struct ionic_cq *cq, uint32_t cq_desc_index, + void *cb_arg __rte_unused) +{ + struct ionic_admin_comp *cq_desc_base = cq->base; + struct ionic_admin_comp *cq_desc = &cq_desc_base[cq_desc_index]; + + if (!color_match(cq_desc->color, cq->done_color)) + return false; + + ionic_q_service(cq->bound_q, cq_desc_index, cq_desc->comp_index, NULL); + + return true; +} + +/* This acts like ionic_napi */ +int +ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb, + void *cb_arg) +{ + struct ionic_cq *cq = &qcq->cq; + uint32_t work_done; + + work_done = ionic_cq_service(cq, budget, cb, cb_arg); + + return work_done; +} + +static void +ionic_link_status_check(struct ionic_lif *lif) +{ + struct ionic_adapter *adapter = lif->adapter; + bool link_up; + + lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED; + + if (!lif->info) + return; + + link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP); + + if ((link_up && adapter->link_up) || + (!link_up && !adapter->link_up)) + return; + + if (link_up) { + IONIC_PRINT(DEBUG, "Link up - %d Gbps", + lif->info->status.link_speed); + adapter->link_speed = lif->info->status.link_speed; + } else { + IONIC_PRINT(DEBUG, "Link down"); + } + + adapter->link_up = link_up; +} + +static bool +ionic_notifyq_cb(struct ionic_cq *cq, uint32_t cq_desc_index, void *cb_arg) +{ + union ionic_notifyq_comp *cq_desc_base = cq->base; + union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index]; + struct ionic_lif *lif = cb_arg; + + IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d", + cq_desc->event.eid, cq_desc->event.ecode); + + /* Have we run out of new completions to process? */ + if (!(cq_desc->event.eid > lif->last_eid)) + return false; + + lif->last_eid = cq_desc->event.eid; + + switch (cq_desc->event.ecode) { + case IONIC_EVENT_LINK_CHANGE: + IONIC_PRINT(DEBUG, + "Notifyq IONIC_EVENT_LINK_CHANGE eid=%jd link_status=%d link_speed=%d", + cq_desc->event.eid, + cq_desc->link_change.link_status, + cq_desc->link_change.link_speed); + + lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED; + + break; + default: + IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd", + cq_desc->event.ecode, cq_desc->event.eid); + break; + } + + return true; +} + +int +ionic_notifyq_handler(struct ionic_lif *lif, int budget) +{ + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_qcq *qcq = lif->notifyqcq; + uint32_t work_done; + + if (!(qcq->flags & IONIC_QCQ_F_INITED)) { + IONIC_PRINT(DEBUG, "Notifyq not yet initialized"); + return -1; + } + + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + + work_done = ionic_qcq_service(qcq, budget, ionic_notifyq_cb, lif); + + if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED) + ionic_link_status_check(lif); + + ionic_intr_credits(idev->intr_ctrl, qcq->intr.index, + work_done, IONIC_INTR_CRED_RESET_COALESCE); + + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + + return 0; +} + +static int +ionic_lif_adminq_init(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_qcq *qcq = lif->adminqcq; + struct ionic_queue *q = &qcq->q; + struct ionic_q_init_comp comp; + int err; + + ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) + return err; + + ionic_dev_cmd_comp(idev, &comp); + + q->hw_type = comp.hw_type; + q->hw_index = comp.hw_index; + q->db = ionic_db_map(lif, q); + + IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type); + 
IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index); + IONIC_PRINT(DEBUG, "adminq->db %p", q->db); + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + + qcq->flags |= IONIC_QCQ_F_INITED; + + return 0; +} + +static int +ionic_lif_notifyq_init(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_qcq *qcq = lif->notifyqcq; + struct ionic_queue *q = &qcq->q; + int err; + + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = lif->index, + .type = q->type, + .index = q->index, + .flags = (IONIC_QINIT_F_IRQ | IONIC_QINIT_F_ENA), + .intr_index = qcq->intr.index, + .pid = q->pid, + .ring_size = rte_log2_u32(q->num_descs), + .ring_base = q->base_pa, + } + }; + + IONIC_PRINT(DEBUG, "notifyq_init.pid %d", ctx.cmd.q_init.pid); + IONIC_PRINT(DEBUG, "notifyq_init.index %d", + ctx.cmd.q_init.index); + IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", + ctx.cmd.q_init.ring_base); + IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d", + ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = ctx.comp.q_init.hw_index; + q->db = NULL; + + IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type); + IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index); + IONIC_PRINT(DEBUG, "notifyq->db %p", q->db); + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + + qcq->flags |= IONIC_QCQ_F_INITED; + + return 0; +} + +int +ionic_lif_set_features(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = lif->index, + .attr = IONIC_LIF_ATTR_FEATURES, + .features = lif->features, + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + lif->hw_features = (ctx.cmd.lif_setattr.features & + ctx.comp.lif_setattr.features); + + if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG"); + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP"); + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER"); + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH"); + if (lif->hw_features & IONIC_ETH_HW_TX_SG) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG"); + if (lif->hw_features & IONIC_ETH_HW_RX_SG) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG"); + if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM"); + if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM"); + if (lif->hw_features & IONIC_ETH_HW_TSO) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6"); + if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN"); + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE"); + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4"); + if 
(lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6"); + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP"); + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM"); + + return 0; +} + +int +ionic_lif_txq_init(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = qcq->lif; + struct ionic_cq *cq = &qcq->cq; + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = lif->index, + .type = q->type, + .index = q->index, + .flags = IONIC_QINIT_F_SG, + .intr_index = cq->bound_intr->index, + .pid = q->pid, + .ring_size = rte_log2_u32(q->num_descs), + .ring_base = q->base_pa, + .cq_ring_base = cq->base_pa, + .sg_ring_base = q->sg_base_pa, + }, + }; + int err; + + IONIC_PRINT(DEBUG, "txq_init.pid %d", ctx.cmd.q_init.pid); + IONIC_PRINT(DEBUG, "txq_init.index %d", ctx.cmd.q_init.index); + IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", + ctx.cmd.q_init.ring_base); + IONIC_PRINT(DEBUG, "txq_init.ring_size %d", + ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(qcq->lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = ctx.comp.q_init.hw_index; + q->db = ionic_db_map(lif, q); + + IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type); + IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index); + IONIC_PRINT(DEBUG, "txq->db %p", q->db); + + qcq->flags |= IONIC_QCQ_F_INITED; + + return 0; +} + +int +ionic_lif_rxq_init(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = qcq->lif; + struct ionic_cq *cq = &qcq->cq; + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = lif->index, + .type = q->type, + .index = q->index, + .flags = IONIC_QINIT_F_SG, + .intr_index = cq->bound_intr->index, + .pid = q->pid, + .ring_size = rte_log2_u32(q->num_descs), + .ring_base = q->base_pa, + .cq_ring_base = cq->base_pa, + .sg_ring_base = q->sg_base_pa, + }, + }; + int err; + + IONIC_PRINT(DEBUG, "rxq_init.pid %d", ctx.cmd.q_init.pid); + IONIC_PRINT(DEBUG, "rxq_init.index %d", ctx.cmd.q_init.index); + IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", + ctx.cmd.q_init.ring_base); + IONIC_PRINT(DEBUG, "rxq_init.ring_size %d", + ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(qcq->lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = ctx.comp.q_init.hw_index; + q->db = ionic_db_map(lif, q); + + qcq->flags |= IONIC_QCQ_F_INITED; + + IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type); + IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index); + IONIC_PRINT(DEBUG, "rxq->db %p", q->db); + + return 0; +} + +static int +ionic_station_set(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.lif_getattr = { + .opcode = IONIC_CMD_LIF_GETATTR, + .index = lif->index, + .attr = IONIC_LIF_ATTR_MAC, + }, + }; + int err; + + IONIC_PRINT_CALL(); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + if (!rte_is_zero_ether_addr((struct rte_ether_addr *) + lif->mac_addr)) { + IONIC_PRINT(INFO, "deleting station MAC addr"); + + ionic_lif_addr_del(lif, lif->mac_addr); + } + + memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN); + + if (rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) { + IONIC_PRINT(NOTICE, "empty MAC addr 
(VF?)"); + return 0; + } + + IONIC_PRINT(DEBUG, "adding station MAC addr"); + + ionic_lif_addr_add(lif, lif->mac_addr); + + return 0; +} + +static void +ionic_lif_set_name(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = lif->index, + .attr = IONIC_LIF_ATTR_NAME, + }, + }; + + snprintf(ctx.cmd.lif_setattr.name, sizeof(ctx.cmd.lif_setattr.name), + "%d", lif->port_id); + + ionic_adminq_post_wait(lif, &ctx); +} + +int +ionic_lif_init(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_q_init_comp comp; + int err; + + memset(&lif->stats_base, 0, sizeof(lif->stats_base)); + + ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + ionic_dev_cmd_comp(idev, &comp); + if (err) + return err; + + lif->hw_index = comp.hw_index; + + err = ionic_lif_adminq_init(lif); + if (err) + return err; + + err = ionic_lif_notifyq_init(lif); + if (err) + goto err_out_adminq_deinit; + + lif->features = + IONIC_ETH_HW_VLAN_TX_TAG + | IONIC_ETH_HW_VLAN_RX_STRIP + | IONIC_ETH_HW_VLAN_RX_FILTER + | IONIC_ETH_HW_RX_HASH + | IONIC_ETH_HW_TX_SG + | IONIC_ETH_HW_RX_SG + | IONIC_ETH_HW_TX_CSUM + | IONIC_ETH_HW_RX_CSUM + | IONIC_ETH_HW_TSO + | IONIC_ETH_HW_TSO_IPV6 + | IONIC_ETH_HW_TSO_ECN; + + err = ionic_lif_set_features(lif); + if (err) + goto err_out_notifyq_deinit; + + err = ionic_rx_filters_init(lif); + if (err) + goto err_out_notifyq_deinit; + + err = ionic_station_set(lif); + if (err) + goto err_out_rx_filter_deinit; + + ionic_lif_set_name(lif); + + lif->state |= IONIC_LIF_F_INITED; + + return 0; + +err_out_rx_filter_deinit: + ionic_rx_filters_deinit(lif); + +err_out_notifyq_deinit: + ionic_lif_qcq_deinit(lif, lif->notifyqcq); + +err_out_adminq_deinit: + ionic_lif_qcq_deinit(lif, lif->adminqcq); + + return err; +} + +void +ionic_lif_deinit(struct ionic_lif *lif) +{ + if (!(lif->state & IONIC_LIF_F_INITED)) + return; + + ionic_rx_filters_deinit(lif); + ionic_lif_rss_teardown(lif); + ionic_lif_qcq_deinit(lif, lif->notifyqcq); + ionic_lif_qcq_deinit(lif, lif->adminqcq); + + lif->state &= ~IONIC_LIF_F_INITED; +} + +int +ionic_lif_configure(struct ionic_lif *lif) +{ + struct ionic_identity *ident = &lif->adapter->ident; + uint32_t ntxqs_per_lif = + ident->lif.eth.config.queue_count[IONIC_QTYPE_TXQ]; + uint32_t nrxqs_per_lif = + ident->lif.eth.config.queue_count[IONIC_QTYPE_RXQ]; + uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues; + uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues; + + lif->port_id = lif->eth_dev->data->port_id; + + IONIC_PRINT(DEBUG, "Configuring LIF on port %u", + lif->port_id); + + if (nrxqs > 0) + nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs); + + if (ntxqs > 0) + ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs); + + lif->nrxqcqs = nrxqs_per_lif; + lif->ntxqcqs = ntxqs_per_lif; + + return 0; +} + +int +ionic_lif_start(struct ionic_lif *lif) +{ + uint32_t rx_mode = 0; + uint32_t i; + int err; + + IONIC_PRINT(DEBUG, "Setting RSS configuration on port %u", + lif->port_id); + + err = ionic_lif_rss_setup(lif); + if (err) + return err; + + IONIC_PRINT(DEBUG, "Setting RX mode on port %u", + lif->port_id); + + rx_mode |= IONIC_RX_MODE_F_UNICAST; + rx_mode |= IONIC_RX_MODE_F_MULTICAST; + rx_mode |= IONIC_RX_MODE_F_BROADCAST; + + lif->rx_mode = 0; /* set by ionic_set_rx_mode */ + + ionic_set_rx_mode(lif, rx_mode); + + IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues " + "on port %u", + lif->nrxqcqs, 
lif->ntxqcqs, lif->port_id); + + for (i = 0; i < lif->nrxqcqs; i++) { + struct ionic_qcq *rxq = lif->rxqcqs[i]; + if (!rxq->deferred_start) { + err = ionic_dev_rx_queue_start(lif->eth_dev, i); + + if (err) + return err; + } + } + + for (i = 0; i < lif->ntxqcqs; i++) { + struct ionic_qcq *txq = lif->txqcqs[i]; + if (!txq->deferred_start) { + err = ionic_dev_tx_queue_start(lif->eth_dev, i); + + if (err) + return err; + } + } + + ionic_link_status_check(lif); + + /* Carrier ON here */ + + return 0; +} + +int +ionic_lif_identify(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + struct ionic_identity *ident = &adapter->ident; + int err; + unsigned int i; + unsigned int lif_words = sizeof(ident->lif.words) / + sizeof(ident->lif.words[0]); + unsigned int cmd_words = sizeof(idev->dev_cmd->data) / + sizeof(idev->dev_cmd->data[0]); + unsigned int nwords; + + ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC, + IONIC_IDENTITY_VERSION_1); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) + return (err); + + nwords = RTE_MIN(lif_words, cmd_words); + for (i = 0; i < nwords; i++) + ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]); + + IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ", + ident->lif.capabilities); + + IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ", + ident->lif.eth.max_ucast_filters); + IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ", + ident->lif.eth.max_mcast_filters); + + IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ", + ident->lif.eth.config.features); + IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ", + ident->lif.eth.config.queue_count[IONIC_QTYPE_ADMINQ]); + IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ", + ident->lif.eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]); + IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ", + ident->lif.eth.config.queue_count[IONIC_QTYPE_RXQ]); + IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ", + ident->lif.eth.config.queue_count[IONIC_QTYPE_TXQ]); + + return 0; +} + +int +ionic_lifs_size(struct ionic_adapter *adapter) +{ + struct ionic_identity *ident = &adapter->ident; + uint32_t nlifs = ident->dev.nlifs; + uint32_t nintrs, dev_nintrs = ident->dev.nintrs; + + adapter->max_ntxqs_per_lif = + ident->lif.eth.config.queue_count[IONIC_QTYPE_TXQ]; + adapter->max_nrxqs_per_lif = + ident->lif.eth.config.queue_count[IONIC_QTYPE_RXQ]; + + nintrs = nlifs * 1 /* notifyq */; + + if (nintrs > dev_nintrs) { + IONIC_PRINT(ERR, "At most %d intr queues supported, minimum required is %u", + dev_nintrs, nintrs); + return -ENOSPC; + } + + adapter->nintrs = nintrs; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_lif.h b/src/spdk/dpdk/drivers/net/ionic/ionic_lif.h new file mode 100644 index 000000000..425762d65 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_lif.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ */ + +#ifndef _IONIC_LIF_H_ +#define _IONIC_LIF_H_ + +#include + +#include +#include + +#include "ionic_osdep.h" +#include "ionic_dev.h" +#include "ionic_rx_filter.h" + +#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */ +#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */ + +#define IONIC_RSS_OFFLOAD_ALL ( \ + IONIC_RSS_TYPE_IPV4 | \ + IONIC_RSS_TYPE_IPV4_TCP | \ + IONIC_RSS_TYPE_IPV4_UDP | \ + IONIC_RSS_TYPE_IPV6 | \ + IONIC_RSS_TYPE_IPV6_TCP | \ + IONIC_RSS_TYPE_IPV6_UDP) + +#define IONIC_GET_SG_CNTR_IDX(num_sg_elems) (num_sg_elems) + +struct ionic_tx_stats { + uint64_t packets; + uint64_t bytes; + uint64_t drop; + uint64_t stop; + uint64_t no_csum; + uint64_t tso; + uint64_t frags; +}; + +struct ionic_rx_stats { + uint64_t packets; + uint64_t bytes; + uint64_t no_cb_arg; + uint64_t bad_cq_status; + uint64_t no_room; + uint64_t bad_len; +}; + +#define IONIC_QCQ_F_INITED BIT(0) +#define IONIC_QCQ_F_SG BIT(1) +#define IONIC_QCQ_F_INTR BIT(2) +#define IONIC_QCQ_F_NOTIFYQ BIT(3) + +/* Queue / Completion Queue */ +struct ionic_qcq { + uint64_t offloads; + struct ionic_queue q; /**< Queue */ + struct ionic_cq cq; /**< Completion Queue */ + struct ionic_lif *lif; /**< LIF */ + struct rte_mempool *mb_pool; /**< mbuf pool to populate the RX ring */ + union { + struct ionic_tx_stats tx; + struct ionic_rx_stats rx; + } stats; + const struct rte_memzone *base_z; + void *base; + rte_iova_t base_pa; + uint32_t total_size; + uint32_t flags; + struct ionic_intr_info intr; + bool deferred_start; +}; + +#define IONIC_Q_TO_QCQ(q) container_of(q, struct ionic_qcq, q) +#define IONIC_Q_TO_TX_STATS(q) (&IONIC_Q_TO_QCQ(q)->stats.tx) +#define IONIC_Q_TO_RX_STATS(q) (&IONIC_Q_TO_QCQ(q)->stats.rx) + +#define IONIC_LIF_F_INITED BIT(0) +#define IONIC_LIF_F_LINK_CHECK_NEEDED BIT(1) + +#define IONIC_LIF_NAME_MAX_SZ (32) + +struct ionic_lif { + struct ionic_adapter *adapter; + struct rte_eth_dev *eth_dev; + uint16_t port_id; /**< Device port identifier */ + uint16_t mtu; + uint32_t index; + uint32_t hw_index; + uint32_t state; + uint32_t ntxqcqs; + uint32_t nrxqcqs; + uint32_t kern_pid; + rte_spinlock_t adminq_lock; + rte_spinlock_t adminq_service_lock; + struct ionic_qcq *adminqcq; + struct ionic_qcq *notifyqcq; + struct ionic_qcq **txqcqs; + struct ionic_qcq **rxqcqs; + struct ionic_rx_filters rx_filters; + struct ionic_doorbell __iomem *kern_dbpage; + uint64_t last_eid; + uint64_t features; + uint32_t hw_features; + uint32_t rx_mode; + char name[IONIC_LIF_NAME_MAX_SZ]; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + uint16_t rss_types; + uint8_t rss_hash_key[IONIC_RSS_HASH_KEY_SIZE]; + uint8_t *rss_ind_tbl; + rte_iova_t rss_ind_tbl_pa; + const struct rte_memzone *rss_ind_tbl_z; + uint32_t info_sz; + struct ionic_lif_info *info; + rte_iova_t info_pa; + const struct rte_memzone *info_z; + struct rte_eth_stats stats_base; + struct ionic_lif_stats lif_stats_base; +}; + +int ionic_lif_identify(struct ionic_adapter *adapter); +int ionic_lifs_size(struct ionic_adapter *ionic); + +int ionic_lif_alloc(struct ionic_lif *lif); +void ionic_lif_free(struct ionic_lif *lif); + +int ionic_lif_init(struct ionic_lif *lif); +void ionic_lif_deinit(struct ionic_lif *lif); + +int ionic_lif_start(struct ionic_lif *lif); +int ionic_lif_stop(struct ionic_lif *lif); + +int ionic_lif_configure(struct ionic_lif *lif); +void ionic_lif_reset(struct ionic_lif *lif); + +int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr); +void ionic_intr_free(struct ionic_lif *lif, struct ionic_intr_info *intr); + +bool 
ionic_adminq_service(struct ionic_cq *cq, uint32_t cq_desc_index, + void *cb_arg); +int ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb, + void *cb_arg); + +int ionic_lif_change_mtu(struct ionic_lif *lif, int new_mtu); + +int ionic_dev_add_mac(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mac_addr, + uint32_t index __rte_unused, uint32_t pool __rte_unused); +void ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, + uint32_t index __rte_unused); +int ionic_dev_set_mac(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mac_addr); +int ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, + int on); +int ionic_dev_promiscuous_enable(struct rte_eth_dev *dev); +int ionic_dev_promiscuous_disable(struct rte_eth_dev *dev); +int ionic_dev_allmulticast_enable(struct rte_eth_dev *dev); +int ionic_dev_allmulticast_disable(struct rte_eth_dev *dev); + +int ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t index, + uint16_t nrxq_descs, struct ionic_qcq **qcq); +int ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t index, + uint16_t ntxq_descs, struct ionic_qcq **qcq); +void ionic_qcq_free(struct ionic_qcq *qcq); + +int ionic_qcq_enable(struct ionic_qcq *qcq); +int ionic_qcq_disable(struct ionic_qcq *qcq); + +int ionic_lif_rxq_init(struct ionic_qcq *qcq); +void ionic_lif_rxq_deinit(struct ionic_qcq *qcq); + +int ionic_lif_txq_init(struct ionic_qcq *qcq); +void ionic_lif_txq_deinit(struct ionic_qcq *qcq); + +int ionic_lif_rss_config(struct ionic_lif *lif, const uint16_t types, + const uint8_t *key, const uint32_t *indir); + +int ionic_lif_set_features(struct ionic_lif *lif); + +void ionic_lif_get_stats(const struct ionic_lif *lif, + struct rte_eth_stats *stats); +void ionic_lif_reset_stats(struct ionic_lif *lif); + +void ionic_lif_get_hw_stats(struct ionic_lif *lif, + struct ionic_lif_stats *stats); +void ionic_lif_reset_hw_stats(struct ionic_lif *lif); + +int ionic_notifyq_handler(struct ionic_lif *lif, int budget); + +#endif /* _IONIC_LIF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_logs.h b/src/spdk/dpdk/drivers/net/ionic/ionic_logs.h new file mode 100644 index 000000000..bc10ad174 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_logs.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_LOGS_H_ +#define _IONIC_LOGS_H_ + +#include + +extern int ionic_logtype; + +#define IONIC_PRINT(level, fmt, args...) rte_log(RTE_LOG_ ## level, \ + ionic_logtype, "%s(): " fmt "\n", __func__, ##args) + +#define IONIC_PRINT_CALL() IONIC_PRINT(DEBUG, " >>") + +#ifndef IONIC_WARN_ON +#define IONIC_WARN_ON(x) do { \ + int ret = !!(x); \ + if (unlikely(ret)) \ + IONIC_PRINT(WARNING, "WARN_ON: \"" #x "\" at %s:%d\n", \ + __func__, __LINE__); \ +} while (0) +#endif + +#endif /* _IONIC_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.c b/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.c new file mode 100644 index 000000000..c0ea042bc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.c @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ */ + +#include + +#include "ionic_mac_api.h" + +int32_t +ionic_init_mac(struct ionic_hw *hw) +{ + int err = 0; + + IONIC_PRINT_CALL(); + + /* + * Set the mac type + */ + ionic_set_mac_type(hw); + + switch (hw->mac.type) { + case IONIC_MAC_CAPRI: + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +int32_t +ionic_set_mac_type(struct ionic_hw *hw) +{ + int err = 0; + + IONIC_PRINT_CALL(); + + if (hw->vendor_id != IONIC_PENSANDO_VENDOR_ID) { + IONIC_PRINT(ERR, "Unsupported vendor id: %" PRIx32 "", + hw->vendor_id); + return -EINVAL; + } + + switch (hw->device_id) { + case IONIC_DEV_ID_ETH_PF: + case IONIC_DEV_ID_ETH_VF: + case IONIC_DEV_ID_ETH_MGMT: + hw->mac.type = IONIC_MAC_CAPRI; + break; + default: + err = -EINVAL; + IONIC_PRINT(ERR, "Unsupported device id: %" PRIx32 "", + hw->device_id); + break; + } + + IONIC_PRINT(INFO, "Mac: %d (%d)", + hw->mac.type, err); + + return err; +} + diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.h b/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.h new file mode 100644 index 000000000..ed9e059a6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_API_H_ +#define _IONIC_API_H_ + +#include "ionic.h" + +int32_t ionic_init_mac(struct ionic_hw *hw); +int32_t ionic_set_mac_type(struct ionic_hw *hw); + +#endif /* _IONIC_API_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_main.c b/src/spdk/dpdk/drivers/net/ionic/ionic_main.c new file mode 100644 index 000000000..2ade213d2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_main.c @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ */ + +#include + +#include + +#include "ionic.h" +#include "ionic_ethdev.h" +#include "ionic_lif.h" + +static const char * +ionic_error_to_str(enum ionic_status_code code) +{ + switch (code) { + case IONIC_RC_SUCCESS: + return "IONIC_RC_SUCCESS"; + case IONIC_RC_EVERSION: + return "IONIC_RC_EVERSION"; + case IONIC_RC_EOPCODE: + return "IONIC_RC_EOPCODE"; + case IONIC_RC_EIO: + return "IONIC_RC_EIO"; + case IONIC_RC_EPERM: + return "IONIC_RC_EPERM"; + case IONIC_RC_EQID: + return "IONIC_RC_EQID"; + case IONIC_RC_EQTYPE: + return "IONIC_RC_EQTYPE"; + case IONIC_RC_ENOENT: + return "IONIC_RC_ENOENT"; + case IONIC_RC_EINTR: + return "IONIC_RC_EINTR"; + case IONIC_RC_EAGAIN: + return "IONIC_RC_EAGAIN"; + case IONIC_RC_ENOMEM: + return "IONIC_RC_ENOMEM"; + case IONIC_RC_EFAULT: + return "IONIC_RC_EFAULT"; + case IONIC_RC_EBUSY: + return "IONIC_RC_EBUSY"; + case IONIC_RC_EEXIST: + return "IONIC_RC_EEXIST"; + case IONIC_RC_EINVAL: + return "IONIC_RC_EINVAL"; + case IONIC_RC_ENOSPC: + return "IONIC_RC_ENOSPC"; + case IONIC_RC_ERANGE: + return "IONIC_RC_ERANGE"; + case IONIC_RC_BAD_ADDR: + return "IONIC_RC_BAD_ADDR"; + case IONIC_RC_DEV_CMD: + return "IONIC_RC_DEV_CMD"; + case IONIC_RC_ERROR: + return "IONIC_RC_ERROR"; + case IONIC_RC_ERDMA: + return "IONIC_RC_ERDMA"; + default: + return "IONIC_RC_UNKNOWN"; + } +} + +static const char * +ionic_opcode_to_str(enum ionic_cmd_opcode opcode) +{ + switch (opcode) { + case IONIC_CMD_NOP: + return "IONIC_CMD_NOP"; + case IONIC_CMD_INIT: + return "IONIC_CMD_INIT"; + case IONIC_CMD_RESET: + return "IONIC_CMD_RESET"; + case IONIC_CMD_IDENTIFY: + return "IONIC_CMD_IDENTIFY"; + case IONIC_CMD_GETATTR: + return "IONIC_CMD_GETATTR"; + case IONIC_CMD_SETATTR: + return "IONIC_CMD_SETATTR"; + case IONIC_CMD_PORT_IDENTIFY: + return "IONIC_CMD_PORT_IDENTIFY"; + case IONIC_CMD_PORT_INIT: + return "IONIC_CMD_PORT_INIT"; + case IONIC_CMD_PORT_RESET: + return "IONIC_CMD_PORT_RESET"; + case IONIC_CMD_PORT_GETATTR: + return "IONIC_CMD_PORT_GETATTR"; + case IONIC_CMD_PORT_SETATTR: + return "IONIC_CMD_PORT_SETATTR"; + case IONIC_CMD_LIF_INIT: + return "IONIC_CMD_LIF_INIT"; + case IONIC_CMD_LIF_RESET: + return "IONIC_CMD_LIF_RESET"; + case IONIC_CMD_LIF_IDENTIFY: + return "IONIC_CMD_LIF_IDENTIFY"; + case IONIC_CMD_LIF_SETATTR: + return "IONIC_CMD_LIF_SETATTR"; + case IONIC_CMD_LIF_GETATTR: + return "IONIC_CMD_LIF_GETATTR"; + case IONIC_CMD_RX_MODE_SET: + return "IONIC_CMD_RX_MODE_SET"; + case IONIC_CMD_RX_FILTER_ADD: + return "IONIC_CMD_RX_FILTER_ADD"; + case IONIC_CMD_RX_FILTER_DEL: + return "IONIC_CMD_RX_FILTER_DEL"; + case IONIC_CMD_Q_INIT: + return "IONIC_CMD_Q_INIT"; + case IONIC_CMD_Q_CONTROL: + return "IONIC_CMD_Q_CONTROL"; + case IONIC_CMD_RDMA_RESET_LIF: + return "IONIC_CMD_RDMA_RESET_LIF"; + case IONIC_CMD_RDMA_CREATE_EQ: + return "IONIC_CMD_RDMA_CREATE_EQ"; + case IONIC_CMD_RDMA_CREATE_CQ: + return "IONIC_CMD_RDMA_CREATE_CQ"; + case IONIC_CMD_RDMA_CREATE_ADMINQ: + return "IONIC_CMD_RDMA_CREATE_ADMINQ"; + default: + return "DEVCMD_UNKNOWN"; + } +} + +int +ionic_adminq_check_err(struct ionic_admin_ctx *ctx, bool timeout) +{ + const char *name; + const char *status; + + if (ctx->comp.comp.status || timeout) { + name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + status = ionic_error_to_str(ctx->comp.comp.status); + IONIC_PRINT(ERR, "%s (%d) failed: %s (%d)", + name, + ctx->cmd.cmd.opcode, + timeout ? "TIMEOUT" : status, + timeout ? 
-1 : ctx->comp.comp.status); + return -EIO; + } + + return 0; +} + +static int +ionic_wait_ctx_for_completion(struct ionic_lif *lif, struct ionic_qcq *qcq, + struct ionic_admin_ctx *ctx, unsigned long max_wait) +{ + unsigned long step_msec = 1; + unsigned int max_wait_msec = max_wait * 1000; + unsigned long elapsed_msec = 0; + int budget = 8; + + while (ctx->pending_work && elapsed_msec < max_wait_msec) { + /* + * Locking here as adminq is served inline (this could be called + * from multiple places) + */ + rte_spinlock_lock(&lif->adminq_service_lock); + + ionic_qcq_service(qcq, budget, ionic_adminq_service, NULL); + + rte_spinlock_unlock(&lif->adminq_service_lock); + + msec_delay(step_msec); + elapsed_msec += step_msec; + } + + return (!ctx->pending_work); +} + +int +ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + struct ionic_qcq *qcq = lif->adminqcq; + bool done; + int err; + + IONIC_PRINT(DEBUG, "Sending %s to the admin queue", + ionic_opcode_to_str(ctx->cmd.cmd.opcode)); + + err = ionic_adminq_post(lif, ctx); + if (err) { + IONIC_PRINT(ERR, "Failure posting to the admin queue %d (%d)", + ctx->cmd.cmd.opcode, err); + + return err; + } + + done = ionic_wait_ctx_for_completion(lif, qcq, ctx, + IONIC_DEVCMD_TIMEOUT); + + err = ionic_adminq_check_err(ctx, !done /* timed out */); + return err; +} + +static int +ionic_dev_cmd_wait(struct ionic_dev *idev, unsigned long max_wait) +{ + unsigned long step_msec = 100; + unsigned int max_wait_msec = max_wait * 1000; + unsigned long elapsed_msec = 0; + int done; + + /* Wait for dev cmd to complete.. but no more than max_wait sec */ + + do { + done = ionic_dev_cmd_done(idev); + if (done) { + IONIC_PRINT(DEBUG, "DEVCMD %d done took %ld msecs", + idev->dev_cmd->cmd.cmd.opcode, + elapsed_msec); + return 0; + } + + msec_delay(step_msec); + + elapsed_msec += step_msec; + } while (elapsed_msec < max_wait_msec); + + IONIC_PRINT(DEBUG, "DEVCMD %d timeout after %ld msecs", + idev->dev_cmd->cmd.cmd.opcode, + elapsed_msec); + + return -ETIMEDOUT; +} + +static int +ionic_dev_cmd_check_error(struct ionic_dev *idev) +{ + uint8_t status; + + status = ionic_dev_cmd_status(idev); + if (status == 0) + return 0; + + return -EIO; +} + +int +ionic_dev_cmd_wait_check(struct ionic_dev *idev, unsigned long max_wait) +{ + int err; + + err = ionic_dev_cmd_wait(idev, max_wait); + if (err) + return err; + + return ionic_dev_cmd_check_error(idev); +} + +int +ionic_setup(struct ionic_adapter *adapter) +{ + return ionic_dev_setup(adapter); +} + +int +ionic_identify(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + struct ionic_identity *ident = &adapter->ident; + int err = 0; + uint32_t i; + unsigned int nwords; + uint32_t drv_size = sizeof(ident->drv.words) / + sizeof(ident->drv.words[0]); + uint32_t cmd_size = sizeof(idev->dev_cmd->data) / + sizeof(idev->dev_cmd->data[0]); + uint32_t dev_size = sizeof(ident->dev.words) / + sizeof(ident->dev.words[0]); + + memset(ident, 0, sizeof(*ident)); + + ident->drv.os_type = IONIC_OS_TYPE_LINUX; + ident->drv.os_dist = 0; + snprintf(ident->drv.os_dist_str, + sizeof(ident->drv.os_dist_str), "Unknown"); + ident->drv.kernel_ver = 0; + snprintf(ident->drv.kernel_ver_str, + sizeof(ident->drv.kernel_ver_str), "DPDK"); + strncpy(ident->drv.driver_ver_str, IONIC_DRV_VERSION, + sizeof(ident->drv.driver_ver_str) - 1); + + nwords = RTE_MIN(drv_size, cmd_size); + for (i = 0; i < nwords; i++) + iowrite32(ident->drv.words[i], &idev->dev_cmd->data[i]); + + ionic_dev_cmd_identify(idev, 
IONIC_IDENTITY_VERSION_1); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (!err) { + nwords = RTE_MIN(dev_size, cmd_size); + for (i = 0; i < nwords; i++) + ident->dev.words[i] = ioread32(&idev->dev_cmd->data[i]); + } + + return err; +} + +int +ionic_init(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + int err; + + ionic_dev_cmd_init(idev); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + return err; +} + +int +ionic_reset(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + int err; + + ionic_dev_cmd_reset(idev); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + return err; +} + +int +ionic_port_identify(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + struct ionic_identity *ident = &adapter->ident; + unsigned int port_words = sizeof(ident->port.words) / + sizeof(ident->port.words[0]); + unsigned int cmd_words = sizeof(idev->dev_cmd->data) / + sizeof(idev->dev_cmd->data[0]); + unsigned int i; + unsigned int nwords; + int err; + + ionic_dev_cmd_port_identify(idev); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (!err) { + nwords = RTE_MIN(port_words, cmd_words); + for (i = 0; i < nwords; i++) + ident->port.words[i] = + ioread32(&idev->dev_cmd->data[i]); + } + + IONIC_PRINT(INFO, "speed %d ", ident->port.config.speed); + IONIC_PRINT(INFO, "mtu %d ", ident->port.config.mtu); + IONIC_PRINT(INFO, "state %d ", ident->port.config.state); + IONIC_PRINT(INFO, "an_enable %d ", ident->port.config.an_enable); + IONIC_PRINT(INFO, "fec_type %d ", ident->port.config.fec_type); + IONIC_PRINT(INFO, "pause_type %d ", ident->port.config.pause_type); + IONIC_PRINT(INFO, "loopback_mode %d", + ident->port.config.loopback_mode); + + return err; +} + +static const struct rte_memzone * +ionic_memzone_reserve(const char *name, uint32_t len, int socket_id) +{ + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(name); + if (mz) + return mz; + + mz = rte_memzone_reserve_aligned(name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, IONIC_ALIGN); + return mz; +} + +int +ionic_port_init(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + struct ionic_identity *ident = &adapter->ident; + char z_name[RTE_MEMZONE_NAMESIZE]; + unsigned int config_words = sizeof(ident->port.config.words) / + sizeof(ident->port.config.words[0]); + unsigned int cmd_words = sizeof(idev->dev_cmd->data) / + sizeof(idev->dev_cmd->data[0]); + unsigned int nwords; + unsigned int i; + int err; + + if (idev->port_info) + return 0; + + idev->port_info_sz = RTE_ALIGN(sizeof(*idev->port_info), PAGE_SIZE); + + snprintf(z_name, sizeof(z_name), "%s_port_%s_info", + IONIC_DRV_NAME, + adapter->pci_dev->device.name); + + idev->port_info_z = ionic_memzone_reserve(z_name, idev->port_info_sz, + SOCKET_ID_ANY); + if (!idev->port_info_z) { + IONIC_PRINT(ERR, "Cannot reserve port info DMA memory"); + return -ENOMEM; + } + + idev->port_info = idev->port_info_z->addr; + idev->port_info_pa = idev->port_info_z->iova; + + nwords = RTE_MIN(config_words, cmd_words); + + for (i = 0; i < nwords; i++) + iowrite32(ident->port.config.words[i], &idev->dev_cmd->data[i]); + + ionic_dev_cmd_port_init(idev); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) { + IONIC_PRINT(ERR, "Failed to init port"); + return err; + } + + ionic_dev_cmd_port_state(idev, IONIC_PORT_ADMIN_STATE_UP); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) { + IONIC_PRINT(WARNING, 
"Failed to bring port UP"); + return err; + } + + return 0; +} + +int +ionic_port_reset(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + int err; + + if (!idev->port_info) + return 0; + + ionic_dev_cmd_port_reset(idev); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) { + IONIC_PRINT(ERR, "Failed to reset port"); + return err; + } + + idev->port_info = NULL; + idev->port_info_pa = 0; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_osdep.h b/src/spdk/dpdk/drivers/net/ionic/ionic_osdep.h new file mode 100644 index 000000000..e04bb8f65 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_osdep.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_OSDEP_ +#define _IONIC_OSDEP_ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ionic_logs.h" + +#define DELAY(x) rte_delay_us(x) +#define usec_delay(x) DELAY(x) +#define msec_delay(x) DELAY(1000 * (x)) + +#define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) +#define BITS_TO_LONGS(nr) div_round_up(nr, 8 * sizeof(long)) + +#ifndef PAGE_SHIFT +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1 << PAGE_SHIFT) +#endif + +#define __iomem + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef uint16_t __le16; +typedef uint32_t __le32; +typedef uint64_t __le64; + +static inline uint32_t div_round_up(uint32_t n, uint32_t d) +{ + return (n + d - 1) / d; +} + +#define ioread8(reg) rte_read8(reg) +#define ioread32(reg) rte_read32(reg) +#define iowrite8(value, reg) rte_write8(value, reg) +#define iowrite32(value, reg) rte_write32(value, reg) +#define writeq(value, reg) rte_write64(value, reg) + +#endif diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_regs.h b/src/spdk/dpdk/drivers/net/ionic/ionic_regs.h new file mode 100644 index 000000000..3adc2bc7c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_regs.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_REGS_H_ +#define _IONIC_REGS_H_ + +/** struct ionic_intr - interrupt control register set. + * @coal_init: coalesce timer initial value. + * @mask: interrupt mask value. + * @credits: interrupt credit count and return. + * @mask_assert: interrupt mask value on assert. + * @coal: coalesce timer time remaining. + */ +struct ionic_intr { + uint32_t coal_init; + uint32_t mask; + uint32_t credits; + uint32_t mask_assert; + uint32_t coal; + uint32_t rsvd[3]; +}; + +#define IONIC_INTR_CTRL_REGS_MAX 2048 +#define IONIC_INTR_CTRL_COAL_MAX 0x3F + +/** enum ionic_intr_mask_vals - valid values for mask and mask_assert. + * @IONIC_INTR_MASK_CLEAR: unmask interrupt. + * @IONIC_INTR_MASK_SET: mask interrupt. + */ +enum ionic_intr_mask_vals { + IONIC_INTR_MASK_CLEAR = 0, + IONIC_INTR_MASK_SET = 1, +}; + +/** enum ionic_intr_credits_bits - bitwise composition of credits values. + * @IONIC_INTR_CRED_COUNT: bit mask of credit count, no shift needed. + * @IONIC_INTR_CRED_COUNT_SIGNED: bit mask of credit count, including sign bit. + * @IONIC_INTR_CRED_UNMASK: unmask the interrupt. + * @IONIC_INTR_CRED_RESET_COALESCE: reset the coalesce timer. + * @IONIC_INTR_CRED_REARM: unmask the and reset the timer. 
+ */ +enum ionic_intr_credits_bits { + IONIC_INTR_CRED_COUNT = 0x7fffu, + IONIC_INTR_CRED_COUNT_SIGNED = 0xffffu, + IONIC_INTR_CRED_UNMASK = 0x10000u, + IONIC_INTR_CRED_RESET_COALESCE = 0x20000u, + IONIC_INTR_CRED_REARM = (IONIC_INTR_CRED_UNMASK | + IONIC_INTR_CRED_RESET_COALESCE), +}; + +static inline void +ionic_intr_coal_init(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, uint32_t coal) +{ + iowrite32(coal, &intr_ctrl[intr_idx].coal_init); +} + +static inline void +ionic_intr_mask(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, uint32_t mask) +{ + iowrite32(mask, &intr_ctrl[intr_idx].mask); +} + +static inline void +ionic_intr_credits(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, uint32_t cred, uint32_t flags) +{ + if (cred > IONIC_INTR_CRED_COUNT) { + IONIC_WARN_ON(cred > IONIC_INTR_CRED_COUNT); + cred = ioread32(&intr_ctrl[intr_idx].credits); + cred &= IONIC_INTR_CRED_COUNT_SIGNED; + } + + iowrite32(cred | flags, &intr_ctrl[intr_idx].credits); +} + +static inline void +ionic_intr_clean(struct ionic_intr __iomem *intr_ctrl, + int intr_idx) +{ + uint32_t cred; + + cred = ioread32(&intr_ctrl[intr_idx].credits); + cred &= IONIC_INTR_CRED_COUNT_SIGNED; + cred |= IONIC_INTR_CRED_RESET_COALESCE; + iowrite32(cred, &intr_ctrl[intr_idx].credits); +} + +static inline void +ionic_intr_mask_assert(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, uint32_t mask) +{ + iowrite32(mask, &intr_ctrl[intr_idx].mask_assert); +} + +/** enum ionic_dbell_bits - bitwise composition of dbell values. + * + * @IONIC_DBELL_QID_MASK: unshifted mask of valid queue id bits. + * @IONIC_DBELL_QID_SHIFT: queue id shift amount in dbell value. + * @IONIC_DBELL_QID: macro to build QID component of dbell value. + * + * @IONIC_DBELL_RING_MASK: unshifted mask of valid ring bits. + * @IONIC_DBELL_RING_SHIFT: ring shift amount in dbell value. + * @IONIC_DBELL_RING: macro to build ring component of dbell value. + * + * @IONIC_DBELL_RING_0: ring zero dbell component value. + * @IONIC_DBELL_RING_1: ring one dbell component value. + * @IONIC_DBELL_RING_2: ring two dbell component value. + * @IONIC_DBELL_RING_3: ring three dbell component value. + * + * @IONIC_DBELL_INDEX_MASK: bit mask of valid index bits, no shift needed. + */ +enum ionic_dbell_bits { + IONIC_DBELL_QID_MASK = 0xffffff, + IONIC_DBELL_QID_SHIFT = 24, + +#define IONIC_DBELL_QID(n) \ + (((u64)(n) & IONIC_DBELL_QID_MASK) << IONIC_DBELL_QID_SHIFT) + + IONIC_DBELL_RING_MASK = 0x7, + IONIC_DBELL_RING_SHIFT = 16, + +#define IONIC_DBELL_RING(n) \ + (((u64)(n) & IONIC_DBELL_RING_MASK) << IONIC_DBELL_RING_SHIFT) + + IONIC_DBELL_RING_0 = 0, + IONIC_DBELL_RING_1 = IONIC_DBELL_RING(1), + IONIC_DBELL_RING_2 = IONIC_DBELL_RING(2), + IONIC_DBELL_RING_3 = IONIC_DBELL_RING(3), + + IONIC_DBELL_INDEX_MASK = 0xffff, +}; + +static inline void +ionic_dbell_ring(u64 __iomem *db_page, int qtype, u64 val) +{ + writeq(val, &db_page[qtype]); +} + +#endif /* _IONIC_REGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.c b/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.c new file mode 100644 index 000000000..fe624538d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.c @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ */ + +#include +#include + +#include + +#include "ionic_lif.h" +#include "ionic_rx_filter.h" + +void +ionic_rx_filter_free(struct ionic_rx_filter *f) +{ + LIST_REMOVE(f, by_id); + LIST_REMOVE(f, by_hash); + rte_free(f); +} + +int +ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .filter_id = f->filter_id, + }, + }; + + return ionic_adminq_post(lif, &ctx); +} + +int +ionic_rx_filters_init(struct ionic_lif *lif) +{ + uint32_t i; + + rte_spinlock_init(&lif->rx_filters.lock); + + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + LIST_INIT(&lif->rx_filters.by_hash[i]); + LIST_INIT(&lif->rx_filters.by_id[i]); + } + + return 0; +} + +void +ionic_rx_filters_deinit(struct ionic_lif *lif) +{ + struct ionic_rx_filter *f; + uint32_t i; + + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + while (!LIST_EMPTY(&lif->rx_filters.by_id[i])) { + f = LIST_FIRST(&lif->rx_filters.by_id[i]); + ionic_rx_filter_free(f); + } + } +} + +int +ionic_rx_filter_save(struct ionic_lif *lif, uint32_t flow_id, + uint16_t rxq_index, struct ionic_admin_ctx *ctx) +{ + struct ionic_rx_filter *f; + uint32_t key; + + f = rte_zmalloc("ionic", sizeof(*f), 0); + + if (!f) + return -ENOMEM; + + f->flow_id = flow_id; + f->filter_id = ctx->comp.rx_filter_add.filter_id; + f->rxq_index = rxq_index; + memcpy(&f->cmd, &ctx->cmd, sizeof(f->cmd)); + + switch (f->cmd.match) { + case IONIC_RX_FILTER_MATCH_VLAN: + key = f->cmd.vlan.vlan & IONIC_RX_FILTER_HLISTS_MASK; + break; + case IONIC_RX_FILTER_MATCH_MAC: + memcpy(&key, f->cmd.mac.addr, sizeof(key)); + key &= IONIC_RX_FILTER_HLISTS_MASK; + break; + case IONIC_RX_FILTER_MATCH_MAC_VLAN: + key = f->cmd.mac_vlan.vlan & IONIC_RX_FILTER_HLISTS_MASK; + break; + default: + return -EINVAL; + } + + rte_spinlock_lock(&lif->rx_filters.lock); + + LIST_INSERT_HEAD(&lif->rx_filters.by_hash[key], f, by_hash); + + key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK; + + LIST_INSERT_HEAD(&lif->rx_filters.by_id[key], f, by_id); + + rte_spinlock_unlock(&lif->rx_filters.lock); + + return 0; +} + +struct ionic_rx_filter * +ionic_rx_filter_by_vlan(struct ionic_lif *lif, uint16_t vid) +{ + uint32_t key = vid & IONIC_RX_FILTER_HLISTS_MASK; + struct ionic_rx_filter *f; + + LIST_FOREACH(f, &lif->rx_filters.by_hash[key], by_hash) { + if (f->cmd.match != IONIC_RX_FILTER_MATCH_VLAN) + continue; + if (f->cmd.vlan.vlan == vid) + return f; + } + + return NULL; +} + +struct ionic_rx_filter * +ionic_rx_filter_by_addr(struct ionic_lif *lif, const uint8_t *addr) +{ + const uint32_t key = *(const uint32_t *)addr & + IONIC_RX_FILTER_HLISTS_MASK; + struct ionic_rx_filter *f; + + LIST_FOREACH(f, &lif->rx_filters.by_hash[key], by_hash) { + if (f->cmd.match != IONIC_RX_FILTER_MATCH_MAC) + continue; + if (memcmp(addr, f->cmd.mac.addr, RTE_ETHER_ADDR_LEN) == 0) + return f; + } + + return NULL; +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.h b/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.h new file mode 100644 index 000000000..6204a7b53 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ */ + +#ifndef _IONIC_RX_FILTER_H_ +#define _IONIC_RX_FILTER_H_ + +#include + +#include "ionic_osdep.h" +#include "ionic_if.h" + +#define IONIC_RXQ_INDEX_ANY (0xFFFF) +struct ionic_rx_filter { + uint32_t flow_id; + uint32_t filter_id; + uint16_t rxq_index; + struct ionic_rx_filter_add_cmd cmd; + LIST_ENTRY(ionic_rx_filter) by_hash; + LIST_ENTRY(ionic_rx_filter) by_id; +}; + +#define IONIC_RX_FILTER_HLISTS (1 << 10) +#define IONIC_RX_FILTER_HLISTS_MASK (IONIC_RX_FILTER_HLISTS - 1) +struct ionic_rx_filters { + rte_spinlock_t lock; + LIST_HEAD(rx_filters_by_hash, ionic_rx_filter) + by_hash[IONIC_RX_FILTER_HLISTS]; /* by pkt hash */ + LIST_HEAD(rx_filters_by_id, ionic_rx_filter) + by_id[IONIC_RX_FILTER_HLISTS]; /* by filter_id */ +}; + +struct ionic_admin_ctx; +struct ionic_lif; + +void ionic_rx_filter_free(struct ionic_rx_filter *f); +int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f); +int ionic_rx_filters_init(struct ionic_lif *lif); +void ionic_rx_filters_deinit(struct ionic_lif *lif); +int ionic_rx_filter_save(struct ionic_lif *lif, uint32_t flow_id, + uint16_t rxq_index, struct ionic_admin_ctx *ctx); +struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, + uint16_t vid); +struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, + const uint8_t *addr); + +#endif /* _IONIC_RX_FILTER_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.c b/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.c new file mode 100644 index 000000000..2592f5cab --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.c @@ -0,0 +1,1082 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ionic_logs.h" +#include "ionic_mac_api.h" +#include "ionic_ethdev.h" +#include "ionic_lif.h" +#include "ionic_rxtx.h" + +#define IONIC_RX_RING_DOORBELL_STRIDE (32 - 1) + +/********************************************************************* + * + * TX functions + * + **********************************************************************/ + +void +ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct ionic_qcq *txq = dev->data->tx_queues[queue_id]; + struct ionic_queue *q = &txq->q; + + qinfo->nb_desc = q->num_descs; + qinfo->conf.offloads = txq->offloads; + qinfo->conf.tx_deferred_start = txq->deferred_start; +} + +static inline void __rte_cold +ionic_tx_flush(struct ionic_cq *cq) +{ + struct ionic_queue *q = cq->bound_q; + struct ionic_desc_info *q_desc_info; + struct rte_mbuf *txm, *next; + struct ionic_txq_comp *cq_desc_base = cq->base; + struct ionic_txq_comp *cq_desc; + u_int32_t comp_index = (u_int32_t)-1; + + cq_desc = &cq_desc_base[cq->tail_idx]; + while (color_match(cq_desc->color, cq->done_color)) { + cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); + + /* Prefetch the next 4 descriptors (not really useful here) */ + if ((cq->tail_idx & 0x3) == 0) + rte_prefetch0(&cq_desc_base[cq->tail_idx]); + + if (cq->tail_idx == 0) + cq->done_color = !cq->done_color; + + comp_index = cq_desc->comp_index; + + cq_desc = &cq_desc_base[cq->tail_idx]; + } 
+ + if (comp_index != (u_int32_t)-1) { + while (q->tail_idx != comp_index) { + q_desc_info = &q->info[q->tail_idx]; + + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + + /* Prefetch the next 4 descriptors */ + if ((q->tail_idx & 0x3) == 0) + /* q desc info */ + rte_prefetch0(&q->info[q->tail_idx]); + + /* + * Note: you can just use rte_pktmbuf_free, + * but this loop is faster + */ + txm = q_desc_info->cb_arg; + while (txm != NULL) { + next = txm->next; + rte_pktmbuf_free_seg(txm); + txm = next; + } + } + } +} + +void __rte_cold +ionic_dev_tx_queue_release(void *tx_queue) +{ + struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue; + + IONIC_PRINT_CALL(); + + ionic_qcq_free(txq); +} + +int __rte_cold +ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) +{ + struct ionic_qcq *txq; + + IONIC_PRINT_CALL(); + + txq = eth_dev->data->tx_queues[tx_queue_id]; + + /* + * Note: we should better post NOP Tx desc and wait for its completion + * before disabling Tx queue + */ + + ionic_qcq_disable(txq); + + ionic_tx_flush(&txq->cq); + + ionic_lif_txq_deinit(txq); + + eth_dev->data->tx_queue_state[tx_queue_id] = + RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +int __rte_cold +ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id, + uint16_t nb_desc, uint32_t socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_qcq *txq; + uint64_t offloads; + int err; + + IONIC_PRINT_CALL(); + + IONIC_PRINT(DEBUG, "Configuring TX queue %u with %u buffers", + tx_queue_id, nb_desc); + + if (tx_queue_id >= lif->ntxqcqs) { + IONIC_PRINT(DEBUG, "Queue index %u not available " + "(max %u queues)", + tx_queue_id, lif->ntxqcqs); + return -EINVAL; + } + + offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads; + + /* Validate number of receive descriptors */ + if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC) + return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ + + /* Free memory prior to re-allocation if needed... */ + if (eth_dev->data->tx_queues[tx_queue_id] != NULL) { + void *tx_queue = eth_dev->data->tx_queues[tx_queue_id]; + ionic_dev_tx_queue_release(tx_queue); + eth_dev->data->tx_queues[tx_queue_id] = NULL; + } + + err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq); + if (err) { + IONIC_PRINT(DEBUG, "Queue allocation failure"); + return -EINVAL; + } + + /* Do not start queue with rte_eth_dev_start() */ + txq->deferred_start = tx_conf->tx_deferred_start; + + txq->offloads = offloads; + + eth_dev->data->tx_queues[tx_queue_id] = txq; + + return 0; +} + +/* + * Start Transmit Units for specified queue. 
+ */ +int __rte_cold +ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) +{ + struct ionic_qcq *txq; + int err; + + IONIC_PRINT_CALL(); + + txq = eth_dev->data->tx_queues[tx_queue_id]; + + err = ionic_lif_txq_init(txq); + if (err) + return err; + + ionic_qcq_enable(txq); + + eth_dev->data->tx_queue_state[tx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static void +ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm) +{ + struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); + char *l3_hdr = ((char *)eth_hdr) + txm->l2_len; + struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) + (l3_hdr + txm->l3_len); + + if (txm->ol_flags & PKT_TX_IP_CKSUM) { + struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; + ipv4_hdr->hdr_checksum = 0; + tcp_hdr->cksum = 0; + tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); + } else { + struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; + tcp_hdr->cksum = 0; + tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); + } +} + +static void +ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm) +{ + struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); + char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len + + txm->outer_l3_len + txm->l2_len; + struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) + (l3_hdr + txm->l3_len); + + if (txm->ol_flags & PKT_TX_IPV4) { + struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; + ipv4_hdr->hdr_checksum = 0; + tcp_hdr->cksum = 0; + tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); + } else { + struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; + tcp_hdr->cksum = 0; + tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); + } +} + +static void +ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc, + struct rte_mbuf *txm, + rte_iova_t addr, uint8_t nsge, uint16_t len, + uint32_t hdrlen, uint32_t mss, + bool encap, + uint16_t vlan_tci, bool has_vlan, + bool start, bool done) +{ + uint8_t flags = 0; + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; + flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0; + + desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, + flags, nsge, addr); + desc->len = len; + desc->vlan_tci = vlan_tci; + desc->hdr_len = hdrlen; + desc->mss = mss; + + ionic_q_post(q, done, NULL, done ? 
txm : NULL); +} + +static struct ionic_txq_desc * +ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem) +{ + struct ionic_txq_desc *desc_base = q->base; + struct ionic_txq_sg_desc *sg_desc_base = q->sg_base; + struct ionic_txq_desc *desc = &desc_base[q->head_idx]; + struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx]; + + *elem = sg_desc->elems; + return desc; +} + +static int +ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm, + uint64_t offloads __rte_unused, bool not_xmit_more) +{ + struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q); + struct ionic_txq_desc *desc; + struct ionic_txq_sg_elem *elem; + struct rte_mbuf *txm_seg; + uint64_t desc_addr = 0; + uint16_t desc_len = 0; + uint8_t desc_nsge; + uint32_t hdrlen; + uint32_t mss = txm->tso_segsz; + uint32_t frag_left = 0; + uint32_t left; + uint32_t seglen; + uint32_t len; + uint32_t offset = 0; + bool start, done; + bool encap; + bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT); + uint16_t vlan_tci = txm->vlan_tci; + uint64_t ol_flags = txm->ol_flags; + + encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || + (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) && + ((ol_flags & PKT_TX_OUTER_IPV4) || + (ol_flags & PKT_TX_OUTER_IPV6)); + + /* Preload inner-most TCP csum field with IP pseudo hdr + * calculated with IP length set to zero. HW will later + * add in length to each TCP segment resulting from the TSO. + */ + + if (encap) { + ionic_tx_tcp_inner_pseudo_csum(txm); + hdrlen = txm->outer_l2_len + txm->outer_l3_len + + txm->l2_len + txm->l3_len + txm->l4_len; + } else { + ionic_tx_tcp_pseudo_csum(txm); + hdrlen = txm->l2_len + txm->l3_len + txm->l4_len; + } + + seglen = hdrlen + mss; + left = txm->data_len; + + desc = ionic_tx_tso_next(q, &elem); + start = true; + + /* Chop data up into desc segments */ + + while (left > 0) { + len = RTE_MIN(seglen, left); + frag_left = seglen - len; + desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm)); + desc_len = len; + desc_nsge = 0; + left -= len; + offset += len; + if (txm->nb_segs > 1 && frag_left > 0) + continue; + done = (txm->nb_segs == 1 && left == 0); + ionic_tx_tso_post(q, desc, txm, + desc_addr, desc_nsge, desc_len, + hdrlen, mss, + encap, + vlan_tci, has_vlan, + start, done && not_xmit_more); + desc = ionic_tx_tso_next(q, &elem); + start = false; + seglen = mss; + } + + /* Chop frags into desc segments */ + + txm_seg = txm->next; + while (txm_seg != NULL) { + offset = 0; + left = txm_seg->data_len; + stats->frags++; + + while (left > 0) { + rte_iova_t data_iova; + data_iova = rte_mbuf_data_iova(txm_seg); + elem->addr = rte_cpu_to_le_64(data_iova) + offset; + if (frag_left > 0) { + len = RTE_MIN(frag_left, left); + frag_left -= len; + elem->len = len; + elem++; + desc_nsge++; + } else { + len = RTE_MIN(mss, left); + frag_left = mss - len; + data_iova = rte_mbuf_data_iova(txm_seg); + desc_addr = rte_cpu_to_le_64(data_iova); + desc_len = len; + desc_nsge = 0; + } + left -= len; + offset += len; + if (txm_seg->next != NULL && frag_left > 0) + continue; + done = (txm_seg->next == NULL && left == 0); + ionic_tx_tso_post(q, desc, txm_seg, + desc_addr, desc_nsge, desc_len, + hdrlen, mss, + encap, + vlan_tci, has_vlan, + start, done && not_xmit_more); + desc = ionic_tx_tso_next(q, &elem); + start = false; + } + + txm_seg = txm_seg->next; + } + + stats->tso++; + + return 0; +} + +static int +ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm, + uint64_t offloads, bool not_xmit_more) +{ + struct ionic_txq_desc *desc_base = q->base; + struct ionic_txq_sg_desc 
*sg_desc_base = q->sg_base; + struct ionic_txq_desc *desc = &desc_base[q->head_idx]; + struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx]; + struct ionic_txq_sg_elem *elem = sg_desc->elems; + struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q); + struct rte_mbuf *txm_seg; + bool encap; + bool has_vlan; + uint64_t ol_flags = txm->ol_flags; + uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm)); + uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE; + uint8_t flags = 0; + + if ((ol_flags & PKT_TX_IP_CKSUM) && + (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) { + opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW; + flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3; + if (((ol_flags & PKT_TX_TCP_CKSUM) && + (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) || + ((ol_flags & PKT_TX_UDP_CKSUM) && + (offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) + flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4; + } else { + stats->no_csum++; + } + + has_vlan = (ol_flags & PKT_TX_VLAN_PKT); + encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || + (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) && + ((ol_flags & PKT_TX_OUTER_IPV4) || + (ol_flags & PKT_TX_OUTER_IPV6)); + + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + + desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr); + desc->len = txm->data_len; + desc->vlan_tci = txm->vlan_tci; + + txm_seg = txm->next; + while (txm_seg != NULL) { + elem->len = txm_seg->data_len; + elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg)); + stats->frags++; + elem++; + txm_seg = txm_seg->next; + } + + ionic_q_post(q, not_xmit_more, NULL, txm); + + return 0; +} + +uint16_t +ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue; + struct ionic_queue *q = &txq->q; + struct ionic_cq *cq = &txq->cq; + struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q); + uint32_t next_q_head_idx; + uint32_t bytes_tx = 0; + uint16_t nb_tx = 0; + int err; + bool last; + + /* Cleaning old buffers */ + ionic_tx_flush(cq); + + if (unlikely(ionic_q_space_avail(q) < nb_pkts)) { + stats->stop += nb_pkts; + return 0; + } + + while (nb_tx < nb_pkts) { + last = (nb_tx == (nb_pkts - 1)); + + next_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1); + if ((next_q_head_idx & 0x3) == 0) { + struct ionic_txq_desc *desc_base = q->base; + rte_prefetch0(&desc_base[next_q_head_idx]); + rte_prefetch0(&q->info[next_q_head_idx]); + } + + if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG) + err = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads, + last); + else + err = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last); + if (err) { + stats->drop += nb_pkts - nb_tx; + if (nb_tx > 0) + ionic_q_flush(q); + break; + } + + bytes_tx += tx_pkts[nb_tx]->pkt_len; + nb_tx++; + } + + stats->packets += nb_tx; + stats->bytes += bytes_tx; + + return nb_tx; +} + +/********************************************************************* + * + * TX prep functions + * + **********************************************************************/ + +#define IONIC_TX_OFFLOAD_MASK ( \ + PKT_TX_IPV4 | \ + PKT_TX_IPV6 | \ + PKT_TX_VLAN | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_TCP_SEG | \ + PKT_TX_L4_MASK) + +#define IONIC_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK) + +uint16_t +ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *txm; + uint64_t offloads; + int i = 0; + + for (i = 0; i < nb_pkts; i++) { + txm = tx_pkts[i]; + + if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) { + 
rte_errno = -EINVAL; + break; + } + + offloads = txm->ol_flags; + + if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = -ENOTSUP; + break; + } + } + + return i; +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ + +static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, + struct rte_mbuf *mbuf); + +void +ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct ionic_qcq *rxq = dev->data->rx_queues[queue_id]; + struct ionic_queue *q = &rxq->q; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = q->num_descs; + qinfo->conf.rx_deferred_start = rxq->deferred_start; + qinfo->conf.offloads = rxq->offloads; +} + +static void __rte_cold +ionic_rx_empty(struct ionic_queue *q) +{ + struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q); + struct ionic_desc_info *cur; + struct rte_mbuf *mbuf; + + while (q->tail_idx != q->head_idx) { + cur = &q->info[q->tail_idx]; + mbuf = cur->cb_arg; + rte_mempool_put(rxq->mb_pool, mbuf); + + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + } +} + +void __rte_cold +ionic_dev_rx_queue_release(void *rx_queue) +{ + struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue; + + IONIC_PRINT_CALL(); + + ionic_rx_empty(&rxq->q); + + ionic_qcq_free(rxq); +} + +int __rte_cold +ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id, + uint16_t nb_desc, + uint32_t socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_qcq *rxq; + uint64_t offloads; + int err; + + IONIC_PRINT_CALL(); + + IONIC_PRINT(DEBUG, "Configuring RX queue %u with %u buffers", + rx_queue_id, nb_desc); + + if (rx_queue_id >= lif->nrxqcqs) { + IONIC_PRINT(ERR, + "Queue index %u not available (max %u queues)", + rx_queue_id, lif->nrxqcqs); + return -EINVAL; + } + + offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads; + + /* Validate number of receive descriptors */ + if (!rte_is_power_of_2(nb_desc) || + nb_desc < IONIC_MIN_RING_DESC || + nb_desc > IONIC_MAX_RING_DESC) { + IONIC_PRINT(ERR, + "Bad number of descriptors (%u) for queue %u (min: %u)", + nb_desc, rx_queue_id, IONIC_MIN_RING_DESC); + return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ + } + + if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) + eth_dev->data->scattered_rx = 1; + + /* Free memory prior to re-allocation if needed... 
*/ + if (eth_dev->data->rx_queues[rx_queue_id] != NULL) { + void *rx_queue = eth_dev->data->rx_queues[rx_queue_id]; + ionic_dev_rx_queue_release(rx_queue); + eth_dev->data->rx_queues[rx_queue_id] = NULL; + } + + err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq); + if (err) { + IONIC_PRINT(ERR, "Queue allocation failure"); + return -EINVAL; + } + + rxq->mb_pool = mp; + + /* + * Note: the interface does not currently support + * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN + * when the adapter will be able to keep the CRC and subtract + * it to the length for all received packets: + * if (eth_dev->data->dev_conf.rxmode.offloads & + * DEV_RX_OFFLOAD_KEEP_CRC) + * rxq->crc_len = ETHER_CRC_LEN; + */ + + /* Do not start queue with rte_eth_dev_start() */ + rxq->deferred_start = rx_conf->rx_deferred_start; + + rxq->offloads = offloads; + + eth_dev->data->rx_queues[rx_queue_id] = rxq; + + return 0; +} + +static void +ionic_rx_clean(struct ionic_queue *q, + uint32_t q_desc_index, uint32_t cq_desc_index, + void *cb_arg, void *service_cb_arg) +{ + struct ionic_rxq_comp *cq_desc_base = q->bound_cq->base; + struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index]; + struct rte_mbuf *rxm = cb_arg; + struct rte_mbuf *rxm_seg; + struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q); + uint32_t max_frame_size = + rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + uint64_t pkt_flags = 0; + uint32_t pkt_type; + struct ionic_rx_stats *stats = IONIC_Q_TO_RX_STATS(q); + struct ionic_rx_service *recv_args = (struct ionic_rx_service *) + service_cb_arg; + uint32_t buf_size = (uint16_t) + (rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + uint32_t left; + + if (!recv_args) { + stats->no_cb_arg++; + /* Flush */ + rte_pktmbuf_free(rxm); + /* + * Note: rte_mempool_put is faster with no segs + * rte_mempool_put(rxq->mb_pool, rxm); + */ + return; + } + + if (cq_desc->status) { + stats->bad_cq_status++; + ionic_rx_recycle(q, q_desc_index, rxm); + return; + } + + if (recv_args->nb_rx >= recv_args->nb_pkts) { + stats->no_room++; + ionic_rx_recycle(q, q_desc_index, rxm); + return; + } + + if (cq_desc->len > max_frame_size || + cq_desc->len == 0) { + stats->bad_len++; + ionic_rx_recycle(q, q_desc_index, rxm); + return; + } + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_prefetch1((char *)rxm->buf_addr + rxm->data_off); + rxm->nb_segs = 1; /* cq_desc->num_sg_elems */ + rxm->pkt_len = cq_desc->len; + rxm->port = rxq->lif->port_id; + + left = cq_desc->len; + + rxm->data_len = RTE_MIN(buf_size, left); + left -= rxm->data_len; + + rxm_seg = rxm->next; + while (rxm_seg && left) { + rxm_seg->data_len = RTE_MIN(buf_size, left); + left -= rxm_seg->data_len; + + rxm_seg = rxm_seg->next; + rxm->nb_segs++; + } + + /* RSS */ + pkt_flags |= PKT_RX_RSS_HASH; + rxm->hash.rss = cq_desc->rss_hash; + + /* Vlan Strip */ + if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) { + pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + rxm->vlan_tci = cq_desc->vlan_tci; + } + + /* Checksum */ + if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) { + if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK) + pkt_flags |= PKT_RX_IP_CKSUM_GOOD; + else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD) + pkt_flags |= PKT_RX_IP_CKSUM_BAD; + + if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) || + (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK)) + pkt_flags |= PKT_RX_L4_CKSUM_GOOD; + else if ((cq_desc->csum_flags & + IONIC_RXQ_COMP_CSUM_F_TCP_BAD) || + (cq_desc->csum_flags & + 
IONIC_RXQ_COMP_CSUM_F_UDP_BAD)) + pkt_flags |= PKT_RX_L4_CKSUM_BAD; + } + + rxm->ol_flags = pkt_flags; + + /* Packet Type */ + switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) { + case IONIC_PKT_TYPE_IPV4: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; + break; + case IONIC_PKT_TYPE_IPV6: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; + break; + case IONIC_PKT_TYPE_IPV4_TCP: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_TCP; + break; + case IONIC_PKT_TYPE_IPV6_TCP: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_TCP; + break; + case IONIC_PKT_TYPE_IPV4_UDP: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_UDP; + break; + case IONIC_PKT_TYPE_IPV6_UDP: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_UDP; + break; + default: + { + struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm, + struct rte_ether_hdr *); + uint16_t ether_type = eth_h->ether_type; + if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) + pkt_type = RTE_PTYPE_L2_ETHER_ARP; + else + pkt_type = RTE_PTYPE_UNKNOWN; + break; + } + } + + rxm->packet_type = pkt_type; + + recv_args->rx_pkts[recv_args->nb_rx] = rxm; + recv_args->nb_rx++; + + stats->packets++; + stats->bytes += rxm->pkt_len; +} + +static void +ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, + struct rte_mbuf *mbuf) +{ + struct ionic_rxq_desc *desc_base = q->base; + struct ionic_rxq_desc *old = &desc_base[q_desc_index]; + struct ionic_rxq_desc *new = &desc_base[q->head_idx]; + + new->addr = old->addr; + new->len = old->len; + + ionic_q_post(q, true, ionic_rx_clean, mbuf); +} + +static int __rte_cold +ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len) +{ + struct ionic_queue *q = &rxq->q; + struct ionic_rxq_desc *desc_base = q->base; + struct ionic_rxq_sg_desc *sg_desc_base = q->sg_base; + struct ionic_rxq_desc *desc; + struct ionic_rxq_sg_desc *sg_desc; + struct ionic_rxq_sg_elem *elem; + rte_iova_t dma_addr; + uint32_t i, j, nsegs, buf_size, size; + bool ring_doorbell; + + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + + /* Initialize software ring entries */ + for (i = ionic_q_space_avail(q); i; i--) { + struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool); + struct rte_mbuf *prev_rxm_seg; + + if (rxm == NULL) { + IONIC_PRINT(ERR, "RX mbuf alloc failed"); + return -ENOMEM; + } + + nsegs = (len + buf_size - 1) / buf_size; + + desc = &desc_base[q->head_idx]; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm)); + desc->addr = dma_addr; + desc->len = buf_size; + size = buf_size; + desc->opcode = (nsegs > 1) ? 
IONIC_RXQ_DESC_OPCODE_SG : + IONIC_RXQ_DESC_OPCODE_SIMPLE; + rxm->next = NULL; + + prev_rxm_seg = rxm; + sg_desc = &sg_desc_base[q->head_idx]; + elem = sg_desc->elems; + for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) { + struct rte_mbuf *rxm_seg; + rte_iova_t data_iova; + + rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool); + if (rxm_seg == NULL) { + IONIC_PRINT(ERR, "RX mbuf alloc failed"); + return -ENOMEM; + } + + data_iova = rte_mbuf_data_iova(rxm_seg); + dma_addr = rte_cpu_to_le_64(data_iova); + elem->addr = dma_addr; + elem->len = buf_size; + size += buf_size; + elem++; + rxm_seg->next = NULL; + prev_rxm_seg->next = rxm_seg; + prev_rxm_seg = rxm_seg; + } + + if (size < len) + IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)", + size, len); + + ring_doorbell = ((q->head_idx + 1) & + IONIC_RX_RING_DOORBELL_STRIDE) == 0; + + ionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm); + } + + return 0; +} + +/* + * Start Receive Units for specified queue. + */ +int __rte_cold +ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + struct ionic_qcq *rxq; + int err; + + IONIC_PRINT_CALL(); + + IONIC_PRINT(DEBUG, "Allocating RX queue buffers (size: %u)", + frame_size); + + rxq = eth_dev->data->rx_queues[rx_queue_id]; + + err = ionic_lif_rxq_init(rxq); + if (err) + return err; + + /* Allocate buffers for descriptor rings */ + if (ionic_rx_fill(rxq, frame_size) != 0) { + IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d", + rx_queue_id); + return -1; + } + + ionic_qcq_enable(rxq); + + eth_dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static inline void __rte_cold +ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do, + void *service_cb_arg) +{ + struct ionic_queue *q = cq->bound_q; + struct ionic_desc_info *q_desc_info; + struct ionic_rxq_comp *cq_desc_base = cq->base; + struct ionic_rxq_comp *cq_desc; + bool more; + uint32_t curr_q_tail_idx, curr_cq_tail_idx; + uint32_t work_done = 0; + + if (work_to_do == 0) + return; + + cq_desc = &cq_desc_base[cq->tail_idx]; + while (color_match(cq_desc->pkt_type_color, cq->done_color)) { + curr_cq_tail_idx = cq->tail_idx; + cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); + + if (cq->tail_idx == 0) + cq->done_color = !cq->done_color; + + /* Prefetch the next 4 descriptors */ + if ((cq->tail_idx & 0x3) == 0) + rte_prefetch0(&cq_desc_base[cq->tail_idx]); + + do { + more = (q->tail_idx != cq_desc->comp_index); + + q_desc_info = &q->info[q->tail_idx]; + + curr_q_tail_idx = q->tail_idx; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + + /* Prefetch the next 4 descriptors */ + if ((q->tail_idx & 0x3) == 0) + /* q desc info */ + rte_prefetch0(&q->info[q->tail_idx]); + + ionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx, + q_desc_info->cb_arg, service_cb_arg); + + } while (more); + + if (++work_done == work_to_do) + break; + + cq_desc = &cq_desc_base[cq->tail_idx]; + } +} + +/* + * Stop Receive Units for specified queue. 
+ */ +int __rte_cold +ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + struct ionic_qcq *rxq; + + IONIC_PRINT_CALL(); + + rxq = eth_dev->data->rx_queues[rx_queue_id]; + + ionic_qcq_disable(rxq); + + /* Flush */ + ionic_rxq_service(&rxq->cq, -1, NULL); + + ionic_lif_rxq_deinit(rxq); + + eth_dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +uint16_t +ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue; + uint32_t frame_size = + rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + struct ionic_cq *cq = &rxq->cq; + struct ionic_rx_service service_cb_arg; + + service_cb_arg.rx_pkts = rx_pkts; + service_cb_arg.nb_pkts = nb_pkts; + service_cb_arg.nb_rx = 0; + + ionic_rxq_service(cq, nb_pkts, &service_cb_arg); + + ionic_rx_fill(rxq, frame_size); + + return service_cb_arg.nb_rx; +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.h b/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.h new file mode 100644 index 000000000..5c85b9c49 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_RXTX_H_ +#define _IONIC_RXTX_H_ + +#include + +struct ionic_rx_service { + /* cb in */ + struct rte_mbuf **rx_pkts; + uint16_t nb_pkts; + /* cb out */ + uint16_t nb_rx; +}; + +uint16_t ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +int ionic_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_desc, uint32_t socket_id, + const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp); +void ionic_dev_rx_queue_release(void *rxq); +int ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id); + +int ionic_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_desc, uint32_t socket_id, + const struct rte_eth_txconf *tx_conf); +void ionic_dev_tx_queue_release(void *tx_queue); +int ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id); +int ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +void ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); +void ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); + +#endif /* _IONIC_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/meson.build b/src/spdk/dpdk/drivers/net/ionic/meson.build new file mode 100644 index 000000000..1c6362d27 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/meson.build @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +# Copyright(c) 2019 Pensando + +sources = files( + 'ionic_mac_api.c', + 'ionic_rx_filter.c', + 'ionic_rxtx.c', + 'ionic_dev.c', + 'ionic_ethdev.c', + 'ionic_lif.c', + 'ionic_main.c' +) diff --git a/src/spdk/dpdk/drivers/net/ionic/rte_pmd_ionic_version.map b/src/spdk/dpdk/drivers/net/ionic/rte_pmd_ionic_version.map new file mode 100644 index 000000000..acdaf587d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/rte_pmd_ionic_version.map @@ -0,0 +1,4 @@ +DPDK_21 { + + local: *; +}; -- 
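For context on how the pieces above fit together: the PMD registers ionic_recv_pkts()/ionic_xmit_pkts() and the queue setup/start/stop handlers with the ethdev layer, so applications drive it only through the generic rte_ethdev API. Below is a minimal sketch (not part of the patch) of such an application; the port id, mempool parameters and ring size are illustrative assumptions, with the ring size kept a power of two as ionic_dev_rx_queue_setup()/ionic_dev_tx_queue_setup() require.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_eal.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_ethdev.h>

#define EXAMPLE_NB_DESC 1024	/* power of two, as the ionic queue setup requires */
#define EXAMPLE_BURST	32

int
main(int argc, char **argv)
{
	struct rte_eth_conf port_conf = { 0 };
	struct rte_mempool *mp;
	struct rte_mbuf *pkts[EXAMPLE_BURST];
	uint16_t port_id = 0;	/* assumption: port 0 is the ionic device */
	uint16_t nb_rx, i;

	if (rte_eal_init(argc, argv) < 0)
		rte_exit(EXIT_FAILURE, "EAL init failed\n");

	mp = rte_pktmbuf_pool_create("example_pool", 8192, 256, 0,
		RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mp == NULL)
		rte_exit(EXIT_FAILURE, "mbuf pool allocation failed\n");

	/* One RX and one TX queue, default offloads. */
	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
		rte_exit(EXIT_FAILURE, "port configure failed\n");

	/* Dispatched by ethdev to ionic_dev_rx/tx_queue_setup(). */
	if (rte_eth_rx_queue_setup(port_id, 0, EXAMPLE_NB_DESC,
			rte_socket_id(), NULL, mp) < 0)
		rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
	if (rte_eth_tx_queue_setup(port_id, 0, EXAMPLE_NB_DESC,
			rte_socket_id(), NULL) < 0)
		rte_exit(EXIT_FAILURE, "tx queue setup failed\n");

	/* Brings up the LIF and starts the non-deferred queues. */
	if (rte_eth_dev_start(port_id) < 0)
		rte_exit(EXIT_FAILURE, "port start failed\n");

	/* One poll through the ionic_recv_pkts() burst path. */
	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, EXAMPLE_BURST);
	printf("received %u packets\n", (unsigned int)nb_rx);
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);

	rte_eth_dev_stop(port_id);
	rte_eal_cleanup();

	return 0;
}

Queues created with rx_deferred_start/tx_deferred_start set in their queue configuration are skipped by the start above and would instead be brought up individually with rte_eth_dev_rx_queue_start()/rte_eth_dev_tx_queue_start(), which reach the ionic_dev_rx_queue_start()/ionic_dev_tx_queue_start() handlers shown earlier.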