author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/net/ark
parent    Initial commit. (diff)
Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/net/ark')
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/Makefile                    39
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_ddm.c                  130
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_ddm.h                  151
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_ethdev.c              1027
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c            680
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h             36
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c            436
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h             30
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_ext.h                   90
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_global.h               134
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_logs.h                  93
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_mpu.c                  152
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_mpu.h                  125
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c              450
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h               88
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_pktdir.c                56
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_pktdir.h                41
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_pktgen.c               472
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_pktgen.h                79
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_rqp.c                   68
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_rqp.h                   57
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_udm.c                  197
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/ark_udm.h                  163
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/meson.build                 13
-rw-r--r--  src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map      3
25 files changed, 4810 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/net/ark/Makefile b/src/spdk/dpdk/drivers/net/ark/Makefile
new file mode 100644
index 000000000..c02080bdd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2015-2018 Atomic Rules LLC
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ark.a
+
+CFLAGS += -O3 -I./
+CFLAGS += $(WERROR_FLAGS) -Werror
+
+EXPORT_MAP := rte_pmd_ark_version.map
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ddm.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_mpu.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktchkr.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktgen.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_rqp.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_udm.c
+
+# this lib depends upon:
+LDLIBS += -lpthread
+ifdef CONFIG_RTE_EXEC_ENV_LINUX
+LDLIBS += -ldl
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
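The SRCS-y rules above only take effect when CONFIG_RTE_LIBRTE_ARK_PMD is enabled in the DPDK build configuration. A minimal sketch of enabling it under the legacy make system (the config file path is the conventional DPDK location and is an assumption here, not part of this diff):

    # config/common_base (legacy make build; path assumed)
    CONFIG_RTE_LIBRTE_ARK_PMD=y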
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ddm.c b/src/spdk/dpdk/drivers/net/ark/ark_ddm.c
new file mode 100644
index 000000000..57026f8d1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ddm.c
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_ddm.h"
+
+/* ************************************************************************* */
+int
+ark_ddm_verify(struct ark_ddm_t *ddm)
+{
+ uint32_t hw_const;
+ if (sizeof(struct ark_ddm_t) != ARK_DDM_EXPECTED_SIZE) {
+ PMD_DRV_LOG(ERR, "ARK: DDM structure looks incorrect %d vs %zd\n",
+ ARK_DDM_EXPECTED_SIZE, sizeof(struct ark_ddm_t));
+ return -1;
+ }
+
+ hw_const = ddm->cfg.const0;
+ if (hw_const == ARK_DDM_CONST1) {
+ PMD_DRV_LOG(ERR,
+ "ARK: DDM module is version 1, "
+ "PMD expects version 2\n");
+ return -1;
+ } else if (hw_const != ARK_DDM_CONST2) {
+ PMD_DRV_LOG(ERR,
+ "ARK: DDM module not found as expected 0x%08x\n",
+ ddm->cfg.const0);
+ return -1;
+ }
+ return 0;
+}
+
+void
+ark_ddm_start(struct ark_ddm_t *ddm)
+{
+ ddm->cfg.command = 1;
+}
+
+int
+ark_ddm_stop(struct ark_ddm_t *ddm, const int wait)
+{
+ int cnt = 0;
+
+ ddm->cfg.command = 2;
+ while (wait && (ddm->cfg.stop_flushed & 0x01) == 0) {
+ if (cnt++ > 1000)
+ return 1;
+
+ usleep(10);
+ }
+ return 0;
+}
+
+void
+ark_ddm_reset(struct ark_ddm_t *ddm)
+{
+ int status;
+
+ /* reset only works if ddm has stopped properly. */
+ status = ark_ddm_stop(ddm, 1);
+
+ if (status != 0) {
+ PMD_DEBUG_LOG(INFO, "%s stop failed, doing forced reset\n",
+ __func__);
+ ddm->cfg.command = 4;
+ usleep(10);
+ }
+ ddm->cfg.command = 3;
+}
+
+void
+ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr, uint32_t interval)
+{
+ ddm->setup.cons_write_index_addr = cons_addr;
+ ddm->setup.write_index_interval = interval / 4; /* 4 ns period */
+}
+
+void
+ark_ddm_stats_reset(struct ark_ddm_t *ddm)
+{
+ ddm->cfg.tlp_stats_clear = 1;
+}
+
+void
+ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg)
+{
+ PMD_FUNC_LOG(DEBUG, "%s Stopped: %d\n", msg,
+ ark_ddm_is_stopped(ddm)
+ );
+}
+
+void
+ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg)
+{
+ struct ark_ddm_stats_t *stats = &ddm->stats;
+
+ PMD_STATS_LOG(INFO, "DDM Stats: %s"
+ ARK_SU64 ARK_SU64 ARK_SU64
+ "\n", msg,
+ "Bytes:", stats->tx_byte_count,
+ "Packets:", stats->tx_pkt_count,
+ "MBufs", stats->tx_mbuf_count);
+}
+
+int
+ark_ddm_is_stopped(struct ark_ddm_t *ddm)
+{
+ return (ddm->cfg.stop_flushed & 0x01) != 0;
+}
+
+uint64_t
+ark_ddm_queue_byte_count(struct ark_ddm_t *ddm)
+{
+ return ddm->queue_stats.byte_count;
+}
+
+uint64_t
+ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm)
+{
+ return ddm->queue_stats.pkt_count;
+}
+
+void
+ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm)
+{
+ ddm->queue_stats.byte_count = 1;
+}
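Taken together, the functions above form a small control protocol: verify the hardware ID word, reset, program the consumer write-back address, then start; stop polls stop_flushed with a bounded spin (1000 iterations of 10 us, about 10 ms). A hedged sketch of the intended call order, inferred from ark_config_device() and eth_ark_dev_start() later in this diff — bar0, cons_iova, and the 500 ns interval are illustrative assumptions, and ARK_DDM_BASE comes from ark_global.h:

    #include "ark_ddm.h"
    #include "ark_global.h"   /* ARK_DDM_BASE */

    /* Sketch only: bring up one DDM instance mapped into BAR0. */
    static int ddm_bringup(uint8_t *bar0, rte_iova_t cons_iova)
    {
            struct ark_ddm_t *ddm = (struct ark_ddm_t *)&bar0[ARK_DDM_BASE];

            if (ark_ddm_verify(ddm) != 0)       /* check HW ID before touching anything */
                    return -1;
            ark_ddm_reset(ddm);                 /* stop (forced if needed), then reset */
            ark_ddm_setup(ddm, cons_iova, 500); /* write-back addr, interval in ns (assumed) */
            ark_ddm_start(ddm);
            return 0;
    }

On teardown, ark_ddm_stop(ddm, 1) returns nonzero if the flush never completes within the spin budget.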
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ddm.h b/src/spdk/dpdk/drivers/net/ark/ark_ddm.h
new file mode 100644
index 000000000..5456b4b5c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ddm.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_DDM_H_
+#define _ARK_DDM_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+
+/* The DDM or Downstream Data Mover is an internal Arkville hardware
+ * module for moving packets from host memory to the TX packet streams.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/* struct defining Tx meta data -- fixed in FPGA -- 16 bytes */
+struct ark_tx_meta {
+ uint64_t physaddr;
+ uint32_t user1;
+ uint16_t data_len; /* of this MBUF */
+#define ARK_DDM_EOP 0x01
+#define ARK_DDM_SOP 0x02
+ uint8_t flags; /* bit 0 indicates last mbuf in chain. */
+ uint8_t reserved[1];
+};
+
+
+/*
+ * DDM core hardware structures
+ * These are overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+#define ARK_DDM_CFG 0x0000
+/* Set unique HW ID for hardware version */
+#define ARK_DDM_CONST2 (0x324d4444)
+#define ARK_DDM_CONST1 (0xfacecafe)
+
+struct ark_ddm_cfg_t {
+ uint32_t r0;
+ volatile uint32_t tlp_stats_clear;
+ uint32_t const0;
+ volatile uint32_t tag_max;
+ volatile uint32_t command;
+ volatile uint32_t stop_flushed;
+};
+
+#define ARK_DDM_STATS 0x0020
+struct ark_ddm_stats_t {
+ volatile uint64_t tx_byte_count;
+ volatile uint64_t tx_pkt_count;
+ volatile uint64_t tx_mbuf_count;
+};
+
+#define ARK_DDM_MRDQ 0x0040
+struct ark_ddm_mrdq_t {
+ volatile uint32_t mrd_q1;
+ volatile uint32_t mrd_q2;
+ volatile uint32_t mrd_q3;
+ volatile uint32_t mrd_q4;
+ volatile uint32_t mrd_full;
+};
+
+#define ARK_DDM_CPLDQ 0x0068
+struct ark_ddm_cpldq_t {
+ volatile uint32_t cpld_q1;
+ volatile uint32_t cpld_q2;
+ volatile uint32_t cpld_q3;
+ volatile uint32_t cpld_q4;
+ volatile uint32_t cpld_full;
+};
+
+#define ARK_DDM_MRD_PS 0x0090
+struct ark_ddm_mrd_ps_t {
+ volatile uint32_t mrd_ps_min;
+ volatile uint32_t mrd_ps_max;
+ volatile uint32_t mrd_full_ps_min;
+ volatile uint32_t mrd_full_ps_max;
+ volatile uint32_t mrd_dw_ps_min;
+ volatile uint32_t mrd_dw_ps_max;
+};
+
+#define ARK_DDM_QUEUE_STATS 0x00a8
+struct ark_ddm_qstats_t {
+ volatile uint64_t byte_count;
+ volatile uint64_t pkt_count;
+ volatile uint64_t mbuf_count;
+};
+
+#define ARK_DDM_CPLD_PS 0x00c0
+struct ark_ddm_cpld_ps_t {
+ volatile uint32_t cpld_ps_min;
+ volatile uint32_t cpld_ps_max;
+ volatile uint32_t cpld_full_ps_min;
+ volatile uint32_t cpld_full_ps_max;
+ volatile uint32_t cpld_dw_ps_min;
+ volatile uint32_t cpld_dw_ps_max;
+};
+
+#define ARK_DDM_SETUP 0x00e0
+struct ark_ddm_setup_t {
+ rte_iova_t cons_write_index_addr;
+ uint32_t write_index_interval; /* 4ns each */
+ volatile uint32_t cons_index;
+};
+
+#define ARK_DDM_EXPECTED_SIZE 256
+#define ARK_DDM_QOFFSET ARK_DDM_EXPECTED_SIZE
+/* Consolidated structure */
+struct ark_ddm_t {
+ struct ark_ddm_cfg_t cfg;
+ uint8_t reserved0[(ARK_DDM_STATS - ARK_DDM_CFG) -
+ sizeof(struct ark_ddm_cfg_t)];
+ struct ark_ddm_stats_t stats;
+ uint8_t reserved1[(ARK_DDM_MRDQ - ARK_DDM_STATS) -
+ sizeof(struct ark_ddm_stats_t)];
+ struct ark_ddm_mrdq_t mrdq;
+ uint8_t reserved2[(ARK_DDM_CPLDQ - ARK_DDM_MRDQ) -
+ sizeof(struct ark_ddm_mrdq_t)];
+ struct ark_ddm_cpldq_t cpldq;
+ uint8_t reserved3[(ARK_DDM_MRD_PS - ARK_DDM_CPLDQ) -
+ sizeof(struct ark_ddm_cpldq_t)];
+ struct ark_ddm_mrd_ps_t mrd_ps;
+ struct ark_ddm_qstats_t queue_stats;
+ struct ark_ddm_cpld_ps_t cpld_ps;
+ uint8_t reserved5[(ARK_DDM_SETUP - ARK_DDM_CPLD_PS) -
+ sizeof(struct ark_ddm_cpld_ps_t)];
+ struct ark_ddm_setup_t setup;
+ uint8_t reserved_p[(ARK_DDM_EXPECTED_SIZE - ARK_DDM_SETUP) -
+ sizeof(struct ark_ddm_setup_t)];
+};
+
+
+/* DDM function prototype */
+int ark_ddm_verify(struct ark_ddm_t *ddm);
+void ark_ddm_start(struct ark_ddm_t *ddm);
+int ark_ddm_stop(struct ark_ddm_t *ddm, const int wait);
+void ark_ddm_reset(struct ark_ddm_t *ddm);
+void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
+void ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr,
+ uint32_t interval);
+void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
+void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
+int ark_ddm_is_stopped(struct ark_ddm_t *ddm);
+uint64_t ark_ddm_queue_byte_count(struct ark_ddm_t *ddm);
+uint64_t ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm);
+void ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm);
+
+#endif
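ark_ddm_verify() checks the overlay size only at runtime. Because every sub-block's offset is pinned by the reserved arrays above, the same invariants can also be enforced at compile time; a hedged sketch assuming a C11 toolchain (these asserts are not part of the driver):

    #include <stddef.h>  /* offsetof */
    #include <assert.h>  /* static_assert (C11) */

    /* Fail the build if the padding drifts and the overlay no longer
     * matches the FPGA register map.
     */
    static_assert(sizeof(struct ark_ddm_t) == ARK_DDM_EXPECTED_SIZE,
                  "DDM overlay size mismatch");
    static_assert(offsetof(struct ark_ddm_t, stats) == ARK_DDM_STATS,
                  "DDM stats block offset mismatch");
    static_assert(offsetof(struct ark_ddm_t, setup) == ARK_DDM_SETUP,
                  "DDM setup block offset mismatch");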
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev.c b/src/spdk/dpdk/drivers/net/ark/ark_ethdev.c
new file mode 100644
index 000000000..c3642012d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev.c
@@ -0,0 +1,1027 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+#include <sys/stat.h>
+#include <dlfcn.h>
+
+#include <rte_bus_pci.h>
+#include <rte_ethdev_pci.h>
+#include <rte_kvargs.h>
+
+#include "ark_global.h"
+#include "ark_logs.h"
+#include "ark_ethdev_tx.h"
+#include "ark_ethdev_rx.h"
+#include "ark_mpu.h"
+#include "ark_ddm.h"
+#include "ark_udm.h"
+#include "ark_rqp.h"
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+#include "ark_pktchkr.h"
+
+/* Internal prototypes */
+static int eth_ark_check_args(struct ark_adapter *ark, const char *params);
+static int eth_ark_dev_init(struct rte_eth_dev *dev);
+static int ark_config_device(struct rte_eth_dev *dev);
+static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev);
+static int eth_ark_dev_configure(struct rte_eth_dev *dev);
+static int eth_ark_dev_start(struct rte_eth_dev *dev);
+static void eth_ark_dev_stop(struct rte_eth_dev *dev);
+static void eth_ark_dev_close(struct rte_eth_dev *dev);
+static int eth_ark_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
+static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
+static int eth_ark_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
+static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr);
+static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
+static void eth_ark_macaddr_remove(struct rte_eth_dev *dev,
+ uint32_t index);
+static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size);
+
+/*
+ * The packet generator is a functional block used to generate packet
+ * patterns for testing. It is not intended for nominal use.
+ */
+#define ARK_PKTGEN_ARG "Pkt_gen"
+
+/*
+ * The packet checker is a functional block used to verify packet
+ * patterns for testing. It is not intended for nominal use.
+ */
+#define ARK_PKTCHKR_ARG "Pkt_chkr"
+
+/*
+ * The packet director is used to select the internal ingress and
+ * egress packets paths during testing. It is not intended for
+ * nominal use.
+ */
+#define ARK_PKTDIR_ARG "Pkt_dir"
+
+/* Devinfo configurations */
+#define ARK_RX_MAX_QUEUE (4096 * 4)
+#define ARK_RX_MIN_QUEUE (512)
+#define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128)
+#define ARK_RX_MIN_BUFSIZE (1024)
+
+#define ARK_TX_MAX_QUEUE (4096 * 4)
+#define ARK_TX_MIN_QUEUE (256)
+
+int ark_logtype;
+
+static const char * const valid_arguments[] = {
+ ARK_PKTGEN_ARG,
+ ARK_PKTCHKR_ARG,
+ ARK_PKTDIR_ARG,
+ NULL
+};
+
+static const struct rte_pci_id pci_id_ark_map[] = {
+ {RTE_PCI_DEVICE(0x1d6c, 0x100d)},
+ {RTE_PCI_DEVICE(0x1d6c, 0x100e)},
+ {.vendor_id = 0, /* sentinel */ },
+};
+
+static int
+eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter));
+
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ ret = eth_ark_dev_init(eth_dev);
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int
+eth_ark_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit);
+}
+
+static struct rte_pci_driver rte_ark_pmd = {
+ .id_table = pci_id_ark_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_ark_pci_probe,
+ .remove = eth_ark_pci_remove,
+};
+
+static const struct eth_dev_ops ark_eth_dev_ops = {
+ .dev_configure = eth_ark_dev_configure,
+ .dev_start = eth_ark_dev_start,
+ .dev_stop = eth_ark_dev_stop,
+ .dev_close = eth_ark_dev_close,
+
+ .dev_infos_get = eth_ark_dev_info_get,
+
+ .rx_queue_setup = eth_ark_dev_rx_queue_setup,
+ .rx_queue_count = eth_ark_dev_rx_queue_count,
+ .tx_queue_setup = eth_ark_tx_queue_setup,
+
+ .link_update = eth_ark_dev_link_update,
+ .dev_set_link_up = eth_ark_dev_set_link_up,
+ .dev_set_link_down = eth_ark_dev_set_link_down,
+
+ .rx_queue_start = eth_ark_rx_start_queue,
+ .rx_queue_stop = eth_ark_rx_stop_queue,
+
+ .tx_queue_start = eth_ark_tx_queue_start,
+ .tx_queue_stop = eth_ark_tx_queue_stop,
+
+ .stats_get = eth_ark_dev_stats_get,
+ .stats_reset = eth_ark_dev_stats_reset,
+
+ .mac_addr_add = eth_ark_macaddr_add,
+ .mac_addr_remove = eth_ark_macaddr_remove,
+ .mac_addr_set = eth_ark_set_default_mac_addr,
+
+ .mtu_set = eth_ark_set_mtu,
+};
+
+static int
+check_for_ext(struct ark_adapter *ark)
+{
+ int found = 0;
+
+ /* Get the env */
+ const char *dllpath = getenv("ARK_EXT_PATH");
+
+ if (dllpath == NULL) {
+ PMD_DEBUG_LOG(DEBUG, "ARK EXT NO dll path specified\n");
+ return 0;
+ }
+ PMD_DRV_LOG(INFO, "ARK EXT found dll path at %s\n", dllpath);
+
+ /* Open and load the .so */
+ ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
+ if (ark->d_handle == NULL) {
+ PMD_DRV_LOG(ERR, "Could not load user extension %s\n",
+ dllpath);
+ return -1;
+ }
+ PMD_DRV_LOG(INFO, "SUCCESS: loaded user extension %s\n",
+ dllpath);
+
+ /* Get the entry points */
+ ark->user_ext.dev_init =
+ (void *(*)(struct rte_eth_dev *, void *, int))
+ dlsym(ark->d_handle, "dev_init");
+ PMD_DEBUG_LOG(DEBUG, "device ext init pointer = %p\n",
+ ark->user_ext.dev_init);
+ ark->user_ext.dev_get_port_count =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_get_port_count");
+ ark->user_ext.dev_uninit =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_uninit");
+ ark->user_ext.dev_configure =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_configure");
+ ark->user_ext.dev_start =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_start");
+ ark->user_ext.dev_stop =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_stop");
+ ark->user_ext.dev_close =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_close");
+ ark->user_ext.link_update =
+ (int (*)(struct rte_eth_dev *, int, void *))
+ dlsym(ark->d_handle, "link_update");
+ ark->user_ext.dev_set_link_up =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_set_link_up");
+ ark->user_ext.dev_set_link_down =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_set_link_down");
+ ark->user_ext.stats_get =
+ (int (*)(struct rte_eth_dev *, struct rte_eth_stats *,
+ void *))
+ dlsym(ark->d_handle, "stats_get");
+ ark->user_ext.stats_reset =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "stats_reset");
+ ark->user_ext.mac_addr_add =
+ (void (*)(struct rte_eth_dev *, struct rte_ether_addr *,
+ uint32_t, uint32_t, void *))
+ dlsym(ark->d_handle, "mac_addr_add");
+ ark->user_ext.mac_addr_remove =
+ (void (*)(struct rte_eth_dev *, uint32_t, void *))
+ dlsym(ark->d_handle, "mac_addr_remove");
+ ark->user_ext.mac_addr_set =
+ (void (*)(struct rte_eth_dev *, struct rte_ether_addr *,
+ void *))
+ dlsym(ark->d_handle, "mac_addr_set");
+ ark->user_ext.set_mtu =
+ (int (*)(struct rte_eth_dev *, uint16_t,
+ void *))
+ dlsym(ark->d_handle, "set_mtu");
+
+ return found;
+}
+
+static int
+eth_ark_dev_init(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+ struct rte_pci_device *pci_dev;
+ int ret;
+ int port_count = 1;
+ int p;
+
+ ark->eth_dev = dev;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ /* Check to see if there is an extension that we need to load */
+ ret = check_for_ext(ark);
+ if (ret)
+ return ret;
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ /* Use dummy function until setup */
+ dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
+ /* Let rte_eth_dev_close() release the port resources */
+ dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
+ ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
+ ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
+
+ ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE];
+ ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE];
+ ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE];
+ ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE];
+ ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE];
+ ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE];
+ ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE];
+ ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE];
+ ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE];
+ ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE];
+
+ ark->rqpacing =
+ (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
+ ark->started = 0;
+
+ PMD_DEBUG_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
+ ark->sysctrl.t32[4],
+ rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
+ PMD_DRV_LOG(INFO, "Arkville HW Commit_ID: %08x\n",
+ rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
+
+ /* If HW sanity test fails, return an error */
+ if (ark->sysctrl.t32[4] != 0xcafef00d) {
+ PMD_DRV_LOG(ERR,
+ "HW Sanity test has failed, expected constant"
+ " 0x%x, read 0x%x (%s)\n",
+ 0xcafef00d,
+ ark->sysctrl.t32[4], __func__);
+ return -1;
+ }
+ if (ark->sysctrl.t32[3] != 0) {
+ if (ark_rqp_lasped(ark->rqpacing)) {
+ PMD_DRV_LOG(ERR, "Arkville Evaluation System - "
+ "Timer has Expired\n");
+ return -1;
+ }
+ PMD_DRV_LOG(WARNING, "Arkville Evaluation System - "
+ "Timer is Running\n");
+ }
+
+ PMD_DRV_LOG(INFO,
+ "HW Sanity test has PASSED, expected constant"
+ " 0x%x, read 0x%x (%s)\n",
+ 0xcafef00d, ark->sysctrl.t32[4], __func__);
+
+ /* We are a single function multi-port device. */
+ ret = ark_config_device(dev);
+ if (ret)
+ return -1;
+
+ dev->dev_ops = &ark_eth_dev_ops;
+
+ dev->data->mac_addrs = rte_zmalloc("ark", RTE_ETHER_ADDR_LEN, 0);
+ if (!dev->data->mac_addrs) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate memory for storing MAC address\n");
+ return -ENOMEM;
+ }
+
+ if (ark->user_ext.dev_init) {
+ ark->user_data[dev->data->port_id] =
+ ark->user_ext.dev_init(dev, ark->a_bar, 0);
+ if (!ark->user_data[dev->data->port_id]) {
+ PMD_DRV_LOG(INFO,
+ "Failed to initialize PMD extension!"
+ " continuing without it\n");
+ memset(&ark->user_ext, 0, sizeof(struct ark_user_ext));
+ dlclose(ark->d_handle);
+ }
+ }
+
+ if (pci_dev->device.devargs)
+ ret = eth_ark_check_args(ark, pci_dev->device.devargs->args);
+ else
+ PMD_DRV_LOG(INFO, "No Device args found\n");
+
+ if (ret)
+ goto error;
+ /*
+ * We will create additional devices based on the number of requested
+ * ports
+ */
+ if (ark->user_ext.dev_get_port_count)
+ port_count =
+ ark->user_ext.dev_get_port_count(dev,
+ ark->user_data[dev->data->port_id]);
+ ark->num_ports = port_count;
+
+ for (p = 0; p < port_count; p++) {
+ struct rte_eth_dev *eth_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(name, sizeof(name), "arketh%d",
+ dev->data->port_id + p);
+
+ if (p == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = ark->eth_dev;
+ rte_eth_dev_probing_finish(eth_dev);
+ continue;
+ }
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(ERR,
+ "Could not allocate eth_dev for port %d\n",
+ p);
+ goto error;
+ }
+
+ eth_dev->device = &pci_dev->device;
+ eth_dev->data->dev_private = ark;
+ eth_dev->dev_ops = ark->eth_dev->dev_ops;
+ eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst;
+ eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ eth_dev->data->mac_addrs = rte_zmalloc(name,
+ RTE_ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_DRV_LOG(ERR,
+ "Memory allocation for MAC failed!"
+ " Exiting.\n");
+ goto error;
+ }
+
+ if (ark->user_ext.dev_init) {
+ ark->user_data[eth_dev->data->port_id] =
+ ark->user_ext.dev_init(dev, ark->a_bar, p);
+ }
+
+ rte_eth_dev_probing_finish(eth_dev);
+ }
+
+ return ret;
+
+error:
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+ return -1;
+}
+
+/*
+ * Initial device configuration when the device is opened
+ * setup the DDM, and UDM
+ * Called once per PCIE device
+ */
+static int
+ark_config_device(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+ uint16_t num_q, i;
+ struct ark_mpu_t *mpu;
+
+ /*
+ * Make sure that the packet director, generator and checker are in a
+ * known state
+ */
+ ark->start_pg = 0;
+ ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1);
+ if (ark->pg == NULL)
+ return -1;
+ ark_pktgen_reset(ark->pg);
+ ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1);
+ if (ark->pc == NULL)
+ return -1;
+ ark_pktchkr_stop(ark->pc);
+ ark->pd = ark_pktdir_init(ark->pktdir.v);
+ if (ark->pd == NULL)
+ return -1;
+
+ /* Verify HW */
+ if (ark_udm_verify(ark->udm.v))
+ return -1;
+ if (ark_ddm_verify(ark->ddm.v))
+ return -1;
+
+ /* UDM */
+ if (ark_udm_reset(ark->udm.v)) {
+ PMD_DRV_LOG(ERR, "Unable to stop and reset UDM\n");
+ return -1;
+ }
+ /* Keep in reset until the MPUs are cleared */
+
+ /* MPU reset */
+ mpu = ark->mpurx.v;
+ num_q = ark_api_num_queues(mpu);
+ ark->rx_queues = num_q;
+ for (i = 0; i < num_q; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_udm_stop(ark->udm.v, 0);
+ ark_udm_configure(ark->udm.v,
+ RTE_PKTMBUF_HEADROOM,
+ RTE_MBUF_DEFAULT_DATAROOM,
+ ARK_RX_WRITE_TIME_NS);
+ ark_udm_stats_reset(ark->udm.v);
+ ark_udm_stop(ark->udm.v, 0);
+
+ /* TX -- DDM */
+ if (ark_ddm_stop(ark->ddm.v, 1))
+ PMD_DRV_LOG(ERR, "Unable to stop DDM\n");
+
+ mpu = ark->mputx.v;
+ num_q = ark_api_num_queues(mpu);
+ ark->tx_queues = num_q;
+ for (i = 0; i < num_q; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_ddm_reset(ark->ddm.v);
+ ark_ddm_stats_reset(ark->ddm.v);
+
+ ark_ddm_stop(ark->ddm.v, 0);
+ ark_rqp_stats_reset(ark->rqpacing);
+
+ return 0;
+}
+
+static int
+eth_ark_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (ark->user_ext.dev_uninit)
+ ark->user_ext.dev_uninit(dev,
+ ark->user_data[dev->data->port_id]);
+
+ ark_pktgen_uninit(ark->pg);
+ ark_pktchkr_uninit(ark->pc);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+ return 0;
+}
+
+static int
+eth_ark_dev_configure(struct rte_eth_dev *dev)
+{
+ PMD_FUNC_LOG(DEBUG, "\n");
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ eth_ark_dev_set_link_up(dev);
+ if (ark->user_ext.dev_configure)
+ return ark->user_ext.dev_configure(dev,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+}
+
+static void *
+delay_pg_start(void *arg)
+{
+ struct ark_adapter *ark = (struct ark_adapter *)arg;
+
+ /* This function is used exclusively for regression testing. We
+ * perform a blind sleep here to ensure that the external test
+ * application has time to set up the test before we generate packets.
+ */
+ usleep(100000);
+ ark_pktgen_run(ark->pg);
+ return NULL;
+}
+
+static int
+eth_ark_dev_start(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+ int i;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ /* RX Side */
+ /* start UDM */
+ ark_udm_start(ark->udm.v);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_ark_rx_start_queue(dev, i);
+
+ /* TX Side */
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_ark_tx_queue_start(dev, i);
+
+ /* start DDM */
+ ark_ddm_start(ark->ddm.v);
+
+ ark->started = 1;
+ /* set xmit and receive function */
+ dev->rx_pkt_burst = &eth_ark_recv_pkts;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts;
+
+ if (ark->start_pg)
+ ark_pktchkr_run(ark->pc);
+
+ if (ark->start_pg && (dev->data->port_id == 0)) {
+ pthread_t thread;
+
+ /* Delay packet generator start to allow the hardware to be ready.
+ * This is only used for sanity checking with the internal generator.
+ */
+ if (pthread_create(&thread, NULL, delay_pg_start, ark)) {
+ PMD_DRV_LOG(ERR, "Could not create pktgen "
+ "starter thread\n");
+ return -1;
+ }
+ }
+
+ if (ark->user_ext.dev_start)
+ ark->user_ext.dev_start(dev,
+ ark->user_data[dev->data->port_id]);
+
+ return 0;
+}
+
+static void
+eth_ark_dev_stop(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ int status;
+ struct ark_adapter *ark = dev->data->dev_private;
+ struct ark_mpu_t *mpu;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ if (ark->started == 0)
+ return;
+ ark->started = 0;
+
+ /* Stop the extension first */
+ if (ark->user_ext.dev_stop)
+ ark->user_ext.dev_stop(dev,
+ ark->user_data[dev->data->port_id]);
+
+ /* Stop the packet generator */
+ if (ark->start_pg)
+ ark_pktgen_pause(ark->pg);
+
+ dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
+
+ /* STOP TX Side */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ status = eth_ark_tx_queue_stop(dev, i);
+ if (status != 0) {
+ uint16_t port = dev->data->port_id;
+ PMD_DRV_LOG(ERR,
+ "tx_queue stop anomaly"
+ " port %u, queue %u\n",
+ port, i);
+ }
+ }
+
+ /* Stop DDM */
+ /* Wait up to 0.1 second; each stop attempt is up to 1000 * 10 microseconds */
+ for (i = 0; i < 10; i++) {
+ status = ark_ddm_stop(ark->ddm.v, 1);
+ if (status == 0)
+ break;
+ }
+ if (status || i != 0) {
+ PMD_DRV_LOG(ERR, "DDM stop anomaly. status:"
+ " %d iter: %u. (%s)\n",
+ status,
+ i,
+ __func__);
+ ark_ddm_dump(ark->ddm.v, "Stop anomaly");
+
+ mpu = ark->mputx.v;
+ for (i = 0; i < ark->tx_queues; i++) {
+ ark_mpu_dump(mpu, "DDM failure dump", i);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+ }
+
+ /* STOP RX Side */
+ /* Stop UDM; multiple tries attempted */
+ for (i = 0; i < 10; i++) {
+ status = ark_udm_stop(ark->udm.v, 1);
+ if (status == 0)
+ break;
+ }
+ if (status || i != 0) {
+ PMD_DRV_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n",
+ status, i, __func__);
+ ark_udm_dump(ark->udm.v, "Stop anomaly");
+
+ mpu = ark->mpurx.v;
+ for (i = 0; i < ark->rx_queues; i++) {
+ ark_mpu_dump(mpu, "UDM Stop anomaly", i);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+ }
+
+ ark_udm_dump_stats(ark->udm.v, "Post stop");
+ ark_udm_dump_perf(ark->udm.v, "Post stop");
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_ark_rx_dump_queue(dev, i, __func__);
+
+ /* Stop the packet checker if it is running */
+ if (ark->start_pg) {
+ ark_pktchkr_dump_stats(ark->pc);
+ ark_pktchkr_stop(ark->pc);
+ }
+}
+
+static void
+eth_ark_dev_close(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+ uint16_t i;
+
+ if (ark->user_ext.dev_close)
+ ark->user_ext.dev_close(dev,
+ ark->user_data[dev->data->port_id]);
+
+ eth_ark_dev_stop(dev);
+ eth_ark_udm_force_close(dev);
+
+ /*
+ * TODO This should only be called once for the device during shutdown
+ */
+ ark_rqp_dump(ark->rqpacing);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ eth_ark_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = 0;
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = 0;
+ }
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = 0;
+}
+
+static int
+eth_ark_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+ struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE);
+ struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE);
+ uint16_t ports = ark->num_ports;
+
+ dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN;
+ dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE;
+
+ dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports);
+ dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports);
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ARK_RX_MAX_QUEUE,
+ .nb_min = ARK_RX_MIN_QUEUE,
+ .nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ARK_TX_MAX_QUEUE,
+ .nb_min = ARK_TX_MIN_QUEUE,
+ .nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
+
+ /* ARK PMD supports all line rates; how do we indicate that here? */
+ dev_info->speed_capa = (ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G |
+ ETH_LINK_SPEED_100G);
+
+ return 0;
+}
+
+static int
+eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ PMD_DEBUG_LOG(DEBUG, "link status = %d\n",
+ dev->data->dev_link.link_status);
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ if (ark->user_ext.link_update) {
+ return ark->user_ext.link_update
+ (dev, wait_to_complete,
+ ark->user_data[dev->data->port_id]);
+ }
+ return 0;
+}
+
+static int
+eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 1;
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ if (ark->user_ext.dev_set_link_up)
+ return ark->user_ext.dev_set_link_up(dev,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+}
+
+static int
+eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 0;
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ if (ark->user_ext.dev_set_link_down)
+ return ark->user_ext.dev_set_link_down(dev,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+}
+
+static int
+eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint16_t i;
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ stats->ipackets = 0;
+ stats->ibytes = 0;
+ stats->opackets = 0;
+ stats->obytes = 0;
+ stats->imissed = 0;
+ stats->oerrors = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_tx_queue_stats_get(dev->data->tx_queues[i], stats);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
+ if (ark->user_ext.stats_get)
+ return ark->user_ext.stats_get(dev, stats,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+}
+
+static int
+eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
+ if (ark->user_ext.stats_reset)
+ ark->user_ext.stats_reset(dev,
+ ark->user_data[dev->data->port_id]);
+
+ return 0;
+}
+
+static int
+eth_ark_macaddr_add(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_add) {
+ ark->user_ext.mac_addr_add(dev,
+ mac_addr,
+ index,
+ pool,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static void
+eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_remove)
+ ark->user_ext.mac_addr_remove(dev, index,
+ ark->user_data[dev->data->port_id]);
+}
+
+static int
+eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_set) {
+ ark->user_ext.mac_addr_set(dev, mac_addr,
+ ark->user_data[dev->data->port_id]);
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static int
+eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ if (ark->user_ext.set_mtu)
+ return ark->user_ext.set_mtu(dev, size,
+ ark->user_data[dev->data->port_id]);
+
+ return -ENOTSUP;
+}
+
+static inline int
+process_pktdir_arg(const char *key, const char *value,
+ void *extra_args)
+{
+ PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
+ key, value);
+ struct ark_adapter *ark =
+ (struct ark_adapter *)extra_args;
+
+ ark->pkt_dir_v = strtol(value, NULL, 16);
+ PMD_FUNC_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
+ return 0;
+}
+
+static inline int
+process_file_args(const char *key, const char *value, void *extra_args)
+{
+ PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
+ key, value);
+ char *args = (char *)extra_args;
+
+ /* Open the configuration file */
+ FILE *file = fopen(value, "r");
+ char line[ARK_MAX_ARG_LEN];
+ int size = 0;
+ int first = 1;
+
+ if (file == NULL) {
+ PMD_DRV_LOG(ERR, "Unable to open "
+ "config file %s\n", value);
+ return -1;
+ }
+
+ while (fgets(line, sizeof(line), file)) {
+ size += strlen(line);
+ if (size >= ARK_MAX_ARG_LEN) {
+ PMD_DRV_LOG(ERR, "Unable to parse file %s args, "
+ "parameter list is too long\n", value);
+ fclose(file);
+ return -1;
+ }
+ if (first) {
+ strncpy(args, line, ARK_MAX_ARG_LEN);
+ first = 0;
+ } else {
+ strncat(args, line, ARK_MAX_ARG_LEN);
+ }
+ }
+ PMD_FUNC_LOG(DEBUG, "file = %s\n", args);
+ fclose(file);
+ return 0;
+}
+
+static int
+eth_ark_check_args(struct ark_adapter *ark, const char *params)
+{
+ struct rte_kvargs *kvlist;
+ unsigned int k_idx;
+ struct rte_kvargs_pair *pair = NULL;
+ int ret = -1;
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return 0;
+
+ ark->pkt_gen_args[0] = 0;
+ ark->pkt_chkr_args[0] = 0;
+
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ PMD_FUNC_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
+ pair->key,
+ pair->value);
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTDIR_ARG,
+ &process_pktdir_arg,
+ ark) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
+ goto free_kvlist;
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTGEN_ARG,
+ &process_file_args,
+ ark->pkt_gen_args) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
+ goto free_kvlist;
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTCHKR_ARG,
+ &process_file_args,
+ ark->pkt_chkr_args) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
+ goto free_kvlist;
+ }
+
+ PMD_DRV_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
+ /* Setup the packet director */
+ ark_pktdir_setup(ark->pd, ark->pkt_dir_v);
+
+ /* Setup the packet generator */
+ if (ark->pkt_gen_args[0]) {
+ PMD_DRV_LOG(INFO, "Setting up the packet generator\n");
+ ark_pktgen_parse(ark->pkt_gen_args);
+ ark_pktgen_reset(ark->pg);
+ ark_pktgen_setup(ark->pg);
+ ark->start_pg = 1;
+ }
+
+ /* Setup the packet checker */
+ if (ark->pkt_chkr_args[0]) {
+ ark_pktchkr_parse(ark->pkt_chkr_args);
+ ark_pktchkr_setup(ark->pc);
+ }
+
+ ret = 0;
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic ");
+RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map);
+RTE_PMD_REGISTER_PARAM_STRING(net_ark,
+ ARK_PKTGEN_ARG "=<filename> "
+ ARK_PKTCHKR_ARG "=<filename> "
+ ARK_PKTDIR_ARG "=<bitmap>");
+
+RTE_INIT(ark_init_log)
+{
+ ark_logtype = rte_log_register("pmd.net.ark");
+ if (ark_logtype >= 0)
+ rte_log_set_level(ark_logtype, RTE_LOG_NOTICE);
+}
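RTE_PMD_REGISTER_PARAM_STRING above advertises the three devargs parsed by eth_ark_check_args(): Pkt_gen and Pkt_chkr name configuration files read by process_file_args(), and Pkt_dir is a hex bitmap parsed with strtol(..., 16). A usage sketch — the PCI address, bitmap value, and file names are illustrative assumptions, and -w is the EAL whitelist syntax of this DPDK generation:

    # Hypothetical testpmd invocation wiring up the self-test blocks:
    testpmd -w 0000:01:00.0,Pkt_dir=0x00110F10,Pkt_gen=./pg.conf,Pkt_chkr=./pc.conf -- -i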
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c
new file mode 100644
index 000000000..4d518d558
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c
@@ -0,0 +1,680 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_ethdev_rx.h"
+#include "ark_global.h"
+#include "ark_logs.h"
+#include "ark_mpu.h"
+#include "ark_udm.h"
+
+#define ARK_RX_META_SIZE 32
+#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
+#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+
+/* Forward declarations */
+struct ark_rx_queue;
+struct ark_rx_meta;
+
+static void dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi);
+static void ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue);
+static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue,
+ struct ark_rx_meta *meta,
+ struct rte_mbuf *mbuf0,
+ uint32_t cons_index);
+static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);
+static int eth_ark_rx_seed_recovery(struct ark_rx_queue *queue,
+ uint32_t *pnb,
+ struct rte_mbuf **mbufs);
+
+/* ************************************************************************* */
+struct ark_rx_queue {
+ /* array of mbufs to populate */
+ struct rte_mbuf **reserve_q;
+ /* array of physical addresses of the mbuf data pointer */
+ /* This pointer is a virtual address */
+ rte_iova_t *paddress_q;
+ struct rte_mempool *mb_pool;
+
+ struct ark_udm_t *udm;
+ struct ark_mpu_t *mpu;
+
+ uint32_t queue_size;
+ uint32_t queue_mask;
+
+ uint32_t seed_index; /* step 1 set with empty mbuf */
+ uint32_t cons_index; /* step 3 consumed by driver */
+
+ /* The queue Id is used to identify the HW Q */
+ uint16_t phys_qid;
+
+ /* The queue Index is used within the dpdk device structures */
+ uint16_t queue_index;
+
+ uint32_t last_cons;
+
+ /* separate cache line */
+ /* second cache line - fields only used in slow path */
+ RTE_MARKER cacheline1 __rte_cache_min_aligned;
+
+ volatile uint32_t prod_index; /* step 2 filled by FPGA */
+} __rte_cache_aligned;
+
+
+/* ************************************************************************* */
+static int
+eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
+ struct ark_rx_queue *queue,
+ uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx)
+{
+ rte_iova_t queue_base;
+ rte_iova_t phys_addr_q_base;
+ rte_iova_t phys_addr_prod_index;
+
+ queue_base = rte_malloc_virt2iova(queue);
+ phys_addr_prod_index = queue_base +
+ offsetof(struct ark_rx_queue, prod_index);
+
+ phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);
+
+ /* Verify HW */
+ if (ark_mpu_verify(queue->mpu, sizeof(rte_iova_t))) {
+ PMD_DRV_LOG(ERR, "Illegal configuration rx queue\n");
+ return -1;
+ }
+
+ /* Stop and Reset and configure MPU */
+ ark_mpu_configure(queue->mpu, phys_addr_q_base, queue->queue_size, 0);
+
+ ark_udm_write_addr(queue->udm, phys_addr_prod_index);
+
+ /* advance the valid pointer, but don't start until the queue starts */
+ ark_mpu_reset_stats(queue->mpu);
+
+ /* The seed is the producer index for the HW */
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ dev->data->rx_queue_state[rx_queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static inline void
+eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index)
+{
+ queue->cons_index = cons_index;
+ eth_ark_rx_seed_mbufs(queue);
+ if (((cons_index - queue->last_cons) >= 64U)) {
+ queue->last_cons = cons_index;
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ }
+}
+
+/* ************************************************************************* */
+int
+eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ static int warning1; /* = 0 */
+ struct ark_adapter *ark = dev->data->dev_private;
+
+ struct ark_rx_queue *queue;
+ uint32_t i;
+ int status;
+
+ int qidx = queue_idx;
+
+ /* We may already be setup, free memory prior to re-allocation */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ eth_ark_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ if (rx_conf != NULL && warning1 == 0) {
+ warning1 = 1;
+ PMD_DRV_LOG(INFO,
+ "Arkville ignores rte_eth_rxconf argument.\n");
+ }
+
+ if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "Error: DPDK Arkville requires head room > %d bytes (%s)\n",
+ ARK_RX_META_SIZE, __func__);
+ return -1; /* ERROR CODE */
+ }
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_DRV_LOG(ERR,
+ "DPDK Arkville configuration queue size must be power of two %u (%s)\n",
+ nb_desc, __func__);
+ return -1; /* ERROR CODE */
+ }
+
+ /* Allocate queue struct */
+ queue = rte_zmalloc_socket("Ark_rxqueue",
+ sizeof(struct ark_rx_queue),
+ 64,
+ socket_id);
+ if (queue == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory in %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* NOTE zmalloc is used, no need to zero indexes, etc. */
+ queue->mb_pool = mb_pool;
+ queue->phys_qid = qidx;
+ queue->queue_index = queue_idx;
+ queue->queue_size = nb_desc;
+ queue->queue_mask = nb_desc - 1;
+
+ queue->reserve_q =
+ rte_zmalloc_socket("Ark_rx_queue mbuf",
+ nb_desc * sizeof(struct rte_mbuf *),
+ 64,
+ socket_id);
+ queue->paddress_q =
+ rte_zmalloc_socket("Ark_rx_queue paddr",
+ nb_desc * sizeof(rte_iova_t),
+ 64,
+ socket_id);
+
+ if (queue->reserve_q == 0 || queue->paddress_q == 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate queue memory in %s\n",
+ __func__);
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+ return -ENOMEM;
+ }
+
+ dev->data->rx_queues[queue_idx] = queue;
+ queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
+ queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);
+
+ /* populate mbuf reserve */
+ status = eth_ark_rx_seed_mbufs(queue);
+
+ if (queue->seed_index != nb_desc) {
+ PMD_DRV_LOG(ERR, "ARK: Failed to allocate %u mbufs for RX queue %d\n",
+ nb_desc, qidx);
+ status = -1;
+ }
+ /* MPU Setup */
+ if (status == 0)
+ status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx);
+
+ if (unlikely(status != 0)) {
+ struct rte_mbuf **mbuf;
+
+ PMD_DRV_LOG(ERR, "Failed to initialize RX queue %d %s\n",
+ qidx,
+ __func__);
+ /* Free the mbufs allocated */
+ for (i = 0, mbuf = queue->reserve_q;
+ i < queue->seed_index; ++i, mbuf++) {
+ rte_pktmbuf_free(*mbuf);
+ }
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+ return -1; /* ERROR CODE */
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_recv_pkts_noop(void *rx_queue __rte_unused,
+ struct rte_mbuf **rx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ark_rx_queue *queue;
+ register uint32_t cons_index, prod_index;
+ uint16_t nb;
+ struct rte_mbuf *mbuf;
+ struct ark_rx_meta *meta;
+
+ queue = (struct ark_rx_queue *)rx_queue;
+ if (unlikely(queue == 0))
+ return 0;
+ if (unlikely(nb_pkts == 0))
+ return 0;
+ prod_index = queue->prod_index;
+ cons_index = queue->cons_index;
+ nb = 0;
+
+ while (prod_index != cons_index) {
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ /* prefetch mbuf */
+ rte_mbuf_prefetch_part1(mbuf);
+ rte_mbuf_prefetch_part2(mbuf);
+
+ /* META DATA embedded in headroom */
+ meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
+
+ mbuf->port = meta->port;
+ mbuf->pkt_len = meta->pkt_len;
+ mbuf->data_len = meta->pkt_len;
+ mbuf->timestamp = meta->timestamp;
+ mbuf->udata64 = meta->user_data;
+
+ if (ARK_RX_DEBUG) { /* debug sanity checks */
+ if ((meta->pkt_len > (1024 * 16)) ||
+ (meta->pkt_len == 0)) {
+ PMD_RX_LOG(DEBUG, "RX: Bad Meta Q: %u"
+ " cons: %" PRIU32
+ " prod: %" PRIU32
+ " seed_index %" PRIU32
+ "\n",
+ queue->phys_qid,
+ cons_index,
+ queue->prod_index,
+ queue->seed_index);
+
+
+ PMD_RX_LOG(DEBUG, " : UDM"
+ " prod: %" PRIU32
+ " len: %u\n",
+ queue->udm->rt_cfg.prod_idx,
+ meta->pkt_len);
+ ark_mpu_dump(queue->mpu,
+ " ",
+ queue->phys_qid);
+ dump_mbuf_data(mbuf, 0, 256);
+ /* it's FUBAR, so fix it */
+ mbuf->pkt_len = 63;
+ meta->pkt_len = 63;
+ }
+ /* seqn is only set under debug */
+ mbuf->seqn = cons_index;
+ }
+
+ if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN))
+ cons_index = eth_ark_rx_jumbo
+ (queue, meta, mbuf, cons_index + 1);
+ else
+ cons_index += 1;
+
+ rx_pkts[nb] = mbuf;
+ nb++;
+ if (nb >= nb_pkts)
+ break;
+ }
+
+ if (unlikely(nb != 0))
+ /* report next free to FPGA */
+ eth_ark_rx_update_cons_index(queue, cons_index);
+
+ return nb;
+}
+
+/* ************************************************************************* */
+static uint32_t
+eth_ark_rx_jumbo(struct ark_rx_queue *queue,
+ struct ark_rx_meta *meta,
+ struct rte_mbuf *mbuf0,
+ uint32_t cons_index)
+{
+ struct rte_mbuf *mbuf_prev;
+ struct rte_mbuf *mbuf;
+
+ uint16_t remaining;
+ uint16_t data_len;
+ uint16_t segments;
+
+ /* first buf populated by caller */
+ mbuf_prev = mbuf0;
+ segments = 1;
+ data_len = RTE_MIN(meta->pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
+ remaining = meta->pkt_len - data_len;
+ mbuf0->data_len = data_len;
+
+ /* HW guarantees that the data does not exceed prod_index! */
+ while (remaining != 0) {
+ data_len = RTE_MIN(remaining,
+ RTE_MBUF_DEFAULT_DATAROOM +
+ RTE_PKTMBUF_HEADROOM);
+
+ remaining -= data_len;
+ segments += 1;
+
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ mbuf_prev->next = mbuf;
+ mbuf_prev = mbuf;
+ mbuf->data_len = data_len;
+ mbuf->data_off = 0;
+ if (ARK_RX_DEBUG)
+ mbuf->seqn = cons_index; /* for debug only */
+
+ cons_index += 1;
+ }
+
+ mbuf0->nb_segs = segments;
+ return cons_index;
+}
+
+/* Drain the internal queue allowing hw to clear out. */
+static void
+eth_ark_rx_queue_drain(struct ark_rx_queue *queue)
+{
+ register uint32_t cons_index;
+ struct rte_mbuf *mbuf;
+
+ cons_index = queue->cons_index;
+
+ /* NOT performance optimized, since this is a one-shot call */
+ while ((cons_index ^ queue->prod_index) & queue->queue_mask) {
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ rte_pktmbuf_free(mbuf);
+ cons_index++;
+ eth_ark_rx_update_cons_index(queue, cons_index);
+ }
+}
+
+uint32_t
+eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ return (queue->prod_index - queue->cons_index); /* mod arith */
+}
+
+/* ************************************************************************* */
+int
+eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ if (queue == 0)
+ return -1;
+
+ dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ ark_mpu_start(queue->mpu);
+
+ ark_udm_queue_enable(queue->udm, 1);
+
+ return 0;
+}
+
+/* ************************************************************************* */
+
+/* Queue can be restarted. Data remains.
+ */
+int
+eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ if (queue == 0)
+ return -1;
+
+ ark_udm_queue_enable(queue->udm, 0);
+
+ dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static inline int
+eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
+{
+ uint32_t limit = queue->cons_index + queue->queue_size;
+ uint32_t seed_index = queue->seed_index;
+
+ uint32_t count = 0;
+ uint32_t seed_m = queue->seed_index & queue->queue_mask;
+
+ uint32_t nb = limit - seed_index;
+
+ /* Handle wrap around -- remainder is filled on the next call */
+ if (unlikely(seed_m + nb > queue->queue_size))
+ nb = queue->queue_size - seed_m;
+
+ struct rte_mbuf **mbufs = &queue->reserve_q[seed_m];
+ int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);
+
+ if (unlikely(status != 0)) {
+ /* Try to recover from lack of mbufs in pool */
+ status = eth_ark_rx_seed_recovery(queue, &nb, mbufs);
+ if (unlikely(status != 0)) {
+ return -1;
+ }
+ }
+
+ if (ARK_RX_DEBUG) { /* DEBUG */
+ while (count != nb) {
+ struct rte_mbuf *mbuf_init =
+ queue->reserve_q[seed_m + count];
+
+ memset(mbuf_init->buf_addr, -1, 512);
+ *((uint32_t *)mbuf_init->buf_addr) =
+ seed_index + count;
+ *(uint16_t *)RTE_PTR_ADD(mbuf_init->buf_addr, 4) =
+ queue->phys_qid;
+ count++;
+ }
+ count = 0;
+ } /* DEBUG */
+ queue->seed_index += nb;
+
+ /* Duff's device https://en.wikipedia.org/wiki/Duff's_device */
+ switch (nb % 4) {
+ case 0:
+ while (count != nb) {
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_iova;
+ count++;
+ /* FALLTHROUGH */
+ case 3:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_iova;
+ count++;
+ /* FALLTHROUGH */
+ case 2:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_iova;
+ count++;
+ /* FALLTHROUGH */
+ case 1:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_iova;
+ count++;
+ /* FALLTHROUGH */
+
+ } /* while (count != nb) */
+ } /* switch */
+
+ return 0;
+}
+
+int
+eth_ark_rx_seed_recovery(struct ark_rx_queue *queue,
+ uint32_t *pnb,
+ struct rte_mbuf **mbufs)
+{
+ int status = -1;
+
+ /* Ignore small allocation failures */
+ if (*pnb <= 64)
+ return -1;
+
+ *pnb = 64U;
+ status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, *pnb);
+ if (status != 0) {
+ PMD_DRV_LOG(ERR,
+ "ARK: Could not allocate %u mbufs from pool for RX queue %u;"
+ " %u free buffers remaining in queue\n",
+ *pnb, queue->queue_index,
+ queue->seed_index - queue->cons_index);
+ }
+ return status;
+}
+
+void
+eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
+ const char *msg)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+
+ ark_ethdev_rx_dump(msg, queue);
+}
+
+/* ************************************************************************* */
+/* Call on device closed no user API, queue is stopped */
+void
+eth_ark_dev_rx_queue_release(void *vqueue)
+{
+ struct ark_rx_queue *queue;
+ uint32_t i;
+
+ queue = (struct ark_rx_queue *)vqueue;
+ if (queue == 0)
+ return;
+
+ ark_udm_queue_enable(queue->udm, 0);
+ /* Stop the MPU since the pointers are going away */
+ ark_mpu_stop(queue->mpu);
+
+ /* Need to clear out mbufs here, dropping packets along the way */
+ eth_ark_rx_queue_drain(queue);
+
+ for (i = 0; i < queue->queue_size; ++i)
+ rte_pktmbuf_free(queue->reserve_q[i]);
+
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+}
+
+void
+eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
+{
+ struct ark_rx_queue *queue;
+ struct ark_udm_t *udm;
+
+ queue = vqueue;
+ if (queue == 0)
+ return;
+ udm = queue->udm;
+
+ uint64_t ibytes = ark_udm_bytes(udm);
+ uint64_t ipackets = ark_udm_packets(udm);
+ uint64_t idropped = ark_udm_dropped(queue->udm);
+
+ stats->q_ipackets[queue->queue_index] = ipackets;
+ stats->q_ibytes[queue->queue_index] = ibytes;
+ stats->q_errors[queue->queue_index] = idropped;
+ stats->ipackets += ipackets;
+ stats->ibytes += ibytes;
+ stats->imissed += idropped;
+}
+
+void
+eth_rx_queue_stats_reset(void *vqueue)
+{
+ struct ark_rx_queue *queue;
+
+ queue = vqueue;
+ if (queue == 0)
+ return;
+
+ ark_mpu_reset_stats(queue->mpu);
+ ark_udm_queue_stats_reset(queue->udm);
+}
+
+void
+eth_ark_udm_force_close(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+ struct ark_rx_queue *queue;
+ uint32_t index;
+ uint16_t i;
+
+ if (!ark_udm_is_flushed(ark->udm.v)) {
+ /* restart the MPUs */
+ PMD_DRV_LOG(ERR, "ARK: %s UDM not flushed\n", __func__);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ queue = (struct ark_rx_queue *)dev->data->rx_queues[i];
+ if (queue == 0)
+ continue;
+
+ ark_mpu_start(queue->mpu);
+ /* Add some buffers */
+ index = 100000 + queue->seed_index;
+ ark_mpu_set_producer(queue->mpu, index);
+ }
+ /* Wait to allow data to pass */
+ usleep(100);
+
+ PMD_DEBUG_LOG(DEBUG, "UDM forced flush attempt, stopped = %d\n",
+ ark_udm_is_flushed(ark->udm.v));
+ }
+ ark_udm_reset(ark->udm.v);
+}
+
+static void
+ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue)
+{
+ if (queue == NULL)
+ return;
+ PMD_DEBUG_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name);
+ PMD_DEBUG_LOG(DEBUG, ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
+ "queue_size", queue->queue_size,
+ "seed_index", queue->seed_index,
+ "prod_index", queue->prod_index,
+ "cons_index", queue->cons_index);
+
+ ark_mpu_dump(queue->mpu, name, queue->phys_qid);
+ ark_mpu_dump_setup(queue->mpu, queue->phys_qid);
+ ark_udm_dump(queue->udm, name);
+ ark_udm_dump_setup(queue->udm, queue->phys_qid);
+}
+
+/* Only used in debug.
+ * This function is a raw memory dump of a portion of an mbuf's memory
+ * region. The usual function, rte_pktmbuf_dump(), only shows data
+ * with respect to the data_off field. This function shows data
+ * anywhere in the mbuf's buffer. This is useful for examining
+ * data in the headroom or tailroom portion of an mbuf.
+ */
+static void
+dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi)
+{
+ uint16_t i, j;
+
+ PMD_DRV_LOG(INFO, " MBUF: %p len %d, off: %d, seq: %" PRIU32 "\n", mbuf,
+ mbuf->pkt_len, mbuf->data_off, mbuf->seqn);
+ for (i = lo; i < hi; i += 16) {
+ uint8_t *dp = RTE_PTR_ADD(mbuf->buf_addr, i);
+
+ PMD_DRV_LOG(INFO, " %6d: ", i);
+ for (j = 0; j < 16; j++)
+ PMD_DRV_LOG(INFO, " %02x", dp[j]);
+
+ PMD_DRV_LOG(INFO, "\n");
+ }
+}
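The queue indexes above (seed_index, prod_index, cons_index) are free-running 32-bit counters, masked with queue_mask only when a slot is actually touched. That is why nb_desc must be a power of two and why eth_ark_dev_rx_queue_count() can return prod_index - cons_index with no explicit wrap handling. A self-contained sketch of the invariant, with illustrative values only:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            const uint32_t size = 8, mask = size - 1; /* power of two, as enforced in setup */
            uint32_t cons = UINT32_MAX - 2;           /* free-running counter about to wrap */
            uint32_t prod = cons + 5;                 /* wraps past zero */

            assert(prod - cons == 5);                 /* occupancy is wrap-safe (mod 2^32) */
            for (uint32_t i = cons; i != prod; i++)
                    assert((i & mask) < size);        /* masked counter is always a valid slot */
            return 0;
    }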
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h
new file mode 100644
index 000000000..0fdd29b1a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_ETHDEV_RX_H_
+#define _ARK_ETHDEV_RX_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_ethdev_driver.h>
+
+
+int eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+uint32_t eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id);
+int eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id);
+uint16_t eth_ark_recv_pkts_noop(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t eth_ark_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void eth_ark_dev_rx_queue_release(void *rx_queue);
+void eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats);
+void eth_rx_queue_stats_reset(void *vqueue);
+void eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
+ const char *msg);
+void eth_ark_udm_force_close(struct rte_eth_dev *dev);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c
new file mode 100644
index 000000000..289668774
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_ethdev_tx.h"
+#include "ark_global.h"
+#include "ark_mpu.h"
+#include "ark_ddm.h"
+#include "ark_logs.h"
+
+#define ARK_TX_META_SIZE 32
+#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
+#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+
+
+/* ************************************************************************* */
+struct ark_tx_queue {
+ struct ark_tx_meta *meta_q;
+ struct rte_mbuf **bufs;
+
+ /* handles for hw objects */
+ struct ark_mpu_t *mpu;
+ struct ark_ddm_t *ddm;
+
+ /* Stats: HW tracks bytes and packets; SW must count send errors */
+ uint64_t tx_errors;
+
+ uint32_t queue_size;
+ uint32_t queue_mask;
+
+ /* 3 indexes to the paired data rings. */
+ uint32_t prod_index; /* where to put the next one */
+ uint32_t free_index; /* mbuf has been freed */
+
+ /* The queue ID is used to identify the HW queue */
+ uint16_t phys_qid;
+ /* The queue index within the DPDK device structures */
+ uint16_t queue_index;
+
+ uint32_t pad[1];
+
+ /* second cache line - fields only used in slow path */
+ RTE_MARKER cacheline1 __rte_cache_min_aligned;
+ uint32_t cons_index; /* hw is done, can be freed */
+} __rte_cache_aligned;
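+
+/*
+ * Index relationship sketch (illustrative numbers): the three indexes
+ * only ever increase and are masked with queue_mask on use. With
+ * queue_size = 256, prod_index = 300, cons_index = 290 and
+ * free_index = 288, slots 288-289 hold mbufs the HW has finished with
+ * but SW has not yet freed, and 300 - 288 = 12 slots are in flight.
+ */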
+
+/* Forward declarations */
+static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue,
+ struct rte_mbuf *mbuf);
+static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
+static void free_completed_tx(struct ark_tx_queue *queue);
+
+static inline void
+ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
+{
+ ark_mpu_stop(queue->mpu);
+}
+
+/* ************************************************************************* */
+static inline void
+eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
+ const struct rte_mbuf *mbuf,
+ uint8_t flags)
+{
+ meta->physaddr = rte_mbuf_data_iova(mbuf);
+ meta->user1 = (uint32_t)mbuf->udata64;
+ meta->data_len = rte_pktmbuf_data_len(mbuf);
+ meta->flags = flags;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
+ struct rte_mbuf **tx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct ark_tx_queue *queue;
+ struct rte_mbuf *mbuf;
+ struct ark_tx_meta *meta;
+
+ uint32_t idx;
+ uint32_t prod_index_limit;
+ int stat;
+ uint16_t nb;
+
+ queue = (struct ark_tx_queue *)vtxq;
+
+ /* free any packets after the HW is done with them */
+ free_completed_tx(queue);
+
+ prod_index_limit = queue->queue_size + queue->free_index;
+
+ for (nb = 0;
+ (nb < nb_pkts) && (queue->prod_index != prod_index_limit);
+ ++nb) {
+ mbuf = tx_pkts[nb];
+
+ if (ARK_TX_PAD_TO_60) {
+ if (unlikely(rte_pktmbuf_pkt_len(mbuf) < 60)) {
+ /* this packet even if it is small can be split,
+ * be sure to add to the end mbuf
+ */
+ uint16_t to_add =
+ 60 - rte_pktmbuf_pkt_len(mbuf);
+ char *appended =
+ rte_pktmbuf_append(mbuf, to_add);
+
+ if (appended == 0) {
+ /* This packet is in error,
+ * we cannot send it so just
+ * count it and delete it.
+ */
+ queue->tx_errors += 1;
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+ memset(appended, 0, to_add);
+ }
+ }
+
+ if (unlikely(mbuf->nb_segs != 1)) {
+ stat = eth_ark_tx_jumbo(queue, mbuf);
+ if (unlikely(stat != 0))
+ break; /* Queue is full */
+ } else {
+ idx = queue->prod_index & queue->queue_mask;
+ queue->bufs[idx] = mbuf;
+ meta = &queue->meta_q[idx];
+ eth_ark_tx_meta_from_mbuf(meta,
+ mbuf,
+ ARK_DDM_SOP |
+ ARK_DDM_EOP);
+ queue->prod_index++;
+ }
+ }
+
+ if (ARK_TX_DEBUG && (nb != nb_pkts)) {
+ PMD_TX_LOG(DEBUG, "TX: Failure to send:"
+ " req: %" PRIU32
+ " sent: %" PRIU32
+ " prod: %" PRIU32
+ " cons: %" PRIU32
+ " free: %" PRIU32 "\n",
+ nb_pkts, nb,
+ queue->prod_index,
+ queue->cons_index,
+ queue->free_index);
+ ark_mpu_dump(queue->mpu,
+ "TX Failure MPU: ",
+ queue->phys_qid);
+ }
+
+ /* let FPGA know producer index. */
+ if (likely(nb != 0))
+ ark_mpu_set_producer(queue->mpu, queue->prod_index);
+
+ return nb;
+}
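+
+/*
+ * Padding sketch: with ARK_TX_PAD_TO_60 enabled, a 42-byte ARP request
+ * gets 18 zero bytes appended by the loop above so the frame meets the
+ * 60-byte Ethernet minimum (excluding the 4-byte FCS).
+ */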
+
+/* ************************************************************************* */
+static uint32_t
+eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf)
+{
+ struct rte_mbuf *next;
+ struct ark_tx_meta *meta;
+ uint32_t free_queue_space;
+ uint32_t idx;
+ uint8_t flags = ARK_DDM_SOP;
+
+ free_queue_space = queue->queue_mask -
+ (queue->prod_index - queue->free_index);
+ if (unlikely(free_queue_space < mbuf->nb_segs))
+ return -1;
+
+ while (mbuf != NULL) {
+ next = mbuf->next;
+
+ idx = queue->prod_index & queue->queue_mask;
+ queue->bufs[idx] = mbuf;
+ meta = &queue->meta_q[idx];
+
+ flags |= (next == NULL) ? ARK_DDM_EOP : 0;
+ eth_ark_tx_meta_from_mbuf(meta, mbuf, flags);
+ queue->prod_index++;
+
+ flags &= ~ARK_DDM_SOP; /* drop SOP flags */
+ mbuf = next;
+ }
+
+ return 0;
+}
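+
+/*
+ * Flag pattern sketch: for a 3-segment chain the loop above emits
+ * metas flagged SOP, 0, EOP; a single-segment packet (handled by the
+ * caller) carries SOP|EOP in one meta.
+ */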
+
+/* ************************************************************************* */
+int
+eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct ark_adapter *ark = dev->data->dev_private;
+ struct ark_tx_queue *queue;
+ int status;
+
+ int qidx = queue_idx;
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_DRV_LOG(ERR,
+ "DPDK Arkville configuration queue size"
+ " must be power of two %u (%s)\n",
+ nb_desc, __func__);
+ return -1;
+ }
+
+ /* Allocate queue struct */
+ queue = rte_zmalloc_socket("Ark_txqueue",
+ sizeof(struct ark_tx_queue),
+ 64,
+ socket_id);
+ if (queue == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate tx "
+ "queue memory in %s\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* rte_zmalloc_socket() returns zeroed memory; no need to initialize fields */
+ queue->queue_size = nb_desc;
+ queue->queue_mask = nb_desc - 1;
+ queue->phys_qid = qidx;
+ queue->queue_index = queue_idx;
+ dev->data->tx_queues[queue_idx] = queue;
+
+ queue->meta_q =
+ rte_zmalloc_socket("Ark_txqueue meta",
+ nb_desc * sizeof(struct ark_tx_meta),
+ 64,
+ socket_id);
+ queue->bufs =
+ rte_zmalloc_socket("Ark_txqueue bufs",
+ nb_desc * sizeof(struct rte_mbuf *),
+ 64,
+ socket_id);
+
+ if (queue->meta_q == 0 || queue->bufs == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate "
+ "queue memory in %s\n", __func__);
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+ return -ENOMEM;
+ }
+
+ queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET);
+ queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET);
+
+ status = eth_ark_tx_hw_queue_config(queue);
+
+ if (unlikely(status != 0)) {
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+ return -1; /* ERROR CODE */
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static int
+eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
+{
+ rte_iova_t queue_base, ring_base, cons_index_addr;
+ uint32_t write_interval_ns;
+
+ /* Verify HW -- MPU */
+ if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
+ return -1;
+
+ queue_base = rte_malloc_virt2iova(queue);
+ ring_base = rte_malloc_virt2iova(queue->meta_q);
+ cons_index_addr =
+ queue_base + offsetof(struct ark_tx_queue, cons_index);
+
+ ark_mpu_stop(queue->mpu);
+ ark_mpu_reset(queue->mpu);
+
+ /* Stop and Reset and configure MPU */
+ ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1);
+
+ /*
+ * Adjust the write interval based on queue size --
+ * increase PCIe traffic when the mbuf count is low.
+ * Queue sizes less than 128 are not allowed.
+ */
+ switch (queue->queue_size) {
+ case 128:
+ write_interval_ns = 500;
+ break;
+ case 256:
+ write_interval_ns = 500;
+ break;
+ case 512:
+ write_interval_ns = 1000;
+ break;
+ default:
+ write_interval_ns = 2000;
+ break;
+ }
+
+ /* Consumer-index completion address for the DDM */
+ ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns);
+
+ return 0;
+}
+
+/* ************************************************************************* */
+void
+eth_ark_tx_queue_release(void *vtx_queue)
+{
+ struct ark_tx_queue *queue;
+
+ queue = (struct ark_tx_queue *)vtx_queue;
+
+ ark_tx_hw_queue_stop(queue);
+
+ queue->cons_index = queue->prod_index;
+ free_completed_tx(queue);
+
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+}
+
+/* ************************************************************************* */
+int
+eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_tx_queue *queue;
+ int cnt = 0;
+
+ queue = dev->data->tx_queues[queue_id];
+
+ /* Wait for DDM to send out all packets. */
+ while (queue->cons_index != queue->prod_index) {
+ usleep(100);
+ if (cnt++ > 10000)
+ return -1;
+ }
+
+ ark_mpu_stop(queue->mpu);
+ free_completed_tx(queue);
+
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_tx_queue *queue;
+
+ queue = dev->data->tx_queues[queue_id];
+ if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ ark_mpu_start(queue->mpu);
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static void
+free_completed_tx(struct ark_tx_queue *queue)
+{
+ struct rte_mbuf *mbuf;
+ struct ark_tx_meta *meta;
+ uint32_t top_index;
+
+ top_index = queue->cons_index; /* read once */
+ while (queue->free_index != top_index) {
+ meta = &queue->meta_q[queue->free_index & queue->queue_mask];
+ mbuf = queue->bufs[queue->free_index & queue->queue_mask];
+
+ if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
+ /* ref count of the mbuf is checked in this call. */
+ rte_pktmbuf_free(mbuf);
+ }
+ queue->free_index++;
+ }
+}
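+
+/*
+ * Only the meta flagged SOP is freed above because rte_pktmbuf_free()
+ * releases an entire segment chain; freeing the continuation metas of
+ * a jumbo frame as well would double-free the chained segments.
+ */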
+
+/* ************************************************************************* */
+void
+eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
+{
+ struct ark_tx_queue *queue;
+ struct ark_ddm_t *ddm;
+ uint64_t bytes, pkts;
+
+ queue = vqueue;
+ ddm = queue->ddm;
+
+ bytes = ark_ddm_queue_byte_count(ddm);
+ pkts = ark_ddm_queue_pkt_count(ddm);
+
+ stats->q_opackets[queue->queue_index] = pkts;
+ stats->q_obytes[queue->queue_index] = bytes;
+ stats->opackets += pkts;
+ stats->obytes += bytes;
+ stats->oerrors += queue->tx_errors;
+}
+
+void
+eth_tx_queue_stats_reset(void *vqueue)
+{
+ struct ark_tx_queue *queue;
+ struct ark_ddm_t *ddm;
+
+ queue = vqueue;
+ ddm = queue->ddm;
+
+ ark_ddm_queue_reset_stats(ddm);
+ queue->tx_errors = 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h
new file mode 100644
index 000000000..e448ce222
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_ETHDEV_TX_H_
+#define _ARK_ETHDEV_TX_H_
+
+#include <stdint.h>
+
+#include <rte_ethdev_driver.h>
+
+
+uint16_t eth_ark_xmit_pkts_noop(void *vtxq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t eth_ark_xmit_pkts(void *vtxq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void eth_ark_tx_queue_release(void *vtx_queue);
+int eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
+int eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
+void eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats);
+void eth_tx_queue_stats_reset(void *vqueue);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ext.h b/src/spdk/dpdk/drivers/net/ark/ark_ext.h
new file mode 100644
index 000000000..5a987e4d6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_ext.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_EXT_H_
+#define _ARK_EXT_H_
+
+#include <rte_ethdev_driver.h>
+
+/*
+ * This is the template file for users who wish to define a dynamic
+ * extension to the Arkville PMD. Users who create an extension
+ * should include this file and define the necessary and desired
+ * functions.
+ * Only one function is required for an extension, dev_init(); all other
+ * functions prototyped in this file are optional.
+ */
+
+/*
+ * Called post PMD init.
+ * The implementation returns its private data, which is passed into
+ * all other functions as user_data.
+ * The ARK extension implementation MUST implement this function.
+ */
+void *dev_init(struct rte_eth_dev *dev, void *a_bar, int port_id);
+
+/* Called during device shutdown */
+void dev_uninit(struct rte_eth_dev *dev, void *user_data);
+
+/* This call is optional and allows the
+ * extension to specify the number of supported ports.
+ */
+uint8_t dev_get_port_count(struct rte_eth_dev *dev,
+ void *user_data);
+
+/*
+ * The following functions are optional and are directly mapped
+ * from the DPDK PMD ops structure.
+ * Each function if implemented is called after the ARK PMD
+ * implementation executes.
+ */
+
+int dev_configure(struct rte_eth_dev *dev,
+ void *user_data);
+
+int dev_start(struct rte_eth_dev *dev,
+ void *user_data);
+
+void dev_stop(struct rte_eth_dev *dev,
+ void *user_data);
+
+void dev_close(struct rte_eth_dev *dev,
+ void *user_data);
+
+int link_update(struct rte_eth_dev *dev,
+ int wait_to_complete,
+ void *user_data);
+
+int dev_set_link_up(struct rte_eth_dev *dev,
+ void *user_data);
+
+int dev_set_link_down(struct rte_eth_dev *dev,
+ void *user_data);
+
+int stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats,
+ void *user_data);
+
+void stats_reset(struct rte_eth_dev *dev,
+ void *user_data);
+
+void mac_addr_add(struct rte_eth_dev *dev,
+ struct rte_ether_addr *macadr,
+ uint32_t index,
+ uint32_t pool,
+ void *user_data);
+
+void mac_addr_remove(struct rte_eth_dev *dev,
+ uint32_t index,
+ void *user_data);
+
+void mac_addr_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr,
+ void *user_data);
+
+int set_mtu(struct rte_eth_dev *dev,
+ uint16_t size,
+ void *user_data);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_global.h b/src/spdk/dpdk/drivers/net/ark/ark_global.h
new file mode 100644
index 000000000..403df5900
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_global.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_GLOBAL_H_
+#define _ARK_GLOBAL_H_
+
+#include <time.h>
+#include <assert.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_version.h>
+
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+#include "ark_pktchkr.h"
+
+#define ETH_ARK_ARG_MAXLEN 64
+#define ARK_SYSCTRL_BASE 0x0
+#define ARK_PKTGEN_BASE 0x10000
+#define ARK_MPU_RX_BASE 0x20000
+#define ARK_UDM_BASE 0x30000
+#define ARK_MPU_TX_BASE 0x40000
+#define ARK_DDM_BASE 0x60000
+#define ARK_CMAC_BASE 0x80000
+#define ARK_PKTDIR_BASE 0xa0000
+#define ARK_PKTCHKR_BASE 0x90000
+#define ARK_RCPACING_BASE 0xb0000
+#define ARK_EXTERNAL_BASE 0x100000
+#define ARK_MPU_QOFFSET 0x00100
+#define ARK_MAX_PORTS RTE_MAX_ETHPORTS
+
+#define offset8(n) n
+#define offset16(n) ((n) / 2)
+#define offset32(n) ((n) / 4)
+#define offset64(n) ((n) / 8)
+
+/* Maximum length of arg list in bytes */
+#define ARK_MAX_ARG_LEN 256
+
+/*
+ * Structure to store private data for each PF/VF instance.
+ */
+#define def_ptr(type, name) \
+ union type { \
+ uint64_t *t64; \
+ uint32_t *t32; \
+ uint16_t *t16; \
+ uint8_t *t8; \
+ void *v; \
+ } name
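+
+/*
+ * Usage sketch for def_ptr() and the offsetN() helpers above
+ * (illustrative): given "def_ptr(sys_ctrl, sysctrl);" in
+ * struct ark_adapter, a 32-bit register at byte offset 0x20 can be
+ * read as ark->sysctrl.t32[offset32(0x20)], i.e. index 8 into the
+ * uint32_t view of the same base pointer.
+ */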
+
+struct ark_user_ext {
+ void *(*dev_init)(struct rte_eth_dev *, void *abar, int port_id);
+ void (*dev_uninit)(struct rte_eth_dev *, void *);
+ int (*dev_get_port_count)(struct rte_eth_dev *, void *);
+ int (*dev_configure)(struct rte_eth_dev *, void *);
+ int (*dev_start)(struct rte_eth_dev *, void *);
+ void (*dev_stop)(struct rte_eth_dev *, void *);
+ void (*dev_close)(struct rte_eth_dev *, void *);
+ int (*link_update)(struct rte_eth_dev *, int wait_to_complete, void *);
+ int (*dev_set_link_up)(struct rte_eth_dev *, void *);
+ int (*dev_set_link_down)(struct rte_eth_dev *, void *);
+ int (*stats_get)(struct rte_eth_dev *, struct rte_eth_stats *, void *);
+ void (*stats_reset)(struct rte_eth_dev *, void *);
+ void (*mac_addr_add)(struct rte_eth_dev *,
+ struct rte_ether_addr *,
+ uint32_t,
+ uint32_t,
+ void *);
+ void (*mac_addr_remove)(struct rte_eth_dev *, uint32_t, void *);
+ void (*mac_addr_set)(struct rte_eth_dev *, struct rte_ether_addr *,
+ void *);
+ int (*set_mtu)(struct rte_eth_dev *, uint16_t, void *);
+};
+
+struct ark_adapter {
+ /* User extension private data */
+ void *user_data[ARK_MAX_PORTS];
+
+ /* Pointers to packet generator and checker */
+ int start_pg;
+ ark_pkt_gen_t pg;
+ ark_pkt_chkr_t pc;
+ ark_pkt_dir_t pd;
+
+ int num_ports;
+
+ /* Packet generator/checker args */
+ char pkt_gen_args[ARK_MAX_ARG_LEN];
+ char pkt_chkr_args[ARK_MAX_ARG_LEN];
+ uint32_t pkt_dir_v;
+
+ /* eth device */
+ struct rte_eth_dev *eth_dev;
+
+ void *d_handle;
+ struct ark_user_ext user_ext;
+
+ /* Our Bar 0 */
+ uint8_t *bar0;
+
+ /* Application Bar */
+ uint8_t *a_bar;
+
+ /* Arkville demo block offsets */
+ def_ptr(sys_ctrl, sysctrl);
+ def_ptr(pkt_gen, pktgen);
+ def_ptr(mpu_rx, mpurx);
+ def_ptr(UDM, udm);
+ def_ptr(mpu_tx, mputx);
+ def_ptr(DDM, ddm);
+ def_ptr(CMAC, cmac);
+ def_ptr(external, external);
+ def_ptr(pkt_dir, pktdir);
+ def_ptr(pkt_chkr, pktchkr);
+
+ int started;
+ uint16_t rx_queues;
+ uint16_t tx_queues;
+
+ struct ark_rqpace_t *rqpacing;
+};
+
+typedef uint32_t *ark_t;
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_logs.h b/src/spdk/dpdk/drivers/net/ark/ark_logs.h
new file mode 100644
index 000000000..44aac6102
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_logs.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_DEBUG_H_
+#define _ARK_DEBUG_H_
+
+#include <inttypes.h>
+#include <rte_log.h>
+
+
+/* Configuration option to pad TX packets to 60 bytes */
+#ifdef RTE_LIBRTE_ARK_PAD_TX
+#define ARK_TX_PAD_TO_60 1
+#else
+#define ARK_TX_PAD_TO_60 0
+#endif
+
+/* The system PRIu32/PRIu64 macros, aliased in upper case for local style */
+#define PRIU32 PRIu32
+#define PRIU64 PRIu64
+
+/* Format specifiers for string data pairs */
+#define ARK_SU32 "\n\t%-20s %'20" PRIU32
+#define ARK_SU64 "\n\t%-20s %'20" PRIU64
+#define ARK_SU64X "\n\t%-20s %#20" PRIx64
+#define ARK_SPTR "\n\t%-20s %20p"
+
+extern int ark_logtype;
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ##level, ark_logtype, fmt, ## args)
+
+/* Conditional trace definitions */
+#define ARK_TRACE_ON(level, fmt, args...) \
+ PMD_DRV_LOG(level, fmt, ## args)
+
+/* This pattern allows the compiler to check arguments even when disabled */
+#define ARK_TRACE_OFF(level, fmt, args...) \
+ do { \
+ if (0) \
+ PMD_DRV_LOG(level, fmt, ## args); \
+ } while (0)
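+
+/*
+ * For example, even when compiled out, a call such as
+ * ARK_TRACE_OFF(DEBUG, "%u\n", some_pointer) typically still draws a
+ * -Wformat warning, since rte_log() is declared with printf-style
+ * format checking and the if (0) branch is discarded only after
+ * type-checking.
+ */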
+
+/* tracing including the function name */
+#define ARK_FUNC_ON(level, fmt, args...) \
+ PMD_DRV_LOG(level, "%s(): " fmt, __func__, ## args)
+
+/* as above, but compiled out; arguments are still type-checked */
+#define ARK_FUNC_OFF(level, fmt, args...) \
+ do { \
+ if (0) \
+ PMD_DRV_LOG(level, "%s(): " fmt, __func__, ## args); \
+ } while (0)
+
+
+/* Debug macro for tracing full behavior, function tracing and messages */
+#ifdef RTE_LIBRTE_ARK_DEBUG_TRACE
+#define PMD_FUNC_LOG(level, fmt, ...) ARK_FUNC_ON(level, fmt, ##__VA_ARGS__)
+#define PMD_DEBUG_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define PMD_FUNC_LOG(level, fmt, ...) ARK_FUNC_OFF(level, fmt, ##__VA_ARGS__)
+#define PMD_DEBUG_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+
+/* Debug macro for reporting FPGA statistics */
+#ifdef RTE_LIBRTE_ARK_DEBUG_STATS
+#define PMD_STATS_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define PMD_STATS_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+
+/* Debug macro for RX path */
+#ifdef RTE_LIBRTE_ARK_DEBUG_RX
+#define ARK_RX_DEBUG 1
+#define PMD_RX_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define ARK_RX_DEBUG 0
+#define PMD_RX_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+/* Debug macro for TX path */
+#ifdef RTE_LIBRTE_ARK_DEBUG_TX
+#define ARK_TX_DEBUG 1
+#define PMD_TX_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define ARK_TX_DEBUG 0
+#define PMD_TX_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_mpu.c b/src/spdk/dpdk/drivers/net/ark/ark_mpu.c
new file mode 100644
index 000000000..21f840f3c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_mpu.c
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_mpu.h"
+
+uint16_t
+ark_api_num_queues(struct ark_mpu_t *mpu)
+{
+ return mpu->hw.num_queues;
+}
+
+uint16_t
+ark_api_num_queues_per_port(struct ark_mpu_t *mpu, uint16_t ark_ports)
+{
+ return mpu->hw.num_queues / ark_ports;
+}
+
+int
+ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size)
+{
+ uint32_t version;
+
+ version = mpu->id.vernum & 0x0000ff00;
+ if ((mpu->id.idnum != 0x2055504d) ||
+ (mpu->hw.obj_size != obj_size) ||
+ (version != 0x00003100)) {
+ PMD_DRV_LOG(ERR,
+ " MPU module not found as expected %08x"
+ " \"%c%c%c%c %c%c%c%c\"\n",
+ mpu->id.idnum,
+ mpu->id.id[0], mpu->id.id[1],
+ mpu->id.id[2], mpu->id.id[3],
+ mpu->id.ver[0], mpu->id.ver[1],
+ mpu->id.ver[2], mpu->id.ver[3]);
+ PMD_DRV_LOG(ERR,
+ " MPU HW num_queues: %u hw_depth %u,"
+ " obj_size: %u, obj_per_mrr: %u"
+ " Expected size %u\n",
+ mpu->hw.num_queues,
+ mpu->hw.hw_depth,
+ mpu->hw.obj_size,
+ mpu->hw.obj_per_mrr,
+ obj_size);
+ return -1;
+ }
+ return 0;
+}
+
+void
+ark_mpu_stop(struct ark_mpu_t *mpu)
+{
+ mpu->cfg.command = MPU_CMD_STOP;
+}
+
+void
+ark_mpu_start(struct ark_mpu_t *mpu)
+{
+ mpu->cfg.command = MPU_CMD_RUN;
+}
+
+int
+ark_mpu_reset(struct ark_mpu_t *mpu)
+{
+ int cnt = 0;
+
+ mpu->cfg.command = MPU_CMD_RESET;
+
+ while (mpu->cfg.command != MPU_CMD_IDLE) {
+ if (cnt++ > 1000)
+ break;
+ usleep(10);
+ }
+ if (mpu->cfg.command != MPU_CMD_IDLE) {
+ mpu->cfg.command = MPU_CMD_FORCE_RESET;
+ usleep(10);
+ }
+ ark_mpu_reset_stats(mpu);
+ return mpu->cfg.command != MPU_CMD_IDLE;
+}
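+
+/*
+ * Timing note: the loop above polls for up to 1000 * 10 us = 10 ms
+ * before escalating to MPU_CMD_FORCE_RESET; a nonzero return value
+ * signals that the MPU never returned to MPU_CMD_IDLE.
+ */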
+
+void
+ark_mpu_reset_stats(struct ark_mpu_t *mpu)
+{
+ mpu->stats.pci_request = 1; /* reset stats */
+}
+
+int
+ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring, uint32_t ring_size,
+ int is_tx)
+{
+ ark_mpu_reset(mpu);
+
+ if (!rte_is_power_of_2(ring_size)) {
+ PMD_DRV_LOG(ERR, "ARK: Invalid ring size for MPU %d\n",
+ ring_size);
+ return -1;
+ }
+
+ mpu->cfg.ring_base = ring;
+ mpu->cfg.ring_size = ring_size;
+ mpu->cfg.ring_mask = ring_size - 1;
+ mpu->cfg.min_host_move = is_tx ? 1 : mpu->hw.obj_per_mrr;
+ mpu->cfg.min_hw_move = mpu->hw.obj_per_mrr;
+ mpu->cfg.sw_prod_index = 0;
+ mpu->cfg.hw_cons_index = 0;
+ return 0;
+}
+
+void
+ark_mpu_dump(struct ark_mpu_t *mpu, const char *code, uint16_t qid)
+{
+ /* DUMP to see that we have started */
+ PMD_DEBUG_LOG(DEBUG, "MPU: %s Q: %3u sw_prod %u, hw_cons: %u\n",
+ code, qid,
+ mpu->cfg.sw_prod_index, mpu->cfg.hw_cons_index);
+ PMD_DEBUG_LOG(DEBUG, "MPU: %s state: %d count %d, reserved %d"
+ " data 0x%08x_%08x 0x%08x_%08x\n",
+ code,
+ mpu->debug.state, mpu->debug.count,
+ mpu->debug.reserved,
+ mpu->debug.peek[1],
+ mpu->debug.peek[0],
+ mpu->debug.peek[3],
+ mpu->debug.peek[2]
+ );
+ PMD_STATS_LOG(INFO, "MPU: %s Q: %3u"
+ ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64
+ ARK_SU64 ARK_SU64 ARK_SU64 "\n",
+ code, qid,
+ "PCI Request:", mpu->stats.pci_request,
+ "Queue_empty", mpu->stats.q_empty,
+ "Queue_q1", mpu->stats.q_q1,
+ "Queue_q2", mpu->stats.q_q2,
+ "Queue_q3", mpu->stats.q_q3,
+ "Queue_q4", mpu->stats.q_q4,
+ "Queue_full", mpu->stats.q_full
+ );
+}
+
+void
+ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t q_id)
+{
+ PMD_DEBUG_LOG(DEBUG, "MPU Setup Q: %u"
+ ARK_SU64X "\n",
+ q_id,
+ "ring_base", mpu->cfg.ring_base
+ );
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_mpu.h b/src/spdk/dpdk/drivers/net/ark/ark_mpu.h
new file mode 100644
index 000000000..92c3e67c8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_mpu.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_MPU_H_
+#define _ARK_MPU_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The MPU or Memory Prefetch Unit is an internal Arkville hardware
+ * module for moving data between host memory and the hardware FPGA.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * MPU hardware structures
+ * These are overlay structures to a memory-mapped FPGA device. These
+ * structs will never be instantiated in RAM.
+ */
+
+#define ARK_MPU_ID 0x00
+struct ark_mpu_id_t {
+ union {
+ char id[4];
+ uint32_t idnum;
+ };
+ union {
+ char ver[4];
+ uint32_t vernum;
+ };
+ uint32_t phys_id;
+ uint32_t mrr_code;
+};
+
+#define ARK_MPU_HW 0x010
+struct ark_mpu_hw_t {
+ uint16_t num_queues;
+ uint16_t reserved;
+ uint32_t hw_depth;
+ uint32_t obj_size;
+ uint32_t obj_per_mrr;
+};
+
+#define ARK_MPU_CFG 0x040
+struct ark_mpu_cfg_t {
+ rte_iova_t ring_base; /* rte_iova_t is a uint64_t */
+ uint32_t ring_size;
+ uint32_t ring_mask;
+ uint32_t min_host_move;
+ uint32_t min_hw_move;
+ volatile uint32_t sw_prod_index;
+ volatile uint32_t hw_cons_index;
+ volatile uint32_t command;
+};
+enum ARK_MPU_COMMAND {
+ MPU_CMD_IDLE = 1,
+ MPU_CMD_RUN = 2,
+ MPU_CMD_STOP = 4,
+ MPU_CMD_RESET = 8,
+ MPU_CMD_FORCE_RESET = 16,
+ MPU_COMMAND_LIMIT = 0xffffffff
+};
+
+#define ARK_MPU_STATS 0x080
+struct ark_mpu_stats_t {
+ volatile uint64_t pci_request;
+ volatile uint64_t q_empty;
+ volatile uint64_t q_q1;
+ volatile uint64_t q_q2;
+ volatile uint64_t q_q3;
+ volatile uint64_t q_q4;
+ volatile uint64_t q_full;
+};
+
+#define ARK_MPU_DEBUG 0x0C0
+struct ark_mpu_debug_t {
+ volatile uint32_t state;
+ uint32_t reserved;
+ volatile uint32_t count;
+ volatile uint32_t take;
+ volatile uint32_t peek[4];
+};
+
+/* Consolidated structure */
+struct ark_mpu_t {
+ struct ark_mpu_id_t id;
+ uint8_t reserved0[(ARK_MPU_HW - ARK_MPU_ID)
+ - sizeof(struct ark_mpu_id_t)];
+ struct ark_mpu_hw_t hw;
+ uint8_t reserved1[(ARK_MPU_CFG - ARK_MPU_HW) -
+ sizeof(struct ark_mpu_hw_t)];
+ struct ark_mpu_cfg_t cfg;
+ uint8_t reserved2[(ARK_MPU_STATS - ARK_MPU_CFG) -
+ sizeof(struct ark_mpu_cfg_t)];
+ struct ark_mpu_stats_t stats;
+ uint8_t reserved3[(ARK_MPU_DEBUG - ARK_MPU_STATS) -
+ sizeof(struct ark_mpu_stats_t)];
+ struct ark_mpu_debug_t debug;
+};
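+
+/*
+ * Layout sketch: the reservedN pads keep each sub-struct at its
+ * register offset. A compile-time check (not present in the driver,
+ * shown only as an illustration) could read:
+ *
+ *   _Static_assert(offsetof(struct ark_mpu_t, cfg) == ARK_MPU_CFG,
+ *                  "MPU cfg overlay misaligned");
+ */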
+
+uint16_t ark_api_num_queues(struct ark_mpu_t *mpu);
+uint16_t ark_api_num_queues_per_port(struct ark_mpu_t *mpu,
+ uint16_t ark_ports);
+int ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size);
+void ark_mpu_stop(struct ark_mpu_t *mpu);
+void ark_mpu_start(struct ark_mpu_t *mpu);
+int ark_mpu_reset(struct ark_mpu_t *mpu);
+int ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring,
+ uint32_t ring_size, int is_tx);
+
+void ark_mpu_dump(struct ark_mpu_t *mpu, const char *msg, uint16_t idx);
+void ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t qid);
+void ark_mpu_reset_stats(struct ark_mpu_t *mpu);
+
+/* this action is in a performance critical path */
+static inline void
+ark_mpu_set_producer(struct ark_mpu_t *mpu, uint32_t idx)
+{
+ mpu->cfg.sw_prod_index = idx;
+}
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c
new file mode 100644
index 000000000..ef861eea3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <getopt.h>
+#include <sys/time.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ark_pktchkr.h"
+#include "ark_logs.h"
+
+static int set_arg(char *arg, char *val);
+static int ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle);
+
+#define ARK_MAX_STR_LEN 64
+union OPTV {
+ int INT;
+ int BOOL;
+ uint64_t LONG;
+ char STR[ARK_MAX_STR_LEN];
+};
+
+enum OPTYPE {
+ OTINT,
+ OTLONG,
+ OTBOOL,
+ OTSTRING
+};
+
+struct OPTIONS {
+ char opt[ARK_MAX_STR_LEN];
+ enum OPTYPE t;
+ union OPTV v;
+};
+
+static struct OPTIONS toptions[] = {
+ {{"configure"}, OTBOOL, {1} },
+ {{"port"}, OTINT, {0} },
+ {{"mac-dump"}, OTBOOL, {0} },
+ {{"dg-mode"}, OTBOOL, {1} },
+ {{"run"}, OTBOOL, {0} },
+ {{"stop"}, OTBOOL, {0} },
+ {{"dump"}, OTBOOL, {0} },
+ {{"en_resync"}, OTBOOL, {0} },
+ {{"tuser_err_val"}, OTINT, {1} },
+ {{"gen_forever"}, OTBOOL, {0} },
+ {{"en_slaved_start"}, OTBOOL, {0} },
+ {{"vary_length"}, OTBOOL, {0} },
+ {{"incr_payload"}, OTINT, {0} },
+ {{"incr_first_byte"}, OTBOOL, {0} },
+ {{"ins_seq_num"}, OTBOOL, {0} },
+ {{"ins_time_stamp"}, OTBOOL, {1} },
+ {{"ins_udp_hdr"}, OTBOOL, {0} },
+ {{"num_pkts"}, OTLONG, .v.LONG = 10000000000000L},
+ {{"payload_byte"}, OTINT, {0x55} },
+ {{"pkt_spacing"}, OTINT, {60} },
+ {{"pkt_size_min"}, OTINT, {2005} },
+ {{"pkt_size_max"}, OTINT, {1514} },
+ {{"pkt_size_incr"}, OTINT, {1} },
+ {{"eth_type"}, OTINT, {0x0800} },
+ {{"src_mac_addr"}, OTLONG, .v.LONG = 0xdC3cF6425060L},
+ {{"dst_mac_addr"}, OTLONG, .v.LONG = 0x112233445566L},
+ {{"hdr_dW0"}, OTINT, {0x0016e319} },
+ {{"hdr_dW1"}, OTINT, {0x27150004} },
+ {{"hdr_dW2"}, OTINT, {0x76967bda} },
+ {{"hdr_dW3"}, OTINT, {0x08004500} },
+ {{"hdr_dW4"}, OTINT, {0x005276ed} },
+ {{"hdr_dW5"}, OTINT, {0x40004006} },
+ {{"hdr_dW6"}, OTINT, {0x56cfc0a8} },
+ {{"start_offset"}, OTINT, {0} },
+ {{"dst_ip"}, OTSTRING, .v.STR = "169.254.10.240"},
+ {{"dst_port"}, OTINT, {65536} },
+ {{"src_port"}, OTINT, {65536} },
+};
+
+ark_pkt_chkr_t
+ark_pktchkr_init(void *addr, int ord, int l2_mode)
+{
+ struct ark_pkt_chkr_inst *inst =
+ rte_malloc("ark_pkt_chkr_inst",
+ sizeof(struct ark_pkt_chkr_inst), 0);
+ if (inst == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_chkr_inst.\n");
+ return inst;
+ }
+ inst->sregs = (struct ark_pkt_chkr_stat_regs *)addr;
+ inst->cregs =
+ (struct ark_pkt_chkr_ctl_regs *)(((uint8_t *)addr) + 0x100);
+ inst->ordinal = ord;
+ inst->l2_mode = l2_mode;
+ return inst;
+}
+
+void
+ark_pktchkr_uninit(ark_pkt_chkr_t handle)
+{
+ rte_free(handle);
+}
+
+void
+ark_pktchkr_run(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->sregs->pkt_start_stop = 0;
+ inst->sregs->pkt_start_stop = 0x1;
+}
+
+int
+ark_pktchkr_stopped(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->sregs->pkt_start_stop;
+
+ return (((r >> 16) & 1) == 1);
+}
+
+void
+ark_pktchkr_stop(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ int wait_cycle = 10;
+
+ inst->sregs->pkt_start_stop = 0;
+ while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG, "Waiting for pktchk %d to stop...\n",
+ inst->ordinal);
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktchk %d stopped.\n", inst->ordinal);
+}
+
+int
+ark_pktchkr_is_running(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->sregs->pkt_start_stop;
+
+ return ((r & 1) == 1);
+}
+
+static void
+ark_pktchkr_set_pkt_ctrl(ark_pkt_chkr_t handle,
+ uint32_t gen_forever,
+ uint32_t vary_length,
+ uint32_t incr_payload,
+ uint32_t incr_first_byte,
+ uint32_t ins_seq_num,
+ uint32_t ins_udp_hdr,
+ uint32_t en_resync,
+ uint32_t tuser_err_val,
+ uint32_t ins_time_stamp)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = (tuser_err_val << 16) | (en_resync << 0);
+
+ inst->sregs->pkt_ctrl = r;
+ if (!inst->l2_mode)
+ ins_udp_hdr = 0;
+ r = ((gen_forever << 24) |
+ (vary_length << 16) |
+ (incr_payload << 12) |
+ (incr_first_byte << 8) |
+ (ins_time_stamp << 5) |
+ (ins_seq_num << 4) |
+ ins_udp_hdr);
+ inst->cregs->pkt_ctrl = r;
+}
+
+static
+int
+ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->cregs->pkt_ctrl;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+int
+ark_pktchkr_wait_done(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ if (ark_pktchkr_is_gen_forever(handle)) {
+ PMD_DEBUG_LOG(ERR, "Pktchk wait_done will not terminate"
+ " because gen_forever=1\n");
+ return -1;
+ }
+ int wait_cycle = 10;
+
+ while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG, "Waiting for packet checker %d's"
+ " internal pktgen to finish sending...\n",
+ inst->ordinal);
+ PMD_DEBUG_LOG(DEBUG, "Pktchk %d's pktgen done.\n",
+ inst->ordinal);
+ }
+ return 0;
+}
+
+int
+ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ return inst->cregs->pkts_sent;
+}
+
+void
+ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_payload = b;
+}
+
+void
+ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_min = x;
+}
+
+void
+ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_max = x;
+}
+
+void
+ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_incr = x;
+}
+
+void
+ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->num_pkts = x;
+}
+
+void
+ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->src_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->cregs->src_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->dst_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->cregs->dst_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->eth_type = x;
+}
+
+void
+ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr)
+{
+ uint32_t i;
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ for (i = 0; i < 7; i++)
+ inst->cregs->hdr_dw[i] = hdr[i];
+}
+
+void
+ark_pktchkr_dump_stats(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ PMD_STATS_LOG(INFO, "pkts_rcvd = (%'u)\n",
+ inst->sregs->pkts_rcvd);
+ PMD_STATS_LOG(INFO, "bytes_rcvd = (%'" PRIU64 ")\n",
+ inst->sregs->bytes_rcvd);
+ PMD_STATS_LOG(INFO, "pkts_ok = (%'u)\n",
+ inst->sregs->pkts_ok);
+ PMD_STATS_LOG(INFO, "pkts_mismatch = (%'u)\n",
+ inst->sregs->pkts_mismatch);
+ PMD_STATS_LOG(INFO, "pkts_err = (%'u)\n",
+ inst->sregs->pkts_err);
+ PMD_STATS_LOG(INFO, "first_mismatch = (%'u)\n",
+ inst->sregs->first_mismatch);
+ PMD_STATS_LOG(INFO, "resync_events = (%'u)\n",
+ inst->sregs->resync_events);
+ PMD_STATS_LOG(INFO, "pkts_missing = (%'u)\n",
+ inst->sregs->pkts_missing);
+ PMD_STATS_LOG(INFO, "min_latency = (%'u)\n",
+ inst->sregs->min_latency);
+ PMD_STATS_LOG(INFO, "max_latency = (%'u)\n",
+ inst->sregs->max_latency);
+}
+
+static struct OPTIONS *
+options(const char *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(toptions) / sizeof(struct OPTIONS); i++) {
+ if (strcmp(id, toptions[i].opt) == 0)
+ return &toptions[i];
+ }
+ PMD_DRV_LOG(ERR,
+ "pktchkr: Could not find requested option!, option = %s\n",
+ id);
+ return NULL;
+}
+
+static int
+set_arg(char *arg, char *val)
+{
+ struct OPTIONS *o = options(arg);
+
+ if (o) {
+ switch (o->t) {
+ case OTINT:
+ case OTBOOL:
+ o->v.INT = atoi(val);
+ break;
+ case OTLONG:
+ o->v.LONG = atoll(val);
+ break;
+ case OTSTRING:
+ strlcpy(o->v.STR, val, ARK_MAX_STR_LEN);
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/******
+ * Arg format = "opt0=v opt_n=v ..." (split on '=' and whitespace)
+ ******/
+void
+ark_pktchkr_parse(char *args)
+{
+ char *argv, *v;
+ const char toks[] = "=\n\t\v\f \r";
+ argv = strtok(args, toks);
+ v = strtok(NULL, toks);
+ while (argv && v) {
+ set_arg(argv, v);
+ argv = strtok(NULL, toks);
+ v = strtok(NULL, toks);
+ }
+}
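+
+/*
+ * Usage sketch (illustrative values): an argument string such as
+ * "run=1 num_pkts=1000 payload_byte=170" is split by strtok() above
+ * into option/value pairs; values are parsed with atoi()/atoll(), so
+ * they must be decimal.
+ */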
+
+static int32_t parse_ipv4_string(char const *ip_address);
+static int32_t
+parse_ipv4_string(char const *ip_address)
+{
+ unsigned int ip[4];
+
+ if (sscanf(ip_address, "%u.%u.%u.%u",
+ &ip[0], &ip[1], &ip[2], &ip[3]) != 4)
+ return 0;
+ return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul;
+}
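+
+/*
+ * For example, "169.254.10.240" packs to
+ * 169 * 0x1000000 + 254 * 0x10000 + 10 * 0x100 + 240 = 0xa9fe0af0,
+ * the address in host byte order.
+ */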
+
+void
+ark_pktchkr_setup(ark_pkt_chkr_t handle)
+{
+ uint32_t hdr[7];
+ int32_t dst_ip = parse_ipv4_string(options("dst_ip")->v.STR);
+
+ if (!options("stop")->v.BOOL && options("configure")->v.BOOL) {
+ ark_pktchkr_set_payload_byte(handle,
+ options("payload_byte")->v.INT);
+ ark_pktchkr_set_src_mac_addr(handle,
+ options("src_mac_addr")->v.LONG);
+ ark_pktchkr_set_dst_mac_addr(handle,
+ options("dst_mac_addr")->v.LONG);
+
+ ark_pktchkr_set_eth_type(handle,
+ options("eth_type")->v.INT);
+ if (options("dg-mode")->v.BOOL) {
+ hdr[0] = options("hdr_dW0")->v.INT;
+ hdr[1] = options("hdr_dW1")->v.INT;
+ hdr[2] = options("hdr_dW2")->v.INT;
+ hdr[3] = options("hdr_dW3")->v.INT;
+ hdr[4] = options("hdr_dW4")->v.INT;
+ hdr[5] = options("hdr_dW5")->v.INT;
+ hdr[6] = options("hdr_dW6")->v.INT;
+ } else {
+ hdr[0] = dst_ip;
+ hdr[1] = options("dst_port")->v.INT;
+ hdr[2] = options("src_port")->v.INT;
+ hdr[3] = 0;
+ hdr[4] = 0;
+ hdr[5] = 0;
+ hdr[6] = 0;
+ }
+ ark_pktchkr_set_hdr_dW(handle, hdr);
+ ark_pktchkr_set_num_pkts(handle,
+ options("num_pkts")->v.INT);
+ ark_pktchkr_set_pkt_size_min(handle,
+ options("pkt_size_min")->v.INT);
+ ark_pktchkr_set_pkt_size_max(handle,
+ options("pkt_size_max")->v.INT);
+ ark_pktchkr_set_pkt_size_incr(handle,
+ options("pkt_size_incr")->v.INT);
+ ark_pktchkr_set_pkt_ctrl(handle,
+ options("gen_forever")->v.BOOL,
+ options("vary_length")->v.BOOL,
+ options("incr_payload")->v.BOOL,
+ options("incr_first_byte")->v.BOOL,
+ options("ins_seq_num")->v.INT,
+ options("ins_udp_hdr")->v.BOOL,
+ options("en_resync")->v.BOOL,
+ options("tuser_err_val")->v.INT,
+ options("ins_time_stamp")->v.INT);
+ }
+
+ if (options("stop")->v.BOOL)
+ ark_pktchkr_stop(handle);
+
+ if (options("run")->v.BOOL) {
+ PMD_DEBUG_LOG(DEBUG, "Starting packet checker on port %d\n",
+ options("port")->v.INT);
+ ark_pktchkr_run(handle);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h
new file mode 100644
index 000000000..b36228177
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_PKTCHKR_H_
+#define _ARK_PKTCHKR_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#define ARK_PKTCHKR_BASE_ADR 0x90000
+
+typedef void *ark_pkt_chkr_t;
+
+/* The packet checker is an internal Arkville hardware module, which
+ * verifies packet streams generated from the corresponding packet
+ * generator. This module is used for Arkville testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * These are overlay structures to a memory-mapped FPGA device. These
+ * structs will never be instantiated in RAM.
+ */
+struct ark_pkt_chkr_stat_regs {
+ uint32_t r0;
+ uint32_t pkt_start_stop;
+ uint32_t pkt_ctrl;
+ uint32_t pkts_rcvd;
+ uint64_t bytes_rcvd;
+ uint32_t pkts_ok;
+ uint32_t pkts_mismatch;
+ uint32_t pkts_err;
+ uint32_t first_mismatch;
+ uint32_t resync_events;
+ uint32_t pkts_missing;
+ uint32_t min_latency;
+ uint32_t max_latency;
+} __rte_packed;
+
+struct ark_pkt_chkr_ctl_regs {
+ uint32_t pkt_ctrl;
+ uint32_t pkt_payload;
+ uint32_t pkt_size_min;
+ uint32_t pkt_size_max;
+ uint32_t pkt_size_incr;
+ uint32_t num_pkts;
+ uint32_t pkts_sent;
+ uint32_t src_mac_addr_l;
+ uint32_t src_mac_addr_h;
+ uint32_t dst_mac_addr_l;
+ uint32_t dst_mac_addr_h;
+ uint32_t eth_type;
+ uint32_t hdr_dw[7];
+} __rte_packed;
+
+struct ark_pkt_chkr_inst {
+ struct rte_eth_dev_info *dev_info;
+ volatile struct ark_pkt_chkr_stat_regs *sregs;
+ volatile struct ark_pkt_chkr_ctl_regs *cregs;
+ int l2_mode;
+ int ordinal;
+};
+
+/* packet checker functions */
+ark_pkt_chkr_t ark_pktchkr_init(void *addr, int ord, int l2_mode);
+void ark_pktchkr_uninit(ark_pkt_chkr_t handle);
+void ark_pktchkr_run(ark_pkt_chkr_t handle);
+int ark_pktchkr_stopped(ark_pkt_chkr_t handle);
+void ark_pktchkr_stop(ark_pkt_chkr_t handle);
+int ark_pktchkr_is_running(ark_pkt_chkr_t handle);
+int ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle);
+void ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b);
+void ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr);
+void ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr);
+void ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr);
+void ark_pktchkr_parse(char *args);
+void ark_pktchkr_setup(ark_pkt_chkr_t handle);
+void ark_pktchkr_dump_stats(ark_pkt_chkr_t handle);
+int ark_pktchkr_wait_done(ark_pkt_chkr_t handle);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktdir.c b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.c
new file mode 100644
index 000000000..1f2c8182a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.c
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ark_pktdir.h"
+#include "ark_global.h"
+#include "ark_logs.h"
+
+
+ark_pkt_dir_t
+ark_pktdir_init(void *base)
+{
+ struct ark_pkt_dir_inst *inst =
+ rte_malloc("ark_pkt_dir_inst",
+ sizeof(struct ark_pkt_dir_inst),
+ 0);
+ if (inst == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_dir_inst.\n");
+ return inst;
+ }
+ inst->regs = (struct ark_pkt_dir_regs *)base;
+ inst->regs->ctrl = 0x00110110; /* POR state */
+ return inst;
+}
+
+void
+ark_pktdir_uninit(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+
+ rte_free(inst);
+}
+
+void
+ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ inst->regs->ctrl = v;
+}
+
+uint32_t
+ark_pktdir_status(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ return inst->regs->ctrl;
+}
+
+uint32_t
+ark_pktdir_stall_cnt(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ return inst->regs->stall_cnt;
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktdir.h b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.h
new file mode 100644
index 000000000..4afd128f9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_PKTDIR_H_
+#define _ARK_PKTDIR_H_
+
+#include <stdint.h>
+
+#define ARK_PKTDIR_BASE_ADR 0xa0000
+
+typedef void *ark_pkt_dir_t;
+
+
+/* The packet director is an internal Arkville hardware module for
+ * directing packet data in non-typical flows, such as testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * These are overlay structures to a memory-mapped FPGA device. These
+ * structs will never be instantiated in RAM.
+ */
+struct ark_pkt_dir_regs {
+ uint32_t ctrl;
+ uint32_t status;
+ uint32_t stall_cnt;
+} __rte_packed;
+
+struct ark_pkt_dir_inst {
+ volatile struct ark_pkt_dir_regs *regs;
+};
+
+ark_pkt_dir_t ark_pktdir_init(void *base);
+void ark_pktdir_uninit(ark_pkt_dir_t handle);
+void ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v);
+uint32_t ark_pktdir_stall_cnt(ark_pkt_dir_t handle);
+uint32_t ark_pktdir_status(ark_pkt_dir_t handle);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktgen.c b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.c
new file mode 100644
index 000000000..2cae252d6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.c
@@ -0,0 +1,472 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <getopt.h>
+#include <sys/time.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+#include <rte_eal.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ark_pktgen.h"
+#include "ark_logs.h"
+
+#define ARK_MAX_STR_LEN 64
+union OPTV {
+ int INT;
+ int BOOL;
+ uint64_t LONG;
+ char STR[ARK_MAX_STR_LEN];
+};
+
+enum OPTYPE {
+ OTINT,
+ OTLONG,
+ OTBOOL,
+ OTSTRING
+};
+
+struct OPTIONS {
+ char opt[ARK_MAX_STR_LEN];
+ enum OPTYPE t;
+ union OPTV v;
+};
+
+static struct OPTIONS toptions[] = {
+ {{"configure"}, OTBOOL, {1} },
+ {{"dg-mode"}, OTBOOL, {1} },
+ {{"run"}, OTBOOL, {0} },
+ {{"pause"}, OTBOOL, {0} },
+ {{"reset"}, OTBOOL, {0} },
+ {{"dump"}, OTBOOL, {0} },
+ {{"gen_forever"}, OTBOOL, {0} },
+ {{"en_slaved_start"}, OTBOOL, {0} },
+ {{"vary_length"}, OTBOOL, {0} },
+ {{"incr_payload"}, OTBOOL, {0} },
+ {{"incr_first_byte"}, OTBOOL, {0} },
+ {{"ins_seq_num"}, OTBOOL, {0} },
+ {{"ins_time_stamp"}, OTBOOL, {1} },
+ {{"ins_udp_hdr"}, OTBOOL, {0} },
+ {{"num_pkts"}, OTLONG, .v.LONG = 100000000},
+ {{"payload_byte"}, OTINT, {0x55} },
+ {{"pkt_spacing"}, OTINT, {130} },
+ {{"pkt_size_min"}, OTINT, {2006} },
+ {{"pkt_size_max"}, OTINT, {1514} },
+ {{"pkt_size_incr"}, OTINT, {1} },
+ {{"eth_type"}, OTINT, {0x0800} },
+ {{"src_mac_addr"}, OTLONG, .v.LONG = 0xdC3cF6425060L},
+ {{"dst_mac_addr"}, OTLONG, .v.LONG = 0x112233445566L},
+ {{"hdr_dW0"}, OTINT, {0x0016e319} },
+ {{"hdr_dW1"}, OTINT, {0x27150004} },
+ {{"hdr_dW2"}, OTINT, {0x76967bda} },
+ {{"hdr_dW3"}, OTINT, {0x08004500} },
+ {{"hdr_dW4"}, OTINT, {0x005276ed} },
+ {{"hdr_dW5"}, OTINT, {0x40004006} },
+ {{"hdr_dW6"}, OTINT, {0x56cfc0a8} },
+ {{"start_offset"}, OTINT, {0} },
+ {{"bytes_per_cycle"}, OTINT, {10} },
+ {{"shaping"}, OTBOOL, {0} },
+ {{"dst_ip"}, OTSTRING, .v.STR = "169.254.10.240"},
+ {{"dst_port"}, OTINT, {65536} },
+ {{"src_port"}, OTINT, {65536} },
+};
+
+ark_pkt_gen_t
+ark_pktgen_init(void *adr, int ord, int l2_mode)
+{
+ struct ark_pkt_gen_inst *inst =
+ rte_malloc("ark_pkt_gen_inst_pmd",
+ sizeof(struct ark_pkt_gen_inst), 0);
+ if (inst == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_gen_inst.\n");
+ return inst;
+ }
+ inst->regs = (struct ark_pkt_gen_regs *)adr;
+ inst->ordinal = ord;
+ inst->l2_mode = l2_mode;
+ return inst;
+}
+
+void
+ark_pktgen_uninit(ark_pkt_gen_t handle)
+{
+ rte_free(handle);
+}
+
+void
+ark_pktgen_run(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ inst->regs->pkt_start_stop = 1;
+}
+
+uint32_t
+ark_pktgen_paused(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return (((r >> 16) & 1) == 1);
+}
+
+void
+ark_pktgen_pause(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ int cnt = 0;
+
+ inst->regs->pkt_start_stop = 0;
+
+ while (!ark_pktgen_paused(handle)) {
+ usleep(1000);
+ if (cnt++ > 100) {
+ PMD_DRV_LOG(ERR, "Pktgen %d failed to pause.\n",
+ inst->ordinal);
+ break;
+ }
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d paused.\n", inst->ordinal);
+}
+
+void
+ark_pktgen_reset(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ if (!ark_pktgen_is_running(handle) &&
+ !ark_pktgen_paused(handle)) {
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d is not running"
+ " and is not paused. No need to reset.\n",
+ inst->ordinal);
+ return;
+ }
+
+ if (ark_pktgen_is_running(handle) &&
+ !ark_pktgen_paused(handle)) {
+ PMD_DEBUG_LOG(DEBUG,
+ "Pktgen %d is not paused. Pausing first.\n",
+ inst->ordinal);
+ ark_pktgen_pause(handle);
+ }
+
+ PMD_DEBUG_LOG(DEBUG, "Resetting pktgen %d.\n", inst->ordinal);
+ inst->regs->pkt_start_stop = (1 << 8);
+}
+
+uint32_t
+ark_pktgen_tx_done(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+uint32_t
+ark_pktgen_is_running(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return ((r & 1) == 1);
+}
+
+uint32_t
+ark_pktgen_is_gen_forever(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_ctrl;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+void
+ark_pktgen_wait_done(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ int wait_cycle = 10;
+
+ if (ark_pktgen_is_gen_forever(handle))
+ PMD_DRV_LOG(ERR, "Pktgen wait_done will not terminate"
+ " because gen_forever=1\n");
+
+ while (!ark_pktgen_tx_done(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG,
+ "Waiting for pktgen %d to finish sending...\n",
+ inst->ordinal);
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d done.\n", inst->ordinal);
+}
+
+uint32_t
+ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ return inst->regs->pkts_sent;
+}
+
+void
+ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_payload = b;
+}
+
+void
+ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_spacing = x;
+}
+
+void
+ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_min = x;
+}
+
+void
+ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_max = x;
+}
+
+void
+ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_incr = x;
+}
+
+void
+ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->num_pkts = x;
+}
+
+void
+ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->src_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->regs->src_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->dst_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->regs->dst_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->eth_type = x;
+}
+
+void
+ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr)
+{
+ uint32_t i;
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ for (i = 0; i < 7; i++)
+ inst->regs->hdr_dw[i] = hdr[i];
+}
+
+void
+ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ inst->regs->start_offset = x;
+}
+
+static struct OPTIONS *
+options(const char *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(toptions) / sizeof(struct OPTIONS); i++) {
+ if (strcmp(id, toptions[i].opt) == 0)
+ return &toptions[i];
+ }
+
+ PMD_DRV_LOG(ERR,
+ "Pktgen: Could not find requested option!, "
+ "option = %s\n",
+ id
+ );
+ return NULL;
+}
+
+static int pmd_set_arg(char *arg, char *val);
+static int
+pmd_set_arg(char *arg, char *val)
+{
+ struct OPTIONS *o = options(arg);
+
+ if (o) {
+ switch (o->t) {
+ case OTINT:
+ case OTBOOL:
+ o->v.INT = atoi(val);
+ break;
+ case OTLONG:
+ o->v.LONG = atoll(val);
+ break;
+ case OTSTRING:
+ strlcpy(o->v.STR, val, ARK_MAX_STR_LEN);
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/******
+ * Arg format = "opt0=v opt_n=v ..." (split on '=' and whitespace)
+ ******/
+void
+ark_pktgen_parse(char *args)
+{
+ char *argv, *v;
+ const char toks[] = " =\n\t\v\f \r";
+ argv = strtok(args, toks);
+ v = strtok(NULL, toks);
+ while (argv && v) {
+ pmd_set_arg(argv, v);
+ argv = strtok(NULL, toks);
+ v = strtok(NULL, toks);
+ }
+}
+
+static int32_t parse_ipv4_string(char const *ip_address);
+static int32_t
+parse_ipv4_string(char const *ip_address)
+{
+ unsigned int ip[4];
+
+ if (sscanf(ip_address, "%u.%u.%u.%u",
+ &ip[0], &ip[1], &ip[2], &ip[3]) != 4)
+ return 0;
+ return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul;
+}
+
+static void
+ark_pktgen_set_pkt_ctrl(ark_pkt_gen_t handle,
+ uint32_t gen_forever,
+ uint32_t en_slaved_start,
+ uint32_t vary_length,
+ uint32_t incr_payload,
+ uint32_t incr_first_byte,
+ uint32_t ins_seq_num,
+ uint32_t ins_udp_hdr,
+ uint32_t ins_time_stamp)
+{
+ uint32_t r;
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ if (!inst->l2_mode)
+ ins_udp_hdr = 0;
+
+ r = ((gen_forever << 24) |
+ (en_slaved_start << 20) |
+ (vary_length << 16) |
+ (incr_payload << 12) |
+ (incr_first_byte << 8) |
+ (ins_time_stamp << 5) |
+ (ins_seq_num << 4) |
+ ins_udp_hdr);
+
+ inst->regs->bytes_per_cycle = options("bytes_per_cycle")->v.INT;
+ if (options("shaping")->v.BOOL)
+ r = r | (1 << 28); /* enable shaping */
+
+ inst->regs->pkt_ctrl = r;
+}
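+
+/*
+ * Encoding sketch (illustrative): gen_forever=1 with ins_time_stamp=1
+ * and ins_seq_num=1 packs to r = (1 << 24) | (1 << 5) | (1 << 4)
+ * = 0x01000030, before the optional shaping bit is OR'd in at bit 28.
+ */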
+
+void
+ark_pktgen_setup(ark_pkt_gen_t handle)
+{
+ uint32_t hdr[7];
+ int32_t dst_ip = parse_ipv4_string(options("dst_ip")->v.STR);
+
+ if (!options("pause")->v.BOOL &&
+ (!options("reset")->v.BOOL &&
+ (options("configure")->v.BOOL))) {
+ ark_pktgen_set_payload_byte(handle,
+ options("payload_byte")->v.INT);
+ ark_pktgen_set_src_mac_addr(handle,
+ options("src_mac_addr")->v.LONG);
+ ark_pktgen_set_dst_mac_addr(handle,
+ options("dst_mac_addr")->v.LONG);
+ ark_pktgen_set_eth_type(handle,
+ options("eth_type")->v.INT);
+
+ if (options("dg-mode")->v.BOOL) {
+ hdr[0] = options("hdr_dW0")->v.INT;
+ hdr[1] = options("hdr_dW1")->v.INT;
+ hdr[2] = options("hdr_dW2")->v.INT;
+ hdr[3] = options("hdr_dW3")->v.INT;
+ hdr[4] = options("hdr_dW4")->v.INT;
+ hdr[5] = options("hdr_dW5")->v.INT;
+ hdr[6] = options("hdr_dW6")->v.INT;
+ } else {
+ hdr[0] = dst_ip;
+ hdr[1] = options("dst_port")->v.INT;
+ hdr[2] = options("src_port")->v.INT;
+ hdr[3] = 0;
+ hdr[4] = 0;
+ hdr[5] = 0;
+ hdr[6] = 0;
+ }
+ ark_pktgen_set_hdr_dW(handle, hdr);
+ ark_pktgen_set_num_pkts(handle,
+ options("num_pkts")->v.INT);
+ ark_pktgen_set_pkt_size_min(handle,
+ options("pkt_size_min")->v.INT);
+ ark_pktgen_set_pkt_size_max(handle,
+ options("pkt_size_max")->v.INT);
+ ark_pktgen_set_pkt_size_incr(handle,
+ options("pkt_size_incr")->v.INT);
+ ark_pktgen_set_pkt_spacing(handle,
+ options("pkt_spacing")->v.INT);
+ ark_pktgen_set_start_offset(handle,
+ options("start_offset")->v.INT);
+ ark_pktgen_set_pkt_ctrl(handle,
+ options("gen_forever")->v.BOOL,
+ options("en_slaved_start")->v.BOOL,
+ options("vary_length")->v.BOOL,
+ options("incr_payload")->v.BOOL,
+ options("incr_first_byte")->v.BOOL,
+ options("ins_seq_num")->v.INT,
+ options("ins_udp_hdr")->v.BOOL,
+ options("ins_time_stamp")->v.INT);
+ }
+
+ if (options("pause")->v.BOOL)
+ ark_pktgen_pause(handle);
+
+ if (options("reset")->v.BOOL)
+ ark_pktgen_reset(handle);
+ if (options("run")->v.BOOL) {
+ PMD_DEBUG_LOG(DEBUG, "Starting packet generator on port %d\n",
+ options("port")->v.INT);
+ ark_pktgen_run(handle);
+ }
+}
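+
+/*
+ * Note on precedence, as coded above: the configuration registers are
+ * written only when neither "pause" nor "reset" is requested and
+ * "configure" is set; "pause" and "reset" are then honored before an
+ * optional "run".
+ */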
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktgen.h b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.h
new file mode 100644
index 000000000..c61dfee6d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_PKTGEN_H_
+#define _ARK_PKTGEN_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#define ARK_PKTGEN_BASE_ADR 0x10000
+
+typedef void *ark_pkt_gen_t;
+
+/* The packet generator is an internal Arkville hardware module, which
+ * generates known packets for use in integrity and line-rate testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * This is an overlay structure for a memory-mapped FPGA device; it is
+ * never instantiated in RAM.
+ */
+struct ark_pkt_gen_regs {
+ uint32_t r0;
+ volatile uint32_t pkt_start_stop;
+ volatile uint32_t pkt_ctrl;
+ uint32_t pkt_payload;
+ uint32_t pkt_spacing;
+ uint32_t pkt_size_min;
+ uint32_t pkt_size_max;
+ uint32_t pkt_size_incr;
+ volatile uint32_t num_pkts;
+ volatile uint32_t pkts_sent;
+ uint32_t src_mac_addr_l;
+ uint32_t src_mac_addr_h;
+ uint32_t dst_mac_addr_l;
+ uint32_t dst_mac_addr_h;
+ uint32_t eth_type;
+ uint32_t hdr_dw[7];
+ uint32_t start_offset;
+ uint32_t bytes_per_cycle;
+} __rte_packed;
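+
+/*
+ * Illustrative only: the overlay is reached by offsetting the mapped
+ * PCI BAR by ARK_PKTGEN_BASE_ADR, along the lines of
+ *
+ *   struct ark_pkt_gen_regs *regs =
+ *       (struct ark_pkt_gen_regs *)(bar_addr + ARK_PKTGEN_BASE_ADR);
+ *
+ * where bar_addr (a uint8_t * to the mapped BAR) is an assumption here;
+ * the driver is presumed to pass the resolved address to
+ * ark_pktgen_init() via its void *arg parameter.
+ */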
+
+struct ark_pkt_gen_inst {
+ struct rte_eth_dev_info *dev_info;
+ struct ark_pkt_gen_regs *regs;
+ int l2_mode;
+ int ordinal;
+};
+
+/* packet generator functions */
+ark_pkt_gen_t ark_pktgen_init(void *arg, int ord, int l2_mode);
+void ark_pktgen_uninit(ark_pkt_gen_t handle);
+void ark_pktgen_run(ark_pkt_gen_t handle);
+void ark_pktgen_pause(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_paused(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_is_gen_forever(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_is_running(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_tx_done(ark_pkt_gen_t handle);
+void ark_pktgen_reset(ark_pkt_gen_t handle);
+void ark_pktgen_wait_done(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle);
+void ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b);
+void ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr);
+void ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr);
+void ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr);
+void ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_parse(char *argv);
+void ark_pktgen_setup(ark_pkt_gen_t handle);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_rqp.c b/src/spdk/dpdk/drivers/net/ark/ark_rqp.c
new file mode 100644
index 000000000..bf1af4d61
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_rqp.c
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_rqp.h"
+#include "ark_logs.h"
+
+/* ************************************************************************* */
+void
+ark_rqp_stats_reset(struct ark_rqpace_t *rqp)
+{
+ rqp->stats_clear = 1;
+ /* POR 992 */
+ /* rqp->cpld_max = 992; */
+ /* POR 64 */
+ /* rqp->cplh_max = 64; */
+}
+
+/* ************************************************************************* */
+void
+ark_rqp_dump(struct ark_rqpace_t *rqp)
+{
+ if (rqp->err_count_other != 0)
+ PMD_DRV_LOG(ERR,
+ "RQP Errors noted: ctrl: %d cplh_hmax %d cpld_max %d"
+ ARK_SU32
+ ARK_SU32 "\n",
+ rqp->ctrl, rqp->cplh_max, rqp->cpld_max,
+ "Error Count", rqp->err_cnt,
+ "Error General", rqp->err_count_other);
+
+ PMD_STATS_LOG(INFO, "RQP Dump: ctrl: %d cplh_max %d cpld_max %d"
+ ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
+ rqp->ctrl, rqp->cplh_max, rqp->cpld_max,
+ "Error Count", rqp->err_cnt,
+ "Error General", rqp->err_count_other,
+ "stall_pS", rqp->stall_ps,
+ "stall_pS Min", rqp->stall_ps_min,
+ "stall_pS Max", rqp->stall_ps_max,
+ "req_pS", rqp->req_ps,
+ "req_pS Min", rqp->req_ps_min,
+ "req_pS Max", rqp->req_ps_max,
+ "req_dWPS", rqp->req_dw_ps,
+ "req_dWPS Min", rqp->req_dw_ps_min,
+ "req_dWPS Max", rqp->req_dw_ps_max,
+ "cpl_pS", rqp->cpl_ps,
+ "cpl_pS Min", rqp->cpl_ps_min,
+ "cpl_pS Max", rqp->cpl_ps_max,
+ "cpl_dWPS", rqp->cpl_dw_ps,
+ "cpl_dWPS Min", rqp->cpl_dw_ps_min,
+ "cpl_dWPS Max", rqp->cpl_dw_ps_max,
+ "cplh pending", rqp->cplh_pending,
+ "cpld pending", rqp->cpld_pending,
+ "cplh pending max", rqp->cplh_pending_max,
+ "cpld pending max", rqp->cpld_pending_max);
+}
+
+int
+ark_rqp_lasped(struct ark_rqpace_t *rqp)
+{
+ return rqp->lasped;
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_rqp.h b/src/spdk/dpdk/drivers/net/ark/ark_rqp.h
new file mode 100644
index 000000000..6c8046062
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_rqp.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_RQP_H_
+#define _ARK_RQP_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The RQP or ReQuest Pacer is an internal Arkville hardware module
+ * which limits the PCIe data flow to ensure correct operation for the
+ * particular hardware PCIe endpoint.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * RQ Pacing core hardware structure
+ * This is an overlay structure for a memory-mapped FPGA device; it is
+ * never instantiated in RAM.
+ */
+struct ark_rqpace_t {
+ volatile uint32_t ctrl;
+ volatile uint32_t stats_clear;
+ volatile uint32_t cplh_max;
+ volatile uint32_t cpld_max;
+ volatile uint32_t err_cnt;
+ volatile uint32_t stall_ps;
+ volatile uint32_t stall_ps_min;
+ volatile uint32_t stall_ps_max;
+ volatile uint32_t req_ps;
+ volatile uint32_t req_ps_min;
+ volatile uint32_t req_ps_max;
+ volatile uint32_t req_dw_ps;
+ volatile uint32_t req_dw_ps_min;
+ volatile uint32_t req_dw_ps_max;
+ volatile uint32_t cpl_ps;
+ volatile uint32_t cpl_ps_min;
+ volatile uint32_t cpl_ps_max;
+ volatile uint32_t cpl_dw_ps;
+ volatile uint32_t cpl_dw_ps_min;
+ volatile uint32_t cpl_dw_ps_max;
+ volatile uint32_t cplh_pending;
+ volatile uint32_t cpld_pending;
+ volatile uint32_t cplh_pending_max;
+ volatile uint32_t cpld_pending_max;
+ volatile uint32_t err_count_other;
+ char eval[4];
+ volatile int lasped;
+};
+
+void ark_rqp_dump(struct ark_rqpace_t *rqp);
+void ark_rqp_stats_reset(struct ark_rqpace_t *rqp);
+int ark_rqp_lasped(struct ark_rqpace_t *rqp);
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_udm.c b/src/spdk/dpdk/drivers/net/ark/ark_udm.c
new file mode 100644
index 000000000..03f1922c6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_udm.c
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_udm.h"
+
+int
+ark_udm_verify(struct ark_udm_t *udm)
+{
+ if (sizeof(struct ark_udm_t) != ARK_UDM_EXPECT_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "ARK: UDM structure looks incorrect %d vs %zd\n",
+ ARK_UDM_EXPECT_SIZE, sizeof(struct ark_udm_t));
+ return -1;
+ }
+
+ if (udm->setup.const0 != ARK_UDM_CONST) {
+ PMD_DRV_LOG(ERR,
+ "ARK: UDM module not found as expected 0x%08x\n",
+ udm->setup.const0);
+ return -1;
+ }
+ return 0;
+}
+
+int
+ark_udm_stop(struct ark_udm_t *udm, const int wait)
+{
+ int cnt = 0;
+
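+ /* 2 == ARK_UDM_STOP; see ark_udm_commands in ark_udm.h */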
+ udm->cfg.command = 2;
+
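+ /* with wait set, poll stop_flushed for up to ~10 ms (1000 * 10 us) */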
+ while (wait && (udm->cfg.stop_flushed & 0x01) == 0) {
+ if (cnt++ > 1000)
+ return 1;
+
+ usleep(10);
+ }
+ return 0;
+}
+
+int
+ark_udm_reset(struct ark_udm_t *udm)
+{
+ int status;
+
+ status = ark_udm_stop(udm, 1);
+ if (status != 0) {
+ PMD_DEBUG_LOG(INFO, "%s stop failed doing forced reset\n",
+ __func__);
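+ /* command 4 appears to be a hard reset (it is not listed in
+ * ark_udm_commands); 3 == ARK_UDM_RESET
+ */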
+ udm->cfg.command = 4;
+ usleep(10);
+ udm->cfg.command = 3;
+ status = ark_udm_stop(udm, 0);
+ PMD_DEBUG_LOG(INFO, "%s stop status %d post failure"
+ " and forced reset\n",
+ __func__, status);
+ } else {
+ udm->cfg.command = 3;
+ }
+
+ return status;
+}
+
+void
+ark_udm_start(struct ark_udm_t *udm)
+{
+ udm->cfg.command = 1;
+}
+
+void
+ark_udm_stats_reset(struct ark_udm_t *udm)
+{
+ udm->pcibp.pci_clear = 1;
+ udm->tlp_ps.tlp_clear = 1;
+}
+
+void
+ark_udm_configure(struct ark_udm_t *udm,
+ uint32_t headroom,
+ uint32_t dataroom,
+ uint32_t write_interval_ns)
+{
+ /* headroom and dataroom are specified in DWords in the UDM */
+ udm->cfg.dataroom = dataroom / 4;
+ udm->cfg.headroom = headroom / 4;
+
+ /* the write interval is programmed in units of 4 ns cycles */
+ udm->rt_cfg.write_interval = write_interval_ns / 4;
+}
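+
+/*
+ * Worked example (illustrative values): ark_udm_configure(udm, 32, 2048,
+ * ARK_RX_WRITE_TIME_NS) with ARK_RX_WRITE_TIME_NS == 2500 writes
+ * headroom = 32 / 4 = 8 DWords, dataroom = 2048 / 4 = 512 DWords and
+ * write_interval = 2500 / 4 = 625 cycles.
+ */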
+
+void
+ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr)
+{
+ udm->rt_cfg.hw_prod_addr = addr;
+}
+
+int
+ark_udm_is_flushed(struct ark_udm_t *udm)
+{
+ return (udm->cfg.stop_flushed & 0x01) != 0;
+}
+
+uint64_t
+ark_udm_dropped(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_pkt_drop;
+}
+
+uint64_t
+ark_udm_bytes(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_byte_count;
+}
+
+uint64_t
+ark_udm_packets(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_ff_packet_count;
+}
+
+void
+ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg)
+{
+ PMD_STATS_LOG(INFO, "UDM Stats: %s"
+ ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 "\n",
+ msg,
+ "Pkts Received", udm->stats.rx_packet_count,
+ "Pkts Finalized", udm->stats.rx_sent_packets,
+ "Pkts Dropped", udm->tlp.pkt_drop,
+ "Bytes Count", udm->stats.rx_byte_count,
+ "MBuf Count", udm->stats.rx_mbuf_count);
+}
+
+void
+ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg, uint16_t qid)
+{
+ PMD_STATS_LOG(INFO, "UDM Queue %3u Stats: %s"
+ ARK_SU64 ARK_SU64
+ ARK_SU64 ARK_SU64
+ ARK_SU64 "\n",
+ qid, msg,
+ "Pkts Received", udm->qstats.q_packet_count,
+ "Pkts Finalized", udm->qstats.q_ff_packet_count,
+ "Pkts Dropped", udm->qstats.q_pkt_drop,
+ "Bytes Count", udm->qstats.q_byte_count,
+ "MBuf Count", udm->qstats.q_mbuf_count);
+}
+
+void
+ark_udm_dump(struct ark_udm_t *udm, const char *msg)
+{
+ PMD_DEBUG_LOG(DEBUG, "UDM Dump: %s Stopped: %d\n", msg,
+ udm->cfg.stop_flushed);
+}
+
+void
+ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id)
+{
+ PMD_DEBUG_LOG(DEBUG, "UDM Setup Q: %u"
+ ARK_SU64X ARK_SU32 "\n",
+ q_id,
+ "hw_prod_addr", udm->rt_cfg.hw_prod_addr,
+ "prod_idx", udm->rt_cfg.prod_idx);
+}
+
+void
+ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg)
+{
+ struct ark_udm_pcibp_t *bp = &udm->pcibp;
+
+ PMD_STATS_LOG(INFO, "UDM Performance %s"
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ "\n",
+ msg,
+ "PCI Empty", bp->pci_empty,
+ "PCI Q1", bp->pci_q1,
+ "PCI Q2", bp->pci_q2,
+ "PCI Q3", bp->pci_q3,
+ "PCI Q4", bp->pci_q4,
+ "PCI Full", bp->pci_full);
+}
+
+void
+ark_udm_queue_stats_reset(struct ark_udm_t *udm)
+{
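+ /* writing q_byte_count appears to act as a write-to-clear for the
+ * hardware queue counters
+ */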
+ udm->qstats.q_byte_count = 1;
+}
+
+void
+ark_udm_queue_enable(struct ark_udm_t *udm, int enable)
+{
+ udm->qstats.q_enable = enable ? 1 : 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ark/ark_udm.h b/src/spdk/dpdk/drivers/net/ark/ark_udm.h
new file mode 100644
index 000000000..5846c825b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/ark_udm.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_UDM_H_
+#define _ARK_UDM_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The UDM or Upstream Data Mover is an internal Arkville hardware
+ * module for moving packets from the RX packet streams to host memory.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/* Metadata structure passed from the FPGA; must match the FPGA layout */
+struct ark_rx_meta {
+ uint64_t timestamp;
+ uint64_t user_data;
+ uint8_t port;
+ uint8_t dst_queue;
+ uint16_t pkt_len;
+};
+
+/*
+ * UDM hardware structures
+ * These are overlay structures for a memory-mapped FPGA device; they
+ * are never instantiated in RAM.
+ */
+
+#define ARK_RX_WRITE_TIME_NS 2500
+#define ARK_UDM_SETUP 0
+#define ARK_UDM_CONST 0xbACECACE
+struct ark_udm_setup_t {
+ uint32_t r0;
+ uint32_t r4;
+ volatile uint32_t cycle_count;
+ uint32_t const0;
+};
+
+#define ARK_UDM_CFG 0x010
+struct ark_udm_cfg_t {
+ volatile uint32_t stop_flushed; /* RO */
+ volatile uint32_t command;
+ uint32_t dataroom;
+ uint32_t headroom;
+};
+
+typedef enum {
+ ARK_UDM_START = 0x1,
+ ARK_UDM_STOP = 0x2,
+ ARK_UDM_RESET = 0x3
+} ark_udm_commands;
+
+#define ARK_UDM_STATS 0x020
+struct ark_udm_stats_t {
+ volatile uint64_t rx_byte_count;
+ volatile uint64_t rx_packet_count;
+ volatile uint64_t rx_mbuf_count;
+ volatile uint64_t rx_sent_packets;
+};
+
+#define ARK_UDM_PQ 0x040
+struct ark_udm_queue_stats_t {
+ volatile uint64_t q_byte_count;
+ volatile uint64_t q_packet_count; /* includes drops */
+ volatile uint64_t q_mbuf_count;
+ volatile uint64_t q_ff_packet_count;
+ volatile uint64_t q_pkt_drop;
+ uint32_t q_enable;
+};
+
+#define ARK_UDM_TLP 0x0070
+struct ark_udm_tlp_t {
+ volatile uint64_t pkt_drop; /* global */
+ volatile uint32_t tlp_q1;
+ volatile uint32_t tlp_q2;
+ volatile uint32_t tlp_q3;
+ volatile uint32_t tlp_q4;
+ volatile uint32_t tlp_full;
+};
+
+#define ARK_UDM_PCIBP 0x00a0
+struct ark_udm_pcibp_t {
+ volatile uint32_t pci_clear;
+ volatile uint32_t pci_empty;
+ volatile uint32_t pci_q1;
+ volatile uint32_t pci_q2;
+ volatile uint32_t pci_q3;
+ volatile uint32_t pci_q4;
+ volatile uint32_t pci_full;
+};
+
+#define ARK_UDM_TLP_PS 0x00bc
+struct ark_udm_tlp_ps_t {
+ volatile uint32_t tlp_clear;
+ volatile uint32_t tlp_ps_min;
+ volatile uint32_t tlp_ps_max;
+ volatile uint32_t tlp_full_ps_min;
+ volatile uint32_t tlp_full_ps_max;
+ volatile uint32_t tlp_dw_ps_min;
+ volatile uint32_t tlp_dw_ps_max;
+ volatile uint32_t tlp_pldw_ps_min;
+ volatile uint32_t tlp_pldw_ps_max;
+};
+
+#define ARK_UDM_RT_CFG 0x00e0
+struct ark_udm_rt_cfg_t {
+ rte_iova_t hw_prod_addr;
+ uint32_t write_interval; /* 4ns cycles */
+ volatile uint32_t prod_idx; /* RO */
+};
+
+/* Consolidated structure */
+#define ARK_UDM_EXPECT_SIZE (0x00fc + 4)
+#define ARK_UDM_QOFFSET ARK_UDM_EXPECT_SIZE
+struct ark_udm_t {
+ struct ark_udm_setup_t setup;
+ struct ark_udm_cfg_t cfg;
+ struct ark_udm_stats_t stats;
+ struct ark_udm_queue_stats_t qstats;
+ uint8_t reserved1[(ARK_UDM_TLP - ARK_UDM_PQ) -
+ sizeof(struct ark_udm_queue_stats_t)];
+ struct ark_udm_tlp_t tlp;
+ uint8_t reserved2[(ARK_UDM_PCIBP - ARK_UDM_TLP) -
+ sizeof(struct ark_udm_tlp_t)];
+ struct ark_udm_pcibp_t pcibp;
+ struct ark_udm_tlp_ps_t tlp_ps;
+ struct ark_udm_rt_cfg_t rt_cfg;
+ int8_t reserved3[(ARK_UDM_EXPECT_SIZE - ARK_UDM_RT_CFG) -
+ sizeof(struct ark_udm_rt_cfg_t)];
+};
+
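+/*
+ * Sketch (not present in the original driver): the overlay size could
+ * also be checked at compile time,
+ *
+ *   _Static_assert(sizeof(struct ark_udm_t) == ARK_UDM_EXPECT_SIZE,
+ *                  "UDM overlay must match the hardware layout");
+ *
+ * the driver performs the equivalent check at run time in
+ * ark_udm_verify().
+ */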
+
+int ark_udm_verify(struct ark_udm_t *udm);
+int ark_udm_stop(struct ark_udm_t *udm, int wait);
+void ark_udm_start(struct ark_udm_t *udm);
+int ark_udm_reset(struct ark_udm_t *udm);
+void ark_udm_configure(struct ark_udm_t *udm,
+ uint32_t headroom,
+ uint32_t dataroom,
+ uint32_t write_interval_ns);
+void ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr);
+void ark_udm_stats_reset(struct ark_udm_t *udm);
+void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
+ uint16_t qid);
+void ark_udm_dump(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id);
+int ark_udm_is_flushed(struct ark_udm_t *udm);
+
+/* Per queue data */
+uint64_t ark_udm_dropped(struct ark_udm_t *udm);
+uint64_t ark_udm_bytes(struct ark_udm_t *udm);
+uint64_t ark_udm_packets(struct ark_udm_t *udm);
+
+void ark_udm_queue_stats_reset(struct ark_udm_t *udm);
+void ark_udm_queue_enable(struct ark_udm_t *udm, int enable);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/ark/meson.build b/src/spdk/dpdk/drivers/net/ark/meson.build
new file mode 100644
index 000000000..99151bba1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('ark_ddm.c',
+ 'ark_ethdev.c',
+ 'ark_ethdev_rx.c',
+ 'ark_ethdev_tx.c',
+ 'ark_mpu.c',
+ 'ark_pktchkr.c',
+ 'ark_pktdir.c',
+ 'ark_pktgen.c',
+ 'ark_rqp.c',
+ 'ark_udm.c')
diff --git a/src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map b/src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};