path: root/src/spdk/dpdk/drivers/net/octeontx
author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/net/octeontx
parent    Initial commit. (diff)
Adding upstream version 18.2.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/net/octeontx')
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/Makefile                     |   53
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/base/meson.build             |   25
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c          |  378
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h          |  168
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h           |  128
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h      |  250
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c        |  239
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h        |  372
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c        |  640
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h        |   83
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/meson.build                  |   14
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c            | 1672
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h            |  187
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev_ops.c        |  343
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h              |   36
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c              |   76
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h              |  504
-rw-r--r-- src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map |    7
18 files changed, 5175 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/net/octeontx/Makefile b/src/spdk/dpdk/drivers/net/octeontx/Makefile
new file mode 100644
index 000000000..c4db87800
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/Makefile
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
+
+EXPORT_MAP := rte_pmd_octeontx_version.map
+
+OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_pkovf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_pkivf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_bgx.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_ethdev_ops.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_octeontx_rxtx.o += -fno-prefetch-loop-arrays
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_octeontx_rxtx.o += -O3 -Ofast
+else
+CFLAGS_octeontx_rxtx.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_octeontx_rxtx.o += -O3 -Ofast
+endif
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_common_octeontx
+LDLIBS += -lrte_mempool_octeontx
+LDLIBS += -lrte_eventdev
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/meson.build b/src/spdk/dpdk/drivers/net/octeontx/base/meson.build
new file mode 100644
index 000000000..b8fe4b301
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/meson.build
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = [
+ 'octeontx_pkovf.c',
+ 'octeontx_pkivf.c',
+ 'octeontx_bgx.c'
+]
+
+depends = ['ethdev', 'mempool_octeontx']
+static_objs = []
+foreach d: depends
+ if not is_variable('shared_rte_' + d)
+ subdir_done()
+ endif
+ static_objs += get_variable('static_rte_' + d)
+endforeach
+
+c_args = cflags
+base_lib = static_library('octeontx_base', sources,
+ c_args: c_args,
+ dependencies: static_objs,
+)
+
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c
new file mode 100644
index 000000000..ac856ff86
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include "octeontx_bgx.h"
+
+int
+octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_conf_t bgx_conf;
+ int len = sizeof(octeontx_mbox_bgx_port_conf_t);
+ int res;
+
+ memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t));
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_OPEN;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
+ if (res < 0)
+ return -EACCES;
+
+ conf->enable = bgx_conf.enable;
+ conf->promisc = bgx_conf.promisc;
+ conf->bpen = bgx_conf.bpen;
+ conf->node = bgx_conf.node;
+ conf->base_chan = bgx_conf.base_chan;
+ conf->num_chans = bgx_conf.num_chans;
+ conf->mtu = bgx_conf.mtu;
+ conf->bgx = bgx_conf.bgx;
+ conf->lmac = bgx_conf.lmac;
+ conf->mode = bgx_conf.mode;
+ conf->pkind = bgx_conf.pkind;
+ memcpy(conf->macaddr, bgx_conf.macaddr, 6);
+
+ return res;
+}
+
+int
+octeontx_bgx_port_close(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_CLOSE;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_start(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_START;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_stop(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_STOP;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_conf_t bgx_conf;
+ int len = sizeof(octeontx_mbox_bgx_port_conf_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_CONFIG;
+ hdr.vfid = port;
+
+ memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t));
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
+ if (res < 0)
+ return -EACCES;
+
+ conf->enable = bgx_conf.enable;
+ conf->promisc = bgx_conf.promisc;
+ conf->bpen = bgx_conf.bpen;
+ conf->node = bgx_conf.node;
+ conf->base_chan = bgx_conf.base_chan;
+ conf->num_chans = bgx_conf.num_chans;
+ conf->mtu = bgx_conf.mtu;
+ conf->bgx = bgx_conf.bgx;
+ conf->lmac = bgx_conf.lmac;
+ conf->mode = bgx_conf.mode;
+ conf->pkind = bgx_conf.pkind;
+ memcpy(conf->macaddr, bgx_conf.macaddr, 6);
+
+ return res;
+}
+
+int
+octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_status_t bgx_stat;
+ int len = sizeof(octeontx_mbox_bgx_port_status_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_STATUS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stat, len);
+ if (res < 0)
+ return -EACCES;
+
+ stat->link_up = bgx_stat.link_up;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_stats_t bgx_stats;
+ int len = sizeof(octeontx_mbox_bgx_port_stats_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_STATS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stats, len);
+ if (res < 0)
+ return -EACCES;
+
+ stats->rx_packets = bgx_stats.rx_packets;
+ stats->rx_bytes = bgx_stats.rx_bytes;
+ stats->rx_dropped = bgx_stats.rx_dropped;
+ stats->rx_errors = bgx_stats.rx_errors;
+ stats->tx_packets = bgx_stats.tx_packets;
+ stats->tx_bytes = bgx_stats.tx_bytes;
+ stats->tx_dropped = bgx_stats.tx_dropped;
+ stats->tx_errors = bgx_stats.tx_errors;
+ return res;
+}
+
+int
+octeontx_bgx_port_stats_clr(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_CLR_STATS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_link_status(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ uint8_t link;
+ int len = sizeof(uint8_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_LINK_STATUS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, &link, len);
+ if (res < 0)
+ return -EACCES;
+
+ return link;
+}
+
+int
+octeontx_bgx_port_set_link_state(int port, bool enable)
+{
+ struct octeontx_mbox_hdr hdr;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_SET_LINK_STATE;
+ hdr.vfid = port;
+
+ return octeontx_mbox_send(&hdr, &enable, sizeof(bool), NULL, 0);
+}
+
+int
+octeontx_bgx_port_promisc_set(int port, int en)
+{
+ struct octeontx_mbox_hdr hdr;
+ uint8_t prom;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_SET_PROMISC;
+ hdr.vfid = port;
+ prom = en ? 1 : 0;
+
+ res = octeontx_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_mtu_set(int port, int mtu)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_SET_MTU;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &mtu, sizeof(mtu), NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr)
+{
+ struct octeontx_mbox_hdr hdr;
+ int len = 6;
+ int res = 0;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_SET_MACADDR;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, mac_addr, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_mac_add(int port, uint8_t *mac_addr, int index)
+{
+ struct octeontx_mbox_bgx_port_mac_filter filter;
+ struct octeontx_mbox_hdr hdr;
+ int len = 6;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_ADD_MACADDR;
+ hdr.vfid = port;
+
+ memcpy(filter.mac_addr, mac_addr, len);
+ filter.index = index;
+ len = sizeof(struct octeontx_mbox_bgx_port_mac_filter);
+
+ return octeontx_mbox_send(&hdr, &filter, len, NULL, 0);
+}
+
+int
+octeontx_bgx_port_mac_del(int port, uint32_t index)
+{
+ struct octeontx_mbox_hdr hdr;
+ int len = sizeof(uint32_t);
+ int res = 0;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_DEL_MACADDR;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &index, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_mac_entries_get(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int resp = 6;
+ int res = 0;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_MACADDR_ENTRIES;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, NULL, 0, &resp, sizeof(int));
+ if (res < 0)
+ return -EACCES;
+
+ return resp;
+}
+
+int octeontx_bgx_port_get_fifo_cfg(int port,
+ octeontx_mbox_bgx_port_fifo_cfg_t *cfg)
+{
+ int len = sizeof(octeontx_mbox_bgx_port_fifo_cfg_t);
+ octeontx_mbox_bgx_port_fifo_cfg_t conf;
+ struct octeontx_mbox_hdr hdr;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_FIFO_CFG;
+ hdr.vfid = port;
+
+ if (octeontx_mbox_send(&hdr, NULL, 0, &conf, len) < 0)
+ return -EACCES;
+
+ cfg->rx_fifosz = conf.rx_fifosz;
+
+ return 0;
+}
+
+int octeontx_bgx_port_flow_ctrl_cfg(int port,
+ octeontx_mbox_bgx_port_fc_cfg_t *cfg)
+{
+ int len = sizeof(octeontx_mbox_bgx_port_fc_cfg_t);
+ octeontx_mbox_bgx_port_fc_cfg_t conf;
+ struct octeontx_mbox_hdr hdr;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_FLOW_CTRL_CFG;
+ hdr.vfid = port;
+
+ if (cfg->fc_cfg == BGX_PORT_FC_CFG_SET)
+ memcpy(&conf, cfg, len);
+ else
+ memset(&conf, 0, len);
+
+ if (octeontx_mbox_send(&hdr, &conf, len, &conf, len) < 0)
+ return -EACCES;
+
+ if (cfg->fc_cfg == BGX_PORT_FC_CFG_SET)
+ goto done;
+
+ cfg->rx_pause = conf.rx_pause;
+ cfg->tx_pause = conf.tx_pause;
+ cfg->low_water = conf.low_water;
+ cfg->high_water = conf.high_water;
+
+done:
+ return 0;
+}
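
A note on the file above: every wrapper follows the same mailbox pattern,
filling an octeontx_mbox_hdr with the coprocessor id, message id and VF id,
sending it via octeontx_mbox_send(), then copying the response out field by
field. A minimal bring-up sketch (not part of the patch; error handling is
trimmed, and it assumes the mailbox layer is already initialized):

	#include <stdio.h>
	#include <inttypes.h>
	#include "octeontx_bgx.h"

	static int
	bgx_port_bringup(int port)
	{
		octeontx_mbox_bgx_port_conf_t conf;
		octeontx_mbox_bgx_port_stats_t stats;
		int res;

		res = octeontx_bgx_port_open(port, &conf); /* fetch initial config */
		if (res < 0)
			return res;

		res = octeontx_bgx_port_start(port); /* enable Rx/Tx on the LMAC */
		if (res < 0)
			return res;

		if (octeontx_bgx_port_stats(port, &stats) >= 0)
			printf("port %d: rx=%" PRIu64 " tx=%" PRIu64 "\n",
			       port, stats.rx_packets, stats.tx_packets);

		return octeontx_bgx_port_stop(port);
	}
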
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h
new file mode 100644
index 000000000..d126a0b7f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_BGX_H__
+#define __OCTEONTX_BGX_H__
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <octeontx_mbox.h>
+
+#define OCTEONTX_BGX_RSVD_RX_FIFOBYTES 0x40
+
+#define OCTEONTX_BGX_COPROC 6
+
+/* BGX messages */
+#define MBOX_BGX_PORT_OPEN 0
+#define MBOX_BGX_PORT_CLOSE 1
+#define MBOX_BGX_PORT_START 2
+#define MBOX_BGX_PORT_STOP 3
+#define MBOX_BGX_PORT_GET_CONFIG 4
+#define MBOX_BGX_PORT_GET_STATUS 5
+#define MBOX_BGX_PORT_GET_STATS 6
+#define MBOX_BGX_PORT_CLR_STATS 7
+#define MBOX_BGX_PORT_GET_LINK_STATUS 8
+#define MBOX_BGX_PORT_SET_PROMISC 9
+#define MBOX_BGX_PORT_SET_MACADDR 10
+#define MBOX_BGX_PORT_SET_BP 11
+#define MBOX_BGX_PORT_SET_BCAST 12
+#define MBOX_BGX_PORT_SET_MCAST 13
+#define MBOX_BGX_PORT_SET_MTU 14
+#define MBOX_BGX_PORT_ADD_MACADDR 15
+#define MBOX_BGX_PORT_DEL_MACADDR 16
+#define MBOX_BGX_PORT_GET_MACADDR_ENTRIES 17
+#define MBOX_BGX_PORT_GET_FIFO_CFG 18
+#define MBOX_BGX_PORT_FLOW_CTRL_CFG 19
+#define MBOX_BGX_PORT_SET_LINK_STATE 20
+
+/* BGX port configuration parameters: */
+typedef struct octeontx_mbox_bgx_port_conf {
+ uint8_t enable;
+ uint8_t promisc;
+ uint8_t bpen;
+ uint8_t macaddr[6]; /* MAC address.*/
+ uint8_t fcs_strip;
+ uint8_t bcast_mode;
+ uint8_t mcast_mode;
+ uint8_t node; /* CPU node */
+ uint16_t base_chan;
+ uint16_t num_chans;
+ uint16_t mtu;
+ uint8_t bgx;
+ uint8_t lmac;
+ uint8_t mode;
+ uint8_t pkind;
+} octeontx_mbox_bgx_port_conf_t;
+
+/* BGX port status: */
+typedef struct octeontx_mbox_bgx_port_status {
+ uint8_t link_up;
+ uint8_t bp;
+ uint8_t duplex;
+ uint32_t speed;
+} octeontx_mbox_bgx_port_status_t;
+
+/* BGX port statistics: */
+typedef struct octeontx_mbox_bgx_port_stats {
+ uint64_t rx_packets;
+ uint64_t tx_packets;
+ uint64_t rx_bytes;
+ uint64_t tx_bytes;
+ uint64_t rx_errors;
+ uint64_t tx_errors;
+ uint64_t rx_dropped;
+ uint64_t tx_dropped;
+ uint64_t multicast;
+ uint64_t collisions;
+
+ uint64_t rx_length_errors;
+ uint64_t rx_over_errors;
+ uint64_t rx_crc_errors;
+ uint64_t rx_frame_errors;
+ uint64_t rx_fifo_errors;
+ uint64_t rx_missed_errors;
+
+ /* Detailed transmit errors. */
+ uint64_t tx_aborted_errors;
+ uint64_t tx_carrier_errors;
+ uint64_t tx_fifo_errors;
+ uint64_t tx_heartbeat_errors;
+ uint64_t tx_window_errors;
+
+ /* Extended statistics based on RFC2819. */
+ uint64_t rx_1_to_64_packets;
+ uint64_t rx_65_to_127_packets;
+ uint64_t rx_128_to_255_packets;
+ uint64_t rx_256_to_511_packets;
+ uint64_t rx_512_to_1023_packets;
+ uint64_t rx_1024_to_1522_packets;
+ uint64_t rx_1523_to_max_packets;
+
+ uint64_t tx_1_to_64_packets;
+ uint64_t tx_65_to_127_packets;
+ uint64_t tx_128_to_255_packets;
+ uint64_t tx_256_to_511_packets;
+ uint64_t tx_512_to_1023_packets;
+ uint64_t tx_1024_to_1522_packets;
+ uint64_t tx_1523_to_max_packets;
+
+ uint64_t tx_multicast_packets;
+ uint64_t rx_broadcast_packets;
+ uint64_t tx_broadcast_packets;
+ uint64_t rx_undersized_errors;
+ uint64_t rx_oversize_errors;
+ uint64_t rx_fragmented_errors;
+ uint64_t rx_jabber_errors;
+} octeontx_mbox_bgx_port_stats_t;
+
+struct octeontx_mbox_bgx_port_mac_filter {
+ uint8_t mac_addr[6];
+ int index;
+};
+
+/* BGX port fifo config: */
+typedef struct octeontx_mbox_bgx_port_fifo_cfg {
+ uint32_t rx_fifosz; /* in Bytes */
+} octeontx_mbox_bgx_port_fifo_cfg_t;
+
+typedef enum {
+ BGX_PORT_FC_CFG_GET = 0,
+ BGX_PORT_FC_CFG_SET = 1
+} bgx_port_fc_t;
+
+/* BGX port flow control config: */
+typedef struct octeontx_mbox_bgx_port_fc_cfg {
+ /* BP on/off threshold levels in Bytes, must be a multiple of 16 */
+ uint16_t high_water;
+ uint16_t low_water;
+ uint8_t rx_pause; /* rx_pause = 1/0 to enable/disable fc on Tx */
+ uint8_t tx_pause; /* tx_pause = 1/0 to enable/disable fc on Rx */
+ bgx_port_fc_t fc_cfg;
+} octeontx_mbox_bgx_port_fc_cfg_t;
+
+int octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf);
+int octeontx_bgx_port_close(int port);
+int octeontx_bgx_port_start(int port);
+int octeontx_bgx_port_stop(int port);
+int octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf);
+int octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat);
+int octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats);
+int octeontx_bgx_port_stats_clr(int port);
+int octeontx_bgx_port_link_status(int port);
+int octeontx_bgx_port_promisc_set(int port, int en);
+int octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr);
+int octeontx_bgx_port_mac_add(int port, uint8_t *mac_addr, int index);
+int octeontx_bgx_port_mac_del(int port, uint32_t index);
+int octeontx_bgx_port_mac_entries_get(int port);
+int octeontx_bgx_port_mtu_set(int port, int mtu);
+int octeontx_bgx_port_set_link_state(int port, bool en);
+int octeontx_bgx_port_get_fifo_cfg(int port,
+ octeontx_mbox_bgx_port_fifo_cfg_t *cfg);
+int octeontx_bgx_port_flow_ctrl_cfg(int port,
+ octeontx_mbox_bgx_port_fc_cfg_t *cfg);
+
+#endif /* __OCTEONTX_BGX_H__ */
+
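
Since octeontx_bgx_port_flow_ctrl_cfg() acts as both getter and setter
depending on cfg->fc_cfg, a typical caller does get-modify-set. A hedged
sketch (the watermark values are illustrative only; per the comment above
they must be multiples of 16 bytes):

	#include "octeontx_bgx.h"

	static int
	bgx_enable_tx_pause(int port)
	{
		octeontx_mbox_bgx_port_fc_cfg_t fc;
		int res;

		fc.fc_cfg = BGX_PORT_FC_CFG_GET; /* read current settings */
		res = octeontx_bgx_port_flow_ctrl_cfg(port, &fc);
		if (res < 0)
			return res;

		fc.fc_cfg = BGX_PORT_FC_CFG_SET; /* write back with pause on */
		fc.tx_pause = 1;
		fc.high_water = 1024; /* illustrative, multiple of 16 */
		fc.low_water = 512;
		return octeontx_bgx_port_flow_ctrl_cfg(port, &fc);
	}
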
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h
new file mode 100644
index 000000000..04b9ce191
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_IO_H__
+#define __OCTEONTX_IO_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <rte_io.h>
+
+/* In the Cavium OCTEON TX SoC, all accesses to the device registers are
+ * implicitly strongly ordered, so the relaxed versions of the IO
+ * operations are safe to use without any IO memory barriers.
+ */
+#define octeontx_read64 rte_read64_relaxed
+#define octeontx_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define octeontx_prefetch_store_keep(_ptr) ({\
+ asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
+
+#define octeontx_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define octeontx_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Unoptimized functions for building on non-arm64 arch */
+
+#define octeontx_prefetch_store_keep(_ptr) do {} while (0)
+
+#define octeontx_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define octeontx_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+#if defined(RTE_ARCH_ARM64)
+/**
+ * Perform an atomic fetch-and-add operation.
+ */
+static inline uint64_t
+octeontx_reg_ldadd_u64(void *addr, int64_t off)
+{
+ uint64_t old_val;
+
+ __asm__ volatile(
+ " .cpu generic+lse\n"
+ " ldadd %1, %0, [%2]\n"
+ : "=r" (old_val) : "r" (off), "r" (addr) : "memory");
+
+ return old_val;
+}
+
+/**
+ * Perform an LMTST operation: an atomic write of up to 128 bytes to
+ * an I/O block that supports this operation type.
+ *
+ * @param lmtline_va is the address where LMTLINE is mapped
+ * @param ioreg_va is the virtual address of the device register
+ * @param cmdbuf is the array of peripheral commands to execute
+ * @param cmdsize is the number of 64-bit words in 'cmdbuf'
+ *
+ * @return N/A
+ */
+static inline void
+octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[],
+ uint64_t cmdsize)
+{
+ uint64_t result;
+ uint64_t word_count;
+ uint64_t *lmtline = lmtline_va;
+
+ word_count = cmdsize;
+
+ do {
+ /* Copy commands to LMTLINE */
+ for (result = 0; result < word_count; result += 2) {
+ lmtline[result + 0] = cmdbuf[result + 0];
+ lmtline[result + 1] = cmdbuf[result + 1];
+ }
+
+ /* LDEOR initiates atomic transfer to I/O device */
+ __asm__ volatile(
+ " .cpu generic+lse\n"
+ " ldeor xzr, %0, [%1]\n"
+ : "=r" (result) : "r" (ioreg_va) : "memory");
+ } while (!result);
+}
+
+#else
+
+static inline uint64_t
+octeontx_reg_ldadd_u64(void *addr, int64_t off)
+{
+ RTE_SET_USED(addr);
+ RTE_SET_USED(off);
+ return 0;
+}
+
+static inline void
+octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[],
+ uint64_t cmdsize)
+{
+ RTE_SET_USED(lmtline_va);
+ RTE_SET_USED(ioreg_va);
+ RTE_SET_USED(cmdbuf);
+ RTE_SET_USED(cmdsize);
+}
+
+#endif
+#endif /* __OCTEONTX_IO_H__ */
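
For readers without the ARMv8.1 LSE reference at hand: LDADD is a 64-bit
fetch-and-add that returns the value held at the address before the add.
A model of the same semantics on ordinary memory, using a compiler builtin
(exposition only; the real helper must target a device register, which
compiler atomics cannot address):

	#include <stdint.h>

	/* Behaves like octeontx_reg_ldadd_u64(), but on normal memory:
	 * atomically add 'off' to *addr and return the pre-add value.
	 */
	static inline uint64_t
	ldadd_u64_model(uint64_t *addr, int64_t off)
	{
		return __atomic_fetch_add(addr, (uint64_t)off,
					  __ATOMIC_RELAXED);
	}
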
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h
new file mode 100644
index 000000000..4445369ce
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_PKI_VAR_H__
+#define __OCTEONTX_PKI_VAR_H__
+
+#include <rte_byteorder.h>
+
+#define OCTTX_PACKET_WQE_SKIP 128
+#define OCTTX_PACKET_FIRST_SKIP_MAXREGVAL 496
+#define OCTTX_PACKET_FIRST_SKIP_MAXLEN 512
+#define OCTTX_PACKET_FIRST_SKIP_ADJUST(x) \
+ (RTE_MIN(x, OCTTX_PACKET_FIRST_SKIP_MAXREGVAL))
+#define OCTTX_PACKET_FIRST_SKIP_SUM(p) \
+ (OCTTX_PACKET_WQE_SKIP \
+ + rte_pktmbuf_priv_size(p) \
+ + RTE_PKTMBUF_HEADROOM)
+#define OCTTX_PACKET_FIRST_SKIP(p) \
+ OCTTX_PACKET_FIRST_SKIP_ADJUST(OCTTX_PACKET_FIRST_SKIP_SUM(p))
+#define OCTTX_PACKET_LATER_SKIP 128
+
+/* WQE descriptor */
+typedef union octtx_wqe_s {
+ uint64_t w[6];
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ struct {
+ uint64_t pknd : 6;
+ uint64_t rsvd0 : 10;
+ uint64_t style : 8;
+ uint64_t bufs : 8;
+ uint64_t chan : 12;
+ uint64_t apad : 3;
+ uint64_t rsvd1 : 1;
+ uint64_t aura : 12;
+ uint64_t rsvd2 : 4;
+ } w0;
+
+ struct {
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t rsvd0 : 2;
+ uint64_t rsvd1 : 2;
+ uint64_t len : 16;
+ } w1;
+
+ struct {
+ uint64_t op_code : 8;
+ uint64_t err_lev : 3;
+ uint64_t raw : 1;
+ uint64_t l2m : 1;
+ uint64_t l2b : 1;
+ uint64_t l3m : 1;
+ uint64_t l3b : 1;
+ uint64_t l3fr : 1;
+ uint64_t pf1 : 1;
+ uint64_t pf2 : 1;
+ uint64_t pf3 : 1;
+ uint64_t pf4 : 1;
+ uint64_t sh : 1;
+ uint64_t vs : 1;
+ uint64_t vv : 1;
+ uint64_t rsvd0 : 8;
+ uint64_t lae : 1;
+ uint64_t lbty : 5;
+ uint64_t lcty : 5;
+ uint64_t ldty : 5;
+ uint64_t lety : 5;
+ uint64_t lfty : 5;
+ uint64_t lgty : 5;
+ uint64_t sw : 1;
+ } w2;
+
+ struct {
+ uint64_t addr; /* Byte addr of start-of-pkt */
+ } w3;
+
+ struct {
+ uint64_t laptr : 8;
+ uint64_t lbptr : 8;
+ uint64_t lcptr : 8;
+ uint64_t ldprt : 8;
+ uint64_t leptr : 8;
+ uint64_t lfptr : 8;
+ uint64_t lgptr : 8;
+ uint64_t vlptr : 8;
+ } w4;
+
+ struct {
+ uint64_t rsvd0 : 47;
+ uint64_t dwd : 1;
+ uint64_t size : 16;
+ } w5;
+#else
+ struct {
+ uint64_t rsvd2 : 4;
+ uint64_t aura : 12;
+ uint64_t rsvd1 : 1;
+ uint64_t apad : 3;
+ uint64_t chan : 12;
+ uint64_t bufs : 8;
+ uint64_t style : 8;
+ uint64_t rsvd0 : 10;
+ uint64_t pknd : 6;
+ } w0;
+
+ struct {
+ uint64_t len : 16;
+ uint64_t rsvd1 : 2;
+ uint64_t rsvd0 : 2;
+ uint64_t grp : 10;
+ uint64_t tt : 2;
+ uint64_t tag : 32;
+ } w1;
+
+ struct {
+ uint64_t sw : 1;
+ uint64_t lgty : 5;
+ uint64_t lfty : 5;
+ uint64_t lety : 5;
+ uint64_t ldty : 5;
+ uint64_t lcty : 5;
+ uint64_t lbty : 5;
+ uint64_t lae : 1;
+ uint64_t rsvd0 : 8;
+ uint64_t vv : 1;
+ uint64_t vs : 1;
+ uint64_t sh : 1;
+ uint64_t pf4 : 1;
+ uint64_t pf3 : 1;
+ uint64_t pf2 : 1;
+ uint64_t pf1 : 1;
+ uint64_t l3fr : 1;
+ uint64_t l3b : 1;
+ uint64_t l3m : 1;
+ uint64_t l2b : 1;
+ uint64_t l2m : 1;
+ uint64_t raw : 1;
+ uint64_t err_lev : 3;
+ uint64_t op_code : 8;
+ } w2;
+
+ struct {
+ uint64_t addr; /* Byte addr of start-of-pkt */
+ } w3;
+
+ struct {
+ uint64_t vlptr : 8;
+ uint64_t lgptr : 8;
+ uint64_t lfptr : 8;
+ uint64_t leptr : 8;
+ uint64_t ldprt : 8;
+ uint64_t lcptr : 8;
+ uint64_t lbptr : 8;
+ uint64_t laptr : 8;
+ } w4;
+#endif
+ } s;
+
+} __rte_packed octtx_wqe_t;
+
+enum occtx_pki_ltype_e {
+ OCCTX_PKI_LTYPE_NONE = 0,
+ OCCTX_PKI_LTYPE_ENET = 1,
+ OCCTX_PKI_LTYPE_VLAN = 2,
+ OCCTX_PKI_LTYPE_SNAP_PAYLD = 5,
+ OCCTX_PKI_LTYPE_ARP = 6,
+ OCCTX_PKI_LTYPE_RARP = 7,
+ OCCTX_PKI_LTYPE_IP4 = 8,
+ OCCTX_PKI_LTYPE_IP4_OPT = 9,
+ OCCTX_PKI_LTYPE_IP6 = 0xa,
+ OCCTX_PKI_LTYPE_IP6_OPT = 0xb,
+ OCCTX_PKI_LTYPE_IPSEC_ESP = 0xc,
+ OCCTX_PKI_LTYPE_IPFRAG = 0xd,
+ OCCTX_PKI_LTYPE_IPCOMP = 0xe,
+ OCCTX_PKI_LTYPE_TCP = 0x10,
+ OCCTX_PKI_LTYPE_UDP = 0x11,
+ OCCTX_PKI_LTYPE_SCTP = 0x12,
+ OCCTX_PKI_LTYPE_UDP_VXLAN = 0x13,
+ OCCTX_PKI_LTYPE_GRE = 0x14,
+ OCCTX_PKI_LTYPE_NVGRE = 0x15,
+ OCCTX_PKI_LTYPE_GTP = 0x16,
+ OCCTX_PKI_LTYPE_UDP_GENEVE = 0x17,
+ OCCTX_PKI_LTYPE_SW28 = 0x1c,
+ OCCTX_PKI_LTYPE_SW29 = 0x1d,
+ OCCTX_PKI_LTYPE_SW30 = 0x1e,
+ OCCTX_PKI_LTYPE_SW31 = 0x1f,
+ OCCTX_PKI_LTYPE_LAST
+};
+
+enum lc_type_e {
+ LC_NONE = OCCTX_PKI_LTYPE_NONE,
+ LC_IPV4 = OCCTX_PKI_LTYPE_IP4,
+ LC_IPV4_OPT = OCCTX_PKI_LTYPE_IP4_OPT,
+ LC_IPV6 = OCCTX_PKI_LTYPE_IP6,
+ LC_IPV6_OPT = OCCTX_PKI_LTYPE_IP6_OPT,
+};
+
+enum le_type_e {
+ LE_NONE = OCCTX_PKI_LTYPE_NONE,
+};
+
+enum lf_type_e {
+ LF_NONE = OCCTX_PKI_LTYPE_NONE,
+ LF_IPSEC_ESP = OCCTX_PKI_LTYPE_IPSEC_ESP,
+ LF_IPFRAG = OCCTX_PKI_LTYPE_IPFRAG,
+ LF_IPCOMP = OCCTX_PKI_LTYPE_IPCOMP,
+ LF_TCP = OCCTX_PKI_LTYPE_TCP,
+ LF_UDP = OCCTX_PKI_LTYPE_UDP,
+ LF_GRE = OCCTX_PKI_LTYPE_GRE,
+ LF_UDP_GENEVE = OCCTX_PKI_LTYPE_UDP_GENEVE,
+ LF_UDP_VXLAN = OCCTX_PKI_LTYPE_UDP_VXLAN,
+ LF_NVGRE = OCCTX_PKI_LTYPE_NVGRE,
+};
+
+/* Word 0 of HW segment buflink structure */
+typedef union octtx_pki_buflink_w0_u {
+ uint64_t v;
+ struct {
+ uint64_t size:16;
+ uint64_t rsvd1:15;
+ uint64_t invfree:1;
+ /** Aura number of the next segment */
+ uint64_t aura:16;
+ uint64_t sw:9;
+ uint64_t later_invfree:1;
+ uint64_t rsvd2:5;
+ /** 1 if aura number is set */
+ uint64_t has_aura:1;
+ } s;
+} octtx_pki_buflink_w0_t;
+
+/* Word 1 of HW segment buflink structure */
+typedef union octtx_pki_buflink_w1_u {
+ uint64_t v;
+ struct {
+ uint64_t addr;
+ } s;
+} octtx_pki_buflink_w1_t;
+
+/* HW structure linking packet segments into singly linked list */
+typedef struct octtx_pki_buflink_s {
+ octtx_pki_buflink_w0_t w0; /* Word 0 of the buflink */
+ octtx_pki_buflink_w1_t w1; /* Word 1 of the buflink */
+} octtx_pki_buflink_t;
+
+#endif /* __OCTEONTX_PKI_VAR_H__ */
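
The FIRST_SKIP macros above compute how many bytes the PKI leaves free at
the head of the first packet buffer: room for the WQE, the mbuf private
area and the mbuf headroom, clamped to the largest value the hardware
register accepts. A worked sketch with assumed pool parameters (128 B of
private area and the default 128 B headroom):

	#include <stdio.h>

	#define WQE_SKIP    128	/* OCTTX_PACKET_WQE_SKIP */
	#define PRIV_SIZE   128	/* assumed rte_pktmbuf_priv_size(pool) */
	#define HEADROOM    128	/* RTE_PKTMBUF_HEADROOM (default) */
	#define SKIP_MAXREG 496	/* OCTTX_PACKET_FIRST_SKIP_MAXREGVAL */

	int main(void)
	{
		unsigned int sum = WQE_SKIP + PRIV_SIZE + HEADROOM;
		unsigned int first_skip = sum < SKIP_MAXREG ? sum : SKIP_MAXREG;

		printf("first_skip = %u bytes\n", first_skip); /* prints 384 */
		return 0;
	}
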
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c
new file mode 100644
index 000000000..0ddff5488
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_bus_pci.h>
+
+#include "../octeontx_logs.h"
+#include "octeontx_io.h"
+#include "octeontx_pkivf.h"
+
+
+struct octeontx_pkivf {
+ uint8_t *bar0;
+ uint8_t status;
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct octeontx_pki_vf_ctl_s {
+ struct octeontx_pkivf pki[PKI_VF_MAX];
+};
+
+static struct octeontx_pki_vf_ctl_s pki_vf_ctl;
+
+int
+octeontx_pki_port_open(int port)
+{
+ uint16_t global_domain = octeontx_get_global_domain();
+ struct octeontx_mbox_hdr hdr;
+ pki_port_type_t port_type;
+ int i, res;
+
+	/* Check if at least one PKI VF is in the application domain. */
+ for (i = 0; i < PKI_VF_MAX; i++) {
+ if (pki_vf_ctl.pki[i].domain != global_domain)
+ continue;
+ break;
+ }
+
+ if (i == PKI_VF_MAX)
+ return -ENODEV;
+
+ port_type.port_type = OCTTX_PORT_TYPE_NET;
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_OPEN;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &port_type, sizeof(pki_port_type_t),
+ NULL, 0);
+ if (res < 0)
+ return -EACCES;
+ return res;
+}
+
+int
+octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_hash_cfg_t h_cfg = *(pki_hash_cfg_t *)hash_cfg;
+ int len = sizeof(pki_hash_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_HASH_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &h_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_pktbuf_cfg_t b_cfg = *(pki_pktbuf_cfg_t *)buf_cfg;
+ int len = sizeof(pki_pktbuf_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_PKTBUF_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &b_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+ return res;
+}
+
+int
+octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_qos_cfg_t q_cfg = *(pki_qos_cfg_t *)qos_cfg;
+ int len = sizeof(pki_qos_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_CREATE_QOS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+
+int
+octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_errchk_cfg_t e_cfg;
+ e_cfg = *((pki_errchk_cfg_t *)(cfg));
+ int len = sizeof(pki_errchk_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_ERRCHK_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &e_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_pki_port_vlan_fltr_config(int port,
+ pki_port_vlan_filter_config_t *fltr_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_port_vlan_filter_config_t cfg = *fltr_cfg;
+ int len = sizeof(pki_port_vlan_filter_config_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_VLAN_FILTER_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+ return res;
+}
+
+int
+octeontx_pki_port_vlan_fltr_entry_config(int port,
+ pki_port_vlan_filter_entry_config_t *e_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_port_vlan_filter_entry_config_t cfg = *e_cfg;
+ int len = sizeof(pki_port_vlan_filter_entry_config_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_VLAN_FILTER_ENTRY_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+ return res;
+}
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_PKI_VF 0xA0DD
+
+/* PKIVF PCIe device */
+static int
+pkivf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ struct octeontx_pkivf *res;
+ static uint8_t vf_cnt;
+ uint16_t domain;
+ uint16_t vfid;
+ uint8_t *bar0;
+ uint64_t val;
+
+ RTE_SET_USED(pci_drv);
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ octeontx_log_err("PKI Empty bar[0] %p",
+ pci_dev->mem_resource[0].addr);
+ return -ENODEV;
+ }
+
+ bar0 = pci_dev->mem_resource[0].addr;
+ val = octeontx_read64(bar0);
+ domain = val & 0xffff;
+ vfid = (val >> 16) & 0xffff;
+
+ if (unlikely(vfid >= PKI_VF_MAX)) {
+ octeontx_log_err("pki: Invalid vfid %d", vfid);
+ return -EINVAL;
+ }
+
+ res = &pki_vf_ctl.pki[vf_cnt++];
+ res->vfid = vfid;
+ res->domain = domain;
+ res->bar0 = bar0;
+
+ octeontx_log_dbg("PKI Domain=%d vfid=%d", res->domain, res->vfid);
+ return 0;
+}
+
+static const struct rte_pci_id pci_pkivf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_PKI_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_pkivf = {
+ .id_table = pci_pkivf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = pkivf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_pkivf, pci_pkivf);
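
A sketch of the Rx-side setup this VF enables, chaining the wrappers above
(the call order is inferred from the API, the buffer geometry values are
placeholders, and only fields whose mmask bit is set take effect):

	#include <string.h>
	#include "octeontx_pkivf.h"

	static int
	pki_rx_setup(int port)
	{
		pki_pktbuf_cfg_t buf;
		int res;

		res = octeontx_pki_port_open(port);
		if (res < 0)
			return res;

		memset(&buf, 0, sizeof(buf));
		buf.port_type = OCTTX_PORT_TYPE_NET;
		buf.mmask.f_mbuff_size = 1;
		buf.mmask.f_first_skip = 1;
		buf.mbuff_size = 2048;	/* placeholder buffer size */
		buf.first_skip = 384;	/* see the FIRST_SKIP macros */
		res = octeontx_pki_port_pktbuf_config(port, &buf);
		if (res < 0)
			return res;

		return octeontx_pki_port_start(port);
	}
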
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h
new file mode 100644
index 000000000..d41eaa57e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_PKI_H__
+#define __OCTEONTX_PKI_H__
+
+#include <stdint.h>
+
+#include <octeontx_mbox.h>
+
+#define OCTEONTX_PKI_COPROC 5
+
+/* PKI messages */
+
+#define MBOX_PKI_PORT_OPEN 1
+#define MBOX_PKI_PORT_START 2
+#define MBOX_PKI_PORT_STOP 3
+#define MBOX_PKI_PORT_CLOSE 4
+#define MBOX_PKI_PORT_CONFIG 5
+#define MBOX_PKI_PORT_OPT_PARSER_CONFIG 6
+#define MBOX_PKI_PORT_CUSTOM_PARSER_CONFIG 7
+#define MBOX_PKI_PORT_PKTBUF_CONFIG 8
+#define MBOX_PKI_PORT_HASH_CONFIG 9
+#define MBOX_PKI_PORT_ERRCHK_CONFIG 10
+#define MBOX_PKI_PORT_CREATE_QOS 11
+#define MBOX_PKI_PORT_MODIFY_QOS 12
+#define MBOX_PKI_PORT_DELETE_QOS 13
+#define MBOX_PKI_PORT_PKTDROP_CONFIG 14
+#define MBOX_PKI_PORT_WQE_GEN_CONFIG 15
+#define MBOX_PKI_BACKPRESSURE_CONFIG 16
+#define MBOX_PKI_PORT_GET_STATS 17
+#define MBOX_PKI_PORT_RESET_STATS 18
+#define MBOX_PKI_GET_PORT_CONFIG 19
+#define MBOX_PKI_GET_PORT_QOS_CONFIG 20
+#define MBOX_PKI_PORT_ALLOC_QPG 21
+#define MBOX_PKI_PORT_FREE_QPG 22
+#define MBOX_PKI_SET_PORT_CONFIG 23
+#define MBOX_PKI_PORT_VLAN_FILTER_CONFIG 24
+#define MBOX_PKI_PORT_VLAN_FILTER_ENTRY_CONFIG 25
+
+#define MBOX_PKI_MAX_QOS_ENTRY 64
+
+/* PKI maximum constants */
+#define PKI_VF_MAX (32)
+#define PKI_MAX_PKTLEN (32768)
+
+/* Interface types: */
+enum {
+ OCTTX_PORT_TYPE_NET, /* Network interface ports */
+ OCTTX_PORT_TYPE_INT, /* CPU internal interface ports */
+ OCTTX_PORT_TYPE_PCI, /* DPI/PCIe interface ports */
+ OCTTX_PORT_TYPE_MAX
+};
+
+/* pki pkind parse mode */
+enum {
+ PKI_PARSE_LA_TO_LG = 0,
+ PKI_PARSE_LB_TO_LG = 1,
+ PKI_PARSE_LC_TO_LG = 3,
+ PKI_PARSE_LG = 0x3f,
+ PKI_PARSE_NOTHING = 0x7f
+};
+
+/* CACHE MODE */
+enum {
+ PKI_OPC_MODE_STT = 0LL,
+ PKI_OPC_MODE_STF = 1LL,
+ PKI_OPC_MODE_STF1_STT = 2LL,
+ PKI_OPC_MODE_STF2_STT = 3LL
+};
+
+/* PKI QPG QOS */
+enum {
+ PKI_QPG_QOS_NONE = 0,
+ PKI_QPG_QOS_VLAN,
+ PKI_QPG_QOS_MPLS,
+ PKI_QPG_QOS_DSA_SRC,
+ PKI_QPG_QOS_DIFFSERV,
+ PKI_QPG_QOS_HIGIG,
+};
+
+/* pki port type */
+typedef struct pki_port_type {
+ uint8_t port_type;
+} pki_port_type_t;
+
+/* pki port config */
+typedef struct pki_port_cfg {
+ uint8_t port_type;
+ struct {
+ uint8_t fcs_pres:1;
+ uint8_t fcs_skip:1;
+ uint8_t parse_mode:1;
+ uint8_t mpls_parse:1;
+ uint8_t inst_hdr_parse:1;
+ uint8_t fulc_parse:1;
+ uint8_t dsa_parse:1;
+ uint8_t hg2_parse:1;
+ uint8_t hg_parse:1;
+ } mmask;
+ uint8_t fcs_pres;
+ uint8_t fcs_skip;
+ uint8_t parse_mode;
+ uint8_t mpls_parse;
+ uint8_t inst_hdr_parse;
+ uint8_t fulc_parse;
+ uint8_t dsa_parse;
+ uint8_t hg2_parse;
+ uint8_t hg_parse;
+} pki_prt_cfg_t;
+
+
+/* pki Flow/style packet buffer config */
+typedef struct pki_port_pktbuf_cfg {
+ uint8_t port_type;
+ struct {
+ uint16_t f_mbuff_size:1;
+ uint16_t f_wqe_skip:1;
+ uint16_t f_first_skip:1;
+ uint16_t f_later_skip:1;
+ uint16_t f_pkt_outside_wqe:1;
+ uint16_t f_wqe_endian:1;
+ uint16_t f_cache_mode:1;
+ } mmask;
+ uint16_t mbuff_size;
+ uint16_t wqe_skip;
+ uint16_t first_skip;
+ uint16_t later_skip;
+ uint8_t pkt_outside_wqe;
+ uint8_t wqe_endian;
+ uint8_t cache_mode;
+} pki_pktbuf_cfg_t;
+
+/* pki flow/style tag config */
+typedef struct pki_port_hash_cfg {
+ uint8_t port_type;
+ uint32_t tag_slf:1;
+ uint32_t tag_sle:1;
+ uint32_t tag_sld:1;
+ uint32_t tag_slc:1;
+ uint32_t tag_dlf:1;
+ uint32_t tag_dle:1;
+ uint32_t tag_dld:1;
+ uint32_t tag_dlc:1;
+ uint32_t tag_prt:1;
+ uint32_t tag_vlan0:1;
+ uint32_t tag_vlan1:1;
+ uint32_t tag_ip_pctl:1;
+ uint32_t tag_sync:1;
+ uint32_t tag_spi:1;
+ uint32_t tag_gtp:1;
+ uint32_t tag_vni:1;
+} pki_hash_cfg_t;
+
+/* pki flow/style errcheck config */
+typedef struct pki_port_errcheck_cfg {
+ uint8_t port_type;
+ struct {
+ uint32_t f_ip6_udp_opt:1;
+ uint32_t f_lenerr_en:1;
+ uint32_t f_maxerr_en:1;
+ uint32_t f_minerr_en:1;
+ uint32_t f_fcs_chk:1;
+ uint32_t f_fcs_strip:1;
+ uint32_t f_len_lf:1;
+ uint32_t f_len_le:1;
+ uint32_t f_len_ld:1;
+ uint32_t f_len_lc:1;
+ uint32_t f_csum_lf:1;
+ uint32_t f_csum_le:1;
+ uint32_t f_csum_ld:1;
+ uint32_t f_csum_lc:1;
+ uint32_t f_min_frame_len;
+ uint32_t f_max_frame_len;
+ } mmask;
+ uint64_t ip6_udp_opt:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t fcs_chk:1;
+ uint64_t fcs_strip:1;
+ uint64_t len_lf:1;
+ uint64_t len_le:1;
+ uint64_t len_ld:1;
+ uint64_t len_lc:1;
+ uint64_t csum_lf:1;
+ uint64_t csum_le:1;
+ uint64_t csum_ld:1;
+ uint64_t csum_lc:1;
+ uint64_t min_frame_len;
+ uint64_t max_frame_len;
+} pki_errchk_cfg_t;
+
+struct pki_qos_entry {
+ uint16_t port_add;
+ uint16_t ggrp_ok;
+ uint16_t ggrp_bad;
+ uint16_t gaura;
+ uint8_t grptag_ok;
+ uint8_t grptag_bad;
+ uint8_t ena_red;
+ uint8_t ena_drop;
+ uint8_t tag_type;
+};
+
+#define PKO_MAX_QOS_ENTRY 64
+
+/* pki flow/style enable qos */
+typedef struct pki_port_create_qos {
+ uint8_t port_type;
+ uint8_t qpg_qos;
+ uint8_t num_entry;
+ uint8_t tag_type;
+ uint8_t drop_policy;
+ struct pki_qos_entry qos_entry[PKO_MAX_QOS_ENTRY];
+} pki_qos_cfg_t;
+
+/* pki flow/style delete qos entry */
+typedef struct pki_port_delete_qos_entry {
+ uint8_t port_type;
+ uint16_t index;
+} pki_del_qos_t;
+
+/* pki flow/style modify qos entry */
+typedef struct pki_port_modify_qos_entry {
+ uint8_t port_type;
+ uint16_t index;
+ struct {
+ uint8_t f_port_add:1;
+ uint8_t f_grp_ok:1;
+ uint8_t f_grp_bad:1;
+ uint8_t f_gaura:1;
+ uint8_t f_grptag_ok:1;
+ uint8_t f_grptag_bad:1;
+ uint8_t f_tag_type:1;
+ } mmask;
+ struct pki_qos_entry qos_entry;
+} pki_mod_qos_t;
+
+/* pki port VLAN filter config */
+typedef struct pki_port_vlan_filter_config {
+ uint8_t port_type; /* OCTTX_PORT_TYPE_[NET/INT/PCI] */
+ uint8_t fltr_conf; /* '1' to enable & '0' to disable */
+} pki_port_vlan_filter_config_t;
+
+/* pki port VLAN filter entry config */
+typedef struct pki_port_vlan_filter_entry_config {
+ uint8_t port_type; /* OCTTX_PORT_TYPE_[NET/INT/PCI] */
+ uint8_t entry_conf; /* '1' to add & '0' to remove */
+ uint16_t vlan_tpid; /* in host byte-order */
+ uint16_t vlan_id; /* in host byte-order */
+} pki_port_vlan_filter_entry_config_t;
+
+static inline int
+octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_mod_qos_t q_cfg = *(pki_mod_qos_t *)qos_cfg;
+ int len = sizeof(pki_mod_qos_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_del_qos_t q_cfg = *(pki_del_qos_t *)qos_cfg;
+ int len = sizeof(pki_del_qos_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_close(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_port_type_t ptype;
+ int len = sizeof(pki_port_type_t);
+ memset(&ptype, 0, len);
+ ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_CLOSE;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_start(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_port_type_t ptype;
+ int len = sizeof(pki_port_type_t);
+ memset(&ptype, 0, len);
+ ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_START;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_stop(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ pki_port_type_t ptype;
+ int len = sizeof(pki_port_type_t);
+ memset(&ptype, 0, len);
+ ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_STOP;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int octeontx_pki_port_open(int port);
+int octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg);
+int octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg);
+int octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg);
+int octeontx_pki_port_close(int port);
+int octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg);
+int octeontx_pki_port_vlan_fltr_config(int port,
+ pki_port_vlan_filter_config_t *fltr_cfg);
+int octeontx_pki_port_vlan_fltr_entry_config(int port,
+ pki_port_vlan_filter_entry_config_t *entry_cfg);
+
+#endif /* __OCTEONTX_PKI_H__ */
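
A sketch for the VLAN filter pair above: enable filtering on the port, then
whitelist one VID (0x8100 is the standard C-VLAN TPID; both fields are
passed in host byte order per the struct comments):

	#include "octeontx_pkivf.h"

	static int
	pki_vlan_allow(int port, uint16_t vid)
	{
		pki_port_vlan_filter_config_t fltr = {
			.port_type = OCTTX_PORT_TYPE_NET,
			.fltr_conf = 1,		/* '1' to enable */
		};
		pki_port_vlan_filter_entry_config_t entry = {
			.port_type = OCTTX_PORT_TYPE_NET,
			.entry_conf = 1,	/* '1' to add */
			.vlan_tpid = 0x8100,
			.vlan_id = vid,
		};
		int res;

		res = octeontx_pki_port_vlan_fltr_config(port, &fltr);
		if (res < 0)
			return res;
		return octeontx_pki_port_vlan_fltr_entry_config(port, &entry);
	}
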
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c
new file mode 100644
index 000000000..bf28bc799
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c
@@ -0,0 +1,640 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_eal.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include "../octeontx_logs.h"
+#include "octeontx_io.h"
+#include "octeontx_pkovf.h"
+
+struct octeontx_pko_iomem {
+ uint8_t *va;
+ rte_iova_t iova;
+ size_t size;
+};
+
+#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}
+#define PKO_VALID 0x1
+#define PKO_INUSE 0x2
+
+struct octeontx_pko_fc_ctl_s {
+ int64_t buf_cnt;
+ int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1];
+};
+
+struct octeontx_pkovf {
+ uint8_t *bar0;
+ uint8_t *bar2;
+ uint8_t status;
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct octeontx_pko_vf_ctl_s {
+ rte_spinlock_t lock;
+ uint16_t global_domain;
+ struct octeontx_pko_iomem fc_iomem;
+ struct octeontx_pko_fc_ctl_s *fc_ctl;
+ struct octeontx_pkovf pko[PKO_VF_MAX];
+ struct {
+ uint64_t chanid;
+ } dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ];
+};
+
+static struct octeontx_pko_vf_ctl_s pko_vf_ctl;
+
+static void *
+octeontx_pko_dq_vf_bar0(uint16_t txq)
+{
+ int vf_ix;
+
+ vf_ix = txq / PKO_VF_NUM_DQ;
+ return pko_vf_ctl.pko[vf_ix].bar0;
+}
+
+static int
+octeontx_pko_dq_gdq(uint16_t txq)
+{
+ return txq % PKO_VF_NUM_DQ;
+}
+
+/**
+ * Open a PKO DQ.
+ */
+static inline
+int octeontx_pko_dq_open(uint16_t txq)
+{
+ unsigned int reg_off;
+ uint8_t *vf_bar0;
+ uint64_t rtn;
+ int gdq;
+
+ vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
+ gdq = octeontx_pko_dq_gdq(txq);
+
+ if (unlikely(gdq < 0 || vf_bar0 == NULL))
+ return -EINVAL;
+	*(volatile int64_t *)(pko_vf_ctl.fc_ctl + txq) =
+ PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID;
+
+ rte_wmb();
+
+ octeontx_write64(PKO_DQ_FC_DEPTH_PAGES,
+ vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq));
+
+ /* Set the register to return descriptor (packet) count as DEPTH */
+ /* KIND=1, NCB_QUERY_RSP=0 */
+ octeontx_write64(1ull << PKO_DQ_KIND_BIT,
+ vf_bar0 + PKO_VF_DQ_WM_CTL(gdq));
+ reg_off = PKO_VF_DQ_OP_OPEN(gdq);
+
+ rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);
+
+ /* PKO_DQOP_E::OPEN */
+ if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1)
+ return -EIO;
+
+ switch (rtn >> PKO_DQ_STATUS_BIT) {
+ case 0xC: /* DQALREADYCREATED */
+ case 0x0: /* PASS */
+ break;
+ default:
+ return -EIO;
+ }
+
+ /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
+ octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
+
+ return rtn & ((1ull << PKO_DQ_OP_BIT) - 1);
+}
+
+/**
+ * Close a PKO DQ
+ * Flush all packets pending.
+ */
+static inline
+int octeontx_pko_dq_close(uint16_t txq)
+{
+ unsigned int reg_off;
+ uint8_t *vf_bar0;
+ uint64_t rtn;
+ int res;
+
+ vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
+ res = octeontx_pko_dq_gdq(txq);
+
+ if (unlikely(res < 0 || vf_bar0 == NULL))
+ return -EINVAL;
+
+ reg_off = PKO_VF_DQ_OP_CLOSE(res);
+
+ rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);
+
+ /* PKO_DQOP_E::CLOSE */
+ if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2)
+ return -EIO;
+
+ switch (rtn >> PKO_DQ_STATUS_BIT) {
+ case 0xD: /* DQNOTCREATED */
+ case 0x0: /* PASS */
+ break;
+ default:
+ return -EIO;
+ }
+
+ res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
+ return res;
+}
+
+/* Flush all packets pending on a DQ */
+static inline
+int octeontx_pko_dq_drain(uint16_t txq)
+{
+ unsigned int gdq;
+ uint8_t *vf_bar0;
+ uint64_t reg;
+ int res, timo = PKO_DQ_DRAIN_TO;
+
+ vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
+ res = octeontx_pko_dq_gdq(txq);
+ gdq = res;
+
+ /* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */
+ octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
+ /* Wait until buffers leave DQs */
+ reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
+ while (reg && timo > 0) {
+ rte_delay_us(100);
+ timo--;
+ reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
+ }
+ /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
+ octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
+
+ return reg;
+}
+
+static inline int
+octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
+ unsigned int dq_num, unsigned int dq_from)
+{
+ unsigned int dq, dq_cnt;
+ unsigned int dq_base;
+
+ dq_cnt = 0;
+ dq = dq_from;
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ dq_base = dq;
+ dq_cnt = 0;
+		while (dq < RTE_DIM(ctl->dq_map) &&
+			ctl->dq_map[dq].chanid == ~chanid) {
+ dq_cnt++;
+ if (dq_cnt == dq_num)
+ return dq_base;
+ dq++;
+ }
+ dq++;
+ }
+ return -1;
+}
+
+static inline void
+octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
+ unsigned int dq_base, unsigned int dq_num)
+{
+ unsigned int dq, dq_cnt;
+
+ dq_cnt = 0;
+ while (dq_cnt < dq_num) {
+ dq = dq_base + dq_cnt;
+
+ octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq,
+ chanid);
+
+ ctl->dq_map[dq].chanid = ~chanid;
+ dq_cnt++;
+ }
+}
+
+static inline int
+octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base,
+ unsigned int dq_num, uint64_t chanid)
+{
+ const uint64_t null_chanid = ~0ull;
+ int dq;
+
+ rte_spinlock_lock(&ctl->lock);
+
+ dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base);
+ if (dq < 0 || (unsigned int)dq != dq_base) {
+ rte_spinlock_unlock(&ctl->lock);
+ return -1;
+ }
+ octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num);
+
+ rte_spinlock_unlock(&ctl->lock);
+
+ return 0;
+}
+
+static inline int
+octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
+{
+ const uint64_t null_chanid = ~0ull;
+ unsigned int dq = 0, dq_cnt = 0;
+
+ rte_spinlock_lock(&ctl->lock);
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ if (ctl->dq_map[dq].chanid == ~chanid) {
+ ctl->dq_map[dq].chanid = ~null_chanid;
+ dq_cnt++;
+ }
+ dq++;
+ }
+ rte_spinlock_unlock(&ctl->lock);
+
+ return dq_cnt > 0 ? 0 : -EINVAL;
+}
+
+int
+octeontx_pko_channel_open(int dq_base, int dq_num, int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int res;
+
+ res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid);
+ if (res < 0)
+ return -1;
+
+ return 0;
+}
+
+int
+octeontx_pko_channel_close(int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int res;
+
+ res = octeontx_pko_dq_free(ctl, chanid);
+ if (res < 0)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
+{
+ unsigned int dq_vf;
+ unsigned int dq, dq_cnt;
+
+ dq_cnt = 0;
+ dq = 0;
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ dq_vf = dq / PKO_VF_NUM_DQ;
+
+ if (!ctl->pko[dq_vf].bar0) {
+ dq += PKO_VF_NUM_DQ;
+ continue;
+ }
+
+ if (ctl->dq_map[dq].chanid != ~chanid) {
+ dq++;
+ continue;
+ }
+
+ if (octeontx_pko_dq_open(dq) < 0)
+ break;
+
+ dq_cnt++;
+ dq++;
+ }
+
+ return dq_cnt;
+}
+
+int
+octeontx_pko_channel_start(int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int dq_cnt;
+
+ dq_cnt = octeontx_pko_chan_start(ctl, chanid);
+ if (dq_cnt < 0)
+ return -1;
+
+ return dq_cnt;
+}
+
+static inline int
+octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
+{
+ unsigned int dq, dq_cnt, dq_vf;
+ int res;
+
+ dq_cnt = 0;
+ dq = 0;
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ dq_vf = dq / PKO_VF_NUM_DQ;
+
+ if (!ctl->pko[dq_vf].bar0) {
+ dq += PKO_VF_NUM_DQ;
+ continue;
+ }
+
+ if (ctl->dq_map[dq].chanid != ~chanid) {
+ dq++;
+ continue;
+ }
+
+ res = octeontx_pko_dq_drain(dq);
+ if (res > 0)
+ octeontx_log_err("draining DQ%d, buffers left: %x",
+ dq, res);
+
+ res = octeontx_pko_dq_close(dq);
+ if (res < 0)
+ octeontx_log_err("closing DQ%d failed\n", dq);
+
+ dq_cnt++;
+ dq++;
+ }
+ return dq_cnt;
+}
+
+int
+octeontx_pko_channel_stop(int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+
+ octeontx_pko_chan_stop(ctl, chanid);
+ return 0;
+}
+
+static inline int
+octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
+ void *out, size_t out_elem_size,
+ size_t dq_num, octeontx_pko_dq_getter_t getter)
+{
+ octeontx_dq_t curr;
+ unsigned int dq_vf;
+ unsigned int dq;
+
+ RTE_SET_USED(out_elem_size);
+ memset(&curr, 0, sizeof(octeontx_dq_t));
+
+ dq_vf = dq_num / PKO_VF_NUM_DQ;
+ dq = dq_num % PKO_VF_NUM_DQ;
+
+ if (!ctl->pko[dq_vf].bar0)
+ return -EINVAL;
+
+ if (ctl->dq_map[dq_num].chanid != ~chanid)
+ return -EINVAL;
+
+ uint8_t *iter = (uint8_t *)out;
+ curr.lmtline_va = ctl->pko[dq_vf].bar2;
+ curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
+ + PKO_VF_DQ_OP_SEND((dq), 0));
+ curr.fc_status_va = ctl->fc_ctl + dq_num;
+
+ octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
+ curr.lmtline_va, curr.ioreg_va,
+ curr.fc_status_va);
+
+ getter(&curr, (void *)iter);
+ return 0;
+}
+
+int
+octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
+ size_t dq_num, octeontx_pko_dq_getter_t getter)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int dq_cnt;
+
+ dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size,
+ dq_num, getter);
+ if (dq_cnt < 0)
+ return -1;
+
+ return dq_cnt;
+}
+
+int
+octeontx_pko_vf_count(void)
+{
+ uint16_t global_domain = octeontx_get_global_domain();
+ int vf_cnt;
+
+ pko_vf_ctl.global_domain = global_domain;
+ vf_cnt = 0;
+ while (pko_vf_ctl.pko[vf_cnt].bar0)
+ vf_cnt++;
+
+ return vf_cnt;
+}
+
+size_t
+octeontx_pko_get_vfid(void)
+{
+ size_t vf_cnt = octeontx_pko_vf_count();
+ size_t vf_idx;
+
+
+ for (vf_idx = 0; vf_idx < vf_cnt; vf_idx++) {
+ if (!(pko_vf_ctl.pko[vf_idx].status & PKO_VALID))
+ continue;
+ if (pko_vf_ctl.pko[vf_idx].status & PKO_INUSE)
+ continue;
+
+ pko_vf_ctl.pko[vf_idx].status |= PKO_INUSE;
+ return pko_vf_ctl.pko[vf_idx].vfid;
+ }
+
+ return SIZE_MAX;
+}
+
+int
+octeontx_pko_send_mtu(int port, int mtu)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+ mbox_pko_mtu_cfg_t cfg;
+
+ cfg.mtu = mtu;
+
+ hdr.coproc = OCTEONTX_PKO_COPROC;
+ hdr.msg = MBOX_PKO_MTU_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_mbox_send(&hdr, &cfg, sizeof(mbox_pko_mtu_cfg_t),
+ NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_pko_init_fc(const size_t pko_vf_count)
+{
+ int dq_ix;
+ uint64_t reg;
+ uint8_t *vf_bar0;
+ size_t vf_idx;
+ size_t fc_mem_size;
+
+ fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) *
+ pko_vf_count * PKO_VF_NUM_DQ;
+
+ pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128);
+ if (unlikely(!pko_vf_ctl.fc_iomem.va)) {
+ octeontx_log_err("fc_iomem: not enough memory");
+ return -ENOMEM;
+ }
+
+ pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2iova((void *)
+ pko_vf_ctl.fc_iomem.va);
+ pko_vf_ctl.fc_iomem.size = fc_mem_size;
+
+ pko_vf_ctl.fc_ctl =
+ (struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va;
+
+ /* Configure Flow-Control feature for all DQs of open VFs */
+ for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
+ if (pko_vf_ctl.pko[vf_idx].domain != pko_vf_ctl.global_domain)
+ continue;
+
+ dq_ix = pko_vf_ctl.pko[vf_idx].vfid * PKO_VF_NUM_DQ;
+ vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;
+
+ reg = (pko_vf_ctl.fc_iomem.iova +
+ (sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) & ~0x7F;
+ reg |= /* BASE */
+ (0x2 << 3) | /* HYST_BITS */
+ (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) |
+ (0x1 << 0); /* ENABLE */
+
+ octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);
+ pko_vf_ctl.pko[vf_idx].status = PKO_VALID;
+
+ octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
+ vf_bar0, (int)vf_idx, reg);
+ }
+ return 0;
+}
+
+void
+octeontx_pko_fc_free(void)
+{
+ rte_free(pko_vf_ctl.fc_iomem.va);
+}
+
+static void
+octeontx_pkovf_setup(void)
+{
+ static bool init_once;
+
+ if (!init_once) {
+ unsigned int i;
+
+ rte_spinlock_init(&pko_vf_ctl.lock);
+
+ pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL;
+ pko_vf_ctl.fc_ctl = NULL;
+
+ for (i = 0; i < PKO_VF_MAX; i++) {
+ pko_vf_ctl.pko[i].bar0 = NULL;
+ pko_vf_ctl.pko[i].bar2 = NULL;
+ pko_vf_ctl.pko[i].domain = ~(uint16_t)0;
+ pko_vf_ctl.pko[i].vfid = ~(uint16_t)0;
+ }
+
+ for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++)
+ pko_vf_ctl.dq_map[i].chanid = 0;
+
+ init_once = true;
+ }
+}
+
+/* PKOVF PCIe device */
+static int
+pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ uint16_t domain;
+ uint8_t *bar0;
+ uint8_t *bar2;
+ static uint8_t vf_cnt;
+ struct octeontx_pkovf *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL) {
+ octeontx_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+ bar0 = pci_dev->mem_resource[0].addr;
+ bar2 = pci_dev->mem_resource[2].addr;
+
+ octeontx_pkovf_setup();
+
+ /* get vfid and domain */
+ val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG);
+ domain = (val >> 7) & 0xffff;
+ vfid = (val >> 23) & 0xffff;
+
+ if (unlikely(vfid >= PKO_VF_MAX)) {
+ octeontx_log_err("pko: Invalid vfid %d", vfid);
+ return -EINVAL;
+ }
+
+ res = &pko_vf_ctl.pko[vf_cnt++];
+ res->vfid = vfid;
+ res->domain = domain;
+ res->bar0 = bar0;
+ res->bar2 = bar2;
+
+ octeontx_log_dbg("Domain=%d group=%d", res->domain, res->vfid);
+ return 0;
+}
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_PKO_VF 0xA049
+
+static const struct rte_pci_id pci_pkovf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_PKO_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_pkovf = {
+ .id_table = pci_pkovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = pkovf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf);
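
Putting the PKO pieces together: once the PCI probe above has populated
pko_vf_ctl, a Tx path comes up by sizing the flow-control memory, claiming
a DQ range for a channel and opening it. A sketch of that sequence (the
channel id is caller-chosen, and claiming the first VF's DQs is an
illustrative choice):

	#include <errno.h>
	#include "octeontx_pkovf.h"

	static int
	pko_tx_setup(int chanid)
	{
		int vf_cnt, res;

		vf_cnt = octeontx_pko_vf_count(); /* VFs found during probe */
		if (vf_cnt == 0)
			return -ENODEV;

		res = octeontx_pko_init_fc(vf_cnt); /* per-DQ FC counters */
		if (res < 0)
			return res;

		/* Reserve DQs 0..PKO_VF_NUM_DQ-1 for this channel. */
		res = octeontx_pko_channel_open(0, PKO_VF_NUM_DQ, chanid);
		if (res < 0)
			return res;

		/* Returns the number of DQs actually opened. */
		return octeontx_pko_channel_start(chanid);
	}
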
diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h
new file mode 100644
index 000000000..7e1aba3e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_PKO_H__
+#define __OCTEONTX_PKO_H__
+
+#include <octeontx_mbox.h>
+
+/* PKO maximum constants */
+#define PKO_VF_MAX (32)
+#define PKO_VF_NUM_DQ (8)
+#define PKO_MAX_NUM_DQ (8)
+#define PKO_DQ_DRAIN_TO (1000)
+
+#define PKO_DQ_FC_SKID (4)
+#define PKO_DQ_FC_DEPTH_PAGES (2048)
+#define PKO_DQ_FC_STRIDE_16 (16)
+#define PKO_DQ_FC_STRIDE_128 (128)
+#define PKO_DQ_FC_STRIDE PKO_DQ_FC_STRIDE_16
+
+#define PKO_DQ_KIND_BIT 49
+#define PKO_DQ_STATUS_BIT 60
+#define PKO_DQ_OP_BIT 48
+
+/* PKO VF register offsets from VF_BAR0 */
+#define PKO_VF_DQ_SW_XOFF(gdq) (0x000100 | (gdq) << 17)
+#define PKO_VF_DQ_WM_CTL(gdq) (0x000130 | (gdq) << 17)
+#define PKO_VF_DQ_WM_CNT(gdq) (0x000150 | (gdq) << 17)
+#define PKO_VF_DQ_FC_CONFIG (0x000160)
+#define PKO_VF_DQ_FC_STATUS(gdq) (0x000168 | (gdq) << 17)
+#define PKO_VF_DQ_OP_SEND(gdq, op) (0x001000 | (gdq) << 17 | (op) << 3)
+#define PKO_VF_DQ_OP_OPEN(gdq) (0x001100 | (gdq) << 17)
+#define PKO_VF_DQ_OP_CLOSE(gdq) (0x001200 | (gdq) << 17)
+#define PKO_VF_DQ_OP_QUERY(gdq) (0x001300 | (gdq) << 17)
+
+/* pko_send_hdr_s + pko_send_link */
+#define PKO_CMD_SZ (2 << 1)
+#define PKO_SEND_BUFLINK_SUBDC (0x0ull << 60)
+#define PKO_SEND_BUFLINK_LDTYPE(x) ((x) << 58)
+#define PKO_SEND_BUFLINK_GAUAR(x) ((x) << 24)
+#define PKO_SEND_GATHER_SUBDC (0x2ull << 60)
+#define PKO_SEND_GATHER_LDTYPE(x) ((x) << 58)
+#define PKO_SEND_GATHER_GAUAR(x) ((x) << 24)
+
+#define OCTEONTX_PKO_COPROC 4
+#define MBOX_PKO_MTU_CONFIG 1
+
+typedef struct mbox_pko_mtu_cfg {
+ uint32_t mtu;
+} mbox_pko_mtu_cfg_t;
+
+typedef struct octeontx_dq_s {
+ void *lmtline_va;
+ void *ioreg_va;
+ void *fc_status_va;
+} octeontx_dq_t;
+
+/**
+ * Function for extracting information out of a given DQ.
+ *
+ * It is intended to be used on the slow path (configuration) in
+ * octeontx_pko_channel_query_dqs().
+ *
+ * @param dq The DQ to extract information from.
+ * @param out Pointer to the caller's structure to be filled.
+ */
+typedef void (*octeontx_pko_dq_getter_t)(octeontx_dq_t *dq, void *out);
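+
+/*
+ * Minimal getter sketch (illustrative, not part of the API): a typical
+ * implementation simply copies the DQ descriptors it needs into the
+ * caller-owned output element.
+ */
+static inline void
+octeontx_pko_dq_copy_getter(octeontx_dq_t *dq, void *out)
+{
+	*(octeontx_dq_t *)out = *dq;	/* copy the three I/O addresses */
+}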
+
+int
+octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
+ size_t dq_num, octeontx_pko_dq_getter_t getter);
+int octeontx_pko_channel_open(int dq_base, int dq_num, int chanid);
+int octeontx_pko_channel_close(int chanid);
+int octeontx_pko_channel_start(int chanid);
+int octeontx_pko_channel_stop(int chanid);
+int octeontx_pko_vf_count(void);
+size_t octeontx_pko_get_vfid(void);
+int octeontx_pko_init_fc(const size_t pko_vf_count);
+void octeontx_pko_fc_free(void);
+int octeontx_pko_send_mtu(int port, int mtu);
+
+#endif /* __OCTEONTX_PKO_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx/meson.build b/src/spdk/dpdk/drivers/net/octeontx/meson.build
new file mode 100644
index 000000000..e8d3ff4a3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+subdir('base')
+objs = [base_objs]
+
+sources = files('octeontx_rxtx.c',
+ 'octeontx_ethdev.c',
+ 'octeontx_ethdev_ops.c'
+ )
+
+deps += ['mempool_octeontx', 'eventdev']
+
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c
new file mode 100644
index 000000000..d5371ae07
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c
@@ -0,0 +1,1672 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_branch_prediction.h>
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_devargs.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_prefetch.h>
+
+#include "octeontx_ethdev.h"
+#include "octeontx_rxtx.h"
+#include "octeontx_logs.h"
+
+struct evdev_priv_data {
+	OFFLOAD_FLAGS; /* Sequence should not be changed */
+} __rte_cache_aligned;
+
+struct octeontx_vdev_init_params {
+ uint8_t nr_port;
+};
+
+uint16_t
+rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX];
+
+enum octeontx_link_speed {
+ OCTEONTX_LINK_SPEED_SGMII,
+ OCTEONTX_LINK_SPEED_XAUI,
+ OCTEONTX_LINK_SPEED_RXAUI,
+ OCTEONTX_LINK_SPEED_10G_R,
+ OCTEONTX_LINK_SPEED_40G_R,
+ OCTEONTX_LINK_SPEED_RESERVE1,
+ OCTEONTX_LINK_SPEED_QSGMII,
+ OCTEONTX_LINK_SPEED_RESERVE2
+};
+
+int otx_net_logtype_mbox;
+int otx_net_logtype_init;
+int otx_net_logtype_driver;
+
+RTE_INIT(otx_net_init_log)
+{
+ otx_net_logtype_mbox = rte_log_register("pmd.net.octeontx.mbox");
+ if (otx_net_logtype_mbox >= 0)
+ rte_log_set_level(otx_net_logtype_mbox, RTE_LOG_NOTICE);
+
+ otx_net_logtype_init = rte_log_register("pmd.net.octeontx.init");
+ if (otx_net_logtype_init >= 0)
+ rte_log_set_level(otx_net_logtype_init, RTE_LOG_NOTICE);
+
+ otx_net_logtype_driver = rte_log_register("pmd.net.octeontx.driver");
+ if (otx_net_logtype_driver >= 0)
+ rte_log_set_level(otx_net_logtype_driver, RTE_LOG_NOTICE);
+}
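+
+/*
+ * Illustrative (not part of the driver): the logtypes registered above
+ * can be raised at runtime, either with the EAL option
+ *   --log-level=pmd.net.octeontx.*:debug
+ * or programmatically, e.g.:
+ */
+static inline void
+octeontx_example_enable_debug_logs(void)
+{
+	rte_log_set_level_pattern("pmd.net.octeontx.*", RTE_LOG_DEBUG);
+}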
+
+/* Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *)extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+		octeontx_log_err("argument must be non-negative.");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+octeontx_parse_vdev_init_params(struct octeontx_vdev_init_params *params,
+ struct rte_vdev_device *dev)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ static const char * const octeontx_vdev_valid_params[] = {
+ OCTEONTX_VDEV_NR_PORT_ARG,
+ NULL
+ };
+
+ const char *input_args = rte_vdev_device_args(dev);
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ octeontx_vdev_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ OCTEONTX_VDEV_NR_PORT_ARG,
+ &parse_integer_arg,
+ &params->nr_port);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
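+
+/*
+ * Illustrative usage (assumption): the nr_port argument handled above is
+ * normally supplied on the EAL command line, e.g.
+ *   --vdev="eth_octeontx,nr_port=2"
+ * or from the application through the vdev bus:
+ */
+static inline int
+octeontx_example_create_vdev(void)
+{
+	return rte_vdev_init("eth_octeontx", "nr_port=2");
+}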
+
+static int
+octeontx_port_open(struct octeontx_nic *nic)
+{
+ octeontx_mbox_bgx_port_conf_t bgx_port_conf;
+ octeontx_mbox_bgx_port_fifo_cfg_t fifo_cfg;
+ int res;
+
+ res = 0;
+ memset(&bgx_port_conf, 0x0, sizeof(bgx_port_conf));
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf);
+ if (res < 0) {
+		octeontx_log_err("failed to open port %d res=%d",
+				 nic->port_id, res);
+ return res;
+ }
+
+ nic->node = bgx_port_conf.node;
+ nic->port_ena = bgx_port_conf.enable;
+ nic->base_ichan = bgx_port_conf.base_chan;
+ nic->base_ochan = bgx_port_conf.base_chan;
+ nic->num_ichans = bgx_port_conf.num_chans;
+ nic->num_ochans = bgx_port_conf.num_chans;
+ nic->bgx_mtu = bgx_port_conf.mtu;
+ nic->bpen = bgx_port_conf.bpen;
+ nic->fcs_strip = bgx_port_conf.fcs_strip;
+ nic->bcast_mode = bgx_port_conf.bcast_mode;
+ nic->mcast_mode = bgx_port_conf.mcast_mode;
+ nic->speed = bgx_port_conf.mode;
+
+ memset(&fifo_cfg, 0x0, sizeof(fifo_cfg));
+
+ res = octeontx_bgx_port_get_fifo_cfg(nic->port_id, &fifo_cfg);
+ if (res < 0) {
+		octeontx_log_err("failed to get port %d fifo cfg res=%d",
+				 nic->port_id, res);
+ return res;
+ }
+
+ nic->fc.rx_fifosz = fifo_cfg.rx_fifosz;
+
+ memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0],
+ RTE_ETHER_ADDR_LEN);
+
+ octeontx_log_dbg("port opened %d", nic->port_id);
+ return res;
+}
+
+static void
+octeontx_link_status_print(struct rte_eth_dev *eth_dev,
+ struct rte_eth_link *link)
+{
+ if (link && link->link_status)
+ octeontx_log_info("Port %u: Link Up - speed %u Mbps - %s",
+ (eth_dev->data->port_id),
+ link->link_speed,
+ link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ else
+ octeontx_log_info("Port %d: Link Down",
+ (int)(eth_dev->data->port_id));
+}
+
+static void
+octeontx_link_status_update(struct octeontx_nic *nic,
+ struct rte_eth_link *link)
+{
+ memset(link, 0, sizeof(*link));
+
+ link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+ switch (nic->speed) {
+ case OCTEONTX_LINK_SPEED_SGMII:
+ link->link_speed = ETH_SPEED_NUM_1G;
+ break;
+
+	case OCTEONTX_LINK_SPEED_XAUI:
+	case OCTEONTX_LINK_SPEED_RXAUI:
+	case OCTEONTX_LINK_SPEED_10G_R:
+		link->link_speed = ETH_SPEED_NUM_10G;
+		break;
+ case OCTEONTX_LINK_SPEED_QSGMII:
+ link->link_speed = ETH_SPEED_NUM_5G;
+ break;
+ case OCTEONTX_LINK_SPEED_40G_R:
+ link->link_speed = ETH_SPEED_NUM_40G;
+ break;
+
+ case OCTEONTX_LINK_SPEED_RESERVE1:
+ case OCTEONTX_LINK_SPEED_RESERVE2:
+ default:
+ link->link_speed = ETH_SPEED_NUM_NONE;
+ octeontx_log_err("incorrect link speed %d", nic->speed);
+ break;
+ }
+
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = ETH_LINK_AUTONEG;
+}
+
+static void
+octeontx_link_status_poll(void *arg)
+{
+ struct octeontx_nic *nic = arg;
+ struct rte_eth_link link;
+ struct rte_eth_dev *dev;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev = nic->dev;
+
+ res = octeontx_bgx_port_link_status(nic->port_id);
+ if (res < 0) {
+ octeontx_log_err("Failed to get port %d link status",
+ nic->port_id);
+ } else {
+ if (nic->link_up != (uint8_t)res) {
+ nic->link_up = (uint8_t)res;
+ octeontx_link_status_update(nic, &link);
+ octeontx_link_status_print(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+ }
+
+ res = rte_eal_alarm_set(OCCTX_INTR_POLL_INTERVAL_MS * 1000,
+ octeontx_link_status_poll, nic);
+ if (res < 0)
+ octeontx_log_err("Failed to restart alarm for port %d, err: %d",
+ nic->port_id, res);
+}
+
+static void
+octeontx_port_close(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eal_alarm_cancel(octeontx_link_status_poll, nic);
+ octeontx_bgx_port_close(nic->port_id);
+ octeontx_log_dbg("port closed %d", nic->port_id);
+}
+
+static int
+octeontx_port_start(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return octeontx_bgx_port_start(nic->port_id);
+}
+
+static int
+octeontx_port_stop(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return octeontx_bgx_port_stop(nic->port_id);
+}
+
+static int
+octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
+{
+ struct rte_eth_dev *dev;
+ int res;
+
+ res = 0;
+ PMD_INIT_FUNC_TRACE();
+ dev = nic->dev;
+
+ res = octeontx_bgx_port_promisc_set(nic->port_id, en);
+ if (res < 0) {
+ octeontx_log_err("failed to set promiscuous mode %d",
+ nic->port_id);
+ return res;
+ }
+
+ /* Set proper flag for the mode */
+ dev->data->promiscuous = (en != 0) ? 1 : 0;
+
+ octeontx_log_dbg("port %d : promiscuous mode %s",
+ nic->port_id, en ? "set" : "unset");
+
+ return 0;
+}
+
+static int
+octeontx_port_stats(struct octeontx_nic *nic, struct rte_eth_stats *stats)
+{
+ octeontx_mbox_bgx_port_stats_t bgx_stats;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_bgx_port_stats(nic->port_id, &bgx_stats);
+ if (res < 0) {
+ octeontx_log_err("failed to get port stats %d", nic->port_id);
+ return res;
+ }
+
+ stats->ipackets = bgx_stats.rx_packets;
+ stats->ibytes = bgx_stats.rx_bytes;
+ stats->imissed = bgx_stats.rx_dropped;
+ stats->ierrors = bgx_stats.rx_errors;
+ stats->opackets = bgx_stats.tx_packets;
+ stats->obytes = bgx_stats.tx_bytes;
+ stats->oerrors = bgx_stats.tx_errors;
+
+ octeontx_log_dbg("port%d stats inpkts=%" PRIx64 " outpkts=%" PRIx64 "",
+ nic->port_id, stats->ipackets, stats->opackets);
+
+ return 0;
+}
+
+static int
+octeontx_port_stats_clr(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return octeontx_bgx_port_stats_clr(nic->port_id);
+}
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+
+ dev_conf->nb_event_ports = info->max_event_ports;
+ dev_conf->nb_event_queues = info->max_event_queues;
+
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
+static uint16_t
+octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ uint16_t flags = 0;
+
+ if (nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F;
+
+ if (nic->tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+ nic->tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
+ nic->tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
+ nic->tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
+
+ if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
+
+ if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ flags |= OCCTX_TX_MULTI_SEG_F;
+
+ return flags;
+}
+
+static uint16_t
+octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ uint16_t flags = 0;
+
+ if (nic->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM))
+ flags |= OCCTX_RX_OFFLOAD_CSUM_F;
+
+ if (nic->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ flags |= OCCTX_RX_OFFLOAD_CSUM_F;
+
+ if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+ flags |= OCCTX_RX_MULTI_SEG_F;
+ eth_dev->data->scattered_rx = 1;
+		/* If scatter mode is enabled, Tx must also be in multi-seg
+		 * mode, otherwise a memory leak will occur.
+		 */
+ nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ }
+
+ return flags;
+}
+
+static int
+octeontx_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ struct rte_eth_txmode *txmode = &conf->txmode;
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ RTE_SET_USED(conf);
+
+ if (!rte_eal_has_hugepages()) {
+ octeontx_log_err("huge page is not configured");
+ return -EINVAL;
+ }
+
+ if (txmode->mq_mode) {
+ octeontx_log_err("tx mq_mode DCB or VMDq not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
+ return -EINVAL;
+ }
+
+ if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+		PMD_INIT_LOG(NOTICE, "can't disable lockfree tx");
+ txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
+ }
+
+ if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ octeontx_log_err("setting link speed/duplex not supported");
+ return -EINVAL;
+ }
+
+ if (conf->dcb_capability_en) {
+ octeontx_log_err("DCB enable not supported");
+ return -EINVAL;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ octeontx_log_err("flow director not supported");
+ return -EINVAL;
+ }
+
+ nic->num_tx_queues = dev->data->nb_tx_queues;
+
+ ret = octeontx_pko_channel_open(nic->pko_vfid * PKO_VF_NUM_DQ,
+ nic->num_tx_queues,
+ nic->base_ochan);
+ if (ret) {
+ octeontx_log_err("failed to open channel %d no-of-txq %d",
+ nic->base_ochan, nic->num_tx_queues);
+ return -EFAULT;
+ }
+
+ ret = octeontx_dev_vlan_offload_init(dev);
+ if (ret) {
+ octeontx_log_err("failed to initialize vlan offload");
+ return -EFAULT;
+ }
+
+ nic->pki.classifier_enable = false;
+ nic->pki.hash_enable = true;
+ nic->pki.initialized = false;
+
+ nic->rx_offloads |= rxmode->offloads;
+ nic->tx_offloads |= txmode->offloads;
+ nic->rx_offload_flags |= octeontx_rx_offload_flags(dev);
+ nic->tx_offload_flags |= octeontx_tx_offload_flags(dev);
+
+ return 0;
+}
+
+static void
+octeontx_dev_close(struct rte_eth_dev *dev)
+{
+ struct octeontx_txq *txq = NULL;
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ unsigned int i;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_event_dev_close(nic->evdev);
+
+ octeontx_dev_flow_ctrl_fini(dev);
+
+ octeontx_dev_vlan_offload_fini(dev);
+
+ ret = octeontx_pko_channel_close(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("failed to close channel %d VF%d %d %d",
+ nic->base_ochan, nic->port_id, nic->num_tx_queues,
+ ret);
+ }
+ /* Free txq resources for this port */
+ for (i = 0; i < nic->num_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+
+ rte_free(txq);
+ }
+
+ /* Free MAC address table */
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ octeontx_port_close(nic);
+
+ dev->tx_pkt_burst = NULL;
+ dev->rx_pkt_burst = NULL;
+}
+
+static int
+octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ uint32_t buffsz, frame_size = mtu + OCCTX_L2_OVERHEAD;
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ int rc = 0;
+
+ /* Check if MTU is within the allowed range */
+ if (frame_size < OCCTX_MIN_FRS || frame_size > OCCTX_MAX_FRS)
+ return -EINVAL;
+
+ buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+
+ /* Refuse MTU that requires the support of scattered packets
+ * when this feature has not been enabled before.
+ */
+ if (data->dev_started && frame_size > buffsz &&
+ !(nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ octeontx_log_err("Scatter mode is disabled");
+ return -EINVAL;
+ }
+
+ /* Check <seg size> * <max_seg> >= max_frame */
+ if ((nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
+ (frame_size > buffsz * OCCTX_RX_NB_SEG_MAX))
+ return -EINVAL;
+
+ rc = octeontx_pko_send_mtu(nic->port_id, frame_size);
+ if (rc)
+ return rc;
+
+ rc = octeontx_bgx_port_mtu_set(nic->port_id, frame_size);
+ if (rc)
+ return rc;
+
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ nic->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ nic->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ /* Update max_rx_pkt_len */
+ data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+	octeontx_log_info("Packets received beyond maxlen %d will be dropped",
+ frame_size);
+
+ return rc;
+}
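+
+/*
+ * Worked example (illustrative): OCCTX_L2_OVERHEAD is 14 (Ethernet
+ * header) + 4 (FCS) + 8 (two VLAN tags) = 26 bytes, so a standard MTU
+ * of 1500 maps to frame_size = 1526, comfortably inside the
+ * [OCCTX_MIN_FRS, OCCTX_MAX_FRS] window checked above.
+ */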
+
+static int
+octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
+{
+ struct rte_eth_dev *eth_dev = rxq->eth_dev;
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct evdev_priv_data *evdev_priv;
+ struct rte_eventdev *dev;
+ uint32_t buffsz;
+
+ /* Get rx buffer size */
+ mbp_priv = rte_mempool_get_priv(rxq->pool);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ /* Setup scatter mode if needed by jumbo */
+ if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
+ nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
+ }
+
+ /* Sharing offload flags via eventdev priv region */
+ dev = &rte_eventdevs[rxq->evdev];
+ evdev_priv = dev->data->dev_private;
+ evdev_priv->rx_offload_flags = nic->rx_offload_flags;
+ evdev_priv->tx_offload_flags = nic->tx_offload_flags;
+
+ /* Setup MTU based on max_rx_pkt_len */
+ nic->mtu = data->dev_conf.rxmode.max_rx_pkt_len - OCCTX_L2_OVERHEAD;
+
+ return 0;
+}
+
+static int
+octeontx_dev_start(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_rxq *rxq;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+	/* Recheck whether any new offloads were set, so the Rx/Tx burst
+	 * function pointers can be updated accordingly.
+	 */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ octeontx_recheck_rx_offloads(rxq);
+ }
+
+ /* Setting up the mtu based on max_rx_pkt_len */
+ ret = octeontx_dev_mtu_set(dev, nic->mtu);
+ if (ret) {
+ octeontx_log_err("Failed to set default MTU size %d", ret);
+ goto error;
+ }
+
+ /*
+ * Tx start
+ */
+ octeontx_set_tx_function(dev);
+ ret = octeontx_pko_channel_start(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("fail to conf VF%d no. txq %d chan %d ret %d",
+ nic->port_id, nic->num_tx_queues, nic->base_ochan,
+ ret);
+ goto error;
+ }
+
+ /*
+ * Rx start
+ */
+ dev->rx_pkt_burst = octeontx_recv_pkts;
+ ret = octeontx_pki_port_start(nic->port_id);
+ if (ret < 0) {
+		octeontx_log_err("failed to start Rx on port %d",
+				 nic->port_id);
+ goto channel_stop_error;
+ }
+
+ /*
+ * Start port
+ */
+ ret = octeontx_port_start(nic);
+ if (ret < 0) {
+		octeontx_log_err("failed to start port %d res=%d",
+				 nic->port_id, ret);
+ goto pki_port_stop_error;
+ }
+
+ PMD_TX_LOG(DEBUG, "pko: start channel %d no.of txq %d port %d",
+ nic->base_ochan, nic->num_tx_queues, nic->port_id);
+
+ ret = rte_event_dev_start(nic->evdev);
+ if (ret < 0) {
+ octeontx_log_err("failed to start evdev: ret (%d)", ret);
+ goto pki_port_stop_error;
+ }
+
+ /* Success */
+ return ret;
+
+pki_port_stop_error:
+ octeontx_pki_port_stop(nic->port_id);
+channel_stop_error:
+ octeontx_pko_channel_stop(nic->base_ochan);
+error:
+ return ret;
+}
+
+static void
+octeontx_dev_stop(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_event_dev_stop(nic->evdev);
+
+ ret = octeontx_port_stop(nic);
+ if (ret < 0) {
+ octeontx_log_err("failed to req stop port %d res=%d",
+ nic->port_id, ret);
+ return;
+ }
+
+ ret = octeontx_pki_port_stop(nic->port_id);
+ if (ret < 0) {
+ octeontx_log_err("failed to stop pki port %d res=%d",
+ nic->port_id, ret);
+ return;
+ }
+
+ ret = octeontx_pko_channel_stop(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("failed to stop channel %d VF%d %d %d",
+ nic->base_ochan, nic->port_id, nic->num_tx_queues,
+ ret);
+ return;
+ }
+}
+
+static int
+octeontx_dev_promisc_enable(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_promisc_set(nic, 1);
+}
+
+static int
+octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_promisc_set(nic, 0);
+}
+
+static int
+octeontx_port_link_status(struct octeontx_nic *nic)
+{
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+ res = octeontx_bgx_port_link_status(nic->port_id);
+ if (res < 0) {
+ octeontx_log_err("failed to get port %d link status",
+ nic->port_id);
+ return res;
+ }
+
+ if (nic->link_up != (uint8_t)res || nic->print_flag == -1) {
+ nic->link_up = (uint8_t)res;
+ nic->print_flag = 1;
+ }
+ octeontx_log_dbg("port %d link status %d", nic->port_id, nic->link_up);
+
+ return res;
+}
+
+/*
+ * Return 0 means link status changed, -1 means not changed
+ */
+static int
+octeontx_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_eth_link link;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_port_link_status(nic);
+ if (res < 0) {
+ octeontx_log_err("failed to request link status %d", res);
+ return res;
+ }
+
+ octeontx_link_status_update(nic, &link);
+ if (nic->print_flag) {
+ octeontx_link_status_print(nic->dev, &link);
+ nic->print_flag = 0;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+octeontx_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_stats(nic, stats);
+}
+
+static int
+octeontx_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_stats_clr(nic);
+}
+
+static void
+octeontx_dev_mac_addr_del(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ ret = octeontx_bgx_port_mac_del(nic->port_id, index);
+ if (ret != 0)
+ octeontx_log_err("failed to del MAC address filter on port %d",
+ nic->port_id);
+}
+
+static int
+octeontx_dev_mac_addr_add(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr,
+ uint32_t index,
+ __rte_unused uint32_t vmdq)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ ret = octeontx_bgx_port_mac_add(nic->port_id, mac_addr->addr_bytes,
+ index);
+ if (ret < 0) {
+ octeontx_log_err("failed to add MAC address filter on port %d",
+ nic->port_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
+ if (ret == 0) {
+ /* Update same mac address to BGX CAM table */
+ ret = octeontx_bgx_port_mac_add(nic->port_id, addr->addr_bytes,
+ 0);
+ }
+ if (ret < 0) {
+ octeontx_log_err("failed to set MAC address on port %d",
+ nic->port_id);
+ }
+
+ return ret;
+}
+
+static int
+octeontx_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_40G;
+
+ /* Min/Max MTU supported */
+ dev_info->min_rx_bufsize = OCCTX_MIN_FRS;
+ dev_info->max_rx_pktlen = OCCTX_MAX_FRS;
+ dev_info->max_mtu = dev_info->max_rx_pktlen - OCCTX_L2_OVERHEAD;
+ dev_info->min_mtu = dev_info->min_rx_bufsize - OCCTX_L2_OVERHEAD;
+
+ dev_info->max_mac_addrs =
+ octeontx_bgx_port_mac_entries_get(nic->port_id);
+ dev_info->max_rx_pktlen = PKI_MAX_PKTLEN;
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = PKO_MAX_NUM_DQ;
+ dev_info->min_rx_bufsize = 0;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = 0,
+ .rx_drop_en = 0,
+ .offloads = OCTEONTX_RX_OFFLOADS,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = 0,
+ .offloads = OCTEONTX_TX_OFFLOADS,
+ };
+
+ dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS;
+ dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS;
+ dev_info->rx_queue_offload_capa = OCTEONTX_RX_OFFLOADS;
+ dev_info->tx_queue_offload_capa = OCTEONTX_TX_OFFLOADS;
+
+ return 0;
+}
+
+static void
+octeontx_dq_info_getter(octeontx_dq_t *dq, void *out)
+{
+ ((octeontx_dq_t *)out)->lmtline_va = dq->lmtline_va;
+ ((octeontx_dq_t *)out)->ioreg_va = dq->ioreg_va;
+ ((octeontx_dq_t *)out)->fc_status_va = dq->fc_status_va;
+}
+
+static int
+octeontx_vf_start_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
+ uint16_t qidx)
+{
+ struct octeontx_txq *txq;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ txq = dev->data->tx_queues[qidx];
+
+ res = octeontx_pko_channel_query_dqs(nic->base_ochan,
+ &txq->dq,
+ sizeof(octeontx_dq_t),
+ txq->queue_id,
+ octeontx_dq_info_getter);
+ if (res < 0) {
+ res = -EFAULT;
+ goto close_port;
+ }
+
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return res;
+
+close_port:
+ (void)octeontx_port_stop(nic);
+ octeontx_pko_channel_stop(nic->base_ochan);
+ octeontx_pko_channel_close(nic->base_ochan);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return res;
+}
+
+int
+octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ qidx = qidx % PKO_VF_NUM_DQ;
+ return octeontx_vf_start_tx_queue(dev, nic, qidx);
+}
+
+static inline int
+octeontx_vf_stop_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
+ uint16_t qidx)
+{
+ int ret = 0;
+
+ RTE_SET_USED(nic);
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return ret;
+}
+
+int
+octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ qidx = qidx % PKO_VF_NUM_DQ;
+
+ return octeontx_vf_stop_tx_queue(dev, nic, qidx);
+}
+
+static void
+octeontx_dev_tx_queue_release(void *tx_queue)
+{
+ struct octeontx_txq *txq = tx_queue;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (txq) {
+ res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);
+ if (res < 0)
+			octeontx_log_err("failed to stop tx_queue(%d)",
+ txq->queue_id);
+
+ rte_free(txq);
+ }
+}
+
+static int
+octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_txq *txq = NULL;
+ uint16_t dq_num;
+ int res = 0;
+
+ RTE_SET_USED(nb_desc);
+ RTE_SET_USED(socket_id);
+
+ dq_num = (nic->pko_vfid * PKO_VF_NUM_DQ) + qidx;
+
+ /* Socket id check */
+ if (socket_id != (unsigned int)SOCKET_ID_ANY &&
+ socket_id != (unsigned int)nic->node)
+ PMD_TX_LOG(INFO, "socket_id expected %d, configured %d",
+ socket_id, nic->node);
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->tx_queues[qidx] != NULL) {
+ PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d",
+ qidx);
+ octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);
+ dev->data->tx_queues[qidx] = NULL;
+ }
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (txq == NULL) {
+ octeontx_log_err("failed to allocate txq=%d", qidx);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ txq->eth_dev = dev;
+ txq->queue_id = dq_num;
+ dev->data->tx_queues[qidx] = txq;
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ res = octeontx_pko_channel_query_dqs(nic->base_ochan,
+ &txq->dq,
+ sizeof(octeontx_dq_t),
+ txq->queue_id,
+ octeontx_dq_info_getter);
+ if (res < 0) {
+ res = -EFAULT;
+ goto err;
+ }
+
+ PMD_TX_LOG(DEBUG, "[%d]:[%d] txq=%p nb_desc=%d lmtline=%p ioreg_va=%p fc_status_va=%p",
+ qidx, txq->queue_id, txq, nb_desc, txq->dq.lmtline_va,
+ txq->dq.ioreg_va,
+ txq->dq.fc_status_va);
+
+ return res;
+
+err:
+ if (txq)
+ rte_free(txq);
+
+ return res;
+}
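+
+/*
+ * Worked example (illustrative): with PKO_VF_NUM_DQ = 8, a NIC bound to
+ * pko_vfid = 1 maps Tx queue qidx = 3 to the global descriptor queue
+ * dq_num = 1 * 8 + 3 = 11, which becomes the queue_id stored above.
+ */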
+
+static int
+octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_mempool_ops *mp_ops = NULL;
+ struct octeontx_rxq *rxq = NULL;
+ pki_pktbuf_cfg_t pktbuf_conf;
+ pki_hash_cfg_t pki_hash;
+ pki_qos_cfg_t pki_qos;
+ uintptr_t pool;
+ int ret, port;
+ uint16_t gaura;
+ unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
+ unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
+
+ RTE_SET_USED(nb_desc);
+
+ memset(&pktbuf_conf, 0, sizeof(pktbuf_conf));
+ memset(&pki_hash, 0, sizeof(pki_hash));
+ memset(&pki_qos, 0, sizeof(pki_qos));
+
+ mp_ops = rte_mempool_get_ops(mb_pool->ops_index);
+ if (strcmp(mp_ops->name, "octeontx_fpavf")) {
+ octeontx_log_err("failed to find octeontx_fpavf mempool");
+ return -ENOTSUP;
+ }
+
+ /* Handle forbidden configurations */
+ if (nic->pki.classifier_enable) {
+ octeontx_log_err("cannot setup queue %d. "
+ "Classifier option unsupported", qidx);
+ return -EINVAL;
+ }
+
+ port = nic->port_id;
+
+ /* Rx deferred start is not supported */
+ if (rx_conf->rx_deferred_start) {
+ octeontx_log_err("rx deferred start not supported");
+ return -EINVAL;
+ }
+
+ /* Verify queue index */
+ if (qidx >= dev->data->nb_rx_queues) {
+		octeontx_log_err("QID %d not supported (0 - %d available)",
+ qidx, (dev->data->nb_rx_queues - 1));
+ return -ENOTSUP;
+ }
+
+ /* Socket id check */
+ if (socket_id != (unsigned int)SOCKET_ID_ANY &&
+ socket_id != (unsigned int)nic->node)
+ PMD_RX_LOG(INFO, "socket_id expected %d, configured %d",
+ socket_id, nic->node);
+
+ /* Allocating rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct octeontx_rxq),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (rxq == NULL) {
+ octeontx_log_err("failed to allocate rxq=%d", qidx);
+ return -ENOMEM;
+ }
+
+ if (!nic->pki.initialized) {
+ pktbuf_conf.port_type = 0;
+ pki_hash.port_type = 0;
+ pki_qos.port_type = 0;
+
+ pktbuf_conf.mmask.f_wqe_skip = 1;
+ pktbuf_conf.mmask.f_first_skip = 1;
+ pktbuf_conf.mmask.f_later_skip = 1;
+ pktbuf_conf.mmask.f_mbuff_size = 1;
+ pktbuf_conf.mmask.f_cache_mode = 1;
+
+ pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP;
+ pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP(mb_pool);
+ pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP;
+ pktbuf_conf.mbuff_size = (mb_pool->elt_size -
+ RTE_PKTMBUF_HEADROOM -
+ rte_pktmbuf_priv_size(mb_pool) -
+ sizeof(struct rte_mbuf));
+
+ pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT;
+
+ ret = octeontx_pki_port_pktbuf_config(port, &pktbuf_conf);
+ if (ret != 0) {
+ octeontx_log_err("fail to configure pktbuf for port %d",
+ port);
+ rte_free(rxq);
+ return ret;
+ }
+ PMD_RX_LOG(DEBUG, "Port %d Rx pktbuf configured:\n"
+ "\tmbuf_size:\t0x%0x\n"
+ "\twqe_skip:\t0x%0x\n"
+ "\tfirst_skip:\t0x%0x\n"
+ "\tlater_skip:\t0x%0x\n"
+ "\tcache_mode:\t%s\n",
+ port,
+ pktbuf_conf.mbuff_size,
+ pktbuf_conf.wqe_skip,
+ pktbuf_conf.first_skip,
+ pktbuf_conf.later_skip,
+ (pktbuf_conf.cache_mode ==
+ PKI_OPC_MODE_STT) ?
+ "STT" :
+ (pktbuf_conf.cache_mode ==
+ PKI_OPC_MODE_STF) ?
+ "STF" :
+ (pktbuf_conf.cache_mode ==
+ PKI_OPC_MODE_STF1_STT) ?
+ "STF1_STT" : "STF2_STT");
+
+ if (nic->pki.hash_enable) {
+ pki_hash.tag_dlc = 1;
+ pki_hash.tag_slc = 1;
+ pki_hash.tag_dlf = 1;
+ pki_hash.tag_slf = 1;
+ pki_hash.tag_prt = 1;
+ octeontx_pki_port_hash_config(port, &pki_hash);
+ }
+
+ pool = (uintptr_t)mb_pool->pool_id;
+
+ /* Get the gaura Id */
+ gaura = octeontx_fpa_bufpool_gaura(pool);
+
+ pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
+ pki_qos.num_entry = 1;
+ pki_qos.drop_policy = 0;
+ pki_qos.tag_type = 0L;
+ pki_qos.qos_entry[0].port_add = 0;
+ pki_qos.qos_entry[0].gaura = gaura;
+ pki_qos.qos_entry[0].ggrp_ok = ev_queues;
+ pki_qos.qos_entry[0].ggrp_bad = ev_queues;
+ pki_qos.qos_entry[0].grptag_bad = 0;
+ pki_qos.qos_entry[0].grptag_ok = 0;
+
+ ret = octeontx_pki_port_create_qos(port, &pki_qos);
+ if (ret < 0) {
+ octeontx_log_err("failed to create QOS port=%d, q=%d",
+ port, qidx);
+ rte_free(rxq);
+ return ret;
+ }
+ nic->pki.initialized = true;
+ }
+
+ rxq->port_id = nic->port_id;
+ rxq->eth_dev = dev;
+ rxq->queue_id = qidx;
+ rxq->evdev = nic->evdev;
+ rxq->ev_queues = ev_queues;
+ rxq->ev_ports = ev_ports;
+ rxq->pool = mb_pool;
+
+ octeontx_recheck_rx_offloads(rxq);
+ dev->data->rx_queues[qidx] = rxq;
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
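+
+/*
+ * Note (illustrative): since max_rx_queues is reported as 1 and
+ * nic->ev_queues = nic->ev_ports = 1 at create time, the event queue
+ * and port bound above reduce to 1 * port_id + 0, i.e. one event
+ * queue/port pair per ethdev port.
+ */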
+
+static void
+octeontx_dev_rx_queue_release(void *rxq)
+{
+ rte_free(rxq);
+}
+
+static const uint32_t *
+octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == octeontx_recv_pkts)
+ return ptypes;
+
+ return NULL;
+}
+
+static int
+octeontx_pool_ops(struct rte_eth_dev *dev, const char *pool)
+{
+ RTE_SET_USED(dev);
+
+ if (!strcmp(pool, "octeontx_fpavf"))
+ return 0;
+
+ return -ENOTSUP;
+}
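+
+/*
+ * Illustrative (assumption): because only the octeontx_fpavf mempool ops
+ * are accepted above, applications typically create their mbuf pool with
+ * those ops explicitly, e.g.:
+ */
+static inline struct rte_mempool *
+octeontx_example_create_pool(int node)
+{
+	return rte_pktmbuf_pool_create_by_ops("octeontx_mbuf", 4096, 256, 0,
+					      RTE_MBUF_DEFAULT_BUF_SIZE,
+					      node, "octeontx_fpavf");
+}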
+
+/* Initialize and register driver with DPDK Application */
+static const struct eth_dev_ops octeontx_dev_ops = {
+ .dev_configure = octeontx_dev_configure,
+ .dev_infos_get = octeontx_dev_info,
+ .dev_close = octeontx_dev_close,
+ .dev_start = octeontx_dev_start,
+ .dev_stop = octeontx_dev_stop,
+ .promiscuous_enable = octeontx_dev_promisc_enable,
+ .promiscuous_disable = octeontx_dev_promisc_disable,
+ .link_update = octeontx_dev_link_update,
+ .stats_get = octeontx_dev_stats_get,
+ .stats_reset = octeontx_dev_stats_reset,
+ .mac_addr_remove = octeontx_dev_mac_addr_del,
+ .mac_addr_add = octeontx_dev_mac_addr_add,
+ .mac_addr_set = octeontx_dev_default_mac_addr_set,
+ .vlan_offload_set = octeontx_dev_vlan_offload_set,
+ .vlan_filter_set = octeontx_dev_vlan_filter_set,
+ .tx_queue_start = octeontx_dev_tx_queue_start,
+ .tx_queue_stop = octeontx_dev_tx_queue_stop,
+ .tx_queue_setup = octeontx_dev_tx_queue_setup,
+ .tx_queue_release = octeontx_dev_tx_queue_release,
+ .rx_queue_setup = octeontx_dev_rx_queue_setup,
+ .rx_queue_release = octeontx_dev_rx_queue_release,
+ .dev_set_link_up = octeontx_dev_set_link_up,
+ .dev_set_link_down = octeontx_dev_set_link_down,
+ .dev_supported_ptypes_get = octeontx_dev_supported_ptypes_get,
+ .mtu_set = octeontx_dev_mtu_set,
+ .pool_ops_supported = octeontx_pool_ops,
+ .flow_ctrl_get = octeontx_dev_flow_ctrl_get,
+ .flow_ctrl_set = octeontx_dev_flow_ctrl_set,
+};
+
+/* Create Ethdev interface per BGX LMAC ports */
+static int
+octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
+ int socket_id)
+{
+ int res;
+ size_t pko_vfid;
+ char octtx_name[OCTEONTX_MAX_NAME_LEN];
+ struct octeontx_nic *nic = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct rte_eth_dev_data *data;
+ const char *name = rte_vdev_device_name(dev);
+ int max_entries;
+
+ PMD_INIT_FUNC_TRACE();
+
+	snprintf(octtx_name, sizeof(octtx_name), "%s_%d", name, port);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_attach_secondary(octtx_name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ eth_dev->dev_ops = &octeontx_dev_ops;
+ eth_dev->device = &dev->device;
+ octeontx_set_tx_function(eth_dev);
+ eth_dev->rx_pkt_burst = octeontx_recv_pkts;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ /* Reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(octtx_name);
+ if (eth_dev == NULL) {
+ octeontx_log_err("failed to allocate rte_eth_dev");
+ res = -ENOMEM;
+ goto err;
+ }
+ data = eth_dev->data;
+
+ nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id);
+ if (nic == NULL) {
+ octeontx_log_err("failed to allocate nic structure");
+ res = -ENOMEM;
+ goto err;
+ }
+ data->dev_private = nic;
+ pko_vfid = octeontx_pko_get_vfid();
+
+ if (pko_vfid == SIZE_MAX) {
+ octeontx_log_err("failed to get pko vfid");
+ res = -ENODEV;
+ goto err;
+ }
+
+ nic->pko_vfid = pko_vfid;
+ nic->port_id = port;
+ nic->evdev = evdev;
+
+ res = octeontx_port_open(nic);
+ if (res < 0)
+ goto err;
+
+ /* Rx side port configuration */
+ res = octeontx_pki_port_open(port);
+ if (res != 0) {
+ octeontx_log_err("failed to open PKI port %d", port);
+ res = -ENODEV;
+ goto err;
+ }
+
+ eth_dev->device = &dev->device;
+ eth_dev->intr_handle = NULL;
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->data->numa_node = dev->device.numa_node;
+
+ data->port_id = eth_dev->data->port_id;
+
+ nic->ev_queues = 1;
+ nic->ev_ports = 1;
+ nic->print_flag = -1;
+
+ data->dev_link.link_status = ETH_LINK_DOWN;
+ data->dev_started = 0;
+ data->promiscuous = 0;
+ data->all_multicast = 0;
+ data->scattered_rx = 0;
+
+ /* Get maximum number of supported MAC entries */
+ max_entries = octeontx_bgx_port_mac_entries_get(nic->port_id);
+ if (max_entries < 0) {
+ octeontx_log_err("Failed to get max entries for mac addr");
+ res = -ENOTSUP;
+ goto err;
+ }
+
+ data->mac_addrs = rte_zmalloc_socket(octtx_name, max_entries *
+ RTE_ETHER_ADDR_LEN, 0,
+ socket_id);
+ if (data->mac_addrs == NULL) {
+ octeontx_log_err("failed to allocate memory for mac_addrs");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ eth_dev->dev_ops = &octeontx_dev_ops;
+
+ /* Finally save ethdev pointer to the NIC structure */
+ nic->dev = eth_dev;
+
+ if (nic->port_id != data->port_id) {
+		octeontx_log_err("eth_dev->port_id (%d) differs from original (%d)",
+ data->port_id, nic->port_id);
+ res = -EINVAL;
+ goto free_mac_addrs;
+ }
+
+ res = rte_eal_alarm_set(OCCTX_INTR_POLL_INTERVAL_MS * 1000,
+ octeontx_link_status_poll, nic);
+ if (res) {
+ octeontx_log_err("Failed to start link polling alarm");
+ goto err;
+ }
+
+ /* Update port_id mac to eth_dev */
+ memcpy(data->mac_addrs, nic->mac_addr, RTE_ETHER_ADDR_LEN);
+
+ /* Update same mac address to BGX CAM table at index 0 */
+ octeontx_bgx_port_mac_add(nic->port_id, nic->mac_addr, 0);
+
+ res = octeontx_dev_flow_ctrl_init(eth_dev);
+ if (res < 0)
+ goto err;
+
+ PMD_INIT_LOG(DEBUG, "ethdev info: ");
+ PMD_INIT_LOG(DEBUG, "port %d, port_ena %d ochan %d num_ochan %d tx_q %d",
+ nic->port_id, nic->port_ena,
+ nic->base_ochan, nic->num_ochans,
+ nic->num_tx_queues);
+ PMD_INIT_LOG(DEBUG, "speed %d mtu %d", nic->speed, nic->bgx_mtu);
+
+ rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7]
+ [(nic->base_ochan >> 4) & 0xF] = data->port_id;
+
+ rte_eth_dev_probing_finish(eth_dev);
+ return data->port_id;
+
+free_mac_addrs:
+ rte_free(data->mac_addrs);
+ data->mac_addrs = NULL;
+err:
+ if (nic)
+ octeontx_port_close(nic);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return res;
+}
+
+/* Uninitialize octeontx device */
+static int
+octeontx_remove(struct rte_vdev_device *dev)
+{
+ char octtx_name[OCTEONTX_MAX_NAME_LEN];
+ struct rte_eth_dev *eth_dev = NULL;
+ struct octeontx_nic *nic = NULL;
+ int i;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT; i++) {
+		snprintf(octtx_name, sizeof(octtx_name), "eth_octeontx_%d", i);
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(octtx_name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ rte_eth_dev_release_port(eth_dev);
+ continue;
+ }
+
+ nic = octeontx_pmd_priv(eth_dev);
+ rte_event_dev_stop(nic->evdev);
+ PMD_INIT_LOG(INFO, "Closing octeontx device %s", octtx_name);
+
+ rte_eth_dev_release_port(eth_dev);
+ rte_event_dev_close(nic->evdev);
+ }
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Free FC resource */
+ octeontx_pko_fc_free();
+
+ return 0;
+}
+
+/* Initialize octeontx device */
+static int
+octeontx_probe(struct rte_vdev_device *dev)
+{
+ const char *dev_name;
+ static int probe_once;
+ uint8_t socket_id, qlist;
+ int tx_vfcnt, port_id, evdev, qnum, pnum, res, i;
+ struct rte_event_dev_config dev_conf;
+ const char *eventdev_name = "event_octeontx";
+ struct rte_event_dev_info info;
+ struct rte_eth_dev *eth_dev;
+
+ struct octeontx_vdev_init_params init_params = {
+ OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
+ };
+
+ dev_name = rte_vdev_device_name(dev);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(dev_name);
+ if (!eth_dev) {
+ PMD_INIT_LOG(ERR, "Failed to probe %s", dev_name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &octeontx_dev_ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ res = octeontx_parse_vdev_init_params(&init_params, dev);
+ if (res < 0)
+ return -EINVAL;
+
+ if (init_params.nr_port > OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT) {
+ octeontx_log_err("nr_port (%d) > max (%d)", init_params.nr_port,
+ OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT);
+ return -ENOTSUP;
+ }
+
+ PMD_INIT_LOG(DEBUG, "initializing %s pmd", dev_name);
+
+ socket_id = rte_socket_id();
+
+ tx_vfcnt = octeontx_pko_vf_count();
+
+ if (tx_vfcnt < init_params.nr_port) {
+ octeontx_log_err("not enough PKO (%d) for port number (%d)",
+ tx_vfcnt, init_params.nr_port);
+ return -EINVAL;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ octeontx_log_err("eventdev %s not found", eventdev_name);
+ return -ENODEV;
+ }
+
+ res = rte_event_dev_info_get(evdev, &info);
+ if (res < 0) {
+		octeontx_log_err("failed to get eventdev info %d", res);
+ return -EINVAL;
+ }
+
+ PMD_INIT_LOG(DEBUG, "max_queue %d max_port %d",
+ info.max_event_queues, info.max_event_ports);
+
+ if (octeontx_pko_init_fc(tx_vfcnt))
+ return -ENOMEM;
+
+ devconf_set_default_sane_values(&dev_conf, &info);
+ res = rte_event_dev_configure(evdev, &dev_conf);
+ if (res < 0)
+ goto parse_error;
+
+ rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ (uint32_t *)&pnum);
+ rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ (uint32_t *)&qnum);
+ if (pnum < qnum) {
+ octeontx_log_err("too few event ports (%d) for event_q(%d)",
+ pnum, qnum);
+ res = -EINVAL;
+ goto parse_error;
+ }
+
+ /* Enable all queues available */
+ for (i = 0; i < qnum; i++) {
+ res = rte_event_queue_setup(evdev, i, NULL);
+ if (res < 0) {
+ octeontx_log_err("failed to setup event_q(%d): res %d",
+ i, res);
+ goto parse_error;
+ }
+ }
+
+ /* Enable all ports available */
+ for (i = 0; i < pnum; i++) {
+ res = rte_event_port_setup(evdev, i, NULL);
+ if (res < 0) {
+ res = -ENODEV;
+ octeontx_log_err("failed to setup ev port(%d) res=%d",
+ i, res);
+ goto parse_error;
+ }
+ }
+
+ /*
+ * Do 1:1 links for ports & queues. All queues would be mapped to
+ * one port. If there are more ports than queues, then some ports
+ * won't be linked to any queue.
+ */
+ for (i = 0; i < qnum; i++) {
+ /* Link one queue to one event port */
+ qlist = i;
+ res = rte_event_port_link(evdev, i, &qlist, NULL, 1);
+ if (res < 0) {
+ res = -ENODEV;
+ octeontx_log_err("failed to link port (%d): res=%d",
+ i, res);
+ goto parse_error;
+ }
+ }
+
+ /* Create ethdev interface */
+ for (i = 0; i < init_params.nr_port; i++) {
+ port_id = octeontx_create(dev, i, evdev, socket_id);
+ if (port_id < 0) {
+ octeontx_log_err("failed to create device %s",
+ dev_name);
+ res = -ENODEV;
+ goto parse_error;
+ }
+
+ PMD_INIT_LOG(INFO, "created ethdev %s for port %d", dev_name,
+ port_id);
+ }
+
+ if (probe_once) {
+ octeontx_log_err("interface %s not supported", dev_name);
+ octeontx_remove(dev);
+ res = -ENOTSUP;
+ goto parse_error;
+ }
+ rte_mbuf_set_platform_mempool_ops("octeontx_fpavf");
+ probe_once = 1;
+
+ return 0;
+
+parse_error:
+ octeontx_pko_fc_free();
+ return res;
+}
+
+static struct rte_vdev_driver octeontx_pmd_drv = {
+ .probe = octeontx_probe,
+ .remove = octeontx_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(OCTEONTX_PMD, octeontx_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(OCTEONTX_PMD, eth_octeontx);
+RTE_PMD_REGISTER_PARAM_STRING(OCTEONTX_PMD, "nr_port=<int> ");
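+
+/*
+ * Illustrative invocation (assumption): the PMD pairs with the octeontx
+ * eventdev, so a typical launch supplies both vdevs, e.g.
+ *   testpmd --vdev="event_octeontx" --vdev="eth_octeontx,nr_port=2" -- -i
+ */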
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h
new file mode 100644
index 000000000..7246fb6d1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_ETHDEV_H__
+#define __OCTEONTX_ETHDEV_H__
+
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_ethdev_driver.h>
+#include <rte_eventdev.h>
+#include <rte_mempool.h>
+#include <rte_memory.h>
+
+#include <octeontx_fpavf.h>
+
+#include "base/octeontx_bgx.h"
+#include "base/octeontx_pki_var.h"
+#include "base/octeontx_pkivf.h"
+#include "base/octeontx_pkovf.h"
+#include "base/octeontx_io.h"
+
+#define OCTEONTX_PMD net_octeontx
+#define OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT 12
+#define OCTEONTX_VDEV_NR_PORT_ARG ("nr_port")
+#define OCTEONTX_MAX_NAME_LEN 32
+
+#define OCTEONTX_MAX_BGX_PORTS 4
+#define OCTEONTX_MAX_LMAC_PER_BGX 4
+
+#define OCCTX_RX_NB_SEG_MAX 6
+#define OCCTX_INTR_POLL_INTERVAL_MS 1000
+/* VLAN tag inserted by OCCTX_TX_VTAG_ACTION.
+ * In Tx, space for this is always reserved in the FRS.
+ */
+#define OCCTX_MAX_VTAG_INS 2
+#define OCCTX_MAX_VTAG_ACT_SIZE (4 * OCCTX_MAX_VTAG_INS)
+
+/* HW config of frame size doesn't include FCS */
+#define OCCTX_MAX_HW_FRS 9212
+#define OCCTX_MIN_HW_FRS 60
+
+/* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */
+#define OCCTX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
+ OCCTX_MAX_VTAG_ACT_SIZE)
+
+/* HW FRS includes NPC VTAG insertion space, so the user-visible FRS is reduced */
+#define OCCTX_MAX_FRS \
+ (OCCTX_MAX_HW_FRS + RTE_ETHER_CRC_LEN - OCCTX_MAX_VTAG_ACT_SIZE)
+
+#define OCCTX_MIN_FRS (OCCTX_MIN_HW_FRS + RTE_ETHER_CRC_LEN)
+
+#define OCCTX_MAX_MTU (OCCTX_MAX_FRS - OCCTX_L2_OVERHEAD)
+
+#define OCTEONTX_RX_OFFLOADS ( \
+ DEV_RX_OFFLOAD_CHECKSUM | \
+ DEV_RX_OFFLOAD_SCTP_CKSUM | \
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	DEV_RX_OFFLOAD_SCATTER | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+
+#define OCTEONTX_TX_OFFLOADS ( \
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
+ DEV_TX_OFFLOAD_MT_LOCKFREE | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_SCTP_CKSUM | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+static inline struct octeontx_nic *
+octeontx_pmd_priv(struct rte_eth_dev *dev)
+{
+ return dev->data->dev_private;
+}
+
+extern uint16_t
+rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX];
+
+struct vlan_entry {
+ TAILQ_ENTRY(vlan_entry) next;
+ uint16_t vlan_id;
+};
+
+TAILQ_HEAD(octeontx_vlan_filter_tbl, vlan_entry);
+
+struct octeontx_vlan_info {
+ struct octeontx_vlan_filter_tbl fltr_tbl;
+ uint8_t filter_on;
+};
+
+struct octeontx_fc_info {
+ enum rte_eth_fc_mode mode; /**< Link flow control mode */
+ enum rte_eth_fc_mode def_mode;
+ uint16_t high_water;
+ uint16_t low_water;
+ uint16_t def_highmark;
+ uint16_t def_lowmark;
+ uint32_t rx_fifosz;
+};
+
+/* Octeontx ethdev nic */
+struct octeontx_nic {
+ struct rte_eth_dev *dev;
+ int node;
+ int port_id;
+ int port_ena;
+ int base_ichan;
+ int num_ichans;
+ int base_ochan;
+ int num_ochans;
+ uint8_t evdev;
+ uint8_t bpen;
+ uint8_t fcs_strip;
+ uint8_t bcast_mode;
+ uint8_t mcast_mode;
+ uint16_t num_tx_queues;
+ uint64_t hwcap;
+ uint8_t pko_vfid;
+ uint8_t link_up;
+ uint8_t duplex;
+ uint8_t speed;
+ uint16_t bgx_mtu;
+ uint16_t mtu;
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+ /* Rx port parameters */
+ struct {
+ bool classifier_enable;
+ bool hash_enable;
+ bool initialized;
+ } pki;
+
+ uint16_t ev_queues;
+ uint16_t ev_ports;
+ uint64_t rx_offloads;
+ uint16_t rx_offload_flags;
+ uint64_t tx_offloads;
+ uint16_t tx_offload_flags;
+ struct octeontx_vlan_info vlan_info;
+ int print_flag;
+ struct octeontx_fc_info fc;
+} __rte_cache_aligned;
+
+struct octeontx_txq {
+ uint16_t queue_id;
+ octeontx_dq_t dq;
+ struct rte_eth_dev *eth_dev;
+} __rte_cache_aligned;
+
+struct octeontx_rxq {
+ uint16_t queue_id;
+ uint16_t port_id;
+ uint8_t evdev;
+ struct rte_eth_dev *eth_dev;
+ uint16_t ev_queues;
+ uint16_t ev_ports;
+ struct rte_mempool *pool;
+} __rte_cache_aligned;
+
+void
+octeontx_set_tx_function(struct rte_eth_dev *dev);
+
+/* VLAN */
+int octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx);
+int octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx);
+int octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev);
+int octeontx_dev_vlan_offload_fini(struct rte_eth_dev *eth_dev);
+int octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+int octeontx_dev_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+int octeontx_dev_set_link_up(struct rte_eth_dev *eth_dev);
+int octeontx_dev_set_link_down(struct rte_eth_dev *eth_dev);
+
+/* Flow control */
+int octeontx_dev_flow_ctrl_init(struct rte_eth_dev *dev);
+int octeontx_dev_flow_ctrl_fini(struct rte_eth_dev *dev);
+int octeontx_dev_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+int octeontx_dev_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+
+#endif /* __OCTEONTX_ETHDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev_ops.c b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev_ops.c
new file mode 100644
index 000000000..ff627a68e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev_ops.c
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_malloc.h>
+
+#include "octeontx_ethdev.h"
+#include "octeontx_logs.h"
+#include "octeontx_rxtx.h"
+
+static int
+octeontx_vlan_hw_filter(struct octeontx_nic *nic, uint8_t flag)
+{
+ struct octeontx_vlan_info *vlan = &nic->vlan_info;
+ pki_port_vlan_filter_config_t fltr_conf;
+ int rc = 0;
+
+ if (vlan->filter_on == flag)
+ return rc;
+
+ fltr_conf.port_type = OCTTX_PORT_TYPE_NET;
+ fltr_conf.fltr_conf = flag;
+
+ rc = octeontx_pki_port_vlan_fltr_config(nic->port_id, &fltr_conf);
+ if (rc != 0) {
+		octeontx_log_err("Failed to configure VLAN HW filter for port %d",
+ nic->port_id);
+ goto done;
+ }
+
+ vlan->filter_on = flag;
+
+done:
+ return rc;
+}
+
+int
+octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_eth_rxmode *rxmode;
+ int rc = 0;
+
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ octeontx_log_err("Extend offload not supported");
+ return -ENOTSUP;
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ octeontx_log_err("VLAN strip offload not supported");
+ return -ENOTSUP;
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ rc = octeontx_vlan_hw_filter(nic, true);
+ if (rc)
+ goto done;
+
+ nic->rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ nic->rx_offload_flags |= OCCTX_RX_VLAN_FLTR_F;
+ } else {
+ rc = octeontx_vlan_hw_filter(nic, false);
+ if (rc)
+ goto done;
+
+ nic->rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ nic->rx_offload_flags &= ~OCCTX_RX_VLAN_FLTR_F;
+ }
+ }
+
+done:
+ return rc;
+}
+
+int
+octeontx_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_vlan_info *vlan = &nic->vlan_info;
+ pki_port_vlan_filter_entry_config_t fltr_entry;
+ struct vlan_entry *entry = NULL;
+ int entry_count = 0;
+ int rc = -EINVAL;
+
+ if (on) {
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next)
+ if (entry->vlan_id == vlan_id) {
+				octeontx_log_dbg("VLAN id is already set");
+ return 0;
+ }
+ } else {
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next)
+ entry_count++;
+
+ if (!entry_count)
+ return 0;
+ }
+
+ fltr_entry.port_type = OCTTX_PORT_TYPE_NET;
+ fltr_entry.vlan_tpid = RTE_ETHER_TYPE_VLAN;
+ fltr_entry.vlan_id = vlan_id;
+ fltr_entry.entry_conf = on;
+
+ if (on) {
+ entry = rte_zmalloc("octeontx_nic_vlan_entry",
+ sizeof(struct vlan_entry), 0);
+ if (!entry) {
+ octeontx_log_err("Failed to allocate memory");
+ return -ENOMEM;
+ }
+ }
+
+ rc = octeontx_pki_port_vlan_fltr_entry_config(nic->port_id,
+ &fltr_entry);
+ if (rc != 0) {
+		octeontx_log_err("Failed to configure VLAN filter entry "
+ "for port %d", nic->port_id);
+ if (entry)
+ rte_free(entry);
+
+ goto done;
+ }
+
+ if (on) {
+ entry->vlan_id = vlan_id;
+ TAILQ_INSERT_HEAD(&vlan->fltr_tbl, entry, next);
+ } else {
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ if (entry->vlan_id == vlan_id) {
+ TAILQ_REMOVE(&vlan->fltr_tbl, entry, next);
+ rte_free(entry);
+ break;
+ }
+ }
+ }
+
+done:
+ return rc;
+}
+
+int
+octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int rc;
+
+ TAILQ_INIT(&nic->vlan_info.fltr_tbl);
+
+ rc = octeontx_dev_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ if (rc)
+ octeontx_log_err("Failed to set vlan offload rc=%d", rc);
+
+ return rc;
+}
+
+int
+octeontx_dev_vlan_offload_fini(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_vlan_info *vlan = &nic->vlan_info;
+ pki_port_vlan_filter_entry_config_t fltr_entry;
+ struct vlan_entry *entry;
+ int rc = 0;
+
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ fltr_entry.port_type = OCTTX_PORT_TYPE_NET;
+ fltr_entry.vlan_tpid = RTE_ETHER_TYPE_VLAN;
+ fltr_entry.vlan_id = entry->vlan_id;
+ fltr_entry.entry_conf = 0;
+
+ rc = octeontx_pki_port_vlan_fltr_entry_config(nic->port_id,
+ &fltr_entry);
+ if (rc != 0) {
+			octeontx_log_err("Failed to configure VLAN filter entry "
+ "for port %d", nic->port_id);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int
+octeontx_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ int rc, i;
+
+ rc = octeontx_bgx_port_set_link_state(nic->port_id, true);
+ if (rc)
+ goto done;
+
+ /* Start tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ octeontx_dev_tx_queue_start(eth_dev, i);
+
+done:
+ return rc;
+}
+
+int
+octeontx_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ int i;
+
+ /* Stop tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ octeontx_dev_tx_queue_stop(eth_dev, i);
+
+ return octeontx_bgx_port_set_link_state(nic->port_id, false);
+}
+
+int
+octeontx_dev_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ octeontx_mbox_bgx_port_fc_cfg_t conf;
+ int rc;
+
+ memset(&conf, 0, sizeof(octeontx_mbox_bgx_port_fc_cfg_t));
+
+ rc = octeontx_bgx_port_flow_ctrl_cfg(nic->port_id, &conf);
+ if (rc)
+ return rc;
+
+ if (conf.rx_pause && conf.tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (conf.rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (conf.tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ /* low_water & high_water values are in Bytes */
+ fc_conf->low_water = conf.low_water;
+ fc_conf->high_water = conf.high_water;
+
+ return rc;
+}
+
+int
+octeontx_dev_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_fc_info *fc = &nic->fc;
+ octeontx_mbox_bgx_port_fc_cfg_t conf;
+ uint8_t tx_pause, rx_pause;
+ uint16_t max_high_water;
+ int rc;
+
+ if (fc_conf->pause_time || fc_conf->mac_ctrl_frame_fwd ||
+ fc_conf->autoneg) {
+		octeontx_log_err("Unsupported flow control parameters: "
+				"pause_time, mac_ctrl_frame_fwd and autoneg");
+ return -EINVAL;
+ }
+
+ if (fc_conf->high_water == fc->high_water &&
+ fc_conf->low_water == fc->low_water &&
+ fc_conf->mode == fc->mode)
+ return 0;
+
+ max_high_water = fc->rx_fifosz - OCTEONTX_BGX_RSVD_RX_FIFOBYTES;
+
+ if (fc_conf->high_water > max_high_water ||
+ fc_conf->high_water < fc_conf->low_water) {
+		octeontx_log_err("Invalid high/low water values: "
+				"high_water (in bytes) must be <= 0x%x",
+				max_high_water);
+ return -EINVAL;
+ }
+
+ if (fc_conf->high_water % BIT(4) || fc_conf->low_water % BIT(4)) {
+ octeontx_log_err("High/low water value must be multiple of 16");
+ return -EINVAL;
+ }
+
+ rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
+ (fc_conf->mode == RTE_FC_RX_PAUSE);
+ tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
+ (fc_conf->mode == RTE_FC_TX_PAUSE);
+
+ conf.high_water = fc_conf->high_water;
+ conf.low_water = fc_conf->low_water;
+ conf.fc_cfg = BGX_PORT_FC_CFG_SET;
+ conf.rx_pause = rx_pause;
+ conf.tx_pause = tx_pause;
+
+ rc = octeontx_bgx_port_flow_ctrl_cfg(nic->port_id, &conf);
+ if (rc)
+ return rc;
+
+ fc->high_water = fc_conf->high_water;
+ fc->low_water = fc_conf->low_water;
+ fc->mode = fc_conf->mode;
+
+ return rc;
+}
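+
+/*
+ * Usage sketch (illustrative): callers go through the generic ethdev API
+ * and must respect the constraints checked above: watermarks are in bytes,
+ * multiples of 16, and high_water may not exceed the Rx FIFO size minus
+ * the reserved area. For example:
+ *
+ *	struct rte_eth_fc_conf fc = {
+ *		.mode = RTE_FC_FULL,
+ *		.high_water = 2048,
+ *		.low_water = 1024,
+ *	};
+ *	rc = rte_eth_dev_flow_ctrl_set(port_id, &fc);
+ */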
+
+int
+octeontx_dev_flow_ctrl_init(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_fc_info *fc = &nic->fc;
+ struct rte_eth_fc_conf fc_conf;
+ int rc;
+
+ rc = octeontx_dev_flow_ctrl_get(dev, &fc_conf);
+ if (rc) {
+ octeontx_log_err("Failed to get flow control info");
+ return rc;
+ }
+
+ fc->def_highmark = fc_conf.high_water;
+ fc->def_lowmark = fc_conf.low_water;
+ fc->def_mode = fc_conf.mode;
+
+ return rc;
+}
+
+int
+octeontx_dev_flow_ctrl_fini(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_fc_info *fc = &nic->fc;
+ struct rte_eth_fc_conf fc_conf;
+
+ memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+
+ /* Restore flow control parameters with default values */
+ fc_conf.high_water = fc->def_highmark;
+ fc_conf.low_water = fc->def_lowmark;
+ fc_conf.mode = fc->def_mode;
+
+ return octeontx_dev_flow_ctrl_set(dev, &fc_conf);
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h b/src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h
new file mode 100644
index 000000000..dec8042c6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_LOGS_H__
+#define __OCTEONTX_LOGS_H__
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_net_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, ">>")
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_net_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define PMD_MBOX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_net_logtype_mbox, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define octeontx_log_info(fmt, args...) \
+ RTE_LOG(INFO, PMD, fmt "\n", ## args)
+
+#define octeontx_log_err(s, ...) PMD_INIT_LOG(ERR, s, ##__VA_ARGS__)
+#define octeontx_log_dbg(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__)
+#define octeontx_mbox_log(s, ...) PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__)
+
+#define PMD_RX_LOG PMD_DRV_LOG
+#define PMD_TX_LOG PMD_DRV_LOG
+
+extern int otx_net_logtype_init;
+extern int otx_net_logtype_driver;
+extern int otx_net_logtype_mbox;
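+
+/*
+ * Registration sketch (illustrative; the actual registration lives in the
+ * driver init path, not in this header): each logtype above is expected to
+ * be obtained once, along the lines of:
+ *
+ *	otx_net_logtype_init = rte_log_register("pmd.net.octeontx.init");
+ *	if (otx_net_logtype_init >= 0)
+ *		rte_log_set_level(otx_net_logtype_init, RTE_LOG_NOTICE);
+ */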
+
+#endif /* __OCTEONTX_LOGS_H__*/
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c
new file mode 100644
index 000000000..bbe43a874
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+
+#include "octeontx_ethdev.h"
+#include "octeontx_rxtx.h"
+#include "octeontx_logs.h"
+
+uint16_t __rte_hot
+octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct octeontx_rxq *rxq;
+ struct rte_event ev;
+ size_t count;
+ uint16_t valid_event;
+
+ rxq = rx_queue;
+ count = 0;
+ while (count < nb_pkts) {
+ valid_event = rte_event_dequeue_burst(rxq->evdev,
+ rxq->ev_ports, &ev,
+ 1, 0);
+ if (!valid_event)
+ break;
+ rx_pkts[count++] = ev.mbuf;
+ }
+
+	return count; /* number of packets received */
+}
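+
+/*
+ * Note: Rx is serviced through the SSO event device; each packet arrives
+ * as a single rte_event carrying ev.mbuf, dequeued with a zero timeout so
+ * the burst returns as soon as no more work is pending.
+ */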
+
+#define T(name, f3, f2, f1, f0, sz, flags) \
+static uint16_t __rte_noinline __rte_hot \
+octeontx_xmit_pkts_ ##name(void *tx_queue, \
+ struct rte_mbuf **tx_pkts, uint16_t pkts) \
+{ \
+ uint64_t cmd[(sz)]; \
+ \
+ return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, \
+ flags); \
+}
+
+OCCTX_TX_FASTPATH_MODES
+#undef T
+
+void __rte_hot
+octeontx_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ const eth_tx_burst_t tx_burst_func[2][2][2][2] = {
+#define T(name, f3, f2, f1, f0, sz, flags) \
+ [f3][f2][f1][f0] = octeontx_xmit_pkts_ ##name,
+
+OCCTX_TX_FASTPATH_MODES
+#undef T
+ };
+
+ dev->tx_pkt_burst = tx_burst_func
+ [!!(nic->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
+ [!!(nic->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ [!!(nic->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
+ [!!(nic->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];
+}
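+
+/*
+ * Selection example (illustrative): a port configured for multi-segment Tx
+ * plus inner L3/L4 checksum ends up with tx_offload_flags =
+ * OCCTX_TX_MULTI_SEG_F | OCCTX_TX_OFFLOAD_L3_L4_CSUM_F, which indexes
+ * tx_burst_func[0][0][1][1], i.e. octeontx_xmit_pkts_l3l4csum_mseg.
+ */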
diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h
new file mode 100644
index 000000000..8b46105b6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h
@@ -0,0 +1,504 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_RXTX_H__
+#define __OCTEONTX_RXTX_H__
+
+#include <rte_ethdev_driver.h>
+
+#define OFFLOAD_FLAGS \
+ uint16_t rx_offload_flags; \
+ uint16_t tx_offload_flags
+
+#define BIT(nr) (1UL << (nr))
+
+#define OCCTX_RX_OFFLOAD_NONE (0)
+#define OCCTX_RX_MULTI_SEG_F BIT(0)
+#define OCCTX_RX_OFFLOAD_CSUM_F BIT(1)
+#define OCCTX_RX_VLAN_FLTR_F BIT(2)
+
+#define OCCTX_TX_OFFLOAD_NONE (0)
+#define OCCTX_TX_MULTI_SEG_F BIT(0)
+#define OCCTX_TX_OFFLOAD_L3_L4_CSUM_F BIT(1)
+#define OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(2)
+#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F BIT(3)
+
+/* Packet type table */
+#define PTYPE_SIZE OCCTX_PKI_LTYPE_LAST
+
+/* octeontx send header sub descriptor structure */
+RTE_STD_C11
+union octeontx_send_hdr_w0_u {
+ uint64_t u;
+ struct {
+ uint64_t total : 16;
+ uint64_t markptr : 8;
+ uint64_t l3ptr : 8;
+ uint64_t l4ptr : 8;
+ uint64_t ii : 1;
+ uint64_t shp_dis : 1;
+ uint64_t ckle : 1;
+ uint64_t cklf : 2;
+ uint64_t ckl3 : 1;
+ uint64_t ckl4 : 2;
+ uint64_t p : 1;
+ uint64_t format : 7;
+ uint64_t tstamp : 1;
+ uint64_t tso_eom : 1;
+ uint64_t df : 1;
+ uint64_t tso : 1;
+ uint64_t n2 : 1;
+ uint64_t scntn1 : 3;
+ };
+};
+
+RTE_STD_C11
+union octeontx_send_hdr_w1_u {
+ uint64_t u;
+ struct {
+ uint64_t tso_mss : 14;
+ uint64_t shp_ra : 2;
+ uint64_t tso_sb : 8;
+ uint64_t leptr : 8;
+ uint64_t lfptr : 8;
+ uint64_t shp_chg : 9;
+ uint64_t tso_fn : 7;
+ uint64_t l2len : 8;
+ };
+};
+
+struct octeontx_send_hdr_s {
+ union octeontx_send_hdr_w0_u w0;
+ union octeontx_send_hdr_w1_u w1;
+};
+
+static const uint32_t __rte_cache_aligned
+ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
+ [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
+ [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+ [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
+ [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
+ [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
+ [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
+ [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+ [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_ESP,
+ [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+ [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV4][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [LC_IPV4_OPT][LE_NONE][LF_NONE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
+ [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
+ [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4_OPT][LE_NONE][LF_TCP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [LC_IPV4_OPT][LE_NONE][LF_UDP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [LC_IPV4_OPT][LE_NONE][LF_GRE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_ESP,
+ [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+ [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+ [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6][LE_NONE][LF_NVGRE] =
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [LC_IPV6_OPT][LE_NONE][LF_NONE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
+			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_ESP,
+ [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
+ [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV6_OPT][LE_NONE][LF_TCP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [LC_IPV6_OPT][LE_NONE][LF_UDP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [LC_IPV6_OPT][LE_NONE][LF_GRE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+};
+
+static __rte_always_inline uint64_t
+octeontx_pktmbuf_detach(struct rte_mbuf *m)
+{
+ struct rte_mempool *mp = m->pool;
+ uint32_t mbuf_size, buf_len;
+ struct rte_mbuf *md;
+ uint16_t priv_size;
+ uint16_t refcount;
+
+ /* Update refcount of direct mbuf */
+ md = rte_mbuf_from_indirect(m);
+ refcount = rte_mbuf_refcnt_update(md, -1);
+
+ priv_size = rte_pktmbuf_priv_size(mp);
+ mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
+ buf_len = rte_pktmbuf_data_room_size(mp);
+
+ m->priv_size = priv_size;
+ m->buf_addr = (char *)m + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+ m->buf_len = (uint16_t)buf_len;
+ rte_pktmbuf_reset_headroom(m);
+ m->data_len = 0;
+ m->ol_flags = 0;
+ m->next = NULL;
+ m->nb_segs = 1;
+
+ /* Now indirect mbuf is safe to free */
+ rte_pktmbuf_free(m);
+
+ if (refcount == 0) {
+ rte_mbuf_refcnt_set(md, 1);
+ md->data_len = 0;
+ md->ol_flags = 0;
+ md->next = NULL;
+ md->nb_segs = 1;
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static __rte_always_inline uint64_t
+octeontx_prefree_seg(struct rte_mbuf *m)
+{
+ if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+ if (!RTE_MBUF_DIRECT(m))
+ return octeontx_pktmbuf_detach(m);
+
+ m->next = NULL;
+ m->nb_segs = 1;
+ return 0;
+ } else if (rte_mbuf_refcnt_update(m, -1) == 0) {
+ if (!RTE_MBUF_DIRECT(m))
+ return octeontx_pktmbuf_detach(m);
+
+ rte_mbuf_refcnt_set(m, 1);
+ m->next = NULL;
+ m->nb_segs = 1;
+ return 0;
+ }
+
+	/* Mbuf refcount is greater than 1, so the buffer must not be freed */
+ return 1;
+}
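+
+/*
+ * Note: the return value above feeds SEND_HDR[DF] / SG_DESC[I] in the
+ * xmit-prepare helpers below: 1 tells the PKO not to free the buffer
+ * (it is still referenced), 0 lets the PKO return it to its pool.
+ */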
+
+static __rte_always_inline void
+octeontx_tx_checksum_offload(uint64_t *cmd_buf, const uint16_t flags,
+ struct rte_mbuf *m)
+{
+ struct octeontx_send_hdr_s *send_hdr =
+ (struct octeontx_send_hdr_s *)cmd_buf;
+ uint64_t ol_flags = m->ol_flags;
+
+ /* PKO Checksum L4 Algorithm Enumeration
+ * 0x0 - No checksum
+ * 0x1 - UDP L4 checksum
+ * 0x2 - TCP L4 checksum
+ * 0x3 - SCTP L4 checksum
+ */
+ const uint8_t csum = (!(((ol_flags ^ PKT_TX_UDP_CKSUM) >> 52) & 0x3) +
+ (!(((ol_flags ^ PKT_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
+ (!(((ol_flags ^ PKT_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));
+
+ const uint8_t is_tunnel_parsed = (!!(ol_flags & PKT_TX_TUNNEL_GTP) ||
+ !!(ol_flags & PKT_TX_TUNNEL_VXLAN_GPE) ||
+ !!(ol_flags & PKT_TX_TUNNEL_VXLAN) ||
+ !!(ol_flags & PKT_TX_TUNNEL_GRE) ||
+ !!(ol_flags & PKT_TX_TUNNEL_GENEVE) ||
+ !!(ol_flags & PKT_TX_TUNNEL_IP) ||
+ !!(ol_flags & PKT_TX_TUNNEL_IPIP));
+
+ const uint8_t csum_outer = (!!(ol_flags & PKT_TX_OUTER_UDP_CKSUM) ||
+ !!(ol_flags & PKT_TX_TUNNEL_UDP));
+ const uint8_t outer_l2_len = m->outer_l2_len;
+ const uint8_t l2_len = m->l2_len;
+
+ if ((flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
+ (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) {
+ if (is_tunnel_parsed) {
+ /* Outer L3 */
+ send_hdr->w0.l3ptr = outer_l2_len;
+ send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
+			/* Set ckl3 for PKO to calculate IPV4 header checksum */
+ send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);
+
+ /* Outer L4 */
+ send_hdr->w0.ckl4 = csum_outer;
+
+ /* Inner L3 */
+ send_hdr->w1.leptr = send_hdr->w0.l4ptr + l2_len;
+ send_hdr->w1.lfptr = send_hdr->w1.leptr + m->l3_len;
+			/* Set ckle for PKO to calculate inner IPV4 header
+			 * checksum.
+			 */
+ send_hdr->w0.ckle = !!(ol_flags & PKT_TX_IPV4);
+
+ /* Inner L4 */
+ send_hdr->w0.cklf = csum;
+ } else {
+ /* Inner L3 */
+ send_hdr->w0.l3ptr = l2_len;
+ send_hdr->w0.l4ptr = l2_len + m->l3_len;
+			/* Set ckl3 for PKO to calculate IPV4 header checksum */
+ send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);
+
+ /* Inner L4 */
+ send_hdr->w0.ckl4 = csum;
+ }
+ } else if (flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
+ /* Outer L3 */
+ send_hdr->w0.l3ptr = outer_l2_len;
+ send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
+		/* Set ckl3 for PKO to calculate IPV4 header checksum */
+ send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);
+
+ /* Outer L4 */
+ send_hdr->w0.ckl4 = csum_outer;
+ } else if (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F) {
+ /* Inner L3 */
+ send_hdr->w0.l3ptr = l2_len;
+ send_hdr->w0.l4ptr = l2_len + m->l3_len;
+		/* Set ckl3 for PKO to calculate IPV4 header checksum */
+ send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);
+
+ /* Inner L4 */
+ send_hdr->w0.ckl4 = csum;
+ }
+}
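+
+/*
+ * Caller contract sketch (illustrative): for the inner-checksum path above,
+ * the application is expected to populate each mbuf along the lines of:
+ *
+ *	m->l2_len = sizeof(struct rte_ether_hdr);
+ *	m->l3_len = sizeof(struct rte_ipv4_hdr);
+ *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
+ */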
+
+static __rte_always_inline uint16_t
+__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
+ const uint16_t flag)
+{
+ uint16_t gaura_id, nb_desc = 0;
+
+ /* Setup PKO_SEND_HDR_S */
+ cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
+ cmd_buf[nb_desc++] = 0x0;
+
+ /* Enable tx checksum offload */
+ if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
+ (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
+ octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);
+
+	/* The SEND_HDR[DF] bit controls whether the buffer is freed,
+	 * since SG_DESC[I] and SEND_HDR[II] are clear.
+	 */
+ if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
+		cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt) << 58);
+
+ /* Mark mempool object as "put" since it is freed by PKO */
+ if (!(cmd_buf[0] & (1ULL << 58)))
+ __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
+ 1, 0);
+ /* Get the gaura Id */
+ gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
+ tx_pkt->pool->pool_id);
+
+ /* Setup PKO_SEND_BUFLINK_S */
+ cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
+ PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
+ PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
+ tx_pkt->data_len;
+ cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
+
+ return nb_desc;
+}
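+
+/*
+ * Note: the single-segment command built above is four 64-bit words (two
+ * for PKO_SEND_HDR_S, two for PKO_SEND_BUFLINK_S), matching the "sz" of 4
+ * in OCCTX_TX_FASTPATH_MODES; the gather path below may use up to 14 words.
+ */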
+
+static __rte_always_inline uint16_t
+__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
+ const uint16_t flag)
+{
+ uint16_t nb_segs, nb_desc = 0;
+ uint16_t gaura_id, len = 0;
+ struct rte_mbuf *m_next = NULL;
+
+ nb_segs = tx_pkt->nb_segs;
+ /* Setup PKO_SEND_HDR_S */
+ cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
+ cmd_buf[nb_desc++] = 0x0;
+
+ /* Enable tx checksum offload */
+ if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
+ (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
+ octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);
+
+ do {
+ m_next = tx_pkt->next;
+		/* Handle the case where mbufs belong to different pools,
+		 * e.g. after IP fragmentation.
+		 */
+ gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
+ tx_pkt->pool->pool_id);
+
+ /* Setup PKO_SEND_GATHER_S */
+ cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC |
+ PKO_SEND_GATHER_LDTYPE(0x1ull) |
+ PKO_SEND_GATHER_GAUAR((long)gaura_id) |
+ tx_pkt->data_len;
+
+		/* The SG_DESC[I] bit controls whether the buffer is freed,
+		 * since SEND_HDR[DF] and SEND_HDR[II] are clear.
+		 */
+ if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
+ cmd_buf[nb_desc] |=
+ (octeontx_prefree_seg(tx_pkt) << 57);
+ }
+
+ /* Mark mempool object as "put" since it is freed by
+ * PKO.
+ */
+ if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
+ tx_pkt->next = NULL;
+ __mempool_check_cookies(tx_pkt->pool,
+ (void **)&tx_pkt, 1, 0);
+ }
+ nb_desc++;
+
+ cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
+
+ nb_segs--;
+ len += tx_pkt->data_len;
+ tx_pkt = m_next;
+ } while (nb_segs);
+
+ return nb_desc;
+}
+
+static __rte_always_inline uint16_t
+__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts, uint64_t *cmd_buf,
+ const uint16_t flags)
+{
+ struct octeontx_txq *txq = tx_queue;
+ octeontx_dq_t *dq = &txq->dq;
+ uint16_t count = 0, nb_desc;
+
+	rte_cio_wmb();
+
+ while (count < nb_pkts) {
+ if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
+ break;
+
+ if (flags & OCCTX_TX_MULTI_SEG_F) {
+ nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
+ cmd_buf, flags);
+ } else {
+ nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
+ cmd_buf, flags);
+ }
+
+ octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
+ nb_desc);
+
+ count++;
+ }
+ return count;
+}
+
+uint16_t
+octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+#define L3L4CSUM_F OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
+#define OL3OL4CSUM_F OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
+#define NOFF_F OCCTX_TX_OFFLOAD_MBUF_NOFF_F
+#define MULT_F OCCTX_TX_MULTI_SEG_F
+
+/* [NOFF] [OL3OL4CSUM_F] [L3L4CSUM_F] [MULTI_SEG] */
+#define OCCTX_TX_FASTPATH_MODES \
+T(no_offload, 0, 0, 0, 0, 4, \
+ OCCTX_TX_OFFLOAD_NONE) \
+T(mseg, 0, 0, 0, 1, 14, \
+ MULT_F) \
+T(l3l4csum, 0, 0, 1, 0, 4, \
+ L3L4CSUM_F) \
+T(l3l4csum_mseg, 0, 0, 1, 1, 14, \
+ L3L4CSUM_F | MULT_F) \
+T(ol3ol4csum, 0, 1, 0, 0, 4, \
+ OL3OL4CSUM_F) \
+T(ol3l4csum_mseg, 0, 1, 0, 1, 14, \
+ OL3OL4CSUM_F | MULT_F) \
+T(ol3l4csum_l3l4csum, 0, 1, 1, 0, 4, \
+ OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ol3l4csum_l3l4csum_mseg, 0, 1, 1, 1, 14, \
+ OL3OL4CSUM_F | L3L4CSUM_F | MULT_F) \
+T(noff, 1, 0, 0, 0, 4, \
+ NOFF_F) \
+T(noff_mseg, 1, 0, 0, 1, 14, \
+ NOFF_F | MULT_F) \
+T(noff_l3l4csum, 1, 0, 1, 0, 4, \
+ NOFF_F | L3L4CSUM_F) \
+T(noff_l3l4csum_mseg, 1, 0, 1, 1, 14, \
+ NOFF_F | L3L4CSUM_F | MULT_F) \
+T(noff_ol3ol4csum, 1, 1, 0, 0, 4, \
+ NOFF_F | OL3OL4CSUM_F) \
+T(noff_ol3ol4csum_mseg, 1, 1, 0, 1, 14, \
+ NOFF_F | OL3OL4CSUM_F | MULT_F) \
+T(noff_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 4, \
+ NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(noff_ol3ol4csum_l3l4csum_mseg, 1, 1, 1, 1, 14, \
+ NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F | \
+ MULT_F)
+
+/* RX offload macros */
+#define VLAN_FLTR_F OCCTX_RX_VLAN_FLTR_F
+#define CSUM_F OCCTX_RX_OFFLOAD_CSUM_F
+#define MULT_RX_F OCCTX_RX_MULTI_SEG_F
+
+/* [VLAN_FLTR] [CSUM_F] [MULTI_SEG] */
+#define OCCTX_RX_FASTPATH_MODES \
+R(no_offload, 0, 0, 0, OCCTX_RX_OFFLOAD_NONE) \
+R(mseg, 0, 0, 1, MULT_RX_F) \
+R(csum, 0, 1, 0, CSUM_F) \
+R(csum_mseg, 0, 1, 1, CSUM_F | MULT_RX_F) \
+R(vlan, 1, 0, 0, VLAN_FLTR_F) \
+R(vlan_mseg, 1, 0, 1, VLAN_FLTR_F | MULT_RX_F) \
+R(vlan_csum, 1, 1, 0, VLAN_FLTR_F | CSUM_F) \
+R(vlan_csum_mseg, 1, 1, 1, CSUM_F | VLAN_FLTR_F | \
+ MULT_RX_F)
+
+#endif /* __OCTEONTX_RXTX_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map b/src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map
new file mode 100644
index 000000000..f7cae02fa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map
@@ -0,0 +1,7 @@
+DPDK_20.0 {
+ global:
+
+ rte_octeontx_pchan_map;
+
+ local: *;
+};